xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision 5bf5ca772c6de2d53344a78cf461447cc322ccea)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2016 Avago Technologies
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
29  *
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 /* Communications core for Avago Technologies (LSI) MPT3 */
36 
37 /* TODO Move headers to mprvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
44 #include <sys/bus.h>
45 #include <sys/conf.h>
46 #include <sys/bio.h>
47 #include <sys/malloc.h>
48 #include <sys/uio.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/sbuf.h>
55 
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58 #include <sys/rman.h>
59 
60 #include <machine/stdarg.h>
61 
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_xpt_periph.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
71 #if __FreeBSD_version >= 900026
72 #include <cam/scsi/smp_all.h>
73 #endif
74 
75 #include <dev/nvme/nvme.h>
76 
77 #include <dev/mpr/mpi/mpi2_type.h>
78 #include <dev/mpr/mpi/mpi2.h>
79 #include <dev/mpr/mpi/mpi2_ioc.h>
80 #include <dev/mpr/mpi/mpi2_sas.h>
81 #include <dev/mpr/mpi/mpi2_pci.h>
82 #include <dev/mpr/mpi/mpi2_cnfg.h>
83 #include <dev/mpr/mpi/mpi2_init.h>
84 #include <dev/mpr/mpi/mpi2_tool.h>
85 #include <dev/mpr/mpr_ioctl.h>
86 #include <dev/mpr/mprvar.h>
87 #include <dev/mpr/mpr_table.h>
88 #include <dev/mpr/mpr_sas.h>
89 
90 #define MPRSAS_DISCOVERY_TIMEOUT	20
91 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
92 
/*
 * static array to check SCSI OpCode for EEDP protection bits
 *
 * Indexed by the full 8-bit SCSI opcode; each row covers 16 opcodes, so
 * row n holds opcodes 0xn0-0xnF.  A non-zero entry gives the EEDP flag
 * operation to use for that opcode (check/remove for reads, insert for
 * writes and verifies); zero means the opcode carries no protection info.
 */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
117 
118 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
119 
120 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
121 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
123 static void mprsas_poll(struct cam_sim *sim);
124 static void mprsas_scsiio_timeout(void *data);
125 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
126 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
127 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
128 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
129 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
130 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
131     struct mpr_command *cm);
132 static void mprsas_async(void *callback_arg, uint32_t code,
133     struct cam_path *path, void *arg);
134 #if (__FreeBSD_version < 901503) || \
135     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
136 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
137     struct ccb_getdev *cgd);
138 static void mprsas_read_cap_done(struct cam_periph *periph,
139     union ccb *done_ccb);
140 #endif
141 static int mprsas_send_portenable(struct mpr_softc *sc);
142 static void mprsas_portenable_complete(struct mpr_softc *sc,
143     struct mpr_command *cm);
144 
145 #if __FreeBSD_version >= 900026
146 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
148     uint64_t sasaddr);
149 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
150 #endif //FreeBSD_version >= 900026
151 
152 struct mprsas_target *
153 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
154     uint16_t handle)
155 {
156 	struct mprsas_target *target;
157 	int i;
158 
159 	for (i = start; i < sassc->maxtargets; i++) {
160 		target = &sassc->targets[i];
161 		if (target->handle == handle)
162 			return (target);
163 	}
164 
165 	return (NULL);
166 }
167 
/* we need to freeze the simq during attach and diag reset, to avoid failing
 * commands before device handles have been found by discovery.  Since
 * discovery involves reading config pages and possibly sending commands,
 * discovery actions may continue even after we receive the end of discovery
 * event, so refcount discovery actions instead of assuming we can unfreeze
 * the simq when we get the event.
 */
void
mprsas_startup_increment(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	/* Refcounting only matters while the driver is still in startup. */
	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s freezing simq\n", __func__);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			/* On CAM versions that support it, also hold off the
			 * boot process until discovery settles; released in
			 * mprsas_startup_decrement(). */
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
195 
196 void
197 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
198 {
199 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
200 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
201 		xpt_release_simq(sassc->sim, 1);
202 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
203 	}
204 }
205 
/*
 * Drop one discovery-action reference taken by mprsas_startup_increment().
 * When the count hits zero, startup is finished: release the simq (and the
 * boot hold on CAM versions that support it) and rescan for topology.
 */
void
mprsas_startup_decrement(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPRSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			xpt_release_boot();
#else
			/* Older CAM has no boot hold; trigger a full-bus
			 * rescan instead (NULL target == wildcard). */
			mprsas_rescan_target(sassc->sc, NULL);
#endif
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
231 
/* The firmware requires us to stop sending commands when we're doing task
 * management, so refcount the TMs and keep the simq frozen when any are in
 * use.
 */
struct mpr_command *
mprsas_alloc_tm(struct mpr_softc *sc)
{
	/* TMs come from the dedicated high-priority command pool; may
	 * return NULL when that pool is exhausted. */
	MPR_FUNCTRACE(sc);
	return (mpr_alloc_high_priority_command(sc));
}
245 
/*
 * Release a task management command: undo the per-device queue freeze and
 * INRESET state associated with the TM, free any CCB/path used to freeze
 * the devq, and return the command to the high-priority pool.
 */
void
mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
{
	/* 0xFFFFFFFF prints as -1 via %d below — sentinel for "no target". */
	int target_id = 0xFFFFFFFF;

	MPR_FUNCTRACE(sc);
	if (tm == NULL)
		return;

	/*
	 * For TM's the devq is frozen for the device.  Unfreeze it here and
	 * free the resources used for freezing the devq.  Must clear the
	 * INRESET flag as well or scsi I/O will not work.
	 */
	if (tm->cm_targ != NULL) {
		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
		target_id = tm->cm_targ->tid;
	}
	if (tm->cm_ccb) {
		mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
		    target_id);
		/* Release the freeze, then free the path and CCB that were
		 * allocated solely to perform it. */
		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
		xpt_free_path(tm->cm_ccb->ccb_h.path);
		xpt_free_ccb(tm->cm_ccb);
	}

	mpr_free_high_priority_command(sc, tm);
}
274 
/*
 * Schedule an asynchronous CAM rescan of one target, or of the whole bus
 * when 'targ' is NULL.  Failures to allocate the CCB or path are logged
 * and silently dropped — the rescan simply does not happen.
 */
void
mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
{
	struct mprsas_softc *sassc = sc->sassc;
	path_id_t pathid;
	target_id_t targetid;
	union ccb *ccb;

	MPR_FUNCTRACE(sc);
	pathid = cam_sim_path(sassc->sim);
	if (targ == NULL)
		targetid = CAM_TARGET_WILDCARD;
	else
		/* Target ID is the index into the targets[] array. */
		targetid = targ - sassc->targets;

	/*
	 * Allocate a CCB and schedule a rescan.
	 */
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
		return;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
		xpt_free_ccb(ccb);
		return;
	}

	/* Wildcard target => scan the whole bus, else just that target. */
	if (targetid == CAM_TARGET_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else
		ccb->ccb_h.func_code = XPT_SCAN_TGT;

	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
	/* xpt_rescan() takes ownership of the CCB and frees it on completion. */
	xpt_rescan(ccb);
}
314 
/*
 * printf-style logging helper for a command: prefixes the message with the
 * CAM path (or a "noperiph" sim/target/lun tuple when no CCB is attached),
 * the SCSI CDB for SCSI I/O, and the SMID.  No-op unless 'level' is enabled
 * in the adapter's debug mask.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	/* Fixed-size sbuf over str[]; output longer than 192 bytes is
	 * truncated. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
359 
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove().  Unlike bare-drive removal
 * (mprsas_remove_device), no SAS_OP_REMOVE_DEVICE follow-up is needed for
 * volumes — on success the target slot is simply cleared here.
 */
static void
mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	struct mprsas_target *targ;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	/* Handle was stashed in cm_complete_data when the TM was built. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		/* NOTE(review): targ was already set to tm->cm_targ above;
		 * this reassignment is redundant but harmless. */
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;
	}

	mprsas_free_tm(sc, tm);
}
423 
424 
425 /*
426  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
427  * Otherwise Volume Delete is same as Bare Drive Removal.
428  */
429 void
430 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
431 {
432 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
433 	struct mpr_softc *sc;
434 	struct mpr_command *cm;
435 	struct mprsas_target *targ = NULL;
436 
437 	MPR_FUNCTRACE(sassc->sc);
438 	sc = sassc->sc;
439 
440 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
441 	if (targ == NULL) {
442 		/* FIXME: what is the action? */
443 		/* We don't know about this device? */
444 		mpr_dprint(sc, MPR_ERROR,
445 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
446 		return;
447 	}
448 
449 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
450 
451 	cm = mprsas_alloc_tm(sc);
452 	if (cm == NULL) {
453 		mpr_dprint(sc, MPR_ERROR,
454 		    "%s: command alloc failure\n", __func__);
455 		return;
456 	}
457 
458 	mprsas_rescan_target(sc, targ);
459 
460 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
461 	req->DevHandle = targ->handle;
462 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
463 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
464 
465 	/* SAS Hard Link Reset / SATA Link Reset */
466 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
467 
468 	cm->cm_targ = targ;
469 	cm->cm_data = NULL;
470 	cm->cm_desc.HighPriority.RequestFlags =
471 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
472 	cm->cm_complete = mprsas_remove_volume;
473 	cm->cm_complete_data = (void *)(uintptr_t)handle;
474 
475 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
476 	    __func__, targ->tid);
477 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
478 
479 	mpr_map_command(sc, cm);
480 }
481 
482 /*
483  * The firmware performs debounce on the link to avoid transient link errors
484  * and false removals.  When it does decide that link has been lost and a
485  * device needs to go away, it expects that the host will perform a target reset
486  * and then an op remove.  The reset has the side-effect of aborting any
487  * outstanding requests for the device, which is required for the op-remove to
488  * succeed.  It's not clear if the host should check for the device coming back
489  * alive after the reset.
490  */
/*
 * Begin removal of a bare device identified by its firmware handle: mark
 * the target in-removal, schedule a CAM rescan, and send a target-reset TM.
 * The completion handler (mprsas_remove_device) then issues the
 * SAS_OP_REMOVE_DEVICE that the firmware expects after the reset.
 */
void
mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ = NULL;

	MPR_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mprsas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
		    __func__, handle);
		return;
	}

	targ->flags |= MPRSAS_TARGET_INREMOVAL;

	cm = mprsas_alloc_tm(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
		    __func__);
		return;
	}

	mprsas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	cm->cm_complete = mprsas_remove_device;
	/* Stash the handle so the completion handler can recover it. */
	cm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);

	mpr_map_command(sc, cm);
}
545 
/*
 * Completion handler for the target-reset TM from mprsas_prepare_remove().
 * On any reply, the same command frame is reused to send the
 * SAS_OP_REMOVE_DEVICE the firmware requires, and any commands still queued
 * on the target are completed with CAM_DEV_NOT_THERE.
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	struct mpr_command *next_cm;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_map_command(sc, tm);

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
	/* NOTE: 'tm' is reused as the loop iterator below; it no longer
	 * refers to the TM command after this point.  The reset aborted
	 * these commands in firmware, so complete them back to CAM as
	 * "device not there". */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mprsas_scsiio_complete(sc, tm);
	}
}
620 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE issued by
 * mprsas_remove_device().  On success, clears the target slot and frees
 * any cached LUN records; on failure, leaves the target intact so the
 * same target id can be re-associated if the device returns.
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		/* Drop all cached per-LUN records for the departed target. */
		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}
692 
693 static int
694 mprsas_register_events(struct mpr_softc *sc)
695 {
696 	uint8_t events[16];
697 
698 	bzero(events, 16);
699 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
700 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
701 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
702 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
703 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
704 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
705 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
706 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
707 	setbit(events, MPI2_EVENT_IR_VOLUME);
708 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
709 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
710 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
711 	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
712 		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
713 		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
714 			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
715 			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
716 			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
717 		}
718 	}
719 
720 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
721 	    &sc->sassc->mprsas_eh);
722 
723 	return (0);
724 }
725 
/*
 * Attach the CAM/SAS layer to an initialized controller: allocate the
 * per-SAS softc and target array, create the SIM and its queue, register
 * the bus with CAM, freeze the simq until discovery completes, set up
 * async-event (EEDP) notification, and register for firmware events.
 * Returns 0 on success or an errno; on failure everything allocated here
 * is torn down via mpr_detach_sas().
 */
int
mpr_attach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
	/* NOTE(review): M_WAITOK malloc never returns NULL, so this check
	 * (and the one for targets below) is vestigial. */
	if (!sassc) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Cannot allocate SAS subsystem memory\n");
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mprsas_target) *
	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
	if (!sassc->targets) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Cannot allocate SAS target memory\n");
		free(sassc, M_MPR);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* Reserve the high-priority requests (and one spare) for TMs;
	 * the rest become the CAM queue depth. */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpr_dev);
	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
	    unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mpr_dev));

	mpr_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mpr_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	/* Take the initial discovery reference; this freezes the simq. */
	mprsas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
#else
		event = AC_FOUND_DEVICE;
#endif

		/*
		 * Prior to the CAM locking improvements, we can't call
		 * xpt_register_async() with a particular path specified.
		 *
		 * If a path isn't specified, xpt_register_async() will
		 * generate a wildcard path and acquire the XPT lock while
		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
		 * It will then drop the XPT lock once that is done.
		 *
		 * If a path is specified for xpt_register_async(), it will
		 * not acquire and drop the XPT lock around the call to
		 * xpt_action().  xpt_action() asserts that the caller
		 * holds the SIM lock, so the SIM lock has to be held when
		 * calling xpt_register_async() when the path is specified.
		 *
		 * But xpt_register_async calls xpt_for_all_devices(),
		 * which calls xptbustraverse(), which will acquire each
		 * SIM lock.  When it traverses our particular bus, it will
		 * necessarily acquire the SIM lock, which will lead to a
		 * recursive lock acquisition.
		 *
		 * The CAM locking changes fix this problem by acquiring
		 * the XPT topology lock around bus traversal in
		 * xptbustraverse(), so the caller can hold the SIM lock
		 * and it does not cause a recursive lock acquisition.
		 *
		 * These __FreeBSD_version values are approximate, especially
		 * for stable/10, which is two months later than the actual
		 * change.
		 */

#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		mpr_unlock(sc);
		status = xpt_register_async(event, mprsas_async, sc,
					    NULL);
		mpr_lock(sc);
#else
		status = xpt_register_async(event, mprsas_async, sc,
					    sassc->path);
#endif

		if (status != CAM_REQ_CMP) {
			mpr_dprint(sc, MPR_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpr_printf(sc, "EEDP capabilities disabled.\n");
	}

	mpr_unlock(sc);

	mprsas_register_events(sc);
out:
	if (error)
		mpr_detach_sas(sc);

	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
	return (error);
}
900 
/*
 * Tear down everything mpr_attach_sas() created: deregister events, drain
 * the event taskqueue, unwind the startup refcounts/simq freeze, remove
 * the async handler, deregister the bus, and free the SIM, queue, LUN
 * records and target array.  Safe to call on a partially attached softc.
 * Always returns 0.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	while (sassc->startup_refcount != 0)
		mprsas_startup_decrement(sassc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		/* event mask 0 == deregister for this path */
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Still in startup means the simq freeze was never released. */
	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any per-LUN records still hanging off the targets. */
	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}
963 
964 void
965 mprsas_discovery_end(struct mprsas_softc *sassc)
966 {
967 	struct mpr_softc *sc = sassc->sc;
968 
969 	MPR_FUNCTRACE(sc);
970 
971 	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
972 		callout_stop(&sassc->discovery_callout);
973 
974 	/*
975 	 * After discovery has completed, check the mapping table for any
976 	 * missing devices and update their missing counts. Only do this once
977 	 * whenever the driver is initialized so that missing counts aren't
978 	 * updated unnecessarily. Note that just because discovery has
979 	 * completed doesn't mean that events have been processed yet. The
980 	 * check_devices function is a callout timer that checks if ALL devices
981 	 * are missing. If so, it will wait a little longer for events to
982 	 * complete and keep resetting itself until some device in the mapping
983 	 * table is not missing, meaning that event processing has started.
984 	 */
985 	if (sc->track_mapping_events) {
986 		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
987 		    "completed. Check for missing devices in the mapping "
988 		    "table.\n");
989 		callout_reset(&sc->device_check_callout,
990 		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
991 		    sc);
992 	}
993 }
994 
/*
 * CAM SIM action entry point.  Dispatches on the CCB function code,
 * fills in or completes the CCB, and calls xpt_done() for every case
 * that does not hand the CCB off to an async path (SCSI I/O, SMP I/O
 * and device reset return early and complete later).
 * Called with the softc mutex held.
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report static HBA capabilities and identity to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/*
		 * Report the negotiated SAS link rate for the addressed
		 * target; fails with CAM_DEV_NOT_THERE if the target has no
		 * device handle.
		 */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Map the MPI link-rate code to a bitrate in Kb/s. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		/* Completed asynchronously by the reset path. */
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* Not implemented; report success so CAM doesn't stall. */
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Completed asynchronously by mprsas_scsiio_complete(). */
		mprsas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mprsas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1129 
1130 static void
1131 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1132     target_id_t target_id, lun_id_t lun_id)
1133 {
1134 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1135 	struct cam_path *path;
1136 
1137 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1138 	    ac_code, target_id, (uintmax_t)lun_id);
1139 
1140 	if (xpt_create_path(&path, NULL,
1141 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1142 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1143 		    "notification\n");
1144 		return;
1145 	}
1146 
1147 	xpt_async(ac_code, path, NULL);
1148 	xpt_free_path(path);
1149 }
1150 
/*
 * Force-complete every in-flight command during a diag reset.  Each
 * non-free command is completed with a NULL reply: commands with a
 * completion callback get it invoked, sleeping submitters get a
 * wakeup().  Anything that matched neither path is logged.
 * Called with the softc mutex held.
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPR_CM_STATE_FREE)
			continue;

		/* Mark active and clear the reply before completing. */
		cm->cm_state = MPR_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/* Polled submitters spin on the COMPLETE flag; set it. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	/* Nothing is outstanding any more. */
	sc->io_cmds_active = 0;
}
1199 
1200 void
1201 mprsas_handle_reinit(struct mpr_softc *sc)
1202 {
1203 	int i;
1204 
1205 	/* Go back into startup mode and freeze the simq, so that CAM
1206 	 * doesn't send any commands until after we've rediscovered all
1207 	 * targets and found the proper device handles for them.
1208 	 *
1209 	 * After the reset, portenable will trigger discovery, and after all
1210 	 * discovery-related activities have finished, the simq will be
1211 	 * released.
1212 	 */
1213 	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1214 	sc->sassc->flags |= MPRSAS_IN_STARTUP;
1215 	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1216 	mprsas_startup_increment(sc->sassc);
1217 
1218 	/* notify CAM of a bus reset */
1219 	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1220 	    CAM_LUN_WILDCARD);
1221 
1222 	/* complete and cleanup after all outstanding commands */
1223 	mprsas_complete_all_commands(sc);
1224 
1225 	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
1226 	    __func__, sc->sassc->startup_refcount);
1227 
1228 	/* zero all the target handles, since they may change after the
1229 	 * reset, and we have to rediscover all the targets and use the new
1230 	 * handles.
1231 	 */
1232 	for (i = 0; i < sc->sassc->maxtargets; i++) {
1233 		if (sc->sassc->targets[i].outstanding != 0)
1234 			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1235 			    i, sc->sassc->targets[i].outstanding);
1236 		sc->sassc->targets[i].handle = 0x0;
1237 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1238 		sc->sassc->targets[i].outstanding = 0;
1239 		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
1240 	}
1241 }
1242 static void
1243 mprsas_tm_timeout(void *data)
1244 {
1245 	struct mpr_command *tm = data;
1246 	struct mpr_softc *sc = tm->cm_sc;
1247 
1248 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1249 
1250 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1251 	    "out\n", tm);
1252 
1253 	KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1254 	    ("command not inqueue\n"));
1255 
1256 	tm->cm_state = MPR_CM_STATE_BUSY;
1257 	mpr_reinit(sc);
1258 }
1259 
/*
 * Completion handler for a LOGICAL UNIT RESET task management request.
 * If the LUN has no commands left, recovery succeeded: announce the
 * reset to CAM and either move on to the next timed-out command for the
 * target or release the TM.  If commands remain, the reset effectively
 * failed and recovery escalates to a target reset.  A NULL reply that is
 * not due to a diag reset triggers a controller reinit.
 */
static void
mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	unsigned int cm_count = 0;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	/* A reply arrived; cancel the escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		    "This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
		    tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
			   "More commands to abort for target %u\n", targ->tid);
			mprsas_send_abort(sc, tm, cm);
		} else {
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
	} else {
		/* if we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
	}
}
1357 
/*
 * Completion handler for a TARGET RESET task management request.  If
 * the target has no outstanding commands left, recovery succeeded:
 * announce the reset to CAM and release the TM.  If commands remain,
 * the reset effectively failed and recovery escalates to a controller
 * reinit (the last rung of the abort -> LUN reset -> target reset ->
 * reinit ladder).  A NULL reply not due to a diag reset also reinits.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* A reply arrived; cancel the escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL target reset reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL target reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "target reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/*
		 * We've finished recovery for this target and all
		 * of its logical units.
		 */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished reset recovery for target %u\n", targ->tid);

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else {
		/*
		 * After a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Target reset complete for target %u, but still have %u "
		    "command(s), resetting controller\n", targ->tid,
		    targ->outstanding);
		mpr_reinit(sc);
	}
}
1434 
/* Seconds to wait for a TM reset reply before mprsas_tm_timeout fires. */
#define MPR_RESET_TIMEOUT 30

/*
 * Build and send a SCSI task management reset (logical unit reset or
 * target reset) using the pre-allocated TM command 'tm'.  Arms a
 * MPR_RESET_TIMEOUT escalation callout; completion is handled by
 * mprsas_logical_unit_reset_complete() or mprsas_target_reset_complete().
 * Returns -1 for a missing device handle or unknown reset type, else
 * the mpr_map_command() result (0 on success).
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err;

	target = tm->cm_targ;
	/* No device handle means no device to send the TM to. */
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mprsas_target_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* Log physical location info when the enclosure provides it. */
	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    target->encl_level, target->encl_slot,
		    target->connector_name);
	}

	/* TM requests carry no data and go out as high-priority requests. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	/* Escalate via mprsas_tm_timeout if no reply arrives in time. */
	callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
1504 
1505 
/*
 * Completion handler for an ABORT TASK task management request.  Looks
 * at the target's timed-out command list to decide what happened: list
 * empty means recovery is done; head of the list differs from the
 * aborted TaskMID means the abort worked and the next timed-out command
 * gets aborted; otherwise the abort failed and recovery escalates to a
 * logical unit reset.  A NULL reply not due to a diag reset reinits the
 * controller.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* A reply arrived; cancel the escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);
		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);
		mprsas_send_abort(sc, tm, cm);
	} else {
		/*
		 * we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1586 
/* Seconds to wait for an abort TM reply before mprsas_tm_timeout fires. */
#define MPR_ABORT_TIMEOUT 5

/*
 * Build and send an ABORT TASK task management request for the
 * timed-out command 'cm', reusing the recovery TM command 'tm'.  Arms a
 * MPR_ABORT_TIMEOUT escalation callout; completion is handled by
 * mprsas_abort_complete().  Returns -1 for a missing device handle,
 * else the mpr_map_command() result (0 on success).
 */
static int
mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
    struct mpr_command *cm)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;
	int err;

	targ = cm->cm_targ;
	/* No device handle means no device to send the abort to. */
	if (targ->handle == 0) {
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		   "%s null devhandle for target_id %d\n",
		    __func__, cm->cm_ccb->ccb_h.target_id);
		return -1;
	}

	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
	    "Aborting command %p\n", cm);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;

	/* XXX Need to handle invalid LUNs */
	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);

	/* Identify the task to abort by the SMID of the timed-out command. */
	req->TaskMID = htole16(cm->cm_desc.Default.SMID);

	/* TM requests carry no data and go out as high-priority requests. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mprsas_abort_complete;
	tm->cm_complete_data = (void *)tm;
	tm->cm_targ = cm->cm_targ;
	tm->cm_lun = cm->cm_lun;

	/* Escalate via mprsas_tm_timeout if no reply arrives in time. */
	callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
	    mprsas_tm_timeout, tm);

	targ->aborts++;

	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending abort for cm %p SMID %u\n",
		    err, cm, req->TaskMID);
	return err;
}
1640 
/*
 * Callout handler for a SCSI I/O whose timeout expired.  After ruling
 * out a completion race (a pending interrupt, or the command no longer
 * queued), marks the command timed out and kicks off recovery: either
 * queues it behind an in-progress TM for the target, or allocates a TM
 * and sends an abort for it.  Called with the softc mutex held.
 */
static void
mprsas_scsiio_timeout(void *data)
{
	sbintime_t elapsed, now;
	union ccb *ccb;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
	now = sbinuptime();

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_state != MPR_CM_STATE_INQUEUE) {
		/* Completed while we were checking; nothing to recover. */
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	/* qos.sim_data was stamped with sbinuptime() at submission. */
	elapsed = now - ccb->ccb_h.qos.sim_data;
	mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
	    "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
	/* Log physical location info when the enclosure provides it. */
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    targ->encl_level, targ->encl_slot, targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_state = MPR_CM_STATE_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {

		/* start recovery by aborting the first timedout command */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}
1729 
1730 /**
1731  * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1732  *			     to SCSI Unmap.
1733  * Return 0 - for success,
1734  *	  1 - to immediately return back the command with success status to CAM
1735  *	  negative value - to fallback to firmware path i.e. issue scsi unmap
1736  *			   to FW without any translation.
1737  */
1738 static int
1739 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1740     union ccb *ccb, struct mprsas_target *targ)
1741 {
1742 	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1743 	struct ccb_scsiio *csio;
1744 	struct unmap_parm_list *plist;
1745 	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1746 	struct nvme_command *c;
1747 	int i, res;
1748 	uint16_t ndesc, list_len, data_length;
1749 	struct mpr_prp_page *prp_page_info;
1750 	uint64_t nvme_dsm_ranges_dma_handle;
1751 
1752 	csio = &ccb->csio;
1753 #if __FreeBSD_version >= 1100103
1754 	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1755 #else
1756 	if (csio->ccb_h.flags & CAM_CDB_POINTER) {
1757 		list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
1758 		    ccb->csio.cdb_io.cdb_ptr[8]);
1759 	} else {
1760 		list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
1761 		    ccb->csio.cdb_io.cdb_bytes[8]);
1762 	}
1763 #endif
1764 	if (!list_len) {
1765 		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1766 		return -EINVAL;
1767 	}
1768 
1769 	plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1770 	if (!plist) {
1771 		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1772 		    "save UNMAP data\n");
1773 		return -ENOMEM;
1774 	}
1775 
1776 	/* Copy SCSI unmap data to a local buffer */
1777 	bcopy(csio->data_ptr, plist, csio->dxfer_len);
1778 
1779 	/* return back the unmap command to CAM with success status,
1780 	 * if number of descripts is zero.
1781 	 */
1782 	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1783 	if (!ndesc) {
1784 		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1785 		    "UNMAP cmd is Zero\n");
1786 		res = 1;
1787 		goto out;
1788 	}
1789 
1790 	data_length = ndesc * sizeof(struct nvme_dsm_range);
1791 	if (data_length > targ->MDTS) {
1792 		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1793 		    "Device's MDTS: %d\n", data_length, targ->MDTS);
1794 		res = -EINVAL;
1795 		goto out;
1796 	}
1797 
1798 	prp_page_info = mpr_alloc_prp_page(sc);
1799 	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1800 	    "UNMAP command.\n", __func__));
1801 
1802 	/*
1803 	 * Insert the allocated PRP page into the command's PRP page list. This
1804 	 * will be freed when the command is freed.
1805 	 */
1806 	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1807 
1808 	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1809 	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1810 
1811 	bzero(nvme_dsm_ranges, data_length);
1812 
1813 	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1814 	 * for each descriptors contained in SCSI UNMAP data.
1815 	 */
1816 	for (i = 0; i < ndesc; i++) {
1817 		nvme_dsm_ranges[i].length =
1818 		    htole32(be32toh(plist->desc[i].nlb));
1819 		nvme_dsm_ranges[i].starting_lba =
1820 		    htole64(be64toh(plist->desc[i].slba));
1821 		nvme_dsm_ranges[i].attributes = 0;
1822 	}
1823 
1824 	/* Build MPI2.6's NVMe Encapsulated Request Message */
1825 	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1826 	bzero(req, sizeof(*req));
1827 	req->DevHandle = htole16(targ->handle);
1828 	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1829 	req->Flags = MPI26_NVME_FLAGS_WRITE;
1830 	req->ErrorResponseBaseAddress.High =
1831 	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1832 	req->ErrorResponseBaseAddress.Low =
1833 	    htole32(cm->cm_sense_busaddr);
1834 	req->ErrorResponseAllocationLength =
1835 	    htole16(sizeof(struct nvme_completion));
1836 	req->EncapsulatedCommandLength =
1837 	    htole16(sizeof(struct nvme_command));
1838 	req->DataLength = htole32(data_length);
1839 
1840 	/* Build NVMe DSM command */
1841 	c = (struct nvme_command *) req->NVMe_Command;
1842 	c->opc_fuse = NVME_CMD_SET_OPC(NVME_OPC_DATASET_MANAGEMENT);
1843 	c->nsid = htole32(csio->ccb_h.target_lun + 1);
1844 	c->cdw10 = htole32(ndesc - 1);
1845 	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1846 
1847 	cm->cm_length = data_length;
1848 	cm->cm_data = NULL;
1849 
1850 	cm->cm_complete = mprsas_scsiio_complete;
1851 	cm->cm_complete_data = ccb;
1852 	cm->cm_targ = targ;
1853 	cm->cm_lun = csio->ccb_h.target_lun;
1854 	cm->cm_ccb = ccb;
1855 
1856 	cm->cm_desc.Default.RequestFlags =
1857 	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
1858 
1859 	csio->ccb_h.qos.sim_data = sbinuptime();
1860 #if __FreeBSD_version >= 1000029
1861 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1862 	    mprsas_scsiio_timeout, cm, 0);
1863 #else //__FreeBSD_version < 1000029
1864 	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1865 	    mprsas_scsiio_timeout, cm);
1866 #endif //__FreeBSD_version >= 1000029
1867 
1868 	targ->issued++;
1869 	targ->outstanding++;
1870 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1871 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1872 
1873 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1874 	    __func__, cm, ccb, targ->outstanding);
1875 
1876 	mpr_build_nvme_prp(sc, cm, req,
1877 	    (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1878 	mpr_map_command(sc, cm);
1879 
1880 out:
1881 	free(plist, M_MPR);
1882 	return 0;
1883 }
1884 
/*
 * mprsas_action_scsiio - service an XPT_SCSI_IO CCB from CAM.
 *
 * Validates the target, allocates a driver command, builds the MPI SCSI IO
 * request frame (or reroutes an UNMAP aimed at an NVMe drive to a native
 * DataSetManagement command), applies tagging/TLR/EEDP settings, arms the
 * per-command timeout, and hands the command to the adapter.  On any
 * validation failure the CCB is completed immediately with an appropriate
 * CAM status.  Must be called with the controller mutex held.
 */
static void
mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;
	struct mpr_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
	uint16_t eedp_flags;
	uint32_t mpi_control;
	int rc;

	sc = sassc->sc;
	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero device handle means the target is gone (or never existed). */
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members cannot be addressed with plain SCSI IO. */
	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/* Refuse new IO once the driver has begun shutting down. */
	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPRSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * Out of commands (or a diag reset is in flight): freeze the SIM
	 * queue and ask CAM to requeue this CCB when resources free up.
	 */
	cm = mpr_alloc_command(sc);
	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpr_free_command(sc, cm);
		}
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* For NVME device's issue UNMAP command directly to NVME drives by
	 * constructing equivalent native NVMe DataSetManagement command.
	 */
#if __FreeBSD_version >= 1100103
	scsi_opcode = scsiio_cdb_ptr(csio)[0];
#else
	/* Older CAM: resolve the CDB location by hand. */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		scsi_opcode = csio->cdb_io.cdb_ptr[0];
	else
		scsi_opcode = csio->cdb_io.cdb_bytes[0];
#endif
	if (scsi_opcode == UNMAP &&
	    targ->is_nvme &&
	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
		rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
		if (rc == 1) { /* return command to CAM with success status */
			mpr_free_command(sc, cm);
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
			xpt_done(ccb);
			return;
		} else if (!rc) /* Issued NVMe Encapsulated Request Message */
			return;
		/* Otherwise fall through and send as a regular SCSI IO. */
	}

	/* Build the MPI SCSI IO request frame in place. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPR_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* 32-byte CDB: encode the extra length (4 dwords past 16 bytes). */
	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Per-target Transport Layer Retry setting cached in mapping table. */
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);

	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mpr_free_command(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB into the request, honoring CAM_CDB_POINTER. */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else {
		KASSERT(csio->cdb_len <= IOCDBLEN,
		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
		    "is not set", csio->cdb_len));
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	}
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* Find the LUN; lun is NULL if the loop finds no match. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
				eedp_flags |=
				    MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
			}
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 of 16-byte CDBs, else byte 2. */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->
				    CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				/* Set RDPROTECT/WRPROTECT (0x20) in byte 1. */
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		/* DMA mapping will pull the data description from the CCB. */
		cm->cm_data = ccb;
		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;	/* SGL spans dwords 24-31 of the frame */
	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;
	/*
	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
	 * and set descriptor type.
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		if (!sc->atomic_desc_capable) {
			cm->cm_desc.FastPathSCSIIO.DevHandle =
			    htole16(targ->handle);
		}
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		if (!sc->atomic_desc_capable)
			cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

	/* Timestamp for latency accounting, then arm the per-IO timeout. */
	csio->ccb_h.qos.sim_data = sbinuptime();
#if __FreeBSD_version >= 1000029
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);
#else //__FreeBSD_version < 1000029
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	    mprsas_scsiio_timeout, cm);
#endif //__FreeBSD_version >= 1000029

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_map_command(sc, cm);
	return;
}
2198 
2199 /**
2200  * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
2201  */
2202 static void
2203 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2204     Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2205 {
2206 	u32 response_info;
2207 	u8 *response_bytes;
2208 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2209 	    MPI2_IOCSTATUS_MASK;
2210 	u8 scsi_state = mpi_reply->SCSIState;
2211 	u8 scsi_status = mpi_reply->SCSIStatus;
2212 	char *desc_ioc_state = NULL;
2213 	char *desc_scsi_status = NULL;
2214 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2215 
2216 	if (log_info == 0x31170000)
2217 		return;
2218 
2219 	desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2220 	     ioc_status);
2221 	desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2222 	    scsi_status);
2223 
2224 	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2225 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2226 	if (targ->encl_level_valid) {
2227 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2228 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2229 		    targ->connector_name);
2230 	}
2231 
2232 	/*
2233 	 * We can add more detail about underflow data here
2234 	 * TO-DO
2235 	 */
2236 	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2237 	    "scsi_state %b\n", desc_scsi_status, scsi_status,
2238 	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2239 	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
2240 
2241 	if (sc->mpr_debug & MPR_XINFO &&
2242 	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2243 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2244 		scsi_sense_print(csio);
2245 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2246 	}
2247 
2248 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2249 		response_info = le32toh(mpi_reply->ResponseInfo);
2250 		response_bytes = (u8 *)&response_info;
2251 		mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2252 		    response_bytes[0],
2253 		    mpr_describe_table(mpr_scsi_taskmgmt_string,
2254 		    response_bytes[0]));
2255 	}
2256 }
2257 
/** mprsas_nvme_trans_status_code
 *
 * Convert Native NVMe command error status to an equivalent SCSI error
 * status, and build fixed-format autosense data (sense key/ASC/ASCQ) in the
 * CCB found via cm->cm_complete_data so CAM sees a normal SCSI completion.
 *
 * nvme_status: raw 16-bit status word from the NVMe completion entry
 *              (host byte order expected by the NVME_STATUS_GET_* macros).
 *
 * Returns appropriate scsi_status (an MPI2_SCSI_STATUS_* value).
 */
static u8
mprsas_nvme_trans_status_code(uint16_t nvme_status,
    struct mpr_command *cm)
{
	u8 status = MPI2_SCSI_STATUS_GOOD;
	int skey, asc, ascq;
	union ccb *ccb = cm->cm_complete_data;
	int returned_sense_len;
	uint8_t sct, sc;

	/* Split the status word into Status Code Type and Status Code. */
	sct = NVME_STATUS_GET_SCT(nvme_status);
	sc = NVME_STATUS_GET_SC(nvme_status);

	/*
	 * Default translation for any status not explicitly mapped below:
	 * CHECK CONDITION with ILLEGAL REQUEST / no additional sense.
	 */
	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
	skey = SSD_KEY_ILLEGAL_REQUEST;
	asc = SCSI_ASC_NO_SENSE;
	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;

	switch (sct) {
	case NVME_SCT_GENERIC:
		/* Generic command status codes. */
		switch (sc) {
		case NVME_SC_SUCCESS:
			status = MPI2_SCSI_STATUS_GOOD;
			skey = SSD_KEY_NO_SENSE;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_OPCODE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_COMMAND;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_FIELD:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_DATA_TRANSFER_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ABORTED_POWER_LOSS:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_WARNING;
			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
			break;
		case NVME_SC_INTERNAL_DEVICE_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_HARDWARE_ERROR;
			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		/* All abort variants share one TASK ABORTED translation. */
		case NVME_SC_ABORTED_BY_REQUEST:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		case NVME_SC_LBA_OUT_OF_RANGE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_BLOCK;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_CAPACITY_EXCEEDED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_NAMESPACE_NOT_READY:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_NOT_READY;
			asc = SCSI_ASC_LUN_NOT_READY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_COMMAND_SPECIFIC:
		/* Command-specific status codes. */
		switch (sc) {
		case NVME_SC_INVALID_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
			break;
		case NVME_SC_CONFLICTING_ATTRIBUTES:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_MEDIA_ERROR:
		/* Media and data-integrity (protection information) errors. */
		switch (sc) {
		case NVME_SC_WRITE_FAULTS:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_UNRECOVERED_READ_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_GUARD_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
			break;
		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
			break;
		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
			break;
		case NVME_SC_COMPARE_FAILURE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MISCOMPARE;
			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ACCESS_DENIED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		}
		break;
	}

	/* The translated sense always fills a full scsi_sense_data. */
	returned_sense_len = sizeof(struct scsi_sense_data);
	if (returned_sense_len < ccb->csio.sense_len)
		ccb->csio.sense_resid = ccb->csio.sense_len -
		    returned_sense_len;
	else
		ccb->csio.sense_resid = 0;

	/* 1 == current error (as opposed to deferred). */
	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
	    1, skey, asc, ascq, SSD_ELEM_NONE);
	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;

	return status;
}
2434 
2435 /** mprsas_complete_nvme_unmap
2436  *
2437  * Complete native NVMe command issued using NVMe Encapsulated
2438  * Request Message.
2439  */
2440 static u8
2441 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2442 {
2443 	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2444 	struct nvme_completion *nvme_completion = NULL;
2445 	u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2446 
2447 	mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2448 	if (le16toh(mpi_reply->ErrorResponseCount)){
2449 		nvme_completion = (struct nvme_completion *)cm->cm_sense;
2450 		scsi_status = mprsas_nvme_trans_status_code(
2451 		    nvme_completion->status, cm);
2452 	}
2453 	return scsi_status;
2454 }
2455 
2456 static void
2457 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2458 {
2459 	MPI2_SCSI_IO_REPLY *rep;
2460 	union ccb *ccb;
2461 	struct ccb_scsiio *csio;
2462 	struct mprsas_softc *sassc;
2463 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2464 	u8 *TLR_bits, TLR_on, *scsi_cdb;
2465 	int dir = 0, i;
2466 	u16 alloc_len;
2467 	struct mprsas_target *target;
2468 	target_id_t target_id;
2469 
2470 	MPR_FUNCTRACE(sc);
2471 	mpr_dprint(sc, MPR_TRACE,
2472 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2473 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2474 	    cm->cm_targ->outstanding);
2475 
2476 	callout_stop(&cm->cm_callout);
2477 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2478 
2479 	sassc = sc->sassc;
2480 	ccb = cm->cm_complete_data;
2481 	csio = &ccb->csio;
2482 	target_id = csio->ccb_h.target_id;
2483 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2484 	/*
2485 	 * XXX KDM if the chain allocation fails, does it matter if we do
2486 	 * the sync and unload here?  It is simpler to do it in every case,
2487 	 * assuming it doesn't cause problems.
2488 	 */
2489 	if (cm->cm_data != NULL) {
2490 		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2491 			dir = BUS_DMASYNC_POSTREAD;
2492 		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2493 			dir = BUS_DMASYNC_POSTWRITE;
2494 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2495 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2496 	}
2497 
2498 	cm->cm_targ->completed++;
2499 	cm->cm_targ->outstanding--;
2500 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2501 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2502 
2503 	if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2504 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2505 		cm->cm_state = MPR_CM_STATE_BUSY;
2506 		if (cm->cm_reply != NULL)
2507 			mprsas_log_command(cm, MPR_RECOVERY,
2508 			    "completed timedout cm %p ccb %p during recovery "
2509 			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2510 			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2511 			    rep->SCSIState, le32toh(rep->TransferCount));
2512 		else
2513 			mprsas_log_command(cm, MPR_RECOVERY,
2514 			    "completed timedout cm %p ccb %p during recovery\n",
2515 			    cm, cm->cm_ccb);
2516 	} else if (cm->cm_targ->tm != NULL) {
2517 		if (cm->cm_reply != NULL)
2518 			mprsas_log_command(cm, MPR_RECOVERY,
2519 			    "completed cm %p ccb %p during recovery "
2520 			    "ioc %x scsi %x state %x xfer %u\n",
2521 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2522 			    rep->SCSIStatus, rep->SCSIState,
2523 			    le32toh(rep->TransferCount));
2524 		else
2525 			mprsas_log_command(cm, MPR_RECOVERY,
2526 			    "completed cm %p ccb %p during recovery\n",
2527 			    cm, cm->cm_ccb);
2528 	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2529 		mprsas_log_command(cm, MPR_RECOVERY,
2530 		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2531 	}
2532 
2533 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2534 		/*
2535 		 * We ran into an error after we tried to map the command,
2536 		 * so we're getting a callback without queueing the command
2537 		 * to the hardware.  So we set the status here, and it will
2538 		 * be retained below.  We'll go through the "fast path",
2539 		 * because there can be no reply when we haven't actually
2540 		 * gone out to the hardware.
2541 		 */
2542 		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2543 
2544 		/*
2545 		 * Currently the only error included in the mask is
2546 		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2547 		 * chain frames.  We need to freeze the queue until we get
2548 		 * a command that completed without this error, which will
2549 		 * hopefully have some chain frames attached that we can
2550 		 * use.  If we wanted to get smarter about it, we would
2551 		 * only unfreeze the queue in this condition when we're
2552 		 * sure that we're getting some chain frames back.  That's
2553 		 * probably unnecessary.
2554 		 */
2555 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2556 			xpt_freeze_simq(sassc->sim, 1);
2557 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2558 			mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2559 			    "freezing SIM queue\n");
2560 		}
2561 	}
2562 
2563 	/*
2564 	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2565 	 * flag, and use it in a few places in the rest of this function for
2566 	 * convenience. Use the macro if available.
2567 	 */
2568 #if __FreeBSD_version >= 1100103
2569 	scsi_cdb = scsiio_cdb_ptr(csio);
2570 #else
2571 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
2572 		scsi_cdb = csio->cdb_io.cdb_ptr;
2573 	else
2574 		scsi_cdb = csio->cdb_io.cdb_bytes;
2575 #endif
2576 
2577 	/*
2578 	 * If this is a Start Stop Unit command and it was issued by the driver
2579 	 * during shutdown, decrement the refcount to account for all of the
2580 	 * commands that were sent.  All SSU commands should be completed before
2581 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2582 	 * is TRUE.
2583 	 */
2584 	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2585 		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2586 		sc->SSU_refcount--;
2587 	}
2588 
2589 	/* Take the fast path to completion */
2590 	if (cm->cm_reply == NULL) {
2591 		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2592 			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2593 				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2594 			else {
2595 				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2596 				csio->scsi_status = SCSI_STATUS_OK;
2597 			}
2598 			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2599 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2600 				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2601 				mpr_dprint(sc, MPR_XINFO,
2602 				    "Unfreezing SIM queue\n");
2603 			}
2604 		}
2605 
2606 		/*
2607 		 * There are two scenarios where the status won't be
2608 		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2609 		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2610 		 */
2611 		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2612 			/*
2613 			 * Freeze the dev queue so that commands are
2614 			 * executed in the correct order after error
2615 			 * recovery.
2616 			 */
2617 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2618 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2619 		}
2620 		mpr_free_command(sc, cm);
2621 		xpt_done(ccb);
2622 		return;
2623 	}
2624 
2625 	target = &sassc->targets[target_id];
2626 	if (scsi_cdb[0] == UNMAP &&
2627 	    target->is_nvme &&
2628 	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2629 		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2630 		csio->scsi_status = rep->SCSIStatus;
2631 	}
2632 
2633 	mprsas_log_command(cm, MPR_XINFO,
2634 	    "ioc %x scsi %x state %x xfer %u\n",
2635 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2636 	    le32toh(rep->TransferCount));
2637 
2638 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2639 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2640 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2641 		/* FALLTHROUGH */
2642 	case MPI2_IOCSTATUS_SUCCESS:
2643 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2644 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2645 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2646 			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2647 
2648 		/* Completion failed at the transport level. */
2649 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2650 		    MPI2_SCSI_STATE_TERMINATED)) {
2651 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2652 			break;
2653 		}
2654 
2655 		/* In a modern packetized environment, an autosense failure
2656 		 * implies that there's not much else that can be done to
2657 		 * recover the command.
2658 		 */
2659 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2660 			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2661 			break;
2662 		}
2663 
2664 		/*
2665 		 * CAM doesn't care about SAS Response Info data, but if this is
2666 		 * the state check if TLR should be done.  If not, clear the
2667 		 * TLR_bits for the target.
2668 		 */
2669 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2670 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2671 		    == MPR_SCSI_RI_INVALID_FRAME)) {
2672 			sc->mapping_table[target_id].TLR_bits =
2673 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2674 		}
2675 
2676 		/*
2677 		 * Intentionally override the normal SCSI status reporting
2678 		 * for these two cases.  These are likely to happen in a
2679 		 * multi-initiator environment, and we want to make sure that
2680 		 * CAM retries these commands rather than fail them.
2681 		 */
2682 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2683 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2684 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2685 			break;
2686 		}
2687 
2688 		/* Handle normal status and sense */
2689 		csio->scsi_status = rep->SCSIStatus;
2690 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2691 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2692 		else
2693 			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2694 
2695 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2696 			int sense_len, returned_sense_len;
2697 
2698 			returned_sense_len = min(le32toh(rep->SenseCount),
2699 			    sizeof(struct scsi_sense_data));
2700 			if (returned_sense_len < csio->sense_len)
2701 				csio->sense_resid = csio->sense_len -
2702 				    returned_sense_len;
2703 			else
2704 				csio->sense_resid = 0;
2705 
2706 			sense_len = min(returned_sense_len,
2707 			    csio->sense_len - csio->sense_resid);
2708 			bzero(&csio->sense_data, sizeof(csio->sense_data));
2709 			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2710 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2711 		}
2712 
2713 		/*
2714 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2715 		 * and it's page code 0 (Supported Page List), and there is
2716 		 * inquiry data, and this is for a sequential access device, and
2717 		 * the device is an SSP target, and TLR is supported by the
2718 		 * controller, turn the TLR_bits value ON if page 0x90 is
2719 		 * supported.
2720 		 */
2721 		if ((scsi_cdb[0] == INQUIRY) &&
2722 		    (scsi_cdb[1] & SI_EVPD) &&
2723 		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2724 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2725 		    (csio->data_ptr != NULL) &&
2726 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2727 		    (sc->control_TLR) &&
2728 		    (sc->mapping_table[target_id].device_info &
2729 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2730 			vpd_list = (struct scsi_vpd_supported_page_list *)
2731 			    csio->data_ptr;
2732 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2733 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2734 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2735 			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2736 			alloc_len -= csio->resid;
2737 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2738 				if (vpd_list->list[i] == 0x90) {
2739 					*TLR_bits = TLR_on;
2740 					break;
2741 				}
2742 			}
2743 		}
2744 
2745 		/*
2746 		 * If this is a SATA direct-access end device, mark it so that
2747 		 * a SCSI StartStopUnit command will be sent to it when the
2748 		 * driver is being shutdown.
2749 		 */
2750 		if ((scsi_cdb[0] == INQUIRY) &&
2751 		    (csio->data_ptr != NULL) &&
2752 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2753 		    (sc->mapping_table[target_id].device_info &
2754 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2755 		    ((sc->mapping_table[target_id].device_info &
2756 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2757 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2758 			target = &sassc->targets[target_id];
2759 			target->supports_SSU = TRUE;
2760 			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2761 			    target_id);
2762 		}
2763 		break;
2764 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2765 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2766 		/*
2767 		 * If devinfo is 0 this will be a volume.  In that case don't
2768 		 * tell CAM that the volume is not there.  We want volumes to
2769 		 * be enumerated until they are deleted/removed, not just
2770 		 * failed.
2771 		 */
2772 		if (cm->cm_targ->devinfo == 0)
2773 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2774 		else
2775 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2776 		break;
2777 	case MPI2_IOCSTATUS_INVALID_SGL:
2778 		mpr_print_scsiio_cmd(sc, cm);
2779 		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2780 		break;
2781 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2782 		/*
2783 		 * This is one of the responses that comes back when an I/O
2784 		 * has been aborted.  If it is because of a timeout that we
2785 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2786 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2787 		 * command is the same (it gets retried, subject to the
2788 		 * retry counter), the only difference is what gets printed
2789 		 * on the console.
2790 		 */
2791 		if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2792 			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2793 		else
2794 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2795 		break;
2796 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2797 		/* resid is ignored for this condition */
2798 		csio->resid = 0;
2799 		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2800 		break;
2801 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2802 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2803 		/*
2804 		 * These can sometimes be transient transport-related
2805 		 * errors, and sometimes persistent drive-related errors.
2806 		 * We used to retry these without decrementing the retry
2807 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2808 		 * we hit a persistent drive problem that returns one of
2809 		 * these error codes, we would retry indefinitely.  So,
2810 		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2811 		 * count and avoid infinite retries.  We're taking the
2812 		 * potential risk of flagging false failures in the event
2813 		 * of a topology-related error (e.g. a SAS expander problem
2814 		 * causes a command addressed to a drive to fail), but
2815 		 * avoiding getting into an infinite retry loop.
2816 		 */
2817 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2818 		mpr_dprint(sc, MPR_INFO,
2819 		    "Controller reported %s tgt %u SMID %u loginfo %x\n",
2820 		    mpr_describe_table(mpr_iocstatus_string,
2821 		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2822 		    target_id, cm->cm_desc.Default.SMID,
2823 		    le32toh(rep->IOCLogInfo));
2824 		mpr_dprint(sc, MPR_XINFO,
2825 		    "SCSIStatus %x SCSIState %x xfercount %u\n",
2826 		    rep->SCSIStatus, rep->SCSIState,
2827 		    le32toh(rep->TransferCount));
2828 		break;
2829 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2830 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2831 	case MPI2_IOCSTATUS_INVALID_VPID:
2832 	case MPI2_IOCSTATUS_INVALID_FIELD:
2833 	case MPI2_IOCSTATUS_INVALID_STATE:
2834 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2835 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2836 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2837 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2838 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2839 	default:
2840 		mprsas_log_command(cm, MPR_XINFO,
2841 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2842 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2843 		    rep->SCSIStatus, rep->SCSIState,
2844 		    le32toh(rep->TransferCount));
2845 		csio->resid = cm->cm_length;
2846 
2847 		if (scsi_cdb[0] == UNMAP &&
2848 		    target->is_nvme &&
2849 		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2850 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2851 		else
2852 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2853 
2854 		break;
2855 	}
2856 
2857 	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2858 
2859 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2860 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2861 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2862 		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2863 		    "queue\n");
2864 	}
2865 
2866 	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2867 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2868 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2869 	}
2870 
2871 	mpr_free_command(sc, cm);
2872 	xpt_done(ccb);
2873 }
2874 
2875 #if __FreeBSD_version >= 900026
2876 static void
2877 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2878 {
2879 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2880 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2881 	uint64_t sasaddr;
2882 	union ccb *ccb;
2883 
2884 	ccb = cm->cm_complete_data;
2885 
2886 	/*
2887 	 * Currently there should be no way we can hit this case.  It only
2888 	 * happens when we have a failure to allocate chain frames, and SMP
2889 	 * commands require two S/G elements only.  That should be handled
2890 	 * in the standard request size.
2891 	 */
2892 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2893 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2894 		    "request!\n", __func__, cm->cm_flags);
2895 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2896 		goto bailout;
2897         }
2898 
2899 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2900 	if (rpl == NULL) {
2901 		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2902 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2903 		goto bailout;
2904 	}
2905 
2906 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2907 	sasaddr = le32toh(req->SASAddress.Low);
2908 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2909 
2910 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2911 	    MPI2_IOCSTATUS_SUCCESS ||
2912 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2913 		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2914 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2915 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2916 		goto bailout;
2917 	}
2918 
2919 	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
2920 	    "completed successfully\n", __func__, (uintmax_t)sasaddr);
2921 
2922 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2923 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2924 	else
2925 		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2926 
2927 bailout:
2928 	/*
2929 	 * We sync in both directions because we had DMAs in the S/G list
2930 	 * in both directions.
2931 	 */
2932 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2933 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2934 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2935 	mpr_free_command(sc, cm);
2936 	xpt_done(ccb);
2937 }
2938 
2939 static void
2940 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2941 {
2942 	struct mpr_command *cm;
2943 	uint8_t *request, *response;
2944 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2945 	struct mpr_softc *sc;
2946 	struct sglist *sg;
2947 	int error;
2948 
2949 	sc = sassc->sc;
2950 	sg = NULL;
2951 	error = 0;
2952 
2953 #if (__FreeBSD_version >= 1000028) || \
2954     ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
2955 	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2956 	case CAM_DATA_PADDR:
2957 	case CAM_DATA_SG_PADDR:
2958 		/*
2959 		 * XXX We don't yet support physical addresses here.
2960 		 */
2961 		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2962 		    "supported\n", __func__);
2963 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2964 		xpt_done(ccb);
2965 		return;
2966 	case CAM_DATA_SG:
2967 		/*
2968 		 * The chip does not support more than one buffer for the
2969 		 * request or response.
2970 		 */
2971 		if ((ccb->smpio.smp_request_sglist_cnt > 1)
2972 		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2973 			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
2974 			    "response buffer segments not supported for SMP\n",
2975 			    __func__);
2976 			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2977 			xpt_done(ccb);
2978 			return;
2979 		}
2980 
2981 		/*
2982 		 * The CAM_SCATTER_VALID flag was originally implemented
2983 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2984 		 * We have two.  So, just take that flag to mean that we
2985 		 * might have S/G lists, and look at the S/G segment count
2986 		 * to figure out whether that is the case for each individual
2987 		 * buffer.
2988 		 */
2989 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2990 			bus_dma_segment_t *req_sg;
2991 
2992 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2993 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2994 		} else
2995 			request = ccb->smpio.smp_request;
2996 
2997 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2998 			bus_dma_segment_t *rsp_sg;
2999 
3000 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3001 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3002 		} else
3003 			response = ccb->smpio.smp_response;
3004 		break;
3005 	case CAM_DATA_VADDR:
3006 		request = ccb->smpio.smp_request;
3007 		response = ccb->smpio.smp_response;
3008 		break;
3009 	default:
3010 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3011 		xpt_done(ccb);
3012 		return;
3013 	}
3014 #else /* __FreeBSD_version < 1000028 */
3015 	/*
3016 	 * XXX We don't yet support physical addresses here.
3017 	 */
3018 	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
3019 		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
3020 		    "supported\n", __func__);
3021 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3022 		xpt_done(ccb);
3023 		return;
3024 	}
3025 
3026 	/*
3027 	 * If the user wants to send an S/G list, check to make sure they
3028 	 * have single buffers.
3029 	 */
3030 	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
3031 		/*
3032 		 * The chip does not support more than one buffer for the
3033 		 * request or response.
3034 		 */
3035 	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
3036 		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
3037 			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
3038 			    "response buffer segments not supported for SMP\n",
3039 			    __func__);
3040 			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3041 			xpt_done(ccb);
3042 			return;
3043 		}
3044 
3045 		/*
3046 		 * The CAM_SCATTER_VALID flag was originally implemented
3047 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
3048 		 * We have two.  So, just take that flag to mean that we
3049 		 * might have S/G lists, and look at the S/G segment count
3050 		 * to figure out whether that is the case for each individual
3051 		 * buffer.
3052 		 */
3053 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
3054 			bus_dma_segment_t *req_sg;
3055 
3056 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3057 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3058 		} else
3059 			request = ccb->smpio.smp_request;
3060 
3061 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
3062 			bus_dma_segment_t *rsp_sg;
3063 
3064 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3065 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3066 		} else
3067 			response = ccb->smpio.smp_response;
3068 	} else {
3069 		request = ccb->smpio.smp_request;
3070 		response = ccb->smpio.smp_response;
3071 	}
3072 #endif /* __FreeBSD_version < 1000028 */
3073 
3074 	cm = mpr_alloc_command(sc);
3075 	if (cm == NULL) {
3076 		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
3077 		    __func__);
3078 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3079 		xpt_done(ccb);
3080 		return;
3081 	}
3082 
3083 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
3084 	bzero(req, sizeof(*req));
3085 	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
3086 
3087 	/* Allow the chip to use any route to this SAS address. */
3088 	req->PhysicalPort = 0xff;
3089 
3090 	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
3091 	req->SGLFlags =
3092 	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
3093 
3094 	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
3095 	    "%#jx\n", __func__, (uintmax_t)sasaddr);
3096 
3097 	mpr_init_sge(cm, req, &req->SGL);
3098 
3099 	/*
3100 	 * Set up a uio to pass into mpr_map_command().  This allows us to
3101 	 * do one map command, and one busdma call in there.
3102 	 */
3103 	cm->cm_uio.uio_iov = cm->cm_iovec;
3104 	cm->cm_uio.uio_iovcnt = 2;
3105 	cm->cm_uio.uio_segflg = UIO_SYSSPACE;
3106 
3107 	/*
3108 	 * The read/write flag isn't used by busdma, but set it just in
3109 	 * case.  This isn't exactly accurate, either, since we're going in
3110 	 * both directions.
3111 	 */
3112 	cm->cm_uio.uio_rw = UIO_WRITE;
3113 
3114 	cm->cm_iovec[0].iov_base = request;
3115 	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
3116 	cm->cm_iovec[1].iov_base = response;
3117 	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
3118 
3119 	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
3120 			       cm->cm_iovec[1].iov_len;
3121 
3122 	/*
3123 	 * Trigger a warning message in mpr_data_cb() for the user if we
3124 	 * wind up exceeding two S/G segments.  The chip expects one
3125 	 * segment for the request and another for the response.
3126 	 */
3127 	cm->cm_max_segs = 2;
3128 
3129 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3130 	cm->cm_complete = mprsas_smpio_complete;
3131 	cm->cm_complete_data = ccb;
3132 
3133 	/*
3134 	 * Tell the mapping code that we're using a uio, and that this is
3135 	 * an SMP passthrough request.  There is a little special-case
3136 	 * logic there (in mpr_data_cb()) to handle the bidirectional
3137 	 * transfer.
3138 	 */
3139 	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
3140 			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
3141 
3142 	/* The chip data format is little endian. */
3143 	req->SASAddress.High = htole32(sasaddr >> 32);
3144 	req->SASAddress.Low = htole32(sasaddr);
3145 
3146 	/*
3147 	 * XXX Note that we don't have a timeout/abort mechanism here.
3148 	 * From the manual, it looks like task management requests only
3149 	 * work for SCSI IO and SATA passthrough requests.  We may need to
3150 	 * have a mechanism to retry requests in the event of a chip reset
3151 	 * at least.  Hopefully the chip will insure that any errors short
3152 	 * of that are relayed back to the driver.
3153 	 */
3154 	error = mpr_map_command(sc, cm);
3155 	if ((error != 0) && (error != EINPROGRESS)) {
3156 		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
3157 		    "mpr_map_command()\n", __func__, error);
3158 		goto bailout_error;
3159 	}
3160 
3161 	return;
3162 
3163 bailout_error:
3164 	mpr_free_command(sc, cm);
3165 	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3166 	xpt_done(ccb);
3167 	return;
3168 }
3169 
/*
 * Handle an XPT_SMP_IO CCB: resolve the SAS address of the SMP target
 * (either the addressed device itself or its parent expander) and hand
 * the request off to mprsas_send_smpcmd().  On any resolution failure the
 * CCB is completed immediately with an error status.
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we have to figure out what the address of
	 * the expander it hangs off of is, below.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		/* Old probe: look the parent up in the driver's target list. */
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/* New probe: parent info is cached directly on the target. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */

	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3297 #endif //__FreeBSD_version >= 900026
3298 
3299 static void
3300 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3301 {
3302 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3303 	struct mpr_softc *sc;
3304 	struct mpr_command *tm;
3305 	struct mprsas_target *targ;
3306 
3307 	MPR_FUNCTRACE(sassc->sc);
3308 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3309 
3310 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3311 	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3312 	sc = sassc->sc;
3313 	tm = mpr_alloc_command(sc);
3314 	if (tm == NULL) {
3315 		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3316 		    "mprsas_action_resetdev\n");
3317 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3318 		xpt_done(ccb);
3319 		return;
3320 	}
3321 
3322 	targ = &sassc->targets[ccb->ccb_h.target_id];
3323 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3324 	req->DevHandle = htole16(targ->handle);
3325 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3326 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3327 
3328 	/* SAS Hard Link Reset / SATA Link Reset */
3329 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3330 
3331 	tm->cm_data = NULL;
3332 	tm->cm_desc.HighPriority.RequestFlags =
3333 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3334 	tm->cm_complete = mprsas_resetdev_complete;
3335 	tm->cm_complete_data = ccb;
3336 
3337 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3338 	    __func__, targ->tid);
3339 	tm->cm_targ = targ;
3340 	targ->flags |= MPRSAS_TARGET_INRESET;
3341 
3342 	mpr_map_command(sc, tm);
3343 }
3344 
/*
 * Completion handler for the target reset issued by
 * mprsas_action_resetdev().  Translates the task management reply into a
 * CAM status on the original XPT_RESET_DEV CCB, announces the bus device
 * reset on success, and completes the CCB.
 */
static void
mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, req->DevHandle);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/*
	 * NOTE(review): unlike mprsas_smpio_complete(), there is no NULL
	 * check on resp (cm_reply) before the dereferences below — confirm
	 * that task management replies can never be lost (e.g. across a
	 * controller reset).
	 *
	 * NOTE(review): ResponseCode is passed through le32toh() here and
	 * below; if it is a single-byte field in the MPI2 reply this byte
	 * swap would be wrong on big-endian hosts — verify against the MPI2
	 * header definition.
	 */
	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		/* Tell CAM a bus device reset was delivered to every LUN. */
		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

bailout:

	mprsas_free_tm(sc, tm);
	xpt_done(ccb);
}
3390 
3391 static void
3392 mprsas_poll(struct cam_sim *sim)
3393 {
3394 	struct mprsas_softc *sassc;
3395 
3396 	sassc = cam_sim_softc(sim);
3397 
3398 	if (sassc->sc->mpr_debug & MPR_TRACE) {
3399 		/* frequent debug messages during a panic just slow
3400 		 * everything down too much.
3401 		 */
3402 		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3403 		    __func__);
3404 		sassc->sc->mpr_debug &= ~MPR_TRACE;
3405 	}
3406 
3407 	mpr_intr_locked(sassc->sc);
3408 }
3409 
/*
 * CAM asynchronous event callback.  Handles read-capacity advanced-info
 * changes (to track per-LUN EEDP formatting) and new-device arrivals (to
 * kick off the EEDP probe on older FreeBSD versions).
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;
#endif

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the LUN record for this path, if one already exists. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* First time we've seen this LUN: create a record for it. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached READ CAPACITY(16) data via an advinfo
		 * CCB (synchronous xpt_action call) to see whether the LUN
		 * is formatted with protection information.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#endif
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
			break;
#endif

		cgd = arg;
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
		mprsas_check_eedp(sc, path, cgd);
#endif
		break;
	}
	default:
		break;
	}
}
3539 
3540 #if (__FreeBSD_version < 901503) || \
3541     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3542 static void
3543 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3544     struct ccb_getdev *cgd)
3545 {
3546 	struct mprsas_softc *sassc = sc->sassc;
3547 	struct ccb_scsiio *csio;
3548 	struct scsi_read_capacity_16 *scsi_cmd;
3549 	struct scsi_read_capacity_eedp *rcap_buf;
3550 	path_id_t pathid;
3551 	target_id_t targetid;
3552 	lun_id_t lunid;
3553 	union ccb *ccb;
3554 	struct cam_path *local_path;
3555 	struct mprsas_target *target;
3556 	struct mprsas_lun *lun;
3557 	uint8_t	found_lun;
3558 	char path_str[64];
3559 
3560 	pathid = cam_sim_path(sassc->sim);
3561 	targetid = xpt_path_target_id(path);
3562 	lunid = xpt_path_lun_id(path);
3563 
3564 	KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3565 	    "mprsas_check_eedp\n", targetid));
3566 	target = &sassc->targets[targetid];
3567 	if (target->handle == 0x0)
3568 		return;
3569 
3570 	/*
3571 	 * Determine if the device is EEDP capable.
3572 	 *
3573 	 * If this flag is set in the inquiry data, the device supports
3574 	 * protection information, and must support the 16 byte read capacity
3575 	 * command, otherwise continue without sending read cap 16.
3576 	 */
3577 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3578 		return;
3579 
3580 	/*
3581 	 * Issue a READ CAPACITY 16 command.  This info is used to determine if
3582 	 * the LUN is formatted for EEDP support.
3583 	 */
3584 	ccb = xpt_alloc_ccb_nowait();
3585 	if (ccb == NULL) {
3586 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3587 		    "support.\n");
3588 		return;
3589 	}
3590 
3591 	if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3592 	    CAM_REQ_CMP) {
3593 		mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3594 		    "support.\n");
3595 		xpt_free_ccb(ccb);
3596 		return;
3597 	}
3598 
3599 	/*
3600 	 * If LUN is already in list, don't create a new one.
3601 	 */
3602 	found_lun = FALSE;
3603 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3604 		if (lun->lun_id == lunid) {
3605 			found_lun = TRUE;
3606 			break;
3607 		}
3608 	}
3609 	if (!found_lun) {
3610 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3611 		    M_NOWAIT | M_ZERO);
3612 		if (lun == NULL) {
3613 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3614 			    "EEDP support.\n");
3615 			xpt_free_path(local_path);
3616 			xpt_free_ccb(ccb);
3617 			return;
3618 		}
3619 		lun->lun_id = lunid;
3620 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3621 	}
3622 
3623 	xpt_path_string(local_path, path_str, sizeof(path_str));
3624 	mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3625 	    path_str, target->handle);
3626 
3627 	/*
3628 	 * Issue a READ CAPACITY 16 command for the LUN.  The
3629 	 * mprsas_read_cap_done function will load the read cap info into the
3630 	 * LUN struct.
3631 	 */
3632 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3633 	    M_NOWAIT | M_ZERO);
3634 	if (rcap_buf == NULL) {
3635 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3636 		    "buffer for EEDP support.\n");
3637 		xpt_free_path(ccb->ccb_h.path);
3638 		xpt_free_ccb(ccb);
3639 		return;
3640 	}
3641 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3642 	csio = &ccb->csio;
3643 	csio->ccb_h.func_code = XPT_SCSI_IO;
3644 	csio->ccb_h.flags = CAM_DIR_IN;
3645 	csio->ccb_h.retry_count = 4;
3646 	csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3647 	csio->ccb_h.timeout = 60000;
3648 	csio->data_ptr = (uint8_t *)rcap_buf;
3649 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3650 	csio->sense_len = MPR_SENSE_LEN;
3651 	csio->cdb_len = sizeof(*scsi_cmd);
3652 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3653 
3654 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3655 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3656 	scsi_cmd->opcode = 0x9E;
3657 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3658 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3659 
3660 	ccb->ccb_h.ppriv_ptr1 = sassc;
3661 	xpt_action(ccb);
3662 }
3663 
/*
 * Completion handler for the internally-generated READ CAPACITY 16 command
 * issued per-LUN for EEDP discovery.  Records the EEDP formatting state and
 * block size in the matching mprsas_lun, then frees the data buffer, path,
 * and CCB that were allocated when the command was sent.
 */
static void
mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mprsas_softc *sassc;
	struct mprsas_target *target;
	struct mprsas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver must release the devq itself because this SCSI command
	 * was generated internally by the driver rather than by a CAM
	 * periph.  This is currently the only place the driver issues an
	 * internal SCSI command; if more are added in the future they will
	 * also need to release the devq themselves, since such commands
	 * never return through cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	/* Data buffer allocated by the sender; freed below. */
	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
	    "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in with EEDP
		 * info. If the READ CAP 16 command had some SCSI error (common
		 * if command is not supported), mark the lun as not supporting
		 * EEDP and set the block size to 0.
		 */
		if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
		    (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Bit 0 of the protect byte indicates EEDP formatting. */
		if (rcap_buf->protect & 0x01) {
			mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
			    "%d is formatted for EEDP support.\n",
			    done_ccb->ccb_h.target_lun,
			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPR);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3732 #endif /* (__FreeBSD_version < 901503) || \
3733           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3734 
3735 void
3736 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3737     struct mprsas_target *target, lun_id_t lun_id)
3738 {
3739 	union ccb *ccb;
3740 	path_id_t path_id;
3741 
3742 	/*
3743 	 * Set the INRESET flag for this target so that no I/O will be sent to
3744 	 * the target until the reset has completed.  If an I/O request does
3745 	 * happen, the devq will be frozen.  The CCB holds the path which is
3746 	 * used to release the devq.  The devq is released and the CCB is freed
3747 	 * when the TM completes.
3748 	 */
3749 	ccb = xpt_alloc_ccb_nowait();
3750 	if (ccb) {
3751 		path_id = cam_sim_path(sc->sassc->sim);
3752 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3753 		    target->tid, lun_id) != CAM_REQ_CMP) {
3754 			xpt_free_ccb(ccb);
3755 		} else {
3756 			tm->cm_ccb = ccb;
3757 			tm->cm_targ = target;
3758 			target->flags |= MPRSAS_TARGET_INRESET;
3759 		}
3760 	}
3761 }
3762 
3763 int
3764 mprsas_startup(struct mpr_softc *sc)
3765 {
3766 	/*
3767 	 * Send the port enable message and set the wait_for_port_enable flag.
3768 	 * This flag helps to keep the simq frozen until all discovery events
3769 	 * are processed.
3770 	 */
3771 	sc->wait_for_port_enable = 1;
3772 	mprsas_send_portenable(sc);
3773 	return (0);
3774 }
3775 
3776 static int
3777 mprsas_send_portenable(struct mpr_softc *sc)
3778 {
3779 	MPI2_PORT_ENABLE_REQUEST *request;
3780 	struct mpr_command *cm;
3781 
3782 	MPR_FUNCTRACE(sc);
3783 
3784 	if ((cm = mpr_alloc_command(sc)) == NULL)
3785 		return (EBUSY);
3786 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3787 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3788 	request->MsgFlags = 0;
3789 	request->VP_ID = 0;
3790 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3791 	cm->cm_complete = mprsas_portenable_complete;
3792 	cm->cm_data = NULL;
3793 	cm->cm_sge = NULL;
3794 
3795 	mpr_map_command(sc, cm);
3796 	mpr_dprint(sc, MPR_XINFO,
3797 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3798 	    cm, cm->cm_req, cm->cm_complete);
3799 	return (0);
3800 }
3801 
3802 static void
3803 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3804 {
3805 	MPI2_PORT_ENABLE_REPLY *reply;
3806 	struct mprsas_softc *sassc;
3807 
3808 	MPR_FUNCTRACE(sc);
3809 	sassc = sc->sassc;
3810 
3811 	/*
3812 	 * Currently there should be no way we can hit this case.  It only
3813 	 * happens when we have a failure to allocate chain frames, and
3814 	 * port enable commands don't have S/G lists.
3815 	 */
3816 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3817 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3818 		    "This should not happen!\n", __func__, cm->cm_flags);
3819 	}
3820 
3821 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3822 	if (reply == NULL)
3823 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3824 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3825 	    MPI2_IOCSTATUS_SUCCESS)
3826 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3827 
3828 	mpr_free_command(sc, cm);
3829 	/*
3830 	 * Done waiting for port enable to complete.  Decrement the refcount.
3831 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3832 	 * take place.
3833 	 */
3834 	sc->wait_for_port_enable = 0;
3835 	sc->port_enable_complete = 1;
3836 	wakeup(&sc->port_enable_complete);
3837 	mprsas_startup_decrement(sassc);
3838 }
3839 
3840 int
3841 mprsas_check_id(struct mprsas_softc *sassc, int id)
3842 {
3843 	struct mpr_softc *sc = sassc->sc;
3844 	char *ids;
3845 	char *name;
3846 
3847 	ids = &sc->exclude_ids[0];
3848 	while((name = strsep(&ids, ",")) != NULL) {
3849 		if (name[0] == '\0')
3850 			continue;
3851 		if (strtol(name, NULL, 0) == (long)id)
3852 			return (1);
3853 	}
3854 
3855 	return (0);
3856 }
3857 
3858 void
3859 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3860 {
3861 	struct mprsas_softc *sassc;
3862 	struct mprsas_lun *lun, *lun_tmp;
3863 	struct mprsas_target *targ;
3864 	int i;
3865 
3866 	sassc = sc->sassc;
3867 	/*
3868 	 * The number of targets is based on IOC Facts, so free all of
3869 	 * the allocated LUNs for each target and then the target buffer
3870 	 * itself.
3871 	 */
3872 	for (i=0; i< maxtargets; i++) {
3873 		targ = &sassc->targets[i];
3874 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3875 			free(lun, M_MPR);
3876 		}
3877 	}
3878 	free(sassc->targets, M_MPR);
3879 
3880 	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3881 	    M_MPR, M_WAITOK|M_ZERO);
3882 	if (!sassc->targets) {
3883 		panic("%s failed to alloc targets with error %d\n",
3884 		    __func__, ENOMEM);
3885 	}
3886 }
3887