xref: /freebsd/sys/dev/mps/mps_sas.c (revision f11c7f63056671247335df83a3fe80b94c6616ac)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 /*-
27  * Copyright (c) 2011 LSI Corp.
28  * All rights reserved.
29  *
30  * Redistribution and use in source and binary forms, with or without
31  * modification, are permitted provided that the following conditions
32  * are met:
33  * 1. Redistributions of source code must retain the above copyright
34  *    notice, this list of conditions and the following disclaimer.
35  * 2. Redistributions in binary form must reproduce the above copyright
36  *    notice, this list of conditions and the following disclaimer in the
37  *    documentation and/or other materials provided with the distribution.
38  *
39  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
40  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
43  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
44  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
45  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
46  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
47  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
48  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
49  * SUCH DAMAGE.
50  *
51  * LSI MPT-Fusion Host Adapter FreeBSD
52  *
53  * $FreeBSD$
54  */
55 
56 #include <sys/cdefs.h>
57 __FBSDID("$FreeBSD$");
58 
59 /* Communications core for LSI MPT2 */
60 
61 /* TODO Move headers to mpsvar */
62 #include <sys/types.h>
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/kernel.h>
66 #include <sys/selinfo.h>
67 #include <sys/module.h>
68 #include <sys/bus.h>
69 #include <sys/conf.h>
70 #include <sys/bio.h>
71 #include <sys/malloc.h>
72 #include <sys/uio.h>
73 #include <sys/sysctl.h>
74 #include <sys/endian.h>
75 #include <sys/queue.h>
76 #include <sys/kthread.h>
77 #include <sys/taskqueue.h>
78 #include <sys/sbuf.h>
79 
80 #include <machine/bus.h>
81 #include <machine/resource.h>
82 #include <sys/rman.h>
83 
84 #include <machine/stdarg.h>
85 
86 #include <cam/cam.h>
87 #include <cam/cam_ccb.h>
88 #include <cam/cam_xpt.h>
89 #include <cam/cam_debug.h>
90 #include <cam/cam_sim.h>
91 #include <cam/cam_xpt_sim.h>
92 #include <cam/cam_xpt_periph.h>
93 #include <cam/cam_periph.h>
94 #include <cam/scsi/scsi_all.h>
95 #include <cam/scsi/scsi_message.h>
96 #if __FreeBSD_version >= 900026
97 #include <cam/scsi/smp_all.h>
98 #endif
99 
100 #include <dev/mps/mpi/mpi2_type.h>
101 #include <dev/mps/mpi/mpi2.h>
102 #include <dev/mps/mpi/mpi2_ioc.h>
103 #include <dev/mps/mpi/mpi2_sas.h>
104 #include <dev/mps/mpi/mpi2_cnfg.h>
105 #include <dev/mps/mpi/mpi2_init.h>
106 #include <dev/mps/mpi/mpi2_tool.h>
107 #include <dev/mps/mps_ioctl.h>
108 #include <dev/mps/mpsvar.h>
109 #include <dev/mps/mps_table.h>
110 #include <dev/mps/mps_sas.h>
111 
112 #define MPSSAS_DISCOVERY_TIMEOUT	20
113 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
114 
115 /*
116  * static array to check SCSI OpCode for EEDP protection bits
117  */
118 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
119 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
120 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	/* 0x00 - 0x1F: no EEDP handling */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x28 READ(10), 0x2A WRITE(10), 0x2E WRITE AND VERIFY(10),
	 * 0x2F VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x88 READ(16), 0x8A WRITE(16), 0x8E WRITE AND VERIFY(16),
	 * 0x8F VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA8 READ(12), 0xAA WRITE(12), 0xAE WRITE AND VERIFY(12),
	 * 0xAF VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0xB0 - 0xFF: no EEDP handling */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
139 
140 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
141 
142 static struct mpssas_target * mpssas_find_target_by_handle(struct mpssas_softc *, int, uint16_t);
143 static void mpssas_discovery_timeout(void *data);
144 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
145 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
146 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
147 static void mpssas_poll(struct cam_sim *sim);
148 static void mpssas_scsiio_timeout(void *data);
149 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
150 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
151     struct mps_command *cm, union ccb *ccb);
152 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
153 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
154 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
155 #if __FreeBSD_version >= 900026
156 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
157 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
158 			       uint64_t sasaddr);
159 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
160 #endif //FreeBSD_version >= 900026
161 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
162 static int  mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
163 static int  mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
164 static void mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb);
165 static void mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb);
166 static void mpssas_scanner_thread(void *arg);
167 #if __FreeBSD_version >= 1000006
168 static void mpssas_async(void *callback_arg, uint32_t code,
169 			 struct cam_path *path, void *arg);
170 #else
171 static void mpssas_check_eedp(struct mpssas_softc *sassc);
172 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
173 #endif
174 static int mpssas_send_portenable(struct mps_softc *sc);
175 static void mpssas_portenable_complete(struct mps_softc *sc,
176     struct mps_command *cm);
177 
178 static struct mpssas_target *
179 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
180 {
181 	struct mpssas_target *target;
182 	int i;
183 
184 	for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
185 		target = &sassc->targets[i];
186 		if (target->handle == handle)
187 			return (target);
188 	}
189 
190 	return (NULL);
191 }
192 
193 /* we need to freeze the simq during attach and diag reset, to avoid failing
194  * commands before device handles have been found by discovery.  Since
195  * discovery involves reading config pages and possibly sending commands,
196  * discovery actions may continue even after we receive the end of discovery
197  * event, so refcount discovery actions instead of assuming we can unfreeze
198  * the simq when we get the event.
199  */
void
mpssas_startup_increment(struct mpssas_softc *sassc)
{
	/* Only refcount while attach/diag-reset discovery is in progress;
	 * outside of startup the simq is managed elsewhere. */
	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mps_dprint(sassc->sc, MPS_INFO,
			    "%s freezing simq\n", __func__);
			xpt_freeze_simq(sassc->sim, 1);
		}
		mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
214 
/*
 * Drop one discovery reference taken by mpssas_startup_increment().  When
 * the count reaches zero, startup mode is exited, the simq is released,
 * and a wildcard rescan picks up whatever topology discovery found.
 */
void
mpssas_startup_decrement(struct mpssas_softc *sassc)
{
	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mps_dprint(sassc->sc, MPS_INFO,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPSSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
			mpssas_rescan_target(sassc->sc, NULL);
		}
		mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
233 
234 /* LSI's firmware requires us to stop sending commands when we're doing task
235  * management, so refcount the TMs and keep the simq frozen when any are in
236  * use.
237  */
/*
 * Allocate a high-priority command for a task management request.  The
 * first outstanding TM freezes the simq (the firmware requires that no
 * normal I/O be issued while task management is in flight); subsequent
 * TMs just bump the refcount.  Returns NULL if no high-priority command
 * slot is available; in that case the simq is left untouched.
 */
struct mps_command *
mpssas_alloc_tm(struct mps_softc *sc)
{
	struct mps_command *tm;

	tm = mps_alloc_high_priority_command(sc);
	if (tm != NULL) {
		if (sc->sassc->tm_count++ == 0) {
			mps_printf(sc, "%s freezing simq\n", __func__);
			xpt_freeze_simq(sc->sassc->sim, 1);
		}
		mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
		    sc->sassc->tm_count);
	}
	return tm;
}
254 
/*
 * Release a TM command obtained from mpssas_alloc_tm() and drop its simq
 * reference; the last outstanding TM unfreezes the simq.  Passing NULL is
 * a no-op.
 *
 * NOTE(review): assumes every call is balanced against a successful
 * mpssas_alloc_tm() — an unpaired free would wrap tm_count and
 * over-release the simq.  Confirm against callers.
 */
void
mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
{
	if (tm == NULL)
		return;

	/* if there are no TMs in use, we can release the simq.  We use our
	 * own refcount so that it's easier for a diag reset to cleanup and
	 * release the simq.
	 */
	if (--sc->sassc->tm_count == 0) {
		mps_printf(sc, "%s releasing simq\n", __func__);
		xpt_release_simq(sc->sassc->sim, 1);
	}
	mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
	    sc->sassc->tm_count);

	mps_free_high_priority_command(sc, tm);
}
274 
275 
/*
 * Queue a CAM rescan for one target, or for the whole bus when targ is
 * NULL.  The target id is recovered from the target's index in the
 * sassc->targets array (pointer arithmetic).  Allocation or path-creation
 * failures are logged and the rescan is silently dropped.
 */
void
mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
{
	struct mpssas_softc *sassc = sc->sassc;
	path_id_t pathid;
	target_id_t targetid;
	union ccb *ccb;

	pathid = cam_sim_path(sassc->sim);
	if (targ == NULL)
		targetid = CAM_TARGET_WILDCARD;
	else
		targetid = targ - sassc->targets;

	/*
	 * Allocate a CCB and schedule a rescan.
	 */
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		mps_dprint(sc, MPS_FAULT, "unable to alloc CCB for rescan\n");
		return;
	}

	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
		            targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_FAULT, "unable to create path for rescan\n");
		xpt_free_ccb(ccb);
		return;
	}

	/* XXX Hardwired to scan the bus for now */
	ccb->ccb_h.func_code = XPT_SCAN_BUS;
	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
	mpssas_rescan(sassc, ccb);
}
311 
/*
 * printf-style logging helper for a command.  The line is prefixed with
 * the command's CAM path and (for SCSI I/O) the decoded CDB and transfer
 * length; commands with no attached CCB get a synthesized
 * "(noperiph:sim:bus:target:lun)" prefix instead.  The SMID is always
 * included.  Output is assembled in a fixed-size stack sbuf, so overly
 * long messages are truncated rather than overflowing.
 */
static void
mpssas_log_command(struct mps_command *cm, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* Fixed-length sbuf backed by str[]; no allocation in this path. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: fall back to sim/bus/target/lun identification. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	printf("%s", sbuf_data(&sb));

	va_end(ap);
}
353 
354 static void
355 mpssas_lost_target(struct mps_softc *sc, struct mpssas_target *targ)
356 {
357 	struct mpssas_softc *sassc = sc->sassc;
358 	path_id_t pathid = cam_sim_path(sassc->sim);
359 	struct cam_path *path;
360 
361 	mps_printf(sc, "%s targetid %u\n", __func__, targ->tid);
362 	if (xpt_create_path(&path, NULL, pathid, targ->tid, 0) != CAM_REQ_CMP) {
363 		mps_printf(sc, "unable to create path for lost target %d\n",
364 		    targ->tid);
365 		return;
366 	}
367 
368 	xpt_async(AC_LOST_DEVICE, path, NULL);
369 	xpt_free_path(path);
370 }
371 
372 /*
373  * The MPT2 firmware performs debounce on the link to avoid transient link
374  * errors and false removals.  When it does decide that link has been lost
375  * and a device need to go away, it expects that the host will perform a
376  * target reset and then an op remove.  The reset has the side-effect of
377  * aborting any outstanding requests for the device, which is required for
378  * the op-remove to succeed.  It's not clear if the host should check for
379  * the device coming back alive after the reset.
380  */
/*
 * Begin the two-step removal sequence for the device with the given
 * firmware handle: issue a target reset here, and let the completion
 * handler (mpssas_remove_device) follow up with the SAS IO unit control
 * "remove device" operation.  On WD controllers configured to always
 * expose the disk, removal is skipped entirely.
 */
void
mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ = NULL;

	mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);

	/*
	 * If this is a WD controller, determine if the disk should be exposed
	 * to the OS or not.  If disk should be exposed, return from this
	 * function without doing anything.
	 */
	sc = sassc->sc;
	if ((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE) && (sc->WD_hide_expose ==
	    MPS_WD_EXPOSE_ALWAYS)) {
		return;
	}

	targ = mpssas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		printf("%s: invalid handle 0x%x \n", __func__, handle);
		return;
	}

	targ->flags |= MPSSAS_TARGET_INREMOVAL;

	/* Failure here leaves the target flagged INREMOVAL with no TM queued;
	 * NOTE(review): presumably retried on a later event — confirm. */
	cm = mpssas_alloc_tm(sc);
	if (cm == NULL) {
		mps_printf(sc, "%s: command alloc failure\n", __func__);
		return;
	}

	/* Tell CAM the device is gone before the firmware-level removal. */
	mpssas_lost_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = targ->handle;
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	/* The handle rides along in cm_complete_data for the next stage. */
	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	cm->cm_complete = mpssas_remove_device;
	cm->cm_complete_data = (void *)(uintptr_t)handle;
	mps_map_command(sc, cm);
}
436 
/*
 * Completion handler for the target reset issued by mpssas_prepare_remove().
 * On success, reuses the same TM command to send the SAS IO unit control
 * OP_REMOVE_DEVICE request (completing in mpssas_remove_complete), then
 * fails back any commands still queued on the target with
 * CAM_DEV_NOT_THERE.  On any error the TM is freed and the removal is
 * abandoned.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
		    __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
		mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
		   reply->IOCStatus, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_INFO, "Reset aborted %u commands\n",
	    reply->TerminationCount);
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = handle;
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_INFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	/* 'tm' is reused here as the loop cursor over the target's remaining
	 * commands; the TM itself was handed to mps_map_command above.
	 * NOTE(review): assumes cm_complete_data of these commands holds the
	 * originating CCB (set by the scsiio path) — confirm. */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_INFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		mpssas_scsiio_complete(sc, tm);
	}
}
509 
/*
 * Completion handler for the OP_REMOVE_DEVICE request sent by
 * mpssas_remove_device().  On success the target's handle-related fields
 * are zeroed so the slot reads as empty; devname/sasaddr are deliberately
 * preserved (see comment below).  The TM is freed on every path.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_printf(sc, "%s NULL reply removing device 0x%04x\n",
		    __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_printf(sc, "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, reply->IOCStatus);

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
	}

	mpssas_free_tm(sc, tm);
}
565 
566 static int
567 mpssas_register_events(struct mps_softc *sc)
568 {
569 	uint8_t events[16];
570 
571 	bzero(events, 16);
572 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
573 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
574 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
575 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
576 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
577 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
578 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
579 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
580 	setbit(events, MPI2_EVENT_IR_VOLUME);
581 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
582 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
583 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
584 
585 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
586 	    &sc->sassc->mpssas_eh);
587 
588 	return (0);
589 }
590 
/*
 * Attach the SAS/CAM layer: allocate the sassc softc and target table,
 * create the SIM and its queue, spin up the event taskqueue and rescan
 * thread, register the SCSI bus with CAM, and enter startup mode with the
 * simq frozen until discovery completes.  On any failure, everything
 * allocated so far is torn down via mps_detach_sas().  Returns 0 or errno.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
#if __FreeBSD_version >= 1000006
	cam_status status;
#endif
	int unit, error = 0;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	/* M_WAITOK: these allocations cannot fail, only sleep. */
	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
	sc->sassc = sassc;
	sassc->sc = sc;

	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mps_dprint(sc, MPS_FAULT, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_FAULT, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	/* NOTE(review): taskqueue_create() with M_NOWAIT can return NULL;
	 * the result is not checked here — confirm whether that is safe. */
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);

	/* Run the task queue with lowest priority */
	taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	TAILQ_INIT(&sassc->ccb_scanq);
	error = mps_kproc_create(mpssas_scanner_thread, sassc,
	    &sassc->rescan_thread, 0, 0, "mps_scan%d", unit);
	if (error) {
		mps_printf(sc, "Error %d starting rescan thread\n", error);
		goto out;
	}

	/* Everything from here on needs the softc lock held. */
	mps_lock(sc);
	sassc->flags |= MPSSAS_SCANTHREAD;

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_FAULT, "Error %d registering SCSI bus\n",
		    error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.  Freezing
	 * the simq will prevent the CAM boottime scanner from running
	 * before discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	xpt_freeze_simq(sassc->sim, 1);
	sc->sassc->startup_refcount = 0;

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
	sassc->discovery_timeouts = 0;

	sassc->tm_count = 0;

#if __FreeBSD_version >= 1000006
	/* Registration failure is non-fatal; only advanced-info updates
	 * are lost. */
	status = xpt_register_async(AC_ADVINFO_CHANGED, mpssas_async, sc, NULL);
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x registering async handler for "
			   "AC_ADVINFO_CHANGED events\n", status);
	}
#endif

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);
	return (error);
}
687 
/*
 * Tear down the SAS/CAM layer: deregister events, drain and free the
 * event taskqueue (lock unheld to avoid deadlock), deregister from CAM,
 * signal the scanner thread and wait (bounded) for it to exit, then free
 * the SIM queue, target table, and softc.  Safe to call on a partially
 * attached instance (it is the error path of mps_attach_sas); returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
#if __FreeBSD_version >= 1000006
	xpt_register_async(0, mpssas_async, sc, NULL);
#endif

	/* If still in startup, drop the simq freeze taken at attach time. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	/* Ask the scanner thread to exit; it clears MPSSAS_SCANTHREAD and
	 * wakes &sassc->flags when done, so wait up to 30s for that. */
	if (sassc->flags & MPSSAS_SCANTHREAD) {
		sassc->flags |= MPSSAS_SHUTDOWN;
		wakeup(&sassc->ccb_scanq);

		if (sassc->flags & MPSSAS_SCANTHREAD) {
			msleep(&sassc->flags, &sc->mps_mtx, PRIBIO,
			       "mps_shutdown", 30 * hz);
		}
	}
	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
745 
746 void
747 mpssas_discovery_end(struct mpssas_softc *sassc)
748 {
749 	struct mps_softc *sc = sassc->sc;
750 
751 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
752 
753 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
754 		callout_stop(&sassc->discovery_callout);
755 
756 }
757 
/*
 * Callout fired when discovery has not completed within
 * MPSSAS_DISCOVERY_TIMEOUT seconds.  Interrupts may be broken, so poll
 * the hardware once; if discovery is now done, finish up, otherwise
 * re-arm the timer up to MPSSAS_MAX_DISCOVERY_TIMEOUTS times before
 * giving up and forcing discovery to end.
 */
static void
mpssas_discovery_timeout(void *data)
{
	struct mpssas_softc *sassc = data;
	struct mps_softc *sc;

	sc = sassc->sc;
	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	mps_lock(sc);
	mps_printf(sc,
	    "Timeout waiting for discovery, interrupts may not be working!\n");
	sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;

	/* Poll the hardware for events in case interrupts aren't working */
	mps_intr_locked(sc);

	mps_printf(sassc->sc,
	    "Finished polling after discovery timeout at %d\n", ticks);

	if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
		/* Polling completed discovery; stop the watchdog. */
		mpssas_discovery_end(sassc);
	} else {
		if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
			/* Still discovering: re-arm the watchdog. */
			sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
			callout_reset(&sassc->discovery_callout,
			    MPSSAS_DISCOVERY_TIMEOUT * hz,
			    mpssas_discovery_timeout, sassc);
			sassc->discovery_timeouts++;
		} else {
			/* Too many timeouts; abandon discovery. */
			mps_dprint(sassc->sc, MPS_FAULT,
			    "Discovery timed out, continuing.\n");
			sassc->flags &= ~MPSSAS_IN_DISCOVERY;
			mpssas_discovery_end(sassc);
		}
	}

	mps_unlock(sc);
}
797 
/*
 * CAM action entry point for the SIM.  Dispatches on the CCB function
 * code; handlers that complete asynchronously (reset-dev, scsi-io,
 * smp-io) return directly, everything else falls through to xpt_done().
 * Called with the softc mutex held.
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	mps_dprint(sassc->sc, MPS_TRACE, "%s func 0x%x\n", __func__,
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report controller capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->sc->facts->MaxTargets - 1;
		cpi->max_lun = 0;
		cpi->initiator_id = 255;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXX KDM where does this number come from?
		 */
		cpi->maxio = 256 * 1024;
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report per-target transport settings (link rate, TQ). */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		targ = &sassc->targets[cts->ccb_h.target_id];
		if (targ->handle == 0x0) {
			/* No firmware handle: slot is empty. */
			cts->ccb_h.status = CAM_TID_INVALID;
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Map the firmware link-rate code to a bitrate in kb/s. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		cts->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_RESET_DEV:
		/* Completes asynchronously; handler calls xpt_done(). */
		mps_printf(sassc->sc, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mps_printf(sassc->sc, "mpssas_action faking success for "
			   "abort or reset\n");
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_SCSI_IO:
		/* Completes asynchronously; handler calls xpt_done(). */
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		/* Completes asynchronously; handler calls xpt_done(). */
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	}
	xpt_done(ccb);

}
914 
915 static void
916 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
917     target_id_t target_id, lun_id_t lun_id)
918 {
919 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
920 	struct cam_path *path;
921 
922 	mps_printf(sc, "%s code %x target %d lun %d\n", __func__,
923 	    ac_code, target_id, lun_id);
924 
925 	if (xpt_create_path(&path, NULL,
926 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
927 		mps_printf(sc, "unable to create path for reset "
928 			   "notification\n");
929 		return;
930 	}
931 
932 	xpt_async(ac_code, path, NULL);
933 	xpt_free_path(path);
934 }
935 
/*
 * Diag-reset cleanup: force-complete every outstanding command with a
 * NULL reply.  Commands with a completion callback get it invoked;
 * polled/sleeping commands get flagged complete and woken.  Any command
 * that is neither completed, woken, nor free is logged as an anomaly.
 * Called with the softc mutex held.
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	mps_printf(sc, "%s\n", __func__);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		}

		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
981 
982 void
983 mpssas_handle_reinit(struct mps_softc *sc)
984 {
985 	int i;
986 
987 	/* Go back into startup mode and freeze the simq, so that CAM
988 	 * doesn't send any commands until after we've rediscovered all
989 	 * targets and found the proper device handles for them.
990 	 *
991 	 * After the reset, portenable will trigger discovery, and after all
992 	 * discovery-related activities have finished, the simq will be
993 	 * released.
994 	 */
995 	mps_printf(sc, "%s startup\n", __func__);
996 	sc->sassc->flags |= MPSSAS_IN_STARTUP;
997 	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
998 	xpt_freeze_simq(sc->sassc->sim, 1);
999 
1000 	/* notify CAM of a bus reset */
1001 	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1002 	    CAM_LUN_WILDCARD);
1003 
1004 	/* complete and cleanup after all outstanding commands */
1005 	mpssas_complete_all_commands(sc);
1006 
1007 	mps_printf(sc, "%s startup %u tm %u after command completion\n",
1008 	    __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1009 
1010 	/*
1011 	 * The simq was explicitly frozen above, so set the refcount to 0.
1012 	 * The simq will be explicitly released after port enable completes.
1013 	 */
1014 	sc->sassc->startup_refcount = 0;
1015 
1016 	/* zero all the target handles, since they may change after the
1017 	 * reset, and we have to rediscover all the targets and use the new
1018 	 * handles.
1019 	 */
1020 	for (i = 0; i < sc->facts->MaxTargets; i++) {
1021 		if (sc->sassc->targets[i].outstanding != 0)
1022 			mps_printf(sc, "target %u outstanding %u\n",
1023 			    i, sc->sassc->targets[i].outstanding);
1024 		sc->sassc->targets[i].handle = 0x0;
1025 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1026 		sc->sassc->targets[i].outstanding = 0;
1027 		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
1028 	}
1029 }
1030 static void
1031 mpssas_tm_timeout(void *data)
1032 {
1033 	struct mps_command *tm = data;
1034 	struct mps_softc *sc = tm->cm_sc;
1035 
1036 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1037 
1038 	mpssas_log_command(tm, "task mgmt %p timed out\n", tm);
1039 	mps_reinit(sc);
1040 }
1041 
1042 static void
1043 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1044 {
1045 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1046 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1047 	unsigned int cm_count = 0;
1048 	struct mps_command *cm;
1049 	struct mpssas_target *targ;
1050 
1051 	callout_stop(&tm->cm_callout);
1052 
1053 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1054 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1055 	targ = tm->cm_targ;
1056 
1057 	/*
1058 	 * Currently there should be no way we can hit this case.  It only
1059 	 * happens when we have a failure to allocate chain frames, and
1060 	 * task management commands don't have S/G lists.
1061 	 */
1062 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1063 		mps_printf(sc, "%s: cm_flags = %#x for LUN reset! "
1064 			   "This should not happen!\n", __func__, tm->cm_flags);
1065 		mpssas_free_tm(sc, tm);
1066 		return;
1067 	}
1068 
1069 	if (reply == NULL) {
1070 		mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1071 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1072 			/* this completion was due to a reset, just cleanup */
1073 			targ->flags &= ~MPSSAS_TARGET_INRESET;
1074 			targ->tm = NULL;
1075 			mpssas_free_tm(sc, tm);
1076 		}
1077 		else {
1078 			/* we should have gotten a reply. */
1079 			mps_reinit(sc);
1080 		}
1081 		return;
1082 	}
1083 
1084 	mpssas_log_command(tm,
1085 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1086 	    reply->IOCStatus, reply->ResponseCode,
1087 	    reply->TerminationCount);
1088 
1089 	/* See if there are any outstanding commands for this LUN.
1090 	 * This could be made more efficient by using a per-LU data
1091 	 * structure of some sort.
1092 	 */
1093 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1094 		if (cm->cm_lun == tm->cm_lun)
1095 			cm_count++;
1096 	}
1097 
1098 	if (cm_count == 0) {
1099 		mpssas_log_command(tm,
1100 		    "logical unit %u finished recovery after reset\n",
1101 		    tm->cm_lun, tm);
1102 
1103 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1104 		    tm->cm_lun);
1105 
1106 		/* we've finished recovery for this logical unit.  check and
1107 		 * see if some other logical unit has a timedout command
1108 		 * that needs to be processed.
1109 		 */
1110 		cm = TAILQ_FIRST(&targ->timedout_commands);
1111 		if (cm) {
1112 			mpssas_send_abort(sc, tm, cm);
1113 		}
1114 		else {
1115 			targ->tm = NULL;
1116 			mpssas_free_tm(sc, tm);
1117 		}
1118 	}
1119 	else {
1120 		/* if we still have commands for this LUN, the reset
1121 		 * effectively failed, regardless of the status reported.
1122 		 * Escalate to a target reset.
1123 		 */
1124 		mpssas_log_command(tm,
1125 		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1126 		    tm, cm_count);
1127 		mpssas_send_reset(sc, tm,
1128 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1129 	}
1130 }
1131 
1132 static void
1133 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1134 {
1135 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1136 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1137 	struct mpssas_target *targ;
1138 
1139 	callout_stop(&tm->cm_callout);
1140 
1141 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1142 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1143 	targ = tm->cm_targ;
1144 
1145 	/*
1146 	 * Currently there should be no way we can hit this case.  It only
1147 	 * happens when we have a failure to allocate chain frames, and
1148 	 * task management commands don't have S/G lists.
1149 	 */
1150 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1151 		mps_printf(sc, "%s: cm_flags = %#x for target reset! "
1152 			   "This should not happen!\n", __func__, tm->cm_flags);
1153 		mpssas_free_tm(sc, tm);
1154 		return;
1155 	}
1156 
1157 	if (reply == NULL) {
1158 		mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1159 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1160 			/* this completion was due to a reset, just cleanup */
1161 			targ->flags &= ~MPSSAS_TARGET_INRESET;
1162 			targ->tm = NULL;
1163 			mpssas_free_tm(sc, tm);
1164 		}
1165 		else {
1166 			/* we should have gotten a reply. */
1167 			mps_reinit(sc);
1168 		}
1169 		return;
1170 	}
1171 
1172 	mpssas_log_command(tm,
1173 	    "target reset status 0x%x code 0x%x count %u\n",
1174 	    reply->IOCStatus, reply->ResponseCode,
1175 	    reply->TerminationCount);
1176 
1177 	targ->flags &= ~MPSSAS_TARGET_INRESET;
1178 
1179 	if (targ->outstanding == 0) {
1180 		/* we've finished recovery for this target and all
1181 		 * of its logical units.
1182 		 */
1183 		mpssas_log_command(tm,
1184 		    "recovery finished after target reset\n");
1185 
1186 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1187 		    CAM_LUN_WILDCARD);
1188 
1189 		targ->tm = NULL;
1190 		mpssas_free_tm(sc, tm);
1191 	}
1192 	else {
1193 		/* after a target reset, if this target still has
1194 		 * outstanding commands, the reset effectively failed,
1195 		 * regardless of the status reported.  escalate.
1196 		 */
1197 		mpssas_log_command(tm,
1198 		    "target reset complete for tm %p, but still have %u command(s)\n",
1199 		    tm, targ->outstanding);
1200 		mps_reinit(sc);
1201 	}
1202 }
1203 
1204 #define MPS_RESET_TIMEOUT 30
1205 
1206 static int
1207 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1208 {
1209 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1210 	struct mpssas_target *target;
1211 	int err;
1212 
1213 	target = tm->cm_targ;
1214 	if (target->handle == 0) {
1215 		mps_printf(sc, "%s null devhandle for target_id %d\n",
1216 		    __func__, target->tid);
1217 		return -1;
1218 	}
1219 
1220 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1221 	req->DevHandle = target->handle;
1222 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1223 	req->TaskType = type;
1224 
1225 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1226 		/* XXX Need to handle invalid LUNs */
1227 		MPS_SET_LUN(req->LUN, tm->cm_lun);
1228 		tm->cm_targ->logical_unit_resets++;
1229 		mpssas_log_command(tm, "sending logical unit reset\n");
1230 		tm->cm_complete = mpssas_logical_unit_reset_complete;
1231 	}
1232 	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1233 		/* Target reset method =  SAS Hard Link Reset / SATA Link Reset */
1234 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1235 		tm->cm_targ->target_resets++;
1236 		tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1237 		mpssas_log_command(tm, "sending target reset\n");
1238 		tm->cm_complete = mpssas_target_reset_complete;
1239 	}
1240 	else {
1241 		mps_printf(sc, "unexpected reset type 0x%x\n", type);
1242 		return -1;
1243 	}
1244 
1245 	tm->cm_data = NULL;
1246 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1247 	tm->cm_complete_data = (void *)tm;
1248 
1249 	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1250 	    mpssas_tm_timeout, tm);
1251 
1252 	err = mps_map_command(sc, tm);
1253 	if (err)
1254 		mpssas_log_command(tm,
1255 		    "error %d sending reset type %u\n",
1256 		    err, type);
1257 
1258 	return err;
1259 }
1260 
1261 
/*
 * Completion handler for an ABORT TASK task management command.
 *
 * On success, either finish recovery for the target (no more timed-out
 * commands), or abort the next timed-out command.  If the timed-out
 * command we just tried to abort is still at the head of the queue
 * (i.e. it never completed), the abort failed and recovery escalates
 * to a logical unit reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed; cancel the escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, req->TaskMID);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, req->TaskMID);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    req->TaskMID,
	    reply->IOCStatus, reply->ResponseCode,
	    reply->TerminationCount);

	/* Inspect the head of the timed-out queue to decide what's next. */
	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm,
		    "finished recovery after aborting TaskMID %u\n",
		    req->TaskMID);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (req->TaskMID != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm,
		    "continuing recovery after aborting TaskMID %u\n",
		    req->TaskMID);

		/* Reuse the same TM frame for the next abort. */
		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm,
		    "abort failed for TaskMID %u tm %p\n",
		    req->TaskMID, tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1343 
1344 #define MPS_ABORT_TIMEOUT 5
1345 
1346 static int
1347 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1348 {
1349 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1350 	struct mpssas_target *targ;
1351 	int err;
1352 
1353 	targ = cm->cm_targ;
1354 	if (targ->handle == 0) {
1355 		mps_printf(sc, "%s null devhandle for target_id %d\n",
1356 		    __func__, cm->cm_ccb->ccb_h.target_id);
1357 		return -1;
1358 	}
1359 
1360 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1361 	req->DevHandle = targ->handle;
1362 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1363 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1364 
1365 	/* XXX Need to handle invalid LUNs */
1366 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1367 
1368 	req->TaskMID = cm->cm_desc.Default.SMID;
1369 
1370 	tm->cm_data = NULL;
1371 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1372 	tm->cm_complete = mpssas_abort_complete;
1373 	tm->cm_complete_data = (void *)tm;
1374 	tm->cm_targ = cm->cm_targ;
1375 	tm->cm_lun = cm->cm_lun;
1376 
1377 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1378 	    mpssas_tm_timeout, tm);
1379 
1380 	targ->aborts++;
1381 
1382 	err = mps_map_command(sc, tm);
1383 	if (err)
1384 		mpssas_log_command(tm,
1385 		    "error %d sending abort for cm %p SMID %u\n",
1386 		    err, cm, req->TaskMID);
1387 	return err;
1388 }
1389 
1390 
/*
 * Callout handler for a SCSI I/O command that has exceeded its CCB
 * timeout.  The command is moved onto the target's timed-out queue and,
 * if the target is not already in recovery, an abort TM is started.
 * Runs with the mps mutex held (callout was armed with the lock).
 */
static void
mpssas_scsiio_timeout(void *data)
{
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ;

	cm = (struct mps_command *)data;
	sc = cm->cm_sc;

	mtx_assert(&sc->mps_mtx, MA_OWNED);

	mps_printf(sc, "%s checking sc %p cm %p\n", __func__, sc, cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mps_intr_locked(sc);
	if (cm->cm_state == MPS_CM_STATE_FREE) {
		/* The command completed while we were getting here. */
		mps_printf(sc, "SCSI command %p sc %p almost timed out\n", cm, sc);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mps_printf(sc, "command timeout with NULL ccb\n");
		return;
	}

	mpssas_log_command(cm, "command timeout cm %p ccb %p\n",
	    cm, cm->cm_ccb);

	targ = cm->cm_targ;
	targ->timeouts++;

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */

	/* Mark the command timed out and queue it for recovery. */
	cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mps_printf(sc, "queued timedout cm %p for processing by tm %p\n",
		    cm, targ->tm);
	}
	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
		mps_printf(sc, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mpssas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mps_printf(sc, "timedout cm %p failed to allocate a tm\n",
		    cm);
	}

}
1464 
/*
 * Handle an XPT_SCSI_IO CCB from CAM: validate the target, allocate a
 * command frame, translate the CCB into an MPI2 SCSI IO request
 * (direction, tagging, LUN, CDB, optional EEDP/protection setup and WD
 * direct-drive routing), arm the per-command timeout, and hand the
 * command to the hardware.  Completion goes to mpssas_scsiio_complete().
 * Called with the mps mutex held.
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;

	sc = sassc->sc;
	mps_dprint(sc, MPS_TRACE, "%s ccb %p\n", __func__, ccb);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	targ = &sassc->targets[csio->ccb_h.target_id];
	/* A zero devhandle means the target isn't (or is no longer) mapped. */
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_TRACE, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			csio->ccb_h.status = CAM_REQ_CMP;
		else
			csio->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_TRACE, "%s shutting down\n", __func__);
		csio->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}

	/* Out of command frames: freeze the simq and ask CAM to requeue. */
	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Build the MPI2 SCSI IO request in the command frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = targ->handle;
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = cm->cm_sense_busaddr;
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = csio->dxfer_len;
	req->BidirectionalDataLength = 0;
	/* NOTE: IoFlags is stored again with the same value after the CDB
	 * copy below; one of the two assignments is redundant. */
	req->IoFlags = csio->cdb_len;
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		req->Control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		req->Control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		req->Control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}
 
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		req->Control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		req->Control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		req->Control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		req->Control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Apply the per-target Transport Layer Retries setting. */
	req->Control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;

	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		ccb->ccb_h.status = CAM_LUN_INVALID;
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB from wherever the CCB keeps it. */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	req->IoFlags = csio->cdb_len;

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* On a miss (or empty list), SLIST_FOREACH leaves lun NULL,
		 * which the check below handles. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = lun->eedp_block_size;
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = eedp_flags;

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				/* Set the RDPROTECT/WRPROTECT field. */
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = eedp_flags;
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	/* Fill in the driver-side command bookkeeping. */
	cm->cm_data = csio->data_ptr;
	cm->cm_length = csio->dxfer_len;
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = targ->handle;
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.status != MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			ccb->ccb_h.status = CAM_REQ_INPROG;
		}
	}

	/* CCB timeout is in milliseconds; callout ticks are hz-based. */
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	   mpssas_scsiio_timeout, cm);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);

	if ((sc->mps_debug & MPS_TRACE) != 0)
		mpssas_log_command(cm, "%s cm %p ccb %p outstanding %u\n",
		    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1685 
1686 static void
1687 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1688 {
1689 	MPI2_SCSI_IO_REPLY *rep;
1690 	union ccb *ccb;
1691 	struct ccb_scsiio *csio;
1692 	struct mpssas_softc *sassc;
1693 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
1694 	u8 *TLR_bits, TLR_on;
1695 	int dir = 0, i;
1696 	u16 alloc_len;
1697 
1698 	mps_dprint(sc, MPS_TRACE,
1699 	    "%s cm %p SMID %u ccb %p reply %p outstanding %u\n",
1700 	    __func__, cm, cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
1701 	    cm->cm_targ->outstanding);
1702 
1703 	callout_stop(&cm->cm_callout);
1704 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1705 
1706 	sassc = sc->sassc;
1707 	ccb = cm->cm_complete_data;
1708 	csio = &ccb->csio;
1709 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
1710 	/*
1711 	 * XXX KDM if the chain allocation fails, does it matter if we do
1712 	 * the sync and unload here?  It is simpler to do it in every case,
1713 	 * assuming it doesn't cause problems.
1714 	 */
1715 	if (cm->cm_data != NULL) {
1716 		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
1717 			dir = BUS_DMASYNC_POSTREAD;
1718 		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
1719 			dir = BUS_DMASYNC_POSTWRITE;;
1720 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
1721 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
1722 	}
1723 
1724 	cm->cm_targ->completed++;
1725 	cm->cm_targ->outstanding--;
1726 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
1727 
1728 	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
1729 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
1730 		if (cm->cm_reply != NULL)
1731 			mpssas_log_command(cm,
1732 			    "completed timedout cm %p ccb %p during recovery "
1733 			    "ioc %x scsi %x state %x xfer %u\n",
1734 			    cm, cm->cm_ccb,
1735 			    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1736 			    rep->TransferCount);
1737 		else
1738 			mpssas_log_command(cm,
1739 			    "completed timedout cm %p ccb %p during recovery\n",
1740 			    cm, cm->cm_ccb);
1741 	} else if (cm->cm_targ->tm != NULL) {
1742 		if (cm->cm_reply != NULL)
1743 			mpssas_log_command(cm,
1744 			    "completed cm %p ccb %p during recovery "
1745 			    "ioc %x scsi %x state %x xfer %u\n",
1746 			    cm, cm->cm_ccb,
1747 			    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1748 			    rep->TransferCount);
1749 		else
1750 			mpssas_log_command(cm,
1751 			    "completed cm %p ccb %p during recovery\n",
1752 			    cm, cm->cm_ccb);
1753 	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1754 		mpssas_log_command(cm,
1755 		    "reset completed cm %p ccb %p\n",
1756 		    cm, cm->cm_ccb);
1757 	}
1758 
1759 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1760 		/*
1761 		 * We ran into an error after we tried to map the command,
1762 		 * so we're getting a callback without queueing the command
1763 		 * to the hardware.  So we set the status here, and it will
1764 		 * be retained below.  We'll go through the "fast path",
1765 		 * because there can be no reply when we haven't actually
1766 		 * gone out to the hardware.
1767 		 */
1768 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1769 
1770 		/*
1771 		 * Currently the only error included in the mask is
1772 		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
1773 		 * chain frames.  We need to freeze the queue until we get
1774 		 * a command that completed without this error, which will
1775 		 * hopefully have some chain frames attached that we can
1776 		 * use.  If we wanted to get smarter about it, we would
1777 		 * only unfreeze the queue in this condition when we're
1778 		 * sure that we're getting some chain frames back.  That's
1779 		 * probably unnecessary.
1780 		 */
1781 		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1782 			xpt_freeze_simq(sassc->sim, 1);
1783 			sassc->flags |= MPSSAS_QUEUE_FROZEN;
1784 			mps_dprint(sc, MPS_INFO, "Error sending command, "
1785 				   "freezing SIM queue\n");
1786 		}
1787 	}
1788 
1789 	/* Take the fast path to completion */
1790 	if (cm->cm_reply == NULL) {
1791 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1792 			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
1793 				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1794 			else {
1795 				ccb->ccb_h.status = CAM_REQ_CMP;
1796 				ccb->csio.scsi_status = SCSI_STATUS_OK;
1797 			}
1798 			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
1799 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1800 				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
1801 				mps_dprint(sc, MPS_INFO,
1802 					   "Unfreezing SIM queue\n");
1803 			}
1804 		}
1805 
1806 		/*
1807 		 * There are two scenarios where the status won't be
1808 		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
1809 		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
1810 		 */
1811 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1812 			/*
1813 			 * Freeze the dev queue so that commands are
1814 			 * executed in the correct order with after error
1815 			 * recovery.
1816 			 */
1817 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
1818 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
1819 		}
1820 		mps_free_command(sc, cm);
1821 		xpt_done(ccb);
1822 		return;
1823 	}
1824 
1825 	if (sc->mps_debug & MPS_TRACE)
1826 		mpssas_log_command(cm,
1827 		    "ioc %x scsi %x state %x xfer %u\n",
1828 		    rep->IOCStatus, rep->SCSIStatus,
1829 		    rep->SCSIState, rep->TransferCount);
1830 
1831 	/*
1832 	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
1833 	 * Volume if an error occurred (normal I/O retry).  Use the original
1834 	 * CCB, but set a flag that this will be a retry so that it's sent to
1835 	 * the original volume.  Free the command but reuse the CCB.
1836 	 */
1837 	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
1838 		mps_free_command(sc, cm);
1839 		ccb->ccb_h.status = MPS_WD_RETRY;
1840 		mpssas_action_scsiio(sassc, ccb);
1841 		return;
1842 	}
1843 
1844 	switch (rep->IOCStatus & MPI2_IOCSTATUS_MASK) {
1845 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1846 		csio->resid = cm->cm_length - rep->TransferCount;
1847 		/* FALLTHROUGH */
1848 	case MPI2_IOCSTATUS_SUCCESS:
1849 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1850 
1851 		if ((rep->IOCStatus & MPI2_IOCSTATUS_MASK) ==
1852 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
1853 			mpssas_log_command(cm, "recovered error\n");
1854 
1855 		/* Completion failed at the transport level. */
1856 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
1857 		    MPI2_SCSI_STATE_TERMINATED)) {
1858 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1859 			break;
1860 		}
1861 
1862 		/* In a modern packetized environment, an autosense failure
1863 		 * implies that there's not much else that can be done to
1864 		 * recover the command.
1865 		 */
1866 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
1867 			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1868 			break;
1869 		}
1870 
1871 		/*
1872 		 * CAM doesn't care about SAS Response Info data, but if this is
1873 		 * the state check if TLR should be done.  If not, clear the
1874 		 * TLR_bits for the target.
1875 		 */
1876 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
1877 		    ((rep->ResponseInfo & MPI2_SCSI_RI_MASK_REASONCODE) ==
1878 		    MPS_SCSI_RI_INVALID_FRAME)) {
1879 			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
1880 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
1881 		}
1882 
1883 		/*
1884 		 * Intentionally override the normal SCSI status reporting
1885 		 * for these two cases.  These are likely to happen in a
1886 		 * multi-initiator environment, and we want to make sure that
1887 		 * CAM retries these commands rather than fail them.
1888 		 */
1889 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
1890 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
1891 			ccb->ccb_h.status = CAM_REQ_ABORTED;
1892 			break;
1893 		}
1894 
1895 		/* Handle normal status and sense */
1896 		csio->scsi_status = rep->SCSIStatus;
1897 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
1898 			ccb->ccb_h.status = CAM_REQ_CMP;
1899 		else
1900 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1901 
1902 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1903 			int sense_len, returned_sense_len;
1904 
1905 			returned_sense_len = min(rep->SenseCount,
1906 			    sizeof(struct scsi_sense_data));
1907 			if (returned_sense_len < ccb->csio.sense_len)
1908 				ccb->csio.sense_resid = ccb->csio.sense_len -
1909 					returned_sense_len;
1910 			else
1911 				ccb->csio.sense_resid = 0;
1912 
1913 			sense_len = min(returned_sense_len,
1914 			    ccb->csio.sense_len - ccb->csio.sense_resid);
1915 			bzero(&ccb->csio.sense_data,
1916 			      sizeof(&ccb->csio.sense_data));
1917 			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
1918 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1919 		}
1920 
1921 		/*
1922 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
1923 		 * and it's page code 0 (Supported Page List), and there is
1924 		 * inquiry data, and this is for a sequential access device, and
1925 		 * the device is an SSP target, and TLR is supported by the
1926 		 * controller, turn the TLR_bits value ON if page 0x90 is
1927 		 * supported.
1928 		 */
1929 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
1930 		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
1931 		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
1932 		    (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
1933 		    T_SEQUENTIAL) && (sc->control_TLR) &&
1934 		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
1935 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
1936 			vpd_list = (struct scsi_vpd_supported_page_list *)
1937 			    csio->data_ptr;
1938 			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
1939 			    TLR_bits;
1940 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
1941 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
1942 			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
1943 			    csio->cdb_io.cdb_bytes[4];
1944 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
1945 				if (vpd_list->list[i] == 0x90) {
1946 					*TLR_bits = TLR_on;
1947 					break;
1948 				}
1949 			}
1950 		}
1951 		break;
1952 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1953 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1954 		/*
1955 		 * If devinfo is 0 this will be a volume.  In that case don't
1956 		 * tell CAM that the volume is not there.  We want volumes to
1957 		 * be enumerated until they are deleted/removed, not just
1958 		 * failed.
1959 		 */
1960 		if (cm->cm_targ->devinfo == 0)
1961 			ccb->ccb_h.status = CAM_REQ_CMP;
1962 		else
1963 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
1964 		break;
1965 	case MPI2_IOCSTATUS_INVALID_SGL:
1966 		mps_print_scsiio_cmd(sc, cm);
1967 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
1968 		break;
1969 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1970 		/*
1971 		 * This is one of the responses that comes back when an I/O
1972 		 * has been aborted.  If it is because of a timeout that we
1973 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
1974 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
1975 		 * command is the same (it gets retried, subject to the
1976 		 * retry counter), the only difference is what gets printed
1977 		 * on the console.
1978 		 */
1979 		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
1980 			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1981 		else
1982 			ccb->ccb_h.status = CAM_REQ_ABORTED;
1983 		break;
1984 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1985 		/* resid is ignored for this condition */
1986 		csio->resid = 0;
1987 		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
1988 		break;
1989 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1990 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1991 		/*
1992 		 * Since these are generally external (i.e. hopefully
1993 		 * transient transport-related) errors, retry these without
1994 		 * decrementing the retry count.
1995 		 */
1996 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
1997 		mpssas_log_command(cm,
1998 		    "terminated ioc %x scsi %x state %x xfer %u\n",
1999 		    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
2000 		    rep->TransferCount);
2001 		break;
2002 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2003 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2004 	case MPI2_IOCSTATUS_INVALID_VPID:
2005 	case MPI2_IOCSTATUS_INVALID_FIELD:
2006 	case MPI2_IOCSTATUS_INVALID_STATE:
2007 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2008 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2009 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2010 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2011 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2012 	default:
2013 		mpssas_log_command(cm,
2014 		    "completed ioc %x scsi %x state %x xfer %u\n",
2015 		    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
2016 		    rep->TransferCount);
2017 		csio->resid = cm->cm_length;
2018 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2019 		break;
2020 	}
2021 
2022 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2023 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2024 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2025 		mps_dprint(sc, MPS_INFO, "Command completed, "
2026 			   "unfreezing SIM queue\n");
2027 	}
2028 
2029 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2030 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2031 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2032 	}
2033 
2034 	mps_free_command(sc, cm);
2035 	xpt_done(ccb);
2036 }
2037 
2038 static void
2039 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2040     union ccb *ccb) {
2041 	pMpi2SCSIIORequest_t	pIO_req;
2042 	struct mps_softc	*sc = sassc->sc;
2043 	uint64_t		virtLBA;
2044 	uint32_t		physLBA, stripe_offset, stripe_unit;
2045 	uint32_t		io_size, column;
2046 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2047 
2048 	/*
2049 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2050 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2051 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2052 	 * bit different than the 10/16 CDBs, handle them separately.
2053 	 */
2054 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2055 	CDB = pIO_req->CDB.CDB32;
2056 
2057 	/*
2058 	 * Handle 6 byte CDBs.
2059 	 */
2060 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2061 	    (CDB[0] == WRITE_6))) {
2062 		/*
2063 		 * Get the transfer size in blocks.
2064 		 */
2065 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2066 
2067 		/*
2068 		 * Get virtual LBA given in the CDB.
2069 		 */
2070 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2071 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2072 
2073 		/*
2074 		 * Check that LBA range for I/O does not exceed volume's
2075 		 * MaxLBA.
2076 		 */
2077 		if ((virtLBA + (uint64_t)io_size - 1) <=
2078 		    sc->DD_max_lba) {
2079 			/*
2080 			 * Check if the I/O crosses a stripe boundary.  If not,
2081 			 * translate the virtual LBA to a physical LBA and set
2082 			 * the DevHandle for the PhysDisk to be used.  If it
2083 			 * does cross a boundry, do normal I/O.  To get the
2084 			 * right DevHandle to use, get the map number for the
2085 			 * column, then use that map number to look up the
2086 			 * DevHandle of the PhysDisk.
2087 			 */
2088 			stripe_offset = (uint32_t)virtLBA &
2089 			    (sc->DD_stripe_size - 1);
2090 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2091 				physLBA = (uint32_t)virtLBA >>
2092 				    sc->DD_stripe_exponent;
2093 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2094 				column = physLBA % sc->DD_num_phys_disks;
2095 				pIO_req->DevHandle =
2096 				    sc->DD_column_map[column].dev_handle;
2097 				cm->cm_desc.SCSIIO.DevHandle =
2098 				    pIO_req->DevHandle;
2099 
2100 				physLBA = (stripe_unit <<
2101 				    sc->DD_stripe_exponent) + stripe_offset;
2102 				ptrLBA = &pIO_req->CDB.CDB32[1];
2103 				physLBA_byte = (uint8_t)(physLBA >> 16);
2104 				*ptrLBA = physLBA_byte;
2105 				ptrLBA = &pIO_req->CDB.CDB32[2];
2106 				physLBA_byte = (uint8_t)(physLBA >> 8);
2107 				*ptrLBA = physLBA_byte;
2108 				ptrLBA = &pIO_req->CDB.CDB32[3];
2109 				physLBA_byte = (uint8_t)physLBA;
2110 				*ptrLBA = physLBA_byte;
2111 
2112 				/*
2113 				 * Set flag that Direct Drive I/O is
2114 				 * being done.
2115 				 */
2116 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2117 			}
2118 		}
2119 		return;
2120 	}
2121 
2122 	/*
2123 	 * Handle 10 or 16 byte CDBs.
2124 	 */
2125 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2126 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2127 	    (CDB[0] == WRITE_16))) {
2128 		/*
2129 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2130 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2131 		 * the else section.  10-byte CDB's are OK.
2132 		 */
2133 		if ((CDB[0] < READ_16) ||
2134 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2135 			/*
2136 			 * Get the transfer size in blocks.
2137 			 */
2138 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2139 
2140 			/*
2141 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2142 			 * LBA in the CDB depending on command.
2143 			 */
2144 			lba_idx = (CDB[0] < READ_16) ? 2 : 6;
2145 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2146 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2147 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2148 			    (uint64_t)CDB[lba_idx + 3];
2149 
2150 			/*
2151 			 * Check that LBA range for I/O does not exceed volume's
2152 			 * MaxLBA.
2153 			 */
2154 			if ((virtLBA + (uint64_t)io_size - 1) <=
2155 			    sc->DD_max_lba) {
2156 				/*
2157 				 * Check if the I/O crosses a stripe boundary.
2158 				 * If not, translate the virtual LBA to a
2159 				 * physical LBA and set the DevHandle for the
2160 				 * PhysDisk to be used.  If it does cross a
2161 				 * boundry, do normal I/O.  To get the right
2162 				 * DevHandle to use, get the map number for the
2163 				 * column, then use that map number to look up
2164 				 * the DevHandle of the PhysDisk.
2165 				 */
2166 				stripe_offset = (uint32_t)virtLBA &
2167 				    (sc->DD_stripe_size - 1);
2168 				if ((stripe_offset + io_size) <=
2169 				    sc->DD_stripe_size) {
2170 					physLBA = (uint32_t)virtLBA >>
2171 					    sc->DD_stripe_exponent;
2172 					stripe_unit = physLBA /
2173 					    sc->DD_num_phys_disks;
2174 					column = physLBA %
2175 					    sc->DD_num_phys_disks;
2176 					pIO_req->DevHandle =
2177 					    sc->DD_column_map[column].
2178 					    dev_handle;
2179 					cm->cm_desc.SCSIIO.DevHandle =
2180 					    pIO_req->DevHandle;
2181 
2182 					physLBA = (stripe_unit <<
2183 					    sc->DD_stripe_exponent) +
2184 					    stripe_offset;
2185 					ptrLBA =
2186 					    &pIO_req->CDB.CDB32[lba_idx];
2187 					physLBA_byte = (uint8_t)(physLBA >> 24);
2188 					*ptrLBA = physLBA_byte;
2189 					ptrLBA =
2190 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2191 					physLBA_byte = (uint8_t)(physLBA >> 16);
2192 					*ptrLBA = physLBA_byte;
2193 					ptrLBA =
2194 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2195 					physLBA_byte = (uint8_t)(physLBA >> 8);
2196 					*ptrLBA = physLBA_byte;
2197 					ptrLBA =
2198 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2199 					physLBA_byte = (uint8_t)physLBA;
2200 					*ptrLBA = physLBA_byte;
2201 
2202 					/*
2203 					 * Set flag that Direct Drive I/O is
2204 					 * being done.
2205 					 */
2206 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2207 				}
2208 			}
2209 		} else {
2210 			/*
2211 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2212 			 * 0.  Get the transfer size in blocks.
2213 			 */
2214 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2215 
2216 			/*
2217 			 * Get virtual LBA.
2218 			 */
2219 			virtLBA = ((uint64_t)CDB[2] << 54) |
2220 			    ((uint64_t)CDB[3] << 48) |
2221 			    ((uint64_t)CDB[4] << 40) |
2222 			    ((uint64_t)CDB[5] << 32) |
2223 			    ((uint64_t)CDB[6] << 24) |
2224 			    ((uint64_t)CDB[7] << 16) |
2225 			    ((uint64_t)CDB[8] << 8) |
2226 			    (uint64_t)CDB[9];
2227 
2228 			/*
2229 			 * Check that LBA range for I/O does not exceed volume's
2230 			 * MaxLBA.
2231 			 */
2232 			if ((virtLBA + (uint64_t)io_size - 1) <=
2233 			    sc->DD_max_lba) {
2234 				/*
2235 				 * Check if the I/O crosses a stripe boundary.
2236 				 * If not, translate the virtual LBA to a
2237 				 * physical LBA and set the DevHandle for the
2238 				 * PhysDisk to be used.  If it does cross a
2239 				 * boundry, do normal I/O.  To get the right
2240 				 * DevHandle to use, get the map number for the
2241 				 * column, then use that map number to look up
2242 				 * the DevHandle of the PhysDisk.
2243 				 */
2244 				stripe_offset = (uint32_t)virtLBA &
2245 				    (sc->DD_stripe_size - 1);
2246 				if ((stripe_offset + io_size) <=
2247 				    sc->DD_stripe_size) {
2248 					physLBA = (uint32_t)(virtLBA >>
2249 					    sc->DD_stripe_exponent);
2250 					stripe_unit = physLBA /
2251 					    sc->DD_num_phys_disks;
2252 					column = physLBA %
2253 					    sc->DD_num_phys_disks;
2254 					pIO_req->DevHandle =
2255 					    sc->DD_column_map[column].
2256 					    dev_handle;
2257 					cm->cm_desc.SCSIIO.DevHandle =
2258 					    pIO_req->DevHandle;
2259 
2260 					physLBA = (stripe_unit <<
2261 					    sc->DD_stripe_exponent) +
2262 					    stripe_offset;
2263 
2264 					/*
2265 					 * Set upper 4 bytes of LBA to 0.  We
2266 					 * assume that the phys disks are less
2267 					 * than 2 TB's in size.  Then, set the
2268 					 * lower 4 bytes.
2269 					 */
2270 					pIO_req->CDB.CDB32[2] = 0;
2271 					pIO_req->CDB.CDB32[3] = 0;
2272 					pIO_req->CDB.CDB32[4] = 0;
2273 					pIO_req->CDB.CDB32[5] = 0;
2274 					ptrLBA = &pIO_req->CDB.CDB32[6];
2275 					physLBA_byte = (uint8_t)(physLBA >> 24);
2276 					*ptrLBA = physLBA_byte;
2277 					ptrLBA = &pIO_req->CDB.CDB32[7];
2278 					physLBA_byte = (uint8_t)(physLBA >> 16);
2279 					*ptrLBA = physLBA_byte;
2280 					ptrLBA = &pIO_req->CDB.CDB32[8];
2281 					physLBA_byte = (uint8_t)(physLBA >> 8);
2282 					*ptrLBA = physLBA_byte;
2283 					ptrLBA = &pIO_req->CDB.CDB32[9];
2284 					physLBA_byte = (uint8_t)physLBA;
2285 					*ptrLBA = physLBA_byte;
2286 
2287 					/*
2288 					 * Set flag that Direct Drive I/O is
2289 					 * being done.
2290 					 */
2291 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2292 				}
2293 			}
2294 		}
2295 	}
2296 }
2297 
2298 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough commands issued by
 * mpssas_send_smpcmd().  Translates the MPI reply (or its absence) into a
 * CAM status on the CCB, syncs/unloads the data DMA map, frees the
 * command, and completes the CCB with xpt_done().
 */
static void
mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	/* The CCB was stashed on the command when it was mapped. */
	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x on SMP request!\n",
			   __func__, cm->cm_flags);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
        }

	/* A NULL reply means the firmware never responded; fail the CCB. */
	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mps_dprint(sc, MPS_INFO, "%s: NULL cm_reply!\n", __func__);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	/* Reassemble the 64-bit target SAS address (stored little-endian). */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	/* Both the IOC and the SAS transport must report success. */
	if ((rpl->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_INFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, rpl->IOCStatus, rpl->SASStatus);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	mps_dprint(sc, MPS_INFO, "%s: SMP request to SAS address "
		   "%#jx completed successfully\n", __func__,
		   (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame is the function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		ccb->ccb_h.status = CAM_REQ_CMP;
	else
		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2361 
/*
 * Build and submit an SMP passthrough request for the given CCB to the
 * device at 'sasaddr'.  The request and response buffers are wired up as
 * a two-element uio (one iovec each) so that a single mps_map_command()
 * call handles the bidirectional DMA.  Completion is asynchronous via
 * mpssas_smpio_complete(); on any setup failure the CCB is completed
 * immediately with an error status.
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
		mps_printf(sc, "%s: physical addresses not supported\n",
			   __func__);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}

	/*
	 * If the user wants to send an S/G list, check to make sure they
	 * have single buffers.
	 */
	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_printf(sc, "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			/* Single-element S/G list; use its base address. */
			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
	} else {
		/* Plain virtual-address buffers. */
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_printf(sc, "%s: cannot allocate command\n", __func__);
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = ccb->smpio.smp_request_len;
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_INFO, "%s: sending SMP request to SAS "
		   "address %#jx\n", __func__, (uintmax_t)sasaddr);

	/* Initialize the SGE pointer/state for this command. */
	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 is the outbound request; iovec 1 is the inbound response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = req->RequestDataLength;
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_printf(sc, "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	xpt_done(ccb);
	return;

}
2529 
/*
 * Handle an XPT_SMP_IO CCB: figure out which SAS address the SMP request
 * should be routed to (the target itself if it contains an SMP target,
 * otherwise its parent expander) and hand it off to mpssas_send_smpcmd().
 * On any validation failure the CCB is completed immediately with an
 * error status.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_printf(sc, "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_printf(sc, "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		/* Old probe: look the parent up by handle in our table. */
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_printf(sc, "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_printf(sc, "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/* New probe: parent devinfo/address are cached on the target. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_printf(sc, "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_printf(sc, "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_printf(sc, "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
2656 #endif //__FreeBSD_version >= 900026
2657 
2658 static void
2659 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
2660 {
2661 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2662 	struct mps_softc *sc;
2663 	struct mps_command *tm;
2664 	struct mpssas_target *targ;
2665 
2666 	mps_dprint(sassc->sc, MPS_TRACE, __func__);
2667 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
2668 
2669 	sc = sassc->sc;
2670 	tm = mps_alloc_command(sc);
2671 	if (tm == NULL) {
2672 		mps_printf(sc, "comand alloc failure in mpssas_action_resetdev\n");
2673 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2674 		xpt_done(ccb);
2675 		return;
2676 	}
2677 
2678 	targ = &sassc->targets[ccb->ccb_h.target_id];
2679 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2680 	req->DevHandle = targ->handle;
2681 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2682 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2683 
2684 	/* SAS Hard Link Reset / SATA Link Reset */
2685 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
2686 
2687 	tm->cm_data = NULL;
2688 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2689 	tm->cm_complete = mpssas_resetdev_complete;
2690 	tm->cm_complete_data = ccb;
2691 	mps_map_command(sc, tm);
2692 }
2693 
2694 static void
2695 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
2696 {
2697 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2698 	union ccb *ccb;
2699 
2700 	mps_dprint(sc, MPS_TRACE, __func__);
2701 	mtx_assert(&sc->mps_mtx, MA_OWNED);
2702 
2703 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
2704 	ccb = tm->cm_complete_data;
2705 
2706 	/*
2707 	 * Currently there should be no way we can hit this case.  It only
2708 	 * happens when we have a failure to allocate chain frames, and
2709 	 * task management commands don't have S/G lists.
2710 	 */
2711 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2712 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2713 
2714 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2715 
2716 		mps_printf(sc, "%s: cm_flags = %#x for reset of handle %#04x! "
2717 			   "This should not happen!\n", __func__, tm->cm_flags,
2718 			   req->DevHandle);
2719 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2720 		goto bailout;
2721 	}
2722 
2723 	printf("%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
2724 	    resp->IOCStatus, resp->ResponseCode);
2725 
2726 	if (resp->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
2727 		ccb->ccb_h.status = CAM_REQ_CMP;
2728 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
2729 		    CAM_LUN_WILDCARD);
2730 	}
2731 	else
2732 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2733 
2734 bailout:
2735 
2736 	mpssas_free_tm(sc, tm);
2737 	xpt_done(ccb);
2738 }
2739 
2740 static void
2741 mpssas_poll(struct cam_sim *sim)
2742 {
2743 	struct mpssas_softc *sassc;
2744 
2745 	sassc = cam_sim_softc(sim);
2746 
2747 	if (sassc->sc->mps_debug & MPS_TRACE) {
2748 		/* frequent debug messages during a panic just slow
2749 		 * everything down too much.
2750 		 */
2751 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
2752 		sassc->sc->mps_debug &= ~MPS_TRACE;
2753 	}
2754 
2755 	mps_intr_locked(sassc->sc);
2756 }
2757 
2758 static void
2759 mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
2760 {
2761 	struct mpssas_softc *sassc;
2762 	char path_str[64];
2763 
2764 	if (done_ccb == NULL)
2765 		return;
2766 
2767 	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
2768 
2769 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
2770 
2771 	xpt_path_string(done_ccb->ccb_h.path, path_str, sizeof(path_str));
2772 	mps_dprint(sassc->sc, MPS_INFO, "Completing rescan for %s\n", path_str);
2773 
2774 	xpt_free_path(done_ccb->ccb_h.path);
2775 	xpt_free_ccb(done_ccb);
2776 
2777 #if __FreeBSD_version < 1000006
2778 	/*
2779 	 * Before completing scan, get EEDP stuff for all of the existing
2780 	 * targets.
2781 	 */
2782 	mpssas_check_eedp(sassc);
2783 #endif
2784 
2785 }
2786 
/*
 * Kernel thread that services the bus rescan queue.  It sleeps on
 * ccb_scanq and, when woken, dispatches queued scan CCBs via xpt_action()
 * (each completes through mpssas_rescan_done()) until MPSSAS_SHUTDOWN is
 * set, at which point it clears MPSSAS_SCANTHREAD, wakes any waiter on
 * sassc->flags, and exits.
 */
static void
mpssas_scanner_thread(void *arg)
{
	struct mpssas_softc *sassc;
	struct mps_softc *sc;
	union ccb	*ccb;

	sassc = (struct mpssas_softc *)arg;
	sc = sassc->sc;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	mps_lock(sc);
	for (;;) {
		/* Sleep until a rescan is queued or shutdown is requested. */
		msleep(&sassc->ccb_scanq, &sc->mps_mtx, PRIBIO,
		       "mps_scanq", 0);
		if (sassc->flags & MPSSAS_SHUTDOWN) {
			mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
			break;
		}
		/* Spurious wakeups leave the queue empty; just re-sleep. */
		ccb = (union ccb *)TAILQ_FIRST(&sassc->ccb_scanq);
		if (ccb == NULL)
			continue;
		TAILQ_REMOVE(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
		xpt_action(ccb);
	}

	/* Signal anyone sleeping on sassc->flags that the thread is gone. */
	sassc->flags &= ~MPSSAS_SCANTHREAD;
	wakeup(&sassc->flags);
	mps_unlock(sc);
	mps_dprint(sc, MPS_TRACE, "Scanner exiting\n");
	mps_kproc_exit(0);
}
2821 
2822 static void
2823 mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb)
2824 {
2825 	char path_str[64];
2826 
2827 	mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
2828 
2829 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
2830 
2831 	if (ccb == NULL)
2832 		return;
2833 
2834 	xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
2835 	mps_dprint(sassc->sc, MPS_INFO, "Queueing rescan for %s\n", path_str);
2836 
2837 	/* Prepare request */
2838 	ccb->ccb_h.ppriv_ptr1 = sassc;
2839 	ccb->ccb_h.cbfcnp = mpssas_rescan_done;
2840 	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, MPS_PRIORITY_XPT);
2841 	TAILQ_INSERT_TAIL(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
2842 	wakeup(&sassc->ccb_scanq);
2843 }
2844 
2845 #if __FreeBSD_version >= 1000006
/*
 * CAM async event callback.  Handles AC_ADVINFO_CHANGED by re-reading
 * CAM's cached long read capacity data for the affected LUN and
 * recording whether the LUN is formatted with protection information
 * (EEDP) along with its logical block size.
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We're only interested in devices that are attached to
		 * this controller.
		 */
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look for an existing entry for this LUN. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* First event for this LUN: create a list entry for it. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_FAULT, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch CAM's cached long read capacity data with an
		 * XPT_DEV_ADVINFO CCB instead of issuing a READ CAPACITY
		 * command of our own.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = 0;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		/* Release the device queue if the request froze it. */
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/*
		 * SRC16_PROT_EN set means the LUN was formatted with
		 * protection information; cache that state and the block
		 * length, otherwise clear any stale EEDP state.
		 */
		if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
	default:
		break;
	}
}
2938 #else /* __FreeBSD_version >= 1000006 */
2939 
2940 static void
2941 mpssas_check_eedp(struct mpssas_softc *sassc)
2942 {
2943 	struct mps_softc *sc = sassc->sc;
2944 	struct ccb_scsiio *csio;
2945 	struct scsi_read_capacity_16 *scsi_cmd;
2946 	struct scsi_read_capacity_eedp *rcap_buf;
2947 	union ccb *ccb;
2948 	path_id_t pathid = cam_sim_path(sassc->sim);
2949 	target_id_t targetid;
2950 	lun_id_t lunid;
2951 	struct cam_periph *found_periph;
2952 	struct mpssas_target *target;
2953 	struct mpssas_lun *lun;
2954 	uint8_t	found_lun;
2955 
2956 	/*
2957 	 * Issue a READ CAPACITY 16 command to each LUN of each target.  This
2958 	 * info is used to determine if the LUN is formatted for EEDP support.
2959 	 */
2960 	for (targetid = 0; targetid < sc->facts->MaxTargets; targetid++) {
2961 		target = &sassc->targets[targetid];
2962 		if (target->handle == 0x0) {
2963 			continue;
2964 		}
2965 
2966 		lunid = 0;
2967 		do {
2968 			rcap_buf =
2969 			    malloc(sizeof(struct scsi_read_capacity_eedp),
2970 			    M_MPT2, M_NOWAIT | M_ZERO);
2971 			if (rcap_buf == NULL) {
2972 				mps_dprint(sc, MPS_FAULT, "Unable to alloc read "
2973 				    "capacity buffer for EEDP support.\n");
2974 				return;
2975 			}
2976 			ccb = xpt_alloc_ccb_nowait();
2977 			if (ccb == NULL) {
2978 				mps_dprint(sc, MPS_FAULT, "Unable to alloc CCB "
2979 				    "for EEDP support.\n");
2980 				free(rcap_buf, M_MPT2);
2981 				return;
2982 			}
2983 
2984 			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
2985 			    pathid, targetid, lunid) != CAM_REQ_CMP) {
2986 				mps_dprint(sc, MPS_FAULT, "Unable to create "
2987 				    "path for EEDP support\n");
2988 				free(rcap_buf, M_MPT2);
2989 				xpt_free_ccb(ccb);
2990 				return;
2991 			}
2992 
2993 			/*
2994 			 * If a periph is returned, the LUN exists.  Create an
2995 			 * entry in the target's LUN list.
2996 			 */
2997 			if ((found_periph = cam_periph_find(ccb->ccb_h.path,
2998 			    NULL)) != NULL) {
2999 				/*
3000 				 * If LUN is already in list, don't create a new
3001 				 * one.
3002 				 */
3003 				found_lun = FALSE;
3004 				SLIST_FOREACH(lun, &target->luns, lun_link) {
3005 					if (lun->lun_id == lunid) {
3006 						found_lun = TRUE;
3007 						break;
3008 					}
3009 				}
3010 				if (!found_lun) {
3011 					lun = malloc(sizeof(struct mpssas_lun),
3012 					    M_MPT2, M_WAITOK | M_ZERO);
3013 					if (lun == NULL) {
3014 						mps_dprint(sc, MPS_FAULT,
3015 						    "Unable to alloc LUN for "
3016 						    "EEDP support.\n");
3017 						free(rcap_buf, M_MPT2);
3018 						xpt_free_path(ccb->ccb_h.path);
3019 						xpt_free_ccb(ccb);
3020 						return;
3021 					}
3022 					lun->lun_id = lunid;
3023 					SLIST_INSERT_HEAD(&target->luns, lun,
3024 					    lun_link);
3025 				}
3026 				lunid++;
3027 
3028 				/*
3029 				 * Issue a READ CAPACITY 16 command for the LUN.
3030 				 * The mpssas_read_cap_done function will load
3031 				 * the read cap info into the LUN struct.
3032 				 */
3033 				csio = &ccb->csio;
3034 				csio->ccb_h.func_code = XPT_SCSI_IO;
3035 				csio->ccb_h.flags = CAM_DIR_IN;
3036 				csio->ccb_h.retry_count = 4;
3037 				csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3038 				csio->ccb_h.timeout = 60000;
3039 				csio->data_ptr = (uint8_t *)rcap_buf;
3040 				csio->dxfer_len = sizeof(struct
3041 				    scsi_read_capacity_eedp);
3042 				csio->sense_len = MPS_SENSE_LEN;
3043 				csio->cdb_len = sizeof(*scsi_cmd);
3044 				csio->tag_action = MSG_SIMPLE_Q_TAG;
3045 
3046 				scsi_cmd = (struct scsi_read_capacity_16 *)
3047 				    &csio->cdb_io.cdb_bytes;
3048 				bzero(scsi_cmd, sizeof(*scsi_cmd));
3049 				scsi_cmd->opcode = 0x9E;
3050 				scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3051 				((uint8_t *)scsi_cmd)[13] = sizeof(struct
3052 				    scsi_read_capacity_eedp);
3053 
3054 				/*
3055 				 * Set the path, target and lun IDs for the READ
3056 				 * CAPACITY request.
3057 				 */
3058 				ccb->ccb_h.path_id =
3059 				    xpt_path_path_id(ccb->ccb_h.path);
3060 				ccb->ccb_h.target_id =
3061 				    xpt_path_target_id(ccb->ccb_h.path);
3062 				ccb->ccb_h.target_lun =
3063 				    xpt_path_lun_id(ccb->ccb_h.path);
3064 
3065 				ccb->ccb_h.ppriv_ptr1 = sassc;
3066 				xpt_action(ccb);
3067 			} else {
3068 				free(rcap_buf, M_MPT2);
3069 				xpt_free_path(ccb->ccb_h.path);
3070 				xpt_free_ccb(ccb);
3071 			}
3072 		} while (found_periph);
3073 	}
3074 }
3075 
3076 
3077 static void
3078 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3079 {
3080 	struct mpssas_softc *sassc;
3081 	struct mpssas_target *target;
3082 	struct mpssas_lun *lun;
3083 	struct scsi_read_capacity_eedp *rcap_buf;
3084 
3085 	if (done_ccb == NULL)
3086 		return;
3087 
3088 	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3089 
3090 	/*
3091 	 * Get the LUN ID for the path and look it up in the LUN list for the
3092 	 * target.
3093 	 */
3094 	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3095 	target = &sassc->targets[done_ccb->ccb_h.target_id];
3096 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3097 		if (lun->lun_id != done_ccb->ccb_h.target_lun)
3098 			continue;
3099 
3100 		/*
3101 		 * Got the LUN in the target's LUN list.  Fill it in
3102 		 * with EEDP info.  If the READ CAP 16 command had some
3103 		 * SCSI error (common if command is not supported), mark
3104 		 * the lun as not supporting EEDP and set the block size
3105 		 * to 0.
3106 		 */
3107 		if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3108 		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3109 			lun->eedp_formatted = FALSE;
3110 			lun->eedp_block_size = 0;
3111 			break;
3112 		}
3113 
3114 		if (rcap_buf->protect & 0x01) {
3115 			lun->eedp_formatted = TRUE;
3116 			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3117 		}
3118 		break;
3119 	}
3120 
3121 	// Finished with this CCB and path.
3122 	free(rcap_buf, M_MPT2);
3123 	xpt_free_path(done_ccb->ccb_h.path);
3124 	xpt_free_ccb(done_ccb);
3125 }
3126 #endif /* __FreeBSD_version >= 1000006 */
3127 
3128 int
3129 mpssas_startup(struct mps_softc *sc)
3130 {
3131 	struct mpssas_softc *sassc;
3132 
3133 	/*
3134 	 * Send the port enable message and set the wait_for_port_enable flag.
3135 	 * This flag helps to keep the simq frozen until all discovery events
3136 	 * are processed.
3137 	 */
3138 	sassc = sc->sassc;
3139 	mpssas_startup_increment(sassc);
3140 	sc->wait_for_port_enable = 1;
3141 	mpssas_send_portenable(sc);
3142 	return (0);
3143 }
3144 
3145 static int
3146 mpssas_send_portenable(struct mps_softc *sc)
3147 {
3148 	MPI2_PORT_ENABLE_REQUEST *request;
3149 	struct mps_command *cm;
3150 
3151 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3152 
3153 	if ((cm = mps_alloc_command(sc)) == NULL)
3154 		return (EBUSY);
3155 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3156 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3157 	request->MsgFlags = 0;
3158 	request->VP_ID = 0;
3159 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3160 	cm->cm_complete = mpssas_portenable_complete;
3161 	cm->cm_data = NULL;
3162 	cm->cm_sge = NULL;
3163 
3164 	mps_map_command(sc, cm);
3165 	mps_dprint(sc, MPS_TRACE,
3166 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3167 	    cm, cm->cm_req, cm->cm_complete);
3168 	return (0);
3169 }
3170 
/*
 * Completion handler for the MPI2 PORT ENABLE request sent by
 * mpssas_send_portenable().  Discovery is done at this point: tear down
 * the config intrhook, optionally hide WarpDrive member disks, then
 * drop the startup refcount and release the simq frozen before port
 * enable.
 */
static void
mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_PORT_ENABLE_REPLY *reply;
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	int i;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
	sassc = sc->sassc;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * port enable commands don't have S/G lists.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for port enable! "
			   "This should not happen!\n", __func__, cm->cm_flags);
	}

	/* A failed port enable is logged but otherwise not fatal here. */
	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
	if (reply == NULL)
		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
	else if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS)
		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");

	mps_free_command(sc, cm);
	/* Boot-time attach is complete; let the kernel proceed. */
	if (sc->mps_ich.ich_arg != NULL) {
		mps_dprint(sc, MPS_INFO, "disestablish config intrhook\n");
		config_intrhook_disestablish(&sc->mps_ich);
		sc->mps_ich.ich_arg = NULL;
	}

	/*
	 * Get WarpDrive info after discovery is complete but before the scan
	 * starts.  At this point, all devices are ready to be exposed to the
	 * OS.  If devices should be hidden instead, take them out of the
	 * 'targets' array before the scan.  The devinfo for a disk will have
	 * some info and a volume's will be 0.  Use that to remove disks.
	 */
	mps_wd_config_pages(sc);
	if (((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE)
	  && (sc->WD_hide_expose == MPS_WD_HIDE_ALWAYS))
	 || (sc->WD_valid_config && (sc->WD_hide_expose ==
	    MPS_WD_HIDE_IF_VOLUME))) {
		for (i = 0; i < sassc->sc->facts->MaxTargets; i++) {
			target = &sassc->targets[i];
			if (target->devinfo) {
				/* Zeroing the entry hides it from the scan. */
				target->devinfo = 0x0;
				target->encl_handle = 0x0;
				target->encl_slot = 0x0;
				target->handle = 0x0;
				target->tid = 0x0;
				target->linkrate = 0x0;
				target->flags = 0x0;
			}
		}
	}

	/*
	 * Done waiting for port enable to complete.  Decrement the refcount.
	 * If refcount is 0, discovery is complete and a rescan of the bus can
	 * take place.  Since the simq was explicitly frozen before port
	 * enable, it must be explicitly released here to keep the
	 * freeze/release count in sync.
	 */
	sc->wait_for_port_enable = 0;
	sc->port_enable_complete = 1;
	mpssas_startup_decrement(sassc);
	xpt_release_simq(sassc->sim, 1);
}
3244 
3245