xref: /freebsd/sys/dev/mps/mps_sas.c (revision 7750ad47a9a7dbc83f87158464170c8640723293)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 /*-
27  * Copyright (c) 2011 LSI Corp.
28  * All rights reserved.
29  *
30  * Redistribution and use in source and binary forms, with or without
31  * modification, are permitted provided that the following conditions
32  * are met:
33  * 1. Redistributions of source code must retain the above copyright
34  *    notice, this list of conditions and the following disclaimer.
35  * 2. Redistributions in binary form must reproduce the above copyright
36  *    notice, this list of conditions and the following disclaimer in the
37  *    documentation and/or other materials provided with the distribution.
38  *
39  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
40  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
43  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
44  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
45  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
46  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
47  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
48  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
49  * SUCH DAMAGE.
50  *
51  * LSI MPT-Fusion Host Adapter FreeBSD
52  *
53  * $FreeBSD$
54  */
55 
56 #include <sys/cdefs.h>
57 __FBSDID("$FreeBSD$");
58 
59 /* Communications core for LSI MPT2 */
60 
61 /* TODO Move headers to mpsvar */
62 #include <sys/types.h>
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/kernel.h>
66 #include <sys/selinfo.h>
67 #include <sys/module.h>
68 #include <sys/bus.h>
69 #include <sys/conf.h>
70 #include <sys/bio.h>
71 #include <sys/malloc.h>
72 #include <sys/uio.h>
73 #include <sys/sysctl.h>
74 #include <sys/endian.h>
75 #include <sys/queue.h>
76 #include <sys/kthread.h>
77 #include <sys/taskqueue.h>
78 #include <sys/sbuf.h>
79 
80 #include <machine/bus.h>
81 #include <machine/resource.h>
82 #include <sys/rman.h>
83 
84 #include <machine/stdarg.h>
85 
86 #include <cam/cam.h>
87 #include <cam/cam_ccb.h>
88 #include <cam/cam_xpt.h>
89 #include <cam/cam_debug.h>
90 #include <cam/cam_sim.h>
91 #include <cam/cam_xpt_sim.h>
92 #include <cam/cam_xpt_periph.h>
93 #include <cam/cam_periph.h>
94 #include <cam/scsi/scsi_all.h>
95 #include <cam/scsi/scsi_message.h>
96 #if __FreeBSD_version >= 900026
97 #include <cam/scsi/smp_all.h>
98 #endif
99 
100 #include <dev/mps/mpi/mpi2_type.h>
101 #include <dev/mps/mpi/mpi2.h>
102 #include <dev/mps/mpi/mpi2_ioc.h>
103 #include <dev/mps/mpi/mpi2_sas.h>
104 #include <dev/mps/mpi/mpi2_cnfg.h>
105 #include <dev/mps/mpi/mpi2_init.h>
106 #include <dev/mps/mpi/mpi2_tool.h>
107 #include <dev/mps/mps_ioctl.h>
108 #include <dev/mps/mpsvar.h>
109 #include <dev/mps/mps_table.h>
110 #include <dev/mps/mps_sas.h>
111 
112 #define MPSSAS_DISCOVERY_TIMEOUT	20
113 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
114 
115 /*
116  * static array to check SCSI OpCode for EEDP protection bits
117  */
118 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
119 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
120 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
121 static uint8_t op_code_prot[256] = {
122 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
123 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
124 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
125 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
126 	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
127 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
128 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
129 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
130 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
131 	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
132 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
133 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
134 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
135 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
136 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
137 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
138 };
139 
140 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
141 
142 static void mpssas_discovery_timeout(void *data);
143 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
144 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
145 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
146 static void mpssas_poll(struct cam_sim *sim);
147 static void mpssas_scsiio_timeout(void *data);
148 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
149 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
150     struct mps_command *cm, union ccb *ccb);
151 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
152 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
153 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
154 #if __FreeBSD_version >= 900026
155 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
156 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
157 			       uint64_t sasaddr);
158 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
159 #endif //FreeBSD_version >= 900026
160 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
161 static int  mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
162 static int  mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
163 static void mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb);
164 static void mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb);
165 static void mpssas_scanner_thread(void *arg);
166 #if __FreeBSD_version >= 1000006
167 static void mpssas_async(void *callback_arg, uint32_t code,
168 			 struct cam_path *path, void *arg);
169 #else
170 static void mpssas_check_eedp(struct mpssas_softc *sassc);
171 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
172 #endif
173 static int mpssas_send_portenable(struct mps_softc *sc);
174 static void mpssas_portenable_complete(struct mps_softc *sc,
175     struct mps_command *cm);
176 
177 struct mpssas_target *
178 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
179 {
180 	struct mpssas_target *target;
181 	int i;
182 
183 	for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
184 		target = &sassc->targets[i];
185 		if (target->handle == handle)
186 			return (target);
187 	}
188 
189 	return (NULL);
190 }
191 
192 /* we need to freeze the simq during attach and diag reset, to avoid failing
193  * commands before device handles have been found by discovery.  Since
194  * discovery involves reading config pages and possibly sending commands,
195  * discovery actions may continue even after we receive the end of discovery
196  * event, so refcount discovery actions instead of assuming we can unfreeze
197  * the simq when we get the event.
198  */
199 void
200 mpssas_startup_increment(struct mpssas_softc *sassc)
201 {
202 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
203 		if (sassc->startup_refcount++ == 0) {
204 			/* just starting, freeze the simq */
205 			mps_dprint(sassc->sc, MPS_INFO,
206 			    "%s freezing simq\n", __func__);
207 			xpt_freeze_simq(sassc->sim, 1);
208 		}
209 		mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
210 		    sassc->startup_refcount);
211 	}
212 }
213 
214 void
215 mpssas_startup_decrement(struct mpssas_softc *sassc)
216 {
217 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
218 		if (--sassc->startup_refcount == 0) {
219 			/* finished all discovery-related actions, release
220 			 * the simq and rescan for the latest topology.
221 			 */
222 			mps_dprint(sassc->sc, MPS_INFO,
223 			    "%s releasing simq\n", __func__);
224 			sassc->flags &= ~MPSSAS_IN_STARTUP;
225 			xpt_release_simq(sassc->sim, 1);
226 			mpssas_rescan_target(sassc->sc, NULL);
227 		}
228 		mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
229 		    sassc->startup_refcount);
230 	}
231 }
232 
233 /* LSI's firmware requires us to stop sending commands when we're doing task
234  * management, so refcount the TMs and keep the simq frozen when any are in
235  * use.
236  */
237 struct mps_command *
238 mpssas_alloc_tm(struct mps_softc *sc)
239 {
240 	struct mps_command *tm;
241 
242 	tm = mps_alloc_high_priority_command(sc);
243 	if (tm != NULL) {
244 		if (sc->sassc->tm_count++ == 0) {
245 			mps_printf(sc, "%s freezing simq\n", __func__);
246 			xpt_freeze_simq(sc->sassc->sim, 1);
247 		}
248 		mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
249 		    sc->sassc->tm_count);
250 	}
251 	return tm;
252 }
253 
/*
 * Release a task management command obtained from mpssas_alloc_tm() and
 * drop the driver's TM refcount.  NULL is a no-op.
 */
void
mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
{
	if (tm == NULL)
		return;

	/* if there are no TMs in use, we can release the simq.  We use our
	 * own refcount so that it's easier for a diag reset to cleanup and
	 * release the simq.
	 */
	if (--sc->sassc->tm_count == 0) {
		mps_printf(sc, "%s releasing simq\n", __func__);
		xpt_release_simq(sc->sassc->sim, 1);
	}
	mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
	    sc->sassc->tm_count);

	/* Return the frame to the high-priority pool last. */
	mps_free_high_priority_command(sc, tm);
}
273 
274 
275 void
276 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
277 {
278 	struct mpssas_softc *sassc = sc->sassc;
279 	path_id_t pathid;
280 	target_id_t targetid;
281 	union ccb *ccb;
282 
283 	pathid = cam_sim_path(sassc->sim);
284 	if (targ == NULL)
285 		targetid = CAM_TARGET_WILDCARD;
286 	else
287 		targetid = targ - sassc->targets;
288 
289 	/*
290 	 * Allocate a CCB and schedule a rescan.
291 	 */
292 	ccb = xpt_alloc_ccb_nowait();
293 	if (ccb == NULL) {
294 		mps_dprint(sc, MPS_FAULT, "unable to alloc CCB for rescan\n");
295 		return;
296 	}
297 
298 	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
299 		            targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
300 		mps_dprint(sc, MPS_FAULT, "unable to create path for rescan\n");
301 		xpt_free_ccb(ccb);
302 		return;
303 	}
304 
305 	/* XXX Hardwired to scan the bus for now */
306 	ccb->ccb_h.func_code = XPT_SCAN_BUS;
307 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
308 	mpssas_rescan(sassc, ccb);
309 }
310 
/*
 * Log a printf-style message for the given command, prefixed with its
 * CAM path string (or a "noperiph" sim:bus:target:lun tuple when no CCB
 * is attached) and the request SMID.  The message is assembled in a
 * fixed-size stack sbuf and emitted with a single printf() call.
 */
static void
mpssas_log_command(struct mps_command *cm, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* Flags 0: fixed-length sbuf backed by 'str'; output may truncate. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			/* For SCSI I/O also include the CDB and length. */
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: identify the command by sim/bus/target/lun. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	printf("%s", sbuf_data(&sb));

	va_end(ap);
}
352 
353 
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_volume_remove().  On success, clears the target's
 * firmware-derived state so the slot can be re-matched if the volume
 * comes back.  Unlike bare-drive removal (mpssas_remove_device), no
 * SAS_OP_REMOVE_DEVICE follow-up is issued for volumes.
 */
static void
mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	struct mpssas_target *targ;
	uint16_t handle;

	mps_dprint(sc, MPS_INFO, "%s\n", __func__);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
		    __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
		mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
		   reply->IOCStatus, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_printf(sc, "Reset aborted %u commands\n", reply->TerminationCount);
	/* Free the reply frame now; the TM itself is freed below. */
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	mps_printf(sc, "clearing target %u handle 0x%04x\n", targ->tid, handle);

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
	}

	mpssas_free_tm(sc, tm);
}
408 
409 
410 /*
411  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
412  * Otherwise Volume Delete is same as Bare Drive Removal.
413  */
414 void
415 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
416 {
417 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
418 	struct mps_softc *sc;
419 	struct mps_command *cm;
420 	struct mpssas_target *targ = NULL;
421 
422 	mps_dprint(sassc->sc, MPS_INFO, "%s\n", __func__);
423 	sc = sassc->sc;
424 
425 #ifdef WD_SUPPORT
426 	/*
427 	 * If this is a WD controller, determine if the disk should be exposed
428 	 * to the OS or not.  If disk should be exposed, return from this
429 	 * function without doing anything.
430 	 */
431 	if (sc->WD_available && (sc->WD_hide_expose ==
432 	    MPS_WD_EXPOSE_ALWAYS)) {
433 		return;
434 	}
435 #endif //WD_SUPPORT
436 
437 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
438 	if (targ == NULL) {
439 		/* FIXME: what is the action? */
440 		/* We don't know about this device? */
441 		printf("%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
442 		return;
443 	}
444 
445 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
446 
447 	cm = mpssas_alloc_tm(sc);
448 	if (cm == NULL) {
449 		mps_printf(sc, "%s: command alloc failure\n", __func__);
450 		return;
451 	}
452 
453 	mpssas_rescan_target(sc, targ);
454 
455 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
456 	req->DevHandle = targ->handle;
457 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
458 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
459 
460 	/* SAS Hard Link Reset / SATA Link Reset */
461 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
462 
463 	cm->cm_targ = targ;
464 	cm->cm_data = NULL;
465 	cm->cm_desc.HighPriority.RequestFlags =
466 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
467 	cm->cm_complete = mpssas_remove_volume;
468 	cm->cm_complete_data = (void *)(uintptr_t)handle;
469 	mps_map_command(sc, cm);
470 }
471 
472 /*
473  * The MPT2 firmware performs debounce on the link to avoid transient link
474  * errors and false removals.  When it does decide that link has been lost
475  * and a device need to go away, it expects that the host will perform a
476  * target reset and then an op remove.  The reset has the side-effect of
477  * aborting any outstanding requests for the device, which is required for
478  * the op-remove to succeed.  It's not clear if the host should check for
479  * the device coming back alive after the reset.
480  */
/*
 * Start removal of a bare device: mark the target as in-removal, kick
 * off a CAM rescan, and issue a target-reset TM.  Its completion
 * (mpssas_remove_device) then sends the SAS_OP_REMOVE_DEVICE.
 */
void
mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ = NULL;

	mps_dprint(sassc->sc, MPS_INFO, "%s\n", __func__);

	/*
	 * If this is a WD controller, determine if the disk should be exposed
	 * to the OS or not.  If disk should be exposed, return from this
	 * function without doing anything.
	 */
	sc = sassc->sc;
	if ((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE) && (sc->WD_hide_expose ==
	    MPS_WD_EXPOSE_ALWAYS)) {
		return;
	}

	targ = mpssas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		printf("%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
		return;
	}

	targ->flags |= MPSSAS_TARGET_INREMOVAL;

	cm = mpssas_alloc_tm(sc);
	if (cm == NULL) {
		mps_printf(sc, "%s: command alloc failure\n", __func__);
		return;
	}

	mpssas_rescan_target(sc, targ);

	/* Build the target-reset TM in the recycled request frame. */
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = targ->handle;
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	cm->cm_complete = mpssas_remove_device;
	cm->cm_complete_data = (void *)(uintptr_t)handle;
	mps_map_command(sc, cm);
}
536 
/*
 * Completion handler for the target-reset TM from mpssas_prepare_remove().
 * If the reset succeeded, the same command frame is rebuilt into a
 * SAS_OP_REMOVE_DEVICE request (completed by mpssas_remove_complete),
 * and any commands still queued on the target are failed with
 * CAM_DEV_NOT_THERE.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	mps_dprint(sc, MPS_INFO, "%s\n", __func__);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
		    __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
		mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
		   reply->IOCStatus, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_INFO, "Reset aborted %u commands\n",
	    reply->TerminationCount);
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = handle;
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_INFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	/*
	 * Fail any commands still queued on the target; the reset should
	 * have aborted them in firmware, so complete them back to CAM.
	 * Note: 'tm' is reused as the loop cursor here.
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_INFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		mpssas_scsiio_complete(sc, tm);
	}
}
609 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request issued by
 * mpssas_remove_device().  On success, clears the target's
 * firmware-derived state; devname/sasaddr are deliberately preserved.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;

	mps_dprint(sc, MPS_INFO, "%s\n", __func__);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_printf(sc, "%s NULL reply removing device 0x%04x\n",
		    __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_printf(sc, "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, reply->IOCStatus);

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
	}

	mpssas_free_tm(sc, tm);
}
666 
667 static int
668 mpssas_register_events(struct mps_softc *sc)
669 {
670 	uint8_t events[16];
671 
672 	bzero(events, 16);
673 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
674 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
675 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
676 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
677 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
678 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
679 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
680 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
681 	setbit(events, MPI2_EVENT_IR_VOLUME);
682 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
683 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
684 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
685 
686 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
687 	    &sc->sassc->mpssas_eh);
688 
689 	return (0);
690 }
691 
/*
 * Attach the SAS/CAM layer to the controller: allocate the sassc and
 * target table, create the SIM and its queue, start the event taskqueue
 * and rescan thread, register the CAM bus, and freeze the simq until
 * initial discovery completes.  On any failure, mps_detach_sas() is
 * called to unwind partial state.  Returns 0 on success or an errno.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
#if __FreeBSD_version >= 1000006
	cam_status status;
#endif
	int unit, error = 0;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	/* M_WAITOK allocations cannot fail, so no NULL checks here. */
	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
	sc->sassc = sassc;
	sassc->sc = sc;

	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mps_dprint(sc, MPS_FAULT, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_FAULT, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);

	/* Run the task queue with lowest priority */
	taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	/* Dedicated kernel thread services deferred CAM rescan CCBs. */
	TAILQ_INIT(&sassc->ccb_scanq);
	error = mps_kproc_create(mpssas_scanner_thread, sassc,
	    &sassc->rescan_thread, 0, 0, "mps_scan%d", unit);
	if (error) {
		mps_printf(sc, "Error %d starting rescan thread\n", error);
		goto out;
	}

	mps_lock(sc);
	sassc->flags |= MPSSAS_SCANTHREAD;

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_FAULT, "Error %d registering SCSI bus\n",
		    error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.  Freezing
	 * the simq will prevent the CAM boottime scanner from running
	 * before discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	xpt_freeze_simq(sassc->sim, 1);
	sc->sassc->startup_refcount = 0;

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
	sassc->discovery_timeouts = 0;

	sassc->tm_count = 0;

#if __FreeBSD_version >= 1000006
	/* Async registration failure is non-fatal; just log it. */
	status = xpt_register_async(AC_ADVINFO_CHANGED, mpssas_async, sc, NULL);
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x registering async handler for "
			   "AC_ADVINFO_CHANGED events\n", status);
	}
#endif

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);
	return (error);
}
788 
/*
 * Tear down the SAS/CAM layer, in reverse order of mps_attach_sas().
 * Safe to call on a partially-attached instance (it is the error-unwind
 * path of mps_attach_sas), so every resource is checked before release.
 * Returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;

	mps_dprint(sc, MPS_INFO, "%s\n", __func__);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
#if __FreeBSD_version >= 1000006
	xpt_register_async(0, mpssas_async, sc, NULL);
#endif

	/* Drop the startup freeze taken in mps_attach_sas, if still held. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	/*
	 * Ask the scanner thread to exit and wait (bounded) for it to
	 * clear MPSSAS_SCANTHREAD and wake us.
	 */
	if (sassc->flags & MPSSAS_SCANTHREAD) {
		sassc->flags |= MPSSAS_SHUTDOWN;
		wakeup(&sassc->ccb_scanq);

		if (sassc->flags & MPSSAS_SCANTHREAD) {
			msleep(&sassc->flags, &sc->mps_mtx, PRIBIO,
			       "mps_shutdown", 30 * hz);
		}
	}
	mps_unlock(sc);

	mps_dprint(sc, MPS_INFO, "%s:%d\n", __func__,__LINE__);
	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
847 
/*
 * Discovery has finished: cancel the pending discovery watchdog
 * callout, if one was armed.
 */
void
mpssas_discovery_end(struct mpssas_softc *sassc)
{
	struct mps_softc *sc = sassc->sc;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
		callout_stop(&sassc->discovery_callout);

}
859 
/*
 * Watchdog callout for discovery.  If discovery has not completed, poll
 * the hardware (in case interrupts are broken) and re-arm the timeout,
 * giving up after MPSSAS_MAX_DISCOVERY_TIMEOUTS attempts.
 */
static void
mpssas_discovery_timeout(void *data)
{
	struct mpssas_softc *sassc = data;
	struct mps_softc *sc;

	sc = sassc->sc;
	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	mps_lock(sc);
	mps_printf(sc,
	    "Timeout waiting for discovery, interrupts may not be working!\n");
	sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;

	/* Poll the hardware for events in case interrupts aren't working */
	mps_intr_locked(sc);

	mps_printf(sassc->sc,
	    "Finished polling after discovery timeout at %d\n", ticks);

	/* Polling may have processed the end-of-discovery event. */
	if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
		mpssas_discovery_end(sassc);
	} else {
		if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
			/* Still discovering: re-arm the watchdog. */
			sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
			callout_reset(&sassc->discovery_callout,
			    MPSSAS_DISCOVERY_TIMEOUT * hz,
			    mpssas_discovery_timeout, sassc);
			sassc->discovery_timeouts++;
		} else {
			/* Too many timeouts: force discovery to end. */
			mps_dprint(sassc->sc, MPS_FAULT,
			    "Discovery timed out, continuing.\n");
			sassc->flags &= ~MPSSAS_IN_DISCOVERY;
			mpssas_discovery_end(sassc);
		}
	}

	mps_unlock(sc);
}
899 
/*
 * CAM action entry point for the SIM.  Dispatches on the CCB function
 * code: answers path inquiries and transfer-settings queries inline,
 * hands SCSI I/O, device resets, and SMP passthrough to dedicated
 * handlers, and fakes success for bus reset/abort requests the hardware
 * does not expose.  Must be called with the softc mutex held.
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	mps_dprint(sassc->sc, MPS_TRACE, "%s func 0x%x\n", __func__,
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report controller capabilities and addressing limits. */
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		/* Firmware resets are handled internally, not via CAM. */
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->sc->facts->MaxTargets - 1;
		cpi->max_lun = 8;
		cpi->initiator_id = 255;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/* Baseline SAS 1.5Gb/s; per-target rate reported below. */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXX KDM where does this number come from?
		 */
		cpi->maxio = 256 * 1024;
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		/* A zero handle means the target is not present. */
		targ = &sassc->targets[cts->ccb_h.target_id];
		if (targ->handle == 0x0) {
			cts->ccb_h.status = CAM_TID_INVALID;
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Map the MPI negotiated link rate code to a bit rate. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		cts->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_RESET_DEV:
		/* Handler completes the CCB itself; do not fall through. */
		mps_printf(sassc->sc, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* PIM_NOBUSRESET was advertised; report success anyway. */
		mps_printf(sassc->sc, "mpssas_action faking success for "
			   "abort or reset\n");
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_SCSI_IO:
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	}
	/* Cases that break (rather than return) are completed here. */
	xpt_done(ccb);

}
1016 
1017 static void
1018 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1019     target_id_t target_id, lun_id_t lun_id)
1020 {
1021 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1022 	struct cam_path *path;
1023 
1024 	mps_printf(sc, "%s code %x target %d lun %d\n", __func__,
1025 	    ac_code, target_id, lun_id);
1026 
1027 	if (xpt_create_path(&path, NULL,
1028 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1029 		mps_printf(sc, "unable to create path for reset "
1030 			   "notification\n");
1031 		return;
1032 	}
1033 
1034 	xpt_async(ac_code, path, NULL);
1035 	xpt_free_path(path);
1036 }
1037 
1038 static void
1039 mpssas_complete_all_commands(struct mps_softc *sc)
1040 {
1041 	struct mps_command *cm;
1042 	int i;
1043 	int completed;
1044 
1045 	mps_printf(sc, "%s\n", __func__);
1046 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1047 
1048 	/* complete all commands with a NULL reply */
1049 	for (i = 1; i < sc->num_reqs; i++) {
1050 		cm = &sc->commands[i];
1051 		cm->cm_reply = NULL;
1052 		completed = 0;
1053 
1054 		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1055 			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1056 
1057 		if (cm->cm_complete != NULL) {
1058 			mpssas_log_command(cm,
1059 			    "completing cm %p state %x ccb %p for diag reset\n",
1060 			    cm, cm->cm_state, cm->cm_ccb);
1061 
1062 			cm->cm_complete(sc, cm);
1063 			completed = 1;
1064 		}
1065 
1066 		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1067 			mpssas_log_command(cm,
1068 			    "waking up cm %p state %x ccb %p for diag reset\n",
1069 			    cm, cm->cm_state, cm->cm_ccb);
1070 			wakeup(cm);
1071 			completed = 1;
1072 		}
1073 
1074 		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1075 			/* this should never happen, but if it does, log */
1076 			mpssas_log_command(cm,
1077 			    "cm %p state %x flags 0x%x ccb %p during diag "
1078 			    "reset\n", cm, cm->cm_state, cm->cm_flags,
1079 			    cm->cm_ccb);
1080 		}
1081 	}
1082 }
1083 
/*
 * Rebuild SAS-layer state after a controller diag reset: freeze the
 * simq, announce a bus reset to CAM, flush every outstanding command,
 * and invalidate all cached device handles so rediscovery can assign
 * fresh ones.  Called from the core driver's reinit path.
 */
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_printf(sc, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	xpt_freeze_simq(sc->sassc->sim, 1);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_printf(sc, "%s startup %u tm %u after command completion\n",
	    __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);

	/*
	 * The simq was explicitly frozen above, so set the refcount to 0.
	 * The simq will be explicitly released after port enable completes.
	 */
	sc->sassc->startup_refcount = 0;

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->facts->MaxTargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mps_printf(sc, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1132 static void
1133 mpssas_tm_timeout(void *data)
1134 {
1135 	struct mps_command *tm = data;
1136 	struct mps_softc *sc = tm->cm_sc;
1137 
1138 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1139 
1140 	mpssas_log_command(tm, "task mgmt %p timed out\n", tm);
1141 	mps_reinit(sc);
1142 }
1143 
1144 static void
1145 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1146 {
1147 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1148 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1149 	unsigned int cm_count = 0;
1150 	struct mps_command *cm;
1151 	struct mpssas_target *targ;
1152 
1153 	callout_stop(&tm->cm_callout);
1154 
1155 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1156 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1157 	targ = tm->cm_targ;
1158 
1159 	/*
1160 	 * Currently there should be no way we can hit this case.  It only
1161 	 * happens when we have a failure to allocate chain frames, and
1162 	 * task management commands don't have S/G lists.
1163 	 */
1164 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1165 		mps_printf(sc, "%s: cm_flags = %#x for LUN reset! "
1166 			   "This should not happen!\n", __func__, tm->cm_flags);
1167 		mpssas_free_tm(sc, tm);
1168 		return;
1169 	}
1170 
1171 	if (reply == NULL) {
1172 		mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1173 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1174 			/* this completion was due to a reset, just cleanup */
1175 			targ->flags &= ~MPSSAS_TARGET_INRESET;
1176 			targ->tm = NULL;
1177 			mpssas_free_tm(sc, tm);
1178 		}
1179 		else {
1180 			/* we should have gotten a reply. */
1181 			mps_reinit(sc);
1182 		}
1183 		return;
1184 	}
1185 
1186 	mpssas_log_command(tm,
1187 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1188 	    reply->IOCStatus, reply->ResponseCode,
1189 	    reply->TerminationCount);
1190 
1191 	/* See if there are any outstanding commands for this LUN.
1192 	 * This could be made more efficient by using a per-LU data
1193 	 * structure of some sort.
1194 	 */
1195 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1196 		if (cm->cm_lun == tm->cm_lun)
1197 			cm_count++;
1198 	}
1199 
1200 	if (cm_count == 0) {
1201 		mpssas_log_command(tm,
1202 		    "logical unit %u finished recovery after reset\n",
1203 		    tm->cm_lun, tm);
1204 
1205 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1206 		    tm->cm_lun);
1207 
1208 		/* we've finished recovery for this logical unit.  check and
1209 		 * see if some other logical unit has a timedout command
1210 		 * that needs to be processed.
1211 		 */
1212 		cm = TAILQ_FIRST(&targ->timedout_commands);
1213 		if (cm) {
1214 			mpssas_send_abort(sc, tm, cm);
1215 		}
1216 		else {
1217 			targ->tm = NULL;
1218 			mpssas_free_tm(sc, tm);
1219 		}
1220 	}
1221 	else {
1222 		/* if we still have commands for this LUN, the reset
1223 		 * effectively failed, regardless of the status reported.
1224 		 * Escalate to a target reset.
1225 		 */
1226 		mpssas_log_command(tm,
1227 		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1228 		    tm, cm_count);
1229 		mpssas_send_reset(sc, tm,
1230 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1231 	}
1232 }
1233 
1234 static void
1235 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1236 {
1237 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1238 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1239 	struct mpssas_target *targ;
1240 
1241 	callout_stop(&tm->cm_callout);
1242 
1243 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1244 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1245 	targ = tm->cm_targ;
1246 
1247 	/*
1248 	 * Currently there should be no way we can hit this case.  It only
1249 	 * happens when we have a failure to allocate chain frames, and
1250 	 * task management commands don't have S/G lists.
1251 	 */
1252 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1253 		mps_printf(sc, "%s: cm_flags = %#x for target reset! "
1254 			   "This should not happen!\n", __func__, tm->cm_flags);
1255 		mpssas_free_tm(sc, tm);
1256 		return;
1257 	}
1258 
1259 	if (reply == NULL) {
1260 		mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1261 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1262 			/* this completion was due to a reset, just cleanup */
1263 			targ->flags &= ~MPSSAS_TARGET_INRESET;
1264 			targ->tm = NULL;
1265 			mpssas_free_tm(sc, tm);
1266 		}
1267 		else {
1268 			/* we should have gotten a reply. */
1269 			mps_reinit(sc);
1270 		}
1271 		return;
1272 	}
1273 
1274 	mpssas_log_command(tm,
1275 	    "target reset status 0x%x code 0x%x count %u\n",
1276 	    reply->IOCStatus, reply->ResponseCode,
1277 	    reply->TerminationCount);
1278 
1279 	targ->flags &= ~MPSSAS_TARGET_INRESET;
1280 
1281 	if (targ->outstanding == 0) {
1282 		/* we've finished recovery for this target and all
1283 		 * of its logical units.
1284 		 */
1285 		mpssas_log_command(tm,
1286 		    "recovery finished after target reset\n");
1287 
1288 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1289 		    CAM_LUN_WILDCARD);
1290 
1291 		targ->tm = NULL;
1292 		mpssas_free_tm(sc, tm);
1293 	}
1294 	else {
1295 		/* after a target reset, if this target still has
1296 		 * outstanding commands, the reset effectively failed,
1297 		 * regardless of the status reported.  escalate.
1298 		 */
1299 		mpssas_log_command(tm,
1300 		    "target reset complete for tm %p, but still have %u command(s)\n",
1301 		    tm, targ->outstanding);
1302 		mps_reinit(sc);
1303 	}
1304 }
1305 
1306 #define MPS_RESET_TIMEOUT 30
1307 
1308 static int
1309 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1310 {
1311 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1312 	struct mpssas_target *target;
1313 	int err;
1314 
1315 	target = tm->cm_targ;
1316 	if (target->handle == 0) {
1317 		mps_printf(sc, "%s null devhandle for target_id %d\n",
1318 		    __func__, target->tid);
1319 		return -1;
1320 	}
1321 
1322 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1323 	req->DevHandle = target->handle;
1324 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1325 	req->TaskType = type;
1326 
1327 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1328 		/* XXX Need to handle invalid LUNs */
1329 		MPS_SET_LUN(req->LUN, tm->cm_lun);
1330 		tm->cm_targ->logical_unit_resets++;
1331 		mpssas_log_command(tm, "sending logical unit reset\n");
1332 		tm->cm_complete = mpssas_logical_unit_reset_complete;
1333 	}
1334 	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1335 		/* Target reset method =  SAS Hard Link Reset / SATA Link Reset */
1336 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1337 		tm->cm_targ->target_resets++;
1338 		tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1339 		mpssas_log_command(tm, "sending target reset\n");
1340 		tm->cm_complete = mpssas_target_reset_complete;
1341 	}
1342 	else {
1343 		mps_printf(sc, "unexpected reset type 0x%x\n", type);
1344 		return -1;
1345 	}
1346 
1347 	tm->cm_data = NULL;
1348 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1349 	tm->cm_complete_data = (void *)tm;
1350 
1351 	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1352 	    mpssas_tm_timeout, tm);
1353 
1354 	err = mps_map_command(sc, tm);
1355 	if (err)
1356 		mpssas_log_command(tm,
1357 		    "error %d sending reset type %u\n",
1358 		    err, type);
1359 
1360 	return err;
1361 }
1362 
1363 
/*
 * Completion handler for an ABORT TASK task management command.
 * Walks the target's timed-out command queue to decide the next
 * recovery step: done (queue empty), abort the next victim (head of
 * queue differs from the one just aborted), or escalate to a logical
 * unit reset (the aborted command is still queued, i.e. the abort
 * failed to complete it).
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed, so its escalation timeout no longer applies. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, req->TaskMID);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, req->TaskMID);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    req->TaskMID,
	    reply->IOCStatus, reply->ResponseCode,
	    reply->TerminationCount);

	/* A successfully aborted command is dequeued by its own
	 * completion path, so the head of timedout_commands tells us
	 * whether the abort actually worked.
	 */
	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm,
		    "finished recovery after aborting TaskMID %u\n",
		    req->TaskMID);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (req->TaskMID != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm,
		    "continuing recovery after aborting TaskMID %u\n",
		    req->TaskMID);

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm,
		    "abort failed for TaskMID %u tm %p\n",
		    req->TaskMID, tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1445 
1446 #define MPS_ABORT_TIMEOUT 5
1447 
1448 static int
1449 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1450 {
1451 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1452 	struct mpssas_target *targ;
1453 	int err;
1454 
1455 	targ = cm->cm_targ;
1456 	if (targ->handle == 0) {
1457 		mps_printf(sc, "%s null devhandle for target_id %d\n",
1458 		    __func__, cm->cm_ccb->ccb_h.target_id);
1459 		return -1;
1460 	}
1461 
1462 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1463 	req->DevHandle = targ->handle;
1464 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1465 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1466 
1467 	/* XXX Need to handle invalid LUNs */
1468 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1469 
1470 	req->TaskMID = cm->cm_desc.Default.SMID;
1471 
1472 	tm->cm_data = NULL;
1473 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1474 	tm->cm_complete = mpssas_abort_complete;
1475 	tm->cm_complete_data = (void *)tm;
1476 	tm->cm_targ = cm->cm_targ;
1477 	tm->cm_lun = cm->cm_lun;
1478 
1479 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1480 	    mpssas_tm_timeout, tm);
1481 
1482 	targ->aborts++;
1483 
1484 	err = mps_map_command(sc, tm);
1485 	if (err)
1486 		mpssas_log_command(tm,
1487 		    "error %d sending abort for cm %p SMID %u\n",
1488 		    err, cm, req->TaskMID);
1489 	return err;
1490 }
1491 
1492 
/*
 * Callout handler for a SCSI I/O command that exceeded its CCB timeout.
 * First polls the hardware in case the completion is merely pending;
 * otherwise marks the command timed out, queues it for recovery, and
 * starts (or joins) task management recovery for its target.
 */
static void
mpssas_scsiio_timeout(void *data)
{
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ;

	cm = (struct mps_command *)data;
	sc = cm->cm_sc;

	mtx_assert(&sc->mps_mtx, MA_OWNED);

	mps_printf(sc, "%s checking sc %p cm %p\n", __func__, sc, cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mps_intr_locked(sc);
	if (cm->cm_state == MPS_CM_STATE_FREE) {
		/* Completion raced the timeout; nothing to recover. */
		mps_printf(sc, "SCSI command %p sc %p almost timed out\n", cm, sc);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mps_printf(sc, "command timeout with NULL ccb\n");
		return;
	}

	mpssas_log_command(cm, "command timeout cm %p ccb %p\n",
	    cm, cm->cm_ccb);

	targ = cm->cm_targ;
	targ->timeouts++;

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */

	/* Queue the command for recovery; the abort/reset completion
	 * handlers drain targ->timedout_commands one entry at a time.
	 */
	cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mps_printf(sc, "queued timedout cm %p for processing by tm %p\n",
		    cm, targ->tm);
	}
	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
		mps_printf(sc, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mpssas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mps_printf(sc, "timedout cm %p failed to allocate a tm\n",
		    cm);
	}

}
1566 
/*
 * Handle an XPT_SCSI_IO CCB: validate the target, allocate a driver
 * command, translate the CCB into an MPI2 SCSI_IO request (direction,
 * tagging, LUN, CDB, and optional EEDP protection fields), arm the
 * per-command timeout, and hand the request to the DMA mapping layer,
 * which queues it to the hardware.  Must be called with the softc
 * mutex held.
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;

	sc = sassc->sc;
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "%s ccb %p target flag %x\n", __func__, ccb, targ->flags);
	/* A zero handle means the target is not present. */
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_TRACE, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}
	/* RAID component members are addressed through the volume, not
	 * directly via SCSI I/O.
	 */
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		mps_dprint(sc, MPS_TRACE, "%s Raid component no SCSI IO supported %u\n",
		    __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			csio->ccb_h.status = CAM_REQ_CMP;
		else
			csio->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_TRACE, "%s shutting down\n", __func__);
		csio->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}

	/* Out of command slots: freeze the simq and ask CAM to requeue. */
	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Build the MPI2 SCSI_IO request from the CCB. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = targ->handle;
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = cm->cm_sense_busaddr;
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = csio->dxfer_len;
	req->BidirectionalDataLength = 0;
	req->IoFlags = csio->cdb_len;
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		req->Control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		req->Control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		req->Control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		req->Control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		req->Control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		req->Control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		req->Control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Apply per-target Transport Layer Retries setting. */
	req->Control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;

	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		ccb->ccb_h.status = CAM_LUN_INVALID;
		xpt_done(ccb);
		return;
	}

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	req->IoFlags = csio->cdb_len;

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* lun is NULL here if the loop finds no matching LUN. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = lun->eedp_block_size;
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = eedp_flags;

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = eedp_flags;
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	/* Wire the driver command to the CCB and its target. */
	cm->cm_data = csio->data_ptr;
	cm->cm_length = csio->dxfer_len;
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = targ->handle;
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.status != MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			ccb->ccb_h.status = CAM_REQ_INPROG;
		}
	}

	/* CCB timeout is in milliseconds; callout wants ticks. */
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	   mpssas_scsiio_timeout, cm);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);

	if ((sc->mps_debug & MPS_TRACE) != 0)
		mpssas_log_command(cm, "%s cm %p ccb %p outstanding %u\n",
		    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1794 
1795 static void
1796 mps_response_code(struct mps_softc *sc, u8 response_code)
1797 {
1798         char *desc;
1799 
1800         switch (response_code) {
1801         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1802                 desc = "task management request completed";
1803                 break;
1804         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1805                 desc = "invalid frame";
1806                 break;
1807         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1808                 desc = "task management request not supported";
1809                 break;
1810         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1811                 desc = "task management request failed";
1812                 break;
1813         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1814                 desc = "task management request succeeded";
1815                 break;
1816         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1817                 desc = "invalid lun";
1818                 break;
1819         case 0xA:
1820                 desc = "overlapped tag attempted";
1821                 break;
1822         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1823                 desc = "task queued, however not sent to target";
1824                 break;
1825         default:
1826                 desc = "unknown";
1827                 break;
1828         }
1829 		mps_dprint(sc, MPS_INFO, "response_code(0x%01x): %s\n",
1830                 response_code, desc);
1831 }
1832 /**
1833  * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1834  */
1835 static void
1836 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1837     Mpi2SCSIIOReply_t *mpi_reply)
1838 {
1839 	u32 response_info;
1840 	u8 *response_bytes;
1841 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1842 	    MPI2_IOCSTATUS_MASK;
1843 	u8 scsi_state = mpi_reply->SCSIState;
1844 	u8 scsi_status = mpi_reply->SCSIStatus;
1845 	char *desc_ioc_state = NULL;
1846 	char *desc_scsi_status = NULL;
1847 	char *desc_scsi_state = sc->tmp_string;
1848 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1849 
1850 	if (log_info == 0x31170000)
1851 		return;
1852 
1853 	switch (ioc_status) {
1854 	case MPI2_IOCSTATUS_SUCCESS:
1855 		desc_ioc_state = "success";
1856 		break;
1857 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1858 		desc_ioc_state = "invalid function";
1859 		break;
1860 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1861 		desc_ioc_state = "scsi recovered error";
1862 		break;
1863 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1864 		desc_ioc_state = "scsi invalid dev handle";
1865 		break;
1866 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1867 		desc_ioc_state = "scsi device not there";
1868 		break;
1869 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1870 		desc_ioc_state = "scsi data overrun";
1871 		break;
1872 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1873 		desc_ioc_state = "scsi data underrun";
1874 		break;
1875 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1876 		desc_ioc_state = "scsi io data error";
1877 		break;
1878 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1879 		desc_ioc_state = "scsi protocol error";
1880 		break;
1881 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1882 		desc_ioc_state = "scsi task terminated";
1883 		break;
1884 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1885 		desc_ioc_state = "scsi residual mismatch";
1886 		break;
1887 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1888 		desc_ioc_state = "scsi task mgmt failed";
1889 		break;
1890 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1891 		desc_ioc_state = "scsi ioc terminated";
1892 		break;
1893 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1894 		desc_ioc_state = "scsi ext terminated";
1895 		break;
1896 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1897 		desc_ioc_state = "eedp guard error";
1898 		break;
1899 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1900 		desc_ioc_state = "eedp ref tag error";
1901 		break;
1902 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1903 		desc_ioc_state = "eedp app tag error";
1904 		break;
1905 	default:
1906 		desc_ioc_state = "unknown";
1907 		break;
1908 	}
1909 
1910 	switch (scsi_status) {
1911 	case MPI2_SCSI_STATUS_GOOD:
1912 		desc_scsi_status = "good";
1913 		break;
1914 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
1915 		desc_scsi_status = "check condition";
1916 		break;
1917 	case MPI2_SCSI_STATUS_CONDITION_MET:
1918 		desc_scsi_status = "condition met";
1919 		break;
1920 	case MPI2_SCSI_STATUS_BUSY:
1921 		desc_scsi_status = "busy";
1922 		break;
1923 	case MPI2_SCSI_STATUS_INTERMEDIATE:
1924 		desc_scsi_status = "intermediate";
1925 		break;
1926 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
1927 		desc_scsi_status = "intermediate condmet";
1928 		break;
1929 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
1930 		desc_scsi_status = "reservation conflict";
1931 		break;
1932 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
1933 		desc_scsi_status = "command terminated";
1934 		break;
1935 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
1936 		desc_scsi_status = "task set full";
1937 		break;
1938 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
1939 		desc_scsi_status = "aca active";
1940 		break;
1941 	case MPI2_SCSI_STATUS_TASK_ABORTED:
1942 		desc_scsi_status = "task aborted";
1943 		break;
1944 	default:
1945 		desc_scsi_status = "unknown";
1946 		break;
1947 	}
1948 
1949 	desc_scsi_state[0] = '\0';
1950 	if (!scsi_state)
1951 		desc_scsi_state = " ";
1952 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
1953 		strcat(desc_scsi_state, "response info ");
1954 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
1955 		strcat(desc_scsi_state, "state terminated ");
1956 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
1957 		strcat(desc_scsi_state, "no status ");
1958 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
1959 		strcat(desc_scsi_state, "autosense failed ");
1960 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
1961 		strcat(desc_scsi_state, "autosense valid ");
1962 
1963 	mps_dprint(sc, MPS_INFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x), \n",
1964 		le16toh(mpi_reply->DevHandle),
1965 	    desc_ioc_state, ioc_status);
1966 	/* We can add more detail about underflow data here
1967 	 * TO-DO
1968 	 * */
1969 	mps_dprint(sc, MPS_INFO, "\tscsi_status(%s)(0x%02x), "
1970 	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status,
1971 	    scsi_status, desc_scsi_state, scsi_state);
1972 
1973 	if (sc->mps_debug & MPS_INFO &&
1974 		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1975 		mps_dprint(sc, MPS_INFO, "-> Sense Buffer Data : Start :\n");
1976 		scsi_sense_print(csio);
1977 		mps_dprint(sc, MPS_INFO, "-> Sense Buffer Data : End :\n");
1978 	}
1979 
1980 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
1981 		response_info = le32toh(mpi_reply->ResponseInfo);
1982 		response_bytes = (u8 *)&response_info;
1983 		mps_response_code(sc,response_bytes[0]);
1984 	}
1985 }
1986 
1987 static void
1988 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1989 {
1990 	MPI2_SCSI_IO_REPLY *rep;
1991 	union ccb *ccb;
1992 	struct ccb_scsiio *csio;
1993 	struct mpssas_softc *sassc;
1994 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
1995 	u8 *TLR_bits, TLR_on;
1996 	int dir = 0, i;
1997 	u16 alloc_len;
1998 
1999 	mps_dprint(sc, MPS_TRACE,
2000 	    "%s cm %p SMID %u ccb %p reply %p outstanding %u\n",
2001 	    __func__, cm, cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2002 	    cm->cm_targ->outstanding);
2003 
2004 	callout_stop(&cm->cm_callout);
2005 	mtx_assert(&sc->mps_mtx, MA_OWNED);
2006 
2007 	sassc = sc->sassc;
2008 	ccb = cm->cm_complete_data;
2009 	csio = &ccb->csio;
2010 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2011 	/*
2012 	 * XXX KDM if the chain allocation fails, does it matter if we do
2013 	 * the sync and unload here?  It is simpler to do it in every case,
2014 	 * assuming it doesn't cause problems.
2015 	 */
2016 	if (cm->cm_data != NULL) {
2017 		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2018 			dir = BUS_DMASYNC_POSTREAD;
2019 		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2020 			dir = BUS_DMASYNC_POSTWRITE;;
2021 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2022 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2023 	}
2024 
2025 	cm->cm_targ->completed++;
2026 	cm->cm_targ->outstanding--;
2027 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2028 
2029 	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
2030 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2031 		if (cm->cm_reply != NULL)
2032 			mpssas_log_command(cm,
2033 			    "completed timedout cm %p ccb %p during recovery "
2034 			    "ioc %x scsi %x state %x xfer %u\n",
2035 			    cm, cm->cm_ccb,
2036 			    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
2037 			    rep->TransferCount);
2038 		else
2039 			mpssas_log_command(cm,
2040 			    "completed timedout cm %p ccb %p during recovery\n",
2041 			    cm, cm->cm_ccb);
2042 	} else if (cm->cm_targ->tm != NULL) {
2043 		if (cm->cm_reply != NULL)
2044 			mpssas_log_command(cm,
2045 			    "completed cm %p ccb %p during recovery "
2046 			    "ioc %x scsi %x state %x xfer %u\n",
2047 			    cm, cm->cm_ccb,
2048 			    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
2049 			    rep->TransferCount);
2050 		else
2051 			mpssas_log_command(cm,
2052 			    "completed cm %p ccb %p during recovery\n",
2053 			    cm, cm->cm_ccb);
2054 	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2055 		mpssas_log_command(cm,
2056 		    "reset completed cm %p ccb %p\n",
2057 		    cm, cm->cm_ccb);
2058 	}
2059 
2060 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2061 		/*
2062 		 * We ran into an error after we tried to map the command,
2063 		 * so we're getting a callback without queueing the command
2064 		 * to the hardware.  So we set the status here, and it will
2065 		 * be retained below.  We'll go through the "fast path",
2066 		 * because there can be no reply when we haven't actually
2067 		 * gone out to the hardware.
2068 		 */
2069 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2070 
2071 		/*
2072 		 * Currently the only error included in the mask is
2073 		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2074 		 * chain frames.  We need to freeze the queue until we get
2075 		 * a command that completed without this error, which will
2076 		 * hopefully have some chain frames attached that we can
2077 		 * use.  If we wanted to get smarter about it, we would
2078 		 * only unfreeze the queue in this condition when we're
2079 		 * sure that we're getting some chain frames back.  That's
2080 		 * probably unnecessary.
2081 		 */
2082 		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2083 			xpt_freeze_simq(sassc->sim, 1);
2084 			sassc->flags |= MPSSAS_QUEUE_FROZEN;
2085 			mps_dprint(sc, MPS_INFO, "Error sending command, "
2086 				   "freezing SIM queue\n");
2087 		}
2088 	}
2089 
2090 	/* Take the fast path to completion */
2091 	if (cm->cm_reply == NULL) {
2092 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2093 			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2094 				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2095 			else {
2096 				ccb->ccb_h.status = CAM_REQ_CMP;
2097 				ccb->csio.scsi_status = SCSI_STATUS_OK;
2098 			}
2099 			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2100 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2101 				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2102 				mps_dprint(sc, MPS_INFO,
2103 					   "Unfreezing SIM queue\n");
2104 			}
2105 		}
2106 
2107 		/*
2108 		 * There are two scenarios where the status won't be
2109 		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
2110 		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2111 		 */
2112 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2113 			/*
2114 			 * Freeze the dev queue so that commands are
2115 			 * executed in the correct order with after error
2116 			 * recovery.
2117 			 */
2118 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2119 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2120 		}
2121 		mps_free_command(sc, cm);
2122 		xpt_done(ccb);
2123 		return;
2124 	}
2125 
2126 	if (sc->mps_debug & MPS_TRACE)
2127 		mpssas_log_command(cm,
2128 		    "ioc %x scsi %x state %x xfer %u\n",
2129 		    rep->IOCStatus, rep->SCSIStatus,
2130 		    rep->SCSIState, rep->TransferCount);
2131 
2132 	/*
2133 	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2134 	 * Volume if an error occurred (normal I/O retry).  Use the original
2135 	 * CCB, but set a flag that this will be a retry so that it's sent to
2136 	 * the original volume.  Free the command but reuse the CCB.
2137 	 */
2138 	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2139 		mps_free_command(sc, cm);
2140 		ccb->ccb_h.status = MPS_WD_RETRY;
2141 		mpssas_action_scsiio(sassc, ccb);
2142 		return;
2143 	}
2144 
2145 	switch (rep->IOCStatus & MPI2_IOCSTATUS_MASK) {
2146 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2147 		csio->resid = cm->cm_length - rep->TransferCount;
2148 		/* FALLTHROUGH */
2149 	case MPI2_IOCSTATUS_SUCCESS:
2150 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2151 
2152 		if ((rep->IOCStatus & MPI2_IOCSTATUS_MASK) ==
2153 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2154 			mpssas_log_command(cm, "recovered error\n");
2155 
2156 		/* Completion failed at the transport level. */
2157 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2158 		    MPI2_SCSI_STATE_TERMINATED)) {
2159 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2160 			break;
2161 		}
2162 
2163 		/* In a modern packetized environment, an autosense failure
2164 		 * implies that there's not much else that can be done to
2165 		 * recover the command.
2166 		 */
2167 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2168 			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
2169 			break;
2170 		}
2171 
2172 		/*
2173 		 * CAM doesn't care about SAS Response Info data, but if this is
2174 		 * the state check if TLR should be done.  If not, clear the
2175 		 * TLR_bits for the target.
2176 		 */
2177 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2178 		    ((rep->ResponseInfo & MPI2_SCSI_RI_MASK_REASONCODE) ==
2179 		    MPS_SCSI_RI_INVALID_FRAME)) {
2180 			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
2181 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2182 		}
2183 
2184 		/*
2185 		 * Intentionally override the normal SCSI status reporting
2186 		 * for these two cases.  These are likely to happen in a
2187 		 * multi-initiator environment, and we want to make sure that
2188 		 * CAM retries these commands rather than fail them.
2189 		 */
2190 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2191 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2192 			ccb->ccb_h.status = CAM_REQ_ABORTED;
2193 			break;
2194 		}
2195 
2196 		/* Handle normal status and sense */
2197 		csio->scsi_status = rep->SCSIStatus;
2198 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2199 			ccb->ccb_h.status = CAM_REQ_CMP;
2200 		else
2201 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2202 
2203 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2204 			int sense_len, returned_sense_len;
2205 
2206 			returned_sense_len = min(rep->SenseCount,
2207 			    sizeof(struct scsi_sense_data));
2208 			if (returned_sense_len < ccb->csio.sense_len)
2209 				ccb->csio.sense_resid = ccb->csio.sense_len -
2210 					returned_sense_len;
2211 			else
2212 				ccb->csio.sense_resid = 0;
2213 
2214 			sense_len = min(returned_sense_len,
2215 			    ccb->csio.sense_len - ccb->csio.sense_resid);
2216 			bzero(&ccb->csio.sense_data,
2217 			      sizeof(&ccb->csio.sense_data));
2218 			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2219 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2220 		}
2221 
2222 		/*
2223 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2224 		 * and it's page code 0 (Supported Page List), and there is
2225 		 * inquiry data, and this is for a sequential access device, and
2226 		 * the device is an SSP target, and TLR is supported by the
2227 		 * controller, turn the TLR_bits value ON if page 0x90 is
2228 		 * supported.
2229 		 */
2230 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2231 		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2232 		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2233 		    (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
2234 		    T_SEQUENTIAL) && (sc->control_TLR) &&
2235 		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
2236 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2237 			vpd_list = (struct scsi_vpd_supported_page_list *)
2238 			    csio->data_ptr;
2239 			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
2240 			    TLR_bits;
2241 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2242 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2243 			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2244 			    csio->cdb_io.cdb_bytes[4];
2245 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2246 				if (vpd_list->list[i] == 0x90) {
2247 					*TLR_bits = TLR_on;
2248 					break;
2249 				}
2250 			}
2251 		}
2252 		break;
2253 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2254 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2255 		/*
2256 		 * If devinfo is 0 this will be a volume.  In that case don't
2257 		 * tell CAM that the volume is not there.  We want volumes to
2258 		 * be enumerated until they are deleted/removed, not just
2259 		 * failed.
2260 		 */
2261 		if (cm->cm_targ->devinfo == 0)
2262 			ccb->ccb_h.status = CAM_REQ_CMP;
2263 		else
2264 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2265 		break;
2266 	case MPI2_IOCSTATUS_INVALID_SGL:
2267 		mps_print_scsiio_cmd(sc, cm);
2268 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2269 		break;
2270 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2271 		/*
2272 		 * This is one of the responses that comes back when an I/O
2273 		 * has been aborted.  If it is because of a timeout that we
2274 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2275 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2276 		 * command is the same (it gets retried, subject to the
2277 		 * retry counter), the only difference is what gets printed
2278 		 * on the console.
2279 		 */
2280 		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2281 			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
2282 		else
2283 			ccb->ccb_h.status = CAM_REQ_ABORTED;
2284 		break;
2285 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2286 		/* resid is ignored for this condition */
2287 		csio->resid = 0;
2288 		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
2289 		break;
2290 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2291 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2292 		/*
2293 		 * Since these are generally external (i.e. hopefully
2294 		 * transient transport-related) errors, retry these without
2295 		 * decrementing the retry count.
2296 		 */
2297 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
2298 		mpssas_log_command(cm,
2299 		    "terminated ioc %x scsi %x state %x xfer %u\n",
2300 		    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
2301 		    rep->TransferCount);
2302 		break;
2303 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2304 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2305 	case MPI2_IOCSTATUS_INVALID_VPID:
2306 	case MPI2_IOCSTATUS_INVALID_FIELD:
2307 	case MPI2_IOCSTATUS_INVALID_STATE:
2308 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2309 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2310 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2311 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2312 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2313 	default:
2314 		mpssas_log_command(cm,
2315 		    "completed ioc %x scsi %x state %x xfer %u\n",
2316 		    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
2317 		    rep->TransferCount);
2318 		csio->resid = cm->cm_length;
2319 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2320 		break;
2321 	}
2322 
2323 	mps_sc_failed_io_info(sc,csio,rep);
2324 
2325 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2326 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2327 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2328 		mps_dprint(sc, MPS_INFO, "Command completed, "
2329 			   "unfreezing SIM queue\n");
2330 	}
2331 
2332 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2333 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2334 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2335 	}
2336 
2337 	mps_free_command(sc, cm);
2338 	xpt_done(ccb);
2339 }
2340 
2341 static void
2342 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2343     union ccb *ccb) {
2344 	pMpi2SCSIIORequest_t	pIO_req;
2345 	struct mps_softc	*sc = sassc->sc;
2346 	uint64_t		virtLBA;
2347 	uint32_t		physLBA, stripe_offset, stripe_unit;
2348 	uint32_t		io_size, column;
2349 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2350 
2351 	/*
2352 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2353 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2354 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2355 	 * bit different than the 10/16 CDBs, handle them separately.
2356 	 */
2357 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2358 	CDB = pIO_req->CDB.CDB32;
2359 
2360 	/*
2361 	 * Handle 6 byte CDBs.
2362 	 */
2363 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2364 	    (CDB[0] == WRITE_6))) {
2365 		/*
2366 		 * Get the transfer size in blocks.
2367 		 */
2368 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2369 
2370 		/*
2371 		 * Get virtual LBA given in the CDB.
2372 		 */
2373 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2374 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2375 
2376 		/*
2377 		 * Check that LBA range for I/O does not exceed volume's
2378 		 * MaxLBA.
2379 		 */
2380 		if ((virtLBA + (uint64_t)io_size - 1) <=
2381 		    sc->DD_max_lba) {
2382 			/*
2383 			 * Check if the I/O crosses a stripe boundary.  If not,
2384 			 * translate the virtual LBA to a physical LBA and set
2385 			 * the DevHandle for the PhysDisk to be used.  If it
2386 			 * does cross a boundry, do normal I/O.  To get the
2387 			 * right DevHandle to use, get the map number for the
2388 			 * column, then use that map number to look up the
2389 			 * DevHandle of the PhysDisk.
2390 			 */
2391 			stripe_offset = (uint32_t)virtLBA &
2392 			    (sc->DD_stripe_size - 1);
2393 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2394 				physLBA = (uint32_t)virtLBA >>
2395 				    sc->DD_stripe_exponent;
2396 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2397 				column = physLBA % sc->DD_num_phys_disks;
2398 				pIO_req->DevHandle =
2399 				    sc->DD_column_map[column].dev_handle;
2400 				cm->cm_desc.SCSIIO.DevHandle =
2401 				    pIO_req->DevHandle;
2402 
2403 				physLBA = (stripe_unit <<
2404 				    sc->DD_stripe_exponent) + stripe_offset;
2405 				ptrLBA = &pIO_req->CDB.CDB32[1];
2406 				physLBA_byte = (uint8_t)(physLBA >> 16);
2407 				*ptrLBA = physLBA_byte;
2408 				ptrLBA = &pIO_req->CDB.CDB32[2];
2409 				physLBA_byte = (uint8_t)(physLBA >> 8);
2410 				*ptrLBA = physLBA_byte;
2411 				ptrLBA = &pIO_req->CDB.CDB32[3];
2412 				physLBA_byte = (uint8_t)physLBA;
2413 				*ptrLBA = physLBA_byte;
2414 
2415 				/*
2416 				 * Set flag that Direct Drive I/O is
2417 				 * being done.
2418 				 */
2419 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2420 			}
2421 		}
2422 		return;
2423 	}
2424 
2425 	/*
2426 	 * Handle 10 or 16 byte CDBs.
2427 	 */
2428 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2429 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2430 	    (CDB[0] == WRITE_16))) {
2431 		/*
2432 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2433 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2434 		 * the else section.  10-byte CDB's are OK.
2435 		 */
2436 		if ((CDB[0] < READ_16) ||
2437 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2438 			/*
2439 			 * Get the transfer size in blocks.
2440 			 */
2441 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2442 
2443 			/*
2444 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2445 			 * LBA in the CDB depending on command.
2446 			 */
2447 			lba_idx = (CDB[0] < READ_16) ? 2 : 6;
2448 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2449 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2450 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2451 			    (uint64_t)CDB[lba_idx + 3];
2452 
2453 			/*
2454 			 * Check that LBA range for I/O does not exceed volume's
2455 			 * MaxLBA.
2456 			 */
2457 			if ((virtLBA + (uint64_t)io_size - 1) <=
2458 			    sc->DD_max_lba) {
2459 				/*
2460 				 * Check if the I/O crosses a stripe boundary.
2461 				 * If not, translate the virtual LBA to a
2462 				 * physical LBA and set the DevHandle for the
2463 				 * PhysDisk to be used.  If it does cross a
2464 				 * boundry, do normal I/O.  To get the right
2465 				 * DevHandle to use, get the map number for the
2466 				 * column, then use that map number to look up
2467 				 * the DevHandle of the PhysDisk.
2468 				 */
2469 				stripe_offset = (uint32_t)virtLBA &
2470 				    (sc->DD_stripe_size - 1);
2471 				if ((stripe_offset + io_size) <=
2472 				    sc->DD_stripe_size) {
2473 					physLBA = (uint32_t)virtLBA >>
2474 					    sc->DD_stripe_exponent;
2475 					stripe_unit = physLBA /
2476 					    sc->DD_num_phys_disks;
2477 					column = physLBA %
2478 					    sc->DD_num_phys_disks;
2479 					pIO_req->DevHandle =
2480 					    sc->DD_column_map[column].
2481 					    dev_handle;
2482 					cm->cm_desc.SCSIIO.DevHandle =
2483 					    pIO_req->DevHandle;
2484 
2485 					physLBA = (stripe_unit <<
2486 					    sc->DD_stripe_exponent) +
2487 					    stripe_offset;
2488 					ptrLBA =
2489 					    &pIO_req->CDB.CDB32[lba_idx];
2490 					physLBA_byte = (uint8_t)(physLBA >> 24);
2491 					*ptrLBA = physLBA_byte;
2492 					ptrLBA =
2493 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2494 					physLBA_byte = (uint8_t)(physLBA >> 16);
2495 					*ptrLBA = physLBA_byte;
2496 					ptrLBA =
2497 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2498 					physLBA_byte = (uint8_t)(physLBA >> 8);
2499 					*ptrLBA = physLBA_byte;
2500 					ptrLBA =
2501 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2502 					physLBA_byte = (uint8_t)physLBA;
2503 					*ptrLBA = physLBA_byte;
2504 
2505 					/*
2506 					 * Set flag that Direct Drive I/O is
2507 					 * being done.
2508 					 */
2509 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2510 				}
2511 			}
2512 		} else {
2513 			/*
2514 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2515 			 * 0.  Get the transfer size in blocks.
2516 			 */
2517 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2518 
2519 			/*
2520 			 * Get virtual LBA.
2521 			 */
2522 			virtLBA = ((uint64_t)CDB[2] << 54) |
2523 			    ((uint64_t)CDB[3] << 48) |
2524 			    ((uint64_t)CDB[4] << 40) |
2525 			    ((uint64_t)CDB[5] << 32) |
2526 			    ((uint64_t)CDB[6] << 24) |
2527 			    ((uint64_t)CDB[7] << 16) |
2528 			    ((uint64_t)CDB[8] << 8) |
2529 			    (uint64_t)CDB[9];
2530 
2531 			/*
2532 			 * Check that LBA range for I/O does not exceed volume's
2533 			 * MaxLBA.
2534 			 */
2535 			if ((virtLBA + (uint64_t)io_size - 1) <=
2536 			    sc->DD_max_lba) {
2537 				/*
2538 				 * Check if the I/O crosses a stripe boundary.
2539 				 * If not, translate the virtual LBA to a
2540 				 * physical LBA and set the DevHandle for the
2541 				 * PhysDisk to be used.  If it does cross a
2542 				 * boundry, do normal I/O.  To get the right
2543 				 * DevHandle to use, get the map number for the
2544 				 * column, then use that map number to look up
2545 				 * the DevHandle of the PhysDisk.
2546 				 */
2547 				stripe_offset = (uint32_t)virtLBA &
2548 				    (sc->DD_stripe_size - 1);
2549 				if ((stripe_offset + io_size) <=
2550 				    sc->DD_stripe_size) {
2551 					physLBA = (uint32_t)(virtLBA >>
2552 					    sc->DD_stripe_exponent);
2553 					stripe_unit = physLBA /
2554 					    sc->DD_num_phys_disks;
2555 					column = physLBA %
2556 					    sc->DD_num_phys_disks;
2557 					pIO_req->DevHandle =
2558 					    sc->DD_column_map[column].
2559 					    dev_handle;
2560 					cm->cm_desc.SCSIIO.DevHandle =
2561 					    pIO_req->DevHandle;
2562 
2563 					physLBA = (stripe_unit <<
2564 					    sc->DD_stripe_exponent) +
2565 					    stripe_offset;
2566 
2567 					/*
2568 					 * Set upper 4 bytes of LBA to 0.  We
2569 					 * assume that the phys disks are less
2570 					 * than 2 TB's in size.  Then, set the
2571 					 * lower 4 bytes.
2572 					 */
2573 					pIO_req->CDB.CDB32[2] = 0;
2574 					pIO_req->CDB.CDB32[3] = 0;
2575 					pIO_req->CDB.CDB32[4] = 0;
2576 					pIO_req->CDB.CDB32[5] = 0;
2577 					ptrLBA = &pIO_req->CDB.CDB32[6];
2578 					physLBA_byte = (uint8_t)(physLBA >> 24);
2579 					*ptrLBA = physLBA_byte;
2580 					ptrLBA = &pIO_req->CDB.CDB32[7];
2581 					physLBA_byte = (uint8_t)(physLBA >> 16);
2582 					*ptrLBA = physLBA_byte;
2583 					ptrLBA = &pIO_req->CDB.CDB32[8];
2584 					physLBA_byte = (uint8_t)(physLBA >> 8);
2585 					*ptrLBA = physLBA_byte;
2586 					ptrLBA = &pIO_req->CDB.CDB32[9];
2587 					physLBA_byte = (uint8_t)physLBA;
2588 					*ptrLBA = physLBA_byte;
2589 
2590 					/*
2591 					 * Set flag that Direct Drive I/O is
2592 					 * being done.
2593 					 */
2594 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2595 				}
2596 			}
2597 		}
2598 	}
2599 }
2600 
2601 #if __FreeBSD_version >= 900026
2602 static void
2603 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2604 {
2605 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2606 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2607 	uint64_t sasaddr;
2608 	union ccb *ccb;
2609 
2610 	ccb = cm->cm_complete_data;
2611 
2612 	/*
2613 	 * Currently there should be no way we can hit this case.  It only
2614 	 * happens when we have a failure to allocate chain frames, and SMP
2615 	 * commands require two S/G elements only.  That should be handled
2616 	 * in the standard request size.
2617 	 */
2618 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2619 		mps_printf(sc, "%s: cm_flags = %#x on SMP request!\n",
2620 			   __func__, cm->cm_flags);
2621 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2622 		goto bailout;
2623         }
2624 
2625 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2626 	if (rpl == NULL) {
2627 		mps_dprint(sc, MPS_INFO, "%s: NULL cm_reply!\n", __func__);
2628 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2629 		goto bailout;
2630 	}
2631 
2632 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2633 	sasaddr = le32toh(req->SASAddress.Low);
2634 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2635 
2636 	if ((rpl->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS ||
2637 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2638 		mps_dprint(sc, MPS_INFO, "%s: IOCStatus %04x SASStatus %02x\n",
2639 		    __func__, rpl->IOCStatus, rpl->SASStatus);
2640 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2641 		goto bailout;
2642 	}
2643 
2644 	mps_dprint(sc, MPS_INFO, "%s: SMP request to SAS address "
2645 		   "%#jx completed successfully\n", __func__,
2646 		   (uintmax_t)sasaddr);
2647 
2648 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2649 		ccb->ccb_h.status = CAM_REQ_CMP;
2650 	else
2651 		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2652 
2653 bailout:
2654 	/*
2655 	 * We sync in both directions because we had DMAs in the S/G list
2656 	 * in both directions.
2657 	 */
2658 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2659 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2660 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2661 	mps_free_command(sc, cm);
2662 	xpt_done(ccb);
2663 }
2664 
/*
 * Build and dispatch an SMP passthrough request for the given CCB to the
 * device at 'sasaddr'.  The request and response buffers from the CCB are
 * packaged into a two-element uio so that a single mps_map_command() call
 * maps both directions.  On any setup failure the CCB is completed here
 * with an error status; on success completion happens asynchronously via
 * mpssas_smpio_complete().
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
		mps_printf(sc, "%s: physical addresses not supported\n",
			   __func__);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}

	/*
	 * If the user wants to send an S/G list, check to make sure they
	 * have single buffers.
	 */
	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_printf(sc, "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
	} else {
		/* Plain single virtual buffers for both directions. */
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_printf(sc, "%s: cannot allocate command\n", __func__);
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	/*
	 * NOTE(review): RequestDataLength is stored without byte swapping,
	 * while SASAddress below uses htole32 — confirm whether htole16 is
	 * needed here for big-endian hosts.
	 */
	req->RequestDataLength = ccb->smpio.smp_request_len;
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_INFO, "%s: sending SMP request to SAS "
		   "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the SMP request out, iovec 1 the response back. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = req->RequestDataLength;
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_printf(sc, "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	xpt_done(ccb);
	return;

}
2832 
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address of the SMP target
 * (either the device itself, or its parent expander) and hand the request
 * off to mpssas_send_smpcmd().  On any failure the CCB is completed here
 * with an appropriate error status.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_printf(sc, "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly; otherwise we have to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_printf(sc, "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		/* Look the parent up by handle in the target array. */
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_printf(sc, "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_printf(sc, "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/*
		 * New probe path: the parent's devinfo/SAS address were
		 * cached on the target at discovery time, so no lookup is
		 * needed.
		 */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_printf(sc, "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_printf(sc, "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_printf(sc, "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
2959 #endif //__FreeBSD_version >= 900026
2960 
2961 static void
2962 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
2963 {
2964 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2965 	struct mps_softc *sc;
2966 	struct mps_command *tm;
2967 	struct mpssas_target *targ;
2968 
2969 	mps_dprint(sassc->sc, MPS_TRACE, __func__);
2970 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
2971 
2972 	sc = sassc->sc;
2973 	tm = mps_alloc_command(sc);
2974 	if (tm == NULL) {
2975 		mps_printf(sc, "comand alloc failure in mpssas_action_resetdev\n");
2976 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2977 		xpt_done(ccb);
2978 		return;
2979 	}
2980 
2981 	targ = &sassc->targets[ccb->ccb_h.target_id];
2982 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2983 	req->DevHandle = targ->handle;
2984 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2985 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2986 
2987 	/* SAS Hard Link Reset / SATA Link Reset */
2988 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
2989 
2990 	tm->cm_data = NULL;
2991 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2992 	tm->cm_complete = mpssas_resetdev_complete;
2993 	tm->cm_complete_data = ccb;
2994 	mps_map_command(sc, tm);
2995 }
2996 
2997 static void
2998 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
2999 {
3000 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3001 	union ccb *ccb;
3002 
3003 	mps_dprint(sc, MPS_TRACE, __func__);
3004 	mtx_assert(&sc->mps_mtx, MA_OWNED);
3005 
3006 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3007 	ccb = tm->cm_complete_data;
3008 
3009 	/*
3010 	 * Currently there should be no way we can hit this case.  It only
3011 	 * happens when we have a failure to allocate chain frames, and
3012 	 * task management commands don't have S/G lists.
3013 	 */
3014 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3015 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3016 
3017 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3018 
3019 		mps_printf(sc, "%s: cm_flags = %#x for reset of handle %#04x! "
3020 			   "This should not happen!\n", __func__, tm->cm_flags,
3021 			   req->DevHandle);
3022 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3023 		goto bailout;
3024 	}
3025 
3026 	printf("%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3027 	    resp->IOCStatus, resp->ResponseCode);
3028 
3029 	if (resp->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3030 		ccb->ccb_h.status = CAM_REQ_CMP;
3031 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3032 		    CAM_LUN_WILDCARD);
3033 	}
3034 	else
3035 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3036 
3037 bailout:
3038 
3039 	mpssas_free_tm(sc, tm);
3040 	xpt_done(ccb);
3041 }
3042 
3043 static void
3044 mpssas_poll(struct cam_sim *sim)
3045 {
3046 	struct mpssas_softc *sassc;
3047 
3048 	sassc = cam_sim_softc(sim);
3049 
3050 	if (sassc->sc->mps_debug & MPS_TRACE) {
3051 		/* frequent debug messages during a panic just slow
3052 		 * everything down too much.
3053 		 */
3054 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3055 		sassc->sc->mps_debug &= ~MPS_TRACE;
3056 	}
3057 
3058 	mps_intr_locked(sassc->sc);
3059 }
3060 
3061 static void
3062 mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
3063 {
3064 	struct mpssas_softc *sassc;
3065 	char path_str[64];
3066 
3067 	if (done_ccb == NULL)
3068 		return;
3069 
3070 	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3071 
3072 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3073 
3074 	xpt_path_string(done_ccb->ccb_h.path, path_str, sizeof(path_str));
3075 	mps_dprint(sassc->sc, MPS_INFO, "Completing rescan for %s\n", path_str);
3076 
3077 	xpt_free_path(done_ccb->ccb_h.path);
3078 	xpt_free_ccb(done_ccb);
3079 
3080 #if __FreeBSD_version < 1000006
3081 	/*
3082 	 * Before completing scan, get EEDP stuff for all of the existing
3083 	 * targets.
3084 	 */
3085 	mpssas_check_eedp(sassc);
3086 #endif
3087 
3088 }
3089 
3090 /* thread to handle bus rescans */
3091 static void
3092 mpssas_scanner_thread(void *arg)
3093 {
3094 	struct mpssas_softc *sassc;
3095 	struct mps_softc *sc;
3096 	union ccb	*ccb;
3097 
3098 	sassc = (struct mpssas_softc *)arg;
3099 	sc = sassc->sc;
3100 
3101 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3102 
3103 	mps_lock(sc);
3104 	for (;;) {
3105 		/* Sleep for 1 second and check the queue status*/
3106 		msleep(&sassc->ccb_scanq, &sc->mps_mtx, PRIBIO,
3107 		       "mps_scanq", 1 * hz);
3108 		if (sassc->flags & MPSSAS_SHUTDOWN) {
3109 			mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
3110 			break;
3111 		}
3112 next_work:
3113 		// Get first work.
3114 		ccb = (union ccb *)TAILQ_FIRST(&sassc->ccb_scanq);
3115 		if (ccb == NULL)
3116 			continue;
3117 		// Got first work.
3118 		TAILQ_REMOVE(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
3119 		xpt_action(ccb);
3120 		if (sassc->flags & MPSSAS_SHUTDOWN) {
3121 			mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
3122 			break;
3123 		}
3124 		goto next_work;
3125 	}
3126 
3127 	sassc->flags &= ~MPSSAS_SCANTHREAD;
3128 	wakeup(&sassc->flags);
3129 	mps_unlock(sc);
3130 	mps_dprint(sc, MPS_TRACE, "Scanner exiting\n");
3131 	mps_kproc_exit(0);
3132 }
3133 
3134 static void
3135 mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb)
3136 {
3137 	char path_str[64];
3138 
3139 	mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
3140 
3141 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3142 
3143 	if (ccb == NULL)
3144 		return;
3145 
3146 	xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
3147 	mps_dprint(sassc->sc, MPS_INFO, "Queueing rescan for %s\n", path_str);
3148 
3149 	/* Prepare request */
3150 	ccb->ccb_h.ppriv_ptr1 = sassc;
3151 	ccb->ccb_h.cbfcnp = mpssas_rescan_done;
3152 	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, MPS_PRIORITY_XPT);
3153 	TAILQ_INSERT_TAIL(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
3154 	wakeup(&sassc->ccb_scanq);
3155 }
3156 
3157 #if __FreeBSD_version >= 1000006
/*
 * CAM async event callback (newer CAM versions).  Watches for
 * AC_ADVINFO_CHANGED events carrying long read-capacity data and uses
 * them to track per-LUN EEDP (protection information) formatting state.
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We're only interested in devices that are attached to
		 * this controller.
		 */
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* See whether we already track this LUN on the target. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			/* First time seeing this LUN; add a tracking entry. */
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_FAULT, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read-capacity data from the
		 * transport layer via an XPT_DEV_ADVINFO CCB.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = 0;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/*
		 * PROT_EN set in the read-capacity data means the LUN is
		 * formatted with protection information (EEDP).
		 */
		if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
	default:
		break;
	}
}
3250 #else /* __FreeBSD_version >= 1000006 */
3251 
/*
 * EEDP probing for older CAM versions (no AC_ADVINFO_CHANGED support).
 * Walks every target/LUN behind this SIM and issues a READ CAPACITY 16
 * command to each; mpssas_read_cap_done() records whether the LUN is
 * EEDP-formatted.  rcap_buf and the CCB are freed by the completion
 * handler on the success path, or here on every failure path.
 */
static void
mpssas_check_eedp(struct mpssas_softc *sassc)
{
	struct mps_softc *sc = sassc->sc;
	struct ccb_scsiio *csio;
	struct scsi_read_capacity_16 *scsi_cmd;
	struct scsi_read_capacity_eedp *rcap_buf;
	union ccb *ccb;
	path_id_t pathid = cam_sim_path(sassc->sim);
	target_id_t targetid;
	lun_id_t lunid;
	struct cam_periph *found_periph;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	uint8_t	found_lun;

	/*
	 * Issue a READ CAPACITY 16 command to each LUN of each target.  This
	 * info is used to determine if the LUN is formatted for EEDP support.
	 */
	for (targetid = 0; targetid < sc->facts->MaxTargets; targetid++) {
		target = &sassc->targets[targetid];
		if (target->handle == 0x0) {
			/* No device at this target ID. */
			continue;
		}

		lunid = 0;
		do {
			/* Buffer for the READ CAPACITY 16 response data. */
			rcap_buf =
			    malloc(sizeof(struct scsi_read_capacity_eedp),
			    M_MPT2, M_NOWAIT | M_ZERO);
			if (rcap_buf == NULL) {
				mps_dprint(sc, MPS_FAULT, "Unable to alloc read "
				    "capacity buffer for EEDP support.\n");
				return;
			}
			ccb = xpt_alloc_ccb_nowait();
			if (ccb == NULL) {
				mps_dprint(sc, MPS_FAULT, "Unable to alloc CCB "
				    "for EEDP support.\n");
				free(rcap_buf, M_MPT2);
				return;
			}

			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
			    pathid, targetid, lunid) != CAM_REQ_CMP) {
				mps_dprint(sc, MPS_FAULT, "Unable to create "
				    "path for EEDP support\n");
				free(rcap_buf, M_MPT2);
				xpt_free_ccb(ccb);
				return;
			}

			/*
			 * If a periph is returned, the LUN exists.  Create an
			 * entry in the target's LUN list.
			 */
			if ((found_periph = cam_periph_find(ccb->ccb_h.path,
			    NULL)) != NULL) {
				/*
				 * If LUN is already in list, don't create a new
				 * one.
				 */
				found_lun = FALSE;
				SLIST_FOREACH(lun, &target->luns, lun_link) {
					if (lun->lun_id == lunid) {
						found_lun = TRUE;
						break;
					}
				}
				if (!found_lun) {
					lun = malloc(sizeof(struct mpssas_lun),
					    M_MPT2, M_NOWAIT | M_ZERO);
					if (lun == NULL) {
						mps_dprint(sc, MPS_FAULT,
						    "Unable to alloc LUN for "
						    "EEDP support.\n");
						free(rcap_buf, M_MPT2);
						xpt_free_path(ccb->ccb_h.path);
						xpt_free_ccb(ccb);
						return;
					}
					lun->lun_id = lunid;
					SLIST_INSERT_HEAD(&target->luns, lun,
					    lun_link);
				}
				lunid++;

				/*
				 * Issue a READ CAPACITY 16 command for the LUN.
				 * The mpssas_read_cap_done function will load
				 * the read cap info into the LUN struct.
				 */
				csio = &ccb->csio;
				csio->ccb_h.func_code = XPT_SCSI_IO;
				csio->ccb_h.flags = CAM_DIR_IN;
				csio->ccb_h.retry_count = 4;
				csio->ccb_h.cbfcnp = mpssas_read_cap_done;
				csio->ccb_h.timeout = 60000;
				csio->data_ptr = (uint8_t *)rcap_buf;
				csio->dxfer_len = sizeof(struct
				    scsi_read_capacity_eedp);
				csio->sense_len = MPS_SENSE_LEN;
				csio->cdb_len = sizeof(*scsi_cmd);
				csio->tag_action = MSG_SIMPLE_Q_TAG;

				/*
				 * Build the CDB by hand: opcode 0x9E is
				 * SERVICE ACTION IN(16); byte 13 is the
				 * allocation length.
				 */
				scsi_cmd = (struct scsi_read_capacity_16 *)
				    &csio->cdb_io.cdb_bytes;
				bzero(scsi_cmd, sizeof(*scsi_cmd));
				scsi_cmd->opcode = 0x9E;
				scsi_cmd->service_action = SRC16_SERVICE_ACTION;
				((uint8_t *)scsi_cmd)[13] = sizeof(struct
				    scsi_read_capacity_eedp);

				/*
				 * Set the path, target and lun IDs for the READ
				 * CAPACITY request.
				 */
				ccb->ccb_h.path_id =
				    xpt_path_path_id(ccb->ccb_h.path);
				ccb->ccb_h.target_id =
				    xpt_path_target_id(ccb->ccb_h.path);
				ccb->ccb_h.target_lun =
				    xpt_path_lun_id(ccb->ccb_h.path);

				/* Completion handler needs the softc. */
				ccb->ccb_h.ppriv_ptr1 = sassc;
				xpt_action(ccb);
			} else {
				/* No periph: this LUN doesn't exist; stop. */
				free(rcap_buf, M_MPT2);
				xpt_free_path(ccb->ccb_h.path);
				xpt_free_ccb(ccb);
			}
		} while (found_periph);
	}
}
3387 
3388 
/*
 * Completion handler for the READ CAPACITY 16 commands issued by
 * mpssas_check_eedp().  Records per-LUN EEDP state on the matching LUN
 * entry, then frees the response buffer, path, and CCB allocated by
 * mpssas_check_eedp().
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver needs to release the devq itself because this SCSI
	 * command was generated internally by the driver.  Currently this
	 * is the only place the driver issues an internal SCSI command; if
	 * more are added in the future, they must also release the devq
	 * themselves, since such commands never go back to a cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* PROT_EN bit set means the LUN is EEDP-formatted. */
		if (rcap_buf->protect & 0x01) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3452 #endif /* __FreeBSD_version >= 1000006 */
3453 
3454 int
3455 mpssas_startup(struct mps_softc *sc)
3456 {
3457 	struct mpssas_softc *sassc;
3458 
3459 	/*
3460 	 * Send the port enable message and set the wait_for_port_enable flag.
3461 	 * This flag helps to keep the simq frozen until all discovery events
3462 	 * are processed.
3463 	 */
3464 	sassc = sc->sassc;
3465 	mpssas_startup_increment(sassc);
3466 	sc->wait_for_port_enable = 1;
3467 	mpssas_send_portenable(sc);
3468 	return (0);
3469 }
3470 
3471 static int
3472 mpssas_send_portenable(struct mps_softc *sc)
3473 {
3474 	MPI2_PORT_ENABLE_REQUEST *request;
3475 	struct mps_command *cm;
3476 
3477 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3478 
3479 	if ((cm = mps_alloc_command(sc)) == NULL)
3480 		return (EBUSY);
3481 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3482 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3483 	request->MsgFlags = 0;
3484 	request->VP_ID = 0;
3485 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3486 	cm->cm_complete = mpssas_portenable_complete;
3487 	cm->cm_data = NULL;
3488 	cm->cm_sge = NULL;
3489 
3490 	mps_map_command(sc, cm);
3491 	mps_dprint(sc, MPS_TRACE,
3492 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3493 	    cm, cm->cm_req, cm->cm_complete);
3494 	return (0);
3495 }
3496 
/*
 * Completion handler for the port enable request sent by
 * mpssas_send_portenable().  Discovery is finished at this point:
 * tear down the config intrhook, apply WarpDrive hide/expose policy,
 * and release the simq that was frozen before port enable.
 */
static void
mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_PORT_ENABLE_REPLY *reply;
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	int i;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
	sassc = sc->sassc;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * port enable commands don't have S/G lists.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for port enable! "
			   "This should not happen!\n", __func__, cm->cm_flags);
	}

	/* A NULL reply can happen if the command was aborted. */
	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
	if (reply == NULL)
		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
	else if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS)
		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");

	mps_free_command(sc, cm);
	/* Boot can proceed now that discovery is done. */
	if (sc->mps_ich.ich_arg != NULL) {
		mps_dprint(sc, MPS_INFO, "disestablish config intrhook\n");
		config_intrhook_disestablish(&sc->mps_ich);
		sc->mps_ich.ich_arg = NULL;
	}

	/*
	 * Get WarpDrive info after discovery is complete but before the scan
	 * starts.  At this point, all devices are ready to be exposed to the
	 * OS.  If devices should be hidden instead, take them out of the
	 * 'targets' array before the scan.  The devinfo for a disk will have
	 * some info and a volume's will be 0.  Use that to remove disks.
	 */
	mps_wd_config_pages(sc);
	if (((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE)
	  && (sc->WD_hide_expose == MPS_WD_HIDE_ALWAYS))
	 || (sc->WD_valid_config && (sc->WD_hide_expose ==
	    MPS_WD_HIDE_IF_VOLUME))) {
		for (i = 0; i < sassc->sc->facts->MaxTargets; i++) {
			target = &sassc->targets[i];
			/* Non-zero devinfo identifies a disk; hide it. */
			if (target->devinfo) {
				target->devinfo = 0x0;
				target->encl_handle = 0x0;
				target->encl_slot = 0x0;
				target->handle = 0x0;
				target->tid = 0x0;
				target->linkrate = 0x0;
				target->flags = 0x0;
			}
		}
	}

	/*
	 * Done waiting for port enable to complete.  Decrement the refcount.
	 * If refcount is 0, discovery is complete and a rescan of the bus can
	 * take place.  Since the simq was explicitly frozen before port
	 * enable, it must be explicitly released here to keep the
	 * freeze/release count in sync.
	 */
	sc->wait_for_port_enable = 0;
	sc->port_enable_complete = 1;
	mpssas_startup_decrement(sassc);
	xpt_release_simq(sassc->sim, 1);
}
3570 
3571