xref: /freebsd/sys/dev/mps/mps_sas.c (revision 0572ccaa4543b0abef8ef81e384c1d04de9f3da1)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011, 2012 LSI Corp.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * LSI MPT-Fusion Host Adapter FreeBSD
28  *
29  * $FreeBSD$
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 /* Communications core for LSI MPT2 */
36 
37 /* TODO Move headers to mpsvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
44 #include <sys/bus.h>
45 #include <sys/conf.h>
46 #include <sys/bio.h>
47 #include <sys/malloc.h>
48 #include <sys/uio.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/sbuf.h>
55 
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58 #include <sys/rman.h>
59 
60 #include <machine/stdarg.h>
61 
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_xpt.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #if __FreeBSD_version >= 900026
73 #include <cam/scsi/smp_all.h>
74 #endif
75 
76 #include <dev/mps/mpi/mpi2_type.h>
77 #include <dev/mps/mpi/mpi2.h>
78 #include <dev/mps/mpi/mpi2_ioc.h>
79 #include <dev/mps/mpi/mpi2_sas.h>
80 #include <dev/mps/mpi/mpi2_cnfg.h>
81 #include <dev/mps/mpi/mpi2_init.h>
82 #include <dev/mps/mpi/mpi2_tool.h>
83 #include <dev/mps/mps_ioctl.h>
84 #include <dev/mps/mpsvar.h>
85 #include <dev/mps/mps_table.h>
86 #include <dev/mps/mps_sas.h>
87 
88 #define MPSSAS_DISCOVERY_TIMEOUT	20
89 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
90 
91 /*
92  * static array to check SCSI OpCode for EEDP protection bits
93  */
94 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
95 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
96 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
97 static uint8_t op_code_prot[256] = {
98 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
101 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
107 	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
109 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
114 };
115 
116 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
117 
118 static void mpssas_discovery_timeout(void *data);
119 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mpssas_poll(struct cam_sim *sim);
123 static void mpssas_scsiio_timeout(void *data);
124 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
125 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
126     struct mps_command *cm, union ccb *ccb);
127 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
128 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
129 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
130 #if __FreeBSD_version >= 900026
131 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
132 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
133 			       uint64_t sasaddr);
134 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
135 #endif //FreeBSD_version >= 900026
136 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
137 static int  mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
138 static int  mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
139 static void mpssas_async(void *callback_arg, uint32_t code,
140 			 struct cam_path *path, void *arg);
141 #if (__FreeBSD_version < 901503) || \
142     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
143 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
144 			      struct ccb_getdev *cgd);
145 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
146 #endif
147 static int mpssas_send_portenable(struct mps_softc *sc);
148 static void mpssas_portenable_complete(struct mps_softc *sc,
149     struct mps_command *cm);
150 
151 struct mpssas_target *
152 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
153 {
154 	struct mpssas_target *target;
155 	int i;
156 
157 	for (i = start; i < sassc->maxtargets; i++) {
158 		target = &sassc->targets[i];
159 		if (target->handle == handle)
160 			return (target);
161 	}
162 
163 	return (NULL);
164 }
165 
166 /* we need to freeze the simq during attach and diag reset, to avoid failing
167  * commands before device handles have been found by discovery.  Since
168  * discovery involves reading config pages and possibly sending commands,
169  * discovery actions may continue even after we receive the end of discovery
170  * event, so refcount discovery actions instead of assuming we can unfreeze
171  * the simq when we get the event.
172  */
173 void
174 mpssas_startup_increment(struct mpssas_softc *sassc)
175 {
176 	MPS_FUNCTRACE(sassc->sc);
177 
178 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
179 		if (sassc->startup_refcount++ == 0) {
180 			/* just starting, freeze the simq */
181 			mps_dprint(sassc->sc, MPS_INIT,
182 			    "%s freezing simq\n", __func__);
183 #if __FreeBSD_version >= 1000039
184 			xpt_hold_boot();
185 #endif
186 			xpt_freeze_simq(sassc->sim, 1);
187 		}
188 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
189 		    sassc->startup_refcount);
190 	}
191 }
192 
193 void
194 mpssas_startup_decrement(struct mpssas_softc *sassc)
195 {
196 	MPS_FUNCTRACE(sassc->sc);
197 
198 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
199 		if (--sassc->startup_refcount == 0) {
200 			/* finished all discovery-related actions, release
201 			 * the simq and rescan for the latest topology.
202 			 */
203 			mps_dprint(sassc->sc, MPS_INIT,
204 			    "%s releasing simq\n", __func__);
205 			sassc->flags &= ~MPSSAS_IN_STARTUP;
206 			xpt_release_simq(sassc->sim, 1);
207 #if __FreeBSD_version >= 1000039
208 			xpt_release_boot();
209 #else
210 			mpssas_rescan_target(sassc->sc, NULL);
211 #endif
212 		}
213 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
214 		    sassc->startup_refcount);
215 	}
216 }
217 
218 /* LSI's firmware requires us to stop sending commands when we're doing task
219  * management, so refcount the TMs and keep the simq frozen when any are in
220  * use.
221  */
222 struct mps_command *
223 mpssas_alloc_tm(struct mps_softc *sc)
224 {
225 	struct mps_command *tm;
226 
227 	MPS_FUNCTRACE(sc);
228 	tm = mps_alloc_high_priority_command(sc);
229 	if (tm != NULL) {
230 		if (sc->sassc->tm_count++ == 0) {
231 			mps_dprint(sc, MPS_RECOVERY,
232 			    "%s freezing simq\n", __func__);
233 			xpt_freeze_simq(sc->sassc->sim, 1);
234 		}
235 		mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
236 		    sc->sassc->tm_count);
237 	}
238 	return tm;
239 }
240 
241 void
242 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
243 {
244 	mps_dprint(sc, MPS_TRACE, "%s", __func__);
245 	if (tm == NULL)
246 		return;
247 
248 	/* if there are no TMs in use, we can release the simq.  We use our
249 	 * own refcount so that it's easier for a diag reset to cleanup and
250 	 * release the simq.
251 	 */
252 	if (--sc->sassc->tm_count == 0) {
253 		mps_dprint(sc, MPS_RECOVERY, "%s releasing simq\n", __func__);
254 		xpt_release_simq(sc->sassc->sim, 1);
255 	}
256 	mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
257 	    sc->sassc->tm_count);
258 
259 	mps_free_high_priority_command(sc, tm);
260 }
261 
262 void
263 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
264 {
265 	struct mpssas_softc *sassc = sc->sassc;
266 	path_id_t pathid;
267 	target_id_t targetid;
268 	union ccb *ccb;
269 
270 	MPS_FUNCTRACE(sc);
271 	pathid = cam_sim_path(sassc->sim);
272 	if (targ == NULL)
273 		targetid = CAM_TARGET_WILDCARD;
274 	else
275 		targetid = targ - sassc->targets;
276 
277 	/*
278 	 * Allocate a CCB and schedule a rescan.
279 	 */
280 	ccb = xpt_alloc_ccb_nowait();
281 	if (ccb == NULL) {
282 		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
283 		return;
284 	}
285 
286 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
287 	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
288 		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
289 		xpt_free_ccb(ccb);
290 		return;
291 	}
292 
293 	if (targetid == CAM_TARGET_WILDCARD)
294 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
295 	else
296 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
297 
298 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
299 	xpt_rescan(ccb);
300 }
301 
/*
 * Emit a formatted debug message for a command, prefixed with the CAM
 * path of the command's CCB (or, when no CCB is attached, a synthesized
 * "noperiph" sim:bus:target:lun tuple) plus the command's SMID.  Does
 * nothing unless one of the bits in 'level' is set in the softc's
 * debug mask.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	/*
	 * Fixed-length sbuf backed by the on-stack buffer; overly long
	 * messages are truncated rather than allocating.
	 */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		/* Prefix with the CCB's CAM path string. */
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			/* For SCSI I/O include the CDB and transfer length. */
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/*
		 * No CCB attached: fall back to sim/target/lun bookkeeping.
		 * A missing target prints as 0xFFFFFFFF.
		 */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));

	va_end(ap);
}
347 
348 
349 static void
350 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
351 {
352 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
353 	struct mpssas_target *targ;
354 	uint16_t handle;
355 
356 	MPS_FUNCTRACE(sc);
357 
358 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
359 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
360 	targ = tm->cm_targ;
361 
362 	if (reply == NULL) {
363 		/* XXX retry the remove after the diag reset completes? */
364 		mps_dprint(sc, MPS_FAULT,
365 		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
366 		mpssas_free_tm(sc, tm);
367 		return;
368 	}
369 
370 	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
371 		mps_dprint(sc, MPS_FAULT,
372 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
373 		   reply->IOCStatus, handle);
374 		mpssas_free_tm(sc, tm);
375 		return;
376 	}
377 
378 	mps_dprint(sc, MPS_XINFO,
379 	    "Reset aborted %u commands\n", reply->TerminationCount);
380 	mps_free_reply(sc, tm->cm_reply_data);
381 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
382 
383 	mps_dprint(sc, MPS_XINFO,
384 	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
385 
386 	/*
387 	 * Don't clear target if remove fails because things will get confusing.
388 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
389 	 * this target id if possible, and so we can assign the same target id
390 	 * to this device if it comes back in the future.
391 	 */
392 	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
393 		targ = tm->cm_targ;
394 		targ->handle = 0x0;
395 		targ->encl_handle = 0x0;
396 		targ->encl_slot = 0x0;
397 		targ->exp_dev_handle = 0x0;
398 		targ->phy_num = 0x0;
399 		targ->linkrate = 0x0;
400 		targ->devinfo = 0x0;
401 		targ->flags = 0x0;
402 	}
403 
404 	mpssas_free_tm(sc, tm);
405 }
406 
407 
408 /*
409  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
410  * Otherwise Volume Delete is same as Bare Drive Removal.
411  */
412 void
413 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
414 {
415 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
416 	struct mps_softc *sc;
417 	struct mps_command *cm;
418 	struct mpssas_target *targ = NULL;
419 
420 	MPS_FUNCTRACE(sassc->sc);
421 	sc = sassc->sc;
422 
423 #ifdef WD_SUPPORT
424 	/*
425 	 * If this is a WD controller, determine if the disk should be exposed
426 	 * to the OS or not.  If disk should be exposed, return from this
427 	 * function without doing anything.
428 	 */
429 	if (sc->WD_available && (sc->WD_hide_expose ==
430 	    MPS_WD_EXPOSE_ALWAYS)) {
431 		return;
432 	}
433 #endif //WD_SUPPORT
434 
435 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
436 	if (targ == NULL) {
437 		/* FIXME: what is the action? */
438 		/* We don't know about this device? */
439 		mps_dprint(sc, MPS_ERROR,
440 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
441 		return;
442 	}
443 
444 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
445 
446 	cm = mpssas_alloc_tm(sc);
447 	if (cm == NULL) {
448 		mps_dprint(sc, MPS_ERROR,
449 		    "%s: command alloc failure\n", __func__);
450 		return;
451 	}
452 
453 	mpssas_rescan_target(sc, targ);
454 
455 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
456 	req->DevHandle = targ->handle;
457 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
458 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
459 
460 	/* SAS Hard Link Reset / SATA Link Reset */
461 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
462 
463 	cm->cm_targ = targ;
464 	cm->cm_data = NULL;
465 	cm->cm_desc.HighPriority.RequestFlags =
466 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
467 	cm->cm_complete = mpssas_remove_volume;
468 	cm->cm_complete_data = (void *)(uintptr_t)handle;
469 	mps_map_command(sc, cm);
470 }
471 
472 /*
473  * The MPT2 firmware performs debounce on the link to avoid transient link
474  * errors and false removals.  When it does decide that link has been lost
475  * and a device need to go away, it expects that the host will perform a
476  * target reset and then an op remove.  The reset has the side-effect of
477  * aborting any outstanding requests for the device, which is required for
478  * the op-remove to succeed.  It's not clear if the host should check for
479  * the device coming back alive after the reset.
480  */
481 void
482 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
483 {
484 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
485 	struct mps_softc *sc;
486 	struct mps_command *cm;
487 	struct mpssas_target *targ = NULL;
488 
489 	MPS_FUNCTRACE(sassc->sc);
490 
491 	sc = sassc->sc;
492 
493 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
494 	if (targ == NULL) {
495 		/* FIXME: what is the action? */
496 		/* We don't know about this device? */
497 		mps_dprint(sc, MPS_ERROR,
498 		    "%s : invalid handle 0x%x \n", __func__, handle);
499 		return;
500 	}
501 
502 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
503 
504 	cm = mpssas_alloc_tm(sc);
505 	if (cm == NULL) {
506 		mps_dprint(sc, MPS_ERROR,
507 		    "%s: command alloc failure\n", __func__);
508 		return;
509 	}
510 
511 	mpssas_rescan_target(sc, targ);
512 
513 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
514 	memset(req, 0, sizeof(*req));
515 	req->DevHandle = htole16(targ->handle);
516 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
517 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
518 
519 	/* SAS Hard Link Reset / SATA Link Reset */
520 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
521 
522 	cm->cm_targ = targ;
523 	cm->cm_data = NULL;
524 	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
525 	cm->cm_complete = mpssas_remove_device;
526 	cm->cm_complete_data = (void *)(uintptr_t)handle;
527 	mps_map_command(sc, cm);
528 }
529 
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_remove().  On success, the same command is reused to
 * send the MPI2_SAS_OP_REMOVE_DEVICE IO unit control request (completed
 * by mpssas_remove_complete), and any commands still queued on the
 * target are completed back to CAM with CAM_DEV_NOT_THERE.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	/* The device handle was stashed in cm_complete_data at issue time. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	/* Reply fields are little-endian; convert before comparing. */
	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_FAULT,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	/* Release the reply frame now; clear cm_reply so it isn't re-freed. */
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	/*
	 * Complete any commands the reset aborted that are still queued on
	 * the target.  Note 'tm' is reused as the loop cursor here; for
	 * these SCSI commands cm_complete_data holds the originating CCB.
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		mpssas_scsiio_complete(sc, tm);
	}
}
604 
/*
 * Completion handler for the MPI2_SAS_OP_REMOVE_DEVICE request sent by
 * mpssas_remove_device().  On firmware success, clears the target's
 * handle/enclosure/phy bookkeeping and frees its LUN list; devname and
 * sasaddr are deliberately left intact (see comment below).
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	/* The device handle was stashed in cm_complete_data at issue time. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Free the per-LUN state hanging off the target. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
671 
672 static int
673 mpssas_register_events(struct mps_softc *sc)
674 {
675 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
676 
677 	bzero(events, 16);
678 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
679 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
680 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
681 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
682 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
683 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
684 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
685 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
686 	setbit(events, MPI2_EVENT_IR_VOLUME);
687 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
688 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
689 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
690 
691 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
692 	    &sc->sassc->mpssas_eh);
693 
694 	return (0);
695 }
696 
/*
 * Attach the SAS/CAM side of the driver: allocate the mpssas_softc and
 * target array, create the simq/SIM, start the event-handling
 * taskqueue, register the SCSI bus with CAM, arm the discovery
 * startup-freeze refcount, and register async + firmware event
 * handlers.  On any error, mps_detach_sas() is called to unwind
 * whatever was set up.  Returns 0 on success or an errno.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0;

	MPS_FUNCTRACE(sc);

	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	/*
	 * NOTE(review): with M_WAITOK, malloc(9) should not return NULL,
	 * so this check (and the one below) is likely dead code — confirm.
	 */
	if(!sassc) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc->targets) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		free(sassc, M_MPT2);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* One simq slot per firmware request so CAM can't over-queue us. */
	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);

	/* Run the task queue with lowest priority */
	taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
		    error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	/* Takes the first discovery ref, freezing the simq. */
	mpssas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
	sassc->discovery_timeouts = 0;

	sassc->tm_count = 0;

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

		/* Older CAM lacks AC_ADVINFO_CHANGED; fall back. */
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED;
#else
		event = AC_FOUND_DEVICE;
#endif
		status = xpt_register_async(event, mpssas_async, sc,
					    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);
	return (error);
}
830 
/*
 * Tear down the SAS/CAM side of the driver in reverse of
 * mps_attach_sas(): deregister firmware events, drain the taskqueue
 * (with the lock unheld), undo the async registration, release the
 * startup simq freeze if still held, deregister the bus and free the
 * SIM/simq, then free the per-target LUN lists and the softc.  Safe to
 * call on a partially attached instance.  Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Startup freeze still pending: balance it before deregistering. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	sassc->flags |= MPSSAS_SHUTDOWN;
	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free the LUN lists hanging off every target. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
891 
892 void
893 mpssas_discovery_end(struct mpssas_softc *sassc)
894 {
895 	struct mps_softc *sc = sassc->sc;
896 
897 	MPS_FUNCTRACE(sc);
898 
899 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
900 		callout_stop(&sassc->discovery_callout);
901 
902 }
903 
904 static void
905 mpssas_discovery_timeout(void *data)
906 {
907 	struct mpssas_softc *sassc = data;
908 	struct mps_softc *sc;
909 
910 	sc = sassc->sc;
911 	MPS_FUNCTRACE(sc);
912 
913 	mps_lock(sc);
914 	mps_dprint(sc, MPS_INFO,
915 	    "Timeout waiting for discovery, interrupts may not be working!\n");
916 	sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;
917 
918 	/* Poll the hardware for events in case interrupts aren't working */
919 	mps_intr_locked(sc);
920 
921 	mps_dprint(sassc->sc, MPS_INFO,
922 	    "Finished polling after discovery timeout at %d\n", ticks);
923 
924 	if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
925 		mpssas_discovery_end(sassc);
926 	} else {
927 		if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
928 			sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
929 			callout_reset(&sassc->discovery_callout,
930 			    MPSSAS_DISCOVERY_TIMEOUT * hz,
931 			    mpssas_discovery_timeout, sassc);
932 			sassc->discovery_timeouts++;
933 		} else {
934 			mps_dprint(sassc->sc, MPS_FAULT,
935 			    "Discovery timed out, continuing.\n");
936 			sassc->flags &= ~MPSSAS_IN_DISCOVERY;
937 			mpssas_discovery_end(sassc);
938 		}
939 	}
940 
941 	mps_unlock(sc);
942 }
943 
/*
 * CAM action entry point for this SIM.  Dispatches on the ccb function
 * code.  Most cases complete the ccb inline and fall through to the
 * final xpt_done(); XPT_SCSI_IO, XPT_SMP_IO, and XPT_RESET_DEV are
 * handed to dedicated handlers that complete the ccb asynchronously, so
 * those cases return directly.  Called with the controller mutex held.
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report the HBA's capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;
		/* The initiator claims the highest target id on the bus. */
		cpi->initiator_id = sassc->maxtargets - 1;
		/*
		 * NOTE(review): strncpy does not NUL-terminate when the
		 * source fills the buffer; cam_sim_name() output could in
		 * principle exceed DEV_IDLEN — strlcpy would be safer here.
		 */
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXX KDM where does this number come from?
		 */
		cpi->maxio = 256 * 1024;
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A device handle of zero means no device at this target. */
		if (targ->handle == 0x0) {
			cts->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Translate the negotiated MPI link rate code to kb/s. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			/* Unknown rate code: report speed as not valid. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		cts->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		/* resetdev handler completes the ccb itself. */
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* Not implemented; claim success so CAM keeps going. */
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_SCSI_IO:
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	}
	xpt_done(ccb);

}
1068 
1069 static void
1070 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1071     target_id_t target_id, lun_id_t lun_id)
1072 {
1073 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1074 	struct cam_path *path;
1075 
1076 	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1077 	    ac_code, target_id, (uintmax_t)lun_id);
1078 
1079 	if (xpt_create_path(&path, NULL,
1080 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1081 		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1082 			   "notification\n");
1083 		return;
1084 	}
1085 
1086 	xpt_async(ac_code, path, NULL);
1087 	xpt_free_path(path);
1088 }
1089 
1090 static void
1091 mpssas_complete_all_commands(struct mps_softc *sc)
1092 {
1093 	struct mps_command *cm;
1094 	int i;
1095 	int completed;
1096 
1097 	MPS_FUNCTRACE(sc);
1098 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1099 
1100 	/* complete all commands with a NULL reply */
1101 	for (i = 1; i < sc->num_reqs; i++) {
1102 		cm = &sc->commands[i];
1103 		cm->cm_reply = NULL;
1104 		completed = 0;
1105 
1106 		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1107 			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1108 
1109 		if (cm->cm_complete != NULL) {
1110 			mpssas_log_command(cm, MPS_RECOVERY,
1111 			    "completing cm %p state %x ccb %p for diag reset\n",
1112 			    cm, cm->cm_state, cm->cm_ccb);
1113 
1114 			cm->cm_complete(sc, cm);
1115 			completed = 1;
1116 		}
1117 
1118 		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1119 			mpssas_log_command(cm, MPS_RECOVERY,
1120 			    "waking up cm %p state %x ccb %p for diag reset\n",
1121 			    cm, cm->cm_state, cm->cm_ccb);
1122 			wakeup(cm);
1123 			completed = 1;
1124 		}
1125 
1126 		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1127 			/* this should never happen, but if it does, log */
1128 			mpssas_log_command(cm, MPS_RECOVERY,
1129 			    "cm %p state %x flags 0x%x ccb %p during diag "
1130 			    "reset\n", cm, cm->cm_state, cm->cm_flags,
1131 			    cm->cm_ccb);
1132 		}
1133 	}
1134 }
1135 
1136 void
1137 mpssas_handle_reinit(struct mps_softc *sc)
1138 {
1139 	int i;
1140 
1141 	/* Go back into startup mode and freeze the simq, so that CAM
1142 	 * doesn't send any commands until after we've rediscovered all
1143 	 * targets and found the proper device handles for them.
1144 	 *
1145 	 * After the reset, portenable will trigger discovery, and after all
1146 	 * discovery-related activities have finished, the simq will be
1147 	 * released.
1148 	 */
1149 	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1150 	sc->sassc->flags |= MPSSAS_IN_STARTUP;
1151 	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1152 	mpssas_startup_increment(sc->sassc);
1153 
1154 	/* notify CAM of a bus reset */
1155 	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1156 	    CAM_LUN_WILDCARD);
1157 
1158 	/* complete and cleanup after all outstanding commands */
1159 	mpssas_complete_all_commands(sc);
1160 
1161 	mps_dprint(sc, MPS_INIT,
1162 	    "%s startup %u tm %u after command completion\n",
1163 	    __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1164 
1165 	/* zero all the target handles, since they may change after the
1166 	 * reset, and we have to rediscover all the targets and use the new
1167 	 * handles.
1168 	 */
1169 	for (i = 0; i < sc->sassc->maxtargets; i++) {
1170 		if (sc->sassc->targets[i].outstanding != 0)
1171 			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1172 			    i, sc->sassc->targets[i].outstanding);
1173 		sc->sassc->targets[i].handle = 0x0;
1174 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1175 		sc->sassc->targets[i].outstanding = 0;
1176 		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
1177 	}
1178 }
1179 
1180 static void
1181 mpssas_tm_timeout(void *data)
1182 {
1183 	struct mps_command *tm = data;
1184 	struct mps_softc *sc = tm->cm_sc;
1185 
1186 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1187 
1188 	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1189 	    "task mgmt %p timed out\n", tm);
1190 	mps_reinit(sc);
1191 }
1192 
1193 static void
1194 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1195 {
1196 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1197 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1198 	unsigned int cm_count = 0;
1199 	struct mps_command *cm;
1200 	struct mpssas_target *targ;
1201 
1202 	callout_stop(&tm->cm_callout);
1203 
1204 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1205 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1206 	targ = tm->cm_targ;
1207 
1208 	/*
1209 	 * Currently there should be no way we can hit this case.  It only
1210 	 * happens when we have a failure to allocate chain frames, and
1211 	 * task management commands don't have S/G lists.
1212 	 * XXXSL So should it be an assertion?
1213 	 */
1214 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1215 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1216 			   "This should not happen!\n", __func__, tm->cm_flags);
1217 		mpssas_free_tm(sc, tm);
1218 		return;
1219 	}
1220 
1221 	if (reply == NULL) {
1222 		mpssas_log_command(tm, MPS_RECOVERY,
1223 		    "NULL reset reply for tm %p\n", tm);
1224 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1225 			/* this completion was due to a reset, just cleanup */
1226 			targ->flags &= ~MPSSAS_TARGET_INRESET;
1227 			targ->tm = NULL;
1228 			mpssas_free_tm(sc, tm);
1229 		}
1230 		else {
1231 			/* we should have gotten a reply. */
1232 			mps_reinit(sc);
1233 		}
1234 		return;
1235 	}
1236 
1237 	mpssas_log_command(tm, MPS_RECOVERY,
1238 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1239 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1240 	    le32toh(reply->TerminationCount));
1241 
1242 	/* See if there are any outstanding commands for this LUN.
1243 	 * This could be made more efficient by using a per-LU data
1244 	 * structure of some sort.
1245 	 */
1246 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1247 		if (cm->cm_lun == tm->cm_lun)
1248 			cm_count++;
1249 	}
1250 
1251 	if (cm_count == 0) {
1252 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1253 		    "logical unit %u finished recovery after reset\n",
1254 		    tm->cm_lun, tm);
1255 
1256 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1257 		    tm->cm_lun);
1258 
1259 		/* we've finished recovery for this logical unit.  check and
1260 		 * see if some other logical unit has a timedout command
1261 		 * that needs to be processed.
1262 		 */
1263 		cm = TAILQ_FIRST(&targ->timedout_commands);
1264 		if (cm) {
1265 			mpssas_send_abort(sc, tm, cm);
1266 		}
1267 		else {
1268 			targ->tm = NULL;
1269 			mpssas_free_tm(sc, tm);
1270 		}
1271 	}
1272 	else {
1273 		/* if we still have commands for this LUN, the reset
1274 		 * effectively failed, regardless of the status reported.
1275 		 * Escalate to a target reset.
1276 		 */
1277 		mpssas_log_command(tm, MPS_RECOVERY,
1278 		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1279 		    tm, cm_count);
1280 		mpssas_send_reset(sc, tm,
1281 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1282 	}
1283 }
1284 
/*
 * Completion handler for a TARGET RESET task management request.  If
 * the target has no outstanding commands left, recovery for it (and all
 * of its LUNs) is done; otherwise the reset is considered failed and
 * the driver escalates to a full controller reinit.
 */
static void
mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed; cancel the escalation timer. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->flags &= ~MPSSAS_TARGET_INRESET;
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	targ->flags &= ~MPSSAS_TARGET_INRESET;

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "recovery finished after target reset\n");

		/* Tell CAM a BDR was delivered to every LUN on the target. */
		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "target reset complete for tm %p, but still have %u command(s)\n",
		    tm, targ->outstanding);
		mps_reinit(sc);
	}
}
1357 
/* Seconds to wait for a reset TM reply before escalating to reinit. */
#define MPS_RESET_TIMEOUT 30
1359 
1360 static int
1361 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1362 {
1363 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1364 	struct mpssas_target *target;
1365 	int err;
1366 
1367 	target = tm->cm_targ;
1368 	if (target->handle == 0) {
1369 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1370 		    __func__, target->tid);
1371 		return -1;
1372 	}
1373 
1374 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1375 	req->DevHandle = htole16(target->handle);
1376 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1377 	req->TaskType = type;
1378 
1379 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1380 		/* XXX Need to handle invalid LUNs */
1381 		MPS_SET_LUN(req->LUN, tm->cm_lun);
1382 		tm->cm_targ->logical_unit_resets++;
1383 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1384 		    "sending logical unit reset\n");
1385 		tm->cm_complete = mpssas_logical_unit_reset_complete;
1386 	}
1387 	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1388 		/* Target reset method =  SAS Hard Link Reset / SATA Link Reset */
1389 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1390 		tm->cm_targ->target_resets++;
1391 		tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1392 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1393 		    "sending target reset\n");
1394 		tm->cm_complete = mpssas_target_reset_complete;
1395 	}
1396 	else {
1397 		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
1398 		return -1;
1399 	}
1400 
1401 	tm->cm_data = NULL;
1402 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1403 	tm->cm_complete_data = (void *)tm;
1404 
1405 	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1406 	    mpssas_tm_timeout, tm);
1407 
1408 	err = mps_map_command(sc, tm);
1409 	if (err)
1410 		mpssas_log_command(tm, MPS_RECOVERY,
1411 		    "error %d sending reset type %u\n",
1412 		    err, type);
1413 
1414 	return err;
1415 }
1416 
1417 
/*
 * Completion handler for an ABORT TASK task management request.  Looks
 * at the target's timed-out command queue to decide what to do next:
 * empty queue means recovery is done; a different command at the head
 * means this abort succeeded and the next one is issued; the same
 * command still at the head means the abort failed, so recovery
 * escalates to a logical unit reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed; cancel the escalation timer. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1499 
/* Seconds to wait for an abort TM reply before escalating to reinit. */
#define MPS_ABORT_TIMEOUT 5
1501 
1502 static int
1503 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1504 {
1505 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1506 	struct mpssas_target *targ;
1507 	int err;
1508 
1509 	targ = cm->cm_targ;
1510 	if (targ->handle == 0) {
1511 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1512 		    __func__, cm->cm_ccb->ccb_h.target_id);
1513 		return -1;
1514 	}
1515 
1516 	mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1517 	    "Aborting command %p\n", cm);
1518 
1519 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1520 	req->DevHandle = htole16(targ->handle);
1521 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1522 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1523 
1524 	/* XXX Need to handle invalid LUNs */
1525 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1526 
1527 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1528 
1529 	tm->cm_data = NULL;
1530 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1531 	tm->cm_complete = mpssas_abort_complete;
1532 	tm->cm_complete_data = (void *)tm;
1533 	tm->cm_targ = cm->cm_targ;
1534 	tm->cm_lun = cm->cm_lun;
1535 
1536 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1537 	    mpssas_tm_timeout, tm);
1538 
1539 	targ->aborts++;
1540 
1541 	err = mps_map_command(sc, tm);
1542 	if (err)
1543 		mpssas_log_command(tm, MPS_RECOVERY,
1544 		    "error %d sending abort for cm %p SMID %u\n",
1545 		    err, cm, req->TaskMID);
1546 	return err;
1547 }
1548 
1549 
1550 static void
1551 mpssas_scsiio_timeout(void *data)
1552 {
1553 	struct mps_softc *sc;
1554 	struct mps_command *cm;
1555 	struct mpssas_target *targ;
1556 
1557 	cm = (struct mps_command *)data;
1558 	sc = cm->cm_sc;
1559 
1560 	MPS_FUNCTRACE(sc);
1561 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1562 
1563 	mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1564 
1565 	/*
1566 	 * Run the interrupt handler to make sure it's not pending.  This
1567 	 * isn't perfect because the command could have already completed
1568 	 * and been re-used, though this is unlikely.
1569 	 */
1570 	mps_intr_locked(sc);
1571 	if (cm->cm_state == MPS_CM_STATE_FREE) {
1572 		mpssas_log_command(cm, MPS_XINFO,
1573 		    "SCSI command %p almost timed out\n", cm);
1574 		return;
1575 	}
1576 
1577 	if (cm->cm_ccb == NULL) {
1578 		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1579 		return;
1580 	}
1581 
1582 	mpssas_log_command(cm, MPS_INFO, "command timeout cm %p ccb %p\n",
1583 	    cm, cm->cm_ccb);
1584 
1585 	targ = cm->cm_targ;
1586 	targ->timeouts++;
1587 
1588 	/* XXX first, check the firmware state, to see if it's still
1589 	 * operational.  if not, do a diag reset.
1590 	 */
1591 
1592 	cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1593 	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1594 	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1595 
1596 	if (targ->tm != NULL) {
1597 		/* target already in recovery, just queue up another
1598 		 * timedout command to be processed later.
1599 		 */
1600 		mps_dprint(sc, MPS_RECOVERY,
1601 		    "queued timedout cm %p for processing by tm %p\n",
1602 		    cm, targ->tm);
1603 	}
1604 	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1605 		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1606 		    cm, targ->tm);
1607 
1608 		/* start recovery by aborting the first timedout command */
1609 		mpssas_send_abort(sc, targ->tm, cm);
1610 	}
1611 	else {
1612 		/* XXX queue this target up for recovery once a TM becomes
1613 		 * available.  The firmware only has a limited number of
1614 		 * HighPriority credits for the high priority requests used
1615 		 * for task management, and we ran out.
1616 		 *
1617 		 * Isilon: don't worry about this for now, since we have
1618 		 * more credits than disks in an enclosure, and limit
1619 		 * ourselves to one TM per target for recovery.
1620 		 */
1621 		mps_dprint(sc, MPS_RECOVERY,
1622 		    "timedout cm %p failed to allocate a tm\n", cm);
1623 	}
1624 
1625 }
1626 
/*
 * Handle an XPT_SCSI_IO ccb: validate the target, build an MPI2 SCSI IO
 * request frame from the csio (direction, tagging, CDB, and optional
 * EEDP protection fields), then map and submit the command.  Completion
 * is delivered through mpssas_scsiio_complete().  The ccb is completed
 * here (with an error status) on any early-out path.  Called with the
 * controller mutex held.
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A device handle of zero means no device at this target. */
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}
	/* RAID component members are not addressable via plain SCSI IO. */
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			csio->ccb_h.status = CAM_REQ_CMP;
		else
			csio->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		csio->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		/* Out of command slots: freeze the simq and ask CAM to
		 * requeue this ccb later.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Fill in the MPI2 SCSI IO request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		ccb->ccb_h.status = CAM_LUN_INVALID;
		xpt_done(ccb);
		return;
	}

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	/* NOTE(review): IoFlags was already set to this value above. */
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* lun is NULL after the loop when no LUN id matches. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		/* DMA mapping works from the ccb's data pointers. */
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.status != MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			ccb->ccb_h.status = CAM_REQ_INPROG;
		}
	}

	/* Arm the per-command timeout (ccb timeout is in milliseconds). */
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	   mpssas_scsiio_timeout, cm);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1879 
1880 static void
1881 mps_response_code(struct mps_softc *sc, u8 response_code)
1882 {
1883         char *desc;
1884 
1885         switch (response_code) {
1886         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1887                 desc = "task management request completed";
1888                 break;
1889         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1890                 desc = "invalid frame";
1891                 break;
1892         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1893                 desc = "task management request not supported";
1894                 break;
1895         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1896                 desc = "task management request failed";
1897                 break;
1898         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1899                 desc = "task management request succeeded";
1900                 break;
1901         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1902                 desc = "invalid lun";
1903                 break;
1904         case 0xA:
1905                 desc = "overlapped tag attempted";
1906                 break;
1907         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1908                 desc = "task queued, however not sent to target";
1909                 break;
1910         default:
1911                 desc = "unknown";
1912                 break;
1913         }
1914 		mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1915                 response_code, desc);
1916 }
1917 /**
1918  * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1919  */
1920 static void
1921 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1922     Mpi2SCSIIOReply_t *mpi_reply)
1923 {
1924 	u32 response_info;
1925 	u8 *response_bytes;
1926 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1927 	    MPI2_IOCSTATUS_MASK;
1928 	u8 scsi_state = mpi_reply->SCSIState;
1929 	u8 scsi_status = mpi_reply->SCSIStatus;
1930 	char *desc_ioc_state = NULL;
1931 	char *desc_scsi_status = NULL;
1932 	char *desc_scsi_state = sc->tmp_string;
1933 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1934 
1935 	if (log_info == 0x31170000)
1936 		return;
1937 
1938 	switch (ioc_status) {
1939 	case MPI2_IOCSTATUS_SUCCESS:
1940 		desc_ioc_state = "success";
1941 		break;
1942 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1943 		desc_ioc_state = "invalid function";
1944 		break;
1945 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1946 		desc_ioc_state = "scsi recovered error";
1947 		break;
1948 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1949 		desc_ioc_state = "scsi invalid dev handle";
1950 		break;
1951 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1952 		desc_ioc_state = "scsi device not there";
1953 		break;
1954 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1955 		desc_ioc_state = "scsi data overrun";
1956 		break;
1957 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1958 		desc_ioc_state = "scsi data underrun";
1959 		break;
1960 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1961 		desc_ioc_state = "scsi io data error";
1962 		break;
1963 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1964 		desc_ioc_state = "scsi protocol error";
1965 		break;
1966 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1967 		desc_ioc_state = "scsi task terminated";
1968 		break;
1969 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1970 		desc_ioc_state = "scsi residual mismatch";
1971 		break;
1972 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1973 		desc_ioc_state = "scsi task mgmt failed";
1974 		break;
1975 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1976 		desc_ioc_state = "scsi ioc terminated";
1977 		break;
1978 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1979 		desc_ioc_state = "scsi ext terminated";
1980 		break;
1981 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1982 		desc_ioc_state = "eedp guard error";
1983 		break;
1984 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1985 		desc_ioc_state = "eedp ref tag error";
1986 		break;
1987 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1988 		desc_ioc_state = "eedp app tag error";
1989 		break;
1990 	default:
1991 		desc_ioc_state = "unknown";
1992 		break;
1993 	}
1994 
1995 	switch (scsi_status) {
1996 	case MPI2_SCSI_STATUS_GOOD:
1997 		desc_scsi_status = "good";
1998 		break;
1999 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
2000 		desc_scsi_status = "check condition";
2001 		break;
2002 	case MPI2_SCSI_STATUS_CONDITION_MET:
2003 		desc_scsi_status = "condition met";
2004 		break;
2005 	case MPI2_SCSI_STATUS_BUSY:
2006 		desc_scsi_status = "busy";
2007 		break;
2008 	case MPI2_SCSI_STATUS_INTERMEDIATE:
2009 		desc_scsi_status = "intermediate";
2010 		break;
2011 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2012 		desc_scsi_status = "intermediate condmet";
2013 		break;
2014 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2015 		desc_scsi_status = "reservation conflict";
2016 		break;
2017 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2018 		desc_scsi_status = "command terminated";
2019 		break;
2020 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
2021 		desc_scsi_status = "task set full";
2022 		break;
2023 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
2024 		desc_scsi_status = "aca active";
2025 		break;
2026 	case MPI2_SCSI_STATUS_TASK_ABORTED:
2027 		desc_scsi_status = "task aborted";
2028 		break;
2029 	default:
2030 		desc_scsi_status = "unknown";
2031 		break;
2032 	}
2033 
2034 	desc_scsi_state[0] = '\0';
2035 	if (!scsi_state)
2036 		desc_scsi_state = " ";
2037 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2038 		strcat(desc_scsi_state, "response info ");
2039 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2040 		strcat(desc_scsi_state, "state terminated ");
2041 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2042 		strcat(desc_scsi_state, "no status ");
2043 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2044 		strcat(desc_scsi_state, "autosense failed ");
2045 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2046 		strcat(desc_scsi_state, "autosense valid ");
2047 
2048 	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2049 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2050 	/* We can add more detail about underflow data here
2051 	 * TO-DO
2052 	 * */
2053 	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
2054 	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2055 	    desc_scsi_state, scsi_state);
2056 
2057 	if (sc->mps_debug & MPS_XINFO &&
2058 		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2059 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
2060 		scsi_sense_print(csio);
2061 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
2062 	}
2063 
2064 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2065 		response_info = le32toh(mpi_reply->ResponseInfo);
2066 		response_bytes = (u8 *)&response_info;
2067 		mps_response_code(sc,response_bytes[0]);
2068 	}
2069 }
2070 
2071 static void
2072 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
2073 {
2074 	MPI2_SCSI_IO_REPLY *rep;
2075 	union ccb *ccb;
2076 	struct ccb_scsiio *csio;
2077 	struct mpssas_softc *sassc;
2078 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2079 	u8 *TLR_bits, TLR_on;
2080 	int dir = 0, i;
2081 	u16 alloc_len;
2082 
2083 	MPS_FUNCTRACE(sc);
2084 	mps_dprint(sc, MPS_TRACE,
2085 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2086 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2087 	    cm->cm_targ->outstanding);
2088 
2089 	callout_stop(&cm->cm_callout);
2090 	mtx_assert(&sc->mps_mtx, MA_OWNED);
2091 
2092 	sassc = sc->sassc;
2093 	ccb = cm->cm_complete_data;
2094 	csio = &ccb->csio;
2095 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2096 	/*
2097 	 * XXX KDM if the chain allocation fails, does it matter if we do
2098 	 * the sync and unload here?  It is simpler to do it in every case,
2099 	 * assuming it doesn't cause problems.
2100 	 */
2101 	if (cm->cm_data != NULL) {
2102 		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2103 			dir = BUS_DMASYNC_POSTREAD;
2104 		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2105 			dir = BUS_DMASYNC_POSTWRITE;
2106 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2107 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2108 	}
2109 
2110 	cm->cm_targ->completed++;
2111 	cm->cm_targ->outstanding--;
2112 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2113 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2114 
2115 	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
2116 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2117 		if (cm->cm_reply != NULL)
2118 			mpssas_log_command(cm, MPS_RECOVERY,
2119 			    "completed timedout cm %p ccb %p during recovery "
2120 			    "ioc %x scsi %x state %x xfer %u\n",
2121 			    cm, cm->cm_ccb,
2122 			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2123 			    le32toh(rep->TransferCount));
2124 		else
2125 			mpssas_log_command(cm, MPS_RECOVERY,
2126 			    "completed timedout cm %p ccb %p during recovery\n",
2127 			    cm, cm->cm_ccb);
2128 	} else if (cm->cm_targ->tm != NULL) {
2129 		if (cm->cm_reply != NULL)
2130 			mpssas_log_command(cm, MPS_RECOVERY,
2131 			    "completed cm %p ccb %p during recovery "
2132 			    "ioc %x scsi %x state %x xfer %u\n",
2133 			    cm, cm->cm_ccb,
2134 			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2135 			    le32toh(rep->TransferCount));
2136 		else
2137 			mpssas_log_command(cm, MPS_RECOVERY,
2138 			    "completed cm %p ccb %p during recovery\n",
2139 			    cm, cm->cm_ccb);
2140 	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2141 		mpssas_log_command(cm, MPS_RECOVERY,
2142 		    "reset completed cm %p ccb %p\n",
2143 		    cm, cm->cm_ccb);
2144 	}
2145 
2146 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2147 		/*
2148 		 * We ran into an error after we tried to map the command,
2149 		 * so we're getting a callback without queueing the command
2150 		 * to the hardware.  So we set the status here, and it will
2151 		 * be retained below.  We'll go through the "fast path",
2152 		 * because there can be no reply when we haven't actually
2153 		 * gone out to the hardware.
2154 		 */
2155 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
2156 
2157 		/*
2158 		 * Currently the only error included in the mask is
2159 		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2160 		 * chain frames.  We need to freeze the queue until we get
2161 		 * a command that completed without this error, which will
2162 		 * hopefully have some chain frames attached that we can
2163 		 * use.  If we wanted to get smarter about it, we would
2164 		 * only unfreeze the queue in this condition when we're
2165 		 * sure that we're getting some chain frames back.  That's
2166 		 * probably unnecessary.
2167 		 */
2168 		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2169 			xpt_freeze_simq(sassc->sim, 1);
2170 			sassc->flags |= MPSSAS_QUEUE_FROZEN;
2171 			mps_dprint(sc, MPS_XINFO, "Error sending command, "
2172 				   "freezing SIM queue\n");
2173 		}
2174 	}
2175 
2176 	/* Take the fast path to completion */
2177 	if (cm->cm_reply == NULL) {
2178 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2179 			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2180 				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2181 			else {
2182 				ccb->ccb_h.status = CAM_REQ_CMP;
2183 				ccb->csio.scsi_status = SCSI_STATUS_OK;
2184 			}
2185 			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2186 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2187 				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2188 				mps_dprint(sc, MPS_XINFO,
2189 				    "Unfreezing SIM queue\n");
2190 			}
2191 		}
2192 
2193 		/*
2194 		 * There are two scenarios where the status won't be
2195 		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
2196 		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2197 		 */
2198 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2199 			/*
2200 			 * Freeze the dev queue so that commands are
2201 			 * executed in the correct order with after error
2202 			 * recovery.
2203 			 */
2204 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2205 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2206 		}
2207 		mps_free_command(sc, cm);
2208 		xpt_done(ccb);
2209 		return;
2210 	}
2211 
2212 	mpssas_log_command(cm, MPS_XINFO,
2213 	    "ioc %x scsi %x state %x xfer %u\n",
2214 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2215 	    le32toh(rep->TransferCount));
2216 
2217 	/*
2218 	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2219 	 * Volume if an error occurred (normal I/O retry).  Use the original
2220 	 * CCB, but set a flag that this will be a retry so that it's sent to
2221 	 * the original volume.  Free the command but reuse the CCB.
2222 	 */
2223 	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2224 		mps_free_command(sc, cm);
2225 		ccb->ccb_h.status = MPS_WD_RETRY;
2226 		mpssas_action_scsiio(sassc, ccb);
2227 		return;
2228 	}
2229 
2230 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2231 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2232 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2233 		/* FALLTHROUGH */
2234 	case MPI2_IOCSTATUS_SUCCESS:
2235 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2236 
2237 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2238 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2239 			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2240 
2241 		/* Completion failed at the transport level. */
2242 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2243 		    MPI2_SCSI_STATE_TERMINATED)) {
2244 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2245 			break;
2246 		}
2247 
2248 		/* In a modern packetized environment, an autosense failure
2249 		 * implies that there's not much else that can be done to
2250 		 * recover the command.
2251 		 */
2252 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2253 			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
2254 			break;
2255 		}
2256 
2257 		/*
2258 		 * CAM doesn't care about SAS Response Info data, but if this is
2259 		 * the state check if TLR should be done.  If not, clear the
2260 		 * TLR_bits for the target.
2261 		 */
2262 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2263 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) ==
2264 		    MPS_SCSI_RI_INVALID_FRAME)) {
2265 			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
2266 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2267 		}
2268 
2269 		/*
2270 		 * Intentionally override the normal SCSI status reporting
2271 		 * for these two cases.  These are likely to happen in a
2272 		 * multi-initiator environment, and we want to make sure that
2273 		 * CAM retries these commands rather than fail them.
2274 		 */
2275 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2276 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2277 			ccb->ccb_h.status = CAM_REQ_ABORTED;
2278 			break;
2279 		}
2280 
2281 		/* Handle normal status and sense */
2282 		csio->scsi_status = rep->SCSIStatus;
2283 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2284 			ccb->ccb_h.status = CAM_REQ_CMP;
2285 		else
2286 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2287 
2288 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2289 			int sense_len, returned_sense_len;
2290 
2291 			returned_sense_len = min(le32toh(rep->SenseCount),
2292 			    sizeof(struct scsi_sense_data));
2293 			if (returned_sense_len < ccb->csio.sense_len)
2294 				ccb->csio.sense_resid = ccb->csio.sense_len -
2295 					returned_sense_len;
2296 			else
2297 				ccb->csio.sense_resid = 0;
2298 
2299 			sense_len = min(returned_sense_len,
2300 			    ccb->csio.sense_len - ccb->csio.sense_resid);
2301 			bzero(&ccb->csio.sense_data,
2302 			      sizeof(ccb->csio.sense_data));
2303 			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2304 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2305 		}
2306 
2307 		/*
2308 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2309 		 * and it's page code 0 (Supported Page List), and there is
2310 		 * inquiry data, and this is for a sequential access device, and
2311 		 * the device is an SSP target, and TLR is supported by the
2312 		 * controller, turn the TLR_bits value ON if page 0x90 is
2313 		 * supported.
2314 		 */
2315 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2316 		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2317 		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2318 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2319 		    (csio->data_ptr != NULL) &&
2320 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2321 		    (sc->control_TLR) &&
2322 		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
2323 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2324 			vpd_list = (struct scsi_vpd_supported_page_list *)
2325 			    csio->data_ptr;
2326 			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
2327 			    TLR_bits;
2328 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2329 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2330 			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2331 			    csio->cdb_io.cdb_bytes[4];
2332 			alloc_len -= csio->resid;
2333 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2334 				if (vpd_list->list[i] == 0x90) {
2335 					*TLR_bits = TLR_on;
2336 					break;
2337 				}
2338 			}
2339 		}
2340 		break;
2341 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2342 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2343 		/*
2344 		 * If devinfo is 0 this will be a volume.  In that case don't
2345 		 * tell CAM that the volume is not there.  We want volumes to
2346 		 * be enumerated until they are deleted/removed, not just
2347 		 * failed.
2348 		 */
2349 		if (cm->cm_targ->devinfo == 0)
2350 			ccb->ccb_h.status = CAM_REQ_CMP;
2351 		else
2352 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2353 		break;
2354 	case MPI2_IOCSTATUS_INVALID_SGL:
2355 		mps_print_scsiio_cmd(sc, cm);
2356 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2357 		break;
2358 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2359 		/*
2360 		 * This is one of the responses that comes back when an I/O
2361 		 * has been aborted.  If it is because of a timeout that we
2362 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2363 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2364 		 * command is the same (it gets retried, subject to the
2365 		 * retry counter), the only difference is what gets printed
2366 		 * on the console.
2367 		 */
2368 		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2369 			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
2370 		else
2371 			ccb->ccb_h.status = CAM_REQ_ABORTED;
2372 		break;
2373 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2374 		/* resid is ignored for this condition */
2375 		csio->resid = 0;
2376 		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
2377 		break;
2378 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2379 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2380 		/*
2381 		 * Since these are generally external (i.e. hopefully
2382 		 * transient transport-related) errors, retry these without
2383 		 * decrementing the retry count.
2384 		 */
2385 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
2386 		mpssas_log_command(cm, MPS_INFO,
2387 		    "terminated ioc %x scsi %x state %x xfer %u\n",
2388 		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2389 		    le32toh(rep->TransferCount));
2390 		break;
2391 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2392 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2393 	case MPI2_IOCSTATUS_INVALID_VPID:
2394 	case MPI2_IOCSTATUS_INVALID_FIELD:
2395 	case MPI2_IOCSTATUS_INVALID_STATE:
2396 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2397 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2398 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2399 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2400 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2401 	default:
2402 		mpssas_log_command(cm, MPS_XINFO,
2403 		    "completed ioc %x scsi %x state %x xfer %u\n",
2404 		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2405 		    le32toh(rep->TransferCount));
2406 		csio->resid = cm->cm_length;
2407 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2408 		break;
2409 	}
2410 
2411 	mps_sc_failed_io_info(sc,csio,rep);
2412 
2413 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2414 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2415 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2416 		mps_dprint(sc, MPS_XINFO, "Command completed, "
2417 		    "unfreezing SIM queue\n");
2418 	}
2419 
2420 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2421 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2422 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2423 	}
2424 
2425 	mps_free_command(sc, cm);
2426 	xpt_done(ccb);
2427 }
2428 
2429 /* All Request reached here are Endian safe */
2430 static void
2431 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2432     union ccb *ccb) {
2433 	pMpi2SCSIIORequest_t	pIO_req;
2434 	struct mps_softc	*sc = sassc->sc;
2435 	uint64_t		virtLBA;
2436 	uint32_t		physLBA, stripe_offset, stripe_unit;
2437 	uint32_t		io_size, column;
2438 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2439 
2440 	/*
2441 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2442 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2443 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2444 	 * bit different than the 10/16 CDBs, handle them separately.
2445 	 */
2446 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2447 	CDB = pIO_req->CDB.CDB32;
2448 
2449 	/*
2450 	 * Handle 6 byte CDBs.
2451 	 */
2452 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2453 	    (CDB[0] == WRITE_6))) {
2454 		/*
2455 		 * Get the transfer size in blocks.
2456 		 */
2457 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2458 
2459 		/*
2460 		 * Get virtual LBA given in the CDB.
2461 		 */
2462 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2463 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2464 
2465 		/*
2466 		 * Check that LBA range for I/O does not exceed volume's
2467 		 * MaxLBA.
2468 		 */
2469 		if ((virtLBA + (uint64_t)io_size - 1) <=
2470 		    sc->DD_max_lba) {
2471 			/*
2472 			 * Check if the I/O crosses a stripe boundary.  If not,
2473 			 * translate the virtual LBA to a physical LBA and set
2474 			 * the DevHandle for the PhysDisk to be used.  If it
2475 			 * does cross a boundry, do normal I/O.  To get the
2476 			 * right DevHandle to use, get the map number for the
2477 			 * column, then use that map number to look up the
2478 			 * DevHandle of the PhysDisk.
2479 			 */
2480 			stripe_offset = (uint32_t)virtLBA &
2481 			    (sc->DD_stripe_size - 1);
2482 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2483 				physLBA = (uint32_t)virtLBA >>
2484 				    sc->DD_stripe_exponent;
2485 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2486 				column = physLBA % sc->DD_num_phys_disks;
2487 				pIO_req->DevHandle =
2488 				    htole16(sc->DD_column_map[column].dev_handle);
2489 				/* ???? Is this endian safe*/
2490 				cm->cm_desc.SCSIIO.DevHandle =
2491 				    pIO_req->DevHandle;
2492 
2493 				physLBA = (stripe_unit <<
2494 				    sc->DD_stripe_exponent) + stripe_offset;
2495 				ptrLBA = &pIO_req->CDB.CDB32[1];
2496 				physLBA_byte = (uint8_t)(physLBA >> 16);
2497 				*ptrLBA = physLBA_byte;
2498 				ptrLBA = &pIO_req->CDB.CDB32[2];
2499 				physLBA_byte = (uint8_t)(physLBA >> 8);
2500 				*ptrLBA = physLBA_byte;
2501 				ptrLBA = &pIO_req->CDB.CDB32[3];
2502 				physLBA_byte = (uint8_t)physLBA;
2503 				*ptrLBA = physLBA_byte;
2504 
2505 				/*
2506 				 * Set flag that Direct Drive I/O is
2507 				 * being done.
2508 				 */
2509 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2510 			}
2511 		}
2512 		return;
2513 	}
2514 
2515 	/*
2516 	 * Handle 10, 12 or 16 byte CDBs.
2517 	 */
2518 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2519 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2520 	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2521 	    (CDB[0] == WRITE_12))) {
2522 		/*
2523 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2524 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2525 		 * the else section.  10-byte and 12-byte CDB's are OK.
2526 		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2527 		 * ready to accept 12byte CDB for Direct IOs.
2528 		 */
2529 		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2530 		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2531 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2532 			/*
2533 			 * Get the transfer size in blocks.
2534 			 */
2535 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2536 
2537 			/*
2538 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2539 			 * LBA in the CDB depending on command.
2540 			 */
2541 			lba_idx = ((CDB[0] == READ_12) ||
2542 				(CDB[0] == WRITE_12) ||
2543 				(CDB[0] == READ_10) ||
2544 				(CDB[0] == WRITE_10))? 2 : 6;
2545 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2546 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2547 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2548 			    (uint64_t)CDB[lba_idx + 3];
2549 
2550 			/*
2551 			 * Check that LBA range for I/O does not exceed volume's
2552 			 * MaxLBA.
2553 			 */
2554 			if ((virtLBA + (uint64_t)io_size - 1) <=
2555 			    sc->DD_max_lba) {
2556 				/*
2557 				 * Check if the I/O crosses a stripe boundary.
2558 				 * If not, translate the virtual LBA to a
2559 				 * physical LBA and set the DevHandle for the
2560 				 * PhysDisk to be used.  If it does cross a
2561 				 * boundry, do normal I/O.  To get the right
2562 				 * DevHandle to use, get the map number for the
2563 				 * column, then use that map number to look up
2564 				 * the DevHandle of the PhysDisk.
2565 				 */
2566 				stripe_offset = (uint32_t)virtLBA &
2567 				    (sc->DD_stripe_size - 1);
2568 				if ((stripe_offset + io_size) <=
2569 				    sc->DD_stripe_size) {
2570 					physLBA = (uint32_t)virtLBA >>
2571 					    sc->DD_stripe_exponent;
2572 					stripe_unit = physLBA /
2573 					    sc->DD_num_phys_disks;
2574 					column = physLBA %
2575 					    sc->DD_num_phys_disks;
2576 					pIO_req->DevHandle =
2577 					    htole16(sc->DD_column_map[column].
2578 					    dev_handle);
2579 					cm->cm_desc.SCSIIO.DevHandle =
2580 					    pIO_req->DevHandle;
2581 
2582 					physLBA = (stripe_unit <<
2583 					    sc->DD_stripe_exponent) +
2584 					    stripe_offset;
2585 					ptrLBA =
2586 					    &pIO_req->CDB.CDB32[lba_idx];
2587 					physLBA_byte = (uint8_t)(physLBA >> 24);
2588 					*ptrLBA = physLBA_byte;
2589 					ptrLBA =
2590 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2591 					physLBA_byte = (uint8_t)(physLBA >> 16);
2592 					*ptrLBA = physLBA_byte;
2593 					ptrLBA =
2594 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2595 					physLBA_byte = (uint8_t)(physLBA >> 8);
2596 					*ptrLBA = physLBA_byte;
2597 					ptrLBA =
2598 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2599 					physLBA_byte = (uint8_t)physLBA;
2600 					*ptrLBA = physLBA_byte;
2601 
2602 					/*
2603 					 * Set flag that Direct Drive I/O is
2604 					 * being done.
2605 					 */
2606 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2607 				}
2608 			}
2609 		} else {
2610 			/*
2611 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2612 			 * 0.  Get the transfer size in blocks.
2613 			 */
2614 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2615 
2616 			/*
2617 			 * Get virtual LBA.
2618 			 */
2619 			virtLBA = ((uint64_t)CDB[2] << 54) |
2620 			    ((uint64_t)CDB[3] << 48) |
2621 			    ((uint64_t)CDB[4] << 40) |
2622 			    ((uint64_t)CDB[5] << 32) |
2623 			    ((uint64_t)CDB[6] << 24) |
2624 			    ((uint64_t)CDB[7] << 16) |
2625 			    ((uint64_t)CDB[8] << 8) |
2626 			    (uint64_t)CDB[9];
2627 
2628 			/*
2629 			 * Check that LBA range for I/O does not exceed volume's
2630 			 * MaxLBA.
2631 			 */
2632 			if ((virtLBA + (uint64_t)io_size - 1) <=
2633 			    sc->DD_max_lba) {
2634 				/*
2635 				 * Check if the I/O crosses a stripe boundary.
2636 				 * If not, translate the virtual LBA to a
2637 				 * physical LBA and set the DevHandle for the
2638 				 * PhysDisk to be used.  If it does cross a
2639 				 * boundry, do normal I/O.  To get the right
2640 				 * DevHandle to use, get the map number for the
2641 				 * column, then use that map number to look up
2642 				 * the DevHandle of the PhysDisk.
2643 				 */
2644 				stripe_offset = (uint32_t)virtLBA &
2645 				    (sc->DD_stripe_size - 1);
2646 				if ((stripe_offset + io_size) <=
2647 				    sc->DD_stripe_size) {
2648 					physLBA = (uint32_t)(virtLBA >>
2649 					    sc->DD_stripe_exponent);
2650 					stripe_unit = physLBA /
2651 					    sc->DD_num_phys_disks;
2652 					column = physLBA %
2653 					    sc->DD_num_phys_disks;
2654 					pIO_req->DevHandle =
2655 					    htole16(sc->DD_column_map[column].
2656 					    dev_handle);
2657 					cm->cm_desc.SCSIIO.DevHandle =
2658 					    pIO_req->DevHandle;
2659 
2660 					physLBA = (stripe_unit <<
2661 					    sc->DD_stripe_exponent) +
2662 					    stripe_offset;
2663 
2664 					/*
2665 					 * Set upper 4 bytes of LBA to 0.  We
2666 					 * assume that the phys disks are less
2667 					 * than 2 TB's in size.  Then, set the
2668 					 * lower 4 bytes.
2669 					 */
2670 					pIO_req->CDB.CDB32[2] = 0;
2671 					pIO_req->CDB.CDB32[3] = 0;
2672 					pIO_req->CDB.CDB32[4] = 0;
2673 					pIO_req->CDB.CDB32[5] = 0;
2674 					ptrLBA = &pIO_req->CDB.CDB32[6];
2675 					physLBA_byte = (uint8_t)(physLBA >> 24);
2676 					*ptrLBA = physLBA_byte;
2677 					ptrLBA = &pIO_req->CDB.CDB32[7];
2678 					physLBA_byte = (uint8_t)(physLBA >> 16);
2679 					*ptrLBA = physLBA_byte;
2680 					ptrLBA = &pIO_req->CDB.CDB32[8];
2681 					physLBA_byte = (uint8_t)(physLBA >> 8);
2682 					*ptrLBA = physLBA_byte;
2683 					ptrLBA = &pIO_req->CDB.CDB32[9];
2684 					physLBA_byte = (uint8_t)physLBA;
2685 					*ptrLBA = physLBA_byte;
2686 
2687 					/*
2688 					 * Set flag that Direct Drive I/O is
2689 					 * being done.
2690 					 */
2691 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2692 				}
2693 			}
2694 		}
2695 	}
2696 }
2697 
2698 #if __FreeBSD_version >= 900026
2699 static void
2700 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2701 {
2702 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2703 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2704 	uint64_t sasaddr;
2705 	union ccb *ccb;
2706 
2707 	ccb = cm->cm_complete_data;
2708 
2709 	/*
2710 	 * Currently there should be no way we can hit this case.  It only
2711 	 * happens when we have a failure to allocate chain frames, and SMP
2712 	 * commands require two S/G elements only.  That should be handled
2713 	 * in the standard request size.
2714 	 */
2715 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2716 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2717 			   __func__, cm->cm_flags);
2718 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2719 		goto bailout;
2720         }
2721 
2722 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2723 	if (rpl == NULL) {
2724 		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2725 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2726 		goto bailout;
2727 	}
2728 
2729 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2730 	sasaddr = le32toh(req->SASAddress.Low);
2731 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2732 
2733 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2734 	    MPI2_IOCSTATUS_SUCCESS ||
2735 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2736 		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2737 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2738 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2739 		goto bailout;
2740 	}
2741 
2742 	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2743 		   "%#jx completed successfully\n", __func__,
2744 		   (uintmax_t)sasaddr);
2745 
2746 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2747 		ccb->ccb_h.status = CAM_REQ_CMP;
2748 	else
2749 		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2750 
2751 bailout:
2752 	/*
2753 	 * We sync in both directions because we had DMAs in the S/G list
2754 	 * in both directions.
2755 	 */
2756 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2757 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2758 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2759 	mps_free_command(sc, cm);
2760 	xpt_done(ccb);
2761 }
2762 
/*
 * Send an SMP (SAS Management Protocol) passthrough command to the device
 * at the given SAS address.  The request and response buffers come from
 * the XPT_SMP_IO CCB; completion is handled asynchronously by
 * mpssas_smpio_complete(), which finishes the CCB.  On any setup failure
 * the CCB is completed immediately with an error status.
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
			   "%s: physical addresses not supported\n", __func__);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			/* Single-entry S/G list: take its address directly. */
			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		/* Plain virtual addresses; use them as-is. */
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	/* Build the MPI SMP passthrough request in the command frame. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 is the outbound request, iovec 1 the inbound response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	/* Mapping failed before the command was issued; release it. */
	mps_free_command(sc, cm);
	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	xpt_done(ccb);
	return;

}
2935 
/*
 * Handle an XPT_SMP_IO CCB.  Resolve the SAS address the SMP request
 * should be routed to: the target itself if it contains an embedded SMP
 * target, otherwise the target's parent (normally the expander it is
 * attached to).  Once an address is found, hand off to
 * mpssas_send_smpcmd(); otherwise complete the CCB with an error.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		/* Old probe code: look up the parent's target structure. */
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/* Parent info is cached on the target itself. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3071 #endif //__FreeBSD_version >= 900026
3072 
/*
 * Handle an XPT_RESET_DEV CCB by issuing a target-reset task management
 * request (SAS hard link reset / SATA link reset) to the addressed
 * device.  Completion is handled asynchronously by
 * mpssas_resetdev_complete(), which finishes the CCB.
 */
static void
mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *tm;
	struct mpssas_target *targ;

	MPS_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_RESET_DEV\n",
	     ccb->ccb_h.target_id));
	sc = sassc->sc;
	tm = mps_alloc_command(sc);
	if (tm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "command alloc failure in mpssas_action_resetdev\n");
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	/* Build the task management request in the command frame. */
	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	/* Task management requests carry no data payload. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mpssas_resetdev_complete;
	tm->cm_complete_data = ccb;
	tm->cm_targ = targ;
	mps_map_command(sc, tm);
}
3113 
/*
 * Completion handler for the target-reset task management request issued
 * by mpssas_action_resetdev().  Translates the task management response
 * code into a CAM status, announces the reset to CAM on success, frees
 * the command, and completes the original CCB.
 *
 * NOTE(review): resp (tm->cm_reply) is dereferenced without a NULL
 * check; this assumes task management requests always post a reply
 * frame — confirm against the reply-handling path.
 */
static void
mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mps_dprint(sc, MPS_ERROR,
			   "%s: cm_flags = %#x for reset of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   req->DevHandle);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	/* Success: report a "bus device reset sent" event to CAM. */
	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		ccb->ccb_h.status = CAM_REQ_CMP;
		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;

bailout:

	mpssas_free_tm(sc, tm);
	xpt_done(ccb);
}
3161 
3162 static void
3163 mpssas_poll(struct cam_sim *sim)
3164 {
3165 	struct mpssas_softc *sassc;
3166 
3167 	sassc = cam_sim_softc(sim);
3168 
3169 	if (sassc->sc->mps_debug & MPS_TRACE) {
3170 		/* frequent debug messages during a panic just slow
3171 		 * everything down too much.
3172 		 */
3173 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3174 		sassc->sc->mps_debug &= ~MPS_TRACE;
3175 	}
3176 
3177 	mps_intr_locked(sassc->sc);
3178 }
3179 
/*
 * CAM asynchronous event callback.  Depending on the FreeBSD version
 * (and thus which CAM APIs are available), either react to advertised
 * READ CAPACITY(16) data changes (AC_ADVINFO_CHANGED) to track per-LUN
 * EEDP formatting, or probe newly found devices (AC_FOUND_DEVICE) via
 * mpssas_check_eedp().
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the LUN this event refers to, if we track it. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* Not tracked yet: allocate a LUN entry for it. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read-capacity data for this LUN
		 * via an XPT_DEV_ADVINFO CCB issued on the stack.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = 0;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/* Record whether the LUN is formatted with protection info. */
		if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		/* Older CAM: probe EEDP state with an explicit READ CAP 16. */
		cgd = arg;
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3279 
3280 #if (__FreeBSD_version < 901503) || \
3281     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3282 static void
3283 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3284 		  struct ccb_getdev *cgd)
3285 {
3286 	struct mpssas_softc *sassc = sc->sassc;
3287 	struct ccb_scsiio *csio;
3288 	struct scsi_read_capacity_16 *scsi_cmd;
3289 	struct scsi_read_capacity_eedp *rcap_buf;
3290 	path_id_t pathid;
3291 	target_id_t targetid;
3292 	lun_id_t lunid;
3293 	union ccb *ccb;
3294 	struct cam_path *local_path;
3295 	struct mpssas_target *target;
3296 	struct mpssas_lun *lun;
3297 	uint8_t	found_lun;
3298 	char path_str[64];
3299 
3300 	sassc = sc->sassc;
3301 	pathid = cam_sim_path(sassc->sim);
3302 	targetid = xpt_path_target_id(path);
3303 	lunid = xpt_path_lun_id(path);
3304 
3305 	KASSERT(targetid < sassc->maxtargets,
3306 	    ("Target %d out of bounds in mpssas_check_eedp\n",
3307 	     targetid));
3308 	target = &sassc->targets[targetid];
3309 	if (target->handle == 0x0)
3310 		return;
3311 
3312 	/*
3313 	 * Determine if the device is EEDP capable.
3314 	 *
3315 	 * If this flag is set in the inquiry data,
3316 	 * the device supports protection information,
3317 	 * and must support the 16 byte read
3318 	 * capacity command, otherwise continue without
3319 	 * sending read cap 16
3320 	 */
3321 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3322 		return;
3323 
3324 	/*
3325 	 * Issue a READ CAPACITY 16 command.  This info
3326 	 * is used to determine if the LUN is formatted
3327 	 * for EEDP support.
3328 	 */
3329 	ccb = xpt_alloc_ccb_nowait();
3330 	if (ccb == NULL) {
3331 		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3332 		    "for EEDP support.\n");
3333 		return;
3334 	}
3335 
3336 	if (xpt_create_path(&local_path, xpt_periph,
3337 	    pathid, targetid, lunid) != CAM_REQ_CMP) {
3338 		mps_dprint(sc, MPS_ERROR, "Unable to create "
3339 		    "path for EEDP support\n");
3340 		xpt_free_ccb(ccb);
3341 		return;
3342 	}
3343 
3344 	/*
3345 	 * If LUN is already in list, don't create a new
3346 	 * one.
3347 	 */
3348 	found_lun = FALSE;
3349 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3350 		if (lun->lun_id == lunid) {
3351 			found_lun = TRUE;
3352 			break;
3353 		}
3354 	}
3355 	if (!found_lun) {
3356 		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3357 		    M_NOWAIT | M_ZERO);
3358 		if (lun == NULL) {
3359 			mps_dprint(sc, MPS_ERROR,
3360 			    "Unable to alloc LUN for EEDP support.\n");
3361 			xpt_free_path(local_path);
3362 			xpt_free_ccb(ccb);
3363 			return;
3364 		}
3365 		lun->lun_id = lunid;
3366 		SLIST_INSERT_HEAD(&target->luns, lun,
3367 		    lun_link);
3368 	}
3369 
3370 	xpt_path_string(local_path, path_str, sizeof(path_str));
3371 	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3372 	    path_str, target->handle);
3373 
3374 	/*
3375 	 * Issue a READ CAPACITY 16 command for the LUN.
3376 	 * The mpssas_read_cap_done function will load
3377 	 * the read cap info into the LUN struct.
3378 	 */
3379 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3380 	    M_MPT2, M_NOWAIT | M_ZERO);
3381 	if (rcap_buf == NULL) {
3382 		mps_dprint(sc, MPS_FAULT,
3383 		    "Unable to alloc read capacity buffer for EEDP support.\n");
3384 		xpt_free_path(ccb->ccb_h.path);
3385 		xpt_free_ccb(ccb);
3386 		return;
3387 	}
3388 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3389 	csio = &ccb->csio;
3390 	csio->ccb_h.func_code = XPT_SCSI_IO;
3391 	csio->ccb_h.flags = CAM_DIR_IN;
3392 	csio->ccb_h.retry_count = 4;
3393 	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3394 	csio->ccb_h.timeout = 60000;
3395 	csio->data_ptr = (uint8_t *)rcap_buf;
3396 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3397 	csio->sense_len = MPS_SENSE_LEN;
3398 	csio->cdb_len = sizeof(*scsi_cmd);
3399 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3400 
3401 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3402 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3403 	scsi_cmd->opcode = 0x9E;
3404 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3405 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3406 
3407 	ccb->ccb_h.ppriv_ptr1 = sassc;
3408 	xpt_action(ccb);
3409 }
3410 
/*
 * Completion handler for the READ CAPACITY(16) command issued by
 * mpssas_check_eedp().  Looks up the LUN in the target's LUN list and
 * records whether it is formatted with protection information (EEDP)
 * along with its block size, then releases the buffer, path, and CCB.
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver needs to release the devq itself, since this SCSI
	 * command was generated internally by the driver and will not go
	 * back through cam_periph.  Currently this is the single place
	 * where the driver issues a SCSI command internally; if more are
	 * added in the future, they must release the devq themselves as
	 * well.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mpssas_read_cap_done\n",
	     done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Bit 0 of the PROT byte indicates protection is enabled. */
		if (rcap_buf->protect & 0x01) {
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
 			    "target ID %d is formatted for EEDP "
 			    "support.\n", done_ccb->ccb_h.target_lun,
 			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3481 #endif /* (__FreeBSD_version < 901503) || \
3482           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3483 
3484 int
3485 mpssas_startup(struct mps_softc *sc)
3486 {
3487 
3488 	/*
3489 	 * Send the port enable message and set the wait_for_port_enable flag.
3490 	 * This flag helps to keep the simq frozen until all discovery events
3491 	 * are processed.
3492 	 */
3493 	sc->wait_for_port_enable = 1;
3494 	mpssas_send_portenable(sc);
3495 	return (0);
3496 }
3497 
3498 static int
3499 mpssas_send_portenable(struct mps_softc *sc)
3500 {
3501 	MPI2_PORT_ENABLE_REQUEST *request;
3502 	struct mps_command *cm;
3503 
3504 	MPS_FUNCTRACE(sc);
3505 
3506 	if ((cm = mps_alloc_command(sc)) == NULL)
3507 		return (EBUSY);
3508 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3509 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3510 	request->MsgFlags = 0;
3511 	request->VP_ID = 0;
3512 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3513 	cm->cm_complete = mpssas_portenable_complete;
3514 	cm->cm_data = NULL;
3515 	cm->cm_sge = NULL;
3516 
3517 	mps_map_command(sc, cm);
3518 	mps_dprint(sc, MPS_XINFO,
3519 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3520 	    cm, cm->cm_req, cm->cm_complete);
3521 	return (0);
3522 }
3523 
3524 static void
3525 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3526 {
3527 	MPI2_PORT_ENABLE_REPLY *reply;
3528 	struct mpssas_softc *sassc;
3529 
3530 	MPS_FUNCTRACE(sc);
3531 	sassc = sc->sassc;
3532 
3533 	/*
3534 	 * Currently there should be no way we can hit this case.  It only
3535 	 * happens when we have a failure to allocate chain frames, and
3536 	 * port enable commands don't have S/G lists.
3537 	 */
3538 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3539 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3540 			   "This should not happen!\n", __func__, cm->cm_flags);
3541 	}
3542 
3543 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3544 	if (reply == NULL)
3545 		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3546 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3547 	    MPI2_IOCSTATUS_SUCCESS)
3548 		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3549 
3550 	mps_free_command(sc, cm);
3551 	if (sc->mps_ich.ich_arg != NULL) {
3552 		mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3553 		config_intrhook_disestablish(&sc->mps_ich);
3554 		sc->mps_ich.ich_arg = NULL;
3555 	}
3556 
3557 	/*
3558 	 * Get WarpDrive info after discovery is complete but before the scan
3559 	 * starts.  At this point, all devices are ready to be exposed to the
3560 	 * OS.  If devices should be hidden instead, take them out of the
3561 	 * 'targets' array before the scan.  The devinfo for a disk will have
3562 	 * some info and a volume's will be 0.  Use that to remove disks.
3563 	 */
3564 	mps_wd_config_pages(sc);
3565 
3566 	/*
3567 	 * Done waiting for port enable to complete.  Decrement the refcount.
3568 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3569 	 * take place.  Since the simq was explicitly frozen before port
3570 	 * enable, it must be explicitly released here to keep the
3571 	 * freeze/release count in sync.
3572 	 */
3573 	sc->wait_for_port_enable = 0;
3574 	sc->port_enable_complete = 1;
3575 	wakeup(&sc->port_enable_complete);
3576 	mpssas_startup_decrement(sassc);
3577 }
3578 
3579 int
3580 mpssas_check_id(struct mpssas_softc *sassc, int id)
3581 {
3582 	struct mps_softc *sc = sassc->sc;
3583 	char *ids;
3584 	char *name;
3585 
3586 	ids = &sc->exclude_ids[0];
3587 	while((name = strsep(&ids, ",")) != NULL) {
3588 		if (name[0] == '\0')
3589 			continue;
3590 		if (strtol(name, NULL, 0) == (long)id)
3591 			return (1);
3592 	}
3593 
3594 	return (0);
3595 }
3596