xref: /freebsd/sys/dev/mps/mps_sas.c (revision 38f0b757fd84d17d0fc24739a7cda160c4516d81)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011, 2012 LSI Corp.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * LSI MPT-Fusion Host Adapter FreeBSD
28  *
29  * $FreeBSD$
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 /* Communications core for LSI MPT2 */
36 
37 /* TODO Move headers to mpsvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
44 #include <sys/bus.h>
45 #include <sys/conf.h>
46 #include <sys/bio.h>
47 #include <sys/malloc.h>
48 #include <sys/uio.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/sbuf.h>
55 
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58 #include <sys/rman.h>
59 
60 #include <machine/stdarg.h>
61 
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_xpt.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #if __FreeBSD_version >= 900026
73 #include <cam/scsi/smp_all.h>
74 #endif
75 
76 #include <dev/mps/mpi/mpi2_type.h>
77 #include <dev/mps/mpi/mpi2.h>
78 #include <dev/mps/mpi/mpi2_ioc.h>
79 #include <dev/mps/mpi/mpi2_sas.h>
80 #include <dev/mps/mpi/mpi2_cnfg.h>
81 #include <dev/mps/mpi/mpi2_init.h>
82 #include <dev/mps/mpi/mpi2_tool.h>
83 #include <dev/mps/mps_ioctl.h>
84 #include <dev/mps/mpsvar.h>
85 #include <dev/mps/mps_table.h>
86 #include <dev/mps/mps_sas.h>
87 
88 #define MPSSAS_DISCOVERY_TIMEOUT	20
89 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
90 
/*
 * static array to check SCSI OpCode for EEDP protection bits
 */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Lookup table indexed by SCSI CDB opcode (0x00-0xFF), 16 opcodes per
 * row.  A non-zero entry gives the EEDP flag value to program into the
 * SCSI IO request for that opcode; zero means no EEDP handling.
 * Reads use PRO_R (check/remove), writes use PRO_W (insert), and
 * verify-style opcodes use PRO_V (insert) — NOTE(review): the populated
 * slots appear to correspond to the READ/WRITE/VERIFY CDB variants;
 * confirm against the SCSI opcode assignments before extending.
 */
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
115 
116 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
117 
118 static void mpssas_discovery_timeout(void *data);
119 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mpssas_poll(struct cam_sim *sim);
123 static void mpssas_scsiio_timeout(void *data);
124 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
125 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
126     struct mps_command *cm, union ccb *ccb);
127 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
128 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
129 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
130 #if __FreeBSD_version >= 900026
131 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
132 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
133 			       uint64_t sasaddr);
134 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
135 #endif //FreeBSD_version >= 900026
136 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
137 static int  mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
138 static int  mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
139 static void mpssas_async(void *callback_arg, uint32_t code,
140 			 struct cam_path *path, void *arg);
141 #if (__FreeBSD_version < 901503) || \
142     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
143 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
144 			      struct ccb_getdev *cgd);
145 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
146 #endif
147 static int mpssas_send_portenable(struct mps_softc *sc);
148 static void mpssas_portenable_complete(struct mps_softc *sc,
149     struct mps_command *cm);
150 
151 struct mpssas_target *
152 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
153 {
154 	struct mpssas_target *target;
155 	int i;
156 
157 	for (i = start; i < sassc->maxtargets; i++) {
158 		target = &sassc->targets[i];
159 		if (target->handle == handle)
160 			return (target);
161 	}
162 
163 	return (NULL);
164 }
165 
166 /* we need to freeze the simq during attach and diag reset, to avoid failing
167  * commands before device handles have been found by discovery.  Since
168  * discovery involves reading config pages and possibly sending commands,
169  * discovery actions may continue even after we receive the end of discovery
170  * event, so refcount discovery actions instead of assuming we can unfreeze
171  * the simq when we get the event.
172  */
/*
 * Bump the discovery refcount.  On the 0 -> 1 transition, freeze the CAM
 * simq (and hold boot on CAM versions that support it) so no commands are
 * dispatched before discovery has populated the device handles.  No-op
 * once MPSSAS_IN_STARTUP has been cleared.  Paired with
 * mpssas_startup_decrement().
 */
void
mpssas_startup_increment(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s freezing simq\n", __func__);
#if __FreeBSD_version >= 1000039
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
192 
/*
 * Drop the discovery refcount taken by mpssas_startup_increment().  When
 * the count reaches zero, startup is over: clear MPSSAS_IN_STARTUP,
 * release the simq, and either release the boot hold (newer CAM) or kick
 * off a full-bus rescan so the initial topology is discovered.
 */
void
mpssas_startup_decrement(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPSSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if __FreeBSD_version >= 1000039
			xpt_release_boot();
#else
			mpssas_rescan_target(sassc->sc, NULL);
#endif
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
217 
218 /* LSI's firmware requires us to stop sending commands when we're doing task
219  * management, so refcount the TMs and keep the simq frozen when any are in
220  * use.
221  */
222 struct mps_command *
223 mpssas_alloc_tm(struct mps_softc *sc)
224 {
225 	struct mps_command *tm;
226 
227 	MPS_FUNCTRACE(sc);
228 	tm = mps_alloc_high_priority_command(sc);
229 	if (tm != NULL) {
230 		if (sc->sassc->tm_count++ == 0) {
231 			mps_dprint(sc, MPS_RECOVERY,
232 			    "%s freezing simq\n", __func__);
233 			xpt_freeze_simq(sc->sassc->sim, 1);
234 		}
235 		mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
236 		    sc->sassc->tm_count);
237 	}
238 	return tm;
239 }
240 
/*
 * Return a task-management command to the high-priority pool.  Safe to
 * call with tm == NULL (no-op).  When the driver's private TM refcount
 * drops to zero, release the simq frozen by mpssas_alloc_tm().
 */
void
mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
{
	mps_dprint(sc, MPS_TRACE, "%s", __func__);
	if (tm == NULL)
		return;

	/* if there are no TMs in use, we can release the simq.  We use our
	 * own refcount so that it's easier for a diag reset to cleanup and
	 * release the simq.
	 */
	if (--sc->sassc->tm_count == 0) {
		mps_dprint(sc, MPS_RECOVERY, "%s releasing simq\n", __func__);
		xpt_release_simq(sc->sassc->sim, 1);
	}
	mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
	    sc->sassc->tm_count);

	mps_free_high_priority_command(sc, tm);
}
261 
262 void
263 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
264 {
265 	struct mpssas_softc *sassc = sc->sassc;
266 	path_id_t pathid;
267 	target_id_t targetid;
268 	union ccb *ccb;
269 
270 	MPS_FUNCTRACE(sc);
271 	pathid = cam_sim_path(sassc->sim);
272 	if (targ == NULL)
273 		targetid = CAM_TARGET_WILDCARD;
274 	else
275 		targetid = targ - sassc->targets;
276 
277 	/*
278 	 * Allocate a CCB and schedule a rescan.
279 	 */
280 	ccb = xpt_alloc_ccb_nowait();
281 	if (ccb == NULL) {
282 		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
283 		return;
284 	}
285 
286 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
287 	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
288 		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
289 		xpt_free_ccb(ccb);
290 		return;
291 	}
292 
293 	if (targetid == CAM_TARGET_WILDCARD)
294 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
295 	else
296 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
297 
298 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
299 	xpt_rescan(ccb);
300 }
301 
/*
 * printf-style debug logging for a command, prefixed with its CAM path
 * (or a "(noperiph:...)" synthetic path when no CCB is attached) and its
 * SMID.  Output is built in a fixed on-stack sbuf and emitted through
 * mps_dprint_field() only when 'level' is enabled in mps_debug.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];		/* fixed backing store; long messages truncate */
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	/* flags == 0 gives a fixed-length sbuf over 'str'. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		/* Prefix with the CCB's CAM path and, for SCSI I/O, the CDB. */
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: synthesize a sim:bus:target:lun prefix. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));

	va_end(ap);
}
347 
348 
349 static void
350 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
351 {
352 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
353 	struct mpssas_target *targ;
354 	uint16_t handle;
355 
356 	MPS_FUNCTRACE(sc);
357 
358 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
359 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
360 	targ = tm->cm_targ;
361 
362 	if (reply == NULL) {
363 		/* XXX retry the remove after the diag reset completes? */
364 		mps_dprint(sc, MPS_FAULT,
365 		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
366 		mpssas_free_tm(sc, tm);
367 		return;
368 	}
369 
370 	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
371 		mps_dprint(sc, MPS_FAULT,
372 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
373 		   reply->IOCStatus, handle);
374 		mpssas_free_tm(sc, tm);
375 		return;
376 	}
377 
378 	mps_dprint(sc, MPS_XINFO,
379 	    "Reset aborted %u commands\n", reply->TerminationCount);
380 	mps_free_reply(sc, tm->cm_reply_data);
381 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
382 
383 	mps_dprint(sc, MPS_XINFO,
384 	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
385 
386 	/*
387 	 * Don't clear target if remove fails because things will get confusing.
388 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
389 	 * this target id if possible, and so we can assign the same target id
390 	 * to this device if it comes back in the future.
391 	 */
392 	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
393 		targ = tm->cm_targ;
394 		targ->handle = 0x0;
395 		targ->encl_handle = 0x0;
396 		targ->encl_slot = 0x0;
397 		targ->exp_dev_handle = 0x0;
398 		targ->phy_num = 0x0;
399 		targ->linkrate = 0x0;
400 		targ->devinfo = 0x0;
401 		targ->flags = 0x0;
402 	}
403 
404 	mpssas_free_tm(sc, tm);
405 }
406 
407 
408 /*
409  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
410  * Otherwise Volume Delete is same as Bare Drive Removal.
411  */
412 void
413 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
414 {
415 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
416 	struct mps_softc *sc;
417 	struct mps_command *cm;
418 	struct mpssas_target *targ = NULL;
419 
420 	MPS_FUNCTRACE(sassc->sc);
421 	sc = sassc->sc;
422 
423 #ifdef WD_SUPPORT
424 	/*
425 	 * If this is a WD controller, determine if the disk should be exposed
426 	 * to the OS or not.  If disk should be exposed, return from this
427 	 * function without doing anything.
428 	 */
429 	if (sc->WD_available && (sc->WD_hide_expose ==
430 	    MPS_WD_EXPOSE_ALWAYS)) {
431 		return;
432 	}
433 #endif //WD_SUPPORT
434 
435 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
436 	if (targ == NULL) {
437 		/* FIXME: what is the action? */
438 		/* We don't know about this device? */
439 		mps_dprint(sc, MPS_ERROR,
440 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
441 		return;
442 	}
443 
444 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
445 
446 	cm = mpssas_alloc_tm(sc);
447 	if (cm == NULL) {
448 		mps_dprint(sc, MPS_ERROR,
449 		    "%s: command alloc failure\n", __func__);
450 		return;
451 	}
452 
453 	mpssas_rescan_target(sc, targ);
454 
455 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
456 	req->DevHandle = targ->handle;
457 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
458 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
459 
460 	/* SAS Hard Link Reset / SATA Link Reset */
461 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
462 
463 	cm->cm_targ = targ;
464 	cm->cm_data = NULL;
465 	cm->cm_desc.HighPriority.RequestFlags =
466 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
467 	cm->cm_complete = mpssas_remove_volume;
468 	cm->cm_complete_data = (void *)(uintptr_t)handle;
469 	mps_map_command(sc, cm);
470 }
471 
472 /*
473  * The MPT2 firmware performs debounce on the link to avoid transient link
474  * errors and false removals.  When it does decide that link has been lost
475  * and a device need to go away, it expects that the host will perform a
476  * target reset and then an op remove.  The reset has the side-effect of
477  * aborting any outstanding requests for the device, which is required for
478  * the op-remove to succeed.  It's not clear if the host should check for
479  * the device coming back alive after the reset.
480  */
481 void
482 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
483 {
484 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
485 	struct mps_softc *sc;
486 	struct mps_command *cm;
487 	struct mpssas_target *targ = NULL;
488 
489 	MPS_FUNCTRACE(sassc->sc);
490 
491 	sc = sassc->sc;
492 
493 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
494 	if (targ == NULL) {
495 		/* FIXME: what is the action? */
496 		/* We don't know about this device? */
497 		mps_dprint(sc, MPS_ERROR,
498 		    "%s : invalid handle 0x%x \n", __func__, handle);
499 		return;
500 	}
501 
502 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
503 
504 	cm = mpssas_alloc_tm(sc);
505 	if (cm == NULL) {
506 		mps_dprint(sc, MPS_ERROR,
507 		    "%s: command alloc failure\n", __func__);
508 		return;
509 	}
510 
511 	mpssas_rescan_target(sc, targ);
512 
513 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
514 	memset(req, 0, sizeof(*req));
515 	req->DevHandle = htole16(targ->handle);
516 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
517 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
518 
519 	/* SAS Hard Link Reset / SATA Link Reset */
520 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
521 
522 	cm->cm_targ = targ;
523 	cm->cm_data = NULL;
524 	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
525 	cm->cm_complete = mpssas_remove_device;
526 	cm->cm_complete_data = (void *)(uintptr_t)handle;
527 	mps_map_command(sc, cm);
528 }
529 
/*
 * Completion handler for the target-reset TM from mpssas_prepare_remove().
 * If the reset succeeded, the same command is re-purposed to send the
 * SAS_IO_UNIT_CONTROL op-remove, and any commands still queued on the
 * target are completed back to CAM with CAM_DEV_NOT_THERE.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	/* Reply fields are little-endian on the wire; convert before use. */
	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_FAULT,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	/*
	 * Complete any commands still queued on this target.  NOTE: 'tm'
	 * is reused here as the loop iterator; the op-remove command
	 * dispatched above is no longer referenced through it.  For queued
	 * SCSI I/O, cm_complete_data holds the originating CCB.
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		mpssas_scsiio_complete(sc, tm);
	}
}
604 
/*
 * Completion handler for the SAS_IO_UNIT_CONTROL op-remove sent by
 * mpssas_remove_device().  On success, clears the target's cached state
 * and frees any per-LUN records, finishing the removal sequence.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Free the per-LUN records attached to this target. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
671 
672 static int
673 mpssas_register_events(struct mps_softc *sc)
674 {
675 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
676 
677 	bzero(events, 16);
678 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
679 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
680 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
681 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
682 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
683 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
684 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
685 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
686 	setbit(events, MPI2_EVENT_IR_VOLUME);
687 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
688 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
689 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
690 
691 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
692 	    &sc->sassc->mpssas_eh);
693 
694 	return (0);
695 }
696 
/*
 * Attach the CAM SAS layer: allocate the per-SIM softc and target array,
 * create and register the SIM, start the event taskqueue, freeze the simq
 * until discovery completes, and register async/firmware event handlers.
 * Returns 0 on success or an errno; on failure mps_detach_sas() unwinds
 * whatever was set up.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0;

	MPS_FUNCTRACE(sc);

	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	/* NOTE(review): M_WAITOK malloc cannot return NULL; this check is
	 * vestigial. */
	if(!sassc) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	/* NOTE(review): same vestigial check — M_WAITOK cannot fail. */
	if(!sassc->targets) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		free(sassc, M_MPT2);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* One simq slot per firmware request frame. */
	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);

	/* Run the task queue with lowest priority */
	taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
		    error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	/* Freezes the simq until discovery finishes. */
	mpssas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
	sassc->discovery_timeouts = 0;

	sassc->tm_count = 0;

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

		/* Newer CAM delivers AC_ADVINFO_CHANGED; older only
		 * AC_FOUND_DEVICE. */
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED;
#else
		event = AC_FOUND_DEVICE;
#endif
		status = xpt_register_async(event, mpssas_async, sc,
					    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);
	return (error);
}
830 
/*
 * Tear down the CAM SAS layer: deregister event and async handlers, drain
 * the event taskqueue (with the lock dropped to avoid deadlock), unwind
 * the SIM/simq, free per-target LUN records and the softc.  Safe to call
 * on a partially-attached instance (used by the mps_attach_sas() error
 * path).  Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* If startup never completed, the simq is still frozen; release it. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	sassc->flags |= MPSSAS_SHUTDOWN;
	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free per-LUN records hanging off every target slot. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
891 
892 void
893 mpssas_discovery_end(struct mpssas_softc *sassc)
894 {
895 	struct mps_softc *sc = sassc->sc;
896 
897 	MPS_FUNCTRACE(sc);
898 
899 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
900 		callout_stop(&sassc->discovery_callout);
901 
902 }
903 
/*
 * Discovery watchdog.  Fires when discovery has not completed within
 * MPSSAS_DISCOVERY_TIMEOUT seconds — typically a sign that interrupts
 * are not being delivered.  Polls the hardware once, then either ends
 * discovery, re-arms itself (up to MPSSAS_MAX_DISCOVERY_TIMEOUTS tries),
 * or gives up and forces discovery to end.
 */
static void
mpssas_discovery_timeout(void *data)
{
	struct mpssas_softc *sassc = data;
	struct mps_softc *sc;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);

	mps_lock(sc);
	mps_dprint(sc, MPS_INFO,
	    "Timeout waiting for discovery, interrupts may not be working!\n");
	sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;

	/* Poll the hardware for events in case interrupts aren't working */
	mps_intr_locked(sc);

	mps_dprint(sassc->sc, MPS_INFO,
	    "Finished polling after discovery timeout at %d\n", ticks);

	if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
		/* Polling completed discovery; clean up normally. */
		mpssas_discovery_end(sassc);
	} else {
		if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
			/* Still discovering: re-arm the watchdog. */
			sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
			callout_reset(&sassc->discovery_callout,
			    MPSSAS_DISCOVERY_TIMEOUT * hz,
			    mpssas_discovery_timeout, sassc);
			sassc->discovery_timeouts++;
		} else {
			/* Out of retries; force discovery to end. */
			mps_dprint(sassc->sc, MPS_FAULT,
			    "Discovery timed out, continuing.\n");
			sassc->flags &= ~MPSSAS_IN_DISCOVERY;
			mpssas_discovery_end(sassc);
		}
	}

	mps_unlock(sc);
}
943 
/*
 * CAM SIM action entry point.  Dispatches a CCB by its function code:
 * path inquiries and transfer-settings queries are answered inline,
 * SCSI I/O, SMP I/O and device resets are handed off to dedicated
 * handlers (those handlers complete the CCB themselves, hence the bare
 * returns), and unsupported function codes are failed with
 * CAM_FUNC_NOTAVAIL.  Called with the controller mutex held.
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		/* Describe the HBA's capabilities and identity to CAM. */
		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;
		/* The initiator is reported as the highest target id. */
		cpi->initiator_id = sassc->maxtargets - 1;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXX KDM where does this number come from?
		 */
		cpi->maxio = 256 * 1024;
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A zero device handle means the target is not present. */
		if (targ->handle == 0x0) {
			cts->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Map the negotiated SAS link rate code to a bit rate. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		cts->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		/* Handler completes the CCB itself. */
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_SCSI_IO:
		/* Handler completes the CCB itself. */
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		/* Handler completes the CCB itself. */
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	}
	xpt_done(ccb);

}
1068 
1069 static void
1070 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1071     target_id_t target_id, lun_id_t lun_id)
1072 {
1073 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1074 	struct cam_path *path;
1075 
1076 	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1077 	    ac_code, target_id, (uintmax_t)lun_id);
1078 
1079 	if (xpt_create_path(&path, NULL,
1080 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1081 		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1082 			   "notification\n");
1083 		return;
1084 	}
1085 
1086 	xpt_async(ac_code, path, NULL);
1087 	xpt_free_path(path);
1088 }
1089 
/*
 * Force completion of every in-flight command after a diag reset.  The
 * hardware will never answer these requests anymore, so each command is
 * finished with a NULL reply: completion callbacks are invoked and
 * sleeping waiters are woken.  Busy commands that have neither a callback
 * nor a waiter are logged, since that should never happen.
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Mark polled commands done so their submitter can notice
		 * (presumably the polling loop checks this flag). */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		}

		/* Wake sleeping submitters; they will see the NULL reply. */
		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1135 
1136 void
1137 mpssas_handle_reinit(struct mps_softc *sc)
1138 {
1139 	int i;
1140 
1141 	/* Go back into startup mode and freeze the simq, so that CAM
1142 	 * doesn't send any commands until after we've rediscovered all
1143 	 * targets and found the proper device handles for them.
1144 	 *
1145 	 * After the reset, portenable will trigger discovery, and after all
1146 	 * discovery-related activities have finished, the simq will be
1147 	 * released.
1148 	 */
1149 	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1150 	sc->sassc->flags |= MPSSAS_IN_STARTUP;
1151 	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1152 	mpssas_startup_increment(sc->sassc);
1153 
1154 	/* notify CAM of a bus reset */
1155 	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1156 	    CAM_LUN_WILDCARD);
1157 
1158 	/* complete and cleanup after all outstanding commands */
1159 	mpssas_complete_all_commands(sc);
1160 
1161 	mps_dprint(sc, MPS_INIT,
1162 	    "%s startup %u tm %u after command completion\n",
1163 	    __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1164 
1165 	/* zero all the target handles, since they may change after the
1166 	 * reset, and we have to rediscover all the targets and use the new
1167 	 * handles.
1168 	 */
1169 	for (i = 0; i < sc->sassc->maxtargets; i++) {
1170 		if (sc->sassc->targets[i].outstanding != 0)
1171 			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1172 			    i, sc->sassc->targets[i].outstanding);
1173 		sc->sassc->targets[i].handle = 0x0;
1174 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1175 		sc->sassc->targets[i].outstanding = 0;
1176 		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
1177 	}
1178 }
1179 
1180 static void
1181 mpssas_tm_timeout(void *data)
1182 {
1183 	struct mps_command *tm = data;
1184 	struct mps_softc *sc = tm->cm_sc;
1185 
1186 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1187 
1188 	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1189 	    "task mgmt %p timed out\n", tm);
1190 	mps_reinit(sc);
1191 }
1192 
1193 static void
1194 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1195 {
1196 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1197 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1198 	unsigned int cm_count = 0;
1199 	struct mps_command *cm;
1200 	struct mpssas_target *targ;
1201 
1202 	callout_stop(&tm->cm_callout);
1203 
1204 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1205 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1206 	targ = tm->cm_targ;
1207 
1208 	/*
1209 	 * Currently there should be no way we can hit this case.  It only
1210 	 * happens when we have a failure to allocate chain frames, and
1211 	 * task management commands don't have S/G lists.
1212 	 * XXXSL So should it be an assertion?
1213 	 */
1214 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1215 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1216 			   "This should not happen!\n", __func__, tm->cm_flags);
1217 		mpssas_free_tm(sc, tm);
1218 		return;
1219 	}
1220 
1221 	if (reply == NULL) {
1222 		mpssas_log_command(tm, MPS_RECOVERY,
1223 		    "NULL reset reply for tm %p\n", tm);
1224 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1225 			/* this completion was due to a reset, just cleanup */
1226 			targ->flags &= ~MPSSAS_TARGET_INRESET;
1227 			targ->tm = NULL;
1228 			mpssas_free_tm(sc, tm);
1229 		}
1230 		else {
1231 			/* we should have gotten a reply. */
1232 			mps_reinit(sc);
1233 		}
1234 		return;
1235 	}
1236 
1237 	mpssas_log_command(tm, MPS_RECOVERY,
1238 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1239 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1240 	    le32toh(reply->TerminationCount));
1241 
1242 	/* See if there are any outstanding commands for this LUN.
1243 	 * This could be made more efficient by using a per-LU data
1244 	 * structure of some sort.
1245 	 */
1246 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1247 		if (cm->cm_lun == tm->cm_lun)
1248 			cm_count++;
1249 	}
1250 
1251 	if (cm_count == 0) {
1252 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1253 		    "logical unit %u finished recovery after reset\n",
1254 		    tm->cm_lun, tm);
1255 
1256 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1257 		    tm->cm_lun);
1258 
1259 		/* we've finished recovery for this logical unit.  check and
1260 		 * see if some other logical unit has a timedout command
1261 		 * that needs to be processed.
1262 		 */
1263 		cm = TAILQ_FIRST(&targ->timedout_commands);
1264 		if (cm) {
1265 			mpssas_send_abort(sc, tm, cm);
1266 		}
1267 		else {
1268 			targ->tm = NULL;
1269 			mpssas_free_tm(sc, tm);
1270 		}
1271 	}
1272 	else {
1273 		/* if we still have commands for this LUN, the reset
1274 		 * effectively failed, regardless of the status reported.
1275 		 * Escalate to a target reset.
1276 		 */
1277 		mpssas_log_command(tm, MPS_RECOVERY,
1278 		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1279 		    tm, cm_count);
1280 		mpssas_send_reset(sc, tm,
1281 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1282 	}
1283 }
1284 
/*
 * Completion handler for a TARGET RESET task management request.  A NULL
 * reply during a diag reset is just cleaned up; a NULL reply otherwise
 * forces a controller reinit.  If the target has no outstanding commands
 * left, recovery is finished and CAM is notified with AC_SENT_BDR; if
 * commands are still outstanding the reset effectively failed and the
 * driver escalates to a full controller reinit.
 */
static void
mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed; disarm its watchdog. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->flags &= ~MPSSAS_TARGET_INRESET;
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	targ->flags &= ~MPSSAS_TARGET_INRESET;

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "recovery finished after target reset\n");

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "target reset complete for tm %p, but still have %u command(s)\n",
		    tm, targ->outstanding);
		mps_reinit(sc);
	}
}
1357 
1358 #define MPS_RESET_TIMEOUT 30
1359 
1360 static int
1361 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1362 {
1363 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1364 	struct mpssas_target *target;
1365 	int err;
1366 
1367 	target = tm->cm_targ;
1368 	if (target->handle == 0) {
1369 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1370 		    __func__, target->tid);
1371 		return -1;
1372 	}
1373 
1374 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1375 	req->DevHandle = htole16(target->handle);
1376 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1377 	req->TaskType = type;
1378 
1379 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1380 		/* XXX Need to handle invalid LUNs */
1381 		MPS_SET_LUN(req->LUN, tm->cm_lun);
1382 		tm->cm_targ->logical_unit_resets++;
1383 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1384 		    "sending logical unit reset\n");
1385 		tm->cm_complete = mpssas_logical_unit_reset_complete;
1386 	}
1387 	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1388 		/* Target reset method =  SAS Hard Link Reset / SATA Link Reset */
1389 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1390 		tm->cm_targ->target_resets++;
1391 		tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1392 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1393 		    "sending target reset\n");
1394 		tm->cm_complete = mpssas_target_reset_complete;
1395 	}
1396 	else {
1397 		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
1398 		return -1;
1399 	}
1400 
1401 	tm->cm_data = NULL;
1402 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1403 	tm->cm_complete_data = (void *)tm;
1404 
1405 	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1406 	    mpssas_tm_timeout, tm);
1407 
1408 	err = mps_map_command(sc, tm);
1409 	if (err)
1410 		mpssas_log_command(tm, MPS_RECOVERY,
1411 		    "error %d sending reset type %u\n",
1412 		    err, type);
1413 
1414 	return err;
1415 }
1416 
1417 
/*
 * Completion handler for an ABORT TASK task management request.  A NULL
 * reply during a diag reset is just cleaned up; a NULL reply otherwise
 * forces a controller reinit.  On a real reply: if the target's
 * timed-out list is empty, recovery is finished; if the head of the list
 * is a different command than the one just aborted, the abort succeeded
 * and the TM frame is reused to abort the next one; if the head is still
 * the command we tried to abort, the abort failed and recovery escalates
 * to a logical unit reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed; disarm its watchdog. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1499 
1500 #define MPS_ABORT_TIMEOUT 5
1501 
1502 static int
1503 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1504 {
1505 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1506 	struct mpssas_target *targ;
1507 	int err;
1508 
1509 	targ = cm->cm_targ;
1510 	if (targ->handle == 0) {
1511 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1512 		    __func__, cm->cm_ccb->ccb_h.target_id);
1513 		return -1;
1514 	}
1515 
1516 	mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1517 	    "Aborting command %p\n", cm);
1518 
1519 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1520 	req->DevHandle = htole16(targ->handle);
1521 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1522 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1523 
1524 	/* XXX Need to handle invalid LUNs */
1525 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1526 
1527 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1528 
1529 	tm->cm_data = NULL;
1530 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1531 	tm->cm_complete = mpssas_abort_complete;
1532 	tm->cm_complete_data = (void *)tm;
1533 	tm->cm_targ = cm->cm_targ;
1534 	tm->cm_lun = cm->cm_lun;
1535 
1536 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1537 	    mpssas_tm_timeout, tm);
1538 
1539 	targ->aborts++;
1540 
1541 	err = mps_map_command(sc, tm);
1542 	if (err)
1543 		mpssas_log_command(tm, MPS_RECOVERY,
1544 		    "error %d sending abort for cm %p SMID %u\n",
1545 		    err, cm, req->TaskMID);
1546 	return err;
1547 }
1548 
1549 
1550 static void
1551 mpssas_scsiio_timeout(void *data)
1552 {
1553 	struct mps_softc *sc;
1554 	struct mps_command *cm;
1555 	struct mpssas_target *targ;
1556 
1557 	cm = (struct mps_command *)data;
1558 	sc = cm->cm_sc;
1559 
1560 	MPS_FUNCTRACE(sc);
1561 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1562 
1563 	mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1564 
1565 	/*
1566 	 * Run the interrupt handler to make sure it's not pending.  This
1567 	 * isn't perfect because the command could have already completed
1568 	 * and been re-used, though this is unlikely.
1569 	 */
1570 	mps_intr_locked(sc);
1571 	if (cm->cm_state == MPS_CM_STATE_FREE) {
1572 		mpssas_log_command(cm, MPS_XINFO,
1573 		    "SCSI command %p almost timed out\n", cm);
1574 		return;
1575 	}
1576 
1577 	if (cm->cm_ccb == NULL) {
1578 		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1579 		return;
1580 	}
1581 
1582 	mpssas_log_command(cm, MPS_INFO, "command timeout cm %p ccb %p\n",
1583 	    cm, cm->cm_ccb);
1584 
1585 	targ = cm->cm_targ;
1586 	targ->timeouts++;
1587 
1588 	/* XXX first, check the firmware state, to see if it's still
1589 	 * operational.  if not, do a diag reset.
1590 	 */
1591 
1592 	cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1593 	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1594 	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1595 
1596 	if (targ->tm != NULL) {
1597 		/* target already in recovery, just queue up another
1598 		 * timedout command to be processed later.
1599 		 */
1600 		mps_dprint(sc, MPS_RECOVERY,
1601 		    "queued timedout cm %p for processing by tm %p\n",
1602 		    cm, targ->tm);
1603 	}
1604 	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1605 		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1606 		    cm, targ->tm);
1607 
1608 		/* start recovery by aborting the first timedout command */
1609 		mpssas_send_abort(sc, targ->tm, cm);
1610 	}
1611 	else {
1612 		/* XXX queue this target up for recovery once a TM becomes
1613 		 * available.  The firmware only has a limited number of
1614 		 * HighPriority credits for the high priority requests used
1615 		 * for task management, and we ran out.
1616 		 *
1617 		 * Isilon: don't worry about this for now, since we have
1618 		 * more credits than disks in an enclosure, and limit
1619 		 * ourselves to one TM per target for recovery.
1620 		 */
1621 		mps_dprint(sc, MPS_RECOVERY,
1622 		    "timedout cm %p failed to allocate a tm\n", cm);
1623 	}
1624 
1625 }
1626 
/*
 * Build and dispatch a SCSI I/O request for an XPT_SCSI_IO CCB.
 * Validates the target (present, not a RAID component, not being
 * removed, not shutting down), allocates a command frame, translates the
 * CCB's direction/tag/CDB/LUN into an MPI2 SCSI_IO request, sets up EEDP
 * (protection information) when the LUN is formatted for it, optionally
 * attempts WarpDrive direct I/O, arms the per-command timeout, and hands
 * the command to the hardware.  The CCB is completed here only on the
 * early error paths; otherwise completion happens via
 * mpssas_scsiio_complete().
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero device handle means the target is not present. */
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			csio->ccb_h.status = CAM_REQ_CMP;
		else
			csio->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		csio->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}

	/* Out of command frames: freeze the simq and have CAM requeue. */
	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Fill in the MPI2 SCSI IO request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		ccb->ccb_h.status = CAM_LUN_INVALID;
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB in from whichever representation the CCB carries. */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	/* NOTE(review): IoFlags was already assigned the same value above;
	 * this re-assignment is redundant but harmless. */
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* lun is NULL after the loop if no matching LUN was found. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.status != MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			ccb->ccb_h.status = CAM_REQ_INPROG;
		}
	}

	/* CCB timeout is in milliseconds; convert to ticks. */
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	   mpssas_scsiio_timeout, cm);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1879 
1880 static void
1881 mps_response_code(struct mps_softc *sc, u8 response_code)
1882 {
1883         char *desc;
1884 
1885         switch (response_code) {
1886         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1887                 desc = "task management request completed";
1888                 break;
1889         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1890                 desc = "invalid frame";
1891                 break;
1892         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1893                 desc = "task management request not supported";
1894                 break;
1895         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1896                 desc = "task management request failed";
1897                 break;
1898         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1899                 desc = "task management request succeeded";
1900                 break;
1901         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1902                 desc = "invalid lun";
1903                 break;
1904         case 0xA:
1905                 desc = "overlapped tag attempted";
1906                 break;
1907         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1908                 desc = "task queued, however not sent to target";
1909                 break;
1910         default:
1911                 desc = "unknown";
1912                 break;
1913         }
1914 		mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1915                 response_code, desc);
1916 }
1917 /**
1918  * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1919  */
1920 static void
1921 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1922     Mpi2SCSIIOReply_t *mpi_reply)
1923 {
1924 	u32 response_info;
1925 	u8 *response_bytes;
1926 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1927 	    MPI2_IOCSTATUS_MASK;
1928 	u8 scsi_state = mpi_reply->SCSIState;
1929 	u8 scsi_status = mpi_reply->SCSIStatus;
1930 	char *desc_ioc_state = NULL;
1931 	char *desc_scsi_status = NULL;
1932 	char *desc_scsi_state = sc->tmp_string;
1933 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1934 
1935 	if (log_info == 0x31170000)
1936 		return;
1937 
1938 	switch (ioc_status) {
1939 	case MPI2_IOCSTATUS_SUCCESS:
1940 		desc_ioc_state = "success";
1941 		break;
1942 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1943 		desc_ioc_state = "invalid function";
1944 		break;
1945 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1946 		desc_ioc_state = "scsi recovered error";
1947 		break;
1948 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1949 		desc_ioc_state = "scsi invalid dev handle";
1950 		break;
1951 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1952 		desc_ioc_state = "scsi device not there";
1953 		break;
1954 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1955 		desc_ioc_state = "scsi data overrun";
1956 		break;
1957 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1958 		desc_ioc_state = "scsi data underrun";
1959 		break;
1960 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1961 		desc_ioc_state = "scsi io data error";
1962 		break;
1963 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1964 		desc_ioc_state = "scsi protocol error";
1965 		break;
1966 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1967 		desc_ioc_state = "scsi task terminated";
1968 		break;
1969 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1970 		desc_ioc_state = "scsi residual mismatch";
1971 		break;
1972 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1973 		desc_ioc_state = "scsi task mgmt failed";
1974 		break;
1975 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1976 		desc_ioc_state = "scsi ioc terminated";
1977 		break;
1978 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1979 		desc_ioc_state = "scsi ext terminated";
1980 		break;
1981 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1982 		desc_ioc_state = "eedp guard error";
1983 		break;
1984 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1985 		desc_ioc_state = "eedp ref tag error";
1986 		break;
1987 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1988 		desc_ioc_state = "eedp app tag error";
1989 		break;
1990 	default:
1991 		desc_ioc_state = "unknown";
1992 		break;
1993 	}
1994 
1995 	switch (scsi_status) {
1996 	case MPI2_SCSI_STATUS_GOOD:
1997 		desc_scsi_status = "good";
1998 		break;
1999 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
2000 		desc_scsi_status = "check condition";
2001 		break;
2002 	case MPI2_SCSI_STATUS_CONDITION_MET:
2003 		desc_scsi_status = "condition met";
2004 		break;
2005 	case MPI2_SCSI_STATUS_BUSY:
2006 		desc_scsi_status = "busy";
2007 		break;
2008 	case MPI2_SCSI_STATUS_INTERMEDIATE:
2009 		desc_scsi_status = "intermediate";
2010 		break;
2011 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2012 		desc_scsi_status = "intermediate condmet";
2013 		break;
2014 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2015 		desc_scsi_status = "reservation conflict";
2016 		break;
2017 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2018 		desc_scsi_status = "command terminated";
2019 		break;
2020 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
2021 		desc_scsi_status = "task set full";
2022 		break;
2023 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
2024 		desc_scsi_status = "aca active";
2025 		break;
2026 	case MPI2_SCSI_STATUS_TASK_ABORTED:
2027 		desc_scsi_status = "task aborted";
2028 		break;
2029 	default:
2030 		desc_scsi_status = "unknown";
2031 		break;
2032 	}
2033 
2034 	desc_scsi_state[0] = '\0';
2035 	if (!scsi_state)
2036 		desc_scsi_state = " ";
2037 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2038 		strcat(desc_scsi_state, "response info ");
2039 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2040 		strcat(desc_scsi_state, "state terminated ");
2041 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2042 		strcat(desc_scsi_state, "no status ");
2043 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2044 		strcat(desc_scsi_state, "autosense failed ");
2045 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2046 		strcat(desc_scsi_state, "autosense valid ");
2047 
2048 	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2049 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2050 	/* We can add more detail about underflow data here
2051 	 * TO-DO
2052 	 * */
2053 	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
2054 	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2055 	    desc_scsi_state, scsi_state);
2056 
2057 	if (sc->mps_debug & MPS_XINFO &&
2058 		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2059 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
2060 		scsi_sense_print(csio);
2061 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
2062 	}
2063 
2064 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2065 		response_info = le32toh(mpi_reply->ResponseInfo);
2066 		response_bytes = (u8 *)&response_info;
2067 		mps_response_code(sc,response_bytes[0]);
2068 	}
2069 }
2070 
/*
 * Completion handler for XPT_SCSI_IO commands.  Syncs and unloads the data
 * DMA map, does per-target accounting, translates the MPI2 SCSI IO reply
 * (if present) into a CAM status, re-issues failed Direct Drive I/O to the
 * original IR volume, and completes the CCB back to CAM.  Called with the
 * softc mutex held.
 */
static void
mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpssas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* The command completed, so its timeout callout no longer applies. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Per-target accounting: this command is no longer outstanding. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

	/*
	 * Log (at recovery level) completions that happen while error
	 * recovery is in progress for this command or its target.
	 */
	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm, MPS_RECOVERY,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		ccb->ccb_h.status = CAM_REQUEUE_REQ;

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_XINFO, "Error sending command, "
				   "freezing SIM queue\n");
		}
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			/* No reply and no error flags: treat as success,
			 * unless a diag reset is in progress. */
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
			else {
				ccb->ccb_h.status = CAM_REQ_CMP;
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order with after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mpssas_log_command(cm, MPS_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		ccb->ccb_h.status = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	}

	/* Map the MPI2 IOC status from the reply onto a CAM status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			ccb->ccb_h.status = CAM_REQ_ABORTED;
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			ccb->ccb_h.status = CAM_REQ_CMP;
		else
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;

		/* Copy autosense data into the CCB, capped at both the
		 * returned length and the caller's sense buffer size. */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
					returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			      sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
		    T_SEQUENTIAL) && (sc->control_TLR) &&
		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
			    TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* alloc_len is the INQUIRY allocation length
			 * (CDB bytes 3-4, big-endian). */
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			ccb->ccb_h.status = CAM_REQ_CMP;
		else
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
		else
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * Since these are generally external (i.e. hopefully
		 * transient transport-related) errors, retry these without
		 * decrementing the retry count.
		 */
		ccb->ccb_h.status = CAM_REQUEUE_REQ;
		mpssas_log_command(cm, MPS_INFO,
		    "terminated ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mpssas_log_command(cm, MPS_XINFO,
		    "completed ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		break;
	}

	/* Emit detailed debug-level information about this completion.
	 * NOTE(review): called for all completions, not only failures —
	 * the callee filters via the MPS_XINFO debug level. */
	mps_sc_failed_io_info(sc,csio,rep);

	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_XINFO, "Command completed, "
		    "unfreezing SIM queue\n");
	}

	/* Freeze the device queue on any non-successful completion so CAM
	 * error recovery sees commands in order. */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2426 
2427 /* All Request reached here are Endian safe */
2428 static void
2429 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2430     union ccb *ccb) {
2431 	pMpi2SCSIIORequest_t	pIO_req;
2432 	struct mps_softc	*sc = sassc->sc;
2433 	uint64_t		virtLBA;
2434 	uint32_t		physLBA, stripe_offset, stripe_unit;
2435 	uint32_t		io_size, column;
2436 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2437 
2438 	/*
2439 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2440 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2441 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2442 	 * bit different than the 10/16 CDBs, handle them separately.
2443 	 */
2444 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2445 	CDB = pIO_req->CDB.CDB32;
2446 
2447 	/*
2448 	 * Handle 6 byte CDBs.
2449 	 */
2450 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2451 	    (CDB[0] == WRITE_6))) {
2452 		/*
2453 		 * Get the transfer size in blocks.
2454 		 */
2455 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2456 
2457 		/*
2458 		 * Get virtual LBA given in the CDB.
2459 		 */
2460 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2461 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2462 
2463 		/*
2464 		 * Check that LBA range for I/O does not exceed volume's
2465 		 * MaxLBA.
2466 		 */
2467 		if ((virtLBA + (uint64_t)io_size - 1) <=
2468 		    sc->DD_max_lba) {
2469 			/*
2470 			 * Check if the I/O crosses a stripe boundary.  If not,
2471 			 * translate the virtual LBA to a physical LBA and set
2472 			 * the DevHandle for the PhysDisk to be used.  If it
2473 			 * does cross a boundry, do normal I/O.  To get the
2474 			 * right DevHandle to use, get the map number for the
2475 			 * column, then use that map number to look up the
2476 			 * DevHandle of the PhysDisk.
2477 			 */
2478 			stripe_offset = (uint32_t)virtLBA &
2479 			    (sc->DD_stripe_size - 1);
2480 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2481 				physLBA = (uint32_t)virtLBA >>
2482 				    sc->DD_stripe_exponent;
2483 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2484 				column = physLBA % sc->DD_num_phys_disks;
2485 				pIO_req->DevHandle =
2486 				    htole16(sc->DD_column_map[column].dev_handle);
2487 				/* ???? Is this endian safe*/
2488 				cm->cm_desc.SCSIIO.DevHandle =
2489 				    pIO_req->DevHandle;
2490 
2491 				physLBA = (stripe_unit <<
2492 				    sc->DD_stripe_exponent) + stripe_offset;
2493 				ptrLBA = &pIO_req->CDB.CDB32[1];
2494 				physLBA_byte = (uint8_t)(physLBA >> 16);
2495 				*ptrLBA = physLBA_byte;
2496 				ptrLBA = &pIO_req->CDB.CDB32[2];
2497 				physLBA_byte = (uint8_t)(physLBA >> 8);
2498 				*ptrLBA = physLBA_byte;
2499 				ptrLBA = &pIO_req->CDB.CDB32[3];
2500 				physLBA_byte = (uint8_t)physLBA;
2501 				*ptrLBA = physLBA_byte;
2502 
2503 				/*
2504 				 * Set flag that Direct Drive I/O is
2505 				 * being done.
2506 				 */
2507 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2508 			}
2509 		}
2510 		return;
2511 	}
2512 
2513 	/*
2514 	 * Handle 10, 12 or 16 byte CDBs.
2515 	 */
2516 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2517 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2518 	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2519 	    (CDB[0] == WRITE_12))) {
2520 		/*
2521 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2522 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2523 		 * the else section.  10-byte and 12-byte CDB's are OK.
2524 		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2525 		 * ready to accept 12byte CDB for Direct IOs.
2526 		 */
2527 		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2528 		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2529 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2530 			/*
2531 			 * Get the transfer size in blocks.
2532 			 */
2533 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2534 
2535 			/*
2536 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2537 			 * LBA in the CDB depending on command.
2538 			 */
2539 			lba_idx = ((CDB[0] == READ_12) ||
2540 				(CDB[0] == WRITE_12) ||
2541 				(CDB[0] == READ_10) ||
2542 				(CDB[0] == WRITE_10))? 2 : 6;
2543 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2544 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2545 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2546 			    (uint64_t)CDB[lba_idx + 3];
2547 
2548 			/*
2549 			 * Check that LBA range for I/O does not exceed volume's
2550 			 * MaxLBA.
2551 			 */
2552 			if ((virtLBA + (uint64_t)io_size - 1) <=
2553 			    sc->DD_max_lba) {
2554 				/*
2555 				 * Check if the I/O crosses a stripe boundary.
2556 				 * If not, translate the virtual LBA to a
2557 				 * physical LBA and set the DevHandle for the
2558 				 * PhysDisk to be used.  If it does cross a
2559 				 * boundry, do normal I/O.  To get the right
2560 				 * DevHandle to use, get the map number for the
2561 				 * column, then use that map number to look up
2562 				 * the DevHandle of the PhysDisk.
2563 				 */
2564 				stripe_offset = (uint32_t)virtLBA &
2565 				    (sc->DD_stripe_size - 1);
2566 				if ((stripe_offset + io_size) <=
2567 				    sc->DD_stripe_size) {
2568 					physLBA = (uint32_t)virtLBA >>
2569 					    sc->DD_stripe_exponent;
2570 					stripe_unit = physLBA /
2571 					    sc->DD_num_phys_disks;
2572 					column = physLBA %
2573 					    sc->DD_num_phys_disks;
2574 					pIO_req->DevHandle =
2575 					    htole16(sc->DD_column_map[column].
2576 					    dev_handle);
2577 					cm->cm_desc.SCSIIO.DevHandle =
2578 					    pIO_req->DevHandle;
2579 
2580 					physLBA = (stripe_unit <<
2581 					    sc->DD_stripe_exponent) +
2582 					    stripe_offset;
2583 					ptrLBA =
2584 					    &pIO_req->CDB.CDB32[lba_idx];
2585 					physLBA_byte = (uint8_t)(physLBA >> 24);
2586 					*ptrLBA = physLBA_byte;
2587 					ptrLBA =
2588 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2589 					physLBA_byte = (uint8_t)(physLBA >> 16);
2590 					*ptrLBA = physLBA_byte;
2591 					ptrLBA =
2592 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2593 					physLBA_byte = (uint8_t)(physLBA >> 8);
2594 					*ptrLBA = physLBA_byte;
2595 					ptrLBA =
2596 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2597 					physLBA_byte = (uint8_t)physLBA;
2598 					*ptrLBA = physLBA_byte;
2599 
2600 					/*
2601 					 * Set flag that Direct Drive I/O is
2602 					 * being done.
2603 					 */
2604 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2605 				}
2606 			}
2607 		} else {
2608 			/*
2609 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2610 			 * 0.  Get the transfer size in blocks.
2611 			 */
2612 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2613 
2614 			/*
2615 			 * Get virtual LBA.
2616 			 */
2617 			virtLBA = ((uint64_t)CDB[2] << 54) |
2618 			    ((uint64_t)CDB[3] << 48) |
2619 			    ((uint64_t)CDB[4] << 40) |
2620 			    ((uint64_t)CDB[5] << 32) |
2621 			    ((uint64_t)CDB[6] << 24) |
2622 			    ((uint64_t)CDB[7] << 16) |
2623 			    ((uint64_t)CDB[8] << 8) |
2624 			    (uint64_t)CDB[9];
2625 
2626 			/*
2627 			 * Check that LBA range for I/O does not exceed volume's
2628 			 * MaxLBA.
2629 			 */
2630 			if ((virtLBA + (uint64_t)io_size - 1) <=
2631 			    sc->DD_max_lba) {
2632 				/*
2633 				 * Check if the I/O crosses a stripe boundary.
2634 				 * If not, translate the virtual LBA to a
2635 				 * physical LBA and set the DevHandle for the
2636 				 * PhysDisk to be used.  If it does cross a
2637 				 * boundry, do normal I/O.  To get the right
2638 				 * DevHandle to use, get the map number for the
2639 				 * column, then use that map number to look up
2640 				 * the DevHandle of the PhysDisk.
2641 				 */
2642 				stripe_offset = (uint32_t)virtLBA &
2643 				    (sc->DD_stripe_size - 1);
2644 				if ((stripe_offset + io_size) <=
2645 				    sc->DD_stripe_size) {
2646 					physLBA = (uint32_t)(virtLBA >>
2647 					    sc->DD_stripe_exponent);
2648 					stripe_unit = physLBA /
2649 					    sc->DD_num_phys_disks;
2650 					column = physLBA %
2651 					    sc->DD_num_phys_disks;
2652 					pIO_req->DevHandle =
2653 					    htole16(sc->DD_column_map[column].
2654 					    dev_handle);
2655 					cm->cm_desc.SCSIIO.DevHandle =
2656 					    pIO_req->DevHandle;
2657 
2658 					physLBA = (stripe_unit <<
2659 					    sc->DD_stripe_exponent) +
2660 					    stripe_offset;
2661 
2662 					/*
2663 					 * Set upper 4 bytes of LBA to 0.  We
2664 					 * assume that the phys disks are less
2665 					 * than 2 TB's in size.  Then, set the
2666 					 * lower 4 bytes.
2667 					 */
2668 					pIO_req->CDB.CDB32[2] = 0;
2669 					pIO_req->CDB.CDB32[3] = 0;
2670 					pIO_req->CDB.CDB32[4] = 0;
2671 					pIO_req->CDB.CDB32[5] = 0;
2672 					ptrLBA = &pIO_req->CDB.CDB32[6];
2673 					physLBA_byte = (uint8_t)(physLBA >> 24);
2674 					*ptrLBA = physLBA_byte;
2675 					ptrLBA = &pIO_req->CDB.CDB32[7];
2676 					physLBA_byte = (uint8_t)(physLBA >> 16);
2677 					*ptrLBA = physLBA_byte;
2678 					ptrLBA = &pIO_req->CDB.CDB32[8];
2679 					physLBA_byte = (uint8_t)(physLBA >> 8);
2680 					*ptrLBA = physLBA_byte;
2681 					ptrLBA = &pIO_req->CDB.CDB32[9];
2682 					physLBA_byte = (uint8_t)physLBA;
2683 					*ptrLBA = physLBA_byte;
2684 
2685 					/*
2686 					 * Set flag that Direct Drive I/O is
2687 					 * being done.
2688 					 */
2689 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2690 				}
2691 			}
2692 		}
2693 	}
2694 }
2695 
2696 #if __FreeBSD_version >= 900026
2697 static void
2698 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2699 {
2700 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2701 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2702 	uint64_t sasaddr;
2703 	union ccb *ccb;
2704 
2705 	ccb = cm->cm_complete_data;
2706 
2707 	/*
2708 	 * Currently there should be no way we can hit this case.  It only
2709 	 * happens when we have a failure to allocate chain frames, and SMP
2710 	 * commands require two S/G elements only.  That should be handled
2711 	 * in the standard request size.
2712 	 */
2713 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2714 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2715 			   __func__, cm->cm_flags);
2716 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2717 		goto bailout;
2718         }
2719 
2720 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2721 	if (rpl == NULL) {
2722 		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2723 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2724 		goto bailout;
2725 	}
2726 
2727 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2728 	sasaddr = le32toh(req->SASAddress.Low);
2729 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2730 
2731 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2732 	    MPI2_IOCSTATUS_SUCCESS ||
2733 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2734 		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2735 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2736 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2737 		goto bailout;
2738 	}
2739 
2740 	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2741 		   "%#jx completed successfully\n", __func__,
2742 		   (uintmax_t)sasaddr);
2743 
2744 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2745 		ccb->ccb_h.status = CAM_REQ_CMP;
2746 	else
2747 		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2748 
2749 bailout:
2750 	/*
2751 	 * We sync in both directions because we had DMAs in the S/G list
2752 	 * in both directions.
2753 	 */
2754 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2755 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2756 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2757 	mps_free_command(sc, cm);
2758 	xpt_done(ccb);
2759 }
2760 
/*
 * Build and dispatch an SMP passthrough request to the given SAS address.
 * The CCB's request and response buffers are handed to the chip through a
 * two-element iovec/uio so that a single mps_map_command() call maps both
 * the outbound request and the inbound response.  Completion is handled by
 * mpssas_smpio_complete().  On any setup failure the CCB is completed here
 * with an appropriate error status.
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
			   "%s: physical addresses not supported\n", __func__);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags = 
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the request out; iovec 1 receives the response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	xpt_done(ccb);
	return;

}
2933 
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address of the SMP target to
 * talk to (either the device itself, if it contains an embedded SMP target,
 * or its parent expander) and hand the CCB off to mpssas_send_smpcmd().
 * Any failure to resolve an address completes the CCB here with an error.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we need to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3069 #endif //__FreeBSD_version >= 900026
3070 
3071 static void
3072 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3073 {
3074 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3075 	struct mps_softc *sc;
3076 	struct mps_command *tm;
3077 	struct mpssas_target *targ;
3078 
3079 	MPS_FUNCTRACE(sassc->sc);
3080 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3081 
3082 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3083 	    ("Target %d out of bounds in XPT_RESET_DEV\n",
3084 	     ccb->ccb_h.target_id));
3085 	sc = sassc->sc;
3086 	tm = mps_alloc_command(sc);
3087 	if (tm == NULL) {
3088 		mps_dprint(sc, MPS_ERROR,
3089 		    "command alloc failure in mpssas_action_resetdev\n");
3090 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3091 		xpt_done(ccb);
3092 		return;
3093 	}
3094 
3095 	targ = &sassc->targets[ccb->ccb_h.target_id];
3096 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3097 	req->DevHandle = htole16(targ->handle);
3098 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3099 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3100 
3101 	/* SAS Hard Link Reset / SATA Link Reset */
3102 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3103 
3104 	tm->cm_data = NULL;
3105 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3106 	tm->cm_complete = mpssas_resetdev_complete;
3107 	tm->cm_complete_data = ccb;
3108 	tm->cm_targ = targ;
3109 	mps_map_command(sc, tm);
3110 }
3111 
3112 static void
3113 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3114 {
3115 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3116 	union ccb *ccb;
3117 
3118 	MPS_FUNCTRACE(sc);
3119 	mtx_assert(&sc->mps_mtx, MA_OWNED);
3120 
3121 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3122 	ccb = tm->cm_complete_data;
3123 
3124 	/*
3125 	 * Currently there should be no way we can hit this case.  It only
3126 	 * happens when we have a failure to allocate chain frames, and
3127 	 * task management commands don't have S/G lists.
3128 	 */
3129 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3130 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3131 
3132 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3133 
3134 		mps_dprint(sc, MPS_ERROR,
3135 			   "%s: cm_flags = %#x for reset of handle %#04x! "
3136 			   "This should not happen!\n", __func__, tm->cm_flags,
3137 			   req->DevHandle);
3138 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3139 		goto bailout;
3140 	}
3141 
3142 	mps_dprint(sc, MPS_XINFO,
3143 	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3144 	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3145 
3146 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3147 		ccb->ccb_h.status = CAM_REQ_CMP;
3148 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3149 		    CAM_LUN_WILDCARD);
3150 	}
3151 	else
3152 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3153 
3154 bailout:
3155 
3156 	mpssas_free_tm(sc, tm);
3157 	xpt_done(ccb);
3158 }
3159 
3160 static void
3161 mpssas_poll(struct cam_sim *sim)
3162 {
3163 	struct mpssas_softc *sassc;
3164 
3165 	sassc = cam_sim_softc(sim);
3166 
3167 	if (sassc->sc->mps_debug & MPS_TRACE) {
3168 		/* frequent debug messages during a panic just slow
3169 		 * everything down too much.
3170 		 */
3171 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3172 		sassc->sc->mps_debug &= ~MPS_TRACE;
3173 	}
3174 
3175 	mps_intr_locked(sassc->sc);
3176 }
3177 
/*
 * CAM asynchronous event callback.  On FreeBSD versions with advanced-info
 * support we watch AC_ADVINFO_CHANGED for long read-capacity data changes
 * and record per-LUN EEDP formatting state; on older versions we catch
 * AC_FOUND_DEVICE and probe for EEDP via mpssas_check_eedp().
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look for an existing record for this LUN. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* First time we've seen this LUN; add it to the target. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read-capacity data via an advinfo
		 * CCB; the protection bit there tells us whether the LUN is
		 * formatted for EEDP.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = 0;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3277 
3278 #if (__FreeBSD_version < 901503) || \
3279     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3280 static void
3281 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3282 		  struct ccb_getdev *cgd)
3283 {
3284 	struct mpssas_softc *sassc = sc->sassc;
3285 	struct ccb_scsiio *csio;
3286 	struct scsi_read_capacity_16 *scsi_cmd;
3287 	struct scsi_read_capacity_eedp *rcap_buf;
3288 	path_id_t pathid;
3289 	target_id_t targetid;
3290 	lun_id_t lunid;
3291 	union ccb *ccb;
3292 	struct cam_path *local_path;
3293 	struct mpssas_target *target;
3294 	struct mpssas_lun *lun;
3295 	uint8_t	found_lun;
3296 	char path_str[64];
3297 
3298 	sassc = sc->sassc;
3299 	pathid = cam_sim_path(sassc->sim);
3300 	targetid = xpt_path_target_id(path);
3301 	lunid = xpt_path_lun_id(path);
3302 
3303 	KASSERT(targetid < sassc->maxtargets,
3304 	    ("Target %d out of bounds in mpssas_check_eedp\n",
3305 	     targetid));
3306 	target = &sassc->targets[targetid];
3307 	if (target->handle == 0x0)
3308 		return;
3309 
3310 	/*
3311 	 * Determine if the device is EEDP capable.
3312 	 *
3313 	 * If this flag is set in the inquiry data,
3314 	 * the device supports protection information,
3315 	 * and must support the 16 byte read
3316 	 * capacity command, otherwise continue without
3317 	 * sending read cap 16
3318 	 */
3319 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3320 		return;
3321 
3322 	/*
3323 	 * Issue a READ CAPACITY 16 command.  This info
3324 	 * is used to determine if the LUN is formatted
3325 	 * for EEDP support.
3326 	 */
3327 	ccb = xpt_alloc_ccb_nowait();
3328 	if (ccb == NULL) {
3329 		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3330 		    "for EEDP support.\n");
3331 		return;
3332 	}
3333 
3334 	if (xpt_create_path(&local_path, xpt_periph,
3335 	    pathid, targetid, lunid) != CAM_REQ_CMP) {
3336 		mps_dprint(sc, MPS_ERROR, "Unable to create "
3337 		    "path for EEDP support\n");
3338 		xpt_free_ccb(ccb);
3339 		return;
3340 	}
3341 
3342 	/*
3343 	 * If LUN is already in list, don't create a new
3344 	 * one.
3345 	 */
3346 	found_lun = FALSE;
3347 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3348 		if (lun->lun_id == lunid) {
3349 			found_lun = TRUE;
3350 			break;
3351 		}
3352 	}
3353 	if (!found_lun) {
3354 		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3355 		    M_NOWAIT | M_ZERO);
3356 		if (lun == NULL) {
3357 			mps_dprint(sc, MPS_ERROR,
3358 			    "Unable to alloc LUN for EEDP support.\n");
3359 			xpt_free_path(local_path);
3360 			xpt_free_ccb(ccb);
3361 			return;
3362 		}
3363 		lun->lun_id = lunid;
3364 		SLIST_INSERT_HEAD(&target->luns, lun,
3365 		    lun_link);
3366 	}
3367 
3368 	xpt_path_string(local_path, path_str, sizeof(path_str));
3369 	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3370 	    path_str, target->handle);
3371 
3372 	/*
3373 	 * Issue a READ CAPACITY 16 command for the LUN.
3374 	 * The mpssas_read_cap_done function will load
3375 	 * the read cap info into the LUN struct.
3376 	 */
3377 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3378 	    M_MPT2, M_NOWAIT | M_ZERO);
3379 	if (rcap_buf == NULL) {
3380 		mps_dprint(sc, MPS_FAULT,
3381 		    "Unable to alloc read capacity buffer for EEDP support.\n");
3382 		xpt_free_path(ccb->ccb_h.path);
3383 		xpt_free_ccb(ccb);
3384 		return;
3385 	}
3386 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3387 	csio = &ccb->csio;
3388 	csio->ccb_h.func_code = XPT_SCSI_IO;
3389 	csio->ccb_h.flags = CAM_DIR_IN;
3390 	csio->ccb_h.retry_count = 4;
3391 	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3392 	csio->ccb_h.timeout = 60000;
3393 	csio->data_ptr = (uint8_t *)rcap_buf;
3394 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3395 	csio->sense_len = MPS_SENSE_LEN;
3396 	csio->cdb_len = sizeof(*scsi_cmd);
3397 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3398 
3399 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3400 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3401 	scsi_cmd->opcode = 0x9E;
3402 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3403 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3404 
3405 	ccb->ccb_h.ppriv_ptr1 = sassc;
3406 	xpt_action(ccb);
3407 }
3408 
/*
 * Completion handler for the internal READ CAPACITY(16) issued by
 * mpssas_check_eedp().  Records whether the LUN is formatted for EEDP (and
 * its block size) in the target's LUN list, then frees the buffer, path,
 * and CCB allocated by mpssas_check_eedp().
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver needs to release the devq here because this SCSI
	 * command was generated internally by the driver rather than by a
	 * CAM peripheral, so it will not go back through cam_periph.  This
	 * is currently the only place the driver issues a SCSI command
	 * internally; any future internal commands will need to release
	 * the devq in the same way.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mpssas_read_cap_done\n",
	     done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		if (rcap_buf->protect & 0x01) {
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
 			    "target ID %d is formatted for EEDP "
 			    "support.\n", done_ccb->ccb_h.target_lun,
 			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3479 #endif /* (__FreeBSD_version < 901503) || \
3480           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3481 
3482 int
3483 mpssas_startup(struct mps_softc *sc)
3484 {
3485 
3486 	/*
3487 	 * Send the port enable message and set the wait_for_port_enable flag.
3488 	 * This flag helps to keep the simq frozen until all discovery events
3489 	 * are processed.
3490 	 */
3491 	sc->wait_for_port_enable = 1;
3492 	mpssas_send_portenable(sc);
3493 	return (0);
3494 }
3495 
3496 static int
3497 mpssas_send_portenable(struct mps_softc *sc)
3498 {
3499 	MPI2_PORT_ENABLE_REQUEST *request;
3500 	struct mps_command *cm;
3501 
3502 	MPS_FUNCTRACE(sc);
3503 
3504 	if ((cm = mps_alloc_command(sc)) == NULL)
3505 		return (EBUSY);
3506 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3507 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3508 	request->MsgFlags = 0;
3509 	request->VP_ID = 0;
3510 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3511 	cm->cm_complete = mpssas_portenable_complete;
3512 	cm->cm_data = NULL;
3513 	cm->cm_sge = NULL;
3514 
3515 	mps_map_command(sc, cm);
3516 	mps_dprint(sc, MPS_XINFO,
3517 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3518 	    cm, cm->cm_req, cm->cm_complete);
3519 	return (0);
3520 }
3521 
3522 static void
3523 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3524 {
3525 	MPI2_PORT_ENABLE_REPLY *reply;
3526 	struct mpssas_softc *sassc;
3527 
3528 	MPS_FUNCTRACE(sc);
3529 	sassc = sc->sassc;
3530 
3531 	/*
3532 	 * Currently there should be no way we can hit this case.  It only
3533 	 * happens when we have a failure to allocate chain frames, and
3534 	 * port enable commands don't have S/G lists.
3535 	 */
3536 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3537 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3538 			   "This should not happen!\n", __func__, cm->cm_flags);
3539 	}
3540 
3541 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3542 	if (reply == NULL)
3543 		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3544 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3545 	    MPI2_IOCSTATUS_SUCCESS)
3546 		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3547 
3548 	mps_free_command(sc, cm);
3549 	if (sc->mps_ich.ich_arg != NULL) {
3550 		mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3551 		config_intrhook_disestablish(&sc->mps_ich);
3552 		sc->mps_ich.ich_arg = NULL;
3553 	}
3554 
3555 	/*
3556 	 * Get WarpDrive info after discovery is complete but before the scan
3557 	 * starts.  At this point, all devices are ready to be exposed to the
3558 	 * OS.  If devices should be hidden instead, take them out of the
3559 	 * 'targets' array before the scan.  The devinfo for a disk will have
3560 	 * some info and a volume's will be 0.  Use that to remove disks.
3561 	 */
3562 	mps_wd_config_pages(sc);
3563 
3564 	/*
3565 	 * Done waiting for port enable to complete.  Decrement the refcount.
3566 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3567 	 * take place.  Since the simq was explicitly frozen before port
3568 	 * enable, it must be explicitly released here to keep the
3569 	 * freeze/release count in sync.
3570 	 */
3571 	sc->wait_for_port_enable = 0;
3572 	sc->port_enable_complete = 1;
3573 	wakeup(&sc->port_enable_complete);
3574 	mpssas_startup_decrement(sassc);
3575 }
3576 
3577 int
3578 mpssas_check_id(struct mpssas_softc *sassc, int id)
3579 {
3580 	struct mps_softc *sc = sassc->sc;
3581 	char *ids;
3582 	char *name;
3583 
3584 	ids = &sc->exclude_ids[0];
3585 	while((name = strsep(&ids, ",")) != NULL) {
3586 		if (name[0] == '\0')
3587 			continue;
3588 		if (strtol(name, NULL, 0) == (long)id)
3589 			return (1);
3590 	}
3591 
3592 	return (0);
3593 }
3594