xref: /freebsd/sys/dev/mps/mps_sas.c (revision 95d45410b5100e07f6f98450bcd841a8945d4726)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011, 2012 LSI Corp.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * LSI MPT-Fusion Host Adapter FreeBSD
28  *
29  * $FreeBSD$
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 /* Communications core for LSI MPT2 */
36 
37 /* TODO Move headers to mpsvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
44 #include <sys/bus.h>
45 #include <sys/conf.h>
46 #include <sys/bio.h>
47 #include <sys/malloc.h>
48 #include <sys/uio.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/sbuf.h>
55 
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58 #include <sys/rman.h>
59 
60 #include <machine/stdarg.h>
61 
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_xpt.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #if __FreeBSD_version >= 900026
73 #include <cam/scsi/smp_all.h>
74 #endif
75 
76 #include <dev/mps/mpi/mpi2_type.h>
77 #include <dev/mps/mpi/mpi2.h>
78 #include <dev/mps/mpi/mpi2_ioc.h>
79 #include <dev/mps/mpi/mpi2_sas.h>
80 #include <dev/mps/mpi/mpi2_cnfg.h>
81 #include <dev/mps/mpi/mpi2_init.h>
82 #include <dev/mps/mpi/mpi2_tool.h>
83 #include <dev/mps/mps_ioctl.h>
84 #include <dev/mps/mpsvar.h>
85 #include <dev/mps/mps_table.h>
86 #include <dev/mps/mps_sas.h>
87 
88 #define MPSSAS_DISCOVERY_TIMEOUT	20
89 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
90 
91 /*
92  * static array to check SCSI OpCode for EEDP protection bits
93  */
94 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
95 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
96 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
97 static uint8_t op_code_prot[256] = {
98 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
101 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
107 	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
109 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
114 };
115 
116 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
117 
118 static void mpssas_discovery_timeout(void *data);
119 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mpssas_poll(struct cam_sim *sim);
123 static void mpssas_scsiio_timeout(void *data);
124 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
125 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
126     struct mps_command *cm, union ccb *ccb);
127 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
128 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
129 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
130 #if __FreeBSD_version >= 900026
131 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
132 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
133 			       uint64_t sasaddr);
134 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
135 #endif //FreeBSD_version >= 900026
136 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
137 static int  mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
138 static int  mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
139 static void mpssas_async(void *callback_arg, uint32_t code,
140 			 struct cam_path *path, void *arg);
141 #if (__FreeBSD_version < 901503) || \
142     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
143 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
144 			      struct ccb_getdev *cgd);
145 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
146 #endif
147 static int mpssas_send_portenable(struct mps_softc *sc);
148 static void mpssas_portenable_complete(struct mps_softc *sc,
149     struct mps_command *cm);
150 
151 struct mpssas_target *
152 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
153 {
154 	struct mpssas_target *target;
155 	int i;
156 
157 	for (i = start; i < sassc->maxtargets; i++) {
158 		target = &sassc->targets[i];
159 		if (target->handle == handle)
160 			return (target);
161 	}
162 
163 	return (NULL);
164 }
165 
166 /* we need to freeze the simq during attach and diag reset, to avoid failing
167  * commands before device handles have been found by discovery.  Since
168  * discovery involves reading config pages and possibly sending commands,
169  * discovery actions may continue even after we receive the end of discovery
170  * event, so refcount discovery actions instead of assuming we can unfreeze
171  * the simq when we get the event.
172  */
/*
 * Take a discovery reference.  On the first (0 -> 1) increment while the
 * driver is still in startup, freeze the simq so that commands are not
 * failed before discovery has populated device handles.  Does nothing once
 * MPSSAS_IN_STARTUP has been cleared.
 */
void
mpssas_startup_increment(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s freezing simq\n", __func__);
#if __FreeBSD_version >= 1000039
			/* Newer CAM: also delay root mount until discovery ends. */
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
192 
/*
 * Drop a discovery reference taken by mpssas_startup_increment().  When the
 * last reference goes away, startup is finished: clear MPSSAS_IN_STARTUP,
 * release the simq, and either release the boot hold (newer CAM, which will
 * rescan on its own) or explicitly rescan the whole bus for the discovered
 * topology.
 */
void
mpssas_startup_decrement(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPSSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if __FreeBSD_version >= 1000039
			xpt_release_boot();
#else
			/* Older CAM has no boot hold; kick a wildcard rescan. */
			mpssas_rescan_target(sassc->sc, NULL);
#endif
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
217 
218 /* LSI's firmware requires us to stop sending commands when we're doing task
219  * management, so refcount the TMs and keep the simq frozen when any are in
220  * use.
221  */
/*
 * Allocate a high-priority command for task management.  The firmware
 * requires that normal I/O stop while TMs are outstanding, so the first
 * outstanding TM (tm_count 0 -> 1) freezes the simq; the matching release
 * happens in mpssas_free_tm().  Returns NULL if no high-priority command
 * is available.
 */
struct mps_command *
mpssas_alloc_tm(struct mps_softc *sc)
{
	struct mps_command *tm;

	MPS_FUNCTRACE(sc);
	tm = mps_alloc_high_priority_command(sc);
	if (tm != NULL) {
		if (sc->sassc->tm_count++ == 0) {
			mps_dprint(sc, MPS_RECOVERY,
			    "%s freezing simq\n", __func__);
			xpt_freeze_simq(sc->sassc->sim, 1);
		}
		mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
		    sc->sassc->tm_count);
	}
	return tm;
}
240 
/*
 * Return a task-management command obtained from mpssas_alloc_tm().  When
 * the last outstanding TM is freed (tm_count drops to 0), release the simq
 * frozen on the allocation side.  A NULL tm is tolerated and ignored.
 */
void
mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
{
	mps_dprint(sc, MPS_TRACE, "%s", __func__);
	if (tm == NULL)
		return;

	/* if there are no TMs in use, we can release the simq.  We use our
	 * own refcount so that it's easier for a diag reset to cleanup and
	 * release the simq.
	 */
	if (--sc->sassc->tm_count == 0) {
		mps_dprint(sc, MPS_RECOVERY, "%s releasing simq\n", __func__);
		xpt_release_simq(sc->sassc->sim, 1);
	}
	mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
	    sc->sassc->tm_count);

	mps_free_high_priority_command(sc, tm);
}
261 
262 void
263 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
264 {
265 	struct mpssas_softc *sassc = sc->sassc;
266 	path_id_t pathid;
267 	target_id_t targetid;
268 	union ccb *ccb;
269 
270 	MPS_FUNCTRACE(sc);
271 	pathid = cam_sim_path(sassc->sim);
272 	if (targ == NULL)
273 		targetid = CAM_TARGET_WILDCARD;
274 	else
275 		targetid = targ - sassc->targets;
276 
277 	/*
278 	 * Allocate a CCB and schedule a rescan.
279 	 */
280 	ccb = xpt_alloc_ccb_nowait();
281 	if (ccb == NULL) {
282 		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
283 		return;
284 	}
285 
286 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
287 	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
288 		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
289 		xpt_free_ccb(ccb);
290 		return;
291 	}
292 
293 	if (targetid == CAM_TARGET_WILDCARD)
294 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
295 	else
296 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
297 
298 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
299 	xpt_rescan(ccb);
300 }
301 
/*
 * printf-style debug logging for a command.  Prefixes the message with the
 * CAM path (plus the CDB and transfer length for SCSI I/O) when a CCB is
 * attached to the command, otherwise with a "noperiph" sim/bus/target/lun
 * tuple.  Returns immediately when the requested debug level is disabled.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];		/* on-stack message buffer; output truncates */
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	/* Fixed-length sbuf over str[]; never allocates. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: identify the command by sim/bus/target/lun. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));

	va_end(ap);
}
347 
348 
349 static void
350 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
351 {
352 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
353 	struct mpssas_target *targ;
354 	uint16_t handle;
355 
356 	MPS_FUNCTRACE(sc);
357 
358 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
359 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
360 	targ = tm->cm_targ;
361 
362 	if (reply == NULL) {
363 		/* XXX retry the remove after the diag reset completes? */
364 		mps_dprint(sc, MPS_FAULT,
365 		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
366 		mpssas_free_tm(sc, tm);
367 		return;
368 	}
369 
370 	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
371 		mps_dprint(sc, MPS_FAULT,
372 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
373 		   reply->IOCStatus, handle);
374 		mpssas_free_tm(sc, tm);
375 		return;
376 	}
377 
378 	mps_dprint(sc, MPS_XINFO,
379 	    "Reset aborted %u commands\n", reply->TerminationCount);
380 	mps_free_reply(sc, tm->cm_reply_data);
381 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
382 
383 	mps_dprint(sc, MPS_XINFO,
384 	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
385 
386 	/*
387 	 * Don't clear target if remove fails because things will get confusing.
388 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
389 	 * this target id if possible, and so we can assign the same target id
390 	 * to this device if it comes back in the future.
391 	 */
392 	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
393 		targ = tm->cm_targ;
394 		targ->handle = 0x0;
395 		targ->encl_handle = 0x0;
396 		targ->encl_slot = 0x0;
397 		targ->exp_dev_handle = 0x0;
398 		targ->phy_num = 0x0;
399 		targ->linkrate = 0x0;
400 		targ->devinfo = 0x0;
401 		targ->flags = 0x0;
402 	}
403 
404 	mpssas_free_tm(sc, tm);
405 }
406 
407 
408 /*
409  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
410  * Otherwise Volume Delete is same as Bare Drive Removal.
411  */
412 void
413 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
414 {
415 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
416 	struct mps_softc *sc;
417 	struct mps_command *cm;
418 	struct mpssas_target *targ = NULL;
419 
420 	MPS_FUNCTRACE(sassc->sc);
421 	sc = sassc->sc;
422 
423 #ifdef WD_SUPPORT
424 	/*
425 	 * If this is a WD controller, determine if the disk should be exposed
426 	 * to the OS or not.  If disk should be exposed, return from this
427 	 * function without doing anything.
428 	 */
429 	if (sc->WD_available && (sc->WD_hide_expose ==
430 	    MPS_WD_EXPOSE_ALWAYS)) {
431 		return;
432 	}
433 #endif //WD_SUPPORT
434 
435 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
436 	if (targ == NULL) {
437 		/* FIXME: what is the action? */
438 		/* We don't know about this device? */
439 		mps_dprint(sc, MPS_ERROR,
440 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
441 		return;
442 	}
443 
444 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
445 
446 	cm = mpssas_alloc_tm(sc);
447 	if (cm == NULL) {
448 		mps_dprint(sc, MPS_ERROR,
449 		    "%s: command alloc failure\n", __func__);
450 		return;
451 	}
452 
453 	mpssas_rescan_target(sc, targ);
454 
455 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
456 	req->DevHandle = targ->handle;
457 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
458 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
459 
460 	/* SAS Hard Link Reset / SATA Link Reset */
461 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
462 
463 	cm->cm_targ = targ;
464 	cm->cm_data = NULL;
465 	cm->cm_desc.HighPriority.RequestFlags =
466 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
467 	cm->cm_complete = mpssas_remove_volume;
468 	cm->cm_complete_data = (void *)(uintptr_t)handle;
469 	mps_map_command(sc, cm);
470 }
471 
472 /*
473  * The MPT2 firmware performs debounce on the link to avoid transient link
474  * errors and false removals.  When it does decide that link has been lost
475  * and a device need to go away, it expects that the host will perform a
476  * target reset and then an op remove.  The reset has the side-effect of
477  * aborting any outstanding requests for the device, which is required for
478  * the op-remove to succeed.  It's not clear if the host should check for
479  * the device coming back alive after the reset.
480  */
/*
 * Kick off device removal for 'handle': mark the target as in-removal,
 * request a CAM rescan so peripherals notice, then issue a target-reset TM.
 * The reset aborts outstanding I/O; mpssas_remove_device() completes the
 * sequence with the SAS_OP_REMOVE_DEVICE once the reset finishes.
 */
void
mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ = NULL;

	MPS_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mpssas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mps_dprint(sc, MPS_ERROR,
		    "%s : invalid handle 0x%x \n", __func__, handle);
		return;
	}

	targ->flags |= MPSSAS_TARGET_INREMOVAL;

	/* TM commands are limited; with none free the removal is dropped.
	 * NOTE(review): no retry path here — presumably a later topology
	 * event re-triggers the removal; confirm. */
	cm = mpssas_alloc_tm(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: command alloc failure\n", __func__);
		return;
	}

	mpssas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	cm->cm_complete = mpssas_remove_device;
	cm->cm_complete_data = (void *)(uintptr_t)handle;
	mps_map_command(sc, cm);
}
529 
/*
 * Completion handler for the target-reset TM from mpssas_prepare_remove().
 * If the reset succeeded, the same command is re-used in place to send the
 * SAS_OP_REMOVE_DEVICE IO-unit-control request (completed by
 * mpssas_remove_complete()), and any commands still queued on the target
 * are failed back to CAM with CAM_DEV_NOT_THERE.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	/* A NULL reply most likely means a diag reset aborted the TM. */
	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_FAULT,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	/* Fail any commands the reset missed back to CAM.  Note: 'tm' is
	 * re-used as the loop cursor here; these are SCSI I/O commands whose
	 * cm_complete_data holds the CCB. */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mpssas_scsiio_complete(sc, tm);
	}
}
604 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request issued by
 * mpssas_remove_device().  On success, clears the target's handle and
 * per-target state and frees its LUN list so the slot can be reassigned
 * if the device returns later.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Free the LUN list accumulated for this target. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
671 
672 static int
673 mpssas_register_events(struct mps_softc *sc)
674 {
675 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
676 
677 	bzero(events, 16);
678 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
679 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
680 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
681 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
682 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
683 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
684 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
685 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
686 	setbit(events, MPI2_EVENT_IR_VOLUME);
687 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
688 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
689 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
690 
691 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
692 	    &sc->sassc->mpssas_eh);
693 
694 	return (0);
695 }
696 
/*
 * Attach the CAM/SAS layer: allocate the sassc and target table, create the
 * simq and SIM, spin up the event taskqueue, register the SCSI bus, freeze
 * the simq until discovery completes, and register for async CAM events
 * (used for EEDP detection) plus firmware events.  Returns 0 on success or
 * an errno; on error, mps_detach_sas() unwinds whatever was set up.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0;

	MPS_FUNCTRACE(sc);

	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	/* NOTE(review): M_WAITOK malloc cannot return NULL, so this check
	 * is effectively dead code; kept as-is. */
	if(!sassc) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	/* NOTE(review): likewise unreachable with M_WAITOK. */
	if(!sassc->targets) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		free(sassc, M_MPT2);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* One simq slot per firmware request so CAM can't oversubscribe. */
	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
		    error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	/* First increment freezes the simq until discovery ends. */
	mpssas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
	sassc->discovery_timeouts = 0;

	sassc->tm_count = 0;

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED;
#else
		event = AC_FOUND_DEVICE;
#endif
		status = xpt_register_async(event, mpssas_async, sc,
					    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);
	return (error);
}
828 
/*
 * Tear down the CAM/SAS layer, in the reverse order of mps_attach_sas().
 * Safe to call on a partially-attached sassc (it is also the error-unwind
 * path of attach).  Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Undo the startup freeze if discovery never finished. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	sassc->flags |= MPSSAS_SHUTDOWN;
	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any LUN structures still hanging off the targets. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
889 
890 void
891 mpssas_discovery_end(struct mpssas_softc *sassc)
892 {
893 	struct mps_softc *sc = sassc->sc;
894 
895 	MPS_FUNCTRACE(sc);
896 
897 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
898 		callout_stop(&sassc->discovery_callout);
899 
900 }
901 
/*
 * Watchdog fired while waiting for discovery to complete — interrupts may
 * not be working.  Poll the hardware for pending events by hand; if
 * discovery is still not done, re-arm the callout up to
 * MPSSAS_MAX_DISCOVERY_TIMEOUTS times before giving up and forcing the
 * discovery phase to end.
 */
static void
mpssas_discovery_timeout(void *data)
{
	struct mpssas_softc *sassc = data;
	struct mps_softc *sc;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);

	mps_lock(sc);
	mps_dprint(sc, MPS_INFO,
	    "Timeout waiting for discovery, interrupts may not be working!\n");
	sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;

	/* Poll the hardware for events in case interrupts aren't working */
	mps_intr_locked(sc);

	mps_dprint(sassc->sc, MPS_INFO,
	    "Finished polling after discovery timeout at %d\n", ticks);

	/* Polling may have delivered the end-of-discovery event. */
	if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
		mpssas_discovery_end(sassc);
	} else {
		if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
			/* Still discovering: re-arm the watchdog. */
			sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
			callout_reset(&sassc->discovery_callout,
			    MPSSAS_DISCOVERY_TIMEOUT * hz,
			    mpssas_discovery_timeout, sassc);
			sassc->discovery_timeouts++;
		} else {
			/* Too many timeouts: stop waiting and move on. */
			mps_dprint(sassc->sc, MPS_FAULT,
			    "Discovery timed out, continuing.\n");
			sassc->flags &= ~MPSSAS_IN_DISCOVERY;
			mpssas_discovery_end(sassc);
		}
	}

	mps_unlock(sc);
}
941 
/*
 * CAM action entry point for the mps(4) SIM.  Dispatches on the CCB
 * function code.  Most requests are completed inline and finished with
 * the common xpt_done() at the bottom; SCSI I/O, SMP I/O, and device
 * resets are handed to asynchronous handlers which call xpt_done()
 * themselves, hence the early returns for those cases.
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	/* All CAM actions run with the per-controller lock held. */
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report the SIM's capabilities and transport info to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		/* PIM_NOSCAN: this driver triggers target rescans itself. */
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;
		/* Highest target ID doubles as the initiator's own ID
		 * (NOTE(review): presumably the HBA's ID — confirm). */
		cpi->initiator_id = sassc->maxtargets - 1;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/* Conservative default: 1.5Gb/s (150000 KB/s) SAS rate. */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXX KDM where does this number come from?
		 */
		cpi->maxio = 256 * 1024;
#endif
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A zero device handle means the target is not present. */
		if (targ->handle == 0x0) {
			mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Map the firmware's negotiated link rate code to KB/s:
		 * 0x08 = 1.5Gb/s, 0x09 = 3Gb/s, 0x0a = 6Gb/s. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		/* Asynchronous; resetdev path calls xpt_done() itself. */
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* Not implemented; report success so CAM can proceed. */
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1066 
1067 static void
1068 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1069     target_id_t target_id, lun_id_t lun_id)
1070 {
1071 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1072 	struct cam_path *path;
1073 
1074 	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1075 	    ac_code, target_id, (uintmax_t)lun_id);
1076 
1077 	if (xpt_create_path(&path, NULL,
1078 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1079 		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1080 			   "notification\n");
1081 		return;
1082 	}
1083 
1084 	xpt_async(ac_code, path, NULL);
1085 	xpt_free_path(path);
1086 }
1087 
1088 static void
1089 mpssas_complete_all_commands(struct mps_softc *sc)
1090 {
1091 	struct mps_command *cm;
1092 	int i;
1093 	int completed;
1094 
1095 	MPS_FUNCTRACE(sc);
1096 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1097 
1098 	/* complete all commands with a NULL reply */
1099 	for (i = 1; i < sc->num_reqs; i++) {
1100 		cm = &sc->commands[i];
1101 		cm->cm_reply = NULL;
1102 		completed = 0;
1103 
1104 		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1105 			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1106 
1107 		if (cm->cm_complete != NULL) {
1108 			mpssas_log_command(cm, MPS_RECOVERY,
1109 			    "completing cm %p state %x ccb %p for diag reset\n",
1110 			    cm, cm->cm_state, cm->cm_ccb);
1111 
1112 			cm->cm_complete(sc, cm);
1113 			completed = 1;
1114 		}
1115 
1116 		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1117 			mpssas_log_command(cm, MPS_RECOVERY,
1118 			    "waking up cm %p state %x ccb %p for diag reset\n",
1119 			    cm, cm->cm_state, cm->cm_ccb);
1120 			wakeup(cm);
1121 			completed = 1;
1122 		}
1123 
1124 		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1125 			/* this should never happen, but if it does, log */
1126 			mpssas_log_command(cm, MPS_RECOVERY,
1127 			    "cm %p state %x flags 0x%x ccb %p during diag "
1128 			    "reset\n", cm, cm->cm_state, cm->cm_flags,
1129 			    cm->cm_ccb);
1130 		}
1131 	}
1132 }
1133 
1134 void
1135 mpssas_handle_reinit(struct mps_softc *sc)
1136 {
1137 	int i;
1138 
1139 	/* Go back into startup mode and freeze the simq, so that CAM
1140 	 * doesn't send any commands until after we've rediscovered all
1141 	 * targets and found the proper device handles for them.
1142 	 *
1143 	 * After the reset, portenable will trigger discovery, and after all
1144 	 * discovery-related activities have finished, the simq will be
1145 	 * released.
1146 	 */
1147 	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1148 	sc->sassc->flags |= MPSSAS_IN_STARTUP;
1149 	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1150 	mpssas_startup_increment(sc->sassc);
1151 
1152 	/* notify CAM of a bus reset */
1153 	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1154 	    CAM_LUN_WILDCARD);
1155 
1156 	/* complete and cleanup after all outstanding commands */
1157 	mpssas_complete_all_commands(sc);
1158 
1159 	mps_dprint(sc, MPS_INIT,
1160 	    "%s startup %u tm %u after command completion\n",
1161 	    __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1162 
1163 	/* zero all the target handles, since they may change after the
1164 	 * reset, and we have to rediscover all the targets and use the new
1165 	 * handles.
1166 	 */
1167 	for (i = 0; i < sc->sassc->maxtargets; i++) {
1168 		if (sc->sassc->targets[i].outstanding != 0)
1169 			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1170 			    i, sc->sassc->targets[i].outstanding);
1171 		sc->sassc->targets[i].handle = 0x0;
1172 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1173 		sc->sassc->targets[i].outstanding = 0;
1174 		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
1175 	}
1176 }
1177 
1178 static void
1179 mpssas_tm_timeout(void *data)
1180 {
1181 	struct mps_command *tm = data;
1182 	struct mps_softc *sc = tm->cm_sc;
1183 
1184 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1185 
1186 	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1187 	    "task mgmt %p timed out\n", tm);
1188 	mps_reinit(sc);
1189 }
1190 
1191 static void
1192 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1193 {
1194 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1195 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1196 	unsigned int cm_count = 0;
1197 	struct mps_command *cm;
1198 	struct mpssas_target *targ;
1199 
1200 	callout_stop(&tm->cm_callout);
1201 
1202 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1203 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1204 	targ = tm->cm_targ;
1205 
1206 	/*
1207 	 * Currently there should be no way we can hit this case.  It only
1208 	 * happens when we have a failure to allocate chain frames, and
1209 	 * task management commands don't have S/G lists.
1210 	 * XXXSL So should it be an assertion?
1211 	 */
1212 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1213 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1214 			   "This should not happen!\n", __func__, tm->cm_flags);
1215 		mpssas_free_tm(sc, tm);
1216 		return;
1217 	}
1218 
1219 	if (reply == NULL) {
1220 		mpssas_log_command(tm, MPS_RECOVERY,
1221 		    "NULL reset reply for tm %p\n", tm);
1222 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1223 			/* this completion was due to a reset, just cleanup */
1224 			targ->flags &= ~MPSSAS_TARGET_INRESET;
1225 			targ->tm = NULL;
1226 			mpssas_free_tm(sc, tm);
1227 		}
1228 		else {
1229 			/* we should have gotten a reply. */
1230 			mps_reinit(sc);
1231 		}
1232 		return;
1233 	}
1234 
1235 	mpssas_log_command(tm, MPS_RECOVERY,
1236 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1237 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1238 	    le32toh(reply->TerminationCount));
1239 
1240 	/* See if there are any outstanding commands for this LUN.
1241 	 * This could be made more efficient by using a per-LU data
1242 	 * structure of some sort.
1243 	 */
1244 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1245 		if (cm->cm_lun == tm->cm_lun)
1246 			cm_count++;
1247 	}
1248 
1249 	if (cm_count == 0) {
1250 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1251 		    "logical unit %u finished recovery after reset\n",
1252 		    tm->cm_lun, tm);
1253 
1254 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1255 		    tm->cm_lun);
1256 
1257 		/* we've finished recovery for this logical unit.  check and
1258 		 * see if some other logical unit has a timedout command
1259 		 * that needs to be processed.
1260 		 */
1261 		cm = TAILQ_FIRST(&targ->timedout_commands);
1262 		if (cm) {
1263 			mpssas_send_abort(sc, tm, cm);
1264 		}
1265 		else {
1266 			targ->tm = NULL;
1267 			mpssas_free_tm(sc, tm);
1268 		}
1269 	}
1270 	else {
1271 		/* if we still have commands for this LUN, the reset
1272 		 * effectively failed, regardless of the status reported.
1273 		 * Escalate to a target reset.
1274 		 */
1275 		mpssas_log_command(tm, MPS_RECOVERY,
1276 		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1277 		    tm, cm_count);
1278 		mpssas_send_reset(sc, tm,
1279 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1280 	}
1281 }
1282 
/*
 * Completion handler for a TARGET_RESET task management request issued
 * by mpssas_send_reset().  If the target has no outstanding commands
 * left, recovery is finished and a BDR event is announced for all of
 * its LUNs; otherwise the reset is deemed to have failed and the driver
 * escalates to a full controller reinit.
 */
static void
mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed; disarm its own timeout callout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* A NULL reply during a diag reset is expected; anything
		 * else means the firmware dropped our TM — reinit.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->flags &= ~MPSSAS_TARGET_INRESET;
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/* The reset attempt is over, whatever the outcome. */
	targ->flags &= ~MPSSAS_TARGET_INRESET;

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "recovery finished after target reset\n");

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "target reset complete for tm %p, but still have %u command(s)\n",
		    tm, targ->outstanding);
		mps_reinit(sc);
	}
}
1355 
1356 #define MPS_RESET_TIMEOUT 30
1357 
1358 static int
1359 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1360 {
1361 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1362 	struct mpssas_target *target;
1363 	int err;
1364 
1365 	target = tm->cm_targ;
1366 	if (target->handle == 0) {
1367 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1368 		    __func__, target->tid);
1369 		return -1;
1370 	}
1371 
1372 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1373 	req->DevHandle = htole16(target->handle);
1374 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1375 	req->TaskType = type;
1376 
1377 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1378 		/* XXX Need to handle invalid LUNs */
1379 		MPS_SET_LUN(req->LUN, tm->cm_lun);
1380 		tm->cm_targ->logical_unit_resets++;
1381 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1382 		    "sending logical unit reset\n");
1383 		tm->cm_complete = mpssas_logical_unit_reset_complete;
1384 	}
1385 	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1386 		/* Target reset method =  SAS Hard Link Reset / SATA Link Reset */
1387 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1388 		tm->cm_targ->target_resets++;
1389 		tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1390 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1391 		    "sending target reset\n");
1392 		tm->cm_complete = mpssas_target_reset_complete;
1393 	}
1394 	else {
1395 		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
1396 		return -1;
1397 	}
1398 
1399 	tm->cm_data = NULL;
1400 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1401 	tm->cm_complete_data = (void *)tm;
1402 
1403 	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1404 	    mpssas_tm_timeout, tm);
1405 
1406 	err = mps_map_command(sc, tm);
1407 	if (err)
1408 		mpssas_log_command(tm, MPS_RECOVERY,
1409 		    "error %d sending reset type %u\n",
1410 		    err, type);
1411 
1412 	return err;
1413 }
1414 
1415 
/*
 * Completion handler for an ABORT_TASK task management request sent by
 * mpssas_send_abort().  Depending on the state of the target's
 * timed-out command queue, this either finishes recovery, chains the
 * next abort, or escalates to a logical unit reset when the aborted
 * command is still queued.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed; disarm its own timeout callout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* A NULL reply during a diag reset is expected; otherwise
		 * the firmware lost our TM — escalate to reinit.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1497 
1498 #define MPS_ABORT_TIMEOUT 5
1499 
1500 static int
1501 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1502 {
1503 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1504 	struct mpssas_target *targ;
1505 	int err;
1506 
1507 	targ = cm->cm_targ;
1508 	if (targ->handle == 0) {
1509 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1510 		    __func__, cm->cm_ccb->ccb_h.target_id);
1511 		return -1;
1512 	}
1513 
1514 	mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1515 	    "Aborting command %p\n", cm);
1516 
1517 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1518 	req->DevHandle = htole16(targ->handle);
1519 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1520 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1521 
1522 	/* XXX Need to handle invalid LUNs */
1523 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1524 
1525 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1526 
1527 	tm->cm_data = NULL;
1528 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1529 	tm->cm_complete = mpssas_abort_complete;
1530 	tm->cm_complete_data = (void *)tm;
1531 	tm->cm_targ = cm->cm_targ;
1532 	tm->cm_lun = cm->cm_lun;
1533 
1534 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1535 	    mpssas_tm_timeout, tm);
1536 
1537 	targ->aborts++;
1538 
1539 	err = mps_map_command(sc, tm);
1540 	if (err)
1541 		mpssas_log_command(tm, MPS_RECOVERY,
1542 		    "error %d sending abort for cm %p SMID %u\n",
1543 		    err, cm, req->TaskMID);
1544 	return err;
1545 }
1546 
1547 
1548 static void
1549 mpssas_scsiio_timeout(void *data)
1550 {
1551 	struct mps_softc *sc;
1552 	struct mps_command *cm;
1553 	struct mpssas_target *targ;
1554 
1555 	cm = (struct mps_command *)data;
1556 	sc = cm->cm_sc;
1557 
1558 	MPS_FUNCTRACE(sc);
1559 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1560 
1561 	mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1562 
1563 	/*
1564 	 * Run the interrupt handler to make sure it's not pending.  This
1565 	 * isn't perfect because the command could have already completed
1566 	 * and been re-used, though this is unlikely.
1567 	 */
1568 	mps_intr_locked(sc);
1569 	if (cm->cm_state == MPS_CM_STATE_FREE) {
1570 		mpssas_log_command(cm, MPS_XINFO,
1571 		    "SCSI command %p almost timed out\n", cm);
1572 		return;
1573 	}
1574 
1575 	if (cm->cm_ccb == NULL) {
1576 		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1577 		return;
1578 	}
1579 
1580 	mpssas_log_command(cm, MPS_INFO, "command timeout cm %p ccb %p\n",
1581 	    cm, cm->cm_ccb);
1582 
1583 	targ = cm->cm_targ;
1584 	targ->timeouts++;
1585 
1586 	/* XXX first, check the firmware state, to see if it's still
1587 	 * operational.  if not, do a diag reset.
1588 	 */
1589 	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1590 	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1591 	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1592 
1593 	if (targ->tm != NULL) {
1594 		/* target already in recovery, just queue up another
1595 		 * timedout command to be processed later.
1596 		 */
1597 		mps_dprint(sc, MPS_RECOVERY,
1598 		    "queued timedout cm %p for processing by tm %p\n",
1599 		    cm, targ->tm);
1600 	}
1601 	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1602 		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1603 		    cm, targ->tm);
1604 
1605 		/* start recovery by aborting the first timedout command */
1606 		mpssas_send_abort(sc, targ->tm, cm);
1607 	}
1608 	else {
1609 		/* XXX queue this target up for recovery once a TM becomes
1610 		 * available.  The firmware only has a limited number of
1611 		 * HighPriority credits for the high priority requests used
1612 		 * for task management, and we ran out.
1613 		 *
1614 		 * Isilon: don't worry about this for now, since we have
1615 		 * more credits than disks in an enclosure, and limit
1616 		 * ourselves to one TM per target for recovery.
1617 		 */
1618 		mps_dprint(sc, MPS_RECOVERY,
1619 		    "timedout cm %p failed to allocate a tm\n", cm);
1620 	}
1621 
1622 }
1623 
1624 static void
1625 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1626 {
1627 	MPI2_SCSI_IO_REQUEST *req;
1628 	struct ccb_scsiio *csio;
1629 	struct mps_softc *sc;
1630 	struct mpssas_target *targ;
1631 	struct mpssas_lun *lun;
1632 	struct mps_command *cm;
1633 	uint8_t i, lba_byte, *ref_tag_addr;
1634 	uint16_t eedp_flags;
1635 	uint32_t mpi_control;
1636 
1637 	sc = sassc->sc;
1638 	MPS_FUNCTRACE(sc);
1639 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1640 
1641 	csio = &ccb->csio;
1642 	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1643 	    ("Target %d out of bounds in XPT_SCSI_IO\n",
1644 	     csio->ccb_h.target_id));
1645 	targ = &sassc->targets[csio->ccb_h.target_id];
1646 	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
1647 	if (targ->handle == 0x0) {
1648 		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
1649 		    __func__, csio->ccb_h.target_id);
1650 		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
1651 		xpt_done(ccb);
1652 		return;
1653 	}
1654 	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
1655 		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
1656 		    "supported %u\n", __func__, csio->ccb_h.target_id);
1657 		mpssas_set_ccbstatus(ccb, CAM_TID_INVALID);
1658 		xpt_done(ccb);
1659 		return;
1660 	}
1661 	/*
1662 	 * Sometimes, it is possible to get a command that is not "In
1663 	 * Progress" and was actually aborted by the upper layer.  Check for
1664 	 * this here and complete the command without error.
1665 	 */
1666 	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1667 		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
1668 		    "target %u\n", __func__, csio->ccb_h.target_id);
1669 		xpt_done(ccb);
1670 		return;
1671 	}
1672 	/*
1673 	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
1674 	 * that the volume has timed out.  We want volumes to be enumerated
1675 	 * until they are deleted/removed, not just failed.
1676 	 */
1677 	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1678 		if (targ->devinfo == 0)
1679 			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1680 		else
1681 			mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
1682 		xpt_done(ccb);
1683 		return;
1684 	}
1685 
1686 	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1687 		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
1688 		mpssas_set_ccbstatus(ccb, CAM_TID_INVALID);
1689 		xpt_done(ccb);
1690 		return;
1691 	}
1692 
1693 	cm = mps_alloc_command(sc);
1694 	if (cm == NULL) {
1695 		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1696 			xpt_freeze_simq(sassc->sim, 1);
1697 			sassc->flags |= MPSSAS_QUEUE_FROZEN;
1698 		}
1699 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1700 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1701 		xpt_done(ccb);
1702 		return;
1703 	}
1704 
1705 	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1706 	bzero(req, sizeof(*req));
1707 	req->DevHandle = htole16(targ->handle);
1708 	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1709 	req->MsgFlags = 0;
1710 	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1711 	req->SenseBufferLength = MPS_SENSE_LEN;
1712 	req->SGLFlags = 0;
1713 	req->ChainOffset = 0;
1714 	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
1715 	req->SGLOffset1= 0;
1716 	req->SGLOffset2= 0;
1717 	req->SGLOffset3= 0;
1718 	req->SkipCount = 0;
1719 	req->DataLength = htole32(csio->dxfer_len);
1720 	req->BidirectionalDataLength = 0;
1721 	req->IoFlags = htole16(csio->cdb_len);
1722 	req->EEDPFlags = 0;
1723 
1724 	/* Note: BiDirectional transfers are not supported */
1725 	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1726 	case CAM_DIR_IN:
1727 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
1728 		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1729 		break;
1730 	case CAM_DIR_OUT:
1731 		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1732 		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1733 		break;
1734 	case CAM_DIR_NONE:
1735 	default:
1736 		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1737 		break;
1738 	}
1739 
1740 	if (csio->cdb_len == 32)
1741                 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1742 	/*
1743 	 * It looks like the hardware doesn't require an explicit tag
1744 	 * number for each transaction.  SAM Task Management not supported
1745 	 * at the moment.
1746 	 */
1747 	switch (csio->tag_action) {
1748 	case MSG_HEAD_OF_Q_TAG:
1749 		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1750 		break;
1751 	case MSG_ORDERED_Q_TAG:
1752 		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1753 		break;
1754 	case MSG_ACA_TASK:
1755 		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1756 		break;
1757 	case CAM_TAG_ACTION_NONE:
1758 	case MSG_SIMPLE_Q_TAG:
1759 	default:
1760 		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
1761 		break;
1762 	}
1763 	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1764 	req->Control = htole32(mpi_control);
1765 	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1766 		mps_free_command(sc, cm);
1767 		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
1768 		xpt_done(ccb);
1769 		return;
1770 	}
1771 
1772 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
1773 		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1774 	else
1775 		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1776 	req->IoFlags = htole16(csio->cdb_len);
1777 
1778 	/*
1779 	 * Check if EEDP is supported and enabled.  If it is then check if the
1780 	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
1781 	 * is formatted for EEDP support.  If all of this is true, set CDB up
1782 	 * for EEDP transfer.
1783 	 */
1784 	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1785 	if (sc->eedp_enabled && eedp_flags) {
1786 		SLIST_FOREACH(lun, &targ->luns, lun_link) {
1787 			if (lun->lun_id == csio->ccb_h.target_lun) {
1788 				break;
1789 			}
1790 		}
1791 
1792 		if ((lun != NULL) && (lun->eedp_formatted)) {
1793 			req->EEDPBlockSize = htole16(lun->eedp_block_size);
1794 			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1795 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1796 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1797 			req->EEDPFlags = htole16(eedp_flags);
1798 
1799 			/*
1800 			 * If CDB less than 32, fill in Primary Ref Tag with
1801 			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
1802 			 * already there.  Also, set protection bit.  FreeBSD
1803 			 * currently does not support CDBs bigger than 16, but
1804 			 * the code doesn't hurt, and will be here for the
1805 			 * future.
1806 			 */
1807 			if (csio->cdb_len != 32) {
1808 				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1809 				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1810 				    PrimaryReferenceTag;
1811 				for (i = 0; i < 4; i++) {
1812 					*ref_tag_addr =
1813 					    req->CDB.CDB32[lba_byte + i];
1814 					ref_tag_addr++;
1815 				}
1816 				req->CDB.EEDP32.PrimaryReferenceTag =
1817 					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
1818 				req->CDB.EEDP32.PrimaryApplicationTagMask =
1819 				    0xFFFF;
1820 				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1821 				    0x20;
1822 			} else {
1823 				eedp_flags |=
1824 				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1825 				req->EEDPFlags = htole16(eedp_flags);
1826 				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1827 				    0x1F) | 0x20;
1828 			}
1829 		}
1830 	}
1831 
1832 	cm->cm_length = csio->dxfer_len;
1833 	if (cm->cm_length != 0) {
1834 		cm->cm_data = ccb;
1835 		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
1836 	} else {
1837 		cm->cm_data = NULL;
1838 	}
1839 	cm->cm_sge = &req->SGL;
1840 	cm->cm_sglsize = (32 - 24) * 4;
1841 	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1842 	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1843 	cm->cm_complete = mpssas_scsiio_complete;
1844 	cm->cm_complete_data = ccb;
1845 	cm->cm_targ = targ;
1846 	cm->cm_lun = csio->ccb_h.target_lun;
1847 	cm->cm_ccb = ccb;
1848 
1849 	/*
1850 	 * If HBA is a WD and the command is not for a retry, try to build a
1851 	 * direct I/O message. If failed, or the command is for a retry, send
1852 	 * the I/O to the IR volume itself.
1853 	 */
1854 	if (sc->WD_valid_config) {
1855 		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
1856 			mpssas_direct_drive_io(sassc, cm, ccb);
1857 		} else {
1858 			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
1859 		}
1860 	}
1861 
1862 	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1863 	   mpssas_scsiio_timeout, cm);
1864 
1865 	targ->issued++;
1866 	targ->outstanding++;
1867 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1868 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1869 
1870 	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
1871 	    __func__, cm, ccb, targ->outstanding);
1872 
1873 	mps_map_command(sc, cm);
1874 	return;
1875 }
1876 
1877 static void
1878 mps_response_code(struct mps_softc *sc, u8 response_code)
1879 {
1880         char *desc;
1881 
1882         switch (response_code) {
1883         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1884                 desc = "task management request completed";
1885                 break;
1886         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1887                 desc = "invalid frame";
1888                 break;
1889         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1890                 desc = "task management request not supported";
1891                 break;
1892         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1893                 desc = "task management request failed";
1894                 break;
1895         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1896                 desc = "task management request succeeded";
1897                 break;
1898         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1899                 desc = "invalid lun";
1900                 break;
1901         case 0xA:
1902                 desc = "overlapped tag attempted";
1903                 break;
1904         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1905                 desc = "task queued, however not sent to target";
1906                 break;
1907         default:
1908                 desc = "unknown";
1909                 break;
1910         }
1911 		mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1912                 response_code, desc);
1913 }
1914 /**
1915  * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1916  */
1917 static void
1918 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1919     Mpi2SCSIIOReply_t *mpi_reply)
1920 {
1921 	u32 response_info;
1922 	u8 *response_bytes;
1923 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1924 	    MPI2_IOCSTATUS_MASK;
1925 	u8 scsi_state = mpi_reply->SCSIState;
1926 	u8 scsi_status = mpi_reply->SCSIStatus;
1927 	char *desc_ioc_state = NULL;
1928 	char *desc_scsi_status = NULL;
1929 	char *desc_scsi_state = sc->tmp_string;
1930 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1931 
1932 	if (log_info == 0x31170000)
1933 		return;
1934 
1935 	switch (ioc_status) {
1936 	case MPI2_IOCSTATUS_SUCCESS:
1937 		desc_ioc_state = "success";
1938 		break;
1939 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1940 		desc_ioc_state = "invalid function";
1941 		break;
1942 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1943 		desc_ioc_state = "scsi recovered error";
1944 		break;
1945 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1946 		desc_ioc_state = "scsi invalid dev handle";
1947 		break;
1948 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1949 		desc_ioc_state = "scsi device not there";
1950 		break;
1951 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1952 		desc_ioc_state = "scsi data overrun";
1953 		break;
1954 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1955 		desc_ioc_state = "scsi data underrun";
1956 		break;
1957 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1958 		desc_ioc_state = "scsi io data error";
1959 		break;
1960 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1961 		desc_ioc_state = "scsi protocol error";
1962 		break;
1963 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1964 		desc_ioc_state = "scsi task terminated";
1965 		break;
1966 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1967 		desc_ioc_state = "scsi residual mismatch";
1968 		break;
1969 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1970 		desc_ioc_state = "scsi task mgmt failed";
1971 		break;
1972 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1973 		desc_ioc_state = "scsi ioc terminated";
1974 		break;
1975 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1976 		desc_ioc_state = "scsi ext terminated";
1977 		break;
1978 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1979 		desc_ioc_state = "eedp guard error";
1980 		break;
1981 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1982 		desc_ioc_state = "eedp ref tag error";
1983 		break;
1984 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1985 		desc_ioc_state = "eedp app tag error";
1986 		break;
1987 	default:
1988 		desc_ioc_state = "unknown";
1989 		break;
1990 	}
1991 
1992 	switch (scsi_status) {
1993 	case MPI2_SCSI_STATUS_GOOD:
1994 		desc_scsi_status = "good";
1995 		break;
1996 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
1997 		desc_scsi_status = "check condition";
1998 		break;
1999 	case MPI2_SCSI_STATUS_CONDITION_MET:
2000 		desc_scsi_status = "condition met";
2001 		break;
2002 	case MPI2_SCSI_STATUS_BUSY:
2003 		desc_scsi_status = "busy";
2004 		break;
2005 	case MPI2_SCSI_STATUS_INTERMEDIATE:
2006 		desc_scsi_status = "intermediate";
2007 		break;
2008 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2009 		desc_scsi_status = "intermediate condmet";
2010 		break;
2011 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2012 		desc_scsi_status = "reservation conflict";
2013 		break;
2014 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2015 		desc_scsi_status = "command terminated";
2016 		break;
2017 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
2018 		desc_scsi_status = "task set full";
2019 		break;
2020 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
2021 		desc_scsi_status = "aca active";
2022 		break;
2023 	case MPI2_SCSI_STATUS_TASK_ABORTED:
2024 		desc_scsi_status = "task aborted";
2025 		break;
2026 	default:
2027 		desc_scsi_status = "unknown";
2028 		break;
2029 	}
2030 
2031 	desc_scsi_state[0] = '\0';
2032 	if (!scsi_state)
2033 		desc_scsi_state = " ";
2034 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2035 		strcat(desc_scsi_state, "response info ");
2036 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2037 		strcat(desc_scsi_state, "state terminated ");
2038 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2039 		strcat(desc_scsi_state, "no status ");
2040 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2041 		strcat(desc_scsi_state, "autosense failed ");
2042 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2043 		strcat(desc_scsi_state, "autosense valid ");
2044 
2045 	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2046 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2047 	/* We can add more detail about underflow data here
2048 	 * TO-DO
2049 	 * */
2050 	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
2051 	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2052 	    desc_scsi_state, scsi_state);
2053 
2054 	if (sc->mps_debug & MPS_XINFO &&
2055 		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2056 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
2057 		scsi_sense_print(csio);
2058 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
2059 	}
2060 
2061 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2062 		response_info = le32toh(mpi_reply->ResponseInfo);
2063 		response_bytes = (u8 *)&response_info;
2064 		mps_response_code(sc,response_bytes[0]);
2065 	}
2066 }
2067 
/*
 * Completion handler for XPT_SCSI_IO commands.  Syncs/unloads the data
 * DMA map, retires the command from the target's active list, logs any
 * completion that races with error recovery, translates the MPI2 reply
 * (if any) into CAM status -- including autosense copy-out and TLR
 * bookkeeping -- handles Direct Drive I/O retry, then frees the command
 * and completes the CCB with xpt_done().
 */
static void
mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpssas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* The command completed; cancel its timeout callout. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Retire the command from the target's active list. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

	/* Extra logging when this completion races with error recovery. */
	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm, MPS_RECOVERY,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_XINFO, "Error sending command, "
				   "freezing SIM queue\n");
		}
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mpssas_log_command(cm, MPS_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	} else
		ccb->ccb_h.sim_priv.entries[0].field = 0;

	/* Translate the IOC status from the reply frame into CAM status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		/* Copy sense data out of the command's sense buffer,
		 * clamped to both what the IOC returned and what the CCB
		 * has room for; record any shortfall in sense_resid. */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
					returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			      sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
			    TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * Since these are generally external (i.e. hopefully
		 * transient transport-related) errors, retry these without
		 * decrementing the retry count.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
		mpssas_log_command(cm, MPS_INFO,
		    "terminated ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mpssas_log_command(cm, MPS_XINFO,
		    "completed ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

	/* Detailed reply decode; only prints via mps_dprint(MPS_XINFO). */
	mps_sc_failed_io_info(sc,csio,rep);

	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_XINFO, "Command completed, "
		    "unfreezing SIM queue\n");
	}

	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2426 
/* All requests that reach this point are endian safe. */
2428 static void
2429 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2430     union ccb *ccb) {
2431 	pMpi2SCSIIORequest_t	pIO_req;
2432 	struct mps_softc	*sc = sassc->sc;
2433 	uint64_t		virtLBA;
2434 	uint32_t		physLBA, stripe_offset, stripe_unit;
2435 	uint32_t		io_size, column;
2436 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2437 
2438 	/*
2439 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2440 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2441 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2442 	 * bit different than the 10/16 CDBs, handle them separately.
2443 	 */
2444 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2445 	CDB = pIO_req->CDB.CDB32;
2446 
2447 	/*
2448 	 * Handle 6 byte CDBs.
2449 	 */
2450 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2451 	    (CDB[0] == WRITE_6))) {
2452 		/*
2453 		 * Get the transfer size in blocks.
2454 		 */
2455 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2456 
2457 		/*
2458 		 * Get virtual LBA given in the CDB.
2459 		 */
2460 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2461 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2462 
2463 		/*
2464 		 * Check that LBA range for I/O does not exceed volume's
2465 		 * MaxLBA.
2466 		 */
2467 		if ((virtLBA + (uint64_t)io_size - 1) <=
2468 		    sc->DD_max_lba) {
2469 			/*
2470 			 * Check if the I/O crosses a stripe boundary.  If not,
2471 			 * translate the virtual LBA to a physical LBA and set
2472 			 * the DevHandle for the PhysDisk to be used.  If it
2473 			 * does cross a boundry, do normal I/O.  To get the
2474 			 * right DevHandle to use, get the map number for the
2475 			 * column, then use that map number to look up the
2476 			 * DevHandle of the PhysDisk.
2477 			 */
2478 			stripe_offset = (uint32_t)virtLBA &
2479 			    (sc->DD_stripe_size - 1);
2480 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2481 				physLBA = (uint32_t)virtLBA >>
2482 				    sc->DD_stripe_exponent;
2483 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2484 				column = physLBA % sc->DD_num_phys_disks;
2485 				pIO_req->DevHandle =
2486 				    htole16(sc->DD_column_map[column].dev_handle);
2487 				/* ???? Is this endian safe*/
2488 				cm->cm_desc.SCSIIO.DevHandle =
2489 				    pIO_req->DevHandle;
2490 
2491 				physLBA = (stripe_unit <<
2492 				    sc->DD_stripe_exponent) + stripe_offset;
2493 				ptrLBA = &pIO_req->CDB.CDB32[1];
2494 				physLBA_byte = (uint8_t)(physLBA >> 16);
2495 				*ptrLBA = physLBA_byte;
2496 				ptrLBA = &pIO_req->CDB.CDB32[2];
2497 				physLBA_byte = (uint8_t)(physLBA >> 8);
2498 				*ptrLBA = physLBA_byte;
2499 				ptrLBA = &pIO_req->CDB.CDB32[3];
2500 				physLBA_byte = (uint8_t)physLBA;
2501 				*ptrLBA = physLBA_byte;
2502 
2503 				/*
2504 				 * Set flag that Direct Drive I/O is
2505 				 * being done.
2506 				 */
2507 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2508 			}
2509 		}
2510 		return;
2511 	}
2512 
2513 	/*
2514 	 * Handle 10, 12 or 16 byte CDBs.
2515 	 */
2516 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2517 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2518 	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2519 	    (CDB[0] == WRITE_12))) {
2520 		/*
2521 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2522 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2523 		 * the else section.  10-byte and 12-byte CDB's are OK.
2524 		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2525 		 * ready to accept 12byte CDB for Direct IOs.
2526 		 */
2527 		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2528 		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2529 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2530 			/*
2531 			 * Get the transfer size in blocks.
2532 			 */
2533 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2534 
2535 			/*
2536 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2537 			 * LBA in the CDB depending on command.
2538 			 */
2539 			lba_idx = ((CDB[0] == READ_12) ||
2540 				(CDB[0] == WRITE_12) ||
2541 				(CDB[0] == READ_10) ||
2542 				(CDB[0] == WRITE_10))? 2 : 6;
2543 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2544 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2545 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2546 			    (uint64_t)CDB[lba_idx + 3];
2547 
2548 			/*
2549 			 * Check that LBA range for I/O does not exceed volume's
2550 			 * MaxLBA.
2551 			 */
2552 			if ((virtLBA + (uint64_t)io_size - 1) <=
2553 			    sc->DD_max_lba) {
2554 				/*
2555 				 * Check if the I/O crosses a stripe boundary.
2556 				 * If not, translate the virtual LBA to a
2557 				 * physical LBA and set the DevHandle for the
2558 				 * PhysDisk to be used.  If it does cross a
2559 				 * boundry, do normal I/O.  To get the right
2560 				 * DevHandle to use, get the map number for the
2561 				 * column, then use that map number to look up
2562 				 * the DevHandle of the PhysDisk.
2563 				 */
2564 				stripe_offset = (uint32_t)virtLBA &
2565 				    (sc->DD_stripe_size - 1);
2566 				if ((stripe_offset + io_size) <=
2567 				    sc->DD_stripe_size) {
2568 					physLBA = (uint32_t)virtLBA >>
2569 					    sc->DD_stripe_exponent;
2570 					stripe_unit = physLBA /
2571 					    sc->DD_num_phys_disks;
2572 					column = physLBA %
2573 					    sc->DD_num_phys_disks;
2574 					pIO_req->DevHandle =
2575 					    htole16(sc->DD_column_map[column].
2576 					    dev_handle);
2577 					cm->cm_desc.SCSIIO.DevHandle =
2578 					    pIO_req->DevHandle;
2579 
2580 					physLBA = (stripe_unit <<
2581 					    sc->DD_stripe_exponent) +
2582 					    stripe_offset;
2583 					ptrLBA =
2584 					    &pIO_req->CDB.CDB32[lba_idx];
2585 					physLBA_byte = (uint8_t)(physLBA >> 24);
2586 					*ptrLBA = physLBA_byte;
2587 					ptrLBA =
2588 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2589 					physLBA_byte = (uint8_t)(physLBA >> 16);
2590 					*ptrLBA = physLBA_byte;
2591 					ptrLBA =
2592 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2593 					physLBA_byte = (uint8_t)(physLBA >> 8);
2594 					*ptrLBA = physLBA_byte;
2595 					ptrLBA =
2596 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2597 					physLBA_byte = (uint8_t)physLBA;
2598 					*ptrLBA = physLBA_byte;
2599 
2600 					/*
2601 					 * Set flag that Direct Drive I/O is
2602 					 * being done.
2603 					 */
2604 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2605 				}
2606 			}
2607 		} else {
2608 			/*
2609 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2610 			 * 0.  Get the transfer size in blocks.
2611 			 */
2612 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2613 
2614 			/*
2615 			 * Get virtual LBA.
2616 			 */
2617 			virtLBA = ((uint64_t)CDB[2] << 54) |
2618 			    ((uint64_t)CDB[3] << 48) |
2619 			    ((uint64_t)CDB[4] << 40) |
2620 			    ((uint64_t)CDB[5] << 32) |
2621 			    ((uint64_t)CDB[6] << 24) |
2622 			    ((uint64_t)CDB[7] << 16) |
2623 			    ((uint64_t)CDB[8] << 8) |
2624 			    (uint64_t)CDB[9];
2625 
2626 			/*
2627 			 * Check that LBA range for I/O does not exceed volume's
2628 			 * MaxLBA.
2629 			 */
2630 			if ((virtLBA + (uint64_t)io_size - 1) <=
2631 			    sc->DD_max_lba) {
2632 				/*
2633 				 * Check if the I/O crosses a stripe boundary.
2634 				 * If not, translate the virtual LBA to a
2635 				 * physical LBA and set the DevHandle for the
2636 				 * PhysDisk to be used.  If it does cross a
2637 				 * boundry, do normal I/O.  To get the right
2638 				 * DevHandle to use, get the map number for the
2639 				 * column, then use that map number to look up
2640 				 * the DevHandle of the PhysDisk.
2641 				 */
2642 				stripe_offset = (uint32_t)virtLBA &
2643 				    (sc->DD_stripe_size - 1);
2644 				if ((stripe_offset + io_size) <=
2645 				    sc->DD_stripe_size) {
2646 					physLBA = (uint32_t)(virtLBA >>
2647 					    sc->DD_stripe_exponent);
2648 					stripe_unit = physLBA /
2649 					    sc->DD_num_phys_disks;
2650 					column = physLBA %
2651 					    sc->DD_num_phys_disks;
2652 					pIO_req->DevHandle =
2653 					    htole16(sc->DD_column_map[column].
2654 					    dev_handle);
2655 					cm->cm_desc.SCSIIO.DevHandle =
2656 					    pIO_req->DevHandle;
2657 
2658 					physLBA = (stripe_unit <<
2659 					    sc->DD_stripe_exponent) +
2660 					    stripe_offset;
2661 
2662 					/*
2663 					 * Set upper 4 bytes of LBA to 0.  We
2664 					 * assume that the phys disks are less
2665 					 * than 2 TB's in size.  Then, set the
2666 					 * lower 4 bytes.
2667 					 */
2668 					pIO_req->CDB.CDB32[2] = 0;
2669 					pIO_req->CDB.CDB32[3] = 0;
2670 					pIO_req->CDB.CDB32[4] = 0;
2671 					pIO_req->CDB.CDB32[5] = 0;
2672 					ptrLBA = &pIO_req->CDB.CDB32[6];
2673 					physLBA_byte = (uint8_t)(physLBA >> 24);
2674 					*ptrLBA = physLBA_byte;
2675 					ptrLBA = &pIO_req->CDB.CDB32[7];
2676 					physLBA_byte = (uint8_t)(physLBA >> 16);
2677 					*ptrLBA = physLBA_byte;
2678 					ptrLBA = &pIO_req->CDB.CDB32[8];
2679 					physLBA_byte = (uint8_t)(physLBA >> 8);
2680 					*ptrLBA = physLBA_byte;
2681 					ptrLBA = &pIO_req->CDB.CDB32[9];
2682 					physLBA_byte = (uint8_t)physLBA;
2683 					*ptrLBA = physLBA_byte;
2684 
2685 					/*
2686 					 * Set flag that Direct Drive I/O is
2687 					 * being done.
2688 					 */
2689 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2690 				}
2691 			}
2692 		}
2693 	}
2694 }
2695 
2696 #if __FreeBSD_version >= 900026
2697 static void
2698 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2699 {
2700 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2701 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2702 	uint64_t sasaddr;
2703 	union ccb *ccb;
2704 
2705 	ccb = cm->cm_complete_data;
2706 
2707 	/*
2708 	 * Currently there should be no way we can hit this case.  It only
2709 	 * happens when we have a failure to allocate chain frames, and SMP
2710 	 * commands require two S/G elements only.  That should be handled
2711 	 * in the standard request size.
2712 	 */
2713 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2714 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2715 			   __func__, cm->cm_flags);
2716 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2717 		goto bailout;
2718         }
2719 
2720 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2721 	if (rpl == NULL) {
2722 		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2723 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2724 		goto bailout;
2725 	}
2726 
2727 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2728 	sasaddr = le32toh(req->SASAddress.Low);
2729 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2730 
2731 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2732 	    MPI2_IOCSTATUS_SUCCESS ||
2733 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2734 		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2735 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2736 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2737 		goto bailout;
2738 	}
2739 
2740 	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2741 		   "%#jx completed successfully\n", __func__,
2742 		   (uintmax_t)sasaddr);
2743 
2744 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2745 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2746 	else
2747 		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2748 
2749 bailout:
2750 	/*
2751 	 * We sync in both directions because we had DMAs in the S/G list
2752 	 * in both directions.
2753 	 */
2754 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2755 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2756 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2757 	mps_free_command(sc, cm);
2758 	xpt_done(ccb);
2759 }
2760 
/*
 * Validate the data buffers in an XPT_SMP_IO CCB, then build an SMP
 * passthrough request for the given SAS address and hand it to the
 * chip.  The request/response pair is mapped as a two-entry uio so a
 * single busdma load covers both directions; completion is handled by
 * mpssas_smpio_complete(), which also finishes the CCB.
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
			   "%s: physical addresses not supported\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			/*
			 * Single-entry S/G list; ds_addr carries a kernel
			 * virtual address here (CAM_DATA_SG, not _SG_PADDR).
			 */
			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the request out, iovec 1 receives the response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;

}
2933 
/*
 * Handle an XPT_SMP_IO CCB: determine which SAS address the SMP request
 * should be routed to (either the target itself, if it embeds an SMP
 * target, or its parent expander) and pass the CCB on to
 * mpssas_send_smpcmd().
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we have to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			goto bailout;
		}

		/* The parent must itself contain an SMP target. */
		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/* The parent must itself contain an SMP target. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3069 #endif //__FreeBSD_version >= 900026
3070 
/*
 * Handle an XPT_RESET_DEV CCB: issue a SCSI task management Target
 * Reset (with a hard link reset) to the device addressed by the CCB.
 * The CCB is finished asynchronously by mpssas_resetdev_complete().
 */
static void
mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *tm;
	struct mpssas_target *targ;

	MPS_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_RESET_DEV\n",
	     ccb->ccb_h.target_id));
	sc = sassc->sc;
	tm = mps_alloc_command(sc);
	if (tm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "command alloc failure in mpssas_action_resetdev\n");
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	/* Task management requests are high priority and carry no data. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mpssas_resetdev_complete;
	tm->cm_complete_data = ccb;
	tm->cm_targ = targ;
	mps_map_command(sc, tm);
}
3111 
/*
 * Completion handler for the target reset issued by
 * mpssas_action_resetdev().  Translates the task management reply into
 * a CAM status, announces the reset to CAM if it succeeded, and
 * finishes the original CCB.
 */
static void
mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mps_dprint(sc, MPS_ERROR,
			   "%s: cm_flags = %#x for reset of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   req->DevHandle);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/*
	 * NOTE(review): the MPI2 spec describes ResponseCode as a
	 * byte-wide field, so the le32toh() conversions below look
	 * suspect on big-endian hosts (the TM_COMPLETE comparison still
	 * comes out right because that code is zero) — confirm against
	 * the reply structure layout.
	 */
	mps_dprint(sc, MPS_XINFO,
	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		/* Tell CAM a bus device reset was delivered to this target. */
		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

bailout:

	mpssas_free_tm(sc, tm);
	xpt_done(ccb);
}
3159 
3160 static void
3161 mpssas_poll(struct cam_sim *sim)
3162 {
3163 	struct mpssas_softc *sassc;
3164 
3165 	sassc = cam_sim_softc(sim);
3166 
3167 	if (sassc->sc->mps_debug & MPS_TRACE) {
3168 		/* frequent debug messages during a panic just slow
3169 		 * everything down too much.
3170 		 */
3171 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3172 		sassc->sc->mps_debug &= ~MPS_TRACE;
3173 	}
3174 
3175 	mps_intr_locked(sassc->sc);
3176 }
3177 
/*
 * CAM async event callback.  On newer CAM stacks we watch for
 * AC_ADVINFO_CHANGED read-capacity updates to (re)compute each LUN's
 * EEDP state; on older stacks we hook AC_FOUND_DEVICE and probe the
 * new device with mpssas_check_eedp() instead.
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look up our record for this LUN; create one if missing. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read capacity data through an
		 * XPT_DEV_ADVINFO CCB; it carries the protection bit we
		 * need to decide EEDP state.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = 0;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		/* Older CAM: probe the new device for EEDP directly. */
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3277 
3278 #if (__FreeBSD_version < 901503) || \
3279     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3280 static void
3281 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3282 		  struct ccb_getdev *cgd)
3283 {
3284 	struct mpssas_softc *sassc = sc->sassc;
3285 	struct ccb_scsiio *csio;
3286 	struct scsi_read_capacity_16 *scsi_cmd;
3287 	struct scsi_read_capacity_eedp *rcap_buf;
3288 	path_id_t pathid;
3289 	target_id_t targetid;
3290 	lun_id_t lunid;
3291 	union ccb *ccb;
3292 	struct cam_path *local_path;
3293 	struct mpssas_target *target;
3294 	struct mpssas_lun *lun;
3295 	uint8_t	found_lun;
3296 	char path_str[64];
3297 
3298 	sassc = sc->sassc;
3299 	pathid = cam_sim_path(sassc->sim);
3300 	targetid = xpt_path_target_id(path);
3301 	lunid = xpt_path_lun_id(path);
3302 
3303 	KASSERT(targetid < sassc->maxtargets,
3304 	    ("Target %d out of bounds in mpssas_check_eedp\n",
3305 	     targetid));
3306 	target = &sassc->targets[targetid];
3307 	if (target->handle == 0x0)
3308 		return;
3309 
3310 	/*
3311 	 * Determine if the device is EEDP capable.
3312 	 *
3313 	 * If this flag is set in the inquiry data,
3314 	 * the device supports protection information,
3315 	 * and must support the 16 byte read
3316 	 * capacity command, otherwise continue without
3317 	 * sending read cap 16
3318 	 */
3319 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3320 		return;
3321 
3322 	/*
3323 	 * Issue a READ CAPACITY 16 command.  This info
3324 	 * is used to determine if the LUN is formatted
3325 	 * for EEDP support.
3326 	 */
3327 	ccb = xpt_alloc_ccb_nowait();
3328 	if (ccb == NULL) {
3329 		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3330 		    "for EEDP support.\n");
3331 		return;
3332 	}
3333 
3334 	if (xpt_create_path(&local_path, xpt_periph,
3335 	    pathid, targetid, lunid) != CAM_REQ_CMP) {
3336 		mps_dprint(sc, MPS_ERROR, "Unable to create "
3337 		    "path for EEDP support\n");
3338 		xpt_free_ccb(ccb);
3339 		return;
3340 	}
3341 
3342 	/*
3343 	 * If LUN is already in list, don't create a new
3344 	 * one.
3345 	 */
3346 	found_lun = FALSE;
3347 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3348 		if (lun->lun_id == lunid) {
3349 			found_lun = TRUE;
3350 			break;
3351 		}
3352 	}
3353 	if (!found_lun) {
3354 		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3355 		    M_NOWAIT | M_ZERO);
3356 		if (lun == NULL) {
3357 			mps_dprint(sc, MPS_ERROR,
3358 			    "Unable to alloc LUN for EEDP support.\n");
3359 			xpt_free_path(local_path);
3360 			xpt_free_ccb(ccb);
3361 			return;
3362 		}
3363 		lun->lun_id = lunid;
3364 		SLIST_INSERT_HEAD(&target->luns, lun,
3365 		    lun_link);
3366 	}
3367 
3368 	xpt_path_string(local_path, path_str, sizeof(path_str));
3369 	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3370 	    path_str, target->handle);
3371 
3372 	/*
3373 	 * Issue a READ CAPACITY 16 command for the LUN.
3374 	 * The mpssas_read_cap_done function will load
3375 	 * the read cap info into the LUN struct.
3376 	 */
3377 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3378 	    M_MPT2, M_NOWAIT | M_ZERO);
3379 	if (rcap_buf == NULL) {
3380 		mps_dprint(sc, MPS_FAULT,
3381 		    "Unable to alloc read capacity buffer for EEDP support.\n");
3382 		xpt_free_path(ccb->ccb_h.path);
3383 		xpt_free_ccb(ccb);
3384 		return;
3385 	}
3386 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3387 	csio = &ccb->csio;
3388 	csio->ccb_h.func_code = XPT_SCSI_IO;
3389 	csio->ccb_h.flags = CAM_DIR_IN;
3390 	csio->ccb_h.retry_count = 4;
3391 	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3392 	csio->ccb_h.timeout = 60000;
3393 	csio->data_ptr = (uint8_t *)rcap_buf;
3394 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3395 	csio->sense_len = MPS_SENSE_LEN;
3396 	csio->cdb_len = sizeof(*scsi_cmd);
3397 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3398 
3399 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3400 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3401 	scsi_cmd->opcode = 0x9E;
3402 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3403 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3404 
3405 	ccb->ccb_h.ppriv_ptr1 = sassc;
3406 	xpt_action(ccb);
3407 }
3408 
/*
 * Completion handler for the internally-generated READ CAPACITY 16
 * issued by mpssas_check_eedp().  Records the LUN's EEDP state in the
 * target's LUN list and releases the CCB, path and data buffer.
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver must release the devq itself when the SCSI command
	 * was generated internally, since such commands do not go back
	 * through cam_periph.  This is currently the only place the
	 * driver issues a SCSI command internally; if more are added,
	 * they will need the same treatment.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mpssas_read_cap_done\n",
	     done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Bit 0 of the PROT byte is PROT_EN: protection enabled. */
		if (rcap_buf->protect & 0x01) {
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
 			    "target ID %d is formatted for EEDP "
 			    "support.\n", done_ccb->ccb_h.target_lun,
 			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3479 #endif /* (__FreeBSD_version < 901503) || \
3480           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3481 
3482 int
3483 mpssas_startup(struct mps_softc *sc)
3484 {
3485 
3486 	/*
3487 	 * Send the port enable message and set the wait_for_port_enable flag.
3488 	 * This flag helps to keep the simq frozen until all discovery events
3489 	 * are processed.
3490 	 */
3491 	sc->wait_for_port_enable = 1;
3492 	mpssas_send_portenable(sc);
3493 	return (0);
3494 }
3495 
3496 static int
3497 mpssas_send_portenable(struct mps_softc *sc)
3498 {
3499 	MPI2_PORT_ENABLE_REQUEST *request;
3500 	struct mps_command *cm;
3501 
3502 	MPS_FUNCTRACE(sc);
3503 
3504 	if ((cm = mps_alloc_command(sc)) == NULL)
3505 		return (EBUSY);
3506 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3507 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3508 	request->MsgFlags = 0;
3509 	request->VP_ID = 0;
3510 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3511 	cm->cm_complete = mpssas_portenable_complete;
3512 	cm->cm_data = NULL;
3513 	cm->cm_sge = NULL;
3514 
3515 	mps_map_command(sc, cm);
3516 	mps_dprint(sc, MPS_XINFO,
3517 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3518 	    cm, cm->cm_req, cm->cm_complete);
3519 	return (0);
3520 }
3521 
3522 static void
3523 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3524 {
3525 	MPI2_PORT_ENABLE_REPLY *reply;
3526 	struct mpssas_softc *sassc;
3527 
3528 	MPS_FUNCTRACE(sc);
3529 	sassc = sc->sassc;
3530 
3531 	/*
3532 	 * Currently there should be no way we can hit this case.  It only
3533 	 * happens when we have a failure to allocate chain frames, and
3534 	 * port enable commands don't have S/G lists.
3535 	 */
3536 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3537 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3538 			   "This should not happen!\n", __func__, cm->cm_flags);
3539 	}
3540 
3541 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3542 	if (reply == NULL)
3543 		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3544 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3545 	    MPI2_IOCSTATUS_SUCCESS)
3546 		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3547 
3548 	mps_free_command(sc, cm);
3549 	if (sc->mps_ich.ich_arg != NULL) {
3550 		mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3551 		config_intrhook_disestablish(&sc->mps_ich);
3552 		sc->mps_ich.ich_arg = NULL;
3553 	}
3554 
3555 	/*
3556 	 * Get WarpDrive info after discovery is complete but before the scan
3557 	 * starts.  At this point, all devices are ready to be exposed to the
3558 	 * OS.  If devices should be hidden instead, take them out of the
3559 	 * 'targets' array before the scan.  The devinfo for a disk will have
3560 	 * some info and a volume's will be 0.  Use that to remove disks.
3561 	 */
3562 	mps_wd_config_pages(sc);
3563 
3564 	/*
3565 	 * Done waiting for port enable to complete.  Decrement the refcount.
3566 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3567 	 * take place.  Since the simq was explicitly frozen before port
3568 	 * enable, it must be explicitly released here to keep the
3569 	 * freeze/release count in sync.
3570 	 */
3571 	sc->wait_for_port_enable = 0;
3572 	sc->port_enable_complete = 1;
3573 	wakeup(&sc->port_enable_complete);
3574 	mpssas_startup_decrement(sassc);
3575 }
3576 
3577 int
3578 mpssas_check_id(struct mpssas_softc *sassc, int id)
3579 {
3580 	struct mps_softc *sc = sassc->sc;
3581 	char *ids;
3582 	char *name;
3583 
3584 	ids = &sc->exclude_ids[0];
3585 	while((name = strsep(&ids, ",")) != NULL) {
3586 		if (name[0] == '\0')
3587 			continue;
3588 		if (strtol(name, NULL, 0) == (long)id)
3589 			return (1);
3590 	}
3591 
3592 	return (0);
3593 }
3594 
3595 void
3596 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3597 {
3598 	struct mpssas_softc *sassc;
3599 	struct mpssas_lun *lun, *lun_tmp;
3600 	struct mpssas_target *targ;
3601 	int i;
3602 
3603 	sassc = sc->sassc;
3604 	/*
3605 	 * The number of targets is based on IOC Facts, so free all of
3606 	 * the allocated LUNs for each target and then the target buffer
3607 	 * itself.
3608 	 */
3609 	for (i=0; i< maxtargets; i++) {
3610 		targ = &sassc->targets[i];
3611 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3612 			free(lun, M_MPT2);
3613 		}
3614 	}
3615 	free(sassc->targets, M_MPT2);
3616 
3617 	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3618 	    M_MPT2, M_WAITOK|M_ZERO);
3619 	if (!sassc->targets) {
3620 		panic("%s failed to alloc targets with error %d\n",
3621 		    __func__, ENOMEM);
3622 	}
3623 }
3624