xref: /freebsd/sys/dev/mps/mps_sas.c (revision 8657387683946d0c03e09fe77029edfe309eeb20)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2015 Avago Technologies
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
29  *
30  * $FreeBSD$
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /* Communications core for Avago Technologies (LSI) MPT2 */
37 
38 /* TODO Move headers to mpsvar */
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/selinfo.h>
44 #include <sys/module.h>
45 #include <sys/bus.h>
46 #include <sys/conf.h>
47 #include <sys/bio.h>
48 #include <sys/malloc.h>
49 #include <sys/uio.h>
50 #include <sys/sysctl.h>
51 #include <sys/endian.h>
52 #include <sys/queue.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
55 #include <sys/sbuf.h>
56 
57 #include <machine/bus.h>
58 #include <machine/resource.h>
59 #include <sys/rman.h>
60 
61 #include <machine/stdarg.h>
62 
63 #include <cam/cam.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_xpt.h>
66 #include <cam/cam_debug.h>
67 #include <cam/cam_sim.h>
68 #include <cam/cam_xpt_sim.h>
69 #include <cam/cam_xpt_periph.h>
70 #include <cam/cam_periph.h>
71 #include <cam/scsi/scsi_all.h>
72 #include <cam/scsi/scsi_message.h>
73 #if __FreeBSD_version >= 900026
74 #include <cam/scsi/smp_all.h>
75 #endif
76 
77 #include <dev/mps/mpi/mpi2_type.h>
78 #include <dev/mps/mpi/mpi2.h>
79 #include <dev/mps/mpi/mpi2_ioc.h>
80 #include <dev/mps/mpi/mpi2_sas.h>
81 #include <dev/mps/mpi/mpi2_cnfg.h>
82 #include <dev/mps/mpi/mpi2_init.h>
83 #include <dev/mps/mpi/mpi2_tool.h>
84 #include <dev/mps/mps_ioctl.h>
85 #include <dev/mps/mpsvar.h>
86 #include <dev/mps/mps_table.h>
87 #include <dev/mps/mps_sas.h>
88 
89 #define MPSSAS_DISCOVERY_TIMEOUT	20
90 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
91 
92 /*
93  * static array to check SCSI OpCode for EEDP protection bits
94  */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Lookup table indexed by SCSI opcode (0x00-0xFF); a non-zero entry gives
 * the EEDP flags to apply for that command.  The non-zero slots correspond
 * to the READ/WRITE/VERIFY opcode families (e.g. 0x28/0x2A are the 10-byte
 * READ/WRITE CDBs, 0x88/0x8A the 16-byte ones, 0xA8/0xAA the 12-byte ones
 * per SPC/SBC -- confirm against the SCSI block command spec if extending).
 */
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
116 
117 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
118 
119 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mpssas_poll(struct cam_sim *sim);
123 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
124     struct mps_command *cm);
125 static void mpssas_scsiio_timeout(void *data);
126 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
127 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
128     struct mps_command *cm, union ccb *ccb);
129 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
130 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
131 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
132 #if __FreeBSD_version >= 900026
133 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
134 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
135 			       uint64_t sasaddr);
136 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
137 #endif //FreeBSD_version >= 900026
138 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
139 static void mpssas_async(void *callback_arg, uint32_t code,
140 			 struct cam_path *path, void *arg);
141 #if (__FreeBSD_version < 901503) || \
142     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
143 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
144 			      struct ccb_getdev *cgd);
145 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
146 #endif
147 static int mpssas_send_portenable(struct mps_softc *sc);
148 static void mpssas_portenable_complete(struct mps_softc *sc,
149     struct mps_command *cm);
150 
151 struct mpssas_target *
152 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
153 {
154 	struct mpssas_target *target;
155 	int i;
156 
157 	for (i = start; i < sassc->maxtargets; i++) {
158 		target = &sassc->targets[i];
159 		if (target->handle == handle)
160 			return (target);
161 	}
162 
163 	return (NULL);
164 }
165 
166 /* we need to freeze the simq during attach and diag reset, to avoid failing
167  * commands before device handles have been found by discovery.  Since
168  * discovery involves reading config pages and possibly sending commands,
169  * discovery actions may continue even after we receive the end of discovery
170  * event, so refcount discovery actions instead of assuming we can unfreeze
171  * the simq when we get the event.
172  */
/*
 * Take a reference on discovery-time activity.  The first reference taken
 * while MPSSAS_IN_STARTUP is set freezes the SIM queue (and holds boot on
 * newer CAM) so no I/O is issued before device handles exist.
 */
void
mpssas_startup_increment(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s freezing simq\n", __func__);
#if __FreeBSD_version >= 1000039
			/* Keep the boot process waiting until discovery ends. */
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
192 
193 void
194 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
195 {
196 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
197 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
198 		xpt_release_simq(sassc->sim, 1);
199 		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
200 	}
201 }
202 
/*
 * Drop a discovery reference taken by mpssas_startup_increment().  When the
 * last reference is released, clear MPSSAS_IN_STARTUP, unfreeze the SIM
 * queue, and either release boot (new CAM) or trigger a full bus rescan.
 */
void
mpssas_startup_decrement(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPSSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if __FreeBSD_version >= 1000039
			xpt_release_boot();
#else
			/* Older CAM has no boot hold; rescan everything. */
			mpssas_rescan_target(sassc->sc, NULL);
#endif
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
227 
228 /* The firmware requires us to stop sending commands when we're doing task
229  * management, so refcount the TMs and keep the simq frozen when any are in
230  * use.
231  */
/*
 * Allocate a high-priority command for use as a task management request.
 * Returns NULL when no high-priority command is available.
 */
struct mps_command *
mpssas_alloc_tm(struct mps_softc *sc)
{
	return (mps_alloc_high_priority_command(sc));
}
240 
/*
 * Release a task management command allocated with mpssas_alloc_tm().
 * Undoes the per-device freeze set up for the TM (devq release, INRESET
 * clear) and frees the path/CCB used to track it.  Safe to call with NULL.
 */
void
mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
{
	int target_id = 0xFFFFFFFF;	/* sentinel: "no target attached" */

	if (tm == NULL)
		return;

	/*
	 * For TM's the devq is frozen for the device.  Unfreeze it here and
	 * free the resources used for freezing the devq.  Must clear the
	 * INRESET flag as well or scsi I/O will not work.
	 */
	if (tm->cm_targ != NULL) {
		tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
		target_id = tm->cm_targ->tid;
	}
	if (tm->cm_ccb) {
		mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
		    target_id);
		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
		/* Path and CCB were allocated solely for this TM; drop both. */
		xpt_free_path(tm->cm_ccb->ccb_h.path);
		xpt_free_ccb(tm->cm_ccb);
	}

	mps_free_high_priority_command(sc, tm);
}
268 
/*
 * Schedule an asynchronous CAM rescan of a single target, or of the whole
 * bus when targ is NULL.  Failure to allocate the CCB or path is logged
 * and the rescan is silently dropped.
 */
void
mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
{
	struct mpssas_softc *sassc = sc->sassc;
	path_id_t pathid;
	target_id_t targetid;
	union ccb *ccb;

	MPS_FUNCTRACE(sc);
	pathid = cam_sim_path(sassc->sim);
	if (targ == NULL)
		targetid = CAM_TARGET_WILDCARD;
	else
		/* Target ID is the index into the targets[] array. */
		targetid = targ - sassc->targets;

	/*
	 * Allocate a CCB and schedule a rescan.
	 */
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
		return;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
		xpt_free_ccb(ccb);
		return;
	}

	/* Wildcard target means rescan the whole bus. */
	if (targetid == CAM_TARGET_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else
		ccb->ccb_h.func_code = XPT_SCAN_TGT;

	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
	/* xpt_rescan() takes ownership of the CCB and frees it when done. */
	xpt_rescan(ccb);
}
308 
/*
 * printf-style logging helper for a command.  Prefixes the message with the
 * CAM path (and CDB, for SCSI I/O) when a CCB is attached, otherwise with a
 * "noperiph" sim/bus/target/lun tuple, plus the command's SMID.  Returns
 * immediately unless the requested debug level is enabled.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	/* Fixed-size, stack-backed sbuf; output truncates rather than grows. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: synthesize a path-like prefix from the sim/target. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
354 
355 
356 static void
357 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
358 {
359 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
360 	struct mpssas_target *targ;
361 	uint16_t handle;
362 
363 	MPS_FUNCTRACE(sc);
364 
365 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
366 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
367 	targ = tm->cm_targ;
368 
369 	if (reply == NULL) {
370 		/* XXX retry the remove after the diag reset completes? */
371 		mps_dprint(sc, MPS_FAULT,
372 		    "%s NULL reply resetting device 0x%04x\n", __func__,
373 		    handle);
374 		mpssas_free_tm(sc, tm);
375 		return;
376 	}
377 
378 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
379 	    MPI2_IOCSTATUS_SUCCESS) {
380 		mps_dprint(sc, MPS_ERROR,
381 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
382 		   le16toh(reply->IOCStatus), handle);
383 	}
384 
385 	mps_dprint(sc, MPS_XINFO,
386 	    "Reset aborted %u commands\n", reply->TerminationCount);
387 	mps_free_reply(sc, tm->cm_reply_data);
388 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
389 
390 	mps_dprint(sc, MPS_XINFO,
391 	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
392 
393 	/*
394 	 * Don't clear target if remove fails because things will get confusing.
395 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
396 	 * this target id if possible, and so we can assign the same target id
397 	 * to this device if it comes back in the future.
398 	 */
399 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
400 	    MPI2_IOCSTATUS_SUCCESS) {
401 		targ = tm->cm_targ;
402 		targ->handle = 0x0;
403 		targ->encl_handle = 0x0;
404 		targ->encl_slot = 0x0;
405 		targ->exp_dev_handle = 0x0;
406 		targ->phy_num = 0x0;
407 		targ->linkrate = 0x0;
408 		targ->devinfo = 0x0;
409 		targ->flags = 0x0;
410 	}
411 
412 	mpssas_free_tm(sc, tm);
413 }
414 
415 
416 /*
417  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
418  * Otherwise Volume Delete is same as Bare Drive Removal.
419  */
420 void
421 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
422 {
423 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
424 	struct mps_softc *sc;
425 	struct mps_command *cm;
426 	struct mpssas_target *targ = NULL;
427 
428 	MPS_FUNCTRACE(sassc->sc);
429 	sc = sassc->sc;
430 
431 #ifdef WD_SUPPORT
432 	/*
433 	 * If this is a WD controller, determine if the disk should be exposed
434 	 * to the OS or not.  If disk should be exposed, return from this
435 	 * function without doing anything.
436 	 */
437 	if (sc->WD_available && (sc->WD_hide_expose ==
438 	    MPS_WD_EXPOSE_ALWAYS)) {
439 		return;
440 	}
441 #endif //WD_SUPPORT
442 
443 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
444 	if (targ == NULL) {
445 		/* FIXME: what is the action? */
446 		/* We don't know about this device? */
447 		mps_dprint(sc, MPS_ERROR,
448 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
449 		return;
450 	}
451 
452 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
453 
454 	cm = mpssas_alloc_tm(sc);
455 	if (cm == NULL) {
456 		mps_dprint(sc, MPS_ERROR,
457 		    "%s: command alloc failure\n", __func__);
458 		return;
459 	}
460 
461 	mpssas_rescan_target(sc, targ);
462 
463 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
464 	req->DevHandle = targ->handle;
465 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
466 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
467 
468 	/* SAS Hard Link Reset / SATA Link Reset */
469 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
470 
471 	cm->cm_targ = targ;
472 	cm->cm_data = NULL;
473 	cm->cm_desc.HighPriority.RequestFlags =
474 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
475 	cm->cm_complete = mpssas_remove_volume;
476 	cm->cm_complete_data = (void *)(uintptr_t)handle;
477 
478 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
479 	    __func__, targ->tid);
480 	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
481 
482 	mps_map_command(sc, cm);
483 }
484 
485 /*
486  * The MPT2 firmware performs debounce on the link to avoid transient link
487  * errors and false removals.  When it does decide that link has been lost
488  * and a device need to go away, it expects that the host will perform a
489  * target reset and then an op remove.  The reset has the side-effect of
490  * aborting any outstanding requests for the device, which is required for
491  * the op-remove to succeed.  It's not clear if the host should check for
492  * the device coming back alive after the reset.
493  */
494 void
495 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
496 {
497 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
498 	struct mps_softc *sc;
499 	struct mps_command *cm;
500 	struct mpssas_target *targ = NULL;
501 
502 	MPS_FUNCTRACE(sassc->sc);
503 
504 	sc = sassc->sc;
505 
506 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
507 	if (targ == NULL) {
508 		/* FIXME: what is the action? */
509 		/* We don't know about this device? */
510 		mps_dprint(sc, MPS_ERROR,
511 		    "%s : invalid handle 0x%x \n", __func__, handle);
512 		return;
513 	}
514 
515 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
516 
517 	cm = mpssas_alloc_tm(sc);
518 	if (cm == NULL) {
519 		mps_dprint(sc, MPS_ERROR,
520 		    "%s: command alloc failure\n", __func__);
521 		return;
522 	}
523 
524 	mpssas_rescan_target(sc, targ);
525 
526 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
527 	memset(req, 0, sizeof(*req));
528 	req->DevHandle = htole16(targ->handle);
529 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
530 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
531 
532 	/* SAS Hard Link Reset / SATA Link Reset */
533 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
534 
535 	cm->cm_targ = targ;
536 	cm->cm_data = NULL;
537 	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
538 	cm->cm_complete = mpssas_remove_device;
539 	cm->cm_complete_data = (void *)(uintptr_t)handle;
540 
541 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
542 	    __func__, targ->tid);
543 	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
544 
545 	mps_map_command(sc, cm);
546 }
547 
/*
 * Completion handler for the target-reset TM from mpssas_prepare_remove().
 * Reuses the same command to issue the SAS_OP_REMOVE_DEVICE IO unit control
 * request, then completes any commands still queued on the target with
 * CAM_DEV_NOT_THERE so they don't linger.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply resetting device 0x%04x\n", __func__,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_ERROR,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	/*
	 * Drain commands the reset orphaned; 'tm' is reused as the loop
	 * cursor here, so it no longer refers to the TM after this point.
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mpssas_scsiio_complete(sc, tm);
	}
}
620 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request issued by
 * mpssas_remove_device().  On success, clears the target's firmware state
 * and frees its LUN list; on failure the target is left populated so the
 * tid isn't accidentally reused (devname/sasaddr stay intact).
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Free every LUN tracked for this now-removed target. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
688 
689 static int
690 mpssas_register_events(struct mps_softc *sc)
691 {
692 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
693 
694 	bzero(events, 16);
695 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
696 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
697 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
698 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
699 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
700 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
701 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
702 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
703 	setbit(events, MPI2_EVENT_IR_VOLUME);
704 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
705 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
706 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
707 
708 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
709 	    &sc->sassc->mpssas_eh);
710 
711 	return (0);
712 }
713 
/*
 * Attach the SAS/CAM layer: allocate the softc and target array, create the
 * SIM and its queue, register the bus with CAM, freeze the SIM queue until
 * discovery completes, and hook up async-event and firmware-event handlers.
 * Returns 0 on success or an errno; on failure mps_detach_sas() unwinds any
 * partial setup.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0;

	MPS_FUNCTRACE(sc);

	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	/* NOTE(review): M_WAITOK malloc should not return NULL per
	 * malloc(9); this check looks redundant -- confirm before removing. */
	if(!sassc) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc->targets) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		free(sassc, M_MPT2);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* One SIM-queue slot per firmware request so we never overcommit. */
	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
		    error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	/* Takes the first discovery reference, freezing the SIM queue. */
	mpssas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

		/* Pick the async event this CAM version supports. */
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED;
#else
		event = AC_FOUND_DEVICE;
#endif
		status = xpt_register_async(event, mpssas_async, sc,
					    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);
	return (error);
}
842 
/*
 * Tear down the SAS/CAM layer: deregister event handlers, drain the event
 * taskqueue (with the lock dropped to avoid deadlock), unwind CAM state,
 * and free per-target LUN lists plus the softc.  Safe to call on a
 * partially attached instance.  Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		/* event mask 0 == deregister this handler for this path. */
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* If attach never finished, the SIM queue is still frozen. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
902 
903 void
904 mpssas_discovery_end(struct mpssas_softc *sassc)
905 {
906 	struct mps_softc *sc = sassc->sc;
907 
908 	MPS_FUNCTRACE(sc);
909 
910 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
911 		callout_stop(&sassc->discovery_callout);
912 
913 	/*
914 	 * After discovery has completed, check the mapping table for any
915 	 * missing devices and update their missing counts. Only do this once
916 	 * whenever the driver is initialized so that missing counts aren't
917 	 * updated unnecessarily. Note that just because discovery has
918 	 * completed doesn't mean that events have been processed yet. The
919 	 * check_devices function is a callout timer that checks if ALL devices
920 	 * are missing. If so, it will wait a little longer for events to
921 	 * complete and keep resetting itself until some device in the mapping
922 	 * table is not missing, meaning that event processing has started.
923 	 */
924 	if (sc->track_mapping_events) {
925 		mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
926 		    "completed. Check for missing devices in the mapping "
927 		    "table.\n");
928 		callout_reset(&sc->device_check_callout,
929 		    MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
930 		    sc);
931 	}
932 }
933 
/*
 * CAM action entry point for this SIM.  Dispatches the incoming CCB by
 * function code.  Called with the softc mutex held (asserted below).
 * Cases that complete asynchronously (XPT_RESET_DEV, XPT_SCSI_IO,
 * XPT_SMP_IO) return directly; all other cases fall through to the
 * common xpt_done() at the bottom.
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report SIM/HBA capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mps_softc *sc = sassc->sc;
		uint8_t sges_per_frame;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;

		/*
		 * Max IO Size is Page Size * the following:
		 * ((SGEs per frame - 1 for chain element) *
		 * Max Chain Depth) + 1 for no chain needed in last frame
		 *
		 * If user suggests a Max IO size to use, use the smaller of the
		 * user's value and the calculated value as long as the user's
		 * value is larger than 0. The user's value is in pages.
		 */
		/* IOCRequestFrameSize is in 32-bit words, hence the "* 4". */
		sges_per_frame = ((sc->facts->IOCRequestFrameSize * 4) /
		    sizeof(MPI2_SGE_SIMPLE64)) - 1;
		cpi->maxio = (sges_per_frame * sc->facts->MaxChainDepth) + 1;
		cpi->maxio *= PAGE_SIZE;
		if ((sc->max_io_pages > 0) && (sc->max_io_pages * PAGE_SIZE <
		    cpi->maxio))
			cpi->maxio = sc->max_io_pages * PAGE_SIZE;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		if (targ->handle == 0x0) {
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Link rate codes: 0x08 = 1.5Gb, 0x09 = 3Gb, 0x0a = 6Gb. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* These are not supported by the hardware; pretend success. */
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1076 
1077 static void
1078 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1079     target_id_t target_id, lun_id_t lun_id)
1080 {
1081 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1082 	struct cam_path *path;
1083 
1084 	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1085 	    ac_code, target_id, (uintmax_t)lun_id);
1086 
1087 	if (xpt_create_path(&path, NULL,
1088 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1089 		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1090 			   "notification\n");
1091 		return;
1092 	}
1093 
1094 	xpt_async(ac_code, path, NULL);
1095 	xpt_free_path(path);
1096 }
1097 
1098 static void
1099 mpssas_complete_all_commands(struct mps_softc *sc)
1100 {
1101 	struct mps_command *cm;
1102 	int i;
1103 	int completed;
1104 
1105 	MPS_FUNCTRACE(sc);
1106 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1107 
1108 	/* complete all commands with a NULL reply */
1109 	for (i = 1; i < sc->num_reqs; i++) {
1110 		cm = &sc->commands[i];
1111 		cm->cm_reply = NULL;
1112 		completed = 0;
1113 
1114 		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1115 			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1116 
1117 		if (cm->cm_complete != NULL) {
1118 			mpssas_log_command(cm, MPS_RECOVERY,
1119 			    "completing cm %p state %x ccb %p for diag reset\n",
1120 			    cm, cm->cm_state, cm->cm_ccb);
1121 
1122 			cm->cm_complete(sc, cm);
1123 			completed = 1;
1124 		}
1125 
1126 		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1127 			mpssas_log_command(cm, MPS_RECOVERY,
1128 			    "waking up cm %p state %x ccb %p for diag reset\n",
1129 			    cm, cm->cm_state, cm->cm_ccb);
1130 			wakeup(cm);
1131 			completed = 1;
1132 		}
1133 
1134 		if (cm->cm_sc->io_cmds_active != 0)
1135 			cm->cm_sc->io_cmds_active--;
1136 
1137 		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1138 			/* this should never happen, but if it does, log */
1139 			mpssas_log_command(cm, MPS_RECOVERY,
1140 			    "cm %p state %x flags 0x%x ccb %p during diag "
1141 			    "reset\n", cm, cm->cm_state, cm->cm_flags,
1142 			    cm->cm_ccb);
1143 		}
1144 	}
1145 }
1146 
1147 void
1148 mpssas_handle_reinit(struct mps_softc *sc)
1149 {
1150 	int i;
1151 
1152 	/* Go back into startup mode and freeze the simq, so that CAM
1153 	 * doesn't send any commands until after we've rediscovered all
1154 	 * targets and found the proper device handles for them.
1155 	 *
1156 	 * After the reset, portenable will trigger discovery, and after all
1157 	 * discovery-related activities have finished, the simq will be
1158 	 * released.
1159 	 */
1160 	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1161 	sc->sassc->flags |= MPSSAS_IN_STARTUP;
1162 	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1163 	mpssas_startup_increment(sc->sassc);
1164 
1165 	/* notify CAM of a bus reset */
1166 	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1167 	    CAM_LUN_WILDCARD);
1168 
1169 	/* complete and cleanup after all outstanding commands */
1170 	mpssas_complete_all_commands(sc);
1171 
1172 	mps_dprint(sc, MPS_INIT,
1173 	    "%s startup %u after command completion\n", __func__,
1174 	    sc->sassc->startup_refcount);
1175 
1176 	/* zero all the target handles, since they may change after the
1177 	 * reset, and we have to rediscover all the targets and use the new
1178 	 * handles.
1179 	 */
1180 	for (i = 0; i < sc->sassc->maxtargets; i++) {
1181 		if (sc->sassc->targets[i].outstanding != 0)
1182 			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1183 			    i, sc->sassc->targets[i].outstanding);
1184 		sc->sassc->targets[i].handle = 0x0;
1185 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1186 		sc->sassc->targets[i].outstanding = 0;
1187 		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
1188 	}
1189 }
1190 
1191 static void
1192 mpssas_tm_timeout(void *data)
1193 {
1194 	struct mps_command *tm = data;
1195 	struct mps_softc *sc = tm->cm_sc;
1196 
1197 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1198 
1199 	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1200 	    "task mgmt %p timed out\n", tm);
1201 	mps_reinit(sc);
1202 }
1203 
1204 static void
1205 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1206 {
1207 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1208 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1209 	unsigned int cm_count = 0;
1210 	struct mps_command *cm;
1211 	struct mpssas_target *targ;
1212 
1213 	callout_stop(&tm->cm_callout);
1214 
1215 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1216 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1217 	targ = tm->cm_targ;
1218 
1219 	/*
1220 	 * Currently there should be no way we can hit this case.  It only
1221 	 * happens when we have a failure to allocate chain frames, and
1222 	 * task management commands don't have S/G lists.
1223 	 * XXXSL So should it be an assertion?
1224 	 */
1225 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1226 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1227 			   "This should not happen!\n", __func__, tm->cm_flags);
1228 		mpssas_free_tm(sc, tm);
1229 		return;
1230 	}
1231 
1232 	if (reply == NULL) {
1233 		mpssas_log_command(tm, MPS_RECOVERY,
1234 		    "NULL reset reply for tm %p\n", tm);
1235 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1236 			/* this completion was due to a reset, just cleanup */
1237 			targ->tm = NULL;
1238 			mpssas_free_tm(sc, tm);
1239 		}
1240 		else {
1241 			/* we should have gotten a reply. */
1242 			mps_reinit(sc);
1243 		}
1244 		return;
1245 	}
1246 
1247 	mpssas_log_command(tm, MPS_RECOVERY,
1248 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1249 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1250 	    le32toh(reply->TerminationCount));
1251 
1252 	/* See if there are any outstanding commands for this LUN.
1253 	 * This could be made more efficient by using a per-LU data
1254 	 * structure of some sort.
1255 	 */
1256 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1257 		if (cm->cm_lun == tm->cm_lun)
1258 			cm_count++;
1259 	}
1260 
1261 	if (cm_count == 0) {
1262 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1263 		    "logical unit %u finished recovery after reset\n",
1264 		    tm->cm_lun, tm);
1265 
1266 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1267 		    tm->cm_lun);
1268 
1269 		/* we've finished recovery for this logical unit.  check and
1270 		 * see if some other logical unit has a timedout command
1271 		 * that needs to be processed.
1272 		 */
1273 		cm = TAILQ_FIRST(&targ->timedout_commands);
1274 		if (cm) {
1275 			mpssas_send_abort(sc, tm, cm);
1276 		}
1277 		else {
1278 			targ->tm = NULL;
1279 			mpssas_free_tm(sc, tm);
1280 		}
1281 	}
1282 	else {
1283 		/* if we still have commands for this LUN, the reset
1284 		 * effectively failed, regardless of the status reported.
1285 		 * Escalate to a target reset.
1286 		 */
1287 		mpssas_log_command(tm, MPS_RECOVERY,
1288 		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1289 		    tm, cm_count);
1290 		mpssas_send_reset(sc, tm,
1291 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1292 	}
1293 }
1294 
/*
 * Completion handler for a TARGET RESET task management request.  If
 * the target has no outstanding commands left, recovery is complete:
 * announce AC_SENT_BDR for all LUNs and release the TM.  If commands
 * remain despite the reset, it effectively failed and the only further
 * escalation is a full controller reinit.  A NULL reply during a diag
 * reset is just cleaned up; any other missing reply forces a reinit.
 */
static void
mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* TM completed; cancel the pending mpssas_tm_timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	/* Reply fields are little-endian; byte-swap for logging. */
	mpssas_log_command(tm, MPS_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "recovery finished after target reset\n");

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "target reset complete for tm %p, but still have %u command(s)\n",
		    tm, targ->outstanding);
		mps_reinit(sc);
	}
}
1364 
1365 #define MPS_RESET_TIMEOUT 30
1366 
1367 int
1368 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1369 {
1370 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1371 	struct mpssas_target *target;
1372 	int err;
1373 
1374 	target = tm->cm_targ;
1375 	if (target->handle == 0) {
1376 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1377 		    __func__, target->tid);
1378 		return -1;
1379 	}
1380 
1381 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1382 	req->DevHandle = htole16(target->handle);
1383 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1384 	req->TaskType = type;
1385 
1386 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1387 		/* XXX Need to handle invalid LUNs */
1388 		MPS_SET_LUN(req->LUN, tm->cm_lun);
1389 		tm->cm_targ->logical_unit_resets++;
1390 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1391 		    "sending logical unit reset\n");
1392 		tm->cm_complete = mpssas_logical_unit_reset_complete;
1393 		mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1394 	}
1395 	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1396 		/*
1397 		 * Target reset method =
1398 		 * 	SAS Hard Link Reset / SATA Link Reset
1399 		 */
1400 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1401 		tm->cm_targ->target_resets++;
1402 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1403 		    "sending target reset\n");
1404 		tm->cm_complete = mpssas_target_reset_complete;
1405 		mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1406 	}
1407 	else {
1408 		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
1409 		return -1;
1410 	}
1411 
1412 	tm->cm_data = NULL;
1413 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1414 	tm->cm_complete_data = (void *)tm;
1415 
1416 	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1417 	    mpssas_tm_timeout, tm);
1418 
1419 	err = mps_map_command(sc, tm);
1420 	if (err)
1421 		mpssas_log_command(tm, MPS_RECOVERY,
1422 		    "error %d sending reset type %u\n",
1423 		    err, type);
1424 
1425 	return err;
1426 }
1427 
1428 
/*
 * Completion handler for an ABORT TASK task management request.  Walks
 * the target's timed-out command queue: if it is empty, recovery for
 * this target is finished and the TM is released; if the head is a
 * different command than the one just aborted, recovery continues by
 * aborting it; if the head is still the command we tried to abort, the
 * abort failed and we escalate to a logical unit reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* TM completed; cancel the pending mpssas_tm_timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1510 
1511 #define MPS_ABORT_TIMEOUT 5
1512 
1513 static int
1514 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1515 {
1516 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1517 	struct mpssas_target *targ;
1518 	int err;
1519 
1520 	targ = cm->cm_targ;
1521 	if (targ->handle == 0) {
1522 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1523 		    __func__, cm->cm_ccb->ccb_h.target_id);
1524 		return -1;
1525 	}
1526 
1527 	mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1528 	    "Aborting command %p\n", cm);
1529 
1530 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1531 	req->DevHandle = htole16(targ->handle);
1532 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1533 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1534 
1535 	/* XXX Need to handle invalid LUNs */
1536 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1537 
1538 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1539 
1540 	tm->cm_data = NULL;
1541 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1542 	tm->cm_complete = mpssas_abort_complete;
1543 	tm->cm_complete_data = (void *)tm;
1544 	tm->cm_targ = cm->cm_targ;
1545 	tm->cm_lun = cm->cm_lun;
1546 
1547 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1548 	    mpssas_tm_timeout, tm);
1549 
1550 	targ->aborts++;
1551 
1552 	mps_dprint(sc, MPS_INFO, "Sending reset from %s for target ID %d\n",
1553 	    __func__, targ->tid);
1554 	mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1555 
1556 	err = mps_map_command(sc, tm);
1557 	if (err)
1558 		mps_dprint(sc, MPS_RECOVERY,
1559 		    "error %d sending abort for cm %p SMID %u\n",
1560 		    err, cm, req->TaskMID);
1561 	return err;
1562 }
1563 
1564 static void
1565 mpssas_scsiio_timeout(void *data)
1566 {
1567 	struct mps_softc *sc;
1568 	struct mps_command *cm;
1569 	struct mpssas_target *targ;
1570 
1571 	cm = (struct mps_command *)data;
1572 	sc = cm->cm_sc;
1573 
1574 	MPS_FUNCTRACE(sc);
1575 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1576 
1577 	mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1578 
1579 	/*
1580 	 * Run the interrupt handler to make sure it's not pending.  This
1581 	 * isn't perfect because the command could have already completed
1582 	 * and been re-used, though this is unlikely.
1583 	 */
1584 	mps_intr_locked(sc);
1585 	if (cm->cm_state == MPS_CM_STATE_FREE) {
1586 		mpssas_log_command(cm, MPS_XINFO,
1587 		    "SCSI command %p almost timed out\n", cm);
1588 		return;
1589 	}
1590 
1591 	if (cm->cm_ccb == NULL) {
1592 		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1593 		return;
1594 	}
1595 
1596 	targ = cm->cm_targ;
1597 	targ->timeouts++;
1598 
1599 	mpssas_log_command(cm, MPS_ERROR, "command timeout %d cm %p target "
1600 	    "%u, handle(0x%04x)\n", cm->cm_ccb->ccb_h.timeout, cm,  targ->tid,
1601 	    targ->handle);
1602 
1603 	/* XXX first, check the firmware state, to see if it's still
1604 	 * operational.  if not, do a diag reset.
1605 	 */
1606 	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1607 	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1608 	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1609 
1610 	if (targ->tm != NULL) {
1611 		/* target already in recovery, just queue up another
1612 		 * timedout command to be processed later.
1613 		 */
1614 		mps_dprint(sc, MPS_RECOVERY,
1615 		    "queued timedout cm %p for processing by tm %p\n",
1616 		    cm, targ->tm);
1617 	}
1618 	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1619 		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1620 		    cm, targ->tm);
1621 
1622 		/* start recovery by aborting the first timedout command */
1623 		mpssas_send_abort(sc, targ->tm, cm);
1624 	}
1625 	else {
1626 		/* XXX queue this target up for recovery once a TM becomes
1627 		 * available.  The firmware only has a limited number of
1628 		 * HighPriority credits for the high priority requests used
1629 		 * for task management, and we ran out.
1630 		 *
1631 		 * Isilon: don't worry about this for now, since we have
1632 		 * more credits than disks in an enclosure, and limit
1633 		 * ourselves to one TM per target for recovery.
1634 		 */
1635 		mps_dprint(sc, MPS_RECOVERY,
1636 		    "timedout cm %p failed to allocate a tm\n", cm);
1637 	}
1638 
1639 }
1640 
/*
 * Handle an XPT_SCSI_IO CCB: validate the target, build an MPI2 SCSI IO
 * request (direction, tagging, LUN, CDB, optional EEDP/protection
 * fields, optional WarpDrive direct-I/O translation), arm the per-
 * command timeout, and hand the request to mps_map_command() for DMA
 * mapping and submission.  Completion is via mpssas_scsiio_complete().
 * Called with the softc mutex held.
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero handle means the target is not mapped to a device. */
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members are hidden from direct SCSI I/O. */
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)!;
		else
			mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPSSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/* Out of command slots (or mid-diag-reset): freeze simq and requeue. */
	cm = mps_alloc_command(sc);
	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mps_free_command(sc, cm);
		}
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* Additional CDB length is in 4-byte units for 32-byte CDBs. */
	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (csio->bio != NULL)
		biotrack(csio->bio, __func__);
#endif
	/* Per-command timeout, in milliseconds from the CCB. */
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mpssas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1913 
1914 static void
1915 mps_response_code(struct mps_softc *sc, u8 response_code)
1916 {
1917         char *desc;
1918 
1919         switch (response_code) {
1920         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1921                 desc = "task management request completed";
1922                 break;
1923         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1924                 desc = "invalid frame";
1925                 break;
1926         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1927                 desc = "task management request not supported";
1928                 break;
1929         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1930                 desc = "task management request failed";
1931                 break;
1932         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1933                 desc = "task management request succeeded";
1934                 break;
1935         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1936                 desc = "invalid lun";
1937                 break;
1938         case 0xA:
1939                 desc = "overlapped tag attempted";
1940                 break;
1941         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1942                 desc = "task queued, however not sent to target";
1943                 break;
1944         default:
1945                 desc = "unknown";
1946                 break;
1947         }
1948 		mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1949                 response_code, desc);
1950 }
1951 /**
1952  * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1953  */
1954 static void
1955 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1956     Mpi2SCSIIOReply_t *mpi_reply)
1957 {
1958 	u32 response_info;
1959 	u8 *response_bytes;
1960 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1961 	    MPI2_IOCSTATUS_MASK;
1962 	u8 scsi_state = mpi_reply->SCSIState;
1963 	u8 scsi_status = mpi_reply->SCSIStatus;
1964 	char *desc_ioc_state = NULL;
1965 	char *desc_scsi_status = NULL;
1966 	char *desc_scsi_state = sc->tmp_string;
1967 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1968 
1969 	if (log_info == 0x31170000)
1970 		return;
1971 
1972 	switch (ioc_status) {
1973 	case MPI2_IOCSTATUS_SUCCESS:
1974 		desc_ioc_state = "success";
1975 		break;
1976 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1977 		desc_ioc_state = "invalid function";
1978 		break;
1979 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1980 		desc_ioc_state = "scsi recovered error";
1981 		break;
1982 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1983 		desc_ioc_state = "scsi invalid dev handle";
1984 		break;
1985 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1986 		desc_ioc_state = "scsi device not there";
1987 		break;
1988 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1989 		desc_ioc_state = "scsi data overrun";
1990 		break;
1991 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1992 		desc_ioc_state = "scsi data underrun";
1993 		break;
1994 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1995 		desc_ioc_state = "scsi io data error";
1996 		break;
1997 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1998 		desc_ioc_state = "scsi protocol error";
1999 		break;
2000 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2001 		desc_ioc_state = "scsi task terminated";
2002 		break;
2003 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2004 		desc_ioc_state = "scsi residual mismatch";
2005 		break;
2006 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2007 		desc_ioc_state = "scsi task mgmt failed";
2008 		break;
2009 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2010 		desc_ioc_state = "scsi ioc terminated";
2011 		break;
2012 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2013 		desc_ioc_state = "scsi ext terminated";
2014 		break;
2015 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2016 		desc_ioc_state = "eedp guard error";
2017 		break;
2018 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2019 		desc_ioc_state = "eedp ref tag error";
2020 		break;
2021 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2022 		desc_ioc_state = "eedp app tag error";
2023 		break;
2024 	default:
2025 		desc_ioc_state = "unknown";
2026 		break;
2027 	}
2028 
2029 	switch (scsi_status) {
2030 	case MPI2_SCSI_STATUS_GOOD:
2031 		desc_scsi_status = "good";
2032 		break;
2033 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
2034 		desc_scsi_status = "check condition";
2035 		break;
2036 	case MPI2_SCSI_STATUS_CONDITION_MET:
2037 		desc_scsi_status = "condition met";
2038 		break;
2039 	case MPI2_SCSI_STATUS_BUSY:
2040 		desc_scsi_status = "busy";
2041 		break;
2042 	case MPI2_SCSI_STATUS_INTERMEDIATE:
2043 		desc_scsi_status = "intermediate";
2044 		break;
2045 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2046 		desc_scsi_status = "intermediate condmet";
2047 		break;
2048 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2049 		desc_scsi_status = "reservation conflict";
2050 		break;
2051 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2052 		desc_scsi_status = "command terminated";
2053 		break;
2054 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
2055 		desc_scsi_status = "task set full";
2056 		break;
2057 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
2058 		desc_scsi_status = "aca active";
2059 		break;
2060 	case MPI2_SCSI_STATUS_TASK_ABORTED:
2061 		desc_scsi_status = "task aborted";
2062 		break;
2063 	default:
2064 		desc_scsi_status = "unknown";
2065 		break;
2066 	}
2067 
2068 	desc_scsi_state[0] = '\0';
2069 	if (!scsi_state)
2070 		desc_scsi_state = " ";
2071 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2072 		strcat(desc_scsi_state, "response info ");
2073 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2074 		strcat(desc_scsi_state, "state terminated ");
2075 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2076 		strcat(desc_scsi_state, "no status ");
2077 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2078 		strcat(desc_scsi_state, "autosense failed ");
2079 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2080 		strcat(desc_scsi_state, "autosense valid ");
2081 
2082 	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2083 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2084 	/* We can add more detail about underflow data here
2085 	 * TO-DO
2086 	 * */
2087 	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
2088 	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2089 	    desc_scsi_state, scsi_state);
2090 
2091 	if (sc->mps_debug & MPS_XINFO &&
2092 		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2093 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
2094 		scsi_sense_print(csio);
2095 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
2096 	}
2097 
2098 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2099 		response_info = le32toh(mpi_reply->ResponseInfo);
2100 		response_bytes = (u8 *)&response_info;
2101 		mps_response_code(sc,response_bytes[0]);
2102 	}
2103 }
2104 
2105 static void
2106 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
2107 {
2108 	MPI2_SCSI_IO_REPLY *rep;
2109 	union ccb *ccb;
2110 	struct ccb_scsiio *csio;
2111 	struct mpssas_softc *sassc;
2112 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2113 	u8 *TLR_bits, TLR_on;
2114 	int dir = 0, i;
2115 	u16 alloc_len;
2116 	struct mpssas_target *target;
2117 	target_id_t target_id;
2118 
2119 	MPS_FUNCTRACE(sc);
2120 	mps_dprint(sc, MPS_TRACE,
2121 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2122 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2123 	    cm->cm_targ->outstanding);
2124 
2125 	callout_stop(&cm->cm_callout);
2126 	mtx_assert(&sc->mps_mtx, MA_OWNED);
2127 
2128 	sassc = sc->sassc;
2129 	ccb = cm->cm_complete_data;
2130 	csio = &ccb->csio;
2131 	target_id = csio->ccb_h.target_id;
2132 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2133 	/*
2134 	 * XXX KDM if the chain allocation fails, does it matter if we do
2135 	 * the sync and unload here?  It is simpler to do it in every case,
2136 	 * assuming it doesn't cause problems.
2137 	 */
2138 	if (cm->cm_data != NULL) {
2139 		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2140 			dir = BUS_DMASYNC_POSTREAD;
2141 		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2142 			dir = BUS_DMASYNC_POSTWRITE;
2143 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2144 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2145 	}
2146 
2147 	cm->cm_targ->completed++;
2148 	cm->cm_targ->outstanding--;
2149 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2150 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2151 
2152 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
2153 	if (ccb->csio.bio != NULL)
2154 		biotrack(ccb->csio.bio, __func__);
2155 #endif
2156 
2157 	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
2158 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2159 		if (cm->cm_reply != NULL)
2160 			mpssas_log_command(cm, MPS_RECOVERY,
2161 			    "completed timedout cm %p ccb %p during recovery "
2162 			    "ioc %x scsi %x state %x xfer %u\n",
2163 			    cm, cm->cm_ccb,
2164 			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2165 			    le32toh(rep->TransferCount));
2166 		else
2167 			mpssas_log_command(cm, MPS_RECOVERY,
2168 			    "completed timedout cm %p ccb %p during recovery\n",
2169 			    cm, cm->cm_ccb);
2170 	} else if (cm->cm_targ->tm != NULL) {
2171 		if (cm->cm_reply != NULL)
2172 			mpssas_log_command(cm, MPS_RECOVERY,
2173 			    "completed cm %p ccb %p during recovery "
2174 			    "ioc %x scsi %x state %x xfer %u\n",
2175 			    cm, cm->cm_ccb,
2176 			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2177 			    le32toh(rep->TransferCount));
2178 		else
2179 			mpssas_log_command(cm, MPS_RECOVERY,
2180 			    "completed cm %p ccb %p during recovery\n",
2181 			    cm, cm->cm_ccb);
2182 	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2183 		mpssas_log_command(cm, MPS_RECOVERY,
2184 		    "reset completed cm %p ccb %p\n",
2185 		    cm, cm->cm_ccb);
2186 	}
2187 
2188 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2189 		/*
2190 		 * We ran into an error after we tried to map the command,
2191 		 * so we're getting a callback without queueing the command
2192 		 * to the hardware.  So we set the status here, and it will
2193 		 * be retained below.  We'll go through the "fast path",
2194 		 * because there can be no reply when we haven't actually
2195 		 * gone out to the hardware.
2196 		 */
2197 		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2198 
2199 		/*
2200 		 * Currently the only error included in the mask is
2201 		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2202 		 * chain frames.  We need to freeze the queue until we get
2203 		 * a command that completed without this error, which will
2204 		 * hopefully have some chain frames attached that we can
2205 		 * use.  If we wanted to get smarter about it, we would
2206 		 * only unfreeze the queue in this condition when we're
2207 		 * sure that we're getting some chain frames back.  That's
2208 		 * probably unnecessary.
2209 		 */
2210 		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2211 			xpt_freeze_simq(sassc->sim, 1);
2212 			sassc->flags |= MPSSAS_QUEUE_FROZEN;
2213 			mps_dprint(sc, MPS_XINFO, "Error sending command, "
2214 				   "freezing SIM queue\n");
2215 		}
2216 	}
2217 
2218 	/*
2219 	 * If this is a Start Stop Unit command and it was issued by the driver
2220 	 * during shutdown, decrement the refcount to account for all of the
2221 	 * commands that were sent.  All SSU commands should be completed before
2222 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2223 	 * is TRUE.
2224 	 */
2225 	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2226 		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
2227 		sc->SSU_refcount--;
2228 	}
2229 
2230 	/* Take the fast path to completion */
2231 	if (cm->cm_reply == NULL) {
2232 		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2233 			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2234 				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2235 			else {
2236 				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2237 				ccb->csio.scsi_status = SCSI_STATUS_OK;
2238 			}
2239 			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2240 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2241 				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2242 				mps_dprint(sc, MPS_XINFO,
2243 				    "Unfreezing SIM queue\n");
2244 			}
2245 		}
2246 
2247 		/*
2248 		 * There are two scenarios where the status won't be
2249 		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
2250 		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2251 		 */
2252 		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2253 			/*
2254 			 * Freeze the dev queue so that commands are
2255 			 * executed in the correct order after error
2256 			 * recovery.
2257 			 */
2258 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2259 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2260 		}
2261 		mps_free_command(sc, cm);
2262 		xpt_done(ccb);
2263 		return;
2264 	}
2265 
2266 	mpssas_log_command(cm, MPS_XINFO,
2267 	    "ioc %x scsi %x state %x xfer %u\n",
2268 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2269 	    le32toh(rep->TransferCount));
2270 
2271 	/*
2272 	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2273 	 * Volume if an error occurred (normal I/O retry).  Use the original
2274 	 * CCB, but set a flag that this will be a retry so that it's sent to
2275 	 * the original volume.  Free the command but reuse the CCB.
2276 	 */
2277 	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2278 		mps_free_command(sc, cm);
2279 		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
2280 		mpssas_action_scsiio(sassc, ccb);
2281 		return;
2282 	} else
2283 		ccb->ccb_h.sim_priv.entries[0].field = 0;
2284 
2285 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2286 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2287 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2288 		/* FALLTHROUGH */
2289 	case MPI2_IOCSTATUS_SUCCESS:
2290 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2291 
2292 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2293 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2294 			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2295 
2296 		/* Completion failed at the transport level. */
2297 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2298 		    MPI2_SCSI_STATE_TERMINATED)) {
2299 			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2300 			break;
2301 		}
2302 
2303 		/* In a modern packetized environment, an autosense failure
2304 		 * implies that there's not much else that can be done to
2305 		 * recover the command.
2306 		 */
2307 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2308 			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2309 			break;
2310 		}
2311 
2312 		/*
2313 		 * CAM doesn't care about SAS Response Info data, but if this is
2314 		 * the state check if TLR should be done.  If not, clear the
2315 		 * TLR_bits for the target.
2316 		 */
2317 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2318 		    ((le32toh(rep->ResponseInfo) &
2319 		    MPI2_SCSI_RI_MASK_REASONCODE) ==
2320 		    MPS_SCSI_RI_INVALID_FRAME)) {
2321 			sc->mapping_table[target_id].TLR_bits =
2322 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2323 		}
2324 
2325 		/*
2326 		 * Intentionally override the normal SCSI status reporting
2327 		 * for these two cases.  These are likely to happen in a
2328 		 * multi-initiator environment, and we want to make sure that
2329 		 * CAM retries these commands rather than fail them.
2330 		 */
2331 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2332 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2333 			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2334 			break;
2335 		}
2336 
2337 		/* Handle normal status and sense */
2338 		csio->scsi_status = rep->SCSIStatus;
2339 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2340 			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2341 		else
2342 			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2343 
2344 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2345 			int sense_len, returned_sense_len;
2346 
2347 			returned_sense_len = min(le32toh(rep->SenseCount),
2348 			    sizeof(struct scsi_sense_data));
2349 			if (returned_sense_len < ccb->csio.sense_len)
2350 				ccb->csio.sense_resid = ccb->csio.sense_len -
2351 					returned_sense_len;
2352 			else
2353 				ccb->csio.sense_resid = 0;
2354 
2355 			sense_len = min(returned_sense_len,
2356 			    ccb->csio.sense_len - ccb->csio.sense_resid);
2357 			bzero(&ccb->csio.sense_data,
2358 			      sizeof(ccb->csio.sense_data));
2359 			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2360 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2361 		}
2362 
2363 		/*
2364 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2365 		 * and it's page code 0 (Supported Page List), and there is
2366 		 * inquiry data, and this is for a sequential access device, and
2367 		 * the device is an SSP target, and TLR is supported by the
2368 		 * controller, turn the TLR_bits value ON if page 0x90 is
2369 		 * supported.
2370 		 */
2371 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2372 		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2373 		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2374 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2375 		    (csio->data_ptr != NULL) &&
2376 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2377 		    (sc->control_TLR) &&
2378 		    (sc->mapping_table[target_id].device_info &
2379 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2380 			vpd_list = (struct scsi_vpd_supported_page_list *)
2381 			    csio->data_ptr;
2382 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2383 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2384 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2385 			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2386 			    csio->cdb_io.cdb_bytes[4];
2387 			alloc_len -= csio->resid;
2388 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2389 				if (vpd_list->list[i] == 0x90) {
2390 					*TLR_bits = TLR_on;
2391 					break;
2392 				}
2393 			}
2394 		}
2395 
2396 		/*
2397 		 * If this is a SATA direct-access end device, mark it so that
2398 		 * a SCSI StartStopUnit command will be sent to it when the
2399 		 * driver is being shutdown.
2400 		 */
2401 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2402 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2403 		    (sc->mapping_table[target_id].device_info &
2404 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2405 		    ((sc->mapping_table[target_id].device_info &
2406 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2407 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2408 			target = &sassc->targets[target_id];
2409 			target->supports_SSU = TRUE;
2410 			mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
2411 			    target_id);
2412 		}
2413 		break;
2414 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2415 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2416 		/*
2417 		 * If devinfo is 0 this will be a volume.  In that case don't
2418 		 * tell CAM that the volume is not there.  We want volumes to
2419 		 * be enumerated until they are deleted/removed, not just
2420 		 * failed.
2421 		 */
2422 		if (cm->cm_targ->devinfo == 0)
2423 			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2424 		else
2425 			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2426 		break;
2427 	case MPI2_IOCSTATUS_INVALID_SGL:
2428 		mps_print_scsiio_cmd(sc, cm);
2429 		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2430 		break;
2431 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2432 		/*
2433 		 * This is one of the responses that comes back when an I/O
2434 		 * has been aborted.  If it is because of a timeout that we
2435 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2436 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2437 		 * command is the same (it gets retried, subject to the
2438 		 * retry counter), the only difference is what gets printed
2439 		 * on the console.
2440 		 */
2441 		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2442 			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2443 		else
2444 			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2445 		break;
2446 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2447 		/* resid is ignored for this condition */
2448 		csio->resid = 0;
2449 		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2450 		break;
2451 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2452 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2453 		/*
2454 		 * These can sometimes be transient transport-related
2455 		 * errors, and sometimes persistent drive-related errors.
2456 		 * We used to retry these without decrementing the retry
2457 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2458 		 * we hit a persistent drive problem that returns one of
2459 		 * these error codes, we would retry indefinitely.  So,
2460 		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2461 		 * count and avoid infinite retries.  We're taking the
2462 		 * potential risk of flagging false failures in the event
2463 		 * of a topology-related error (e.g. a SAS expander problem
2464 		 * causes a command addressed to a drive to fail), but
2465 		 * avoiding getting into an infinite retry loop.
2466 		 */
2467 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2468 		mpssas_log_command(cm, MPS_INFO,
2469 		    "terminated ioc %x loginfo %x scsi %x state %x xfer %u\n",
2470 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2471 		    rep->SCSIStatus, rep->SCSIState,
2472 		    le32toh(rep->TransferCount));
2473 		break;
2474 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2475 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2476 	case MPI2_IOCSTATUS_INVALID_VPID:
2477 	case MPI2_IOCSTATUS_INVALID_FIELD:
2478 	case MPI2_IOCSTATUS_INVALID_STATE:
2479 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2480 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2481 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2482 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2483 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2484 	default:
2485 		mpssas_log_command(cm, MPS_XINFO,
2486 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2487 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2488 		    rep->SCSIStatus, rep->SCSIState,
2489 		    le32toh(rep->TransferCount));
2490 		csio->resid = cm->cm_length;
2491 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2492 		break;
2493 	}
2494 
2495 	mps_sc_failed_io_info(sc,csio,rep);
2496 
2497 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2498 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2499 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2500 		mps_dprint(sc, MPS_XINFO, "Command completed, "
2501 		    "unfreezing SIM queue\n");
2502 	}
2503 
2504 	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2505 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2506 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2507 	}
2508 
2509 	mps_free_command(sc, cm);
2510 	xpt_done(ccb);
2511 }
2512 
/* All requests that reach here are endian safe */
2514 static void
2515 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2516     union ccb *ccb) {
2517 	pMpi2SCSIIORequest_t	pIO_req;
2518 	struct mps_softc	*sc = sassc->sc;
2519 	uint64_t		virtLBA;
2520 	uint32_t		physLBA, stripe_offset, stripe_unit;
2521 	uint32_t		io_size, column;
2522 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2523 
2524 	/*
2525 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2526 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2527 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2528 	 * bit different than the 10/16 CDBs, handle them separately.
2529 	 */
2530 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2531 	CDB = pIO_req->CDB.CDB32;
2532 
2533 	/*
2534 	 * Handle 6 byte CDBs.
2535 	 */
2536 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2537 	    (CDB[0] == WRITE_6))) {
2538 		/*
2539 		 * Get the transfer size in blocks.
2540 		 */
2541 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2542 
2543 		/*
2544 		 * Get virtual LBA given in the CDB.
2545 		 */
2546 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2547 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2548 
2549 		/*
2550 		 * Check that LBA range for I/O does not exceed volume's
2551 		 * MaxLBA.
2552 		 */
2553 		if ((virtLBA + (uint64_t)io_size - 1) <=
2554 		    sc->DD_max_lba) {
2555 			/*
2556 			 * Check if the I/O crosses a stripe boundary.  If not,
2557 			 * translate the virtual LBA to a physical LBA and set
2558 			 * the DevHandle for the PhysDisk to be used.  If it
2559 			 * does cross a boundary, do normal I/O.  To get the
2560 			 * right DevHandle to use, get the map number for the
2561 			 * column, then use that map number to look up the
2562 			 * DevHandle of the PhysDisk.
2563 			 */
2564 			stripe_offset = (uint32_t)virtLBA &
2565 			    (sc->DD_stripe_size - 1);
2566 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2567 				physLBA = (uint32_t)virtLBA >>
2568 				    sc->DD_stripe_exponent;
2569 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2570 				column = physLBA % sc->DD_num_phys_disks;
2571 				pIO_req->DevHandle =
2572 				    htole16(sc->DD_column_map[column].dev_handle);
2573 				/* ???? Is this endian safe*/
2574 				cm->cm_desc.SCSIIO.DevHandle =
2575 				    pIO_req->DevHandle;
2576 
2577 				physLBA = (stripe_unit <<
2578 				    sc->DD_stripe_exponent) + stripe_offset;
2579 				ptrLBA = &pIO_req->CDB.CDB32[1];
2580 				physLBA_byte = (uint8_t)(physLBA >> 16);
2581 				*ptrLBA = physLBA_byte;
2582 				ptrLBA = &pIO_req->CDB.CDB32[2];
2583 				physLBA_byte = (uint8_t)(physLBA >> 8);
2584 				*ptrLBA = physLBA_byte;
2585 				ptrLBA = &pIO_req->CDB.CDB32[3];
2586 				physLBA_byte = (uint8_t)physLBA;
2587 				*ptrLBA = physLBA_byte;
2588 
2589 				/*
2590 				 * Set flag that Direct Drive I/O is
2591 				 * being done.
2592 				 */
2593 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2594 			}
2595 		}
2596 		return;
2597 	}
2598 
2599 	/*
2600 	 * Handle 10, 12 or 16 byte CDBs.
2601 	 */
2602 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2603 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2604 	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2605 	    (CDB[0] == WRITE_12))) {
2606 		/*
2607 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2608 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2609 		 * the else section.  10-byte and 12-byte CDB's are OK.
2610 		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2611 		 * ready to accept 12byte CDB for Direct IOs.
2612 		 */
2613 		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2614 		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2615 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2616 			/*
2617 			 * Get the transfer size in blocks.
2618 			 */
2619 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2620 
2621 			/*
2622 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2623 			 * LBA in the CDB depending on command.
2624 			 */
2625 			lba_idx = ((CDB[0] == READ_12) ||
2626 				(CDB[0] == WRITE_12) ||
2627 				(CDB[0] == READ_10) ||
2628 				(CDB[0] == WRITE_10))? 2 : 6;
2629 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2630 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2631 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2632 			    (uint64_t)CDB[lba_idx + 3];
2633 
2634 			/*
2635 			 * Check that LBA range for I/O does not exceed volume's
2636 			 * MaxLBA.
2637 			 */
2638 			if ((virtLBA + (uint64_t)io_size - 1) <=
2639 			    sc->DD_max_lba) {
2640 				/*
2641 				 * Check if the I/O crosses a stripe boundary.
2642 				 * If not, translate the virtual LBA to a
2643 				 * physical LBA and set the DevHandle for the
2644 				 * PhysDisk to be used.  If it does cross a
2645 				 * boundary, do normal I/O.  To get the right
2646 				 * DevHandle to use, get the map number for the
2647 				 * column, then use that map number to look up
2648 				 * the DevHandle of the PhysDisk.
2649 				 */
2650 				stripe_offset = (uint32_t)virtLBA &
2651 				    (sc->DD_stripe_size - 1);
2652 				if ((stripe_offset + io_size) <=
2653 				    sc->DD_stripe_size) {
2654 					physLBA = (uint32_t)virtLBA >>
2655 					    sc->DD_stripe_exponent;
2656 					stripe_unit = physLBA /
2657 					    sc->DD_num_phys_disks;
2658 					column = physLBA %
2659 					    sc->DD_num_phys_disks;
2660 					pIO_req->DevHandle =
2661 					    htole16(sc->DD_column_map[column].
2662 					    dev_handle);
2663 					cm->cm_desc.SCSIIO.DevHandle =
2664 					    pIO_req->DevHandle;
2665 
2666 					physLBA = (stripe_unit <<
2667 					    sc->DD_stripe_exponent) +
2668 					    stripe_offset;
2669 					ptrLBA =
2670 					    &pIO_req->CDB.CDB32[lba_idx];
2671 					physLBA_byte = (uint8_t)(physLBA >> 24);
2672 					*ptrLBA = physLBA_byte;
2673 					ptrLBA =
2674 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2675 					physLBA_byte = (uint8_t)(physLBA >> 16);
2676 					*ptrLBA = physLBA_byte;
2677 					ptrLBA =
2678 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2679 					physLBA_byte = (uint8_t)(physLBA >> 8);
2680 					*ptrLBA = physLBA_byte;
2681 					ptrLBA =
2682 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2683 					physLBA_byte = (uint8_t)physLBA;
2684 					*ptrLBA = physLBA_byte;
2685 
2686 					/*
2687 					 * Set flag that Direct Drive I/O is
2688 					 * being done.
2689 					 */
2690 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2691 				}
2692 			}
2693 		} else {
2694 			/*
2695 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2696 			 * 0.  Get the transfer size in blocks.
2697 			 */
2698 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2699 
2700 			/*
2701 			 * Get virtual LBA.
2702 			 */
2703 			virtLBA = ((uint64_t)CDB[2] << 54) |
2704 			    ((uint64_t)CDB[3] << 48) |
2705 			    ((uint64_t)CDB[4] << 40) |
2706 			    ((uint64_t)CDB[5] << 32) |
2707 			    ((uint64_t)CDB[6] << 24) |
2708 			    ((uint64_t)CDB[7] << 16) |
2709 			    ((uint64_t)CDB[8] << 8) |
2710 			    (uint64_t)CDB[9];
2711 
2712 			/*
2713 			 * Check that LBA range for I/O does not exceed volume's
2714 			 * MaxLBA.
2715 			 */
2716 			if ((virtLBA + (uint64_t)io_size - 1) <=
2717 			    sc->DD_max_lba) {
2718 				/*
2719 				 * Check if the I/O crosses a stripe boundary.
2720 				 * If not, translate the virtual LBA to a
2721 				 * physical LBA and set the DevHandle for the
2722 				 * PhysDisk to be used.  If it does cross a
2723 				 * boundary, do normal I/O.  To get the right
2724 				 * DevHandle to use, get the map number for the
2725 				 * column, then use that map number to look up
2726 				 * the DevHandle of the PhysDisk.
2727 				 */
2728 				stripe_offset = (uint32_t)virtLBA &
2729 				    (sc->DD_stripe_size - 1);
2730 				if ((stripe_offset + io_size) <=
2731 				    sc->DD_stripe_size) {
2732 					physLBA = (uint32_t)(virtLBA >>
2733 					    sc->DD_stripe_exponent);
2734 					stripe_unit = physLBA /
2735 					    sc->DD_num_phys_disks;
2736 					column = physLBA %
2737 					    sc->DD_num_phys_disks;
2738 					pIO_req->DevHandle =
2739 					    htole16(sc->DD_column_map[column].
2740 					    dev_handle);
2741 					cm->cm_desc.SCSIIO.DevHandle =
2742 					    pIO_req->DevHandle;
2743 
2744 					physLBA = (stripe_unit <<
2745 					    sc->DD_stripe_exponent) +
2746 					    stripe_offset;
2747 
2748 					/*
2749 					 * Set upper 4 bytes of LBA to 0.  We
2750 					 * assume that the phys disks are less
2751 					 * than 2 TB's in size.  Then, set the
2752 					 * lower 4 bytes.
2753 					 */
2754 					pIO_req->CDB.CDB32[2] = 0;
2755 					pIO_req->CDB.CDB32[3] = 0;
2756 					pIO_req->CDB.CDB32[4] = 0;
2757 					pIO_req->CDB.CDB32[5] = 0;
2758 					ptrLBA = &pIO_req->CDB.CDB32[6];
2759 					physLBA_byte = (uint8_t)(physLBA >> 24);
2760 					*ptrLBA = physLBA_byte;
2761 					ptrLBA = &pIO_req->CDB.CDB32[7];
2762 					physLBA_byte = (uint8_t)(physLBA >> 16);
2763 					*ptrLBA = physLBA_byte;
2764 					ptrLBA = &pIO_req->CDB.CDB32[8];
2765 					physLBA_byte = (uint8_t)(physLBA >> 8);
2766 					*ptrLBA = physLBA_byte;
2767 					ptrLBA = &pIO_req->CDB.CDB32[9];
2768 					physLBA_byte = (uint8_t)physLBA;
2769 					*ptrLBA = physLBA_byte;
2770 
2771 					/*
2772 					 * Set flag that Direct Drive I/O is
2773 					 * being done.
2774 					 */
2775 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2776 				}
2777 			}
2778 		}
2779 	}
2780 }
2781 
2782 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough requests submitted by
 * mpssas_send_smpcmd().  Validates the command flags and the reply
 * frame, sets the CAM status on the XPT_SMP_IO CCB accordingly, then
 * syncs/unloads the DMA map, frees the command, and completes the CCB.
 */
static void
mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
			   __func__, cm->cm_flags);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
        }

	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/* Reassemble the 64-bit SAS address from the request frame. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
		   "%#jx completed successfully\n", __func__,
		   (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame holds the SMP function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2846 
/*
 * Build and dispatch an SMP passthrough request for an XPT_SMP_IO CCB
 * to the device at the given SAS address.  The request and response
 * buffers are described with a two-element uio so that one
 * mps_map_command() call covers the bidirectional transfer.
 * Completion is handled by mpssas_smpio_complete().
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
			   "%s: physical addresses not supported\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the SMP request; iovec 1 receives the response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;

}
3017 
/*
 * Handle an XPT_SMP_IO CCB: determine which SAS address the SMP request
 * should be routed to (the target itself if it contains an SMP target,
 * otherwise its parent, which is presumably the expander) and hand the
 * request off to mpssas_send_smpcmd().
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we have to figure out what the expander's
	 * address is below.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3153 #endif //__FreeBSD_version >= 900026
3154 
/*
 * Handle an XPT_RESET_DEV CCB by sending the device a SCSI task
 * management Target Reset request (with link reset message flags).
 * The CCB is completed from mpssas_resetdev_complete().
 */
static void
mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *tm;
	struct mpssas_target *targ;

	MPS_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_RESET_DEV\n",
	     ccb->ccb_h.target_id));
	sc = sassc->sc;
	tm = mps_alloc_command(sc);
	if (tm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "command alloc failure in mpssas_action_resetdev\n");
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	tm->cm_data = NULL;
	/* Task management requests use the high priority queue. */
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mpssas_resetdev_complete;
	tm->cm_complete_data = ccb;
	tm->cm_targ = targ;
	/* Block further I/O to this target until the reset completes. */
	targ->flags |= MPSSAS_TARGET_INRESET;

	mps_map_command(sc, tm);
}
3197 
3198 static void
3199 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3200 {
3201 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3202 	union ccb *ccb;
3203 
3204 	MPS_FUNCTRACE(sc);
3205 	mtx_assert(&sc->mps_mtx, MA_OWNED);
3206 
3207 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3208 	ccb = tm->cm_complete_data;
3209 
3210 	/*
3211 	 * Currently there should be no way we can hit this case.  It only
3212 	 * happens when we have a failure to allocate chain frames, and
3213 	 * task management commands don't have S/G lists.
3214 	 */
3215 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3216 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3217 
3218 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3219 
3220 		mps_dprint(sc, MPS_ERROR,
3221 			   "%s: cm_flags = %#x for reset of handle %#04x! "
3222 			   "This should not happen!\n", __func__, tm->cm_flags,
3223 			   req->DevHandle);
3224 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3225 		goto bailout;
3226 	}
3227 
3228 	mps_dprint(sc, MPS_XINFO,
3229 	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3230 	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3231 
3232 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3233 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
3234 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3235 		    CAM_LUN_WILDCARD);
3236 	}
3237 	else
3238 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3239 
3240 bailout:
3241 
3242 	mpssas_free_tm(sc, tm);
3243 	xpt_done(ccb);
3244 }
3245 
3246 static void
3247 mpssas_poll(struct cam_sim *sim)
3248 {
3249 	struct mpssas_softc *sassc;
3250 
3251 	sassc = cam_sim_softc(sim);
3252 
3253 	if (sassc->sc->mps_debug & MPS_TRACE) {
3254 		/* frequent debug messages during a panic just slow
3255 		 * everything down too much.
3256 		 */
3257 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3258 		sassc->sc->mps_debug &= ~MPS_TRACE;
3259 	}
3260 
3261 	mps_intr_locked(sassc->sc);
3262 }
3263 
/*
 * CAM asynchronous event callback.  On newer FreeBSD versions this
 * watches for AC_ADVINFO_CHANGED events carrying long read capacity
 * data and caches each LUN's EEDP (protection information) state; on
 * older versions it reacts to AC_FOUND_DEVICE and probes for EEDP via
 * mpssas_check_eedp().
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the LUN in this target's list, if already known. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* First time we have seen this LUN; add it to the list. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read capacity data from the
		 * transport layer to learn the LUN's protection state.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3368 
3369 #if (__FreeBSD_version < 901503) || \
3370     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3371 static void
3372 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3373 		  struct ccb_getdev *cgd)
3374 {
3375 	struct mpssas_softc *sassc = sc->sassc;
3376 	struct ccb_scsiio *csio;
3377 	struct scsi_read_capacity_16 *scsi_cmd;
3378 	struct scsi_read_capacity_eedp *rcap_buf;
3379 	path_id_t pathid;
3380 	target_id_t targetid;
3381 	lun_id_t lunid;
3382 	union ccb *ccb;
3383 	struct cam_path *local_path;
3384 	struct mpssas_target *target;
3385 	struct mpssas_lun *lun;
3386 	uint8_t	found_lun;
3387 	char path_str[64];
3388 
3389 	sassc = sc->sassc;
3390 	pathid = cam_sim_path(sassc->sim);
3391 	targetid = xpt_path_target_id(path);
3392 	lunid = xpt_path_lun_id(path);
3393 
3394 	KASSERT(targetid < sassc->maxtargets,
3395 	    ("Target %d out of bounds in mpssas_check_eedp\n",
3396 	     targetid));
3397 	target = &sassc->targets[targetid];
3398 	if (target->handle == 0x0)
3399 		return;
3400 
3401 	/*
3402 	 * Determine if the device is EEDP capable.
3403 	 *
3404 	 * If this flag is set in the inquiry data,
3405 	 * the device supports protection information,
3406 	 * and must support the 16 byte read
3407 	 * capacity command, otherwise continue without
3408 	 * sending read cap 16
3409 	 */
3410 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3411 		return;
3412 
3413 	/*
3414 	 * Issue a READ CAPACITY 16 command.  This info
3415 	 * is used to determine if the LUN is formatted
3416 	 * for EEDP support.
3417 	 */
3418 	ccb = xpt_alloc_ccb_nowait();
3419 	if (ccb == NULL) {
3420 		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3421 		    "for EEDP support.\n");
3422 		return;
3423 	}
3424 
3425 	if (xpt_create_path(&local_path, xpt_periph,
3426 	    pathid, targetid, lunid) != CAM_REQ_CMP) {
3427 		mps_dprint(sc, MPS_ERROR, "Unable to create "
3428 		    "path for EEDP support\n");
3429 		xpt_free_ccb(ccb);
3430 		return;
3431 	}
3432 
3433 	/*
3434 	 * If LUN is already in list, don't create a new
3435 	 * one.
3436 	 */
3437 	found_lun = FALSE;
3438 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3439 		if (lun->lun_id == lunid) {
3440 			found_lun = TRUE;
3441 			break;
3442 		}
3443 	}
3444 	if (!found_lun) {
3445 		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3446 		    M_NOWAIT | M_ZERO);
3447 		if (lun == NULL) {
3448 			mps_dprint(sc, MPS_ERROR,
3449 			    "Unable to alloc LUN for EEDP support.\n");
3450 			xpt_free_path(local_path);
3451 			xpt_free_ccb(ccb);
3452 			return;
3453 		}
3454 		lun->lun_id = lunid;
3455 		SLIST_INSERT_HEAD(&target->luns, lun,
3456 		    lun_link);
3457 	}
3458 
3459 	xpt_path_string(local_path, path_str, sizeof(path_str));
3460 
3461 	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3462 	    path_str, target->handle);
3463 
3464 	/*
3465 	 * Issue a READ CAPACITY 16 command for the LUN.
3466 	 * The mpssas_read_cap_done function will load
3467 	 * the read cap info into the LUN struct.
3468 	 */
3469 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3470 	    M_MPT2, M_NOWAIT | M_ZERO);
3471 	if (rcap_buf == NULL) {
3472 		mps_dprint(sc, MPS_FAULT,
3473 		    "Unable to alloc read capacity buffer for EEDP support.\n");
3474 		xpt_free_path(ccb->ccb_h.path);
3475 		xpt_free_ccb(ccb);
3476 		return;
3477 	}
3478 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3479 	csio = &ccb->csio;
3480 	csio->ccb_h.func_code = XPT_SCSI_IO;
3481 	csio->ccb_h.flags = CAM_DIR_IN;
3482 	csio->ccb_h.retry_count = 4;
3483 	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3484 	csio->ccb_h.timeout = 60000;
3485 	csio->data_ptr = (uint8_t *)rcap_buf;
3486 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3487 	csio->sense_len = MPS_SENSE_LEN;
3488 	csio->cdb_len = sizeof(*scsi_cmd);
3489 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3490 
3491 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3492 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3493 	scsi_cmd->opcode = 0x9E;
3494 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3495 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3496 
3497 	ccb->ccb_h.ppriv_ptr1 = sassc;
3498 	xpt_action(ccb);
3499 }
3500 
/*
 * Completion callback for the internal READ CAPACITY 16 issued by
 * mpssas_check_eedp().  Records whether the LUN is formatted for EEDP
 * (and its block size) in the target's LUN list, then frees the data
 * buffer, the path, and the CCB allocated by mpssas_check_eedp().
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver needs to release the devq itself because this SCSI
	 * command was generated internally by the driver rather than by a
	 * peripheral.  This is currently the only internally generated
	 * command; if more are added in the future, they must release the
	 * devq the same way, since they will not return to cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mpssas_read_cap_done\n",
	     done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Bit 0 of the protect byte is the PROT_EN flag. */
		if (rcap_buf->protect & 0x01) {
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
 			    "target ID %d is formatted for EEDP "
 			    "support.\n", done_ccb->ccb_h.target_lun,
 			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3571 #endif /* (__FreeBSD_version < 901503) || \
3572           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3573 
3574 void
3575 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3576     struct mpssas_target *target, lun_id_t lun_id)
3577 {
3578 	union ccb *ccb;
3579 	path_id_t path_id;
3580 
3581 	/*
3582 	 * Set the INRESET flag for this target so that no I/O will be sent to
3583 	 * the target until the reset has completed.  If an I/O request does
3584 	 * happen, the devq will be frozen.  The CCB holds the path which is
3585 	 * used to release the devq.  The devq is released and the CCB is freed
3586 	 * when the TM completes.
3587 	 */
3588 	ccb = xpt_alloc_ccb_nowait();
3589 	if (ccb) {
3590 		path_id = cam_sim_path(sc->sassc->sim);
3591 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3592 		    target->tid, lun_id) != CAM_REQ_CMP) {
3593 			xpt_free_ccb(ccb);
3594 		} else {
3595 			tm->cm_ccb = ccb;
3596 			tm->cm_targ = target;
3597 			target->flags |= MPSSAS_TARGET_INRESET;
3598 		}
3599 	}
3600 }
3601 
3602 int
3603 mpssas_startup(struct mps_softc *sc)
3604 {
3605 
3606 	/*
3607 	 * Send the port enable message and set the wait_for_port_enable flag.
3608 	 * This flag helps to keep the simq frozen until all discovery events
3609 	 * are processed.
3610 	 */
3611 	sc->wait_for_port_enable = 1;
3612 	mpssas_send_portenable(sc);
3613 	return (0);
3614 }
3615 
3616 static int
3617 mpssas_send_portenable(struct mps_softc *sc)
3618 {
3619 	MPI2_PORT_ENABLE_REQUEST *request;
3620 	struct mps_command *cm;
3621 
3622 	MPS_FUNCTRACE(sc);
3623 
3624 	if ((cm = mps_alloc_command(sc)) == NULL)
3625 		return (EBUSY);
3626 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3627 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3628 	request->MsgFlags = 0;
3629 	request->VP_ID = 0;
3630 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3631 	cm->cm_complete = mpssas_portenable_complete;
3632 	cm->cm_data = NULL;
3633 	cm->cm_sge = NULL;
3634 
3635 	mps_map_command(sc, cm);
3636 	mps_dprint(sc, MPS_XINFO,
3637 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3638 	    cm, cm->cm_req, cm->cm_complete);
3639 	return (0);
3640 }
3641 
3642 static void
3643 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3644 {
3645 	MPI2_PORT_ENABLE_REPLY *reply;
3646 	struct mpssas_softc *sassc;
3647 
3648 	MPS_FUNCTRACE(sc);
3649 	sassc = sc->sassc;
3650 
3651 	/*
3652 	 * Currently there should be no way we can hit this case.  It only
3653 	 * happens when we have a failure to allocate chain frames, and
3654 	 * port enable commands don't have S/G lists.
3655 	 */
3656 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3657 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3658 			   "This should not happen!\n", __func__, cm->cm_flags);
3659 	}
3660 
3661 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3662 	if (reply == NULL)
3663 		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3664 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3665 	    MPI2_IOCSTATUS_SUCCESS)
3666 		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3667 
3668 	mps_free_command(sc, cm);
3669 	if (sc->mps_ich.ich_arg != NULL) {
3670 		mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3671 		config_intrhook_disestablish(&sc->mps_ich);
3672 		sc->mps_ich.ich_arg = NULL;
3673 	}
3674 
3675 	/*
3676 	 * Get WarpDrive info after discovery is complete but before the scan
3677 	 * starts.  At this point, all devices are ready to be exposed to the
3678 	 * OS.  If devices should be hidden instead, take them out of the
3679 	 * 'targets' array before the scan.  The devinfo for a disk will have
3680 	 * some info and a volume's will be 0.  Use that to remove disks.
3681 	 */
3682 	mps_wd_config_pages(sc);
3683 
3684 	/*
3685 	 * Done waiting for port enable to complete.  Decrement the refcount.
3686 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3687 	 * take place.  Since the simq was explicitly frozen before port
3688 	 * enable, it must be explicitly released here to keep the
3689 	 * freeze/release count in sync.
3690 	 */
3691 	sc->wait_for_port_enable = 0;
3692 	sc->port_enable_complete = 1;
3693 	wakeup(&sc->port_enable_complete);
3694 	mpssas_startup_decrement(sassc);
3695 }
3696 
3697 int
3698 mpssas_check_id(struct mpssas_softc *sassc, int id)
3699 {
3700 	struct mps_softc *sc = sassc->sc;
3701 	char *ids;
3702 	char *name;
3703 
3704 	ids = &sc->exclude_ids[0];
3705 	while((name = strsep(&ids, ",")) != NULL) {
3706 		if (name[0] == '\0')
3707 			continue;
3708 		if (strtol(name, NULL, 0) == (long)id)
3709 			return (1);
3710 	}
3711 
3712 	return (0);
3713 }
3714 
3715 void
3716 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3717 {
3718 	struct mpssas_softc *sassc;
3719 	struct mpssas_lun *lun, *lun_tmp;
3720 	struct mpssas_target *targ;
3721 	int i;
3722 
3723 	sassc = sc->sassc;
3724 	/*
3725 	 * The number of targets is based on IOC Facts, so free all of
3726 	 * the allocated LUNs for each target and then the target buffer
3727 	 * itself.
3728 	 */
3729 	for (i=0; i< maxtargets; i++) {
3730 		targ = &sassc->targets[i];
3731 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3732 			free(lun, M_MPT2);
3733 		}
3734 	}
3735 	free(sassc->targets, M_MPT2);
3736 
3737 	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3738 	    M_MPT2, M_WAITOK|M_ZERO);
3739 	if (!sassc->targets) {
3740 		panic("%s failed to alloc targets with error %d\n",
3741 		    __func__, ENOMEM);
3742 	}
3743 }
3744