xref: /freebsd/sys/dev/mps/mps_sas.c (revision f37852c17391fdf0e8309bcf684384dd0d854e43)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2015 Avago Technologies
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
29  *
30  * $FreeBSD$
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /* Communications core for Avago Technologies (LSI) MPT2 */
37 
38 /* TODO Move headers to mpsvar */
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/selinfo.h>
44 #include <sys/module.h>
45 #include <sys/bus.h>
46 #include <sys/conf.h>
47 #include <sys/bio.h>
48 #include <sys/malloc.h>
49 #include <sys/uio.h>
50 #include <sys/sysctl.h>
51 #include <sys/endian.h>
52 #include <sys/queue.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
55 #include <sys/sbuf.h>
56 
57 #include <machine/bus.h>
58 #include <machine/resource.h>
59 #include <sys/rman.h>
60 
61 #include <machine/stdarg.h>
62 
63 #include <cam/cam.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_xpt.h>
66 #include <cam/cam_debug.h>
67 #include <cam/cam_sim.h>
68 #include <cam/cam_xpt_sim.h>
69 #include <cam/cam_xpt_periph.h>
70 #include <cam/cam_periph.h>
71 #include <cam/scsi/scsi_all.h>
72 #include <cam/scsi/scsi_message.h>
73 #if __FreeBSD_version >= 900026
74 #include <cam/scsi/smp_all.h>
75 #endif
76 
77 #include <dev/mps/mpi/mpi2_type.h>
78 #include <dev/mps/mpi/mpi2.h>
79 #include <dev/mps/mpi/mpi2_ioc.h>
80 #include <dev/mps/mpi/mpi2_sas.h>
81 #include <dev/mps/mpi/mpi2_cnfg.h>
82 #include <dev/mps/mpi/mpi2_init.h>
83 #include <dev/mps/mpi/mpi2_tool.h>
84 #include <dev/mps/mps_ioctl.h>
85 #include <dev/mps/mpsvar.h>
86 #include <dev/mps/mps_table.h>
87 #include <dev/mps/mps_sas.h>
88 
89 #define MPSSAS_DISCOVERY_TIMEOUT	20
90 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
91 
92 /*
93  * static array to check SCSI OpCode for EEDP protection bits
94  */
95 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
96 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
97 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Indexed by SCSI opcode.  A nonzero entry gives the EEDP flag set to use
 * for that command: PRO_R (check/remove) for reads, PRO_W (insert) for
 * writes, PRO_V (insert) for verifies.  All other opcodes get no EEDP
 * handling.
 */
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x28 READ(10), 0x2a WRITE(10), 0x2e WRITE AND VERIFY(10), 0x2f VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x88 READ(16), 0x8a WRITE(16), 0x8e WRITE AND VERIFY(16), 0x8f VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xa8 READ(12), 0xaa WRITE(12), 0xae WRITE AND VERIFY(12), 0xaf VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
116 
117 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
118 
119 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mpssas_poll(struct cam_sim *sim);
123 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
124     struct mps_command *cm);
125 static void mpssas_scsiio_timeout(void *data);
126 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
127 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
128     struct mps_command *cm, union ccb *ccb);
129 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
130 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
131 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
132 #if __FreeBSD_version >= 900026
133 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
134 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
135 			       uint64_t sasaddr);
136 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
137 #endif //FreeBSD_version >= 900026
138 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
139 static void mpssas_async(void *callback_arg, uint32_t code,
140 			 struct cam_path *path, void *arg);
141 #if (__FreeBSD_version < 901503) || \
142     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
143 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
144 			      struct ccb_getdev *cgd);
145 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
146 #endif
147 static int mpssas_send_portenable(struct mps_softc *sc);
148 static void mpssas_portenable_complete(struct mps_softc *sc,
149     struct mps_command *cm);
150 
151 struct mpssas_target *
152 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
153 {
154 	struct mpssas_target *target;
155 	int i;
156 
157 	for (i = start; i < sassc->maxtargets; i++) {
158 		target = &sassc->targets[i];
159 		if (target->handle == handle)
160 			return (target);
161 	}
162 
163 	return (NULL);
164 }
165 
166 /* we need to freeze the simq during attach and diag reset, to avoid failing
167  * commands before device handles have been found by discovery.  Since
168  * discovery involves reading config pages and possibly sending commands,
169  * discovery actions may continue even after we receive the end of discovery
170  * event, so refcount discovery actions instead of assuming we can unfreeze
171  * the simq when we get the event.
172  */
173 void
174 mpssas_startup_increment(struct mpssas_softc *sassc)
175 {
176 	MPS_FUNCTRACE(sassc->sc);
177 
178 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
179 		if (sassc->startup_refcount++ == 0) {
180 			/* just starting, freeze the simq */
181 			mps_dprint(sassc->sc, MPS_INIT,
182 			    "%s freezing simq\n", __func__);
183 #if __FreeBSD_version >= 1000039
184 			xpt_hold_boot();
185 #endif
186 			xpt_freeze_simq(sassc->sim, 1);
187 		}
188 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
189 		    sassc->startup_refcount);
190 	}
191 }
192 
193 void
194 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
195 {
196 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
197 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
198 		xpt_release_simq(sassc->sim, 1);
199 		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
200 	}
201 }
202 
203 void
204 mpssas_startup_decrement(struct mpssas_softc *sassc)
205 {
206 	MPS_FUNCTRACE(sassc->sc);
207 
208 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
209 		if (--sassc->startup_refcount == 0) {
210 			/* finished all discovery-related actions, release
211 			 * the simq and rescan for the latest topology.
212 			 */
213 			mps_dprint(sassc->sc, MPS_INIT,
214 			    "%s releasing simq\n", __func__);
215 			sassc->flags &= ~MPSSAS_IN_STARTUP;
216 			xpt_release_simq(sassc->sim, 1);
217 #if __FreeBSD_version >= 1000039
218 			xpt_release_boot();
219 #else
220 			mpssas_rescan_target(sassc->sc, NULL);
221 #endif
222 		}
223 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
224 		    sassc->startup_refcount);
225 	}
226 }
227 
228 /* The firmware requires us to stop sending commands when we're doing task
229  * management, so refcount the TMs and keep the simq frozen when any are in
230  * use.
231  */
/*
 * Allocate a task-management command.  TMs are drawn from the dedicated
 * high-priority pool; returns NULL when that pool is exhausted.
 */
struct mps_command *
mpssas_alloc_tm(struct mps_softc *sc)
{
	return (mps_alloc_high_priority_command(sc));
}
240 
/*
 * Return a TM command to the high-priority pool, undoing the per-device
 * devq freeze and INRESET state associated with it, and freeing the
 * path/CCB that were allocated for the freeze.  NULL 'tm' is a no-op.
 */
void
mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
{
	/* Prints as -1 via %d when the TM has no target attached. */
	int target_id = 0xFFFFFFFF;

	if (tm == NULL)
		return;

	/*
	 * For TM's the devq is frozen for the device.  Unfreeze it here and
	 * free the resources used for freezing the devq.  Must clear the
	 * INRESET flag as well or scsi I/O will not work.
	 */
	if (tm->cm_targ != NULL) {
		tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
		target_id = tm->cm_targ->tid;
	}
	/* A CCB on the TM implies a devq freeze was taken on its path. */
	if (tm->cm_ccb) {
		mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
		    target_id);
		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
		xpt_free_path(tm->cm_ccb->ccb_h.path);
		xpt_free_ccb(tm->cm_ccb);
	}

	mps_free_high_priority_command(sc, tm);
}
268 
269 void
270 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
271 {
272 	struct mpssas_softc *sassc = sc->sassc;
273 	path_id_t pathid;
274 	target_id_t targetid;
275 	union ccb *ccb;
276 
277 	MPS_FUNCTRACE(sc);
278 	pathid = cam_sim_path(sassc->sim);
279 	if (targ == NULL)
280 		targetid = CAM_TARGET_WILDCARD;
281 	else
282 		targetid = targ - sassc->targets;
283 
284 	/*
285 	 * Allocate a CCB and schedule a rescan.
286 	 */
287 	ccb = xpt_alloc_ccb_nowait();
288 	if (ccb == NULL) {
289 		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
290 		return;
291 	}
292 
293 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
294 	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
295 		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
296 		xpt_free_ccb(ccb);
297 		return;
298 	}
299 
300 	if (targetid == CAM_TARGET_WILDCARD)
301 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
302 	else
303 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
304 
305 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
306 	xpt_rescan(ccb);
307 }
308 
/*
 * Log a printf-style message for a command, prefixed with the CAM path (or
 * a "noperiph" sim:bus:target:lun tuple when no CCB is attached) and the
 * command's SMID.  Output is suppressed unless 'level' is enabled in the
 * controller's mps_debug mask.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	/* Fixed-buffer sbuf: anything beyond sizeof(str) is truncated. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		/* Prefix with the device path; for SCSI I/O also include
		 * the CDB text and the transfer length. */
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: identify the command by sim/bus/target/lun. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
354 
355 
356 static void
357 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
358 {
359 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
360 	struct mpssas_target *targ;
361 	uint16_t handle;
362 
363 	MPS_FUNCTRACE(sc);
364 
365 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
366 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
367 	targ = tm->cm_targ;
368 
369 	if (reply == NULL) {
370 		/* XXX retry the remove after the diag reset completes? */
371 		mps_dprint(sc, MPS_FAULT,
372 		    "%s NULL reply resetting device 0x%04x\n", __func__,
373 		    handle);
374 		mpssas_free_tm(sc, tm);
375 		return;
376 	}
377 
378 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
379 	    MPI2_IOCSTATUS_SUCCESS) {
380 		mps_dprint(sc, MPS_ERROR,
381 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
382 		   le16toh(reply->IOCStatus), handle);
383 	}
384 
385 	mps_dprint(sc, MPS_XINFO,
386 	    "Reset aborted %u commands\n", reply->TerminationCount);
387 	mps_free_reply(sc, tm->cm_reply_data);
388 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
389 
390 	mps_dprint(sc, MPS_XINFO,
391 	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
392 
393 	/*
394 	 * Don't clear target if remove fails because things will get confusing.
395 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
396 	 * this target id if possible, and so we can assign the same target id
397 	 * to this device if it comes back in the future.
398 	 */
399 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
400 	    MPI2_IOCSTATUS_SUCCESS) {
401 		targ = tm->cm_targ;
402 		targ->handle = 0x0;
403 		targ->encl_handle = 0x0;
404 		targ->encl_slot = 0x0;
405 		targ->exp_dev_handle = 0x0;
406 		targ->phy_num = 0x0;
407 		targ->linkrate = 0x0;
408 		targ->devinfo = 0x0;
409 		targ->flags = 0x0;
410 	}
411 
412 	mpssas_free_tm(sc, tm);
413 }
414 
415 
416 /*
417  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
418  * Otherwise Volume Delete is same as Bare Drive Removal.
419  */
420 void
421 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
422 {
423 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
424 	struct mps_softc *sc;
425 	struct mps_command *cm;
426 	struct mpssas_target *targ = NULL;
427 
428 	MPS_FUNCTRACE(sassc->sc);
429 	sc = sassc->sc;
430 
431 #ifdef WD_SUPPORT
432 	/*
433 	 * If this is a WD controller, determine if the disk should be exposed
434 	 * to the OS or not.  If disk should be exposed, return from this
435 	 * function without doing anything.
436 	 */
437 	if (sc->WD_available && (sc->WD_hide_expose ==
438 	    MPS_WD_EXPOSE_ALWAYS)) {
439 		return;
440 	}
441 #endif //WD_SUPPORT
442 
443 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
444 	if (targ == NULL) {
445 		/* FIXME: what is the action? */
446 		/* We don't know about this device? */
447 		mps_dprint(sc, MPS_ERROR,
448 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
449 		return;
450 	}
451 
452 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
453 
454 	cm = mpssas_alloc_tm(sc);
455 	if (cm == NULL) {
456 		mps_dprint(sc, MPS_ERROR,
457 		    "%s: command alloc failure\n", __func__);
458 		return;
459 	}
460 
461 	mpssas_rescan_target(sc, targ);
462 
463 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
464 	req->DevHandle = targ->handle;
465 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
466 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
467 
468 	/* SAS Hard Link Reset / SATA Link Reset */
469 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
470 
471 	cm->cm_targ = targ;
472 	cm->cm_data = NULL;
473 	cm->cm_desc.HighPriority.RequestFlags =
474 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
475 	cm->cm_complete = mpssas_remove_volume;
476 	cm->cm_complete_data = (void *)(uintptr_t)handle;
477 
478 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
479 	    __func__, targ->tid);
480 	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
481 
482 	mps_map_command(sc, cm);
483 }
484 
485 /*
486  * The MPT2 firmware performs debounce on the link to avoid transient link
487  * errors and false removals.  When it does decide that link has been lost
488  * and a device need to go away, it expects that the host will perform a
489  * target reset and then an op remove.  The reset has the side-effect of
490  * aborting any outstanding requests for the device, which is required for
491  * the op-remove to succeed.  It's not clear if the host should check for
492  * the device coming back alive after the reset.
493  */
void
mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ = NULL;

	MPS_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mpssas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mps_dprint(sc, MPS_ERROR,
		    "%s : invalid handle 0x%x \n", __func__, handle);
		return;
	}

	targ->flags |= MPSSAS_TARGET_INREMOVAL;

	/* TMs come from the high-priority pool; if it is exhausted, the
	 * removal is dropped here with only a log message (no retry). */
	cm = mpssas_alloc_tm(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: command alloc failure\n", __func__);
		return;
	}

	/* Let CAM notice the departure via a rescan of this target. */
	mpssas_rescan_target(sc, targ);

	/* Build the target-reset TM; the frame is zeroed first and the
	 * handle stored little-endian for the IOC. */
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	/* mpssas_remove_device() sends the actual op-remove on completion. */
	cm->cm_complete = mpssas_remove_device;
	cm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);

	mps_map_command(sc, cm);
}
547 
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_remove().  Reuses the same command to send the follow-up
 * SAS_OP_REMOVE_DEVICE IO-unit-control request, then completes any
 * commands still queued on the target back to CAM as CAM_DEV_NOT_THERE.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply resetting device 0x%04x\n", __func__,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_ERROR,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	/* Done with the TM reply frame; release it before reusing 'tm'. */
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	/*
	 * The reset aborted this target's outstanding commands; fail them
	 * back to CAM.  Note 'tm' is deliberately reused as the loop
	 * iterator — its original value was handed off to the hardware via
	 * mps_map_command() above.
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mpssas_scsiio_complete(sc, tm);
	}
}
620 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request sent by
 * mpssas_remove_device().  On success the target table entry is cleared
 * (devname/sasaddr stay intact for target-id reuse) and its LUN list is
 * freed.  The TM is released on every path.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Release every LUN structure hung off this target. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
688 
689 static int
690 mpssas_register_events(struct mps_softc *sc)
691 {
692 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
693 
694 	bzero(events, 16);
695 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
696 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
697 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
698 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
699 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
700 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
701 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
702 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
703 	setbit(events, MPI2_EVENT_IR_VOLUME);
704 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
705 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
706 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
707 
708 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
709 	    &sc->sassc->mpssas_eh);
710 
711 	return (0);
712 }
713 
/*
 * Create the SAS/CAM attachment for the controller: allocate the sassc and
 * target table, set up the simq/SIM, start the event taskqueue, register
 * the CAM bus, freeze the simq until discovery completes, and register for
 * async and firmware events.  Returns 0 or an errno; on error,
 * mps_detach_sas() tears down whatever was set up.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0;

	MPS_FUNCTRACE(sc);

	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	/* NOTE(review): M_WAITOK malloc does not return NULL, so this check
	 * looks like dead code — confirm before removing. */
	if(!sassc) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc->targets) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		__func__, __LINE__);
		free(sassc, M_MPT2);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* One simq slot per outstanding request the IOC supports. */
	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
		    error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	/* First reference freezes the simq until discovery finishes (see
	 * mpssas_startup_decrement()). */
	mpssas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED;
#else
		event = AC_FOUND_DEVICE;
#endif
		status = xpt_register_async(event, mpssas_async, sc,
					    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);
	return (error);
}
842 
/*
 * Tear down the SAS/CAM attachment: deregister events, drain the event
 * taskqueue (lock unheld), deregister the async handler and CAM bus, free
 * the simq, the per-target LUN lists, and the sassc itself.  Safe to call
 * when attach failed partway; returns 0 in all cases.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* If startup never completed, the simq freeze is still held. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any LUN structures still attached to each target. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
902 
903 void
904 mpssas_discovery_end(struct mpssas_softc *sassc)
905 {
906 	struct mps_softc *sc = sassc->sc;
907 
908 	MPS_FUNCTRACE(sc);
909 
910 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
911 		callout_stop(&sassc->discovery_callout);
912 
913 	/*
914 	 * After discovery has completed, check the mapping table for any
915 	 * missing devices and update their missing counts. Only do this once
916 	 * whenever the driver is initialized so that missing counts aren't
917 	 * updated unnecessarily. Note that just because discovery has
918 	 * completed doesn't mean that events have been processed yet. The
919 	 * check_devices function is a callout timer that checks if ALL devices
920 	 * are missing. If so, it will wait a little longer for events to
921 	 * complete and keep resetting itself until some device in the mapping
922 	 * table is not missing, meaning that event processing has started.
923 	 */
924 	if (sc->track_mapping_events) {
925 		mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
926 		    "completed. Check for missing devices in the mapping "
927 		    "table.\n");
928 		callout_reset(&sc->device_check_callout,
929 		    MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
930 		    sc);
931 	}
932 }
933 
/*
 * CAM action entry point for this SIM.  Handles path inquiries, transfer
 * settings, and geometry requests inline; forwards SCSI I/O, SMP I/O, and
 * device resets to dedicated handlers; and fakes success for bus reset /
 * abort / terminate, which the firmware handles internally.  Must be
 * called with the driver mutex held (asserted below).
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mps_softc *sc = sassc->sc;
		uint8_t sges_per_frame;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/* Report the slowest SAS rate (1.5 Gb/s) as the baseline. */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;

		/*
		 * Max IO Size is Page Size * the following:
		 * ((SGEs per frame - 1 for chain element) *
		 * Max Chain Depth) + 1 for no chain needed in last frame
		 *
		 * If user suggests a Max IO size to use, use the smaller of the
		 * user's value and the calculated value as long as the user's
		 * value is larger than 0. The user's value is in pages.
		 */
		sges_per_frame = ((sc->facts->IOCRequestFrameSize * 4) /
		    sizeof(MPI2_SGE_SIMPLE64)) - 1;
		cpi->maxio = (sges_per_frame * sc->facts->MaxChainDepth) + 1;
		cpi->maxio *= PAGE_SIZE;
		if ((sc->max_io_pages > 0) && (sc->max_io_pages * PAGE_SIZE <
		    cpi->maxio))
			cpi->maxio = sc->max_io_pages * PAGE_SIZE;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A zero handle means no device is present at this target. */
		if (targ->handle == 0x0) {
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Translate the MPI link rate code into a CAM bitrate. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		/* resetdev completes the ccb itself; skip the xpt_done below */
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mpssas_action_scsiio(sassc, ccb);
		/* scsiio path completes the ccb asynchronously */
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		/* smpio path completes the ccb asynchronously */
		return;
#endif
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1076 
1077 static void
1078 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1079     target_id_t target_id, lun_id_t lun_id)
1080 {
1081 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1082 	struct cam_path *path;
1083 
1084 	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1085 	    ac_code, target_id, (uintmax_t)lun_id);
1086 
1087 	if (xpt_create_path(&path, NULL,
1088 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1089 		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1090 			   "notification\n");
1091 		return;
1092 	}
1093 
1094 	xpt_async(ac_code, path, NULL);
1095 	xpt_free_path(path);
1096 }
1097 
/*
 * Force completion of every in-flight command after a diag reset.  Each
 * command's reply pointer is cleared (there will be no reply from the
 * firmware), its completion callback is run and/or any sleeping waiter is
 * woken, and the active-I/O accounting is unwound.  Commands that were
 * neither completed nor woken but are not free are logged as anomalies.
 * Called with the driver mutex held (asserted below).
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Polled commands poll on the COMPLETE flag; set it so the
		 * poll loop terminates. */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		}

		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		/* Decrement the active count, guarding against underflow. */
		if (cm->cm_sc->io_cmds_active != 0) {
			cm->cm_sc->io_cmds_active--;
		} else {
			mps_dprint(cm->cm_sc, MPS_INFO, "Warning: "
			    "io_cmds_active is out of sync - resynching to "
			    "0\n");
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1151 
/*
 * Re-initialize SAS state after a controller diag reset: put the driver
 * back into startup/discovery mode, tell CAM the bus was reset, force all
 * outstanding commands to complete, and invalidate every cached device
 * handle (handles are reassigned by the firmware during rediscovery).
 */
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	mpssas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_dprint(sc, MPS_INIT,
	    "%s startup %u after command completion\n", __func__,
	    sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		/* Anything still outstanding here escaped the completion
		 * sweep above; log it for post-mortem. */
		if (sc->sassc->targets[i].outstanding != 0)
			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1195 
1196 static void
1197 mpssas_tm_timeout(void *data)
1198 {
1199 	struct mps_command *tm = data;
1200 	struct mps_softc *sc = tm->cm_sc;
1201 
1202 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1203 
1204 	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1205 	    "task mgmt %p timed out\n", tm);
1206 	mps_reinit(sc);
1207 }
1208 
1209 static void
1210 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1211 {
1212 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1213 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1214 	unsigned int cm_count = 0;
1215 	struct mps_command *cm;
1216 	struct mpssas_target *targ;
1217 
1218 	callout_stop(&tm->cm_callout);
1219 
1220 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1221 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1222 	targ = tm->cm_targ;
1223 
1224 	/*
1225 	 * Currently there should be no way we can hit this case.  It only
1226 	 * happens when we have a failure to allocate chain frames, and
1227 	 * task management commands don't have S/G lists.
1228 	 * XXXSL So should it be an assertion?
1229 	 */
1230 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1231 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1232 			   "This should not happen!\n", __func__, tm->cm_flags);
1233 		mpssas_free_tm(sc, tm);
1234 		return;
1235 	}
1236 
1237 	if (reply == NULL) {
1238 		mpssas_log_command(tm, MPS_RECOVERY,
1239 		    "NULL reset reply for tm %p\n", tm);
1240 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1241 			/* this completion was due to a reset, just cleanup */
1242 			targ->tm = NULL;
1243 			mpssas_free_tm(sc, tm);
1244 		}
1245 		else {
1246 			/* we should have gotten a reply. */
1247 			mps_reinit(sc);
1248 		}
1249 		return;
1250 	}
1251 
1252 	mpssas_log_command(tm, MPS_RECOVERY,
1253 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1254 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1255 	    le32toh(reply->TerminationCount));
1256 
1257 	/* See if there are any outstanding commands for this LUN.
1258 	 * This could be made more efficient by using a per-LU data
1259 	 * structure of some sort.
1260 	 */
1261 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1262 		if (cm->cm_lun == tm->cm_lun)
1263 			cm_count++;
1264 	}
1265 
1266 	if (cm_count == 0) {
1267 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1268 		    "logical unit %u finished recovery after reset\n",
1269 		    tm->cm_lun, tm);
1270 
1271 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1272 		    tm->cm_lun);
1273 
1274 		/* we've finished recovery for this logical unit.  check and
1275 		 * see if some other logical unit has a timedout command
1276 		 * that needs to be processed.
1277 		 */
1278 		cm = TAILQ_FIRST(&targ->timedout_commands);
1279 		if (cm) {
1280 			mpssas_send_abort(sc, tm, cm);
1281 		}
1282 		else {
1283 			targ->tm = NULL;
1284 			mpssas_free_tm(sc, tm);
1285 		}
1286 	}
1287 	else {
1288 		/* if we still have commands for this LUN, the reset
1289 		 * effectively failed, regardless of the status reported.
1290 		 * Escalate to a target reset.
1291 		 */
1292 		mpssas_log_command(tm, MPS_RECOVERY,
1293 		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1294 		    tm, cm_count);
1295 		mpssas_send_reset(sc, tm,
1296 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1297 	}
1298 }
1299 
1300 static void
1301 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1302 {
1303 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1304 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1305 	struct mpssas_target *targ;
1306 
1307 	callout_stop(&tm->cm_callout);
1308 
1309 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1310 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1311 	targ = tm->cm_targ;
1312 
1313 	/*
1314 	 * Currently there should be no way we can hit this case.  It only
1315 	 * happens when we have a failure to allocate chain frames, and
1316 	 * task management commands don't have S/G lists.
1317 	 */
1318 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1319 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1320 			   "This should not happen!\n", __func__, tm->cm_flags);
1321 		mpssas_free_tm(sc, tm);
1322 		return;
1323 	}
1324 
1325 	if (reply == NULL) {
1326 		mpssas_log_command(tm, MPS_RECOVERY,
1327 		    "NULL reset reply for tm %p\n", tm);
1328 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1329 			/* this completion was due to a reset, just cleanup */
1330 			targ->tm = NULL;
1331 			mpssas_free_tm(sc, tm);
1332 		}
1333 		else {
1334 			/* we should have gotten a reply. */
1335 			mps_reinit(sc);
1336 		}
1337 		return;
1338 	}
1339 
1340 	mpssas_log_command(tm, MPS_RECOVERY,
1341 	    "target reset status 0x%x code 0x%x count %u\n",
1342 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1343 	    le32toh(reply->TerminationCount));
1344 
1345 	if (targ->outstanding == 0) {
1346 		/* we've finished recovery for this target and all
1347 		 * of its logical units.
1348 		 */
1349 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1350 		    "recovery finished after target reset\n");
1351 
1352 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1353 		    CAM_LUN_WILDCARD);
1354 
1355 		targ->tm = NULL;
1356 		mpssas_free_tm(sc, tm);
1357 	}
1358 	else {
1359 		/* after a target reset, if this target still has
1360 		 * outstanding commands, the reset effectively failed,
1361 		 * regardless of the status reported.  escalate.
1362 		 */
1363 		mpssas_log_command(tm, MPS_RECOVERY,
1364 		    "target reset complete for tm %p, but still have %u command(s)\n",
1365 		    tm, targ->outstanding);
1366 		mps_reinit(sc);
1367 	}
1368 }
1369 
1370 #define MPS_RESET_TIMEOUT 30
1371 
/*
 * Build and issue a task management reset request (logical unit reset or
 * target reset) using the pre-allocated TM command 'tm'.  Sets the
 * appropriate completion handler, arms a timeout callout that escalates
 * to a controller reinit, and maps the command for submission.
 *
 * Returns 0 on successful submission, -1 for an invalid handle or reset
 * type, or the error from mps_map_command().
 */
int
mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *target;
	int err;

	target = tm->cm_targ;
	/* A zero handle means the device is gone; nothing to reset. */
	if (target->handle == 0) {
		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
		    __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPS_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "sending logical unit reset\n");
		tm->cm_complete = mpssas_logical_unit_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	}
	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 * 	SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "sending target reset\n");
		tm->cm_complete = mpssas_target_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* TM requests carry no data and use the high-priority queue. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	/* Arm the watchdog before submitting; it reinits the IOC on expiry. */
	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
	    mpssas_tm_timeout, tm);

	err = mps_map_command(sc, tm);
	if (err)
		mpssas_log_command(tm, MPS_RECOVERY,
		    "error %d sending reset type %u\n",
		    err, type);

	return err;
}
1432 
1433 
/*
 * Completion handler for an ABORT TASK task management request.  Decides
 * the next recovery step by inspecting the target's timed-out command
 * queue: if it is empty, recovery is finished; if the head is a different
 * command than the one just aborted, continue by aborting it; if the head
 * is still the same command, the abort failed and we escalate to a
 * logical unit reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed; its timeout callout is no longer needed. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		/* reuse the same TM for the next abort */
		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1515 
1516 #define MPS_ABORT_TIMEOUT 5
1517 
1518 static int
1519 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1520 {
1521 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1522 	struct mpssas_target *targ;
1523 	int err;
1524 
1525 	targ = cm->cm_targ;
1526 	if (targ->handle == 0) {
1527 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1528 		    __func__, cm->cm_ccb->ccb_h.target_id);
1529 		return -1;
1530 	}
1531 
1532 	mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1533 	    "Aborting command %p\n", cm);
1534 
1535 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1536 	req->DevHandle = htole16(targ->handle);
1537 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1538 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1539 
1540 	/* XXX Need to handle invalid LUNs */
1541 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1542 
1543 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1544 
1545 	tm->cm_data = NULL;
1546 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1547 	tm->cm_complete = mpssas_abort_complete;
1548 	tm->cm_complete_data = (void *)tm;
1549 	tm->cm_targ = cm->cm_targ;
1550 	tm->cm_lun = cm->cm_lun;
1551 
1552 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1553 	    mpssas_tm_timeout, tm);
1554 
1555 	targ->aborts++;
1556 
1557 	mps_dprint(sc, MPS_INFO, "Sending reset from %s for target ID %d\n",
1558 	    __func__, targ->tid);
1559 	mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1560 
1561 	err = mps_map_command(sc, tm);
1562 	if (err)
1563 		mps_dprint(sc, MPS_RECOVERY,
1564 		    "error %d sending abort for cm %p SMID %u\n",
1565 		    err, cm, req->TaskMID);
1566 	return err;
1567 }
1568 
1569 static void
1570 mpssas_scsiio_timeout(void *data)
1571 {
1572 	struct mps_softc *sc;
1573 	struct mps_command *cm;
1574 	struct mpssas_target *targ;
1575 
1576 	cm = (struct mps_command *)data;
1577 	sc = cm->cm_sc;
1578 
1579 	MPS_FUNCTRACE(sc);
1580 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1581 
1582 	mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1583 
1584 	/*
1585 	 * Run the interrupt handler to make sure it's not pending.  This
1586 	 * isn't perfect because the command could have already completed
1587 	 * and been re-used, though this is unlikely.
1588 	 */
1589 	mps_intr_locked(sc);
1590 	if (cm->cm_state == MPS_CM_STATE_FREE) {
1591 		mpssas_log_command(cm, MPS_XINFO,
1592 		    "SCSI command %p almost timed out\n", cm);
1593 		return;
1594 	}
1595 
1596 	if (cm->cm_ccb == NULL) {
1597 		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1598 		return;
1599 	}
1600 
1601 	targ = cm->cm_targ;
1602 	targ->timeouts++;
1603 
1604 	mpssas_log_command(cm, MPS_ERROR, "command timeout %d cm %p target "
1605 	    "%u, handle(0x%04x)\n", cm->cm_ccb->ccb_h.timeout, cm,  targ->tid,
1606 	    targ->handle);
1607 
1608 	/* XXX first, check the firmware state, to see if it's still
1609 	 * operational.  if not, do a diag reset.
1610 	 */
1611 	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1612 	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1613 	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1614 
1615 	if (targ->tm != NULL) {
1616 		/* target already in recovery, just queue up another
1617 		 * timedout command to be processed later.
1618 		 */
1619 		mps_dprint(sc, MPS_RECOVERY,
1620 		    "queued timedout cm %p for processing by tm %p\n",
1621 		    cm, targ->tm);
1622 	}
1623 	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1624 		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1625 		    cm, targ->tm);
1626 
1627 		/* start recovery by aborting the first timedout command */
1628 		mpssas_send_abort(sc, targ->tm, cm);
1629 	}
1630 	else {
1631 		/* XXX queue this target up for recovery once a TM becomes
1632 		 * available.  The firmware only has a limited number of
1633 		 * HighPriority credits for the high priority requests used
1634 		 * for task management, and we ran out.
1635 		 *
1636 		 * Isilon: don't worry about this for now, since we have
1637 		 * more credits than disks in an enclosure, and limit
1638 		 * ourselves to one TM per target for recovery.
1639 		 */
1640 		mps_dprint(sc, MPS_RECOVERY,
1641 		    "timedout cm %p failed to allocate a tm\n", cm);
1642 	}
1643 
1644 }
1645 
/*
 * Handle an XPT_SCSI_IO CCB: validate the target, allocate a driver
 * command, translate the CCB into an MPI2 SCSI IO request (direction,
 * tagging, LUN, CDB, optional EEDP protection fields), set up the S/G
 * area and completion callback, arm the per-command timeout, and submit
 * the request to the controller.  Completion is asynchronous via
 * mpssas_scsiio_complete(); every early-out path calls xpt_done().
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero handle means no device is present at this target. */
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members are only reachable through the volume. */
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)
;
		else
			mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPSSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	/* No free commands (or a diag reset in flight): freeze the simq and
	 * ask CAM to requeue. */
	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mps_free_command(sc, cm);
		}
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Build the MPI2 SCSI IO request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* 32-byte CDBs need the additional-CDB-length field set. */
	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Merge in the per-target TLR (transport layer retries) setting. */
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	/* NOTE(review): IoFlags was already set to cdb_len above; this
	 * second store appears redundant — confirm before removing. */
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				/* Set RDPROTECT/WRPROTECT bits in byte 1. */
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 *
	 * NOTE(review): the code below calls mpssas_direct_drive_io() when
	 * the sim_priv field equals MPS_WD_RETRY, which reads as the
	 * opposite of this comment — confirm the intended polarity against
	 * how mpssas_scsiio_complete() sets MPS_WD_RETRY.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (csio->bio != NULL)
		biotrack(csio->bio, __func__);
#endif
	/* Arm the per-command timeout (CCB timeout is in milliseconds). */
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mpssas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1918 
1919 static void
1920 mps_response_code(struct mps_softc *sc, u8 response_code)
1921 {
1922         char *desc;
1923 
1924         switch (response_code) {
1925         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1926                 desc = "task management request completed";
1927                 break;
1928         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1929                 desc = "invalid frame";
1930                 break;
1931         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1932                 desc = "task management request not supported";
1933                 break;
1934         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1935                 desc = "task management request failed";
1936                 break;
1937         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1938                 desc = "task management request succeeded";
1939                 break;
1940         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1941                 desc = "invalid lun";
1942                 break;
1943         case 0xA:
1944                 desc = "overlapped tag attempted";
1945                 break;
1946         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1947                 desc = "task queued, however not sent to target";
1948                 break;
1949         default:
1950                 desc = "unknown";
1951                 break;
1952         }
1953 		mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1954                 response_code, desc);
1955 }
1956 /**
1957  * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1958  */
1959 static void
1960 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1961     Mpi2SCSIIOReply_t *mpi_reply)
1962 {
1963 	u32 response_info;
1964 	u8 *response_bytes;
1965 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1966 	    MPI2_IOCSTATUS_MASK;
1967 	u8 scsi_state = mpi_reply->SCSIState;
1968 	u8 scsi_status = mpi_reply->SCSIStatus;
1969 	char *desc_ioc_state = NULL;
1970 	char *desc_scsi_status = NULL;
1971 	char *desc_scsi_state = sc->tmp_string;
1972 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1973 
1974 	if (log_info == 0x31170000)
1975 		return;
1976 
1977 	switch (ioc_status) {
1978 	case MPI2_IOCSTATUS_SUCCESS:
1979 		desc_ioc_state = "success";
1980 		break;
1981 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1982 		desc_ioc_state = "invalid function";
1983 		break;
1984 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1985 		desc_ioc_state = "scsi recovered error";
1986 		break;
1987 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1988 		desc_ioc_state = "scsi invalid dev handle";
1989 		break;
1990 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1991 		desc_ioc_state = "scsi device not there";
1992 		break;
1993 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1994 		desc_ioc_state = "scsi data overrun";
1995 		break;
1996 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1997 		desc_ioc_state = "scsi data underrun";
1998 		break;
1999 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2000 		desc_ioc_state = "scsi io data error";
2001 		break;
2002 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2003 		desc_ioc_state = "scsi protocol error";
2004 		break;
2005 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2006 		desc_ioc_state = "scsi task terminated";
2007 		break;
2008 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2009 		desc_ioc_state = "scsi residual mismatch";
2010 		break;
2011 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2012 		desc_ioc_state = "scsi task mgmt failed";
2013 		break;
2014 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2015 		desc_ioc_state = "scsi ioc terminated";
2016 		break;
2017 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2018 		desc_ioc_state = "scsi ext terminated";
2019 		break;
2020 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2021 		desc_ioc_state = "eedp guard error";
2022 		break;
2023 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2024 		desc_ioc_state = "eedp ref tag error";
2025 		break;
2026 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2027 		desc_ioc_state = "eedp app tag error";
2028 		break;
2029 	default:
2030 		desc_ioc_state = "unknown";
2031 		break;
2032 	}
2033 
2034 	switch (scsi_status) {
2035 	case MPI2_SCSI_STATUS_GOOD:
2036 		desc_scsi_status = "good";
2037 		break;
2038 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
2039 		desc_scsi_status = "check condition";
2040 		break;
2041 	case MPI2_SCSI_STATUS_CONDITION_MET:
2042 		desc_scsi_status = "condition met";
2043 		break;
2044 	case MPI2_SCSI_STATUS_BUSY:
2045 		desc_scsi_status = "busy";
2046 		break;
2047 	case MPI2_SCSI_STATUS_INTERMEDIATE:
2048 		desc_scsi_status = "intermediate";
2049 		break;
2050 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2051 		desc_scsi_status = "intermediate condmet";
2052 		break;
2053 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2054 		desc_scsi_status = "reservation conflict";
2055 		break;
2056 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2057 		desc_scsi_status = "command terminated";
2058 		break;
2059 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
2060 		desc_scsi_status = "task set full";
2061 		break;
2062 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
2063 		desc_scsi_status = "aca active";
2064 		break;
2065 	case MPI2_SCSI_STATUS_TASK_ABORTED:
2066 		desc_scsi_status = "task aborted";
2067 		break;
2068 	default:
2069 		desc_scsi_status = "unknown";
2070 		break;
2071 	}
2072 
2073 	desc_scsi_state[0] = '\0';
2074 	if (!scsi_state)
2075 		desc_scsi_state = " ";
2076 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2077 		strcat(desc_scsi_state, "response info ");
2078 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2079 		strcat(desc_scsi_state, "state terminated ");
2080 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2081 		strcat(desc_scsi_state, "no status ");
2082 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2083 		strcat(desc_scsi_state, "autosense failed ");
2084 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2085 		strcat(desc_scsi_state, "autosense valid ");
2086 
2087 	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2088 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2089 	/* We can add more detail about underflow data here
2090 	 * TO-DO
2091 	 * */
2092 	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
2093 	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2094 	    desc_scsi_state, scsi_state);
2095 
2096 	if (sc->mps_debug & MPS_XINFO &&
2097 		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2098 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
2099 		scsi_sense_print(csio);
2100 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
2101 	}
2102 
2103 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2104 		response_info = le32toh(mpi_reply->ResponseInfo);
2105 		response_bytes = (u8 *)&response_info;
2106 		mps_response_code(sc,response_bytes[0]);
2107 	}
2108 }
2109 
2110 static void
2111 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
2112 {
2113 	MPI2_SCSI_IO_REPLY *rep;
2114 	union ccb *ccb;
2115 	struct ccb_scsiio *csio;
2116 	struct mpssas_softc *sassc;
2117 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2118 	u8 *TLR_bits, TLR_on;
2119 	int dir = 0, i;
2120 	u16 alloc_len;
2121 	struct mpssas_target *target;
2122 	target_id_t target_id;
2123 
2124 	MPS_FUNCTRACE(sc);
2125 	mps_dprint(sc, MPS_TRACE,
2126 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2127 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2128 	    cm->cm_targ->outstanding);
2129 
2130 	callout_stop(&cm->cm_callout);
2131 	mtx_assert(&sc->mps_mtx, MA_OWNED);
2132 
2133 	sassc = sc->sassc;
2134 	ccb = cm->cm_complete_data;
2135 	csio = &ccb->csio;
2136 	target_id = csio->ccb_h.target_id;
2137 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2138 	/*
2139 	 * XXX KDM if the chain allocation fails, does it matter if we do
2140 	 * the sync and unload here?  It is simpler to do it in every case,
2141 	 * assuming it doesn't cause problems.
2142 	 */
2143 	if (cm->cm_data != NULL) {
2144 		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2145 			dir = BUS_DMASYNC_POSTREAD;
2146 		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2147 			dir = BUS_DMASYNC_POSTWRITE;
2148 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2149 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2150 	}
2151 
2152 	cm->cm_targ->completed++;
2153 	cm->cm_targ->outstanding--;
2154 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2155 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2156 
2157 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
2158 	if (ccb->csio.bio != NULL)
2159 		biotrack(ccb->csio.bio, __func__);
2160 #endif
2161 
2162 	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
2163 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2164 		if (cm->cm_reply != NULL)
2165 			mpssas_log_command(cm, MPS_RECOVERY,
2166 			    "completed timedout cm %p ccb %p during recovery "
2167 			    "ioc %x scsi %x state %x xfer %u\n",
2168 			    cm, cm->cm_ccb,
2169 			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2170 			    le32toh(rep->TransferCount));
2171 		else
2172 			mpssas_log_command(cm, MPS_RECOVERY,
2173 			    "completed timedout cm %p ccb %p during recovery\n",
2174 			    cm, cm->cm_ccb);
2175 	} else if (cm->cm_targ->tm != NULL) {
2176 		if (cm->cm_reply != NULL)
2177 			mpssas_log_command(cm, MPS_RECOVERY,
2178 			    "completed cm %p ccb %p during recovery "
2179 			    "ioc %x scsi %x state %x xfer %u\n",
2180 			    cm, cm->cm_ccb,
2181 			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2182 			    le32toh(rep->TransferCount));
2183 		else
2184 			mpssas_log_command(cm, MPS_RECOVERY,
2185 			    "completed cm %p ccb %p during recovery\n",
2186 			    cm, cm->cm_ccb);
2187 	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2188 		mpssas_log_command(cm, MPS_RECOVERY,
2189 		    "reset completed cm %p ccb %p\n",
2190 		    cm, cm->cm_ccb);
2191 	}
2192 
2193 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2194 		/*
2195 		 * We ran into an error after we tried to map the command,
2196 		 * so we're getting a callback without queueing the command
2197 		 * to the hardware.  So we set the status here, and it will
2198 		 * be retained below.  We'll go through the "fast path",
2199 		 * because there can be no reply when we haven't actually
2200 		 * gone out to the hardware.
2201 		 */
2202 		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2203 
2204 		/*
2205 		 * Currently the only error included in the mask is
2206 		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2207 		 * chain frames.  We need to freeze the queue until we get
2208 		 * a command that completed without this error, which will
2209 		 * hopefully have some chain frames attached that we can
2210 		 * use.  If we wanted to get smarter about it, we would
2211 		 * only unfreeze the queue in this condition when we're
2212 		 * sure that we're getting some chain frames back.  That's
2213 		 * probably unnecessary.
2214 		 */
2215 		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2216 			xpt_freeze_simq(sassc->sim, 1);
2217 			sassc->flags |= MPSSAS_QUEUE_FROZEN;
2218 			mps_dprint(sc, MPS_XINFO, "Error sending command, "
2219 				   "freezing SIM queue\n");
2220 		}
2221 	}
2222 
2223 	/*
2224 	 * If this is a Start Stop Unit command and it was issued by the driver
2225 	 * during shutdown, decrement the refcount to account for all of the
2226 	 * commands that were sent.  All SSU commands should be completed before
2227 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2228 	 * is TRUE.
2229 	 */
2230 	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2231 		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
2232 		sc->SSU_refcount--;
2233 	}
2234 
2235 	/* Take the fast path to completion */
2236 	if (cm->cm_reply == NULL) {
2237 		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2238 			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2239 				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2240 			else {
2241 				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2242 				ccb->csio.scsi_status = SCSI_STATUS_OK;
2243 			}
2244 			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2245 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2246 				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2247 				mps_dprint(sc, MPS_XINFO,
2248 				    "Unfreezing SIM queue\n");
2249 			}
2250 		}
2251 
2252 		/*
2253 		 * There are two scenarios where the status won't be
2254 		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
2255 		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2256 		 */
2257 		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2258 			/*
2259 			 * Freeze the dev queue so that commands are
2260 			 * executed in the correct order after error
2261 			 * recovery.
2262 			 */
2263 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2264 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2265 		}
2266 		mps_free_command(sc, cm);
2267 		xpt_done(ccb);
2268 		return;
2269 	}
2270 
2271 	mpssas_log_command(cm, MPS_XINFO,
2272 	    "ioc %x scsi %x state %x xfer %u\n",
2273 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2274 	    le32toh(rep->TransferCount));
2275 
2276 	/*
2277 	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2278 	 * Volume if an error occurred (normal I/O retry).  Use the original
2279 	 * CCB, but set a flag that this will be a retry so that it's sent to
2280 	 * the original volume.  Free the command but reuse the CCB.
2281 	 */
2282 	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2283 		mps_free_command(sc, cm);
2284 		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
2285 		mpssas_action_scsiio(sassc, ccb);
2286 		return;
2287 	} else
2288 		ccb->ccb_h.sim_priv.entries[0].field = 0;
2289 
2290 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2291 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2292 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2293 		/* FALLTHROUGH */
2294 	case MPI2_IOCSTATUS_SUCCESS:
2295 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2296 
2297 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2298 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2299 			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2300 
2301 		/* Completion failed at the transport level. */
2302 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2303 		    MPI2_SCSI_STATE_TERMINATED)) {
2304 			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2305 			break;
2306 		}
2307 
2308 		/* In a modern packetized environment, an autosense failure
2309 		 * implies that there's not much else that can be done to
2310 		 * recover the command.
2311 		 */
2312 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2313 			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2314 			break;
2315 		}
2316 
2317 		/*
2318 		 * CAM doesn't care about SAS Response Info data, but if this is
2319 		 * the state check if TLR should be done.  If not, clear the
2320 		 * TLR_bits for the target.
2321 		 */
2322 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2323 		    ((le32toh(rep->ResponseInfo) &
2324 		    MPI2_SCSI_RI_MASK_REASONCODE) ==
2325 		    MPS_SCSI_RI_INVALID_FRAME)) {
2326 			sc->mapping_table[target_id].TLR_bits =
2327 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2328 		}
2329 
2330 		/*
2331 		 * Intentionally override the normal SCSI status reporting
2332 		 * for these two cases.  These are likely to happen in a
2333 		 * multi-initiator environment, and we want to make sure that
2334 		 * CAM retries these commands rather than fail them.
2335 		 */
2336 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2337 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2338 			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2339 			break;
2340 		}
2341 
2342 		/* Handle normal status and sense */
2343 		csio->scsi_status = rep->SCSIStatus;
2344 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2345 			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2346 		else
2347 			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2348 
2349 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2350 			int sense_len, returned_sense_len;
2351 
2352 			returned_sense_len = min(le32toh(rep->SenseCount),
2353 			    sizeof(struct scsi_sense_data));
2354 			if (returned_sense_len < ccb->csio.sense_len)
2355 				ccb->csio.sense_resid = ccb->csio.sense_len -
2356 					returned_sense_len;
2357 			else
2358 				ccb->csio.sense_resid = 0;
2359 
2360 			sense_len = min(returned_sense_len,
2361 			    ccb->csio.sense_len - ccb->csio.sense_resid);
2362 			bzero(&ccb->csio.sense_data,
2363 			      sizeof(ccb->csio.sense_data));
2364 			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2365 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2366 		}
2367 
2368 		/*
2369 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2370 		 * and it's page code 0 (Supported Page List), and there is
2371 		 * inquiry data, and this is for a sequential access device, and
2372 		 * the device is an SSP target, and TLR is supported by the
2373 		 * controller, turn the TLR_bits value ON if page 0x90 is
2374 		 * supported.
2375 		 */
2376 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2377 		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2378 		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2379 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2380 		    (csio->data_ptr != NULL) &&
2381 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2382 		    (sc->control_TLR) &&
2383 		    (sc->mapping_table[target_id].device_info &
2384 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2385 			vpd_list = (struct scsi_vpd_supported_page_list *)
2386 			    csio->data_ptr;
2387 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2388 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2389 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2390 			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2391 			    csio->cdb_io.cdb_bytes[4];
2392 			alloc_len -= csio->resid;
2393 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2394 				if (vpd_list->list[i] == 0x90) {
2395 					*TLR_bits = TLR_on;
2396 					break;
2397 				}
2398 			}
2399 		}
2400 
2401 		/*
2402 		 * If this is a SATA direct-access end device, mark it so that
2403 		 * a SCSI StartStopUnit command will be sent to it when the
2404 		 * driver is being shutdown.
2405 		 */
2406 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2407 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2408 		    (sc->mapping_table[target_id].device_info &
2409 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2410 		    ((sc->mapping_table[target_id].device_info &
2411 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2412 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2413 			target = &sassc->targets[target_id];
2414 			target->supports_SSU = TRUE;
2415 			mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
2416 			    target_id);
2417 		}
2418 		break;
2419 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2420 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2421 		/*
2422 		 * If devinfo is 0 this will be a volume.  In that case don't
2423 		 * tell CAM that the volume is not there.  We want volumes to
2424 		 * be enumerated until they are deleted/removed, not just
2425 		 * failed.
2426 		 */
2427 		if (cm->cm_targ->devinfo == 0)
2428 			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2429 		else
2430 			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2431 		break;
2432 	case MPI2_IOCSTATUS_INVALID_SGL:
2433 		mps_print_scsiio_cmd(sc, cm);
2434 		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2435 		break;
2436 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2437 		/*
2438 		 * This is one of the responses that comes back when an I/O
2439 		 * has been aborted.  If it is because of a timeout that we
2440 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2441 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2442 		 * command is the same (it gets retried, subject to the
2443 		 * retry counter), the only difference is what gets printed
2444 		 * on the console.
2445 		 */
2446 		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2447 			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2448 		else
2449 			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2450 		break;
2451 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2452 		/* resid is ignored for this condition */
2453 		csio->resid = 0;
2454 		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2455 		break;
2456 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2457 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2458 		/*
2459 		 * These can sometimes be transient transport-related
2460 		 * errors, and sometimes persistent drive-related errors.
2461 		 * We used to retry these without decrementing the retry
2462 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2463 		 * we hit a persistent drive problem that returns one of
2464 		 * these error codes, we would retry indefinitely.  So,
2465 		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2466 		 * count and avoid infinite retries.  We're taking the
2467 		 * potential risk of flagging false failures in the event
2468 		 * of a topology-related error (e.g. a SAS expander problem
2469 		 * causes a command addressed to a drive to fail), but
2470 		 * avoiding getting into an infinite retry loop.
2471 		 */
2472 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2473 		mpssas_log_command(cm, MPS_INFO,
2474 		    "terminated ioc %x loginfo %x scsi %x state %x xfer %u\n",
2475 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2476 		    rep->SCSIStatus, rep->SCSIState,
2477 		    le32toh(rep->TransferCount));
2478 		break;
2479 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2480 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2481 	case MPI2_IOCSTATUS_INVALID_VPID:
2482 	case MPI2_IOCSTATUS_INVALID_FIELD:
2483 	case MPI2_IOCSTATUS_INVALID_STATE:
2484 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2485 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2486 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2487 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2488 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2489 	default:
2490 		mpssas_log_command(cm, MPS_XINFO,
2491 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2492 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2493 		    rep->SCSIStatus, rep->SCSIState,
2494 		    le32toh(rep->TransferCount));
2495 		csio->resid = cm->cm_length;
2496 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2497 		break;
2498 	}
2499 
2500 	mps_sc_failed_io_info(sc,csio,rep);
2501 
2502 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2503 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2504 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2505 		mps_dprint(sc, MPS_XINFO, "Command completed, "
2506 		    "unfreezing SIM queue\n");
2507 	}
2508 
2509 	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2510 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2511 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2512 	}
2513 
2514 	mps_free_command(sc, cm);
2515 	xpt_done(ccb);
2516 }
2517 
2518 /* All Request reached here are Endian safe */
2519 static void
2520 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2521     union ccb *ccb) {
2522 	pMpi2SCSIIORequest_t	pIO_req;
2523 	struct mps_softc	*sc = sassc->sc;
2524 	uint64_t		virtLBA;
2525 	uint32_t		physLBA, stripe_offset, stripe_unit;
2526 	uint32_t		io_size, column;
2527 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2528 
2529 	/*
2530 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2531 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2532 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2533 	 * bit different than the 10/16 CDBs, handle them separately.
2534 	 */
2535 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2536 	CDB = pIO_req->CDB.CDB32;
2537 
2538 	/*
2539 	 * Handle 6 byte CDBs.
2540 	 */
2541 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2542 	    (CDB[0] == WRITE_6))) {
2543 		/*
2544 		 * Get the transfer size in blocks.
2545 		 */
2546 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2547 
2548 		/*
2549 		 * Get virtual LBA given in the CDB.
2550 		 */
2551 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2552 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2553 
2554 		/*
2555 		 * Check that LBA range for I/O does not exceed volume's
2556 		 * MaxLBA.
2557 		 */
2558 		if ((virtLBA + (uint64_t)io_size - 1) <=
2559 		    sc->DD_max_lba) {
2560 			/*
2561 			 * Check if the I/O crosses a stripe boundary.  If not,
2562 			 * translate the virtual LBA to a physical LBA and set
2563 			 * the DevHandle for the PhysDisk to be used.  If it
2564 			 * does cross a boundary, do normal I/O.  To get the
2565 			 * right DevHandle to use, get the map number for the
2566 			 * column, then use that map number to look up the
2567 			 * DevHandle of the PhysDisk.
2568 			 */
2569 			stripe_offset = (uint32_t)virtLBA &
2570 			    (sc->DD_stripe_size - 1);
2571 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2572 				physLBA = (uint32_t)virtLBA >>
2573 				    sc->DD_stripe_exponent;
2574 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2575 				column = physLBA % sc->DD_num_phys_disks;
2576 				pIO_req->DevHandle =
2577 				    htole16(sc->DD_column_map[column].dev_handle);
2578 				/* ???? Is this endian safe*/
2579 				cm->cm_desc.SCSIIO.DevHandle =
2580 				    pIO_req->DevHandle;
2581 
2582 				physLBA = (stripe_unit <<
2583 				    sc->DD_stripe_exponent) + stripe_offset;
2584 				ptrLBA = &pIO_req->CDB.CDB32[1];
2585 				physLBA_byte = (uint8_t)(physLBA >> 16);
2586 				*ptrLBA = physLBA_byte;
2587 				ptrLBA = &pIO_req->CDB.CDB32[2];
2588 				physLBA_byte = (uint8_t)(physLBA >> 8);
2589 				*ptrLBA = physLBA_byte;
2590 				ptrLBA = &pIO_req->CDB.CDB32[3];
2591 				physLBA_byte = (uint8_t)physLBA;
2592 				*ptrLBA = physLBA_byte;
2593 
2594 				/*
2595 				 * Set flag that Direct Drive I/O is
2596 				 * being done.
2597 				 */
2598 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2599 			}
2600 		}
2601 		return;
2602 	}
2603 
2604 	/*
2605 	 * Handle 10, 12 or 16 byte CDBs.
2606 	 */
2607 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2608 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2609 	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2610 	    (CDB[0] == WRITE_12))) {
2611 		/*
2612 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2613 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2614 		 * the else section.  10-byte and 12-byte CDB's are OK.
2615 		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2616 		 * ready to accept 12byte CDB for Direct IOs.
2617 		 */
2618 		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2619 		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2620 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2621 			/*
2622 			 * Get the transfer size in blocks.
2623 			 */
2624 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2625 
2626 			/*
2627 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2628 			 * LBA in the CDB depending on command.
2629 			 */
2630 			lba_idx = ((CDB[0] == READ_12) ||
2631 				(CDB[0] == WRITE_12) ||
2632 				(CDB[0] == READ_10) ||
2633 				(CDB[0] == WRITE_10))? 2 : 6;
2634 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2635 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2636 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2637 			    (uint64_t)CDB[lba_idx + 3];
2638 
2639 			/*
2640 			 * Check that LBA range for I/O does not exceed volume's
2641 			 * MaxLBA.
2642 			 */
2643 			if ((virtLBA + (uint64_t)io_size - 1) <=
2644 			    sc->DD_max_lba) {
2645 				/*
2646 				 * Check if the I/O crosses a stripe boundary.
2647 				 * If not, translate the virtual LBA to a
2648 				 * physical LBA and set the DevHandle for the
2649 				 * PhysDisk to be used.  If it does cross a
2650 				 * boundary, do normal I/O.  To get the right
2651 				 * DevHandle to use, get the map number for the
2652 				 * column, then use that map number to look up
2653 				 * the DevHandle of the PhysDisk.
2654 				 */
2655 				stripe_offset = (uint32_t)virtLBA &
2656 				    (sc->DD_stripe_size - 1);
2657 				if ((stripe_offset + io_size) <=
2658 				    sc->DD_stripe_size) {
2659 					physLBA = (uint32_t)virtLBA >>
2660 					    sc->DD_stripe_exponent;
2661 					stripe_unit = physLBA /
2662 					    sc->DD_num_phys_disks;
2663 					column = physLBA %
2664 					    sc->DD_num_phys_disks;
2665 					pIO_req->DevHandle =
2666 					    htole16(sc->DD_column_map[column].
2667 					    dev_handle);
2668 					cm->cm_desc.SCSIIO.DevHandle =
2669 					    pIO_req->DevHandle;
2670 
2671 					physLBA = (stripe_unit <<
2672 					    sc->DD_stripe_exponent) +
2673 					    stripe_offset;
2674 					ptrLBA =
2675 					    &pIO_req->CDB.CDB32[lba_idx];
2676 					physLBA_byte = (uint8_t)(physLBA >> 24);
2677 					*ptrLBA = physLBA_byte;
2678 					ptrLBA =
2679 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2680 					physLBA_byte = (uint8_t)(physLBA >> 16);
2681 					*ptrLBA = physLBA_byte;
2682 					ptrLBA =
2683 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2684 					physLBA_byte = (uint8_t)(physLBA >> 8);
2685 					*ptrLBA = physLBA_byte;
2686 					ptrLBA =
2687 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2688 					physLBA_byte = (uint8_t)physLBA;
2689 					*ptrLBA = physLBA_byte;
2690 
2691 					/*
2692 					 * Set flag that Direct Drive I/O is
2693 					 * being done.
2694 					 */
2695 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2696 				}
2697 			}
2698 		} else {
2699 			/*
2700 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2701 			 * 0.  Get the transfer size in blocks.
2702 			 */
2703 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2704 
2705 			/*
2706 			 * Get virtual LBA.
2707 			 */
2708 			virtLBA = ((uint64_t)CDB[2] << 54) |
2709 			    ((uint64_t)CDB[3] << 48) |
2710 			    ((uint64_t)CDB[4] << 40) |
2711 			    ((uint64_t)CDB[5] << 32) |
2712 			    ((uint64_t)CDB[6] << 24) |
2713 			    ((uint64_t)CDB[7] << 16) |
2714 			    ((uint64_t)CDB[8] << 8) |
2715 			    (uint64_t)CDB[9];
2716 
2717 			/*
2718 			 * Check that LBA range for I/O does not exceed volume's
2719 			 * MaxLBA.
2720 			 */
2721 			if ((virtLBA + (uint64_t)io_size - 1) <=
2722 			    sc->DD_max_lba) {
2723 				/*
2724 				 * Check if the I/O crosses a stripe boundary.
2725 				 * If not, translate the virtual LBA to a
2726 				 * physical LBA and set the DevHandle for the
2727 				 * PhysDisk to be used.  If it does cross a
2728 				 * boundary, do normal I/O.  To get the right
2729 				 * DevHandle to use, get the map number for the
2730 				 * column, then use that map number to look up
2731 				 * the DevHandle of the PhysDisk.
2732 				 */
2733 				stripe_offset = (uint32_t)virtLBA &
2734 				    (sc->DD_stripe_size - 1);
2735 				if ((stripe_offset + io_size) <=
2736 				    sc->DD_stripe_size) {
2737 					physLBA = (uint32_t)(virtLBA >>
2738 					    sc->DD_stripe_exponent);
2739 					stripe_unit = physLBA /
2740 					    sc->DD_num_phys_disks;
2741 					column = physLBA %
2742 					    sc->DD_num_phys_disks;
2743 					pIO_req->DevHandle =
2744 					    htole16(sc->DD_column_map[column].
2745 					    dev_handle);
2746 					cm->cm_desc.SCSIIO.DevHandle =
2747 					    pIO_req->DevHandle;
2748 
2749 					physLBA = (stripe_unit <<
2750 					    sc->DD_stripe_exponent) +
2751 					    stripe_offset;
2752 
2753 					/*
2754 					 * Set upper 4 bytes of LBA to 0.  We
2755 					 * assume that the phys disks are less
2756 					 * than 2 TB's in size.  Then, set the
2757 					 * lower 4 bytes.
2758 					 */
2759 					pIO_req->CDB.CDB32[2] = 0;
2760 					pIO_req->CDB.CDB32[3] = 0;
2761 					pIO_req->CDB.CDB32[4] = 0;
2762 					pIO_req->CDB.CDB32[5] = 0;
2763 					ptrLBA = &pIO_req->CDB.CDB32[6];
2764 					physLBA_byte = (uint8_t)(physLBA >> 24);
2765 					*ptrLBA = physLBA_byte;
2766 					ptrLBA = &pIO_req->CDB.CDB32[7];
2767 					physLBA_byte = (uint8_t)(physLBA >> 16);
2768 					*ptrLBA = physLBA_byte;
2769 					ptrLBA = &pIO_req->CDB.CDB32[8];
2770 					physLBA_byte = (uint8_t)(physLBA >> 8);
2771 					*ptrLBA = physLBA_byte;
2772 					ptrLBA = &pIO_req->CDB.CDB32[9];
2773 					physLBA_byte = (uint8_t)physLBA;
2774 					*ptrLBA = physLBA_byte;
2775 
2776 					/*
2777 					 * Set flag that Direct Drive I/O is
2778 					 * being done.
2779 					 */
2780 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2781 				}
2782 			}
2783 		}
2784 	}
2785 }
2786 
2787 #if __FreeBSD_version >= 900026
/*
 * Completion handler for an SMP passthrough request submitted by
 * mpssas_send_smpcmd().  Decodes the firmware reply and the SMP
 * response frame, sets the CAM status on the user's CCB accordingly,
 * then unloads the DMA map, frees the command, and completes the CCB.
 */
static void
mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
			   __func__, cm->cm_flags);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
        }

	/* No reply frame from the firmware: nothing to decode. */
	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/* Reassemble the 64-bit SAS address from the request, for logging. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
		   "%#jx completed successfully\n", __func__,
		   (uintmax_t)sasaddr);

	/* Byte 2 of an SMP response frame holds the function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2851 
/*
 * Build and issue an SMP passthrough request to the given SAS address.
 * The CCB's request and response buffers are mapped as a two-element
 * uio — the chip accepts exactly one buffer for the request and one for
 * the response.  Completion is handled by mpssas_smpio_complete().
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
			   "%s: physical addresses not supported\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the request, iovec 1 receives the response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;

}
3022 
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address to which the SMP
 * request should be routed (the target itself if it embeds an SMP
 * target, otherwise its parent expander) and hand the CCB off to
 * mpssas_send_smpcmd().
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly; otherwise we have to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	/* Give up if no usable SAS address was found by any method. */
	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3158 #endif //__FreeBSD_version >= 900026
3159 
/*
 * Handle an XPT_RESET_DEV CCB: send a SCSI task management Target Reset
 * (requesting a SAS hard link reset / SATA link reset) to the device
 * addressed by the CCB.  Completion is handled by
 * mpssas_resetdev_complete().
 */
static void
mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *tm;
	struct mpssas_target *targ;

	MPS_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_RESET_DEV\n",
	     ccb->ccb_h.target_id));
	sc = sassc->sc;
	/*
	 * NOTE(review): this allocates with mps_alloc_command() while the
	 * completion path releases with mpssas_free_tm() — confirm those
	 * operate on the same command pool.
	 */
	tm = mps_alloc_command(sc);
	if (tm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "command alloc failure in mpssas_action_resetdev\n");
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	/* No data payload; TM requests go on the high-priority queue. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mpssas_resetdev_complete;
	tm->cm_complete_data = ccb;
	tm->cm_targ = targ;
	targ->flags |= MPSSAS_TARGET_INRESET;

	mps_map_command(sc, tm);
}
3202 
3203 static void
3204 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3205 {
3206 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3207 	union ccb *ccb;
3208 
3209 	MPS_FUNCTRACE(sc);
3210 	mtx_assert(&sc->mps_mtx, MA_OWNED);
3211 
3212 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3213 	ccb = tm->cm_complete_data;
3214 
3215 	/*
3216 	 * Currently there should be no way we can hit this case.  It only
3217 	 * happens when we have a failure to allocate chain frames, and
3218 	 * task management commands don't have S/G lists.
3219 	 */
3220 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3221 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3222 
3223 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3224 
3225 		mps_dprint(sc, MPS_ERROR,
3226 			   "%s: cm_flags = %#x for reset of handle %#04x! "
3227 			   "This should not happen!\n", __func__, tm->cm_flags,
3228 			   req->DevHandle);
3229 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3230 		goto bailout;
3231 	}
3232 
3233 	mps_dprint(sc, MPS_XINFO,
3234 	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3235 	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3236 
3237 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3238 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
3239 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3240 		    CAM_LUN_WILDCARD);
3241 	}
3242 	else
3243 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3244 
3245 bailout:
3246 
3247 	mpssas_free_tm(sc, tm);
3248 	xpt_done(ccb);
3249 }
3250 
3251 static void
3252 mpssas_poll(struct cam_sim *sim)
3253 {
3254 	struct mpssas_softc *sassc;
3255 
3256 	sassc = cam_sim_softc(sim);
3257 
3258 	if (sassc->sc->mps_debug & MPS_TRACE) {
3259 		/* frequent debug messages during a panic just slow
3260 		 * everything down too much.
3261 		 */
3262 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3263 		sassc->sc->mps_debug &= ~MPS_TRACE;
3264 	}
3265 
3266 	mps_intr_locked(sassc->sc);
3267 }
3268 
/*
 * CAM async event callback.  Depending on the FreeBSD version this
 * either reacts to AC_ADVINFO_CHANGED (refetch long read-capacity data
 * and update the per-LUN EEDP state) or to AC_FOUND_DEVICE (probe the
 * new device for EEDP support via mpssas_check_eedp()).
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the existing LUN entry, or create one if missing. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read-capacity data through an
		 * XPT_DEV_ADVINFO CCB issued on the stack.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/* Record whether the LUN is formatted with protection. */
		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3373 
3374 #if (__FreeBSD_version < 901503) || \
3375     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3376 static void
3377 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3378 		  struct ccb_getdev *cgd)
3379 {
3380 	struct mpssas_softc *sassc = sc->sassc;
3381 	struct ccb_scsiio *csio;
3382 	struct scsi_read_capacity_16 *scsi_cmd;
3383 	struct scsi_read_capacity_eedp *rcap_buf;
3384 	path_id_t pathid;
3385 	target_id_t targetid;
3386 	lun_id_t lunid;
3387 	union ccb *ccb;
3388 	struct cam_path *local_path;
3389 	struct mpssas_target *target;
3390 	struct mpssas_lun *lun;
3391 	uint8_t	found_lun;
3392 	char path_str[64];
3393 
3394 	sassc = sc->sassc;
3395 	pathid = cam_sim_path(sassc->sim);
3396 	targetid = xpt_path_target_id(path);
3397 	lunid = xpt_path_lun_id(path);
3398 
3399 	KASSERT(targetid < sassc->maxtargets,
3400 	    ("Target %d out of bounds in mpssas_check_eedp\n",
3401 	     targetid));
3402 	target = &sassc->targets[targetid];
3403 	if (target->handle == 0x0)
3404 		return;
3405 
3406 	/*
3407 	 * Determine if the device is EEDP capable.
3408 	 *
3409 	 * If this flag is set in the inquiry data,
3410 	 * the device supports protection information,
3411 	 * and must support the 16 byte read
3412 	 * capacity command, otherwise continue without
3413 	 * sending read cap 16
3414 	 */
3415 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3416 		return;
3417 
3418 	/*
3419 	 * Issue a READ CAPACITY 16 command.  This info
3420 	 * is used to determine if the LUN is formatted
3421 	 * for EEDP support.
3422 	 */
3423 	ccb = xpt_alloc_ccb_nowait();
3424 	if (ccb == NULL) {
3425 		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3426 		    "for EEDP support.\n");
3427 		return;
3428 	}
3429 
3430 	if (xpt_create_path(&local_path, xpt_periph,
3431 	    pathid, targetid, lunid) != CAM_REQ_CMP) {
3432 		mps_dprint(sc, MPS_ERROR, "Unable to create "
3433 		    "path for EEDP support\n");
3434 		xpt_free_ccb(ccb);
3435 		return;
3436 	}
3437 
3438 	/*
3439 	 * If LUN is already in list, don't create a new
3440 	 * one.
3441 	 */
3442 	found_lun = FALSE;
3443 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3444 		if (lun->lun_id == lunid) {
3445 			found_lun = TRUE;
3446 			break;
3447 		}
3448 	}
3449 	if (!found_lun) {
3450 		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3451 		    M_NOWAIT | M_ZERO);
3452 		if (lun == NULL) {
3453 			mps_dprint(sc, MPS_ERROR,
3454 			    "Unable to alloc LUN for EEDP support.\n");
3455 			xpt_free_path(local_path);
3456 			xpt_free_ccb(ccb);
3457 			return;
3458 		}
3459 		lun->lun_id = lunid;
3460 		SLIST_INSERT_HEAD(&target->luns, lun,
3461 		    lun_link);
3462 	}
3463 
3464 	xpt_path_string(local_path, path_str, sizeof(path_str));
3465 
3466 	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3467 	    path_str, target->handle);
3468 
3469 	/*
3470 	 * Issue a READ CAPACITY 16 command for the LUN.
3471 	 * The mpssas_read_cap_done function will load
3472 	 * the read cap info into the LUN struct.
3473 	 */
3474 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3475 	    M_MPT2, M_NOWAIT | M_ZERO);
3476 	if (rcap_buf == NULL) {
3477 		mps_dprint(sc, MPS_FAULT,
3478 		    "Unable to alloc read capacity buffer for EEDP support.\n");
3479 		xpt_free_path(ccb->ccb_h.path);
3480 		xpt_free_ccb(ccb);
3481 		return;
3482 	}
3483 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3484 	csio = &ccb->csio;
3485 	csio->ccb_h.func_code = XPT_SCSI_IO;
3486 	csio->ccb_h.flags = CAM_DIR_IN;
3487 	csio->ccb_h.retry_count = 4;
3488 	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3489 	csio->ccb_h.timeout = 60000;
3490 	csio->data_ptr = (uint8_t *)rcap_buf;
3491 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3492 	csio->sense_len = MPS_SENSE_LEN;
3493 	csio->cdb_len = sizeof(*scsi_cmd);
3494 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3495 
3496 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3497 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3498 	scsi_cmd->opcode = 0x9E;
3499 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3500 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3501 
3502 	ccb->ccb_h.ppriv_ptr1 = sassc;
3503 	xpt_action(ccb);
3504 }
3505 
/*
 * Completion callback for the READ CAPACITY 16 issued by
 * mpssas_check_eedp().  Updates the matching per-LUN EEDP state, then
 * frees the data buffer, path, and CCB.
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver needs to release the devq itself because this SCSI
	 * command was generated internally by the driver rather than by
	 * a periph, so the completion never goes back through
	 * cam_periph.  This is currently the only internally generated
	 * SCSI command; any future ones will need the same treatment.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mpssas_read_cap_done\n",
	     done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Bit 0 of the protect byte reports PROT_EN. */
		if (rcap_buf->protect & 0x01) {
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
 			    "target ID %d is formatted for EEDP "
 			    "support.\n", done_ccb->ccb_h.target_lun,
 			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3576 #endif /* (__FreeBSD_version < 901503) || \
3577           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3578 
3579 void
3580 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3581     struct mpssas_target *target, lun_id_t lun_id)
3582 {
3583 	union ccb *ccb;
3584 	path_id_t path_id;
3585 
3586 	/*
3587 	 * Set the INRESET flag for this target so that no I/O will be sent to
3588 	 * the target until the reset has completed.  If an I/O request does
3589 	 * happen, the devq will be frozen.  The CCB holds the path which is
3590 	 * used to release the devq.  The devq is released and the CCB is freed
3591 	 * when the TM completes.
3592 	 */
3593 	ccb = xpt_alloc_ccb_nowait();
3594 	if (ccb) {
3595 		path_id = cam_sim_path(sc->sassc->sim);
3596 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3597 		    target->tid, lun_id) != CAM_REQ_CMP) {
3598 			xpt_free_ccb(ccb);
3599 		} else {
3600 			tm->cm_ccb = ccb;
3601 			tm->cm_targ = target;
3602 			target->flags |= MPSSAS_TARGET_INRESET;
3603 		}
3604 	}
3605 }
3606 
3607 int
3608 mpssas_startup(struct mps_softc *sc)
3609 {
3610 
3611 	/*
3612 	 * Send the port enable message and set the wait_for_port_enable flag.
3613 	 * This flag helps to keep the simq frozen until all discovery events
3614 	 * are processed.
3615 	 */
3616 	sc->wait_for_port_enable = 1;
3617 	mpssas_send_portenable(sc);
3618 	return (0);
3619 }
3620 
3621 static int
3622 mpssas_send_portenable(struct mps_softc *sc)
3623 {
3624 	MPI2_PORT_ENABLE_REQUEST *request;
3625 	struct mps_command *cm;
3626 
3627 	MPS_FUNCTRACE(sc);
3628 
3629 	if ((cm = mps_alloc_command(sc)) == NULL)
3630 		return (EBUSY);
3631 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3632 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3633 	request->MsgFlags = 0;
3634 	request->VP_ID = 0;
3635 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3636 	cm->cm_complete = mpssas_portenable_complete;
3637 	cm->cm_data = NULL;
3638 	cm->cm_sge = NULL;
3639 
3640 	mps_map_command(sc, cm);
3641 	mps_dprint(sc, MPS_XINFO,
3642 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3643 	    cm, cm->cm_req, cm->cm_complete);
3644 	return (0);
3645 }
3646 
3647 static void
3648 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3649 {
3650 	MPI2_PORT_ENABLE_REPLY *reply;
3651 	struct mpssas_softc *sassc;
3652 
3653 	MPS_FUNCTRACE(sc);
3654 	sassc = sc->sassc;
3655 
3656 	/*
3657 	 * Currently there should be no way we can hit this case.  It only
3658 	 * happens when we have a failure to allocate chain frames, and
3659 	 * port enable commands don't have S/G lists.
3660 	 */
3661 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3662 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3663 			   "This should not happen!\n", __func__, cm->cm_flags);
3664 	}
3665 
3666 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3667 	if (reply == NULL)
3668 		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3669 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3670 	    MPI2_IOCSTATUS_SUCCESS)
3671 		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3672 
3673 	mps_free_command(sc, cm);
3674 	if (sc->mps_ich.ich_arg != NULL) {
3675 		mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3676 		config_intrhook_disestablish(&sc->mps_ich);
3677 		sc->mps_ich.ich_arg = NULL;
3678 	}
3679 
3680 	/*
3681 	 * Get WarpDrive info after discovery is complete but before the scan
3682 	 * starts.  At this point, all devices are ready to be exposed to the
3683 	 * OS.  If devices should be hidden instead, take them out of the
3684 	 * 'targets' array before the scan.  The devinfo for a disk will have
3685 	 * some info and a volume's will be 0.  Use that to remove disks.
3686 	 */
3687 	mps_wd_config_pages(sc);
3688 
3689 	/*
3690 	 * Done waiting for port enable to complete.  Decrement the refcount.
3691 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3692 	 * take place.  Since the simq was explicitly frozen before port
3693 	 * enable, it must be explicitly released here to keep the
3694 	 * freeze/release count in sync.
3695 	 */
3696 	sc->wait_for_port_enable = 0;
3697 	sc->port_enable_complete = 1;
3698 	wakeup(&sc->port_enable_complete);
3699 	mpssas_startup_decrement(sassc);
3700 }
3701 
3702 int
3703 mpssas_check_id(struct mpssas_softc *sassc, int id)
3704 {
3705 	struct mps_softc *sc = sassc->sc;
3706 	char *ids;
3707 	char *name;
3708 
3709 	ids = &sc->exclude_ids[0];
3710 	while((name = strsep(&ids, ",")) != NULL) {
3711 		if (name[0] == '\0')
3712 			continue;
3713 		if (strtol(name, NULL, 0) == (long)id)
3714 			return (1);
3715 	}
3716 
3717 	return (0);
3718 }
3719 
3720 void
3721 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3722 {
3723 	struct mpssas_softc *sassc;
3724 	struct mpssas_lun *lun, *lun_tmp;
3725 	struct mpssas_target *targ;
3726 	int i;
3727 
3728 	sassc = sc->sassc;
3729 	/*
3730 	 * The number of targets is based on IOC Facts, so free all of
3731 	 * the allocated LUNs for each target and then the target buffer
3732 	 * itself.
3733 	 */
3734 	for (i=0; i< maxtargets; i++) {
3735 		targ = &sassc->targets[i];
3736 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3737 			free(lun, M_MPT2);
3738 		}
3739 	}
3740 	free(sassc->targets, M_MPT2);
3741 
3742 	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3743 	    M_MPT2, M_WAITOK|M_ZERO);
3744 	if (!sassc->targets) {
3745 		panic("%s failed to alloc targets with error %d\n",
3746 		    __func__, ENOMEM);
3747 	}
3748 }
3749