xref: /freebsd/sys/dev/mps/mps_sas.c (revision b4af4f93c682e445bf159f0d1ec90b636296c946)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2009 Yahoo! Inc.
5  * Copyright (c) 2011-2015 LSI Corp.
6  * Copyright (c) 2013-2015 Avago Technologies
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
31  *
32  * $FreeBSD$
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 /* Communications core for Avago Technologies (LSI) MPT2 */
39 
40 /* TODO Move headers to mpsvar */
41 #include <sys/types.h>
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/selinfo.h>
46 #include <sys/module.h>
47 #include <sys/bus.h>
48 #include <sys/conf.h>
49 #include <sys/bio.h>
50 #include <sys/malloc.h>
51 #include <sys/uio.h>
52 #include <sys/sysctl.h>
53 #include <sys/endian.h>
54 #include <sys/queue.h>
55 #include <sys/kthread.h>
56 #include <sys/taskqueue.h>
57 #include <sys/sbuf.h>
58 
59 #include <machine/bus.h>
60 #include <machine/resource.h>
61 #include <sys/rman.h>
62 
63 #include <machine/stdarg.h>
64 
65 #include <cam/cam.h>
66 #include <cam/cam_ccb.h>
67 #include <cam/cam_xpt.h>
68 #include <cam/cam_debug.h>
69 #include <cam/cam_sim.h>
70 #include <cam/cam_xpt_sim.h>
71 #include <cam/cam_xpt_periph.h>
72 #include <cam/cam_periph.h>
73 #include <cam/scsi/scsi_all.h>
74 #include <cam/scsi/scsi_message.h>
75 #include <cam/scsi/smp_all.h>
76 
77 #include <dev/mps/mpi/mpi2_type.h>
78 #include <dev/mps/mpi/mpi2.h>
79 #include <dev/mps/mpi/mpi2_ioc.h>
80 #include <dev/mps/mpi/mpi2_sas.h>
81 #include <dev/mps/mpi/mpi2_cnfg.h>
82 #include <dev/mps/mpi/mpi2_init.h>
83 #include <dev/mps/mpi/mpi2_tool.h>
84 #include <dev/mps/mps_ioctl.h>
85 #include <dev/mps/mpsvar.h>
86 #include <dev/mps/mps_table.h>
87 #include <dev/mps/mps_sas.h>
88 
89 #define MPSSAS_DISCOVERY_TIMEOUT	20
90 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
91 
92 /*
93  * static array to check SCSI OpCode for EEDP protection bits
94  */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * EEDP flags indexed by SCSI opcode.  The non-zero slots correspond to the
 * data read/write/verify commands that can carry protection information:
 *   0x28 READ(10), 0x2A WRITE(10), 0x2E WRITE AND VERIFY(10), 0x2F VERIFY(10),
 *   0x41 WRITE SAME(10), 0x88 READ(16), 0x8A WRITE(16),
 *   0x8E WRITE AND VERIFY(16), 0x8F VERIFY(16), 0x93 WRITE SAME(16),
 *   0xA8 READ(12), 0xAA WRITE(12), 0xAE WRITE AND VERIFY(12), 0xAF VERIFY(12).
 * (Opcode meanings per SBC; verify against the EEDP support matrix.)
 */
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
116 
117 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
118 
119 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mpssas_poll(struct cam_sim *sim);
123 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
124     struct mps_command *cm);
125 static void mpssas_scsiio_timeout(void *data);
126 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
127 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
128     struct mps_command *cm, union ccb *ccb);
129 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
130 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
131 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
132 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
133 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
134 			       uint64_t sasaddr);
135 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
136 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
137 static void mpssas_async(void *callback_arg, uint32_t code,
138 			 struct cam_path *path, void *arg);
139 static int mpssas_send_portenable(struct mps_softc *sc);
140 static void mpssas_portenable_complete(struct mps_softc *sc,
141     struct mps_command *cm);
142 
143 struct mpssas_target *
144 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
145 {
146 	struct mpssas_target *target;
147 	int i;
148 
149 	for (i = start; i < sassc->maxtargets; i++) {
150 		target = &sassc->targets[i];
151 		if (target->handle == handle)
152 			return (target);
153 	}
154 
155 	return (NULL);
156 }
157 
158 /* we need to freeze the simq during attach and diag reset, to avoid failing
159  * commands before device handles have been found by discovery.  Since
160  * discovery involves reading config pages and possibly sending commands,
161  * discovery actions may continue even after we receive the end of discovery
162  * event, so refcount discovery actions instead of assuming we can unfreeze
163  * the simq when we get the event.
164  */
165 void
166 mpssas_startup_increment(struct mpssas_softc *sassc)
167 {
168 	MPS_FUNCTRACE(sassc->sc);
169 
170 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
171 		if (sassc->startup_refcount++ == 0) {
172 			/* just starting, freeze the simq */
173 			mps_dprint(sassc->sc, MPS_INIT,
174 			    "%s freezing simq\n", __func__);
175 			xpt_hold_boot();
176 			xpt_freeze_simq(sassc->sim, 1);
177 		}
178 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
179 		    sassc->startup_refcount);
180 	}
181 }
182 
183 void
184 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
185 {
186 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
187 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
188 		xpt_release_simq(sassc->sim, 1);
189 		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
190 	}
191 }
192 
193 void
194 mpssas_startup_decrement(struct mpssas_softc *sassc)
195 {
196 	MPS_FUNCTRACE(sassc->sc);
197 
198 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
199 		if (--sassc->startup_refcount == 0) {
200 			/* finished all discovery-related actions, release
201 			 * the simq and rescan for the latest topology.
202 			 */
203 			mps_dprint(sassc->sc, MPS_INIT,
204 			    "%s releasing simq\n", __func__);
205 			sassc->flags &= ~MPSSAS_IN_STARTUP;
206 			xpt_release_simq(sassc->sim, 1);
207 			xpt_release_boot();
208 		}
209 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
210 		    sassc->startup_refcount);
211 	}
212 }
213 
214 /*
215  * The firmware requires us to stop sending commands when we're doing task
216  * management.
217  * XXX The logic for serializing the device has been made lazy and moved to
218  * mpssas_prepare_for_tm().
219  */
220 struct mps_command *
221 mpssas_alloc_tm(struct mps_softc *sc)
222 {
223 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
224 	struct mps_command *tm;
225 
226 	tm = mps_alloc_high_priority_command(sc);
227 	if (tm == NULL)
228 		return (NULL);
229 
230 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
231 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
232 	return tm;
233 }
234 
235 void
236 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
237 {
238 	int target_id = 0xFFFFFFFF;
239 
240 	if (tm == NULL)
241 		return;
242 
243 	/*
244 	 * For TM's the devq is frozen for the device.  Unfreeze it here and
245 	 * free the resources used for freezing the devq.  Must clear the
246 	 * INRESET flag as well or scsi I/O will not work.
247 	 */
248 	if (tm->cm_targ != NULL) {
249 		tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
250 		target_id = tm->cm_targ->tid;
251 	}
252 	if (tm->cm_ccb) {
253 		mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
254 		    target_id);
255 		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
256 		xpt_free_path(tm->cm_ccb->ccb_h.path);
257 		xpt_free_ccb(tm->cm_ccb);
258 	}
259 
260 	mps_free_high_priority_command(sc, tm);
261 }
262 
263 void
264 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
265 {
266 	struct mpssas_softc *sassc = sc->sassc;
267 	path_id_t pathid;
268 	target_id_t targetid;
269 	union ccb *ccb;
270 
271 	MPS_FUNCTRACE(sc);
272 	pathid = cam_sim_path(sassc->sim);
273 	if (targ == NULL)
274 		targetid = CAM_TARGET_WILDCARD;
275 	else
276 		targetid = targ - sassc->targets;
277 
278 	/*
279 	 * Allocate a CCB and schedule a rescan.
280 	 */
281 	ccb = xpt_alloc_ccb_nowait();
282 	if (ccb == NULL) {
283 		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
284 		return;
285 	}
286 
287 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
288 	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
289 		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
290 		xpt_free_ccb(ccb);
291 		return;
292 	}
293 
294 	if (targetid == CAM_TARGET_WILDCARD)
295 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
296 	else
297 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
298 
299 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
300 	xpt_rescan(ccb);
301 }
302 
303 static void
304 mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
305 {
306 	struct sbuf sb;
307 	va_list ap;
308 	char str[224];
309 	char path_str[64];
310 
311 	if (cm == NULL)
312 		return;
313 
314 	/* No need to be in here if debugging isn't enabled */
315 	if ((cm->cm_sc->mps_debug & level) == 0)
316 		return;
317 
318 	sbuf_new(&sb, str, sizeof(str), 0);
319 
320 	va_start(ap, fmt);
321 
322 	if (cm->cm_ccb != NULL) {
323 		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
324 				sizeof(path_str));
325 		sbuf_cat(&sb, path_str);
326 		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
327 			scsi_command_string(&cm->cm_ccb->csio, &sb);
328 			sbuf_printf(&sb, "length %d ",
329 				    cm->cm_ccb->csio.dxfer_len);
330 		}
331 	}
332 	else {
333 		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
334 		    cam_sim_name(cm->cm_sc->sassc->sim),
335 		    cam_sim_unit(cm->cm_sc->sassc->sim),
336 		    cam_sim_bus(cm->cm_sc->sassc->sim),
337 		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
338 		    cm->cm_lun);
339 	}
340 
341 	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
342 	sbuf_vprintf(&sb, fmt, ap);
343 	sbuf_finish(&sb);
344 	mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
345 
346 	va_end(ap);
347 }
348 
349 
350 static void
351 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
352 {
353 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
354 	struct mpssas_target *targ;
355 	uint16_t handle;
356 
357 	MPS_FUNCTRACE(sc);
358 
359 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
360 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
361 	targ = tm->cm_targ;
362 
363 	if (reply == NULL) {
364 		/* XXX retry the remove after the diag reset completes? */
365 		mps_dprint(sc, MPS_FAULT,
366 		    "%s NULL reply resetting device 0x%04x\n", __func__,
367 		    handle);
368 		mpssas_free_tm(sc, tm);
369 		return;
370 	}
371 
372 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
373 	    MPI2_IOCSTATUS_SUCCESS) {
374 		mps_dprint(sc, MPS_ERROR,
375 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
376 		   le16toh(reply->IOCStatus), handle);
377 	}
378 
379 	mps_dprint(sc, MPS_XINFO,
380 	    "Reset aborted %u commands\n", reply->TerminationCount);
381 	mps_free_reply(sc, tm->cm_reply_data);
382 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
383 
384 	mps_dprint(sc, MPS_XINFO,
385 	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
386 
387 	/*
388 	 * Don't clear target if remove fails because things will get confusing.
389 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
390 	 * this target id if possible, and so we can assign the same target id
391 	 * to this device if it comes back in the future.
392 	 */
393 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
394 	    MPI2_IOCSTATUS_SUCCESS) {
395 		targ = tm->cm_targ;
396 		targ->handle = 0x0;
397 		targ->encl_handle = 0x0;
398 		targ->encl_slot = 0x0;
399 		targ->exp_dev_handle = 0x0;
400 		targ->phy_num = 0x0;
401 		targ->linkrate = 0x0;
402 		targ->devinfo = 0x0;
403 		targ->flags = 0x0;
404 	}
405 
406 	mpssas_free_tm(sc, tm);
407 }
408 
409 
/*
 * No need to call MPI2_SAS_OP_REMOVE_DEVICE for volume removal.
 * Otherwise, deleting a volume is handled the same way as removing a
 * bare drive.
 */
414 void
415 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
416 {
417 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
418 	struct mps_softc *sc;
419 	struct mps_command *tm;
420 	struct mpssas_target *targ = NULL;
421 
422 	MPS_FUNCTRACE(sassc->sc);
423 	sc = sassc->sc;
424 
425 #ifdef WD_SUPPORT
426 	/*
427 	 * If this is a WD controller, determine if the disk should be exposed
428 	 * to the OS or not.  If disk should be exposed, return from this
429 	 * function without doing anything.
430 	 */
431 	if (sc->WD_available && (sc->WD_hide_expose ==
432 	    MPS_WD_EXPOSE_ALWAYS)) {
433 		return;
434 	}
435 #endif //WD_SUPPORT
436 
437 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
438 	if (targ == NULL) {
439 		/* FIXME: what is the action? */
440 		/* We don't know about this device? */
441 		mps_dprint(sc, MPS_ERROR,
442 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
443 		return;
444 	}
445 
446 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
447 
448 	tm = mpssas_alloc_tm(sc);
449 	if (tm == NULL) {
450 		mps_dprint(sc, MPS_ERROR,
451 		    "%s: command alloc failure\n", __func__);
452 		return;
453 	}
454 
455 	mpssas_rescan_target(sc, targ);
456 
457 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
458 	req->DevHandle = targ->handle;
459 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
460 
461 	/* SAS Hard Link Reset / SATA Link Reset */
462 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
463 
464 	tm->cm_targ = targ;
465 	tm->cm_data = NULL;
466 	tm->cm_complete = mpssas_remove_volume;
467 	tm->cm_complete_data = (void *)(uintptr_t)handle;
468 
469 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
470 	    __func__, targ->tid);
471 	mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
472 
473 	mps_map_command(sc, tm);
474 }
475 
476 /*
477  * The MPT2 firmware performs debounce on the link to avoid transient link
478  * errors and false removals.  When it does decide that link has been lost
479  * and a device need to go away, it expects that the host will perform a
480  * target reset and then an op remove.  The reset has the side-effect of
481  * aborting any outstanding requests for the device, which is required for
482  * the op-remove to succeed.  It's not clear if the host should check for
483  * the device coming back alive after the reset.
484  */
485 void
486 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
487 {
488 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
489 	struct mps_softc *sc;
490 	struct mps_command *cm;
491 	struct mpssas_target *targ = NULL;
492 
493 	MPS_FUNCTRACE(sassc->sc);
494 
495 	sc = sassc->sc;
496 
497 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
498 	if (targ == NULL) {
499 		/* FIXME: what is the action? */
500 		/* We don't know about this device? */
501 		mps_dprint(sc, MPS_ERROR,
502 		    "%s : invalid handle 0x%x \n", __func__, handle);
503 		return;
504 	}
505 
506 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
507 
508 	cm = mpssas_alloc_tm(sc);
509 	if (cm == NULL) {
510 		mps_dprint(sc, MPS_ERROR,
511 		    "%s: command alloc failure\n", __func__);
512 		return;
513 	}
514 
515 	mpssas_rescan_target(sc, targ);
516 
517 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
518 	memset(req, 0, sizeof(*req));
519 	req->DevHandle = htole16(targ->handle);
520 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
521 
522 	/* SAS Hard Link Reset / SATA Link Reset */
523 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
524 
525 	cm->cm_targ = targ;
526 	cm->cm_data = NULL;
527 	cm->cm_complete = mpssas_remove_device;
528 	cm->cm_complete_data = (void *)(uintptr_t)handle;
529 
530 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
531 	    __func__, targ->tid);
532 	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
533 
534 	mps_map_command(sc, cm);
535 }
536 
/*
 * Completion callback for the target reset sent by mpssas_prepare_remove().
 * Reuses the same command to issue the SAS_IO_UNIT_CONTROL REMOVE_DEVICE
 * operation, deferring submission until all I/O for the target has drained.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	/* The device handle was stashed in cm_complete_data by the submitter. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply resetting device 0x%04x\n", __func__,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		/* Log, but proceed with the remove regardless. */
		mps_dprint(sc, MPS_ERROR,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	/*
	 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
	 * They should be aborted or time out and we'll kick this off there
	 * if so.
	 */
	if (TAILQ_FIRST(&targ->commands) == NULL) {
		mps_dprint(sc, MPS_INFO, "No pending commands: starting remove_device\n");
		mps_map_command(sc, tm);
		targ->pending_remove_tm = NULL;
	} else {
		/* Defer; the last completing command submits this TM. */
		targ->pending_remove_tm = tm;
	}


	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
}
612 
/*
 * Completion callback for the SAS_OP_REMOVE_DEVICE request issued by
 * mpssas_remove_device().  On success the target slot and its cached LUN
 * list are cleared so the slot can be reused.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	/* The device handle was stashed in cm_complete_data by the submitter. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * At this point, we should have no pending commands for the target.
	 * The remove target has just completed.
	 */
	KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
	    ("%s: no commands should be pending\n", __func__));


	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Free every LUN record cached for this target. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
688 
689 static int
690 mpssas_register_events(struct mps_softc *sc)
691 {
692 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
693 
694 	bzero(events, 16);
695 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
696 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
697 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
698 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
699 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
700 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
701 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
702 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
703 	setbit(events, MPI2_EVENT_IR_VOLUME);
704 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
705 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
706 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
707 
708 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
709 	    &sc->sassc->mpssas_eh);
710 
711 	return (0);
712 }
713 
/*
 * Attach the SAS/CAM layer: allocate the softc and target array, create the
 * SIM queue, SIM, and bus, start the event taskqueue, register for async
 * and firmware events, and freeze the SIM queue until discovery completes.
 * Returns 0 on success or an errno; on error, partially allocated state is
 * torn down via mps_detach_sas().
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);

	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	sc->sassc = sassc;
	sassc->sc = sc;

	/* Size the SIMQ to the non-high-priority request count, less one. */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_INIT|MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_INIT|MPS_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mpssas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_ERROR|MPS_INIT,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

		event = AC_ADVINFO_CHANGED;
		status = xpt_register_async(event, mpssas_async, sc,
					    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);

	mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
	return (error);
}
831 
/*
 * Tear down everything mps_attach_sas() created: firmware event handlers,
 * the event taskqueue, the CAM async path, SIM, bus, SIM queue, and the
 * per-target LUN lists.  Safe to call on a partially attached instance;
 * always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	/* Nothing to do if the SAS layer never attached. */
	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Drop any outstanding startup references so the simq is released. */
	while (sassc->startup_refcount != 0)
		mpssas_startup_decrement(sassc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free the LUN records hanging off each target slot. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
894 
895 void
896 mpssas_discovery_end(struct mpssas_softc *sassc)
897 {
898 	struct mps_softc *sc = sassc->sc;
899 
900 	MPS_FUNCTRACE(sc);
901 
902 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
903 		callout_stop(&sassc->discovery_callout);
904 
905 	/*
906 	 * After discovery has completed, check the mapping table for any
907 	 * missing devices and update their missing counts. Only do this once
908 	 * whenever the driver is initialized so that missing counts aren't
909 	 * updated unnecessarily. Note that just because discovery has
910 	 * completed doesn't mean that events have been processed yet. The
911 	 * check_devices function is a callout timer that checks if ALL devices
912 	 * are missing. If so, it will wait a little longer for events to
913 	 * complete and keep resetting itself until some device in the mapping
914 	 * table is not missing, meaning that event processing has started.
915 	 */
916 	if (sc->track_mapping_events) {
917 		mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
918 		    "completed. Check for missing devices in the mapping "
919 		    "table.\n");
920 		callout_reset(&sc->device_check_callout,
921 		    MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
922 		    sc);
923 	}
924 }
925 
926 static void
927 mpssas_action(struct cam_sim *sim, union ccb *ccb)
928 {
929 	struct mpssas_softc *sassc;
930 
931 	sassc = cam_sim_softc(sim);
932 
933 	MPS_FUNCTRACE(sassc->sc);
934 	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
935 	    ccb->ccb_h.func_code);
936 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
937 
938 	switch (ccb->ccb_h.func_code) {
939 	case XPT_PATH_INQ:
940 	{
941 		struct ccb_pathinq *cpi = &ccb->cpi;
942 		struct mps_softc *sc = sassc->sc;
943 
944 		cpi->version_num = 1;
945 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
946 		cpi->target_sprt = 0;
947 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
948 		cpi->hba_eng_cnt = 0;
949 		cpi->max_target = sassc->maxtargets - 1;
950 		cpi->max_lun = 255;
951 
952 		/*
953 		 * initiator_id is set here to an ID outside the set of valid
954 		 * target IDs (including volumes).
955 		 */
956 		cpi->initiator_id = sassc->maxtargets;
957 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
958 		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
959 		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
960 		cpi->unit_number = cam_sim_unit(sim);
961 		cpi->bus_id = cam_sim_bus(sim);
962 		cpi->base_transfer_speed = 150000;
963 		cpi->transport = XPORT_SAS;
964 		cpi->transport_version = 0;
965 		cpi->protocol = PROTO_SCSI;
966 		cpi->protocol_version = SCSI_REV_SPC;
967 		cpi->maxio = sc->maxio;
968 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
969 		break;
970 	}
971 	case XPT_GET_TRAN_SETTINGS:
972 	{
973 		struct ccb_trans_settings	*cts;
974 		struct ccb_trans_settings_sas	*sas;
975 		struct ccb_trans_settings_scsi	*scsi;
976 		struct mpssas_target *targ;
977 
978 		cts = &ccb->cts;
979 		sas = &cts->xport_specific.sas;
980 		scsi = &cts->proto_specific.scsi;
981 
982 		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
983 		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
984 		    cts->ccb_h.target_id));
985 		targ = &sassc->targets[cts->ccb_h.target_id];
986 		if (targ->handle == 0x0) {
987 			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
988 			break;
989 		}
990 
991 		cts->protocol_version = SCSI_REV_SPC2;
992 		cts->transport = XPORT_SAS;
993 		cts->transport_version = 0;
994 
995 		sas->valid = CTS_SAS_VALID_SPEED;
996 		switch (targ->linkrate) {
997 		case 0x08:
998 			sas->bitrate = 150000;
999 			break;
1000 		case 0x09:
1001 			sas->bitrate = 300000;
1002 			break;
1003 		case 0x0a:
1004 			sas->bitrate = 600000;
1005 			break;
1006 		default:
1007 			sas->valid = 0;
1008 		}
1009 
1010 		cts->protocol = PROTO_SCSI;
1011 		scsi->valid = CTS_SCSI_VALID_TQ;
1012 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1013 
1014 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1015 		break;
1016 	}
1017 	case XPT_CALC_GEOMETRY:
1018 		cam_calc_geometry(&ccb->ccg, /*extended*/1);
1019 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1020 		break;
1021 	case XPT_RESET_DEV:
1022 		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
1023 		mpssas_action_resetdev(sassc, ccb);
1024 		return;
1025 	case XPT_RESET_BUS:
1026 	case XPT_ABORT:
1027 	case XPT_TERM_IO:
1028 		mps_dprint(sassc->sc, MPS_XINFO,
1029 		    "mpssas_action faking success for abort or reset\n");
1030 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1031 		break;
1032 	case XPT_SCSI_IO:
1033 		mpssas_action_scsiio(sassc, ccb);
1034 		return;
1035 	case XPT_SMP_IO:
1036 		mpssas_action_smpio(sassc, ccb);
1037 		return;
1038 	default:
1039 		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
1040 		break;
1041 	}
1042 	xpt_done(ccb);
1043 
1044 }
1045 
1046 static void
1047 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1048     target_id_t target_id, lun_id_t lun_id)
1049 {
1050 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1051 	struct cam_path *path;
1052 
1053 	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1054 	    ac_code, target_id, (uintmax_t)lun_id);
1055 
1056 	if (xpt_create_path(&path, NULL,
1057 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1058 		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1059 			   "notification\n");
1060 		return;
1061 	}
1062 
1063 	xpt_async(ac_code, path, NULL);
1064 	xpt_free_path(path);
1065 }
1066 
/*
 * Force-complete every active command as part of a diagnostic reset.
 * Each non-free command is completed with a NULL reply, either through
 * its normal completion callback or by waking a synchronous waiter.
 * Anything that is neither is logged as an anomaly.  Called with the
 * mps lock held.
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;	/* set once this command was handed off/woken */

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPS_CM_STATE_FREE)
			continue;

		cm->cm_state = MPS_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/*
		 * A command flagged SATA_ID_TIMEOUT still owns its data
		 * buffer; release it here before completing the command.
		 */
		if (cm->cm_flags & MPS_CM_FLAGS_SATA_ID_TIMEOUT) {
			MPASS(cm->cm_data);
			free(cm->cm_data, M_MPT2);
			cm->cm_data = NULL;
		}

		/*
		 * Mark polled commands complete so their polling loop
		 * (elsewhere) can observe completion.
		 */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			/* Synchronous submitter is sleeping on cm; wake it. */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	/* No I/O can be in flight after the sweep above. */
	sc->io_cmds_active = 0;
}
1122 
/*
 * Re-synchronize SAS/CAM state after a controller reinit (diag reset):
 * re-enter startup/discovery mode, announce a bus reset to CAM,
 * force-complete all outstanding commands, and invalidate every cached
 * device handle so rediscovery assigns fresh ones.
 */
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	mpssas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_dprint(sc, MPS_INIT,
	    "%s startup %u after command completion\n", __func__,
	    sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		/* Outstanding count should be zero after the sweep above. */
		if (sc->sassc->targets[i].outstanding != 0)
			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		/* Flags are replaced wholesale, not OR'd in. */
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1166 
1167 static void
1168 mpssas_tm_timeout(void *data)
1169 {
1170 	struct mps_command *tm = data;
1171 	struct mps_softc *sc = tm->cm_sc;
1172 
1173 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1174 
1175 	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1176 	    "task mgmt %p timed out\n", tm);
1177 
1178 	KASSERT(tm->cm_state == MPS_CM_STATE_INQUEUE,
1179 	    ("command not inqueue\n"));
1180 
1181 	tm->cm_state = MPS_CM_STATE_BUSY;
1182 	mps_reinit(sc);
1183 }
1184 
/*
 * Completion handler for a LOGICAL_UNIT_RESET task management command.
 * If the LUN has no commands left, recovery of this logical unit is
 * done: announce the BDR and either continue aborting other timed-out
 * commands on the target or free the TM.  If commands remain, the
 * reset is treated as failed and we escalate to a target reset.
 */
static void
mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	unsigned int cm_count = 0;
	struct mps_command *cm;
	struct mpssas_target *targ;

	/* TM completed; disarm its watchdog. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 * XXXSL So should it be an assertion?
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_RECOVERY|MPS_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mps_dprint(sc, MPS_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mps_reinit(sc);
		}
		return;
	}

	/*
	 * NOTE(review): ResponseCode is a single byte in the MPI TM reply;
	 * le32toh on it is a no-op on little-endian hosts but would mangle
	 * the printed value on big-endian — confirm the field width.
	 */
	mps_dprint(sc, MPS_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		mpssas_announce_reset(sc, AC_SENT_BDR, targ->tid, tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
			    "More commands to abort for target %u\n",
			    targ->tid);
			/* Reuse this TM to abort the next timed-out command. */
			mpssas_send_abort(sc, tm, cm);
		} else {
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
	} else {
		/*
		 * If we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
	}
}
1284 
1285 static void
1286 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1287 {
1288 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1289 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1290 	struct mpssas_target *targ;
1291 
1292 	callout_stop(&tm->cm_callout);
1293 
1294 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1295 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1296 	targ = tm->cm_targ;
1297 
1298 	/*
1299 	 * Currently there should be no way we can hit this case.  It only
1300 	 * happens when we have a failure to allocate chain frames, and
1301 	 * task management commands don't have S/G lists.
1302 	 */
1303 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1304 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1305 			   "This should not happen!\n", __func__, tm->cm_flags);
1306 		mpssas_free_tm(sc, tm);
1307 		return;
1308 	}
1309 
1310 	if (reply == NULL) {
1311 		mps_dprint(sc, MPS_RECOVERY,
1312 		    "NULL target reset reply for tm %pi TaskMID %u\n",
1313 		    tm, le16toh(req->TaskMID));
1314 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1315 			/* this completion was due to a reset, just cleanup */
1316 			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1317 			    "reset, ignoring NULL target reset reply\n");
1318 			targ->tm = NULL;
1319 			mpssas_free_tm(sc, tm);
1320 		} else {
1321 			/* we should have gotten a reply. */
1322 			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1323 			    "target reset attempt, resetting controller\n");
1324 			mps_reinit(sc);
1325 		}
1326 		return;
1327 	}
1328 
1329 	mps_dprint(sc, MPS_RECOVERY,
1330 	    "target reset status 0x%x code 0x%x count %u\n",
1331 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1332 	    le32toh(reply->TerminationCount));
1333 
1334 	if (targ->outstanding == 0) {
1335 		/* we've finished recovery for this target and all
1336 		 * of its logical units.
1337 		 */
1338 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1339 		    "Finished reset recovery for target %u\n", targ->tid);
1340 
1341 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1342 		    CAM_LUN_WILDCARD);
1343 
1344 		targ->tm = NULL;
1345 		mpssas_free_tm(sc, tm);
1346 	} else {
1347 		/*
1348 		 * After a target reset, if this target still has
1349 		 * outstanding commands, the reset effectively failed,
1350 		 * regardless of the status reported.  escalate.
1351 		 */
1352 		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1353 		    "Target reset complete for target %u, but still have %u "
1354 		    "command(s), resetting controller\n", targ->tid,
1355 		    targ->outstanding);
1356 		mps_reinit(sc);
1357 	}
1358 }
1359 
1360 #define MPS_RESET_TIMEOUT 30
1361 
1362 int
1363 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1364 {
1365 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1366 	struct mpssas_target *target;
1367 	int err;
1368 
1369 	target = tm->cm_targ;
1370 	if (target->handle == 0) {
1371 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1372 		    __func__, target->tid);
1373 		return -1;
1374 	}
1375 
1376 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1377 	req->DevHandle = htole16(target->handle);
1378 	req->TaskType = type;
1379 
1380 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1381 		/* XXX Need to handle invalid LUNs */
1382 		MPS_SET_LUN(req->LUN, tm->cm_lun);
1383 		tm->cm_targ->logical_unit_resets++;
1384 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1385 		    "Sending logical unit reset to target %u lun %d\n",
1386 		    target->tid, tm->cm_lun);
1387 		tm->cm_complete = mpssas_logical_unit_reset_complete;
1388 		mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1389 	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1390 		/*
1391 		 * Target reset method =
1392 		 * 	SAS Hard Link Reset / SATA Link Reset
1393 		 */
1394 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1395 		tm->cm_targ->target_resets++;
1396 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1397 		    "Sending target reset to target %u\n", target->tid);
1398 		tm->cm_complete = mpssas_target_reset_complete;
1399 		mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1400 	} else {
1401 		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
1402 		return -1;
1403 	}
1404 
1405 	tm->cm_data = NULL;
1406 	tm->cm_complete_data = (void *)tm;
1407 
1408 	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1409 	    mpssas_tm_timeout, tm);
1410 
1411 	err = mps_map_command(sc, tm);
1412 	if (err)
1413 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1414 		    "error %d sending reset type %u\n",
1415 		    err, type);
1416 
1417 	return err;
1418 }
1419 
1420 
/*
 * Completion handler for an ABORT_TASK task management command.  On
 * success, either continue aborting the target's remaining timed-out
 * commands or finish recovery and free the TM.  If the aborted command
 * itself never completed, the abort is treated as failed and we
 * escalate to a logical unit reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* TM completed; disarm its watchdog. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mps_dprint(sc, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mps_reinit(sc);
		}
		return;
	}

	mps_dprint(sc, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * If there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);

		/* Reuse this TM for the next timed-out command. */
		mpssas_send_abort(sc, tm, cm);
	} else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mps_dprint(sc, MPS_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1502 
1503 #define MPS_ABORT_TIMEOUT 5
1504 
1505 static int
1506 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1507 {
1508 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1509 	struct mpssas_target *targ;
1510 	int err;
1511 
1512 	targ = cm->cm_targ;
1513 	if (targ->handle == 0) {
1514 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1515 		    "%s null devhandle for target_id %d\n",
1516 		    __func__, cm->cm_ccb->ccb_h.target_id);
1517 		return -1;
1518 	}
1519 
1520 	mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1521 	    "Aborting command %p\n", cm);
1522 
1523 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1524 	req->DevHandle = htole16(targ->handle);
1525 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1526 
1527 	/* XXX Need to handle invalid LUNs */
1528 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1529 
1530 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1531 
1532 	tm->cm_data = NULL;
1533 	tm->cm_complete = mpssas_abort_complete;
1534 	tm->cm_complete_data = (void *)tm;
1535 	tm->cm_targ = cm->cm_targ;
1536 	tm->cm_lun = cm->cm_lun;
1537 
1538 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1539 	    mpssas_tm_timeout, tm);
1540 
1541 	targ->aborts++;
1542 
1543 	mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1544 
1545 	err = mps_map_command(sc, tm);
1546 	if (err)
1547 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1548 		    "error %d sending abort for cm %p SMID %u\n",
1549 		    err, cm, req->TaskMID);
1550 	return err;
1551 }
1552 
1553 static void
1554 mpssas_scsiio_timeout(void *data)
1555 {
1556 	sbintime_t elapsed, now;
1557 	union ccb *ccb;
1558 	struct mps_softc *sc;
1559 	struct mps_command *cm;
1560 	struct mpssas_target *targ;
1561 
1562 	cm = (struct mps_command *)data;
1563 	sc = cm->cm_sc;
1564 	ccb = cm->cm_ccb;
1565 	now = sbinuptime();
1566 
1567 	MPS_FUNCTRACE(sc);
1568 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1569 
1570 	mps_dprint(sc, MPS_XINFO|MPS_RECOVERY, "Timeout checking cm %p\n", sc);
1571 
1572 	/*
1573 	 * Run the interrupt handler to make sure it's not pending.  This
1574 	 * isn't perfect because the command could have already completed
1575 	 * and been re-used, though this is unlikely.
1576 	 */
1577 	mps_intr_locked(sc);
1578 	if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
1579 		mpssas_log_command(cm, MPS_XINFO,
1580 		    "SCSI command %p almost timed out\n", cm);
1581 		return;
1582 	}
1583 
1584 	if (cm->cm_ccb == NULL) {
1585 		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1586 		return;
1587 	}
1588 
1589 	targ = cm->cm_targ;
1590 	targ->timeouts++;
1591 
1592 	elapsed = now - ccb->ccb_h.qos.sim_data;
1593 	mpssas_log_command(cm, MPS_INFO|MPS_RECOVERY,
1594 	    "Command timeout on target %u(0x%04x) %d set, %d.%d elapsed\n",
1595 	    targ->tid, targ->handle, ccb->ccb_h.timeout,
1596 	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
1597 
1598 	/* XXX first, check the firmware state, to see if it's still
1599 	 * operational.  if not, do a diag reset.
1600 	 */
1601 	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1602 	cm->cm_flags |= MPS_CM_FLAGS_ON_RECOVERY | MPS_CM_FLAGS_TIMEDOUT;
1603 	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1604 
1605 	if (targ->tm != NULL) {
1606 		/* target already in recovery, just queue up another
1607 		 * timedout command to be processed later.
1608 		 */
1609 		mps_dprint(sc, MPS_RECOVERY,
1610 		    "queued timedout cm %p for processing by tm %p\n",
1611 		    cm, targ->tm);
1612 	} else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1613 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1614 		    "Sending abort to target %u for SMID %d\n", targ->tid,
1615 		    cm->cm_desc.Default.SMID);
1616 		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1617 		    cm, targ->tm);
1618 
1619 		/* start recovery by aborting the first timedout command */
1620 		mpssas_send_abort(sc, targ->tm, cm);
1621 	} else {
1622 		/* XXX queue this target up for recovery once a TM becomes
1623 		 * available.  The firmware only has a limited number of
1624 		 * HighPriority credits for the high priority requests used
1625 		 * for task management, and we ran out.
1626 		 *
1627 		 * Isilon: don't worry about this for now, since we have
1628 		 * more credits than disks in an enclosure, and limit
1629 		 * ourselves to one TM per target for recovery.
1630 		 */
1631 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1632 		    "timedout cm %p failed to allocate a tm\n", cm);
1633 	}
1634 
1635 }
1636 
/*
 * Translate an XPT_SCSI_IO CCB into an MPI2 SCSI IO request and hand
 * it to the hardware.  Validates the target, builds the request frame
 * (direction, tagging, LUN, CDB, optional EEDP protection fields),
 * arms the per-command timeout, and queues the command.  Completion is
 * asynchronous via mpssas_scsiio_complete(); error paths complete the
 * CCB inline with xpt_done().  Called with the mps lock held.
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero handle means the target isn't (or is no longer) mapped. */
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members don't accept direct SCSI I/O. */
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed. In either event,
	 * we're removing the target due to a firmware event telling us
	 * the device is now gone (as opposed to some transient event). Since
	 * we're opting to remove failed devices from the OS's view, we need
	 * to propagate that status up the stack.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/* Driver is being torn down; refuse new I/O. */
	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPSSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * No free command slot (or a diag reset just hit): freeze the simq
	 * and ask CAM to requeue; the queue is released elsewhere.
	 */
	cm = mps_alloc_command(sc);
	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mps_free_command(sc, cm);
		}
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Build the MPI2 SCSI IO request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* 32-byte CDBs need the additional-CDB-length field set. */
	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Merge in the per-target TLR (transport layer retry) setting. */
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB in, whether CAM handed us a pointer or inline bytes. */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	/* NOTE(review): IoFlags was already set to cdb_len above; this
	 * reassignment is redundant but harmless. */
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* lun is NULL after the loop if no matching LUN was found. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 of 16-byte CDBs,
				 * byte 2 of 6/10/12-byte CDBs. */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				/* Set RDPROTECT/WRPROTECT = 001b in byte 1. */
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	/* Hand the CCB to busdma via the USE_CCB flag when there is data. */
	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 *
	 * NOTE(review): the test below invokes direct-drive I/O when the
	 * field EQUALS MPS_WD_RETRY, which reads as the opposite of this
	 * comment — confirm MPS_WD_RETRY semantics before changing.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (csio->bio != NULL)
		biotrack(csio->bio, __func__);
#endif
	/* Stamp submission time (read back by the timeout handler). */
	csio->ccb_h.qos.sim_data = sbinuptime();
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mpssas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1914 
1915 /**
1916  * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1917  */
1918 static void
1919 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1920     Mpi2SCSIIOReply_t *mpi_reply)
1921 {
1922 	u32 response_info;
1923 	u8 *response_bytes;
1924 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1925 	    MPI2_IOCSTATUS_MASK;
1926 	u8 scsi_state = mpi_reply->SCSIState;
1927 	u8 scsi_status = mpi_reply->SCSIStatus;
1928 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1929 	const char *desc_ioc_state, *desc_scsi_status;
1930 
1931 	if (log_info == 0x31170000)
1932 		return;
1933 
1934 	desc_ioc_state = mps_describe_table(mps_iocstatus_string,
1935 	    ioc_status);
1936 	desc_scsi_status = mps_describe_table(mps_scsi_status_string,
1937 	    scsi_status);
1938 
1939 	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
1940 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
1941 
1942 	/*
1943 	 *We can add more detail about underflow data here
1944 	 * TO-DO
1945 	 */
1946 	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
1947 	    "scsi_state %b\n", desc_scsi_status, scsi_status,
1948 	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
1949 	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
1950 
1951 	if (sc->mps_debug & MPS_XINFO &&
1952 		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1953 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
1954 		scsi_sense_print(csio);
1955 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
1956 	}
1957 
1958 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
1959 		response_info = le32toh(mpi_reply->ResponseInfo);
1960 		response_bytes = (u8 *)&response_info;
1961 		mps_dprint(sc, MPS_XINFO, "response code(0x%1x): %s\n",
1962 		    response_bytes[0],
1963 		    mps_describe_table(mps_scsi_taskmgmt_string,
1964 		    response_bytes[0]));
1965 	}
1966 }
1967 
/*
 * Completion handler for XPT_SCSI_IO commands submitted by
 * mpssas_action_scsiio().  Runs with the mps mutex held.  Cancels the
 * I/O timeout, unmaps the data buffer, updates per-target accounting,
 * translates the MPI2 reply (if present) into a CAM CCB status, and
 * finishes the CCB via xpt_done().
 */
static void
mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpssas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;
	struct mpssas_target *target;
	target_id_t target_id;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* Cancel the per-command timeout armed at submit time. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;	/* NULL on fast path */
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Per-target accounting; mirrors the increments in the submit path. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (ccb->csio.bio != NULL)
		biotrack(ccb->csio.bio, __func__);
#endif

	/*
	 * Extra logging when this command completed while timeout/TM
	 * recovery was in progress for it or its target.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		KASSERT(cm->cm_state == MPS_CM_STATE_BUSY,
		    ("Not busy for CM_FLAGS_TIMEDOUT: %d\n", cm->cm_state));
		cm->cm_flags &= ~MPS_CM_FLAGS_ON_RECOVERY;
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm, MPS_RECOVERY,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_XINFO, "Error sending command, "
				   "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				/* No reply from firmware means success. */
				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mpssas_log_command(cm, MPS_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	} else
		ccb->ccb_h.sim_priv.entries[0].field = 0;

	/* Translate the MPI2 IOC status into a CAM CCB status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) &
		    MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		/* Copy firmware-captured autosense data into the CCB. */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
					returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			      sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* CDB bytes 3-4 are the big-endian allocation length. */
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}

		/*
		 * If this is a SATA direct-access end device, mark it so that
		 * a SCSI StartStopUnit command will be sent to it when the
		 * driver is being shutdown.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
		    ((sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
			target = &sassc->targets[target_id];
			target->supports_SSU = TRUE;
			mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
			    target_id);
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)
;
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_flags & MPS_CM_FLAGS_TIMEDOUT)
			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * These can sometimes be transient transport-related
		 * errors, and sometimes persistent drive-related errors.
		 * We used to retry these without decrementing the retry
		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
		 * we hit a persistent drive problem that returns one of
		 * these error codes, we would retry indefinitely.  So,
		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
		 * count and avoid infinite retries.  We're taking the
		 * potential risk of flagging false failures in the event
		 * of a topology-related error (e.g. a SAS expander problem
		 * causes a command addressed to a drive to fail), but
		 * avoiding getting into an infinite retry loop. However,
		 * if we get them while were moving a device, we should
		 * fail the request as 'not there' because the device
		 * is effectively gone.
		 */
		if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL)
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		mps_dprint(sc, MPS_INFO,
		    "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
		    mps_describe_table(mps_iocstatus_string,
		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
		    target_id, cm->cm_desc.Default.SMID,
		    le32toh(rep->IOCLogInfo),
		    (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) ? " departing" : "");
		mps_dprint(sc, MPS_XINFO,
		    "SCSIStatus %x SCSIState %x xfercount %u\n",
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mpssas_log_command(cm, MPS_XINFO,
		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

	/* Emit detailed diagnostics (only prints at XINFO debug level). */
	mps_sc_failed_io_info(sc,csio,rep);

	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_XINFO, "Command completed, "
		    "unfreezing SIM queue\n");
	}

	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	/*
	 * Check to see if we're removing the device. If so, and this is the
	 * last command on the queue, proceed with the deferred removal of the
	 * device.  Note, for removing a volume, this won't trigger because
	 * pending_remove_tm will be NULL.
	 */
	if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
		    cm->cm_targ->pending_remove_tm != NULL) {
			mps_dprint(sc, MPS_INFO, "Last pending command complete: starting remove_device\n");
			mps_map_command(sc, cm->cm_targ->pending_remove_tm);
			cm->cm_targ->pending_remove_tm = NULL;
		}
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2405 
2406 /* All Request reached here are Endian safe */
2407 static void
2408 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2409     union ccb *ccb) {
2410 	pMpi2SCSIIORequest_t	pIO_req;
2411 	struct mps_softc	*sc = sassc->sc;
2412 	uint64_t		virtLBA;
2413 	uint32_t		physLBA, stripe_offset, stripe_unit;
2414 	uint32_t		io_size, column;
2415 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2416 
2417 	/*
2418 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2419 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2420 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2421 	 * bit different than the 10/16 CDBs, handle them separately.
2422 	 */
2423 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2424 	CDB = pIO_req->CDB.CDB32;
2425 
2426 	/*
2427 	 * Handle 6 byte CDBs.
2428 	 */
2429 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2430 	    (CDB[0] == WRITE_6))) {
2431 		/*
2432 		 * Get the transfer size in blocks.
2433 		 */
2434 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2435 
2436 		/*
2437 		 * Get virtual LBA given in the CDB.
2438 		 */
2439 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2440 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2441 
2442 		/*
2443 		 * Check that LBA range for I/O does not exceed volume's
2444 		 * MaxLBA.
2445 		 */
2446 		if ((virtLBA + (uint64_t)io_size - 1) <=
2447 		    sc->DD_max_lba) {
2448 			/*
2449 			 * Check if the I/O crosses a stripe boundary.  If not,
2450 			 * translate the virtual LBA to a physical LBA and set
2451 			 * the DevHandle for the PhysDisk to be used.  If it
2452 			 * does cross a boundary, do normal I/O.  To get the
2453 			 * right DevHandle to use, get the map number for the
2454 			 * column, then use that map number to look up the
2455 			 * DevHandle of the PhysDisk.
2456 			 */
2457 			stripe_offset = (uint32_t)virtLBA &
2458 			    (sc->DD_stripe_size - 1);
2459 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2460 				physLBA = (uint32_t)virtLBA >>
2461 				    sc->DD_stripe_exponent;
2462 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2463 				column = physLBA % sc->DD_num_phys_disks;
2464 				pIO_req->DevHandle =
2465 				    htole16(sc->DD_column_map[column].dev_handle);
2466 				/* ???? Is this endian safe*/
2467 				cm->cm_desc.SCSIIO.DevHandle =
2468 				    pIO_req->DevHandle;
2469 
2470 				physLBA = (stripe_unit <<
2471 				    sc->DD_stripe_exponent) + stripe_offset;
2472 				ptrLBA = &pIO_req->CDB.CDB32[1];
2473 				physLBA_byte = (uint8_t)(physLBA >> 16);
2474 				*ptrLBA = physLBA_byte;
2475 				ptrLBA = &pIO_req->CDB.CDB32[2];
2476 				physLBA_byte = (uint8_t)(physLBA >> 8);
2477 				*ptrLBA = physLBA_byte;
2478 				ptrLBA = &pIO_req->CDB.CDB32[3];
2479 				physLBA_byte = (uint8_t)physLBA;
2480 				*ptrLBA = physLBA_byte;
2481 
2482 				/*
2483 				 * Set flag that Direct Drive I/O is
2484 				 * being done.
2485 				 */
2486 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2487 			}
2488 		}
2489 		return;
2490 	}
2491 
2492 	/*
2493 	 * Handle 10, 12 or 16 byte CDBs.
2494 	 */
2495 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2496 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2497 	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2498 	    (CDB[0] == WRITE_12))) {
2499 		/*
2500 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2501 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2502 		 * the else section.  10-byte and 12-byte CDB's are OK.
2503 		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2504 		 * ready to accept 12byte CDB for Direct IOs.
2505 		 */
2506 		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2507 		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2508 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2509 			/*
2510 			 * Get the transfer size in blocks.
2511 			 */
2512 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2513 
2514 			/*
2515 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2516 			 * LBA in the CDB depending on command.
2517 			 */
2518 			lba_idx = ((CDB[0] == READ_12) ||
2519 				(CDB[0] == WRITE_12) ||
2520 				(CDB[0] == READ_10) ||
2521 				(CDB[0] == WRITE_10))? 2 : 6;
2522 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2523 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2524 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2525 			    (uint64_t)CDB[lba_idx + 3];
2526 
2527 			/*
2528 			 * Check that LBA range for I/O does not exceed volume's
2529 			 * MaxLBA.
2530 			 */
2531 			if ((virtLBA + (uint64_t)io_size - 1) <=
2532 			    sc->DD_max_lba) {
2533 				/*
2534 				 * Check if the I/O crosses a stripe boundary.
2535 				 * If not, translate the virtual LBA to a
2536 				 * physical LBA and set the DevHandle for the
2537 				 * PhysDisk to be used.  If it does cross a
2538 				 * boundary, do normal I/O.  To get the right
2539 				 * DevHandle to use, get the map number for the
2540 				 * column, then use that map number to look up
2541 				 * the DevHandle of the PhysDisk.
2542 				 */
2543 				stripe_offset = (uint32_t)virtLBA &
2544 				    (sc->DD_stripe_size - 1);
2545 				if ((stripe_offset + io_size) <=
2546 				    sc->DD_stripe_size) {
2547 					physLBA = (uint32_t)virtLBA >>
2548 					    sc->DD_stripe_exponent;
2549 					stripe_unit = physLBA /
2550 					    sc->DD_num_phys_disks;
2551 					column = physLBA %
2552 					    sc->DD_num_phys_disks;
2553 					pIO_req->DevHandle =
2554 					    htole16(sc->DD_column_map[column].
2555 					    dev_handle);
2556 					cm->cm_desc.SCSIIO.DevHandle =
2557 					    pIO_req->DevHandle;
2558 
2559 					physLBA = (stripe_unit <<
2560 					    sc->DD_stripe_exponent) +
2561 					    stripe_offset;
2562 					ptrLBA =
2563 					    &pIO_req->CDB.CDB32[lba_idx];
2564 					physLBA_byte = (uint8_t)(physLBA >> 24);
2565 					*ptrLBA = physLBA_byte;
2566 					ptrLBA =
2567 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2568 					physLBA_byte = (uint8_t)(physLBA >> 16);
2569 					*ptrLBA = physLBA_byte;
2570 					ptrLBA =
2571 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2572 					physLBA_byte = (uint8_t)(physLBA >> 8);
2573 					*ptrLBA = physLBA_byte;
2574 					ptrLBA =
2575 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2576 					physLBA_byte = (uint8_t)physLBA;
2577 					*ptrLBA = physLBA_byte;
2578 
2579 					/*
2580 					 * Set flag that Direct Drive I/O is
2581 					 * being done.
2582 					 */
2583 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2584 				}
2585 			}
2586 		} else {
2587 			/*
2588 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2589 			 * 0.  Get the transfer size in blocks.
2590 			 */
2591 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2592 
2593 			/*
2594 			 * Get virtual LBA.
2595 			 */
2596 			virtLBA = ((uint64_t)CDB[2] << 54) |
2597 			    ((uint64_t)CDB[3] << 48) |
2598 			    ((uint64_t)CDB[4] << 40) |
2599 			    ((uint64_t)CDB[5] << 32) |
2600 			    ((uint64_t)CDB[6] << 24) |
2601 			    ((uint64_t)CDB[7] << 16) |
2602 			    ((uint64_t)CDB[8] << 8) |
2603 			    (uint64_t)CDB[9];
2604 
2605 			/*
2606 			 * Check that LBA range for I/O does not exceed volume's
2607 			 * MaxLBA.
2608 			 */
2609 			if ((virtLBA + (uint64_t)io_size - 1) <=
2610 			    sc->DD_max_lba) {
2611 				/*
2612 				 * Check if the I/O crosses a stripe boundary.
2613 				 * If not, translate the virtual LBA to a
2614 				 * physical LBA and set the DevHandle for the
2615 				 * PhysDisk to be used.  If it does cross a
2616 				 * boundary, do normal I/O.  To get the right
2617 				 * DevHandle to use, get the map number for the
2618 				 * column, then use that map number to look up
2619 				 * the DevHandle of the PhysDisk.
2620 				 */
2621 				stripe_offset = (uint32_t)virtLBA &
2622 				    (sc->DD_stripe_size - 1);
2623 				if ((stripe_offset + io_size) <=
2624 				    sc->DD_stripe_size) {
2625 					physLBA = (uint32_t)(virtLBA >>
2626 					    sc->DD_stripe_exponent);
2627 					stripe_unit = physLBA /
2628 					    sc->DD_num_phys_disks;
2629 					column = physLBA %
2630 					    sc->DD_num_phys_disks;
2631 					pIO_req->DevHandle =
2632 					    htole16(sc->DD_column_map[column].
2633 					    dev_handle);
2634 					cm->cm_desc.SCSIIO.DevHandle =
2635 					    pIO_req->DevHandle;
2636 
2637 					physLBA = (stripe_unit <<
2638 					    sc->DD_stripe_exponent) +
2639 					    stripe_offset;
2640 
2641 					/*
2642 					 * Set upper 4 bytes of LBA to 0.  We
2643 					 * assume that the phys disks are less
2644 					 * than 2 TB's in size.  Then, set the
2645 					 * lower 4 bytes.
2646 					 */
2647 					pIO_req->CDB.CDB32[2] = 0;
2648 					pIO_req->CDB.CDB32[3] = 0;
2649 					pIO_req->CDB.CDB32[4] = 0;
2650 					pIO_req->CDB.CDB32[5] = 0;
2651 					ptrLBA = &pIO_req->CDB.CDB32[6];
2652 					physLBA_byte = (uint8_t)(physLBA >> 24);
2653 					*ptrLBA = physLBA_byte;
2654 					ptrLBA = &pIO_req->CDB.CDB32[7];
2655 					physLBA_byte = (uint8_t)(physLBA >> 16);
2656 					*ptrLBA = physLBA_byte;
2657 					ptrLBA = &pIO_req->CDB.CDB32[8];
2658 					physLBA_byte = (uint8_t)(physLBA >> 8);
2659 					*ptrLBA = physLBA_byte;
2660 					ptrLBA = &pIO_req->CDB.CDB32[9];
2661 					physLBA_byte = (uint8_t)physLBA;
2662 					*ptrLBA = physLBA_byte;
2663 
2664 					/*
2665 					 * Set flag that Direct Drive I/O is
2666 					 * being done.
2667 					 */
2668 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2669 				}
2670 			}
2671 		}
2672 	}
2673 }
2674 
/*
 * Completion handler for SMP passthrough commands built by
 * mpssas_send_smpcmd().  Translates the firmware reply and the SMP
 * function result into a CAM status, then syncs/unloads the DMA map,
 * frees the command, and completes the CCB.
 */
static void
mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
			   __func__, cm->cm_flags);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
        }

	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/* Recover the 64-bit SAS address from the request (for logging). */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
		   "%#jx completed successfully\n", __func__,
		   (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame is the function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2738 
/*
 * Build and submit an SMP passthrough request to the device at 'sasaddr'
 * on behalf of an XPT_SMP_IO CCB.  The request and response buffers are
 * mapped together in one mps_map_command() call via a two-entry uio/iovec;
 * mpssas_smpio_complete() finishes the CCB.  On any setup failure the CCB
 * is completed here with an error status.
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
			   "%s: physical addresses not supported\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			/* Single-entry S/G list: use its address directly. */
			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the request, iovec 1 the response buffer. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;

}
2909 
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address of the SMP target
 * (either the device itself, or its parent expander) and hand the CCB to
 * mpssas_send_smpcmd().  Completes the CCB with an error status if no
 * usable SAS address can be found.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		/* A handle of 0 means this target slot is unused. */
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		/* Old probe code: look the parent target up by handle. */
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/* Current code: parent info is cached on the target itself. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3045 
/*
 * Handle an XPT_RESET_DEV CCB: issue a SCSI Target Reset task management
 * request (with a SAS hard link reset / SATA link reset) to the target
 * addressed by the CCB.  The CCB is completed from
 * mpssas_resetdev_complete() when the TM finishes; on command allocation
 * failure it is completed here with CAM_RESRC_UNAVAIL.
 */
static void
mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *tm;
	struct mpssas_target *targ;

	MPS_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_RESET_DEV\n",
	     ccb->ccb_h.target_id));
	sc = sassc->sc;
	tm = mpssas_alloc_tm(sc);
	if (tm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "command alloc failure in mpssas_action_resetdev\n");
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	/* TM requests carry no data; completion is routed back to the CCB. */
	tm->cm_data = NULL;
	tm->cm_complete = mpssas_resetdev_complete;
	tm->cm_complete_data = ccb;
	tm->cm_targ = targ;

	/* Block further I/O to this target until the reset completes. */
	mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
	mps_map_command(sc, tm);
}
3086 
3087 static void
3088 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3089 {
3090 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3091 	union ccb *ccb;
3092 
3093 	MPS_FUNCTRACE(sc);
3094 	mtx_assert(&sc->mps_mtx, MA_OWNED);
3095 
3096 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3097 	ccb = tm->cm_complete_data;
3098 
3099 	/*
3100 	 * Currently there should be no way we can hit this case.  It only
3101 	 * happens when we have a failure to allocate chain frames, and
3102 	 * task management commands don't have S/G lists.
3103 	 */
3104 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3105 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3106 
3107 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3108 
3109 		mps_dprint(sc, MPS_ERROR,
3110 			   "%s: cm_flags = %#x for reset of handle %#04x! "
3111 			   "This should not happen!\n", __func__, tm->cm_flags,
3112 			   req->DevHandle);
3113 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3114 		goto bailout;
3115 	}
3116 
3117 	mps_dprint(sc, MPS_XINFO,
3118 	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3119 	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3120 
3121 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3122 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
3123 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3124 		    CAM_LUN_WILDCARD);
3125 	}
3126 	else
3127 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3128 
3129 bailout:
3130 
3131 	mpssas_free_tm(sc, tm);
3132 	xpt_done(ccb);
3133 }
3134 
3135 static void
3136 mpssas_poll(struct cam_sim *sim)
3137 {
3138 	struct mpssas_softc *sassc;
3139 
3140 	sassc = cam_sim_softc(sim);
3141 
3142 	if (sassc->sc->mps_debug & MPS_TRACE) {
3143 		/* frequent debug messages during a panic just slow
3144 		 * everything down too much.
3145 		 */
3146 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3147 		sassc->sc->mps_debug &= ~MPS_TRACE;
3148 	}
3149 
3150 	mps_intr_locked(sassc->sc);
3151 }
3152 
/*
 * CAM async event callback.  Only AC_ADVINFO_CHANGED with a
 * CDAI_TYPE_RCAPLONG buffer is of interest: re-read the long read
 * capacity data for the LUN and record whether it is EEDP (protection
 * information) formatted, caching the result in the per-target LUN list.
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look for an existing record of this LUN on the target. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			/* First time we've seen this LUN; track it. */
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read capacity data via an
		 * XPT_DEV_ADVINFO CCB built on the stack.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_NONE;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			/*
			 * Protection is enabled; only types 1 and 3 are
			 * treated as EEDP-formatted here.
			 */
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
	default:
		break;
	}
}
3252 
3253 /*
3254  * Set the INRESET flag for this target so that no I/O will be sent to
3255  * the target until the reset has completed.  If an I/O request does
3256  * happen, the devq will be frozen.  The CCB holds the path which is
3257  * used to release the devq.  The devq is released and the CCB is freed
3258  * when the TM completes.
3259  */
3260 void
3261 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3262     struct mpssas_target *target, lun_id_t lun_id)
3263 {
3264 	union ccb *ccb;
3265 	path_id_t path_id;
3266 
3267 	ccb = xpt_alloc_ccb_nowait();
3268 	if (ccb) {
3269 		path_id = cam_sim_path(sc->sassc->sim);
3270 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3271 		    target->tid, lun_id) != CAM_REQ_CMP) {
3272 			xpt_free_ccb(ccb);
3273 		} else {
3274 			tm->cm_ccb = ccb;
3275 			tm->cm_targ = target;
3276 			target->flags |= MPSSAS_TARGET_INRESET;
3277 		}
3278 	}
3279 }
3280 
3281 int
3282 mpssas_startup(struct mps_softc *sc)
3283 {
3284 
3285 	/*
3286 	 * Send the port enable message and set the wait_for_port_enable flag.
3287 	 * This flag helps to keep the simq frozen until all discovery events
3288 	 * are processed.
3289 	 */
3290 	sc->wait_for_port_enable = 1;
3291 	mpssas_send_portenable(sc);
3292 	return (0);
3293 }
3294 
3295 static int
3296 mpssas_send_portenable(struct mps_softc *sc)
3297 {
3298 	MPI2_PORT_ENABLE_REQUEST *request;
3299 	struct mps_command *cm;
3300 
3301 	MPS_FUNCTRACE(sc);
3302 
3303 	if ((cm = mps_alloc_command(sc)) == NULL)
3304 		return (EBUSY);
3305 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3306 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3307 	request->MsgFlags = 0;
3308 	request->VP_ID = 0;
3309 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3310 	cm->cm_complete = mpssas_portenable_complete;
3311 	cm->cm_data = NULL;
3312 	cm->cm_sge = NULL;
3313 
3314 	mps_map_command(sc, cm);
3315 	mps_dprint(sc, MPS_XINFO,
3316 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3317 	    cm, cm->cm_req, cm->cm_complete);
3318 	return (0);
3319 }
3320 
3321 static void
3322 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3323 {
3324 	MPI2_PORT_ENABLE_REPLY *reply;
3325 	struct mpssas_softc *sassc;
3326 
3327 	MPS_FUNCTRACE(sc);
3328 	sassc = sc->sassc;
3329 
3330 	/*
3331 	 * Currently there should be no way we can hit this case.  It only
3332 	 * happens when we have a failure to allocate chain frames, and
3333 	 * port enable commands don't have S/G lists.
3334 	 */
3335 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3336 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3337 			   "This should not happen!\n", __func__, cm->cm_flags);
3338 	}
3339 
3340 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3341 	if (reply == NULL)
3342 		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3343 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3344 	    MPI2_IOCSTATUS_SUCCESS)
3345 		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3346 
3347 	mps_free_command(sc, cm);
3348 
3349 	/*
3350 	 * Get WarpDrive info after discovery is complete but before the scan
3351 	 * starts.  At this point, all devices are ready to be exposed to the
3352 	 * OS.  If devices should be hidden instead, take them out of the
3353 	 * 'targets' array before the scan.  The devinfo for a disk will have
3354 	 * some info and a volume's will be 0.  Use that to remove disks.
3355 	 */
3356 	mps_wd_config_pages(sc);
3357 
3358 	/*
3359 	 * Done waiting for port enable to complete.  Decrement the refcount.
3360 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3361 	 * take place.  Since the simq was explicitly frozen before port
3362 	 * enable, it must be explicitly released here to keep the
3363 	 * freeze/release count in sync.
3364 	 */
3365 	sc->wait_for_port_enable = 0;
3366 	sc->port_enable_complete = 1;
3367 	wakeup(&sc->port_enable_complete);
3368 	mpssas_startup_decrement(sassc);
3369 }
3370 
3371 int
3372 mpssas_check_id(struct mpssas_softc *sassc, int id)
3373 {
3374 	struct mps_softc *sc = sassc->sc;
3375 	char *ids;
3376 	char *name;
3377 
3378 	ids = &sc->exclude_ids[0];
3379 	while((name = strsep(&ids, ",")) != NULL) {
3380 		if (name[0] == '\0')
3381 			continue;
3382 		if (strtol(name, NULL, 0) == (long)id)
3383 			return (1);
3384 	}
3385 
3386 	return (0);
3387 }
3388 
3389 void
3390 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3391 {
3392 	struct mpssas_softc *sassc;
3393 	struct mpssas_lun *lun, *lun_tmp;
3394 	struct mpssas_target *targ;
3395 	int i;
3396 
3397 	sassc = sc->sassc;
3398 	/*
3399 	 * The number of targets is based on IOC Facts, so free all of
3400 	 * the allocated LUNs for each target and then the target buffer
3401 	 * itself.
3402 	 */
3403 	for (i=0; i< maxtargets; i++) {
3404 		targ = &sassc->targets[i];
3405 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3406 			free(lun, M_MPT2);
3407 		}
3408 	}
3409 	free(sassc->targets, M_MPT2);
3410 
3411 	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3412 	    M_MPT2, M_WAITOK|M_ZERO);
3413 }
3414