xref: /freebsd/sys/dev/mps/mps_sas.c (revision 5bb3134a8c21cb87b30e135ef168483f0333dabb)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2009 Yahoo! Inc.
5  * Copyright (c) 2011-2015 LSI Corp.
6  * Copyright (c) 2013-2015 Avago Technologies
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
31  *
32  * $FreeBSD$
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 /* Communications core for Avago Technologies (LSI) MPT2 */
39 
40 /* TODO Move headers to mpsvar */
41 #include <sys/types.h>
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/selinfo.h>
46 #include <sys/module.h>
47 #include <sys/bus.h>
48 #include <sys/conf.h>
49 #include <sys/bio.h>
50 #include <sys/malloc.h>
51 #include <sys/uio.h>
52 #include <sys/sysctl.h>
53 #include <sys/endian.h>
54 #include <sys/queue.h>
55 #include <sys/kthread.h>
56 #include <sys/taskqueue.h>
57 #include <sys/sbuf.h>
58 
59 #include <machine/bus.h>
60 #include <machine/resource.h>
61 #include <sys/rman.h>
62 
63 #include <machine/stdarg.h>
64 
65 #include <cam/cam.h>
66 #include <cam/cam_ccb.h>
67 #include <cam/cam_xpt.h>
68 #include <cam/cam_debug.h>
69 #include <cam/cam_sim.h>
70 #include <cam/cam_xpt_sim.h>
71 #include <cam/cam_xpt_periph.h>
72 #include <cam/cam_periph.h>
73 #include <cam/scsi/scsi_all.h>
74 #include <cam/scsi/scsi_message.h>
75 #include <cam/scsi/smp_all.h>
76 
77 #include <dev/mps/mpi/mpi2_type.h>
78 #include <dev/mps/mpi/mpi2.h>
79 #include <dev/mps/mpi/mpi2_ioc.h>
80 #include <dev/mps/mpi/mpi2_sas.h>
81 #include <dev/mps/mpi/mpi2_cnfg.h>
82 #include <dev/mps/mpi/mpi2_init.h>
83 #include <dev/mps/mpi/mpi2_tool.h>
84 #include <dev/mps/mps_ioctl.h>
85 #include <dev/mps/mpsvar.h>
86 #include <dev/mps/mps_table.h>
87 #include <dev/mps/mps_sas.h>
88 
89 #define MPSSAS_DISCOVERY_TIMEOUT	20
90 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
91 
/*
 * static array to check SCSI OpCode for EEDP protection bits
 *
 * Indexed by the SCSI opcode (first CDB byte).  A non-zero entry holds the
 * EEDP flag bits to program into the SCSIIO request for that opcode; zero
 * means the opcode gets no EEDP treatment.
 */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x00-0x0f */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x10-0x1f */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,	/* 0x20-0x2f */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x30-0x3f */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x40-0x4f */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x50-0x5f */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x60-0x6f */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x70-0x7f */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,	/* 0x80-0x8f */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x90-0x9f */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,	/* 0xa0-0xaf */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xb0-0xbf */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xc0-0xcf */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xd0-0xdf */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xe0-0xef */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* 0xf0-0xff */
};
116 
117 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
118 
119 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mpssas_poll(struct cam_sim *sim);
123 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
124     struct mps_command *cm);
125 static void mpssas_scsiio_timeout(void *data);
126 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
127 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
128     struct mps_command *cm, union ccb *ccb);
129 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
130 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
131 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
132 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
133 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
134 			       uint64_t sasaddr);
135 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
136 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
137 static void mpssas_async(void *callback_arg, uint32_t code,
138 			 struct cam_path *path, void *arg);
139 static int mpssas_send_portenable(struct mps_softc *sc);
140 static void mpssas_portenable_complete(struct mps_softc *sc,
141     struct mps_command *cm);
142 
143 struct mpssas_target *
144 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
145 {
146 	struct mpssas_target *target;
147 	int i;
148 
149 	for (i = start; i < sassc->maxtargets; i++) {
150 		target = &sassc->targets[i];
151 		if (target->handle == handle)
152 			return (target);
153 	}
154 
155 	return (NULL);
156 }
157 
158 /* we need to freeze the simq during attach and diag reset, to avoid failing
159  * commands before device handles have been found by discovery.  Since
160  * discovery involves reading config pages and possibly sending commands,
161  * discovery actions may continue even after we receive the end of discovery
162  * event, so refcount discovery actions instead of assuming we can unfreeze
163  * the simq when we get the event.
164  */
165 void
166 mpssas_startup_increment(struct mpssas_softc *sassc)
167 {
168 	MPS_FUNCTRACE(sassc->sc);
169 
170 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
171 		if (sassc->startup_refcount++ == 0) {
172 			/* just starting, freeze the simq */
173 			mps_dprint(sassc->sc, MPS_INIT,
174 			    "%s freezing simq\n", __func__);
175 			xpt_hold_boot();
176 			xpt_freeze_simq(sassc->sim, 1);
177 		}
178 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
179 		    sassc->startup_refcount);
180 	}
181 }
182 
183 void
184 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
185 {
186 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
187 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
188 		xpt_release_simq(sassc->sim, 1);
189 		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
190 	}
191 }
192 
193 void
194 mpssas_startup_decrement(struct mpssas_softc *sassc)
195 {
196 	MPS_FUNCTRACE(sassc->sc);
197 
198 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
199 		if (--sassc->startup_refcount == 0) {
200 			/* finished all discovery-related actions, release
201 			 * the simq and rescan for the latest topology.
202 			 */
203 			mps_dprint(sassc->sc, MPS_INIT,
204 			    "%s releasing simq\n", __func__);
205 			sassc->flags &= ~MPSSAS_IN_STARTUP;
206 			xpt_release_simq(sassc->sim, 1);
207 			xpt_release_boot();
208 		}
209 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
210 		    sassc->startup_refcount);
211 	}
212 }
213 
214 /*
215  * The firmware requires us to stop sending commands when we're doing task
216  * management.
217  * XXX The logic for serializing the device has been made lazy and moved to
218  * mpssas_prepare_for_tm().
219  */
220 struct mps_command *
221 mpssas_alloc_tm(struct mps_softc *sc)
222 {
223 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
224 	struct mps_command *tm;
225 
226 	tm = mps_alloc_high_priority_command(sc);
227 	if (tm == NULL)
228 		return (NULL);
229 
230 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
231 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
232 	return tm;
233 }
234 
235 void
236 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
237 {
238 	int target_id = 0xFFFFFFFF;
239 
240 	if (tm == NULL)
241 		return;
242 
243 	/*
244 	 * For TM's the devq is frozen for the device.  Unfreeze it here and
245 	 * free the resources used for freezing the devq.  Must clear the
246 	 * INRESET flag as well or scsi I/O will not work.
247 	 */
248 	if (tm->cm_targ != NULL) {
249 		tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
250 		target_id = tm->cm_targ->tid;
251 	}
252 	if (tm->cm_ccb) {
253 		mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
254 		    target_id);
255 		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
256 		xpt_free_path(tm->cm_ccb->ccb_h.path);
257 		xpt_free_ccb(tm->cm_ccb);
258 	}
259 
260 	mps_free_high_priority_command(sc, tm);
261 }
262 
263 void
264 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
265 {
266 	struct mpssas_softc *sassc = sc->sassc;
267 	path_id_t pathid;
268 	target_id_t targetid;
269 	union ccb *ccb;
270 
271 	MPS_FUNCTRACE(sc);
272 	pathid = cam_sim_path(sassc->sim);
273 	if (targ == NULL)
274 		targetid = CAM_TARGET_WILDCARD;
275 	else
276 		targetid = targ - sassc->targets;
277 
278 	/*
279 	 * Allocate a CCB and schedule a rescan.
280 	 */
281 	ccb = xpt_alloc_ccb_nowait();
282 	if (ccb == NULL) {
283 		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
284 		return;
285 	}
286 
287 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
288 	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
289 		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
290 		xpt_free_ccb(ccb);
291 		return;
292 	}
293 
294 	if (targetid == CAM_TARGET_WILDCARD)
295 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
296 	else
297 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
298 
299 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
300 	xpt_rescan(ccb);
301 }
302 
/*
 * printf-style logging helper for a command: prefixes the caller's message
 * with the command's CAM path (or a "(noperiph:...)" synthetic path when no
 * CCB is attached), the CDB and transfer length for SCSI I/O, and the SMID.
 * Output is suppressed unless 'level' is enabled in sc->mps_debug.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[224];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	/* Fixed-size sbuf on the stack; no allocation in this path. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		/* Use CAM's rendering of the device path as the prefix. */
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: synthesize a sim:bus:target:lun style prefix. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
348 
349 static void
350 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
351 {
352 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
353 	struct mpssas_target *targ;
354 	uint16_t handle;
355 
356 	MPS_FUNCTRACE(sc);
357 
358 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
359 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
360 	targ = tm->cm_targ;
361 
362 	if (reply == NULL) {
363 		/* XXX retry the remove after the diag reset completes? */
364 		mps_dprint(sc, MPS_FAULT,
365 		    "%s NULL reply resetting device 0x%04x\n", __func__,
366 		    handle);
367 		mpssas_free_tm(sc, tm);
368 		return;
369 	}
370 
371 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
372 	    MPI2_IOCSTATUS_SUCCESS) {
373 		mps_dprint(sc, MPS_ERROR,
374 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
375 		   le16toh(reply->IOCStatus), handle);
376 	}
377 
378 	mps_dprint(sc, MPS_XINFO,
379 	    "Reset aborted %u commands\n", reply->TerminationCount);
380 	mps_free_reply(sc, tm->cm_reply_data);
381 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
382 
383 	mps_dprint(sc, MPS_XINFO,
384 	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
385 
386 	/*
387 	 * Don't clear target if remove fails because things will get confusing.
388 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
389 	 * this target id if possible, and so we can assign the same target id
390 	 * to this device if it comes back in the future.
391 	 */
392 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
393 	    MPI2_IOCSTATUS_SUCCESS) {
394 		targ = tm->cm_targ;
395 		targ->handle = 0x0;
396 		targ->encl_handle = 0x0;
397 		targ->encl_slot = 0x0;
398 		targ->exp_dev_handle = 0x0;
399 		targ->phy_num = 0x0;
400 		targ->linkrate = 0x0;
401 		targ->devinfo = 0x0;
402 		targ->flags = 0x0;
403 	}
404 
405 	mpssas_free_tm(sc, tm);
406 }
407 
408 /*
409  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
410  * Otherwise Volume Delete is same as Bare Drive Removal.
411  */
412 void
413 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
414 {
415 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
416 	struct mps_softc *sc;
417 	struct mps_command *tm;
418 	struct mpssas_target *targ = NULL;
419 
420 	MPS_FUNCTRACE(sassc->sc);
421 	sc = sassc->sc;
422 
423 #ifdef WD_SUPPORT
424 	/*
425 	 * If this is a WD controller, determine if the disk should be exposed
426 	 * to the OS or not.  If disk should be exposed, return from this
427 	 * function without doing anything.
428 	 */
429 	if (sc->WD_available && (sc->WD_hide_expose ==
430 	    MPS_WD_EXPOSE_ALWAYS)) {
431 		return;
432 	}
433 #endif //WD_SUPPORT
434 
435 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
436 	if (targ == NULL) {
437 		/* FIXME: what is the action? */
438 		/* We don't know about this device? */
439 		mps_dprint(sc, MPS_ERROR,
440 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
441 		return;
442 	}
443 
444 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
445 
446 	tm = mpssas_alloc_tm(sc);
447 	if (tm == NULL) {
448 		mps_dprint(sc, MPS_ERROR,
449 		    "%s: command alloc failure\n", __func__);
450 		return;
451 	}
452 
453 	mpssas_rescan_target(sc, targ);
454 
455 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
456 	req->DevHandle = targ->handle;
457 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
458 
459 	/* SAS Hard Link Reset / SATA Link Reset */
460 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
461 
462 	tm->cm_targ = targ;
463 	tm->cm_data = NULL;
464 	tm->cm_complete = mpssas_remove_volume;
465 	tm->cm_complete_data = (void *)(uintptr_t)handle;
466 
467 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
468 	    __func__, targ->tid);
469 	mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
470 
471 	mps_map_command(sc, tm);
472 }
473 
474 /*
475  * The MPT2 firmware performs debounce on the link to avoid transient link
476  * errors and false removals.  When it does decide that link has been lost
477  * and a device need to go away, it expects that the host will perform a
478  * target reset and then an op remove.  The reset has the side-effect of
479  * aborting any outstanding requests for the device, which is required for
480  * the op-remove to succeed.  It's not clear if the host should check for
481  * the device coming back alive after the reset.
482  */
483 void
484 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
485 {
486 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
487 	struct mps_softc *sc;
488 	struct mps_command *cm;
489 	struct mpssas_target *targ = NULL;
490 
491 	MPS_FUNCTRACE(sassc->sc);
492 
493 	sc = sassc->sc;
494 
495 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
496 	if (targ == NULL) {
497 		/* FIXME: what is the action? */
498 		/* We don't know about this device? */
499 		mps_dprint(sc, MPS_ERROR,
500 		    "%s : invalid handle 0x%x \n", __func__, handle);
501 		return;
502 	}
503 
504 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
505 
506 	cm = mpssas_alloc_tm(sc);
507 	if (cm == NULL) {
508 		mps_dprint(sc, MPS_ERROR,
509 		    "%s: command alloc failure\n", __func__);
510 		return;
511 	}
512 
513 	mpssas_rescan_target(sc, targ);
514 
515 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
516 	req->DevHandle = htole16(targ->handle);
517 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
518 
519 	/* SAS Hard Link Reset / SATA Link Reset */
520 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
521 
522 	cm->cm_targ = targ;
523 	cm->cm_data = NULL;
524 	cm->cm_complete = mpssas_remove_device;
525 	cm->cm_complete_data = (void *)(uintptr_t)handle;
526 
527 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
528 	    __func__, targ->tid);
529 	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
530 
531 	mps_map_command(sc, cm);
532 }
533 
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_remove().  Reuses the same command frame to send the
 * MPI2_SAS_OP_REMOVE_DEVICE IO unit control request: immediately when the
 * target has no outstanding commands, otherwise deferred via
 * targ->pending_remove_tm until they drain.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	/* Device handle was stashed by mpssas_prepare_remove(). */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply resetting device 0x%04x\n", __func__,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		/* Log but continue: the op-remove is still attempted. */
		mps_dprint(sc, MPS_ERROR,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	/*
	 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
	 * They should be aborted or time out and we'll kick thus off there
	 * if so.
	 */
	if (TAILQ_FIRST(&targ->commands) == NULL) {
		mps_dprint(sc, MPS_INFO, "No pending commands: starting remove_device\n");
		mps_map_command(sc, tm);
		targ->pending_remove_tm = NULL;
	} else {
		/* Deferred: sent when the last pending command completes. */
		targ->pending_remove_tm = tm;
	}

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
}
608 
/*
 * Completion handler for the MPI2_SAS_OP_REMOVE_DEVICE request issued by
 * mpssas_remove_device().  On success, clears the target's firmware-derived
 * state and frees its LUN list; devname/sasaddr are intentionally preserved
 * (see comment below).  The TM is freed in all cases.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	/* Device handle was stashed by mpssas_remove_device(). */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * At this point, we should have no pending commands for the target.
	 * The remove target has just completed.
	 */
	KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
	    ("%s: no commands should be pending\n", __func__));

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Release all per-LUN bookkeeping for the departed device. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}

	mpssas_free_tm(sc, tm);
}
682 
683 static int
684 mpssas_register_events(struct mps_softc *sc)
685 {
686 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
687 
688 	bzero(events, 16);
689 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
690 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
691 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
692 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
693 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
694 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
695 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
696 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
697 	setbit(events, MPI2_EVENT_IR_VOLUME);
698 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
699 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
700 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
701 
702 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
703 	    &sc->sassc->mpssas_eh);
704 
705 	return (0);
706 }
707 
/*
 * Attach the CAM SAS layer: allocate the per-SIM softc and target array,
 * create the simq/SIM, start the event taskqueue, register the SCSI bus,
 * freeze the simq until discovery completes, and register for async and
 * firmware events.  On any error the partially-built state is torn down
 * via mps_detach_sas().  Returns 0 on success or an errno.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);

	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	sc->sassc = sassc;
	sassc->sc = sc;

	/* Leave the high-priority requests (and one spare) out of the simq. */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_INIT|MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_INIT|MPS_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mpssas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	mps_unlock(sc);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_ERROR|MPS_INIT,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

		event = AC_ADVINFO_CHANGED;
		status = xpt_register_async(event, mpssas_async, sc,
					    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);

	mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
	return (error);
}
825 
/*
 * Tear down the CAM SAS layer in the reverse order of mps_attach_sas():
 * deregister events, drain the event taskqueue (with the lock unheld),
 * remove the async handler, unwind any startup freeze so CAM doesn't
 * wedge, deregister/free the SIM and simq, and release the target and
 * softc allocations.  Safe to call on a partially-attached softc (it is
 * also used as the attach error path).  Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		/* Registering with event mask 0 removes the handler. */
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Drop any discovery references still held. */
	while (sassc->startup_refcount != 0)
		mpssas_startup_decrement(sassc);

	/* Release the startup freeze if discovery never finished. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free all per-LUN records for every target slot. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
888 
889 void
890 mpssas_discovery_end(struct mpssas_softc *sassc)
891 {
892 	struct mps_softc *sc = sassc->sc;
893 
894 	MPS_FUNCTRACE(sc);
895 
896 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
897 		callout_stop(&sassc->discovery_callout);
898 
899 	/*
900 	 * After discovery has completed, check the mapping table for any
901 	 * missing devices and update their missing counts. Only do this once
902 	 * whenever the driver is initialized so that missing counts aren't
903 	 * updated unnecessarily. Note that just because discovery has
904 	 * completed doesn't mean that events have been processed yet. The
905 	 * check_devices function is a callout timer that checks if ALL devices
906 	 * are missing. If so, it will wait a little longer for events to
907 	 * complete and keep resetting itself until some device in the mapping
908 	 * table is not missing, meaning that event processing has started.
909 	 */
910 	if (sc->track_mapping_events) {
911 		mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
912 		    "completed. Check for missing devices in the mapping "
913 		    "table.\n");
914 		callout_reset(&sc->device_check_callout,
915 		    MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
916 		    sc);
917 	}
918 }
919 
/*
 * CAM action routine for the SIM.  Called by CAM, with the SIM lock held,
 * to process a CCB.  Cases that end with "break" fall through to the common
 * xpt_done() at the bottom of the switch; cases that hand the CCB to a
 * helper (reset/scsiio/smpio) "return" instead, because the helper is
 * responsible for completing the CCB (possibly asynchronously).
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report HBA capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mps_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		/* PIM_NOSCAN: bus rescans are driven by driver events */
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/* Floor value; per-target rates come from XPT_GET_TRAN_SETTINGS */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report the negotiated link rate for one target. */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* handle == 0 means no device is mapped at this target ID */
		if (targ->handle == 0x0) {
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Translate the firmware link-rate code into kb/s */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			/* unknown rate code: report no valid speed */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		/* Helper completes the CCB; do not fall through to xpt_done */
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/*
		 * Not implemented; report success so upper layers don't
		 * stall.  Real recovery is driven by command timeouts.
		 */
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mpssas_action_scsiio(sassc, ccb);
		return;
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1039 
1040 static void
1041 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1042     target_id_t target_id, lun_id_t lun_id)
1043 {
1044 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1045 	struct cam_path *path;
1046 
1047 	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1048 	    ac_code, target_id, (uintmax_t)lun_id);
1049 
1050 	if (xpt_create_path(&path, NULL,
1051 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1052 		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1053 			   "notification\n");
1054 		return;
1055 	}
1056 
1057 	xpt_async(ac_code, path, NULL);
1058 	xpt_free_path(path);
1059 }
1060 
/*
 * Force completion of every active command after a diag reset.  The
 * hardware state is gone, so each in-flight command is completed with a
 * NULL reply: its normal completion handler runs (or its sleeper is woken),
 * and those paths are responsible for interpreting cm_reply == NULL as a
 * reset-induced failure.
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	/* NOTE: index starts at 1 — presumably slot 0/SMID 0 is reserved; confirm */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPS_CM_STATE_FREE)
			continue;

		/* Mark busy before the callback so completion paths see a
		 * consistent state; NULL reply flags the diag reset. */
		cm->cm_state = MPS_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/* Reclaim the identify buffer a timed-out SATA ID left behind */
		if (cm->cm_flags & MPS_CM_FLAGS_SATA_ID_TIMEOUT) {
			MPASS(cm->cm_data);
			free(cm->cm_data, M_MPT2);
			cm->cm_data = NULL;
		}

		/* Let pollers observe completion via the flag they spin on */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	/* Nothing is outstanding on the hardware any more */
	sc->io_cmds_active = 0;
}
1116 
1117 void
1118 mpssas_handle_reinit(struct mps_softc *sc)
1119 {
1120 	int i;
1121 
1122 	/* Go back into startup mode and freeze the simq, so that CAM
1123 	 * doesn't send any commands until after we've rediscovered all
1124 	 * targets and found the proper device handles for them.
1125 	 *
1126 	 * After the reset, portenable will trigger discovery, and after all
1127 	 * discovery-related activities have finished, the simq will be
1128 	 * released.
1129 	 */
1130 	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1131 	sc->sassc->flags |= MPSSAS_IN_STARTUP;
1132 	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1133 	mpssas_startup_increment(sc->sassc);
1134 
1135 	/* notify CAM of a bus reset */
1136 	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1137 	    CAM_LUN_WILDCARD);
1138 
1139 	/* complete and cleanup after all outstanding commands */
1140 	mpssas_complete_all_commands(sc);
1141 
1142 	mps_dprint(sc, MPS_INIT,
1143 	    "%s startup %u after command completion\n", __func__,
1144 	    sc->sassc->startup_refcount);
1145 
1146 	/* zero all the target handles, since they may change after the
1147 	 * reset, and we have to rediscover all the targets and use the new
1148 	 * handles.
1149 	 */
1150 	for (i = 0; i < sc->sassc->maxtargets; i++) {
1151 		if (sc->sassc->targets[i].outstanding != 0)
1152 			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1153 			    i, sc->sassc->targets[i].outstanding);
1154 		sc->sassc->targets[i].handle = 0x0;
1155 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1156 		sc->sassc->targets[i].outstanding = 0;
1157 		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
1158 	}
1159 }
1160 
1161 static void
1162 mpssas_tm_timeout(void *data)
1163 {
1164 	struct mps_command *tm = data;
1165 	struct mps_softc *sc = tm->cm_sc;
1166 
1167 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1168 
1169 	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1170 	    "task mgmt %p timed out\n", tm);
1171 
1172 	KASSERT(tm->cm_state == MPS_CM_STATE_INQUEUE,
1173 	    ("command not inqueue, state = %u\n", tm->cm_state));
1174 
1175 	tm->cm_state = MPS_CM_STATE_BUSY;
1176 	mps_reinit(sc);
1177 }
1178 
/*
 * Completion handler for a LUN-reset task management command.  Stops the
 * TM timeout callout, then either declares recovery finished for this LUN,
 * moves on to aborting further timed-out commands, or escalates to a
 * target reset if commands are still outstanding on the LUN.
 */
static void
mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	unsigned int cm_count = 0;
	struct mps_command *cm;
	struct mpssas_target *targ;

	/* The TM completed; cancel its escalation timeout */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 * XXXSL So should it be an assertion?
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_RECOVERY|MPS_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mps_dprint(sc, MPS_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mps_reinit(sc);
		}
		return;
	}

	mps_dprint(sc, MPS_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		/* Tell CAM a device reset was delivered to this LUN */
		mpssas_announce_reset(sc, AC_SENT_BDR, targ->tid, tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			/* Reuse this TM frame for the next abort */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
			    "More commands to abort for target %u\n",
			    targ->tid);
			mpssas_send_abort(sc, tm, cm);
		} else {
			/* Recovery fully done; release the TM frame */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
	} else {
		/*
		 * If we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
	}
}
1278 
1279 static void
1280 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1281 {
1282 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1283 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1284 	struct mpssas_target *targ;
1285 
1286 	callout_stop(&tm->cm_callout);
1287 
1288 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1289 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1290 	targ = tm->cm_targ;
1291 
1292 	/*
1293 	 * Currently there should be no way we can hit this case.  It only
1294 	 * happens when we have a failure to allocate chain frames, and
1295 	 * task management commands don't have S/G lists.
1296 	 */
1297 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1298 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1299 			   "This should not happen!\n", __func__, tm->cm_flags);
1300 		mpssas_free_tm(sc, tm);
1301 		return;
1302 	}
1303 
1304 	if (reply == NULL) {
1305 		mps_dprint(sc, MPS_RECOVERY,
1306 		    "NULL target reset reply for tm %pi TaskMID %u\n",
1307 		    tm, le16toh(req->TaskMID));
1308 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1309 			/* this completion was due to a reset, just cleanup */
1310 			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1311 			    "reset, ignoring NULL target reset reply\n");
1312 			targ->tm = NULL;
1313 			mpssas_free_tm(sc, tm);
1314 		} else {
1315 			/* we should have gotten a reply. */
1316 			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1317 			    "target reset attempt, resetting controller\n");
1318 			mps_reinit(sc);
1319 		}
1320 		return;
1321 	}
1322 
1323 	mps_dprint(sc, MPS_RECOVERY,
1324 	    "target reset status 0x%x code 0x%x count %u\n",
1325 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1326 	    le32toh(reply->TerminationCount));
1327 
1328 	if (targ->outstanding == 0) {
1329 		/* we've finished recovery for this target and all
1330 		 * of its logical units.
1331 		 */
1332 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1333 		    "Finished reset recovery for target %u\n", targ->tid);
1334 
1335 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1336 		    CAM_LUN_WILDCARD);
1337 
1338 		targ->tm = NULL;
1339 		mpssas_free_tm(sc, tm);
1340 	} else {
1341 		/*
1342 		 * After a target reset, if this target still has
1343 		 * outstanding commands, the reset effectively failed,
1344 		 * regardless of the status reported.  escalate.
1345 		 */
1346 		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1347 		    "Target reset complete for target %u, but still have %u "
1348 		    "command(s), resetting controller\n", targ->tid,
1349 		    targ->outstanding);
1350 		mps_reinit(sc);
1351 	}
1352 }
1353 
/* Seconds before an unanswered reset TM escalates via mpssas_tm_timeout */
#define MPS_RESET_TIMEOUT 30

/*
 * Fill in and issue a reset task management command (LUN reset or target
 * reset) using the pre-allocated TM frame 'tm'.  Arms a timeout callout
 * that escalates to a controller reinit if the TM never completes.
 *
 * Returns 0 on successful submission, -1 on invalid arguments, or the
 * error from mps_map_command().
 */
int
mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *target;
	int err;

	target = tm->cm_targ;
	/* No device handle means the target is gone; nothing to reset */
	if (target->handle == 0) {
		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
		    __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPS_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mpssas_logical_unit_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 * 	SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mpssas_target_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	} else {
		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* TMs carry no data; the TM frame itself is the completion cookie */
	tm->cm_data = NULL;
	tm->cm_complete_data = (void *)tm;

	/* Arm the escalation timer before handing the TM to the hardware */
	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
	    mpssas_tm_timeout, tm);

	err = mps_map_command(sc, tm);
	if (err)
		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
		    "error %d sending reset type %u\n",
		    err, type);

	return err;
}
1413 
/*
 * Completion handler for an ABORT_TASK task management command.  Stops the
 * TM timeout callout, then either finishes recovery for the target, reuses
 * the TM frame to abort the next timed-out command, or — if the aborted
 * command never completed — escalates to a LUN reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed; cancel its escalation timeout */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mps_dprint(sc, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mps_reinit(sc);
		}
		return;
	}

	mps_dprint(sc, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * If there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		/* (head of the queue is a different command than we aborted) */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);

		mpssas_send_abort(sc, tm, cm);
	} else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mps_dprint(sc, MPS_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1495 
1496 #define MPS_ABORT_TIMEOUT 5
1497 
1498 static int
1499 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1500 {
1501 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1502 	struct mpssas_target *targ;
1503 	int err;
1504 
1505 	targ = cm->cm_targ;
1506 	if (targ->handle == 0) {
1507 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1508 		    "%s null devhandle for target_id %d\n",
1509 		    __func__, cm->cm_ccb->ccb_h.target_id);
1510 		return -1;
1511 	}
1512 
1513 	mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1514 	    "Aborting command %p\n", cm);
1515 
1516 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1517 	req->DevHandle = htole16(targ->handle);
1518 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1519 
1520 	/* XXX Need to handle invalid LUNs */
1521 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1522 
1523 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1524 
1525 	tm->cm_data = NULL;
1526 	tm->cm_complete = mpssas_abort_complete;
1527 	tm->cm_complete_data = (void *)tm;
1528 	tm->cm_targ = cm->cm_targ;
1529 	tm->cm_lun = cm->cm_lun;
1530 
1531 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1532 	    mpssas_tm_timeout, tm);
1533 
1534 	targ->aborts++;
1535 
1536 	mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1537 
1538 	err = mps_map_command(sc, tm);
1539 	if (err)
1540 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1541 		    "error %d sending abort for cm %p SMID %u\n",
1542 		    err, cm, req->TaskMID);
1543 	return err;
1544 }
1545 
/*
 * Callout handler for a SCSI I/O command that has exceeded its CAM timeout.
 * Verifies the command is genuinely still outstanding (running the interrupt
 * handler first to drain any pending completion), then queues it for
 * recovery and — if no task management is already in flight for the target —
 * kicks off recovery by sending an abort.
 */
static void
mpssas_scsiio_timeout(void *data)
{
	sbintime_t elapsed, now;
	union ccb *ccb;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ;

	cm = (struct mps_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
	now = sbinuptime();

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	mps_dprint(sc, MPS_XINFO|MPS_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mps_intr_locked(sc);
	if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
		/* Completion raced with the timeout; nothing to recover */
		mpssas_log_command(cm, MPS_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	elapsed = now - ccb->ccb_h.qos.sim_data;
	mpssas_log_command(cm, MPS_INFO|MPS_RECOVERY,
	    "Command timeout on target %u(0x%04x) %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	/* Mark the command timed out and park it on the recovery queue */
	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_flags |= MPS_CM_FLAGS_ON_RECOVERY | MPS_CM_FLAGS_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mps_dprint(sc, MPS_RECOVERY,
		    "queued timedout cm %p for processing by tm %p\n",
		    cm, targ->tm);
	} else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mpssas_send_abort(sc, targ->tm, cm);
	} else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}

}
1629 
/*
 * Handle an XPT_SCSI_IO CCB: validate the target, allocate a firmware
 * command, translate the CCB into an MPI2 SCSI_IO request (direction, tag
 * type, CDB, optional EEDP protection fields), arm the per-command timeout,
 * and hand the command to the hardware.  The CCB is completed immediately
 * on any validation/allocation failure; otherwise it completes later via
 * mpssas_scsiio_complete().
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* handle == 0 means no device is mapped at this target ID */
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members are not directly addressable for SCSI I/O */
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed. In either event,
	 * we're removing the target due to a firmware event telling us
	 * the device is now gone (as opposed to some transient event). Since
	 * we're opting to remove failed devices from the OS's view, we need
	 * to propagate that status up the stack.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPSSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
		/* Out of commands (or mid-reset): freeze the simq and ask
		 * CAM to requeue this CCB later. */
		if (cm != NULL) {
			mps_free_command(sc, cm);
		}
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Build the MPI2 SCSI IO request frame */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* For 32-byte CDBs, encode the additional CDB length in Control */
	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= (csio->priority << MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT) &
	    MPI2_SCSIIO_CONTROL_TASKPRI_MASK;
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB from wherever CAM put it */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	/* NOTE: IoFlags was already set to cdb_len above; this repeats it */
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 in 16-byte CDBs, byte 2 otherwise */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				/* Set RDPROTECT/WRPROTECT bits in CDB byte 1 */
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		/* DMA mapping will pull the data buffer out of the CCB */
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	/*
	 * NOTE(review): the condition below calls mpssas_direct_drive_io()
	 * when the field EQUALS MPS_WD_RETRY, which reads inverted relative
	 * to the comment above — confirm against mpssas_scsiio_complete's
	 * retry handling before changing anything.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (csio->bio != NULL)
		biotrack(csio->bio, __func__);
#endif
	/* Record submission time for the timeout handler's elapsed report */
	csio->ccb_h.qos.sim_data = sbinuptime();
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mpssas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1909 
1910 /**
1911  * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1912  */
1913 static void
1914 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1915     Mpi2SCSIIOReply_t *mpi_reply)
1916 {
1917 	u32 response_info;
1918 	u8 *response_bytes;
1919 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1920 	    MPI2_IOCSTATUS_MASK;
1921 	u8 scsi_state = mpi_reply->SCSIState;
1922 	u8 scsi_status = mpi_reply->SCSIStatus;
1923 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1924 	const char *desc_ioc_state, *desc_scsi_status;
1925 
1926 	if (log_info == 0x31170000)
1927 		return;
1928 
1929 	desc_ioc_state = mps_describe_table(mps_iocstatus_string,
1930 	    ioc_status);
1931 	desc_scsi_status = mps_describe_table(mps_scsi_status_string,
1932 	    scsi_status);
1933 
1934 	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
1935 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
1936 
1937 	/*
1938 	 *We can add more detail about underflow data here
1939 	 * TO-DO
1940 	 */
1941 	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
1942 	    "scsi_state %b\n", desc_scsi_status, scsi_status,
1943 	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
1944 	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
1945 
1946 	if (sc->mps_debug & MPS_XINFO &&
1947 		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1948 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
1949 		scsi_sense_print(csio);
1950 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
1951 	}
1952 
1953 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
1954 		response_info = le32toh(mpi_reply->ResponseInfo);
1955 		response_bytes = (u8 *)&response_info;
1956 		mps_dprint(sc, MPS_XINFO, "response code(0x%1x): %s\n",
1957 		    response_bytes[0],
1958 		    mps_describe_table(mps_scsi_taskmgmt_string,
1959 		    response_bytes[0]));
1960 	}
1961 }
1962 
1963 static void
1964 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1965 {
1966 	MPI2_SCSI_IO_REPLY *rep;
1967 	union ccb *ccb;
1968 	struct ccb_scsiio *csio;
1969 	struct mpssas_softc *sassc;
1970 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
1971 	u8 *TLR_bits, TLR_on;
1972 	int dir = 0, i;
1973 	u16 alloc_len;
1974 	struct mpssas_target *target;
1975 	target_id_t target_id;
1976 
1977 	MPS_FUNCTRACE(sc);
1978 	mps_dprint(sc, MPS_TRACE,
1979 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
1980 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
1981 	    cm->cm_targ->outstanding);
1982 
1983 	callout_stop(&cm->cm_callout);
1984 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1985 
1986 	sassc = sc->sassc;
1987 	ccb = cm->cm_complete_data;
1988 	csio = &ccb->csio;
1989 	target_id = csio->ccb_h.target_id;
1990 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
1991 	/*
1992 	 * XXX KDM if the chain allocation fails, does it matter if we do
1993 	 * the sync and unload here?  It is simpler to do it in every case,
1994 	 * assuming it doesn't cause problems.
1995 	 */
1996 	if (cm->cm_data != NULL) {
1997 		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
1998 			dir = BUS_DMASYNC_POSTREAD;
1999 		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2000 			dir = BUS_DMASYNC_POSTWRITE;
2001 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2002 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2003 	}
2004 
2005 	cm->cm_targ->completed++;
2006 	cm->cm_targ->outstanding--;
2007 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2008 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2009 
2010 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
2011 	if (ccb->csio.bio != NULL)
2012 		biotrack(ccb->csio.bio, __func__);
2013 #endif
2014 
2015 	if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
2016 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2017 		KASSERT(cm->cm_state == MPS_CM_STATE_BUSY,
2018 		    ("Not busy for CM_FLAGS_TIMEDOUT: %u\n", cm->cm_state));
2019 		cm->cm_flags &= ~MPS_CM_FLAGS_ON_RECOVERY;
2020 		if (cm->cm_reply != NULL)
2021 			mpssas_log_command(cm, MPS_RECOVERY,
2022 			    "completed timedout cm %p ccb %p during recovery "
2023 			    "ioc %x scsi %x state %x xfer %u\n",
2024 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2025 			    rep->SCSIStatus, rep->SCSIState,
2026 			    le32toh(rep->TransferCount));
2027 		else
2028 			mpssas_log_command(cm, MPS_RECOVERY,
2029 			    "completed timedout cm %p ccb %p during recovery\n",
2030 			    cm, cm->cm_ccb);
2031 	} else if (cm->cm_targ->tm != NULL) {
2032 		if (cm->cm_reply != NULL)
2033 			mpssas_log_command(cm, MPS_RECOVERY,
2034 			    "completed cm %p ccb %p during recovery "
2035 			    "ioc %x scsi %x state %x xfer %u\n",
2036 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2037 			    rep->SCSIStatus, rep->SCSIState,
2038 			    le32toh(rep->TransferCount));
2039 		else
2040 			mpssas_log_command(cm, MPS_RECOVERY,
2041 			    "completed cm %p ccb %p during recovery\n",
2042 			    cm, cm->cm_ccb);
2043 	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2044 		mpssas_log_command(cm, MPS_RECOVERY,
2045 		    "reset completed cm %p ccb %p\n",
2046 		    cm, cm->cm_ccb);
2047 	}
2048 
2049 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2050 		/*
2051 		 * We ran into an error after we tried to map the command,
2052 		 * so we're getting a callback without queueing the command
2053 		 * to the hardware.  So we set the status here, and it will
2054 		 * be retained below.  We'll go through the "fast path",
2055 		 * because there can be no reply when we haven't actually
2056 		 * gone out to the hardware.
2057 		 */
2058 		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2059 
2060 		/*
2061 		 * Currently the only error included in the mask is
2062 		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2063 		 * chain frames.  We need to freeze the queue until we get
2064 		 * a command that completed without this error, which will
2065 		 * hopefully have some chain frames attached that we can
2066 		 * use.  If we wanted to get smarter about it, we would
2067 		 * only unfreeze the queue in this condition when we're
2068 		 * sure that we're getting some chain frames back.  That's
2069 		 * probably unnecessary.
2070 		 */
2071 		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2072 			xpt_freeze_simq(sassc->sim, 1);
2073 			sassc->flags |= MPSSAS_QUEUE_FROZEN;
2074 			mps_dprint(sc, MPS_XINFO, "Error sending command, "
2075 				   "freezing SIM queue\n");
2076 		}
2077 	}
2078 
2079 	/*
2080 	 * If this is a Start Stop Unit command and it was issued by the driver
2081 	 * during shutdown, decrement the refcount to account for all of the
2082 	 * commands that were sent.  All SSU commands should be completed before
2083 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2084 	 * is TRUE.
2085 	 */
2086 	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2087 		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
2088 		sc->SSU_refcount--;
2089 	}
2090 
2091 	/* Take the fast path to completion */
2092 	if (cm->cm_reply == NULL) {
2093 		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2094 			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2095 				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2096 			else {
2097 				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2098 				ccb->csio.scsi_status = SCSI_STATUS_OK;
2099 			}
2100 			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2101 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2102 				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2103 				mps_dprint(sc, MPS_XINFO,
2104 				    "Unfreezing SIM queue\n");
2105 			}
2106 		}
2107 
2108 		/*
2109 		 * There are two scenarios where the status won't be
2110 		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
2111 		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2112 		 */
2113 		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2114 			/*
2115 			 * Freeze the dev queue so that commands are
2116 			 * executed in the correct order after error
2117 			 * recovery.
2118 			 */
2119 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2120 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2121 		}
2122 		mps_free_command(sc, cm);
2123 		xpt_done(ccb);
2124 		return;
2125 	}
2126 
2127 	mpssas_log_command(cm, MPS_XINFO,
2128 	    "ioc %x scsi %x state %x xfer %u\n",
2129 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2130 	    le32toh(rep->TransferCount));
2131 
2132 	/*
2133 	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2134 	 * Volume if an error occurred (normal I/O retry).  Use the original
2135 	 * CCB, but set a flag that this will be a retry so that it's sent to
2136 	 * the original volume.  Free the command but reuse the CCB.
2137 	 */
2138 	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2139 		mps_free_command(sc, cm);
2140 		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
2141 		mpssas_action_scsiio(sassc, ccb);
2142 		return;
2143 	} else
2144 		ccb->ccb_h.sim_priv.entries[0].field = 0;
2145 
2146 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2147 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2148 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2149 		/* FALLTHROUGH */
2150 	case MPI2_IOCSTATUS_SUCCESS:
2151 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2152 
2153 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2154 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2155 			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2156 
2157 		/* Completion failed at the transport level. */
2158 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2159 		    MPI2_SCSI_STATE_TERMINATED)) {
2160 			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2161 			break;
2162 		}
2163 
2164 		/* In a modern packetized environment, an autosense failure
2165 		 * implies that there's not much else that can be done to
2166 		 * recover the command.
2167 		 */
2168 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2169 			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2170 			break;
2171 		}
2172 
2173 		/*
2174 		 * CAM doesn't care about SAS Response Info data, but if this is
2175 		 * the state check if TLR should be done.  If not, clear the
2176 		 * TLR_bits for the target.
2177 		 */
2178 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2179 		    ((le32toh(rep->ResponseInfo) &
2180 		    MPI2_SCSI_RI_MASK_REASONCODE) ==
2181 		    MPS_SCSI_RI_INVALID_FRAME)) {
2182 			sc->mapping_table[target_id].TLR_bits =
2183 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2184 		}
2185 
2186 		/*
2187 		 * Intentionally override the normal SCSI status reporting
2188 		 * for these two cases.  These are likely to happen in a
2189 		 * multi-initiator environment, and we want to make sure that
2190 		 * CAM retries these commands rather than fail them.
2191 		 */
2192 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2193 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2194 			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2195 			break;
2196 		}
2197 
2198 		/* Handle normal status and sense */
2199 		csio->scsi_status = rep->SCSIStatus;
2200 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2201 			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2202 		else
2203 			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2204 
2205 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2206 			int sense_len, returned_sense_len;
2207 
2208 			returned_sense_len = min(le32toh(rep->SenseCount),
2209 			    sizeof(struct scsi_sense_data));
2210 			if (returned_sense_len < ccb->csio.sense_len)
2211 				ccb->csio.sense_resid = ccb->csio.sense_len -
2212 					returned_sense_len;
2213 			else
2214 				ccb->csio.sense_resid = 0;
2215 
2216 			sense_len = min(returned_sense_len,
2217 			    ccb->csio.sense_len - ccb->csio.sense_resid);
2218 			bzero(&ccb->csio.sense_data,
2219 			      sizeof(ccb->csio.sense_data));
2220 			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2221 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2222 		}
2223 
2224 		/*
2225 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2226 		 * and it's page code 0 (Supported Page List), and there is
2227 		 * inquiry data, and this is for a sequential access device, and
2228 		 * the device is an SSP target, and TLR is supported by the
2229 		 * controller, turn the TLR_bits value ON if page 0x90 is
2230 		 * supported.
2231 		 */
2232 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2233 		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2234 		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2235 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2236 		    (csio->data_ptr != NULL) &&
2237 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2238 		    (sc->control_TLR) &&
2239 		    (sc->mapping_table[target_id].device_info &
2240 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2241 			vpd_list = (struct scsi_vpd_supported_page_list *)
2242 			    csio->data_ptr;
2243 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2244 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2245 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2246 			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2247 			    csio->cdb_io.cdb_bytes[4];
2248 			alloc_len -= csio->resid;
2249 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2250 				if (vpd_list->list[i] == 0x90) {
2251 					*TLR_bits = TLR_on;
2252 					break;
2253 				}
2254 			}
2255 		}
2256 
2257 		/*
2258 		 * If this is a SATA direct-access end device, mark it so that
2259 		 * a SCSI StartStopUnit command will be sent to it when the
2260 		 * driver is being shutdown.
2261 		 */
2262 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2263 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2264 		    (sc->mapping_table[target_id].device_info &
2265 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2266 		    ((sc->mapping_table[target_id].device_info &
2267 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2268 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2269 			target = &sassc->targets[target_id];
2270 			target->supports_SSU = TRUE;
2271 			mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
2272 			    target_id);
2273 		}
2274 		break;
2275 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2276 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2277 		/*
2278 		 * If devinfo is 0 this will be a volume.  In that case don't
2279 		 * tell CAM that the volume is not there.  We want volumes to
2280 		 * be enumerated until they are deleted/removed, not just
2281 		 * failed.
2282 		 */
2283 		if (cm->cm_targ->devinfo == 0)
2284 			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2285 		else
2286 			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2287 		break;
2288 	case MPI2_IOCSTATUS_INVALID_SGL:
2289 		mps_print_scsiio_cmd(sc, cm);
2290 		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2291 		break;
2292 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2293 		/*
2294 		 * This is one of the responses that comes back when an I/O
2295 		 * has been aborted.  If it is because of a timeout that we
2296 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2297 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2298 		 * command is the same (it gets retried, subject to the
2299 		 * retry counter), the only difference is what gets printed
2300 		 * on the console.
2301 		 */
2302 		if (cm->cm_flags & MPS_CM_FLAGS_TIMEDOUT)
2303 			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2304 		else
2305 			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2306 		break;
2307 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2308 		/* resid is ignored for this condition */
2309 		csio->resid = 0;
2310 		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2311 		break;
2312 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2313 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2314 		/*
2315 		 * These can sometimes be transient transport-related
2316 		 * errors, and sometimes persistent drive-related errors.
2317 		 * We used to retry these without decrementing the retry
2318 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2319 		 * we hit a persistent drive problem that returns one of
2320 		 * these error codes, we would retry indefinitely.  So,
2321 		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2322 		 * count and avoid infinite retries.  We're taking the
2323 		 * potential risk of flagging false failures in the event
2324 		 * of a topology-related error (e.g. a SAS expander problem
2325 		 * causes a command addressed to a drive to fail), but
2326 		 * avoiding getting into an infinite retry loop. However,
2327 		 * if we get them while were moving a device, we should
2328 		 * fail the request as 'not there' because the device
2329 		 * is effectively gone.
2330 		 */
2331 		if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL)
2332 			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2333 		else
2334 			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2335 		mps_dprint(sc, MPS_INFO,
2336 		    "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
2337 		    mps_describe_table(mps_iocstatus_string,
2338 		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2339 		    target_id, cm->cm_desc.Default.SMID,
2340 		    le32toh(rep->IOCLogInfo),
2341 		    (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) ? " departing" : "");
2342 		mps_dprint(sc, MPS_XINFO,
2343 		    "SCSIStatus %x SCSIState %x xfercount %u\n",
2344 		    rep->SCSIStatus, rep->SCSIState,
2345 		    le32toh(rep->TransferCount));
2346 		break;
2347 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2348 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2349 	case MPI2_IOCSTATUS_INVALID_VPID:
2350 	case MPI2_IOCSTATUS_INVALID_FIELD:
2351 	case MPI2_IOCSTATUS_INVALID_STATE:
2352 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2353 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2354 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2355 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2356 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2357 	default:
2358 		mpssas_log_command(cm, MPS_XINFO,
2359 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2360 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2361 		    rep->SCSIStatus, rep->SCSIState,
2362 		    le32toh(rep->TransferCount));
2363 		csio->resid = cm->cm_length;
2364 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2365 		break;
2366 	}
2367 
2368 	mps_sc_failed_io_info(sc,csio,rep);
2369 
2370 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2371 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2372 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2373 		mps_dprint(sc, MPS_XINFO, "Command completed, "
2374 		    "unfreezing SIM queue\n");
2375 	}
2376 
2377 	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2378 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2379 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2380 	}
2381 
2382 	/*
2383 	 * Check to see if we're removing the device. If so, and this is the
2384 	 * last command on the queue, proceed with the deferred removal of the
2385 	 * device.  Note, for removing a volume, this won't trigger because
2386 	 * pending_remove_tm will be NULL.
2387 	 */
2388 	if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) {
2389 		if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
2390 		    cm->cm_targ->pending_remove_tm != NULL) {
2391 			mps_dprint(sc, MPS_INFO, "Last pending command complete: starting remove_device\n");
2392 			mps_map_command(sc, cm->cm_targ->pending_remove_tm);
2393 			cm->cm_targ->pending_remove_tm = NULL;
2394 		}
2395 	}
2396 
2397 	mps_free_command(sc, cm);
2398 	xpt_done(ccb);
2399 }
2400 
/* All requests that reach here are endian safe. */
2402 static void
2403 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2404     union ccb *ccb) {
2405 	pMpi2SCSIIORequest_t	pIO_req;
2406 	struct mps_softc	*sc = sassc->sc;
2407 	uint64_t		virtLBA;
2408 	uint32_t		physLBA, stripe_offset, stripe_unit;
2409 	uint32_t		io_size, column;
2410 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2411 
2412 	/*
2413 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2414 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2415 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2416 	 * bit different than the 10/16 CDBs, handle them separately.
2417 	 */
2418 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2419 	CDB = pIO_req->CDB.CDB32;
2420 
2421 	/*
2422 	 * Handle 6 byte CDBs.
2423 	 */
2424 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2425 	    (CDB[0] == WRITE_6))) {
2426 		/*
2427 		 * Get the transfer size in blocks.
2428 		 */
2429 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2430 
2431 		/*
2432 		 * Get virtual LBA given in the CDB.
2433 		 */
2434 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2435 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2436 
2437 		/*
2438 		 * Check that LBA range for I/O does not exceed volume's
2439 		 * MaxLBA.
2440 		 */
2441 		if ((virtLBA + (uint64_t)io_size - 1) <=
2442 		    sc->DD_max_lba) {
2443 			/*
2444 			 * Check if the I/O crosses a stripe boundary.  If not,
2445 			 * translate the virtual LBA to a physical LBA and set
2446 			 * the DevHandle for the PhysDisk to be used.  If it
2447 			 * does cross a boundary, do normal I/O.  To get the
2448 			 * right DevHandle to use, get the map number for the
2449 			 * column, then use that map number to look up the
2450 			 * DevHandle of the PhysDisk.
2451 			 */
2452 			stripe_offset = (uint32_t)virtLBA &
2453 			    (sc->DD_stripe_size - 1);
2454 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2455 				physLBA = (uint32_t)virtLBA >>
2456 				    sc->DD_stripe_exponent;
2457 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2458 				column = physLBA % sc->DD_num_phys_disks;
2459 				pIO_req->DevHandle =
2460 				    htole16(sc->DD_column_map[column].dev_handle);
2461 				/* ???? Is this endian safe*/
2462 				cm->cm_desc.SCSIIO.DevHandle =
2463 				    pIO_req->DevHandle;
2464 
2465 				physLBA = (stripe_unit <<
2466 				    sc->DD_stripe_exponent) + stripe_offset;
2467 				ptrLBA = &pIO_req->CDB.CDB32[1];
2468 				physLBA_byte = (uint8_t)(physLBA >> 16);
2469 				*ptrLBA = physLBA_byte;
2470 				ptrLBA = &pIO_req->CDB.CDB32[2];
2471 				physLBA_byte = (uint8_t)(physLBA >> 8);
2472 				*ptrLBA = physLBA_byte;
2473 				ptrLBA = &pIO_req->CDB.CDB32[3];
2474 				physLBA_byte = (uint8_t)physLBA;
2475 				*ptrLBA = physLBA_byte;
2476 
2477 				/*
2478 				 * Set flag that Direct Drive I/O is
2479 				 * being done.
2480 				 */
2481 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2482 			}
2483 		}
2484 		return;
2485 	}
2486 
2487 	/*
2488 	 * Handle 10, 12 or 16 byte CDBs.
2489 	 */
2490 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2491 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2492 	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2493 	    (CDB[0] == WRITE_12))) {
2494 		/*
2495 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2496 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2497 		 * the else section.  10-byte and 12-byte CDB's are OK.
2498 		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2499 		 * ready to accept 12byte CDB for Direct IOs.
2500 		 */
2501 		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2502 		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2503 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2504 			/*
2505 			 * Get the transfer size in blocks.
2506 			 */
2507 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2508 
2509 			/*
2510 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2511 			 * LBA in the CDB depending on command.
2512 			 */
2513 			lba_idx = ((CDB[0] == READ_12) ||
2514 				(CDB[0] == WRITE_12) ||
2515 				(CDB[0] == READ_10) ||
2516 				(CDB[0] == WRITE_10))? 2 : 6;
2517 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2518 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2519 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2520 			    (uint64_t)CDB[lba_idx + 3];
2521 
2522 			/*
2523 			 * Check that LBA range for I/O does not exceed volume's
2524 			 * MaxLBA.
2525 			 */
2526 			if ((virtLBA + (uint64_t)io_size - 1) <=
2527 			    sc->DD_max_lba) {
2528 				/*
2529 				 * Check if the I/O crosses a stripe boundary.
2530 				 * If not, translate the virtual LBA to a
2531 				 * physical LBA and set the DevHandle for the
2532 				 * PhysDisk to be used.  If it does cross a
2533 				 * boundary, do normal I/O.  To get the right
2534 				 * DevHandle to use, get the map number for the
2535 				 * column, then use that map number to look up
2536 				 * the DevHandle of the PhysDisk.
2537 				 */
2538 				stripe_offset = (uint32_t)virtLBA &
2539 				    (sc->DD_stripe_size - 1);
2540 				if ((stripe_offset + io_size) <=
2541 				    sc->DD_stripe_size) {
2542 					physLBA = (uint32_t)virtLBA >>
2543 					    sc->DD_stripe_exponent;
2544 					stripe_unit = physLBA /
2545 					    sc->DD_num_phys_disks;
2546 					column = physLBA %
2547 					    sc->DD_num_phys_disks;
2548 					pIO_req->DevHandle =
2549 					    htole16(sc->DD_column_map[column].
2550 					    dev_handle);
2551 					cm->cm_desc.SCSIIO.DevHandle =
2552 					    pIO_req->DevHandle;
2553 
2554 					physLBA = (stripe_unit <<
2555 					    sc->DD_stripe_exponent) +
2556 					    stripe_offset;
2557 					ptrLBA =
2558 					    &pIO_req->CDB.CDB32[lba_idx];
2559 					physLBA_byte = (uint8_t)(physLBA >> 24);
2560 					*ptrLBA = physLBA_byte;
2561 					ptrLBA =
2562 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2563 					physLBA_byte = (uint8_t)(physLBA >> 16);
2564 					*ptrLBA = physLBA_byte;
2565 					ptrLBA =
2566 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2567 					physLBA_byte = (uint8_t)(physLBA >> 8);
2568 					*ptrLBA = physLBA_byte;
2569 					ptrLBA =
2570 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2571 					physLBA_byte = (uint8_t)physLBA;
2572 					*ptrLBA = physLBA_byte;
2573 
2574 					/*
2575 					 * Set flag that Direct Drive I/O is
2576 					 * being done.
2577 					 */
2578 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2579 				}
2580 			}
2581 		} else {
2582 			/*
2583 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2584 			 * 0.  Get the transfer size in blocks.
2585 			 */
2586 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2587 
2588 			/*
2589 			 * Get virtual LBA.
2590 			 */
2591 			virtLBA = ((uint64_t)CDB[2] << 54) |
2592 			    ((uint64_t)CDB[3] << 48) |
2593 			    ((uint64_t)CDB[4] << 40) |
2594 			    ((uint64_t)CDB[5] << 32) |
2595 			    ((uint64_t)CDB[6] << 24) |
2596 			    ((uint64_t)CDB[7] << 16) |
2597 			    ((uint64_t)CDB[8] << 8) |
2598 			    (uint64_t)CDB[9];
2599 
2600 			/*
2601 			 * Check that LBA range for I/O does not exceed volume's
2602 			 * MaxLBA.
2603 			 */
2604 			if ((virtLBA + (uint64_t)io_size - 1) <=
2605 			    sc->DD_max_lba) {
2606 				/*
2607 				 * Check if the I/O crosses a stripe boundary.
2608 				 * If not, translate the virtual LBA to a
2609 				 * physical LBA and set the DevHandle for the
2610 				 * PhysDisk to be used.  If it does cross a
2611 				 * boundary, do normal I/O.  To get the right
2612 				 * DevHandle to use, get the map number for the
2613 				 * column, then use that map number to look up
2614 				 * the DevHandle of the PhysDisk.
2615 				 */
2616 				stripe_offset = (uint32_t)virtLBA &
2617 				    (sc->DD_stripe_size - 1);
2618 				if ((stripe_offset + io_size) <=
2619 				    sc->DD_stripe_size) {
2620 					physLBA = (uint32_t)(virtLBA >>
2621 					    sc->DD_stripe_exponent);
2622 					stripe_unit = physLBA /
2623 					    sc->DD_num_phys_disks;
2624 					column = physLBA %
2625 					    sc->DD_num_phys_disks;
2626 					pIO_req->DevHandle =
2627 					    htole16(sc->DD_column_map[column].
2628 					    dev_handle);
2629 					cm->cm_desc.SCSIIO.DevHandle =
2630 					    pIO_req->DevHandle;
2631 
2632 					physLBA = (stripe_unit <<
2633 					    sc->DD_stripe_exponent) +
2634 					    stripe_offset;
2635 
2636 					/*
2637 					 * Set upper 4 bytes of LBA to 0.  We
2638 					 * assume that the phys disks are less
2639 					 * than 2 TB's in size.  Then, set the
2640 					 * lower 4 bytes.
2641 					 */
2642 					pIO_req->CDB.CDB32[2] = 0;
2643 					pIO_req->CDB.CDB32[3] = 0;
2644 					pIO_req->CDB.CDB32[4] = 0;
2645 					pIO_req->CDB.CDB32[5] = 0;
2646 					ptrLBA = &pIO_req->CDB.CDB32[6];
2647 					physLBA_byte = (uint8_t)(physLBA >> 24);
2648 					*ptrLBA = physLBA_byte;
2649 					ptrLBA = &pIO_req->CDB.CDB32[7];
2650 					physLBA_byte = (uint8_t)(physLBA >> 16);
2651 					*ptrLBA = physLBA_byte;
2652 					ptrLBA = &pIO_req->CDB.CDB32[8];
2653 					physLBA_byte = (uint8_t)(physLBA >> 8);
2654 					*ptrLBA = physLBA_byte;
2655 					ptrLBA = &pIO_req->CDB.CDB32[9];
2656 					physLBA_byte = (uint8_t)physLBA;
2657 					*ptrLBA = physLBA_byte;
2658 
2659 					/*
2660 					 * Set flag that Direct Drive I/O is
2661 					 * being done.
2662 					 */
2663 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2664 				}
2665 			}
2666 		}
2667 	}
2668 }
2669 
2670 static void
2671 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2672 {
2673 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2674 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2675 	uint64_t sasaddr;
2676 	union ccb *ccb;
2677 
2678 	ccb = cm->cm_complete_data;
2679 
2680 	/*
2681 	 * Currently there should be no way we can hit this case.  It only
2682 	 * happens when we have a failure to allocate chain frames, and SMP
2683 	 * commands require two S/G elements only.  That should be handled
2684 	 * in the standard request size.
2685 	 */
2686 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2687 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2688 			   __func__, cm->cm_flags);
2689 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2690 		goto bailout;
2691         }
2692 
2693 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2694 	if (rpl == NULL) {
2695 		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2696 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2697 		goto bailout;
2698 	}
2699 
2700 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2701 	sasaddr = le32toh(req->SASAddress.Low);
2702 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2703 
2704 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2705 	    MPI2_IOCSTATUS_SUCCESS ||
2706 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2707 		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2708 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2709 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2710 		goto bailout;
2711 	}
2712 
2713 	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2714 		   "%#jx completed successfully\n", __func__,
2715 		   (uintmax_t)sasaddr);
2716 
2717 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2718 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2719 	else
2720 		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2721 
2722 bailout:
2723 	/*
2724 	 * We sync in both directions because we had DMAs in the S/G list
2725 	 * in both directions.
2726 	 */
2727 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2728 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2729 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2730 	mps_free_command(sc, cm);
2731 	xpt_done(ccb);
2732 }
2733 
/*
 * Build an MPI2 SMP passthrough request from an XPT_SMP_IO CCB and hand it
 * to the chip.  The request and response buffers are described with a
 * two-entry uio so that a single mps_map_command() call covers the
 * bidirectional transfer; completion is handled by mpssas_smpio_complete().
 * Errors detected before the command is mapped complete the CCB here.
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
			   "%s: physical addresses not supported\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	/* Fill out the MPI2 SMP passthrough request frame. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 describes the request buffer, iovec 1 the response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;

}
2904 
/*
 * Handle an XPT_SMP_IO CCB: figure out which SAS address the SMP request
 * should be routed to (either the target itself, if it contains an SMP
 * target, or its parent — presumably the expander it is attached to) and
 * dispatch the request via mpssas_send_smpcmd().
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly; otherwise we need to figure out what the expander's
	 * address is below.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/*
		 * The parent device info and SAS address were cached on
		 * the target at discovery time; use those rather than
		 * searching for the parent target.
		 */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */
	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3037 
/*
 * Handle an XPT_RESET_DEV CCB by sending a SCSI task management target
 * reset (with the hard link reset flag) to the device's handle.  The CCB
 * is completed from mpssas_resetdev_complete().
 */
static void
mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *tm;
	struct mpssas_target *targ;

	MPS_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_RESET_DEV\n",
	     ccb->ccb_h.target_id));
	sc = sassc->sc;
	tm = mpssas_alloc_tm(sc);
	if (tm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "command alloc failure in mpssas_action_resetdev\n");
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	tm->cm_data = NULL;
	tm->cm_complete = mpssas_resetdev_complete;
	tm->cm_complete_data = ccb;
	tm->cm_targ = targ;

	/* Block further I/O to the target until the reset finishes. */
	mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
	mps_map_command(sc, tm);
}
3078 
3079 static void
3080 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3081 {
3082 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3083 	union ccb *ccb;
3084 
3085 	MPS_FUNCTRACE(sc);
3086 	mtx_assert(&sc->mps_mtx, MA_OWNED);
3087 
3088 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3089 	ccb = tm->cm_complete_data;
3090 
3091 	/*
3092 	 * Currently there should be no way we can hit this case.  It only
3093 	 * happens when we have a failure to allocate chain frames, and
3094 	 * task management commands don't have S/G lists.
3095 	 */
3096 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3097 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3098 
3099 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3100 
3101 		mps_dprint(sc, MPS_ERROR,
3102 			   "%s: cm_flags = %#x for reset of handle %#04x! "
3103 			   "This should not happen!\n", __func__, tm->cm_flags,
3104 			   req->DevHandle);
3105 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3106 		goto bailout;
3107 	}
3108 
3109 	mps_dprint(sc, MPS_XINFO,
3110 	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3111 	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3112 
3113 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3114 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
3115 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3116 		    CAM_LUN_WILDCARD);
3117 	}
3118 	else
3119 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3120 
3121 bailout:
3122 
3123 	mpssas_free_tm(sc, tm);
3124 	xpt_done(ccb);
3125 }
3126 
3127 static void
3128 mpssas_poll(struct cam_sim *sim)
3129 {
3130 	struct mpssas_softc *sassc;
3131 
3132 	sassc = cam_sim_softc(sim);
3133 
3134 	if (sassc->sc->mps_debug & MPS_TRACE) {
3135 		/* frequent debug messages during a panic just slow
3136 		 * everything down too much.
3137 		 */
3138 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3139 		sassc->sc->mps_debug &= ~MPS_TRACE;
3140 	}
3141 
3142 	mps_intr_locked(sassc->sc);
3143 }
3144 
/*
 * CAM async event callback.  Watches AC_ADVINFO_CHANGED notifications for
 * long read-capacity (RCAPLONG) updates and records per-LUN EEDP
 * (protection information) state, which the driver consults when building
 * SCSI I/O requests for EEDP-formatted LUNs.
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	mps_lock(sc);
	switch (code) {
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the LUN record for this path, if one exists yet. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* No record yet: allocate one and put it on the list. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/* Fetch the cached long read capacity data from CAM. */
		bzero(&rcap_buf, sizeof(rcap_buf));
		bzero(&cdai, sizeof(cdai));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_NONE;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/*
		 * Protection types 1 and 3 are usable; type 2 (and any
		 * unknown type) is treated as not EEDP formatted.
		 */
		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
	default:
		break;
	}
	mps_unlock(sc);
}
3247 
3248 /*
3249  * Set the INRESET flag for this target so that no I/O will be sent to
3250  * the target until the reset has completed.  If an I/O request does
3251  * happen, the devq will be frozen.  The CCB holds the path which is
3252  * used to release the devq.  The devq is released and the CCB is freed
3253  * when the TM completes.
3254  */
3255 void
3256 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3257     struct mpssas_target *target, lun_id_t lun_id)
3258 {
3259 	union ccb *ccb;
3260 	path_id_t path_id;
3261 
3262 	ccb = xpt_alloc_ccb_nowait();
3263 	if (ccb) {
3264 		path_id = cam_sim_path(sc->sassc->sim);
3265 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3266 		    target->tid, lun_id) != CAM_REQ_CMP) {
3267 			xpt_free_ccb(ccb);
3268 		} else {
3269 			tm->cm_ccb = ccb;
3270 			tm->cm_targ = target;
3271 			target->flags |= MPSSAS_TARGET_INRESET;
3272 		}
3273 	}
3274 }
3275 
3276 int
3277 mpssas_startup(struct mps_softc *sc)
3278 {
3279 
3280 	/*
3281 	 * Send the port enable message and set the wait_for_port_enable flag.
3282 	 * This flag helps to keep the simq frozen until all discovery events
3283 	 * are processed.
3284 	 */
3285 	sc->wait_for_port_enable = 1;
3286 	mpssas_send_portenable(sc);
3287 	return (0);
3288 }
3289 
3290 static int
3291 mpssas_send_portenable(struct mps_softc *sc)
3292 {
3293 	MPI2_PORT_ENABLE_REQUEST *request;
3294 	struct mps_command *cm;
3295 
3296 	MPS_FUNCTRACE(sc);
3297 
3298 	if ((cm = mps_alloc_command(sc)) == NULL)
3299 		return (EBUSY);
3300 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3301 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3302 	request->MsgFlags = 0;
3303 	request->VP_ID = 0;
3304 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3305 	cm->cm_complete = mpssas_portenable_complete;
3306 	cm->cm_data = NULL;
3307 	cm->cm_sge = NULL;
3308 
3309 	mps_map_command(sc, cm);
3310 	mps_dprint(sc, MPS_XINFO,
3311 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3312 	    cm, cm->cm_req, cm->cm_complete);
3313 	return (0);
3314 }
3315 
3316 static void
3317 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3318 {
3319 	MPI2_PORT_ENABLE_REPLY *reply;
3320 	struct mpssas_softc *sassc;
3321 
3322 	MPS_FUNCTRACE(sc);
3323 	sassc = sc->sassc;
3324 
3325 	/*
3326 	 * Currently there should be no way we can hit this case.  It only
3327 	 * happens when we have a failure to allocate chain frames, and
3328 	 * port enable commands don't have S/G lists.
3329 	 */
3330 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3331 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3332 			   "This should not happen!\n", __func__, cm->cm_flags);
3333 	}
3334 
3335 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3336 	if (reply == NULL)
3337 		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3338 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3339 	    MPI2_IOCSTATUS_SUCCESS)
3340 		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3341 
3342 	mps_free_command(sc, cm);
3343 
3344 	/*
3345 	 * Get WarpDrive info after discovery is complete but before the scan
3346 	 * starts.  At this point, all devices are ready to be exposed to the
3347 	 * OS.  If devices should be hidden instead, take them out of the
3348 	 * 'targets' array before the scan.  The devinfo for a disk will have
3349 	 * some info and a volume's will be 0.  Use that to remove disks.
3350 	 */
3351 	mps_wd_config_pages(sc);
3352 
3353 	/*
3354 	 * Done waiting for port enable to complete.  Decrement the refcount.
3355 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3356 	 * take place.  Since the simq was explicitly frozen before port
3357 	 * enable, it must be explicitly released here to keep the
3358 	 * freeze/release count in sync.
3359 	 */
3360 	sc->wait_for_port_enable = 0;
3361 	sc->port_enable_complete = 1;
3362 	wakeup(&sc->port_enable_complete);
3363 	mpssas_startup_decrement(sassc);
3364 }
3365 
3366 int
3367 mpssas_check_id(struct mpssas_softc *sassc, int id)
3368 {
3369 	struct mps_softc *sc = sassc->sc;
3370 	char *ids;
3371 	char *name;
3372 
3373 	ids = &sc->exclude_ids[0];
3374 	while((name = strsep(&ids, ",")) != NULL) {
3375 		if (name[0] == '\0')
3376 			continue;
3377 		if (strtol(name, NULL, 0) == (long)id)
3378 			return (1);
3379 	}
3380 
3381 	return (0);
3382 }
3383 
3384 void
3385 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3386 {
3387 	struct mpssas_softc *sassc;
3388 	struct mpssas_lun *lun, *lun_tmp;
3389 	struct mpssas_target *targ;
3390 	int i;
3391 
3392 	sassc = sc->sassc;
3393 	/*
3394 	 * The number of targets is based on IOC Facts, so free all of
3395 	 * the allocated LUNs for each target and then the target buffer
3396 	 * itself.
3397 	 */
3398 	for (i=0; i< maxtargets; i++) {
3399 		targ = &sassc->targets[i];
3400 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3401 			free(lun, M_MPT2);
3402 		}
3403 	}
3404 	free(sassc->targets, M_MPT2);
3405 
3406 	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3407 	    M_MPT2, M_WAITOK|M_ZERO);
3408 }
3409