xref: /freebsd/sys/dev/mps/mps_sas.c (revision 2ff63af9b88c7413b7d71715b5532625752a248e)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2009 Yahoo! Inc.
5  * Copyright (c) 2011-2015 LSI Corp.
6  * Copyright (c) 2013-2015 Avago Technologies
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /* Communications core for Avago Technologies (LSI) MPT2 */
37 
38 /* TODO Move headers to mpsvar */
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/selinfo.h>
44 #include <sys/module.h>
45 #include <sys/bus.h>
46 #include <sys/conf.h>
47 #include <sys/bio.h>
48 #include <sys/malloc.h>
49 #include <sys/uio.h>
50 #include <sys/sysctl.h>
51 #include <sys/endian.h>
52 #include <sys/queue.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
55 #include <sys/sbuf.h>
56 
57 #include <machine/bus.h>
58 #include <machine/resource.h>
59 #include <sys/rman.h>
60 
61 #include <machine/stdarg.h>
62 
63 #include <cam/cam.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_xpt.h>
66 #include <cam/cam_debug.h>
67 #include <cam/cam_sim.h>
68 #include <cam/cam_xpt_sim.h>
69 #include <cam/cam_xpt_periph.h>
70 #include <cam/cam_periph.h>
71 #include <cam/scsi/scsi_all.h>
72 #include <cam/scsi/scsi_message.h>
73 #include <cam/scsi/smp_all.h>
74 
75 #include <dev/mps/mpi/mpi2_type.h>
76 #include <dev/mps/mpi/mpi2.h>
77 #include <dev/mps/mpi/mpi2_ioc.h>
78 #include <dev/mps/mpi/mpi2_sas.h>
79 #include <dev/mps/mpi/mpi2_cnfg.h>
80 #include <dev/mps/mpi/mpi2_init.h>
81 #include <dev/mps/mpi/mpi2_tool.h>
82 #include <dev/mps/mps_ioctl.h>
83 #include <dev/mps/mpsvar.h>
84 #include <dev/mps/mps_table.h>
85 #include <dev/mps/mps_sas.h>
86 
87 /*
88  * static array to check SCSI OpCode for EEDP protection bits
89  */
90 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
91 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
92 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
93 static uint8_t op_code_prot[256] = {
94 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
95 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
96 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
97 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
98 	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
103 	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
105 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
110 };
111 
112 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
113 
114 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
115 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
116 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
117 static void mpssas_poll(struct cam_sim *sim);
118 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
119     struct mps_command *cm);
120 static void mpssas_scsiio_timeout(void *data);
121 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
122 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
123     struct mps_command *cm, union ccb *ccb);
124 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
125 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
126 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
127 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
128 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
129 			       uint64_t sasaddr);
130 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
131 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
132 static void mpssas_async(void *callback_arg, uint32_t code,
133 			 struct cam_path *path, void *arg);
134 static int mpssas_send_portenable(struct mps_softc *sc);
135 static void mpssas_portenable_complete(struct mps_softc *sc,
136     struct mps_command *cm);
137 
138 struct mpssas_target *
139 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
140 {
141 	struct mpssas_target *target;
142 	int i;
143 
144 	for (i = start; i < sassc->maxtargets; i++) {
145 		target = &sassc->targets[i];
146 		if (target->handle == handle)
147 			return (target);
148 	}
149 
150 	return (NULL);
151 }
152 
153 /* we need to freeze the simq during attach and diag reset, to avoid failing
154  * commands before device handles have been found by discovery.  Since
155  * discovery involves reading config pages and possibly sending commands,
156  * discovery actions may continue even after we receive the end of discovery
157  * event, so refcount discovery actions instead of assuming we can unfreeze
158  * the simq when we get the event.
159  */
160 void
161 mpssas_startup_increment(struct mpssas_softc *sassc)
162 {
163 	MPS_FUNCTRACE(sassc->sc);
164 
165 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
166 		if (sassc->startup_refcount++ == 0) {
167 			/* just starting, freeze the simq */
168 			mps_dprint(sassc->sc, MPS_INIT,
169 			    "%s freezing simq\n", __func__);
170 			xpt_hold_boot();
171 			xpt_freeze_simq(sassc->sim, 1);
172 		}
173 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
174 		    sassc->startup_refcount);
175 	}
176 }
177 
178 void
179 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
180 {
181 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
182 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
183 		xpt_release_simq(sassc->sim, 1);
184 		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
185 	}
186 }
187 
188 void
189 mpssas_startup_decrement(struct mpssas_softc *sassc)
190 {
191 	MPS_FUNCTRACE(sassc->sc);
192 
193 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
194 		if (--sassc->startup_refcount == 0) {
195 			/* finished all discovery-related actions, release
196 			 * the simq and rescan for the latest topology.
197 			 */
198 			mps_dprint(sassc->sc, MPS_INIT,
199 			    "%s releasing simq\n", __func__);
200 			sassc->flags &= ~MPSSAS_IN_STARTUP;
201 			xpt_release_simq(sassc->sim, 1);
202 			xpt_release_boot();
203 		}
204 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
205 		    sassc->startup_refcount);
206 	}
207 }
208 
209 /*
210  * The firmware requires us to stop sending commands when we're doing task
211  * management.
212  * XXX The logic for serializing the device has been made lazy and moved to
213  * mpssas_prepare_for_tm().
214  */
215 struct mps_command *
216 mpssas_alloc_tm(struct mps_softc *sc)
217 {
218 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
219 	struct mps_command *tm;
220 
221 	tm = mps_alloc_high_priority_command(sc);
222 	if (tm == NULL)
223 		return (NULL);
224 
225 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
226 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
227 	return tm;
228 }
229 
230 void
231 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
232 {
233 	if (tm == NULL)
234 		return;
235 
236 	/*
237 	 * For TM's the devq is frozen for the device.  Unfreeze it here and
238 	 * free the resources used for freezing the devq.  Must clear the
239 	 * INRESET flag as well or scsi I/O will not work.
240 	 */
241 	if (tm->cm_ccb) {
242 		mps_dprint(sc, MPS_XINFO | MPS_RECOVERY,
243 		    "Unfreezing devq for target ID %d\n",
244 		    tm->cm_targ->tid);
245 		tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
246 		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
247 		xpt_free_path(tm->cm_ccb->ccb_h.path);
248 		xpt_free_ccb(tm->cm_ccb);
249 	}
250 
251 	mps_free_high_priority_command(sc, tm);
252 }
253 
254 void
255 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
256 {
257 	struct mpssas_softc *sassc = sc->sassc;
258 	path_id_t pathid;
259 	target_id_t targetid;
260 	union ccb *ccb;
261 
262 	MPS_FUNCTRACE(sc);
263 	pathid = cam_sim_path(sassc->sim);
264 	if (targ == NULL)
265 		targetid = CAM_TARGET_WILDCARD;
266 	else
267 		targetid = targ - sassc->targets;
268 
269 	/*
270 	 * Allocate a CCB and schedule a rescan.
271 	 */
272 	ccb = xpt_alloc_ccb_nowait();
273 	if (ccb == NULL) {
274 		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
275 		return;
276 	}
277 
278 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
279 	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
280 		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
281 		xpt_free_ccb(ccb);
282 		return;
283 	}
284 
285 	if (targetid == CAM_TARGET_WILDCARD)
286 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
287 	else
288 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
289 
290 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
291 	xpt_rescan(ccb);
292 }
293 
/*
 * Format and emit a debug log line for a command: the CAM path (or a
 * "noperiph" placeholder when no CCB is attached), the CDB and transfer
 * length for SCSI I/O, the SMID, and finally the caller-supplied
 * printf-style message.  Emits nothing unless 'level' is enabled in
 * the adapter's debug mask.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[224];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	/* Fixed-length sbuf backed by the on-stack buffer. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			/* For SCSI I/O, include the decoded CDB and length. */
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: identify the command by sim/bus/target/lun. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
339 
340 static void
341 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
342 {
343 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
344 	struct mpssas_target *targ;
345 	uint16_t handle;
346 
347 	MPS_FUNCTRACE(sc);
348 
349 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
350 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
351 	targ = tm->cm_targ;
352 
353 	if (reply == NULL) {
354 		/* XXX retry the remove after the diag reset completes? */
355 		mps_dprint(sc, MPS_FAULT,
356 		    "%s NULL reply resetting device 0x%04x\n", __func__,
357 		    handle);
358 		mpssas_free_tm(sc, tm);
359 		return;
360 	}
361 
362 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
363 	    MPI2_IOCSTATUS_SUCCESS) {
364 		mps_dprint(sc, MPS_ERROR,
365 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
366 		   le16toh(reply->IOCStatus), handle);
367 	}
368 
369 	mps_dprint(sc, MPS_XINFO,
370 	    "Reset aborted %u commands\n", reply->TerminationCount);
371 	mps_free_reply(sc, tm->cm_reply_data);
372 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
373 
374 	mps_dprint(sc, MPS_XINFO,
375 	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
376 
377 	/*
378 	 * Don't clear target if remove fails because things will get confusing.
379 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
380 	 * this target id if possible, and so we can assign the same target id
381 	 * to this device if it comes back in the future.
382 	 */
383 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
384 	    MPI2_IOCSTATUS_SUCCESS) {
385 		targ = tm->cm_targ;
386 		targ->handle = 0x0;
387 		targ->encl_handle = 0x0;
388 		targ->encl_slot = 0x0;
389 		targ->exp_dev_handle = 0x0;
390 		targ->phy_num = 0x0;
391 		targ->linkrate = 0x0;
392 		targ->devinfo = 0x0;
393 		targ->flags = 0x0;
394 	}
395 
396 	mpssas_free_tm(sc, tm);
397 }
398 
399 /*
400  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
401  * Otherwise Volume Delete is same as Bare Drive Removal.
402  */
403 void
404 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
405 {
406 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
407 	struct mps_softc *sc;
408 	struct mps_command *tm;
409 	struct mpssas_target *targ = NULL;
410 
411 	MPS_FUNCTRACE(sassc->sc);
412 	sc = sassc->sc;
413 
414 #ifdef WD_SUPPORT
415 	/*
416 	 * If this is a WD controller, determine if the disk should be exposed
417 	 * to the OS or not.  If disk should be exposed, return from this
418 	 * function without doing anything.
419 	 */
420 	if (sc->WD_available && (sc->WD_hide_expose ==
421 	    MPS_WD_EXPOSE_ALWAYS)) {
422 		return;
423 	}
424 #endif //WD_SUPPORT
425 
426 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
427 	if (targ == NULL) {
428 		/* FIXME: what is the action? */
429 		/* We don't know about this device? */
430 		mps_dprint(sc, MPS_ERROR,
431 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
432 		return;
433 	}
434 
435 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
436 
437 	tm = mpssas_alloc_tm(sc);
438 	if (tm == NULL) {
439 		mps_dprint(sc, MPS_ERROR,
440 		    "%s: command alloc failure\n", __func__);
441 		return;
442 	}
443 
444 	mpssas_rescan_target(sc, targ);
445 
446 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
447 	req->DevHandle = targ->handle;
448 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
449 
450 	/* SAS Hard Link Reset / SATA Link Reset */
451 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
452 
453 	tm->cm_targ = targ;
454 	tm->cm_data = NULL;
455 	tm->cm_complete = mpssas_remove_volume;
456 	tm->cm_complete_data = (void *)(uintptr_t)handle;
457 
458 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
459 	    __func__, targ->tid);
460 	mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
461 
462 	mps_map_command(sc, tm);
463 }
464 
465 /*
466  * The MPT2 firmware performs debounce on the link to avoid transient link
 * errors and false removals.  When it does decide that a link has been lost
 * and a device needs to go away, it expects that the host will perform a
469  * target reset and then an op remove.  The reset has the side-effect of
470  * aborting any outstanding requests for the device, which is required for
471  * the op-remove to succeed.  It's not clear if the host should check for
472  * the device coming back alive after the reset.
473  */
474 void
475 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
476 {
477 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
478 	struct mps_softc *sc;
479 	struct mps_command *cm;
480 	struct mpssas_target *targ = NULL;
481 
482 	MPS_FUNCTRACE(sassc->sc);
483 
484 	sc = sassc->sc;
485 
486 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
487 	if (targ == NULL) {
488 		/* FIXME: what is the action? */
489 		/* We don't know about this device? */
490 		mps_dprint(sc, MPS_ERROR,
491 		    "%s : invalid handle 0x%x \n", __func__, handle);
492 		return;
493 	}
494 
495 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
496 
497 	cm = mpssas_alloc_tm(sc);
498 	if (cm == NULL) {
499 		mps_dprint(sc, MPS_ERROR,
500 		    "%s: command alloc failure\n", __func__);
501 		return;
502 	}
503 
504 	mpssas_rescan_target(sc, targ);
505 
506 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
507 	req->DevHandle = htole16(targ->handle);
508 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
509 
510 	/* SAS Hard Link Reset / SATA Link Reset */
511 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
512 
513 	cm->cm_targ = targ;
514 	cm->cm_data = NULL;
515 	cm->cm_complete = mpssas_remove_device;
516 	cm->cm_complete_data = (void *)(uintptr_t)handle;
517 
518 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
519 	    __func__, targ->tid);
520 	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
521 
522 	mps_map_command(sc, cm);
523 }
524 
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_remove().  Reuses the same command to send the
 * SAS_IO_UNIT_CONTROL / REMOVE_DEVICE operation: immediately if the
 * target has no outstanding commands, otherwise deferred via
 * targ->pending_remove_tm until the last one clears.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	/* The device handle was stashed in cm_complete_data at submit time. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply resetting device 0x%04x\n", __func__,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_ERROR,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command for the follow-up REMOVE_DEVICE op. */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	/*
	 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
	 * Any still outstanding will be aborted or will time out, and the
	 * remove will be kicked off from those completion paths.
	 */
	if (TAILQ_FIRST(&targ->commands) == NULL) {
		mps_dprint(sc, MPS_INFO,
		    "No pending commands: starting remove_device target %u handle 0x%04x\n",
		    targ->tid, handle);
		mps_map_command(sc, tm);
		targ->pending_remove_tm = NULL;
	} else {
		targ->pending_remove_tm = tm;
	}

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
}
601 
/*
 * Completion handler for the SAS_IO_UNIT_CONTROL / REMOVE_DEVICE request
 * issued by mpssas_remove_device().  On firmware success, clears the
 * target's identity fields (leaving devname/sasaddr so the target ID can
 * be re-used if the device returns) and frees its LUN list.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	/* The device handle was stashed in cm_complete_data at submit time. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * At this point, we should have no pending commands for the target.
	 * The remove target has just completed.
	 */
	KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
	    ("%s: no commands should be pending\n", __func__));

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Release per-LUN state now that the device is gone. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}

	mpssas_free_tm(sc, tm);
}
675 
676 static int
677 mpssas_register_events(struct mps_softc *sc)
678 {
679 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
680 
681 	bzero(events, 16);
682 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
683 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
684 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
685 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
686 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
687 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
688 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
689 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
690 	setbit(events, MPI2_EVENT_IR_VOLUME);
691 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
692 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
693 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
694 
695 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
696 	    &sc->sassc->mpssas_eh);
697 
698 	return (0);
699 }
700 
/*
 * Allocate and initialize the SAS subsystem state: the target table,
 * CAM devq and SIM, the firmware-event taskqueue, the bus registration,
 * and the async-event path used to learn EEDP capabilities.  Returns 0
 * on success or an errno; on failure, partially-created state is torn
 * down via mps_detach_sas().
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);

	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	sc->sassc = sassc;
	sassc->sc = sc;

	/* SIMQ depth excludes the high-priority requests plus one reserve. */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_INIT|MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_INIT|MPS_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mpssas_startup_increment(sassc);

	mps_unlock(sc);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_ERROR|MPS_INIT,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

		event = AC_ADVINFO_CHANGED;
		status = xpt_register_async(event, mpssas_async, sc,
					    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);

	mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
	return (error);
}
816 
/*
 * Tear down everything mps_attach_sas() created: event registration,
 * the event taskqueue, the async path, the CAM bus/SIM/devq, per-target
 * LUN lists, and the sassc itself.  Tolerates a partially-attached
 * instance (NULL checks throughout).  Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Drop any discovery references still held at detach time. */
	while (sassc->startup_refcount != 0)
		mpssas_startup_decrement(sassc);

	/* If startup never finished, the simq may still be frozen. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any LUN bookkeeping left on the targets. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
879 
880 void
881 mpssas_discovery_end(struct mpssas_softc *sassc)
882 {
883 	struct mps_softc *sc = sassc->sc;
884 
885 	MPS_FUNCTRACE(sc);
886 
887 	/*
888 	 * After discovery has completed, check the mapping table for any
889 	 * missing devices and update their missing counts. Only do this once
890 	 * whenever the driver is initialized so that missing counts aren't
891 	 * updated unnecessarily. Note that just because discovery has
892 	 * completed doesn't mean that events have been processed yet. The
893 	 * check_devices function is a callout timer that checks if ALL devices
894 	 * are missing. If so, it will wait a little longer for events to
895 	 * complete and keep resetting itself until some device in the mapping
896 	 * table is not missing, meaning that event processing has started.
897 	 */
898 	if (sc->track_mapping_events) {
899 		mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
900 		    "completed. Check for missing devices in the mapping "
901 		    "table.\n");
902 		callout_reset(&sc->device_check_callout,
903 		    MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
904 		    sc);
905 	}
906 }
907 
/*
 * CAM action entry point for the SIM.  Dispatches the CCB on its
 * function code.  CCBs handled inline fall through to xpt_done() at the
 * bottom; SCSI I/O, SMP I/O, and device reset are handed off to helpers
 * that complete the CCB themselves (note the early returns).
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report controller capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mps_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A zero handle means no device is present at this ID. */
		if (targ->handle == 0x0) {
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Translate the stored link rate code to a bitrate in kb/s. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			/* Unknown rate code: don't report a speed. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		/* Helper completes the CCB; skip xpt_done() below. */
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Helper completes the CCB; skip xpt_done() below. */
		mpssas_action_scsiio(sassc, ccb);
		return;
	case XPT_SMP_IO:
		/* Helper completes the CCB; skip xpt_done() below. */
		mpssas_action_smpio(sassc, ccb);
		return;
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1027 
1028 static void
1029 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1030     target_id_t target_id, lun_id_t lun_id)
1031 {
1032 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1033 	struct cam_path *path;
1034 
1035 	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1036 	    ac_code, target_id, (uintmax_t)lun_id);
1037 
1038 	if (xpt_create_path(&path, NULL,
1039 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1040 		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1041 			   "notification\n");
1042 		return;
1043 	}
1044 
1045 	xpt_async(ac_code, path, NULL);
1046 	xpt_free_path(path);
1047 }
1048 
/*
 * Force-complete every in-flight command after a diag reset.  The
 * hardware will never return these, so each active command is completed
 * with a NULL reply: either its completion callback is invoked or a
 * sleeper waiting on it is woken.  Must be called with the controller
 * mutex held.
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPS_CM_STATE_FREE)
			continue;

		cm->cm_state = MPS_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/*
		 * Free the driver-owned buffer attached to a SATA ID
		 * command that previously timed out; it is no longer
		 * associated with a requester.
		 */
		if (cm->cm_flags & MPS_CM_FLAGS_SATA_ID_TIMEOUT) {
			MPASS(cm->cm_data);
			free(cm->cm_data, M_MPT2);
			cm->cm_data = NULL;
		}

		/* Let pollers see the command as done. */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	sc->io_cmds_active = 0;
}
1104 
/*
 * Prepare the SAS layer for a controller reinitialization (diag reset):
 * re-enter startup/discovery mode, notify CAM of the bus reset, flush
 * all outstanding commands, and invalidate every cached target handle
 * so targets are rediscovered with their post-reset handles.
 */
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	mpssas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_dprint(sc, MPS_INIT,
	    "%s startup %u after command completion\n", __func__,
	    sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1148 
1149 static void
1150 mpssas_tm_timeout(void *data)
1151 {
1152 	struct mps_command *tm = data;
1153 	struct mps_softc *sc = tm->cm_sc;
1154 
1155 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1156 
1157 	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1158 	    "task mgmt %p timed out\n", tm);
1159 
1160 	KASSERT(tm->cm_state == MPS_CM_STATE_INQUEUE,
1161 	    ("command not inqueue, state = %u\n", tm->cm_state));
1162 
1163 	tm->cm_state = MPS_CM_STATE_BUSY;
1164 	mps_reinit(sc);
1165 }
1166 
/*
 * Completion handler for a LUN reset task management command.  If no
 * commands remain outstanding on the LUN, recovery succeeded: announce
 * the reset to CAM and either move on to the next timed-out command on
 * this target or release the TM.  If commands remain, the reset
 * effectively failed and recovery escalates to a target reset.  A NULL
 * reply means either a diag reset consumed it (cleanup) or the firmware
 * is unresponsive (reinit the controller).
 */
static void
mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	unsigned int cm_count = 0;
	struct mps_command *cm;
	struct mpssas_target *targ;

	/* Cancel the TM timeout now that the TM has completed. */
	callout_stop(&tm->cm_callout);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 * XXXSL So should it be an assertion?
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_RECOVERY|MPS_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mps_dprint(sc, MPS_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mps_reinit(sc);
		}
		return;
	}

	mps_dprint(sc, MPS_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		mpssas_announce_reset(sc, AC_SENT_BDR, targ->tid, tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
			    "More commands to abort for target %u\n",
			    targ->tid);
			/* Reuse the TM for the next abort. */
			mpssas_send_abort(sc, tm, cm);
		} else {
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
	} else {
		/*
		 * If we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
	}
}
1264 
1265 static void
1266 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1267 {
1268 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1269 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1270 	struct mpssas_target *targ;
1271 
1272 	callout_stop(&tm->cm_callout);
1273 
1274 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1275 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1276 	targ = tm->cm_targ;
1277 
1278 	/*
1279 	 * Currently there should be no way we can hit this case.  It only
1280 	 * happens when we have a failure to allocate chain frames, and
1281 	 * task management commands don't have S/G lists.
1282 	 */
1283 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1284 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1285 			   "This should not happen!\n", __func__, tm->cm_flags);
1286 		mpssas_free_tm(sc, tm);
1287 		return;
1288 	}
1289 
1290 	if (reply == NULL) {
1291 		mps_dprint(sc, MPS_RECOVERY,
1292 		    "NULL target reset reply for tm %pi TaskMID %u\n",
1293 		    tm, le16toh(req->TaskMID));
1294 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1295 			/* this completion was due to a reset, just cleanup */
1296 			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1297 			    "reset, ignoring NULL target reset reply\n");
1298 			targ->tm = NULL;
1299 			mpssas_free_tm(sc, tm);
1300 		} else {
1301 			/* we should have gotten a reply. */
1302 			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1303 			    "target reset attempt, resetting controller\n");
1304 			mps_reinit(sc);
1305 		}
1306 		return;
1307 	}
1308 
1309 	mps_dprint(sc, MPS_RECOVERY,
1310 	    "target reset status 0x%x code 0x%x count %u\n",
1311 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1312 	    le32toh(reply->TerminationCount));
1313 
1314 	if (targ->outstanding == 0) {
1315 		/* we've finished recovery for this target and all
1316 		 * of its logical units.
1317 		 */
1318 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1319 		    "Finished reset recovery for target %u\n", targ->tid);
1320 
1321 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1322 		    CAM_LUN_WILDCARD);
1323 
1324 		targ->tm = NULL;
1325 		mpssas_free_tm(sc, tm);
1326 	} else {
1327 		/*
1328 		 * After a target reset, if this target still has
1329 		 * outstanding commands, the reset effectively failed,
1330 		 * regardless of the status reported.  escalate.
1331 		 */
1332 		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1333 		    "Target reset complete for target %u, but still have %u "
1334 		    "command(s), resetting controller\n", targ->tid,
1335 		    targ->outstanding);
1336 		mps_reinit(sc);
1337 	}
1338 }
1339 
1340 #define MPS_RESET_TIMEOUT 30
1341 
/*
 * Issue a task management reset of the given type (logical unit reset
 * or target reset) for the target associated with the TM command.
 * Arms a timeout callout that escalates to a controller reinit if the
 * TM itself never completes.  Returns 0 on successful submission or
 * -1 / the mps_map_command() error on failure.
 */
int
mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *target;
	int err;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
		    __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPS_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mpssas_logical_unit_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 * 	SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mpssas_target_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	} else {
		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* TMs carry no data; completion routine gets the TM itself. */
	tm->cm_data = NULL;
	tm->cm_complete_data = (void *)tm;

	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
	    mpssas_tm_timeout, tm);

	err = mps_map_command(sc, tm);
	if (err)
		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
		    "error %d sending reset type %u\n",
		    err, type);

	return err;
}
1399 
/*
 * Completion handler for an ABORT_TASK task management command.  If the
 * target's timed-out queue is empty, recovery is finished and the TM is
 * released.  If another timed-out command is queued, the TM is reused
 * to abort it.  If the command we just tried to abort is still at the
 * head of the queue, the abort failed and recovery escalates to a
 * logical unit reset.  A NULL reply means either a diag reset consumed
 * it (cleanup) or the firmware is unresponsive (reinit the controller).
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* Cancel the TM timeout now that the TM has completed. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mps_dprint(sc, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mps_reinit(sc);
		}
		return;
	}

	mps_dprint(sc, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * If there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);

		mpssas_send_abort(sc, tm, cm);
	} else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mps_dprint(sc, MPS_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1481 
1482 #define MPS_ABORT_TIMEOUT 5
1483 
1484 static int
1485 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1486 {
1487 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1488 	struct mpssas_target *targ;
1489 	int err;
1490 
1491 	targ = cm->cm_targ;
1492 	if (targ->handle == 0) {
1493 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1494 		    "%s null devhandle for target_id %d\n",
1495 		    __func__, cm->cm_ccb->ccb_h.target_id);
1496 		return -1;
1497 	}
1498 
1499 	mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1500 	    "Aborting command %p\n", cm);
1501 
1502 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1503 	req->DevHandle = htole16(targ->handle);
1504 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1505 
1506 	/* XXX Need to handle invalid LUNs */
1507 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1508 
1509 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1510 
1511 	tm->cm_data = NULL;
1512 	tm->cm_complete = mpssas_abort_complete;
1513 	tm->cm_complete_data = (void *)tm;
1514 	tm->cm_targ = cm->cm_targ;
1515 	tm->cm_lun = cm->cm_lun;
1516 
1517 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1518 	    mpssas_tm_timeout, tm);
1519 
1520 	targ->aborts++;
1521 
1522 	mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1523 
1524 	err = mps_map_command(sc, tm);
1525 	if (err)
1526 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1527 		    "error %d sending abort for cm %p SMID %u\n",
1528 		    err, cm, req->TaskMID);
1529 	return err;
1530 }
1531 
/*
 * Callout handler fired when a SCSI I/O command exceeds its CCB timeout.
 * First re-runs the interrupt handler in case the completion is merely
 * pending.  A genuinely timed-out command is marked CAM_CMD_TIMEOUT and
 * queued on the target's timedout list; if no recovery is already in
 * progress for the target, a TM command is allocated and an abort is
 * started.  Runs with the controller mutex held.
 */
static void
mpssas_scsiio_timeout(void *data)
{
	sbintime_t elapsed, now;
	union ccb *ccb;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ;

	cm = (struct mps_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
	now = sbinuptime();

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	mps_dprint(sc, MPS_XINFO|MPS_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mps_intr_locked(sc);
	if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
		mpssas_log_command(cm, MPS_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	/* qos.sim_data was stamped with sbinuptime() at submission. */
	elapsed = now - ccb->ccb_h.qos.sim_data;
	mpssas_log_command(cm, MPS_INFO|MPS_RECOVERY,
	    "Command timeout on target %u(0x%04x) %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_flags |= MPS_CM_FLAGS_ON_RECOVERY | MPS_CM_FLAGS_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mps_dprint(sc, MPS_RECOVERY,
		    "queued timedout cm %p for processing by tm %p\n",
		    cm, targ->tm);
	} else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mpssas_send_abort(sc, targ->tm, cm);
	} else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}

}
1615 
/*
 * Handle an XPT_SCSI_IO CCB: validate the target, allocate a command,
 * build the MPI2 SCSI_IO request (direction, tagging, LUN, CDB, and
 * optional EEDP protection fields), arm the per-command timeout, and
 * submit it to the hardware.  CCBs that cannot be issued are completed
 * immediately with an appropriate status.  Called with the controller
 * mutex held.
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	if (targ->handle == 0x0) {
		/*
		 * During a diag reset handles are zeroed but the target
		 * will come back; requeue instead of failing the device.
		 */
		if (targ->flags & MPSSAS_TARGET_INDIAGRESET) {
			mps_dprint(sc, MPS_ERROR,
			    "%s NULL handle for target %u in diag reset freezing queue\n",
			    __func__, csio->ccb_h.target_id);
			ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			xpt_done(ccb);
			return;
		}
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed. In either event,
	 * we're removing the target due to a firmware event telling us
	 * the device is now gone (as opposed to some transient event). Since
	 * we're opting to remove failed devices from the OS's view, we need
	 * to propagate that status up the stack.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, the devq should be frozen.
	 * Geting here we likely hit a race, so just requeue.
	 */
	if (targ->flags & MPSSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
		mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * Out of commands (or diag reset in progress): freeze the simq
	 * and ask CAM to resubmit later.
	 */
	cm = mps_alloc_command(sc);
	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mps_free_command(sc, cm);
		}
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Build the MPI2 SCSI_IO request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= (csio->priority << MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT) &
	    MPI2_SCSIIO_CONTROL_TASKPRI_MASK;
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		/* S/G list is built from the CCB at map time. */
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (csio->bio != NULL)
		biotrack(csio->bio, __func__);
#endif
	/* Stamp submission time for elapsed-time reporting on timeout. */
	csio->ccb_h.qos.sim_data = sbinuptime();
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mpssas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1904 
1905 /**
1906  * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1907  */
1908 static void
1909 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1910     Mpi2SCSIIOReply_t *mpi_reply)
1911 {
1912 	u32 response_info;
1913 	u8 *response_bytes;
1914 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1915 	    MPI2_IOCSTATUS_MASK;
1916 	u8 scsi_state = mpi_reply->SCSIState;
1917 	u8 scsi_status = mpi_reply->SCSIStatus;
1918 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1919 	const char *desc_ioc_state, *desc_scsi_status;
1920 
1921 	if (log_info == 0x31170000)
1922 		return;
1923 
1924 	desc_ioc_state = mps_describe_table(mps_iocstatus_string,
1925 	    ioc_status);
1926 	desc_scsi_status = mps_describe_table(mps_scsi_status_string,
1927 	    scsi_status);
1928 
1929 	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
1930 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
1931 
1932 	/*
1933 	 *We can add more detail about underflow data here
1934 	 * TO-DO
1935 	 */
1936 	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
1937 	    "scsi_state %b\n", desc_scsi_status, scsi_status,
1938 	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
1939 	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
1940 
1941 	if (sc->mps_debug & MPS_XINFO &&
1942 		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1943 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
1944 		scsi_sense_print(csio);
1945 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
1946 	}
1947 
1948 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
1949 		response_info = le32toh(mpi_reply->ResponseInfo);
1950 		response_bytes = (u8 *)&response_info;
1951 		mps_dprint(sc, MPS_XINFO, "response code(0x%1x): %s\n",
1952 		    response_bytes[0],
1953 		    mps_describe_table(mps_scsi_taskmgmt_string,
1954 		    response_bytes[0]));
1955 	}
1956 }
1957 
/*
 * Completion handler for SCSI I/O commands.  Runs with the controller
 * mutex held.  Syncs and unloads the data DMA map, removes the command
 * from the target's queue, translates the IOC/SCSI reply into a CAM
 * ccb status (handling recovery logging, WarpDrive direct-I/O retries
 * and deferred device removal along the way), then frees the command
 * and completes the CCB with xpt_done().
 */
static void
mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpssas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;
	struct mpssas_target *target;
	target_id_t target_id;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* The command completed one way or another; cancel its timeout. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Account for the completion and drop it from the target's queue. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (ccb->csio.bio != NULL)
		biotrack(ccb->csio.bio, __func__);
#endif

	/*
	 * Recovery bookkeeping/logging: pull a timed-out command off the
	 * recovery queue, or note completions that raced with a task
	 * management operation or a diag reset.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		KASSERT(cm->cm_state == MPS_CM_STATE_BUSY,
		    ("Not busy for CM_FLAGS_TIMEDOUT: %u\n", cm->cm_state));
		cm->cm_flags &= ~MPS_CM_FLAGS_ON_RECOVERY;
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm, MPS_RECOVERY,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_XINFO, "Error sending command, "
				   "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mpssas_log_command(cm, MPS_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	} else
		ccb->ccb_h.sim_priv.entries[0].field = 0;

	/* Translate the IOC status from the reply into a CAM ccb status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) &
		    MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		/* Copy the returned autosense data into the CCB. */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
					returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			      sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}

		/*
		 * If this is a SATA direct-access end device, mark it so that
		 * a SCSI StartStopUnit command will be sent to it when the
		 * driver is being shutdown.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
		    ((sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
			target = &sassc->targets[target_id];
			target->supports_SSU = TRUE;
			mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
			    target_id);
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_flags & MPS_CM_FLAGS_TIMEDOUT)
			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * These can sometimes be transient transport-related
		 * errors, and sometimes persistent drive-related errors.
		 * We used to retry these without decrementing the retry
		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
		 * we hit a persistent drive problem that returns one of
		 * these error codes, we would retry indefinitely.  So,
		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
		 * count and avoid infinite retries.  We're taking the
		 * potential risk of flagging false failures in the event
		 * of a topology-related error (e.g. a SAS expander problem
		 * causes a command addressed to a drive to fail), but
		 * avoiding getting into an infinite retry loop. However,
		 * if we get them while were moving a device, we should
		 * fail the request as 'not there' because the device
		 * is effectively gone.
		 */
		if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL)
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		mps_dprint(sc, MPS_INFO,
		    "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
		    mps_describe_table(mps_iocstatus_string,
		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
		    target_id, cm->cm_desc.Default.SMID,
		    le32toh(rep->IOCLogInfo),
		    (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) ? " departing" : "");
		mps_dprint(sc, MPS_XINFO,
		    "SCSIStatus %x SCSIState %x xfercount %u\n",
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mpssas_log_command(cm, MPS_XINFO,
		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

	/* Log reply details; output is gated on MPS_XINFO debugging. */
	mps_sc_failed_io_info(sc,csio,rep);

	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_XINFO, "Command completed, "
		    "unfreezing SIM queue\n");
	}

	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	/*
	 * Check to see if we're removing the device. If so, and this is the
	 * last command on the queue, proceed with the deferred removal of the
	 * device.  Note, for removing a volume, this won't trigger because
	 * pending_remove_tm will be NULL.
	 */
	if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
		    cm->cm_targ->pending_remove_tm != NULL) {
			mps_dprint(sc, MPS_INFO,
			    "Last pending command complete: starting remove_device target %u handle 0x%04x\n",
			    cm->cm_targ->tid, cm->cm_targ->handle);
			mps_map_command(sc, cm->cm_targ->pending_remove_tm);
			cm->cm_targ->pending_remove_tm = NULL;
		}
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2397 
2398 /* All Request reached here are Endian safe */
2399 static void
2400 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2401     union ccb *ccb) {
2402 	pMpi2SCSIIORequest_t	pIO_req;
2403 	struct mps_softc	*sc = sassc->sc;
2404 	uint64_t		virtLBA;
2405 	uint32_t		physLBA, stripe_offset, stripe_unit;
2406 	uint32_t		io_size, column;
2407 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2408 
2409 	/*
2410 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2411 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2412 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2413 	 * bit different than the 10/16 CDBs, handle them separately.
2414 	 */
2415 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2416 	CDB = pIO_req->CDB.CDB32;
2417 
2418 	/*
2419 	 * Handle 6 byte CDBs.
2420 	 */
2421 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2422 	    (CDB[0] == WRITE_6))) {
2423 		/*
2424 		 * Get the transfer size in blocks.
2425 		 */
2426 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2427 
2428 		/*
2429 		 * Get virtual LBA given in the CDB.
2430 		 */
2431 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2432 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2433 
2434 		/*
2435 		 * Check that LBA range for I/O does not exceed volume's
2436 		 * MaxLBA.
2437 		 */
2438 		if ((virtLBA + (uint64_t)io_size - 1) <=
2439 		    sc->DD_max_lba) {
2440 			/*
2441 			 * Check if the I/O crosses a stripe boundary.  If not,
2442 			 * translate the virtual LBA to a physical LBA and set
2443 			 * the DevHandle for the PhysDisk to be used.  If it
2444 			 * does cross a boundary, do normal I/O.  To get the
2445 			 * right DevHandle to use, get the map number for the
2446 			 * column, then use that map number to look up the
2447 			 * DevHandle of the PhysDisk.
2448 			 */
2449 			stripe_offset = (uint32_t)virtLBA &
2450 			    (sc->DD_stripe_size - 1);
2451 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2452 				physLBA = (uint32_t)virtLBA >>
2453 				    sc->DD_stripe_exponent;
2454 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2455 				column = physLBA % sc->DD_num_phys_disks;
2456 				pIO_req->DevHandle =
2457 				    htole16(sc->DD_column_map[column].dev_handle);
2458 				/* ???? Is this endian safe*/
2459 				cm->cm_desc.SCSIIO.DevHandle =
2460 				    pIO_req->DevHandle;
2461 
2462 				physLBA = (stripe_unit <<
2463 				    sc->DD_stripe_exponent) + stripe_offset;
2464 				ptrLBA = &pIO_req->CDB.CDB32[1];
2465 				physLBA_byte = (uint8_t)(physLBA >> 16);
2466 				*ptrLBA = physLBA_byte;
2467 				ptrLBA = &pIO_req->CDB.CDB32[2];
2468 				physLBA_byte = (uint8_t)(physLBA >> 8);
2469 				*ptrLBA = physLBA_byte;
2470 				ptrLBA = &pIO_req->CDB.CDB32[3];
2471 				physLBA_byte = (uint8_t)physLBA;
2472 				*ptrLBA = physLBA_byte;
2473 
2474 				/*
2475 				 * Set flag that Direct Drive I/O is
2476 				 * being done.
2477 				 */
2478 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2479 			}
2480 		}
2481 		return;
2482 	}
2483 
2484 	/*
2485 	 * Handle 10, 12 or 16 byte CDBs.
2486 	 */
2487 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2488 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2489 	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2490 	    (CDB[0] == WRITE_12))) {
2491 		/*
2492 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2493 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2494 		 * the else section.  10-byte and 12-byte CDB's are OK.
2495 		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2496 		 * ready to accept 12byte CDB for Direct IOs.
2497 		 */
2498 		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2499 		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2500 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2501 			/*
2502 			 * Get the transfer size in blocks.
2503 			 */
2504 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2505 
2506 			/*
2507 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2508 			 * LBA in the CDB depending on command.
2509 			 */
2510 			lba_idx = ((CDB[0] == READ_12) ||
2511 				(CDB[0] == WRITE_12) ||
2512 				(CDB[0] == READ_10) ||
2513 				(CDB[0] == WRITE_10))? 2 : 6;
2514 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2515 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2516 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2517 			    (uint64_t)CDB[lba_idx + 3];
2518 
2519 			/*
2520 			 * Check that LBA range for I/O does not exceed volume's
2521 			 * MaxLBA.
2522 			 */
2523 			if ((virtLBA + (uint64_t)io_size - 1) <=
2524 			    sc->DD_max_lba) {
2525 				/*
2526 				 * Check if the I/O crosses a stripe boundary.
2527 				 * If not, translate the virtual LBA to a
2528 				 * physical LBA and set the DevHandle for the
2529 				 * PhysDisk to be used.  If it does cross a
2530 				 * boundary, do normal I/O.  To get the right
2531 				 * DevHandle to use, get the map number for the
2532 				 * column, then use that map number to look up
2533 				 * the DevHandle of the PhysDisk.
2534 				 */
2535 				stripe_offset = (uint32_t)virtLBA &
2536 				    (sc->DD_stripe_size - 1);
2537 				if ((stripe_offset + io_size) <=
2538 				    sc->DD_stripe_size) {
2539 					physLBA = (uint32_t)virtLBA >>
2540 					    sc->DD_stripe_exponent;
2541 					stripe_unit = physLBA /
2542 					    sc->DD_num_phys_disks;
2543 					column = physLBA %
2544 					    sc->DD_num_phys_disks;
2545 					pIO_req->DevHandle =
2546 					    htole16(sc->DD_column_map[column].
2547 					    dev_handle);
2548 					cm->cm_desc.SCSIIO.DevHandle =
2549 					    pIO_req->DevHandle;
2550 
2551 					physLBA = (stripe_unit <<
2552 					    sc->DD_stripe_exponent) +
2553 					    stripe_offset;
2554 					ptrLBA =
2555 					    &pIO_req->CDB.CDB32[lba_idx];
2556 					physLBA_byte = (uint8_t)(physLBA >> 24);
2557 					*ptrLBA = physLBA_byte;
2558 					ptrLBA =
2559 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2560 					physLBA_byte = (uint8_t)(physLBA >> 16);
2561 					*ptrLBA = physLBA_byte;
2562 					ptrLBA =
2563 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2564 					physLBA_byte = (uint8_t)(physLBA >> 8);
2565 					*ptrLBA = physLBA_byte;
2566 					ptrLBA =
2567 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2568 					physLBA_byte = (uint8_t)physLBA;
2569 					*ptrLBA = physLBA_byte;
2570 
2571 					/*
2572 					 * Set flag that Direct Drive I/O is
2573 					 * being done.
2574 					 */
2575 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2576 				}
2577 			}
2578 		} else {
2579 			/*
2580 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2581 			 * 0.  Get the transfer size in blocks.
2582 			 */
2583 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2584 
2585 			/*
2586 			 * Get virtual LBA.
2587 			 */
2588 			virtLBA = ((uint64_t)CDB[2] << 54) |
2589 			    ((uint64_t)CDB[3] << 48) |
2590 			    ((uint64_t)CDB[4] << 40) |
2591 			    ((uint64_t)CDB[5] << 32) |
2592 			    ((uint64_t)CDB[6] << 24) |
2593 			    ((uint64_t)CDB[7] << 16) |
2594 			    ((uint64_t)CDB[8] << 8) |
2595 			    (uint64_t)CDB[9];
2596 
2597 			/*
2598 			 * Check that LBA range for I/O does not exceed volume's
2599 			 * MaxLBA.
2600 			 */
2601 			if ((virtLBA + (uint64_t)io_size - 1) <=
2602 			    sc->DD_max_lba) {
2603 				/*
2604 				 * Check if the I/O crosses a stripe boundary.
2605 				 * If not, translate the virtual LBA to a
2606 				 * physical LBA and set the DevHandle for the
2607 				 * PhysDisk to be used.  If it does cross a
2608 				 * boundary, do normal I/O.  To get the right
2609 				 * DevHandle to use, get the map number for the
2610 				 * column, then use that map number to look up
2611 				 * the DevHandle of the PhysDisk.
2612 				 */
2613 				stripe_offset = (uint32_t)virtLBA &
2614 				    (sc->DD_stripe_size - 1);
2615 				if ((stripe_offset + io_size) <=
2616 				    sc->DD_stripe_size) {
2617 					physLBA = (uint32_t)(virtLBA >>
2618 					    sc->DD_stripe_exponent);
2619 					stripe_unit = physLBA /
2620 					    sc->DD_num_phys_disks;
2621 					column = physLBA %
2622 					    sc->DD_num_phys_disks;
2623 					pIO_req->DevHandle =
2624 					    htole16(sc->DD_column_map[column].
2625 					    dev_handle);
2626 					cm->cm_desc.SCSIIO.DevHandle =
2627 					    pIO_req->DevHandle;
2628 
2629 					physLBA = (stripe_unit <<
2630 					    sc->DD_stripe_exponent) +
2631 					    stripe_offset;
2632 
2633 					/*
2634 					 * Set upper 4 bytes of LBA to 0.  We
2635 					 * assume that the phys disks are less
2636 					 * than 2 TB's in size.  Then, set the
2637 					 * lower 4 bytes.
2638 					 */
2639 					pIO_req->CDB.CDB32[2] = 0;
2640 					pIO_req->CDB.CDB32[3] = 0;
2641 					pIO_req->CDB.CDB32[4] = 0;
2642 					pIO_req->CDB.CDB32[5] = 0;
2643 					ptrLBA = &pIO_req->CDB.CDB32[6];
2644 					physLBA_byte = (uint8_t)(physLBA >> 24);
2645 					*ptrLBA = physLBA_byte;
2646 					ptrLBA = &pIO_req->CDB.CDB32[7];
2647 					physLBA_byte = (uint8_t)(physLBA >> 16);
2648 					*ptrLBA = physLBA_byte;
2649 					ptrLBA = &pIO_req->CDB.CDB32[8];
2650 					physLBA_byte = (uint8_t)(physLBA >> 8);
2651 					*ptrLBA = physLBA_byte;
2652 					ptrLBA = &pIO_req->CDB.CDB32[9];
2653 					physLBA_byte = (uint8_t)physLBA;
2654 					*ptrLBA = physLBA_byte;
2655 
2656 					/*
2657 					 * Set flag that Direct Drive I/O is
2658 					 * being done.
2659 					 */
2660 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2661 				}
2662 			}
2663 		}
2664 	}
2665 }
2666 
2667 static void
2668 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2669 {
2670 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2671 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2672 	uint64_t sasaddr;
2673 	union ccb *ccb;
2674 
2675 	ccb = cm->cm_complete_data;
2676 
2677 	/*
2678 	 * Currently there should be no way we can hit this case.  It only
2679 	 * happens when we have a failure to allocate chain frames, and SMP
2680 	 * commands require two S/G elements only.  That should be handled
2681 	 * in the standard request size.
2682 	 */
2683 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2684 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2685 			   __func__, cm->cm_flags);
2686 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2687 		goto bailout;
2688         }
2689 
2690 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2691 	if (rpl == NULL) {
2692 		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2693 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2694 		goto bailout;
2695 	}
2696 
2697 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2698 	sasaddr = le32toh(req->SASAddress.Low);
2699 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2700 
2701 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2702 	    MPI2_IOCSTATUS_SUCCESS ||
2703 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2704 		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2705 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2706 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2707 		goto bailout;
2708 	}
2709 
2710 	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2711 		   "%#jx completed successfully\n", __func__,
2712 		   (uintmax_t)sasaddr);
2713 
2714 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2715 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2716 	else
2717 		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2718 
2719 bailout:
2720 	/*
2721 	 * We sync in both directions because we had DMAs in the S/G list
2722 	 * in both directions.
2723 	 */
2724 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2725 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2726 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2727 	mps_free_command(sc, cm);
2728 	xpt_done(ccb);
2729 }
2730 
/*
 * Send the SMP passthrough request described by 'ccb' to the device at
 * 'sasaddr'.  The request and response buffers are loaded as a single
 * two-element uio so that one mps_map_command() call maps both
 * directions; mpssas_smpio_complete() finishes the CCB.  On any setup
 * failure the CCB is completed here with an appropriate error status.
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
			   "%s: physical addresses not supported\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			/*
			 * Single-entry S/G list; ds_addr holds a kernel
			 * virtual address here (physical addresses were
			 * rejected above).
			 */
			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 is the request (data out), iovec 1 the response (data in). */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;

}
2901 
/*
 * Handle XPT_SMP_IO: work out which SAS address should receive the SMP
 * request for this target, then hand the CCB to mpssas_send_smpcmd().
 * Devices with an embedded SMP target are addressed directly;
 * otherwise the request is routed to the target's parent (normally the
 * expander it hangs off of).
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we have to figure out what the expander's
	 * address is below.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/* The parent must itself be an SMP target (an expander). */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */
	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3034 
/*
 * Handle XPT_RESET_DEV: issue a SCSI task management Target Reset
 * (carried out as a SAS hard link reset / SATA link reset) to the
 * addressed target.  The devq is frozen until the TM completes; the
 * CCB is finished in mpssas_resetdev_complete().
 */
static void
mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *tm;
	struct mpssas_target *targ;

	MPS_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_RESET_DEV\n",
	     ccb->ccb_h.target_id));
	sc = sassc->sc;
	tm = mpssas_alloc_tm(sc);
	if (tm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "command alloc failure in mpssas_action_resetdev\n");
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	/* Task management requests carry no data. */
	tm->cm_data = NULL;
	tm->cm_complete = mpssas_resetdev_complete;
	tm->cm_complete_data = ccb;
	tm->cm_targ = targ;

	/* Freeze the devq for this target until the TM completes. */
	mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
	mps_map_command(sc, tm);
}
3075 
3076 static void
3077 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3078 {
3079 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3080 	union ccb *ccb;
3081 
3082 	MPS_FUNCTRACE(sc);
3083 	mtx_assert(&sc->mps_mtx, MA_OWNED);
3084 
3085 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3086 	ccb = tm->cm_complete_data;
3087 
3088 	/*
3089 	 * Currently there should be no way we can hit this case.  It only
3090 	 * happens when we have a failure to allocate chain frames, and
3091 	 * task management commands don't have S/G lists.
3092 	 */
3093 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3094 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3095 
3096 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3097 
3098 		mps_dprint(sc, MPS_ERROR,
3099 			   "%s: cm_flags = %#x for reset of handle %#04x! "
3100 			   "This should not happen!\n", __func__, tm->cm_flags,
3101 			   req->DevHandle);
3102 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3103 		goto bailout;
3104 	}
3105 
3106 	mps_dprint(sc, MPS_XINFO,
3107 	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3108 	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3109 
3110 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3111 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
3112 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3113 		    CAM_LUN_WILDCARD);
3114 	}
3115 	else
3116 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3117 
3118 bailout:
3119 
3120 	mpssas_free_tm(sc, tm);
3121 	xpt_done(ccb);
3122 }
3123 
3124 static void
3125 mpssas_poll(struct cam_sim *sim)
3126 {
3127 	struct mpssas_softc *sassc;
3128 
3129 	sassc = cam_sim_softc(sim);
3130 
3131 	if (sassc->sc->mps_debug & MPS_TRACE) {
3132 		/* frequent debug messages during a panic just slow
3133 		 * everything down too much.
3134 		 */
3135 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3136 		sassc->sc->mps_debug &= ~MPS_TRACE;
3137 	}
3138 
3139 	mps_intr_locked(sassc->sc);
3140 }
3141 
/*
 * CAM async event callback.  Only AC_ADVINFO_CHANGED with the
 * CDAI_TYPE_RCAPLONG buffer type is of interest: when the cached
 * READ CAPACITY(16) data for a LUN changes, re-fetch it via
 * XPT_DEV_ADVINFO and record whether the LUN is formatted with
 * protection information (EEDP) and, if so, its block size.
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	mps_lock(sc);
	switch (code) {
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look for an existing LUN record for this path. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* Create one if this is the first time we've seen the LUN. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached READ CAPACITY(16) data for this LUN
		 * from the transport layer.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		bzero(&cdai, sizeof(cdai));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_NONE;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/*
		 * Only protection types 1 and 3 are treated as EEDP
		 * formatted; type 2 is lumped in with the default
		 * (unformatted) case.
		 */
		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
	default:
		break;
	}
	mps_unlock(sc);
}
3244 
3245 /*
3246  * Freeze the devq and set the INRESET flag so that no I/O will be sent to
3247  * the target until the reset has completed.  The CCB holds the path which
3248  * is used to release the devq.  The devq is released and the CCB is freed
3249  * when the TM completes.
3250  * We only need to do this when we're entering reset, not at each time we
3251  * need to send an abort (which will happen if multiple commands timeout
3252  * while we're sending the abort). We do not release the queue for each
3253  * command we complete (just at the end when we free the tm), so freezing
3254  * it each time doesn't make sense.
3255  */
3256 void
3257 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3258     struct mpssas_target *target, lun_id_t lun_id)
3259 {
3260 	union ccb *ccb;
3261 	path_id_t path_id;
3262 
3263 	ccb = xpt_alloc_ccb_nowait();
3264 	if (ccb) {
3265 		path_id = cam_sim_path(sc->sassc->sim);
3266 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3267 		    target->tid, lun_id) != CAM_REQ_CMP) {
3268 			xpt_free_ccb(ccb);
3269 		} else {
3270 			tm->cm_ccb = ccb;
3271 			tm->cm_targ = target;
3272 			if ((target->flags & MPSSAS_TARGET_INRESET) == 0) {
3273 				mps_dprint(sc, MPS_XINFO | MPS_RECOVERY,
3274 				    "%s: Freezing devq for target ID %d\n",
3275 				    __func__, target->tid);
3276 				xpt_freeze_devq(ccb->ccb_h.path, 1);
3277 				target->flags |= MPSSAS_TARGET_INRESET;
3278 			}
3279 		}
3280 	}
3281 }
3282 
3283 int
3284 mpssas_startup(struct mps_softc *sc)
3285 {
3286 
3287 	/*
3288 	 * Send the port enable message and set the wait_for_port_enable flag.
3289 	 * This flag helps to keep the simq frozen until all discovery events
3290 	 * are processed.
3291 	 */
3292 	sc->wait_for_port_enable = 1;
3293 	mpssas_send_portenable(sc);
3294 	return (0);
3295 }
3296 
3297 static int
3298 mpssas_send_portenable(struct mps_softc *sc)
3299 {
3300 	MPI2_PORT_ENABLE_REQUEST *request;
3301 	struct mps_command *cm;
3302 
3303 	MPS_FUNCTRACE(sc);
3304 
3305 	if ((cm = mps_alloc_command(sc)) == NULL)
3306 		return (EBUSY);
3307 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3308 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3309 	request->MsgFlags = 0;
3310 	request->VP_ID = 0;
3311 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3312 	cm->cm_complete = mpssas_portenable_complete;
3313 	cm->cm_data = NULL;
3314 	cm->cm_sge = NULL;
3315 
3316 	mps_map_command(sc, cm);
3317 	mps_dprint(sc, MPS_XINFO,
3318 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3319 	    cm, cm->cm_req, cm->cm_complete);
3320 	return (0);
3321 }
3322 
3323 static void
3324 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3325 {
3326 	MPI2_PORT_ENABLE_REPLY *reply;
3327 	struct mpssas_softc *sassc;
3328 
3329 	MPS_FUNCTRACE(sc);
3330 	sassc = sc->sassc;
3331 
3332 	/*
3333 	 * Currently there should be no way we can hit this case.  It only
3334 	 * happens when we have a failure to allocate chain frames, and
3335 	 * port enable commands don't have S/G lists.
3336 	 */
3337 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3338 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3339 			   "This should not happen!\n", __func__, cm->cm_flags);
3340 	}
3341 
3342 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3343 	if (reply == NULL)
3344 		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3345 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3346 	    MPI2_IOCSTATUS_SUCCESS)
3347 		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3348 
3349 	mps_free_command(sc, cm);
3350 
3351 	/*
3352 	 * Get WarpDrive info after discovery is complete but before the scan
3353 	 * starts.  At this point, all devices are ready to be exposed to the
3354 	 * OS.  If devices should be hidden instead, take them out of the
3355 	 * 'targets' array before the scan.  The devinfo for a disk will have
3356 	 * some info and a volume's will be 0.  Use that to remove disks.
3357 	 */
3358 	mps_wd_config_pages(sc);
3359 
3360 	/*
3361 	 * Done waiting for port enable to complete.  Decrement the refcount.
3362 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3363 	 * take place.  Since the simq was explicitly frozen before port
3364 	 * enable, it must be explicitly released here to keep the
3365 	 * freeze/release count in sync.
3366 	 */
3367 	sc->wait_for_port_enable = 0;
3368 	sc->port_enable_complete = 1;
3369 	wakeup(&sc->port_enable_complete);
3370 	mpssas_startup_decrement(sassc);
3371 }
3372 
3373 int
3374 mpssas_check_id(struct mpssas_softc *sassc, int id)
3375 {
3376 	struct mps_softc *sc = sassc->sc;
3377 	char *ids;
3378 	char *name;
3379 
3380 	ids = &sc->exclude_ids[0];
3381 	while((name = strsep(&ids, ",")) != NULL) {
3382 		if (name[0] == '\0')
3383 			continue;
3384 		if (strtol(name, NULL, 0) == (long)id)
3385 			return (1);
3386 	}
3387 
3388 	return (0);
3389 }
3390 
3391 void
3392 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3393 {
3394 	struct mpssas_softc *sassc;
3395 	struct mpssas_lun *lun, *lun_tmp;
3396 	struct mpssas_target *targ;
3397 	int i;
3398 
3399 	sassc = sc->sassc;
3400 	/*
3401 	 * The number of targets is based on IOC Facts, so free all of
3402 	 * the allocated LUNs for each target and then the target buffer
3403 	 * itself.
3404 	 */
3405 	for (i=0; i< maxtargets; i++) {
3406 		targ = &sassc->targets[i];
3407 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3408 			free(lun, M_MPT2);
3409 		}
3410 	}
3411 	free(sassc->targets, M_MPT2);
3412 
3413 	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3414 	    M_MPT2, M_WAITOK|M_ZERO);
3415 }
3416