xref: /freebsd/sys/dev/mps/mps_sas.c (revision a07d59d1daafdaae0d1b1ad1f977f9eda92dc83b)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2009 Yahoo! Inc.
5  * Copyright (c) 2011-2015 LSI Corp.
6  * Copyright (c) 2013-2015 Avago Technologies
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
31  *
32  * $FreeBSD$
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 /* Communications core for Avago Technologies (LSI) MPT2 */
39 
40 /* TODO Move headers to mpsvar */
41 #include <sys/types.h>
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/selinfo.h>
46 #include <sys/module.h>
47 #include <sys/bus.h>
48 #include <sys/conf.h>
49 #include <sys/bio.h>
50 #include <sys/malloc.h>
51 #include <sys/uio.h>
52 #include <sys/sysctl.h>
53 #include <sys/endian.h>
54 #include <sys/queue.h>
55 #include <sys/kthread.h>
56 #include <sys/taskqueue.h>
57 #include <sys/sbuf.h>
58 
59 #include <machine/bus.h>
60 #include <machine/resource.h>
61 #include <sys/rman.h>
62 
63 #include <machine/stdarg.h>
64 
65 #include <cam/cam.h>
66 #include <cam/cam_ccb.h>
67 #include <cam/cam_xpt.h>
68 #include <cam/cam_debug.h>
69 #include <cam/cam_sim.h>
70 #include <cam/cam_xpt_sim.h>
71 #include <cam/cam_xpt_periph.h>
72 #include <cam/cam_periph.h>
73 #include <cam/scsi/scsi_all.h>
74 #include <cam/scsi/scsi_message.h>
75 #if __FreeBSD_version >= 900026
76 #include <cam/scsi/smp_all.h>
77 #endif
78 
79 #include <dev/mps/mpi/mpi2_type.h>
80 #include <dev/mps/mpi/mpi2.h>
81 #include <dev/mps/mpi/mpi2_ioc.h>
82 #include <dev/mps/mpi/mpi2_sas.h>
83 #include <dev/mps/mpi/mpi2_cnfg.h>
84 #include <dev/mps/mpi/mpi2_init.h>
85 #include <dev/mps/mpi/mpi2_tool.h>
86 #include <dev/mps/mps_ioctl.h>
87 #include <dev/mps/mpsvar.h>
88 #include <dev/mps/mps_table.h>
89 #include <dev/mps/mps_sas.h>
90 
91 #define MPSSAS_DISCOVERY_TIMEOUT	20
92 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
93 
/*
 * static array to check SCSI OpCode for EEDP protection bits
 *
 * Indexed by the first CDB byte (the SCSI opcode).  A non-zero entry is
 * the EEDP flag to apply to that command: PRO_R for reads (check and
 * remove protection info), PRO_W for writes (insert), PRO_V for verify
 * operations (insert).  The populated slots correspond to the 6/10/12/16
 * byte READ, WRITE, WRITE AND VERIFY, VERIFY, and WRITE SAME opcodes.
 */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
118 
119 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
120 
121 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
122 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
123 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
124 static void mpssas_poll(struct cam_sim *sim);
125 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
126     struct mps_command *cm);
127 static void mpssas_scsiio_timeout(void *data);
128 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
129 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
130     struct mps_command *cm, union ccb *ccb);
131 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
132 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
133 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
134 #if __FreeBSD_version >= 900026
135 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
136 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
137 			       uint64_t sasaddr);
138 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
139 #endif //FreeBSD_version >= 900026
140 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
141 static void mpssas_async(void *callback_arg, uint32_t code,
142 			 struct cam_path *path, void *arg);
143 #if (__FreeBSD_version < 901503) || \
144     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
145 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
146 			      struct ccb_getdev *cgd);
147 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
148 #endif
149 static int mpssas_send_portenable(struct mps_softc *sc);
150 static void mpssas_portenable_complete(struct mps_softc *sc,
151     struct mps_command *cm);
152 
153 struct mpssas_target *
154 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
155 {
156 	struct mpssas_target *target;
157 	int i;
158 
159 	for (i = start; i < sassc->maxtargets; i++) {
160 		target = &sassc->targets[i];
161 		if (target->handle == handle)
162 			return (target);
163 	}
164 
165 	return (NULL);
166 }
167 
168 /* we need to freeze the simq during attach and diag reset, to avoid failing
169  * commands before device handles have been found by discovery.  Since
170  * discovery involves reading config pages and possibly sending commands,
171  * discovery actions may continue even after we receive the end of discovery
172  * event, so refcount discovery actions instead of assuming we can unfreeze
173  * the simq when we get the event.
174  */
175 void
176 mpssas_startup_increment(struct mpssas_softc *sassc)
177 {
178 	MPS_FUNCTRACE(sassc->sc);
179 
180 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
181 		if (sassc->startup_refcount++ == 0) {
182 			/* just starting, freeze the simq */
183 			mps_dprint(sassc->sc, MPS_INIT,
184 			    "%s freezing simq\n", __func__);
185 #if __FreeBSD_version >= 1000039
186 			xpt_hold_boot();
187 #endif
188 			xpt_freeze_simq(sassc->sim, 1);
189 		}
190 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
191 		    sassc->startup_refcount);
192 	}
193 }
194 
195 void
196 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
197 {
198 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
199 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
200 		xpt_release_simq(sassc->sim, 1);
201 		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
202 	}
203 }
204 
205 void
206 mpssas_startup_decrement(struct mpssas_softc *sassc)
207 {
208 	MPS_FUNCTRACE(sassc->sc);
209 
210 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
211 		if (--sassc->startup_refcount == 0) {
212 			/* finished all discovery-related actions, release
213 			 * the simq and rescan for the latest topology.
214 			 */
215 			mps_dprint(sassc->sc, MPS_INIT,
216 			    "%s releasing simq\n", __func__);
217 			sassc->flags &= ~MPSSAS_IN_STARTUP;
218 			xpt_release_simq(sassc->sim, 1);
219 #if __FreeBSD_version >= 1000039
220 			xpt_release_boot();
221 #else
222 			mpssas_rescan_target(sassc->sc, NULL);
223 #endif
224 		}
225 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
226 		    sassc->startup_refcount);
227 	}
228 }
229 
230 /* The firmware requires us to stop sending commands when we're doing task
231  * management, so refcount the TMs and keep the simq frozen when any are in
232  * use.
233  */
/*
 * Allocate a command for task management.  TMs are drawn from the
 * reserved high-priority pool; returns NULL when the pool is empty.
 */
struct mps_command *
mpssas_alloc_tm(struct mps_softc *sc)
{

	return (mps_alloc_high_priority_command(sc));
}
242 
243 void
244 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
245 {
246 	int target_id = 0xFFFFFFFF;
247 
248 	if (tm == NULL)
249 		return;
250 
251 	/*
252 	 * For TM's the devq is frozen for the device.  Unfreeze it here and
253 	 * free the resources used for freezing the devq.  Must clear the
254 	 * INRESET flag as well or scsi I/O will not work.
255 	 */
256 	if (tm->cm_targ != NULL) {
257 		tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
258 		target_id = tm->cm_targ->tid;
259 	}
260 	if (tm->cm_ccb) {
261 		mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
262 		    target_id);
263 		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
264 		xpt_free_path(tm->cm_ccb->ccb_h.path);
265 		xpt_free_ccb(tm->cm_ccb);
266 	}
267 
268 	mps_free_high_priority_command(sc, tm);
269 }
270 
271 void
272 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
273 {
274 	struct mpssas_softc *sassc = sc->sassc;
275 	path_id_t pathid;
276 	target_id_t targetid;
277 	union ccb *ccb;
278 
279 	MPS_FUNCTRACE(sc);
280 	pathid = cam_sim_path(sassc->sim);
281 	if (targ == NULL)
282 		targetid = CAM_TARGET_WILDCARD;
283 	else
284 		targetid = targ - sassc->targets;
285 
286 	/*
287 	 * Allocate a CCB and schedule a rescan.
288 	 */
289 	ccb = xpt_alloc_ccb_nowait();
290 	if (ccb == NULL) {
291 		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
292 		return;
293 	}
294 
295 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
296 	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
297 		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
298 		xpt_free_ccb(ccb);
299 		return;
300 	}
301 
302 	if (targetid == CAM_TARGET_WILDCARD)
303 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
304 	else
305 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
306 
307 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
308 	xpt_rescan(ccb);
309 }
310 
/*
 * Emit a printf-style log message for a command at the given debug level,
 * prefixed with the command's CAM path (or a SIM/bus/target/lun tuple when
 * no CCB is attached) and its SMID.  No-op when the level is disabled or
 * 'cm' is NULL.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];		/* message assembled in this stack buffer */
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		/* Prefix with the CCB's CAM path string. */
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			/* For SCSI I/O, include the CDB and transfer length. */
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: identify the command by SIM, bus, target and LUN. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
356 
357 
358 static void
359 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
360 {
361 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
362 	struct mpssas_target *targ;
363 	uint16_t handle;
364 
365 	MPS_FUNCTRACE(sc);
366 
367 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
368 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
369 	targ = tm->cm_targ;
370 
371 	if (reply == NULL) {
372 		/* XXX retry the remove after the diag reset completes? */
373 		mps_dprint(sc, MPS_FAULT,
374 		    "%s NULL reply resetting device 0x%04x\n", __func__,
375 		    handle);
376 		mpssas_free_tm(sc, tm);
377 		return;
378 	}
379 
380 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
381 	    MPI2_IOCSTATUS_SUCCESS) {
382 		mps_dprint(sc, MPS_ERROR,
383 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
384 		   le16toh(reply->IOCStatus), handle);
385 	}
386 
387 	mps_dprint(sc, MPS_XINFO,
388 	    "Reset aborted %u commands\n", reply->TerminationCount);
389 	mps_free_reply(sc, tm->cm_reply_data);
390 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
391 
392 	mps_dprint(sc, MPS_XINFO,
393 	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
394 
395 	/*
396 	 * Don't clear target if remove fails because things will get confusing.
397 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
398 	 * this target id if possible, and so we can assign the same target id
399 	 * to this device if it comes back in the future.
400 	 */
401 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
402 	    MPI2_IOCSTATUS_SUCCESS) {
403 		targ = tm->cm_targ;
404 		targ->handle = 0x0;
405 		targ->encl_handle = 0x0;
406 		targ->encl_slot = 0x0;
407 		targ->exp_dev_handle = 0x0;
408 		targ->phy_num = 0x0;
409 		targ->linkrate = 0x0;
410 		targ->devinfo = 0x0;
411 		targ->flags = 0x0;
412 	}
413 
414 	mpssas_free_tm(sc, tm);
415 }
416 
417 
418 /*
419  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
420  * Otherwise Volume Delete is same as Bare Drive Removal.
421  */
422 void
423 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
424 {
425 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
426 	struct mps_softc *sc;
427 	struct mps_command *cm;
428 	struct mpssas_target *targ = NULL;
429 
430 	MPS_FUNCTRACE(sassc->sc);
431 	sc = sassc->sc;
432 
433 #ifdef WD_SUPPORT
434 	/*
435 	 * If this is a WD controller, determine if the disk should be exposed
436 	 * to the OS or not.  If disk should be exposed, return from this
437 	 * function without doing anything.
438 	 */
439 	if (sc->WD_available && (sc->WD_hide_expose ==
440 	    MPS_WD_EXPOSE_ALWAYS)) {
441 		return;
442 	}
443 #endif //WD_SUPPORT
444 
445 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
446 	if (targ == NULL) {
447 		/* FIXME: what is the action? */
448 		/* We don't know about this device? */
449 		mps_dprint(sc, MPS_ERROR,
450 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
451 		return;
452 	}
453 
454 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
455 
456 	cm = mpssas_alloc_tm(sc);
457 	if (cm == NULL) {
458 		mps_dprint(sc, MPS_ERROR,
459 		    "%s: command alloc failure\n", __func__);
460 		return;
461 	}
462 
463 	mpssas_rescan_target(sc, targ);
464 
465 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
466 	req->DevHandle = targ->handle;
467 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
468 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
469 
470 	/* SAS Hard Link Reset / SATA Link Reset */
471 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
472 
473 	cm->cm_targ = targ;
474 	cm->cm_data = NULL;
475 	cm->cm_desc.HighPriority.RequestFlags =
476 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
477 	cm->cm_complete = mpssas_remove_volume;
478 	cm->cm_complete_data = (void *)(uintptr_t)handle;
479 
480 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
481 	    __func__, targ->tid);
482 	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
483 
484 	mps_map_command(sc, cm);
485 }
486 
487 /*
488  * The MPT2 firmware performs debounce on the link to avoid transient link
489  * errors and false removals.  When it does decide that link has been lost
490  * and a device need to go away, it expects that the host will perform a
491  * target reset and then an op remove.  The reset has the side-effect of
492  * aborting any outstanding requests for the device, which is required for
493  * the op-remove to succeed.  It's not clear if the host should check for
494  * the device coming back alive after the reset.
495  */
496 void
497 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
498 {
499 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
500 	struct mps_softc *sc;
501 	struct mps_command *cm;
502 	struct mpssas_target *targ = NULL;
503 
504 	MPS_FUNCTRACE(sassc->sc);
505 
506 	sc = sassc->sc;
507 
508 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
509 	if (targ == NULL) {
510 		/* FIXME: what is the action? */
511 		/* We don't know about this device? */
512 		mps_dprint(sc, MPS_ERROR,
513 		    "%s : invalid handle 0x%x \n", __func__, handle);
514 		return;
515 	}
516 
517 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
518 
519 	cm = mpssas_alloc_tm(sc);
520 	if (cm == NULL) {
521 		mps_dprint(sc, MPS_ERROR,
522 		    "%s: command alloc failure\n", __func__);
523 		return;
524 	}
525 
526 	mpssas_rescan_target(sc, targ);
527 
528 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
529 	memset(req, 0, sizeof(*req));
530 	req->DevHandle = htole16(targ->handle);
531 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
532 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
533 
534 	/* SAS Hard Link Reset / SATA Link Reset */
535 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
536 
537 	cm->cm_targ = targ;
538 	cm->cm_data = NULL;
539 	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
540 	cm->cm_complete = mpssas_remove_device;
541 	cm->cm_complete_data = (void *)(uintptr_t)handle;
542 
543 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
544 	    __func__, targ->tid);
545 	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
546 
547 	mps_map_command(sc, cm);
548 }
549 
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_remove().  Frees the reset reply, reuses the same
 * command structure to send the SAS_OP_REMOVE_DEVICE IO-unit-control
 * request, then completes any commands still queued to the target with
 * CAM_DEV_NOT_THERE (the reset aborted everything in flight).
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply resetting device 0x%04x\n", __func__,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_ERROR,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	/* Return the reply frame before the command is reused below. */
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	/*
	 * Complete commands still queued to the target.  'tm' is reused
	 * here purely as the loop iterator; the op-remove request built
	 * above has already been handed off to mps_map_command().
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mpssas_scsiio_complete(sc, tm);
	}
}
622 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request sent by
 * mpssas_remove_device().  On success, clears the target's firmware
 * state and frees its LUN list so the slot can be re-associated if the
 * device ever returns.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Release every LUN record hung off this target. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
690 
691 static int
692 mpssas_register_events(struct mps_softc *sc)
693 {
694 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
695 
696 	bzero(events, 16);
697 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
698 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
699 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
700 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
701 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
702 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
703 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
704 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
705 	setbit(events, MPI2_EVENT_IR_VOLUME);
706 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
707 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
708 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
709 
710 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
711 	    &sc->sassc->mpssas_eh);
712 
713 	return (0);
714 }
715 
716 int
717 mps_attach_sas(struct mps_softc *sc)
718 {
719 	struct mpssas_softc *sassc;
720 	cam_status status;
721 	int unit, error = 0, reqs;
722 
723 	MPS_FUNCTRACE(sc);
724 	mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
725 
726 	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
727 	if(!sassc) {
728 		mps_dprint(sc, MPS_INIT|MPS_ERROR,
729 		    "Cannot allocate SAS controller memory\n");
730 		return (ENOMEM);
731 	}
732 
733 	/*
734 	 * XXX MaxTargets could change during a reinit.  Since we don't
735 	 * resize the targets[] array during such an event, cache the value
736 	 * of MaxTargets here so that we don't get into trouble later.  This
737 	 * should move into the reinit logic.
738 	 */
739 	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
740 	sassc->targets = malloc(sizeof(struct mpssas_target) *
741 	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
742 	if(!sassc->targets) {
743 		mps_dprint(sc, MPS_INIT|MPS_ERROR,
744 		    "Cannot allocate SAS target memory\n");
745 		free(sassc, M_MPT2);
746 		return (ENOMEM);
747 	}
748 	sc->sassc = sassc;
749 	sassc->sc = sc;
750 
751 	reqs = sc->num_reqs - sc->num_prireqs - 1;
752 	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
753 		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
754 		error = ENOMEM;
755 		goto out;
756 	}
757 
758 	unit = device_get_unit(sc->mps_dev);
759 	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
760 	    unit, &sc->mps_mtx, reqs, reqs, sassc->devq);
761 	if (sassc->sim == NULL) {
762 		mps_dprint(sc, MPS_INIT|MPS_ERROR, "Cannot allocate SIM\n");
763 		error = EINVAL;
764 		goto out;
765 	}
766 
767 	TAILQ_INIT(&sassc->ev_queue);
768 
769 	/* Initialize taskqueue for Event Handling */
770 	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
771 	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
772 	    taskqueue_thread_enqueue, &sassc->ev_tq);
773 	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
774 	    device_get_nameunit(sc->mps_dev));
775 
776 	mps_lock(sc);
777 
778 	/*
779 	 * XXX There should be a bus for every port on the adapter, but since
780 	 * we're just going to fake the topology for now, we'll pretend that
781 	 * everything is just a target on a single bus.
782 	 */
783 	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
784 		mps_dprint(sc, MPS_INIT|MPS_ERROR,
785 		    "Error %d registering SCSI bus\n", error);
786 		mps_unlock(sc);
787 		goto out;
788 	}
789 
790 	/*
791 	 * Assume that discovery events will start right away.
792 	 *
793 	 * Hold off boot until discovery is complete.
794 	 */
795 	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
796 	sc->sassc->startup_refcount = 0;
797 	mpssas_startup_increment(sassc);
798 
799 	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
800 
801 	/*
802 	 * Register for async events so we can determine the EEDP
803 	 * capabilities of devices.
804 	 */
805 	status = xpt_create_path(&sassc->path, /*periph*/NULL,
806 	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
807 	    CAM_LUN_WILDCARD);
808 	if (status != CAM_REQ_CMP) {
809 		mps_dprint(sc, MPS_ERROR|MPS_INIT,
810 		    "Error %#x creating sim path\n", status);
811 		sassc->path = NULL;
812 	} else {
813 		int event;
814 
815 #if (__FreeBSD_version >= 1000006) || \
816     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
817 		event = AC_ADVINFO_CHANGED;
818 #else
819 		event = AC_FOUND_DEVICE;
820 #endif
821 		status = xpt_register_async(event, mpssas_async, sc,
822 					    sassc->path);
823 		if (status != CAM_REQ_CMP) {
824 			mps_dprint(sc, MPS_ERROR,
825 			    "Error %#x registering async handler for "
826 			    "AC_ADVINFO_CHANGED events\n", status);
827 			xpt_free_path(sassc->path);
828 			sassc->path = NULL;
829 		}
830 	}
831 	if (status != CAM_REQ_CMP) {
832 		/*
833 		 * EEDP use is the exception, not the rule.
834 		 * Warn the user, but do not fail to attach.
835 		 */
836 		mps_printf(sc, "EEDP capabilities disabled.\n");
837 	}
838 
839 	mps_unlock(sc);
840 
841 	mpssas_register_events(sc);
842 out:
843 	if (error)
844 		mps_detach_sas(sc);
845 
846 	mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
847 	return (error);
848 }
849 
/*
 * Tear down the CAM attachment built by mps_attach_sas(): deregister
 * firmware events and the async handler, free the event taskqueue,
 * release any startup simq freezes, deregister/free the SIM and its
 * queue, and free per-target LUN lists plus the SAS softc.  Safe to
 * call on a partially attached instance; always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	/* Nothing to do if attach never got far enough to set sc->sassc. */
	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Release every simq freeze still held from discovery. */
	while (sassc->startup_refcount != 0)
		mpssas_startup_decrement(sassc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free the LUN records attached to each target. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
912 
913 void
914 mpssas_discovery_end(struct mpssas_softc *sassc)
915 {
916 	struct mps_softc *sc = sassc->sc;
917 
918 	MPS_FUNCTRACE(sc);
919 
920 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
921 		callout_stop(&sassc->discovery_callout);
922 
923 	/*
924 	 * After discovery has completed, check the mapping table for any
925 	 * missing devices and update their missing counts. Only do this once
926 	 * whenever the driver is initialized so that missing counts aren't
927 	 * updated unnecessarily. Note that just because discovery has
928 	 * completed doesn't mean that events have been processed yet. The
929 	 * check_devices function is a callout timer that checks if ALL devices
930 	 * are missing. If so, it will wait a little longer for events to
931 	 * complete and keep resetting itself until some device in the mapping
932 	 * table is not missing, meaning that event processing has started.
933 	 */
934 	if (sc->track_mapping_events) {
935 		mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
936 		    "completed. Check for missing devices in the mapping "
937 		    "table.\n");
938 		callout_reset(&sc->device_check_callout,
939 		    MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
940 		    sc);
941 	}
942 }
943 
/*
 * CAM action entry point for the mps(4) SIM.  Dispatches on the CCB
 * function code: path inquiry and transport-settings queries are answered
 * inline; SCSI I/O, SMP I/O and device resets are handed to dedicated
 * helpers (which complete the CCB themselves and therefore return early);
 * everything else is either faked as success or rejected.  Called with the
 * softc mutex held (asserted below).
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report SIM capabilities/limits back to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mps_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/* Advertise the minimum SAS rate (1.5 Gb/s) as the base. */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report per-target link rate and tagged-queueing support. */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A zero device handle means no device at this target ID. */
		if (targ->handle == 0x0) {
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Translate the MPI link-rate code into kb/s for CAM. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			/* Unknown rate code: don't claim the speed is valid. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		/* Helper completes the CCB itself; skip the xpt_done below. */
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* Not implemented; pretend success so CAM's recovery moves on. */
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Helper completes the CCB itself; skip the xpt_done below. */
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		/* Helper completes the CCB itself; skip the xpt_done below. */
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1069 
1070 static void
1071 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1072     target_id_t target_id, lun_id_t lun_id)
1073 {
1074 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1075 	struct cam_path *path;
1076 
1077 	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1078 	    ac_code, target_id, (uintmax_t)lun_id);
1079 
1080 	if (xpt_create_path(&path, NULL,
1081 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1082 		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1083 			   "notification\n");
1084 		return;
1085 	}
1086 
1087 	xpt_async(ac_code, path, NULL);
1088 	xpt_free_path(path);
1089 }
1090 
/*
 * Forcibly complete every in-flight command after a diag reset.  Each
 * allocated command slot (index 0 is reserved) is completed with a NULL
 * reply: polled commands are flagged complete, completion callbacks are
 * invoked, and sleeping waiters are woken.  Called with the softc mutex
 * held (asserted below).
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Polled callers spin on this flag rather than sleeping. */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			/* Callback sees cm_reply == NULL, meaning "reset". */
			cm->cm_complete(sc, cm);
			completed = 1;
		}

		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		/* Keep the active-I/O count from going negative. */
		if (cm->cm_sc->io_cmds_active != 0)
			cm->cm_sc->io_cmds_active--;

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1139 
/*
 * Driver-side recovery after a controller reinit (diag reset).  Re-enters
 * startup/discovery mode, announces a bus reset to CAM, flushes all
 * outstanding commands, and invalidates every target's device handle so
 * rediscovery can assign fresh ones.  The ordering below matters: the simq
 * must be frozen (startup increment) before commands are completed, and
 * handles are only cleared after completion so in-flight teardown still
 * sees them.
 */
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	mpssas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_dprint(sc, MPS_INIT,
	    "%s startup %u after command completion\n", __func__,
	    sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		/* A non-zero count here means commands were not cleaned up. */
		if (sc->sassc->targets[i].outstanding != 0)
			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1183 
1184 static void
1185 mpssas_tm_timeout(void *data)
1186 {
1187 	struct mps_command *tm = data;
1188 	struct mps_softc *sc = tm->cm_sc;
1189 
1190 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1191 
1192 	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1193 	    "task mgmt %p timed out\n", tm);
1194 	mps_reinit(sc);
1195 }
1196 
1197 static void
1198 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1199 {
1200 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1201 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1202 	unsigned int cm_count = 0;
1203 	struct mps_command *cm;
1204 	struct mpssas_target *targ;
1205 
1206 	callout_stop(&tm->cm_callout);
1207 
1208 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1209 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1210 	targ = tm->cm_targ;
1211 
1212 	/*
1213 	 * Currently there should be no way we can hit this case.  It only
1214 	 * happens when we have a failure to allocate chain frames, and
1215 	 * task management commands don't have S/G lists.
1216 	 * XXXSL So should it be an assertion?
1217 	 */
1218 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1219 		mps_dprint(sc, MPS_RECOVERY|MPS_ERROR,
1220 		    "%s: cm_flags = %#x for LUN reset! "
1221 		   "This should not happen!\n", __func__, tm->cm_flags);
1222 		mpssas_free_tm(sc, tm);
1223 		return;
1224 	}
1225 
1226 	if (reply == NULL) {
1227 		mps_dprint(sc, MPS_RECOVERY, "NULL reset reply for tm %p\n",
1228 		    tm);
1229 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1230 			/* this completion was due to a reset, just cleanup */
1231 			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1232 			    "reset, ignoring NULL LUN reset reply\n");
1233 			targ->tm = NULL;
1234 			mpssas_free_tm(sc, tm);
1235 		}
1236 		else {
1237 			/* we should have gotten a reply. */
1238 			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1239 			    "LUN reset attempt, resetting controller\n");
1240 			mps_reinit(sc);
1241 		}
1242 		return;
1243 	}
1244 
1245 	mps_dprint(sc, MPS_RECOVERY,
1246 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1247 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1248 	    le32toh(reply->TerminationCount));
1249 
1250 	/*
1251 	 * See if there are any outstanding commands for this LUN.
1252 	 * This could be made more efficient by using a per-LU data
1253 	 * structure of some sort.
1254 	 */
1255 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1256 		if (cm->cm_lun == tm->cm_lun)
1257 			cm_count++;
1258 	}
1259 
1260 	if (cm_count == 0) {
1261 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1262 		    "Finished recovery after LUN reset for target %u\n",
1263 		    targ->tid);
1264 
1265 		mpssas_announce_reset(sc, AC_SENT_BDR, targ->tid, tm->cm_lun);
1266 
1267 		/*
1268 		 * We've finished recovery for this logical unit.  check and
1269 		 * see if some other logical unit has a timedout command
1270 		 * that needs to be processed.
1271 		 */
1272 		cm = TAILQ_FIRST(&targ->timedout_commands);
1273 		if (cm) {
1274 			mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1275 			    "More commands to abort for target %u\n",
1276 			    targ->tid);
1277 			mpssas_send_abort(sc, tm, cm);
1278 		} else {
1279 			targ->tm = NULL;
1280 			mpssas_free_tm(sc, tm);
1281 		}
1282 	} else {
1283 		/*
1284 		 * If we still have commands for this LUN, the reset
1285 		 * effectively failed, regardless of the status reported.
1286 		 * Escalate to a target reset.
1287 		 */
1288 		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1289 		    "logical unit reset complete for target %u, but still "
1290 		    "have %u command(s), sending target reset\n", targ->tid,
1291 		    cm_count);
1292 		mpssas_send_reset(sc, tm,
1293 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1294 	}
1295 }
1296 
1297 static void
1298 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1299 {
1300 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1301 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1302 	struct mpssas_target *targ;
1303 
1304 	callout_stop(&tm->cm_callout);
1305 
1306 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1307 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1308 	targ = tm->cm_targ;
1309 
1310 	/*
1311 	 * Currently there should be no way we can hit this case.  It only
1312 	 * happens when we have a failure to allocate chain frames, and
1313 	 * task management commands don't have S/G lists.
1314 	 */
1315 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1316 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1317 			   "This should not happen!\n", __func__, tm->cm_flags);
1318 		mpssas_free_tm(sc, tm);
1319 		return;
1320 	}
1321 
1322 	if (reply == NULL) {
1323 		mps_dprint(sc, MPS_RECOVERY,
1324 		    "NULL target reset reply for tm %pi TaskMID %u\n",
1325 		    tm, le16toh(req->TaskMID));
1326 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1327 			/* this completion was due to a reset, just cleanup */
1328 			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1329 			    "reset, ignoring NULL target reset reply\n");
1330 			targ->tm = NULL;
1331 			mpssas_free_tm(sc, tm);
1332 		} else {
1333 			/* we should have gotten a reply. */
1334 			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1335 			    "target reset attempt, resetting controller\n");
1336 			mps_reinit(sc);
1337 		}
1338 		return;
1339 	}
1340 
1341 	mps_dprint(sc, MPS_RECOVERY,
1342 	    "target reset status 0x%x code 0x%x count %u\n",
1343 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1344 	    le32toh(reply->TerminationCount));
1345 
1346 	if (targ->outstanding == 0) {
1347 		/* we've finished recovery for this target and all
1348 		 * of its logical units.
1349 		 */
1350 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1351 		    "Finished reset recovery for target %u\n", targ->tid);
1352 
1353 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1354 		    CAM_LUN_WILDCARD);
1355 
1356 		targ->tm = NULL;
1357 		mpssas_free_tm(sc, tm);
1358 	} else {
1359 		/*
1360 		 * After a target reset, if this target still has
1361 		 * outstanding commands, the reset effectively failed,
1362 		 * regardless of the status reported.  escalate.
1363 		 */
1364 		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1365 		    "Target reset complete for target %u, but still have %u "
1366 		    "command(s), resetting controller\n", targ->tid,
1367 		    targ->outstanding);
1368 		mps_reinit(sc);
1369 	}
1370 }
1371 
1372 #define MPS_RESET_TIMEOUT 30
1373 
1374 int
1375 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1376 {
1377 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1378 	struct mpssas_target *target;
1379 	int err;
1380 
1381 	target = tm->cm_targ;
1382 	if (target->handle == 0) {
1383 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1384 		    __func__, target->tid);
1385 		return -1;
1386 	}
1387 
1388 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1389 	req->DevHandle = htole16(target->handle);
1390 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1391 	req->TaskType = type;
1392 
1393 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1394 		/* XXX Need to handle invalid LUNs */
1395 		MPS_SET_LUN(req->LUN, tm->cm_lun);
1396 		tm->cm_targ->logical_unit_resets++;
1397 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1398 		    "Sending logical unit reset to target %u lun %d\n",
1399 		    target->tid, tm->cm_lun);
1400 		tm->cm_complete = mpssas_logical_unit_reset_complete;
1401 		mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1402 	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1403 		/*
1404 		 * Target reset method =
1405 		 * 	SAS Hard Link Reset / SATA Link Reset
1406 		 */
1407 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1408 		tm->cm_targ->target_resets++;
1409 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1410 		    "Sending target reset to target %u\n", target->tid);
1411 		tm->cm_complete = mpssas_target_reset_complete;
1412 		mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1413 	} else {
1414 		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
1415 		return -1;
1416 	}
1417 
1418 	tm->cm_data = NULL;
1419 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1420 	tm->cm_complete_data = (void *)tm;
1421 
1422 	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1423 	    mpssas_tm_timeout, tm);
1424 
1425 	err = mps_map_command(sc, tm);
1426 	if (err)
1427 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1428 		    "error %d sending reset type %u\n",
1429 		    err, type);
1430 
1431 	return err;
1432 }
1433 
1434 
1435 static void
1436 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1437 {
1438 	struct mps_command *cm;
1439 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1440 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1441 	struct mpssas_target *targ;
1442 
1443 	callout_stop(&tm->cm_callout);
1444 
1445 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1446 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1447 	targ = tm->cm_targ;
1448 
1449 	/*
1450 	 * Currently there should be no way we can hit this case.  It only
1451 	 * happens when we have a failure to allocate chain frames, and
1452 	 * task management commands don't have S/G lists.
1453 	 */
1454 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1455 		mps_dprint(sc, MPS_RECOVERY,
1456 		    "cm_flags = %#x for abort %p TaskMID %u!\n",
1457 		    tm->cm_flags, tm, le16toh(req->TaskMID));
1458 		mpssas_free_tm(sc, tm);
1459 		return;
1460 	}
1461 
1462 	if (reply == NULL) {
1463 		mps_dprint(sc, MPS_RECOVERY,
1464 		    "NULL abort reply for tm %p TaskMID %u\n",
1465 		    tm, le16toh(req->TaskMID));
1466 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1467 			/* this completion was due to a reset, just cleanup */
1468 			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1469 			    "reset, ignoring NULL abort reply\n");
1470 			targ->tm = NULL;
1471 			mpssas_free_tm(sc, tm);
1472 		} else {
1473 			/* we should have gotten a reply. */
1474 			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1475 			    "abort attempt, resetting controller\n");
1476 			mps_reinit(sc);
1477 		}
1478 		return;
1479 	}
1480 
1481 	mps_dprint(sc, MPS_RECOVERY,
1482 	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1483 	    le16toh(req->TaskMID),
1484 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1485 	    le32toh(reply->TerminationCount));
1486 
1487 	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1488 	if (cm == NULL) {
1489 		/*
1490 		 * If there are no more timedout commands, we're done with
1491 		 * error recovery for this target.
1492 		 */
1493 		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1494 		    "Finished abort recovery for target %u\n", targ->tid);
1495 
1496 		targ->tm = NULL;
1497 		mpssas_free_tm(sc, tm);
1498 	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1499 		/* abort success, but we have more timedout commands to abort */
1500 		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1501 		    "Continuing abort recovery for target %u\n", targ->tid);
1502 
1503 		mpssas_send_abort(sc, tm, cm);
1504 	} else {
1505 		/* we didn't get a command completion, so the abort
1506 		 * failed as far as we're concerned.  escalate.
1507 		 */
1508 		mps_dprint(sc, MPS_RECOVERY,
1509 		    "Abort failed for target %u, sending logical unit reset\n",
1510 		    targ->tid);
1511 
1512 		mpssas_send_reset(sc, tm,
1513 		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1514 	}
1515 }
1516 
1517 #define MPS_ABORT_TIMEOUT 5
1518 
1519 static int
1520 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1521 {
1522 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1523 	struct mpssas_target *targ;
1524 	int err;
1525 
1526 	targ = cm->cm_targ;
1527 	if (targ->handle == 0) {
1528 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1529 		    "%s null devhandle for target_id %d\n",
1530 		    __func__, cm->cm_ccb->ccb_h.target_id);
1531 		return -1;
1532 	}
1533 
1534 	mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1535 	    "Aborting command %p\n", cm);
1536 
1537 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1538 	req->DevHandle = htole16(targ->handle);
1539 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1540 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1541 
1542 	/* XXX Need to handle invalid LUNs */
1543 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1544 
1545 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1546 
1547 	tm->cm_data = NULL;
1548 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1549 	tm->cm_complete = mpssas_abort_complete;
1550 	tm->cm_complete_data = (void *)tm;
1551 	tm->cm_targ = cm->cm_targ;
1552 	tm->cm_lun = cm->cm_lun;
1553 
1554 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1555 	    mpssas_tm_timeout, tm);
1556 
1557 	targ->aborts++;
1558 
1559 	mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1560 
1561 	err = mps_map_command(sc, tm);
1562 	if (err)
1563 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1564 		    "error %d sending abort for cm %p SMID %u\n",
1565 		    err, cm, req->TaskMID);
1566 	return err;
1567 }
1568 
1569 static void
1570 mpssas_scsiio_timeout(void *data)
1571 {
1572 	sbintime_t elapsed, now;
1573 	union ccb *ccb;
1574 	struct mps_softc *sc;
1575 	struct mps_command *cm;
1576 	struct mpssas_target *targ;
1577 
1578 	cm = (struct mps_command *)data;
1579 	sc = cm->cm_sc;
1580 	ccb = cm->cm_ccb;
1581 	now = sbinuptime();
1582 
1583 	MPS_FUNCTRACE(sc);
1584 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1585 
1586 	mps_dprint(sc, MPS_XINFO|MPS_RECOVERY, "Timeout checking cm %p\n", sc);
1587 
1588 	/*
1589 	 * Run the interrupt handler to make sure it's not pending.  This
1590 	 * isn't perfect because the command could have already completed
1591 	 * and been re-used, though this is unlikely.
1592 	 */
1593 	mps_intr_locked(sc);
1594 	if (cm->cm_state == MPS_CM_STATE_FREE) {
1595 		mpssas_log_command(cm, MPS_XINFO,
1596 		    "SCSI command %p almost timed out\n", cm);
1597 		return;
1598 	}
1599 
1600 	if (cm->cm_ccb == NULL) {
1601 		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1602 		return;
1603 	}
1604 
1605 	targ = cm->cm_targ;
1606 	targ->timeouts++;
1607 
1608 	elapsed = now - ccb->ccb_h.qos.sim_data;
1609 	mpssas_log_command(cm, MPS_INFO|MPS_RECOVERY,
1610 	    "Command timeout on target %u(0x%04x) %d set, %d.%d elapsed\n",
1611 	    targ->tid, targ->handle, ccb->ccb_h.timeout,
1612 	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
1613 
1614 	/* XXX first, check the firmware state, to see if it's still
1615 	 * operational.  if not, do a diag reset.
1616 	 */
1617 	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1618 	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1619 	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1620 
1621 	if (targ->tm != NULL) {
1622 		/* target already in recovery, just queue up another
1623 		 * timedout command to be processed later.
1624 		 */
1625 		mps_dprint(sc, MPS_RECOVERY,
1626 		    "queued timedout cm %p for processing by tm %p\n",
1627 		    cm, targ->tm);
1628 	} else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1629 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1630 		    "Sending abort to target %u for SMID %d\n", targ->tid,
1631 		    cm->cm_desc.Default.SMID);
1632 		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1633 		    cm, targ->tm);
1634 
1635 		/* start recovery by aborting the first timedout command */
1636 		mpssas_send_abort(sc, targ->tm, cm);
1637 	} else {
1638 		/* XXX queue this target up for recovery once a TM becomes
1639 		 * available.  The firmware only has a limited number of
1640 		 * HighPriority credits for the high priority requests used
1641 		 * for task management, and we ran out.
1642 		 *
1643 		 * Isilon: don't worry about this for now, since we have
1644 		 * more credits than disks in an enclosure, and limit
1645 		 * ourselves to one TM per target for recovery.
1646 		 */
1647 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1648 		    "timedout cm %p failed to allocate a tm\n", cm);
1649 	}
1650 
1651 }
1652 
1653 static void
1654 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1655 {
1656 	MPI2_SCSI_IO_REQUEST *req;
1657 	struct ccb_scsiio *csio;
1658 	struct mps_softc *sc;
1659 	struct mpssas_target *targ;
1660 	struct mpssas_lun *lun;
1661 	struct mps_command *cm;
1662 	uint8_t i, lba_byte, *ref_tag_addr;
1663 	uint16_t eedp_flags;
1664 	uint32_t mpi_control;
1665 
1666 	sc = sassc->sc;
1667 	MPS_FUNCTRACE(sc);
1668 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1669 
1670 	csio = &ccb->csio;
1671 	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1672 	    ("Target %d out of bounds in XPT_SCSI_IO\n",
1673 	     csio->ccb_h.target_id));
1674 	targ = &sassc->targets[csio->ccb_h.target_id];
1675 	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
1676 	if (targ->handle == 0x0) {
1677 		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
1678 		    __func__, csio->ccb_h.target_id);
1679 		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1680 		xpt_done(ccb);
1681 		return;
1682 	}
1683 	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
1684 		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
1685 		    "supported %u\n", __func__, csio->ccb_h.target_id);
1686 		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1687 		xpt_done(ccb);
1688 		return;
1689 	}
1690 	/*
1691 	 * Sometimes, it is possible to get a command that is not "In
1692 	 * Progress" and was actually aborted by the upper layer.  Check for
1693 	 * this here and complete the command without error.
1694 	 */
1695 	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1696 		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
1697 		    "target %u\n", __func__, csio->ccb_h.target_id);
1698 		xpt_done(ccb);
1699 		return;
1700 	}
1701 	/*
1702 	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
1703 	 * that the volume has timed out.  We want volumes to be enumerated
1704 	 * until they are deleted/removed, not just failed.
1705 	 */
1706 	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1707 		if (targ->devinfo == 0)
1708 			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1709 		else
1710 			mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
1711 		xpt_done(ccb);
1712 		return;
1713 	}
1714 
1715 	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1716 		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
1717 		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1718 		xpt_done(ccb);
1719 		return;
1720 	}
1721 
1722 	/*
1723 	 * If target has a reset in progress, freeze the devq and return.  The
1724 	 * devq will be released when the TM reset is finished.
1725 	 */
1726 	if (targ->flags & MPSSAS_TARGET_INRESET) {
1727 		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1728 		mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
1729 		    __func__, targ->tid);
1730 		xpt_freeze_devq(ccb->ccb_h.path, 1);
1731 		xpt_done(ccb);
1732 		return;
1733 	}
1734 
1735 	cm = mps_alloc_command(sc);
1736 	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
1737 		if (cm != NULL) {
1738 			mps_free_command(sc, cm);
1739 		}
1740 		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1741 			xpt_freeze_simq(sassc->sim, 1);
1742 			sassc->flags |= MPSSAS_QUEUE_FROZEN;
1743 		}
1744 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1745 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1746 		xpt_done(ccb);
1747 		return;
1748 	}
1749 
1750 	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1751 	bzero(req, sizeof(*req));
1752 	req->DevHandle = htole16(targ->handle);
1753 	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1754 	req->MsgFlags = 0;
1755 	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1756 	req->SenseBufferLength = MPS_SENSE_LEN;
1757 	req->SGLFlags = 0;
1758 	req->ChainOffset = 0;
1759 	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
1760 	req->SGLOffset1= 0;
1761 	req->SGLOffset2= 0;
1762 	req->SGLOffset3= 0;
1763 	req->SkipCount = 0;
1764 	req->DataLength = htole32(csio->dxfer_len);
1765 	req->BidirectionalDataLength = 0;
1766 	req->IoFlags = htole16(csio->cdb_len);
1767 	req->EEDPFlags = 0;
1768 
1769 	/* Note: BiDirectional transfers are not supported */
1770 	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1771 	case CAM_DIR_IN:
1772 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
1773 		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1774 		break;
1775 	case CAM_DIR_OUT:
1776 		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1777 		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1778 		break;
1779 	case CAM_DIR_NONE:
1780 	default:
1781 		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1782 		break;
1783 	}
1784 
1785 	if (csio->cdb_len == 32)
1786                 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1787 	/*
1788 	 * It looks like the hardware doesn't require an explicit tag
1789 	 * number for each transaction.  SAM Task Management not supported
1790 	 * at the moment.
1791 	 */
1792 	switch (csio->tag_action) {
1793 	case MSG_HEAD_OF_Q_TAG:
1794 		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1795 		break;
1796 	case MSG_ORDERED_Q_TAG:
1797 		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1798 		break;
1799 	case MSG_ACA_TASK:
1800 		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1801 		break;
1802 	case CAM_TAG_ACTION_NONE:
1803 	case MSG_SIMPLE_Q_TAG:
1804 	default:
1805 		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
1806 		break;
1807 	}
1808 	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1809 	req->Control = htole32(mpi_control);
1810 	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1811 		mps_free_command(sc, cm);
1812 		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
1813 		xpt_done(ccb);
1814 		return;
1815 	}
1816 
1817 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
1818 		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1819 	else
1820 		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1821 	req->IoFlags = htole16(csio->cdb_len);
1822 
1823 	/*
1824 	 * Check if EEDP is supported and enabled.  If it is then check if the
1825 	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
1826 	 * is formatted for EEDP support.  If all of this is true, set CDB up
1827 	 * for EEDP transfer.
1828 	 */
1829 	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1830 	if (sc->eedp_enabled && eedp_flags) {
1831 		SLIST_FOREACH(lun, &targ->luns, lun_link) {
1832 			if (lun->lun_id == csio->ccb_h.target_lun) {
1833 				break;
1834 			}
1835 		}
1836 
1837 		if ((lun != NULL) && (lun->eedp_formatted)) {
1838 			req->EEDPBlockSize = htole16(lun->eedp_block_size);
1839 			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1840 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1841 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1842 			req->EEDPFlags = htole16(eedp_flags);
1843 
1844 			/*
1845 			 * If CDB less than 32, fill in Primary Ref Tag with
1846 			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
1847 			 * already there.  Also, set protection bit.  FreeBSD
1848 			 * currently does not support CDBs bigger than 16, but
1849 			 * the code doesn't hurt, and will be here for the
1850 			 * future.
1851 			 */
1852 			if (csio->cdb_len != 32) {
1853 				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1854 				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1855 				    PrimaryReferenceTag;
1856 				for (i = 0; i < 4; i++) {
1857 					*ref_tag_addr =
1858 					    req->CDB.CDB32[lba_byte + i];
1859 					ref_tag_addr++;
1860 				}
1861 				req->CDB.EEDP32.PrimaryReferenceTag =
1862 					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
1863 				req->CDB.EEDP32.PrimaryApplicationTagMask =
1864 				    0xFFFF;
1865 				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1866 				    0x20;
1867 			} else {
1868 				eedp_flags |=
1869 				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1870 				req->EEDPFlags = htole16(eedp_flags);
1871 				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1872 				    0x1F) | 0x20;
1873 			}
1874 		}
1875 	}
1876 
1877 	cm->cm_length = csio->dxfer_len;
1878 	if (cm->cm_length != 0) {
1879 		cm->cm_data = ccb;
1880 		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
1881 	} else {
1882 		cm->cm_data = NULL;
1883 	}
1884 	cm->cm_sge = &req->SGL;
1885 	cm->cm_sglsize = (32 - 24) * 4;
1886 	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1887 	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1888 	cm->cm_complete = mpssas_scsiio_complete;
1889 	cm->cm_complete_data = ccb;
1890 	cm->cm_targ = targ;
1891 	cm->cm_lun = csio->ccb_h.target_lun;
1892 	cm->cm_ccb = ccb;
1893 
1894 	/*
1895 	 * If HBA is a WD and the command is not for a retry, try to build a
1896 	 * direct I/O message. If failed, or the command is for a retry, send
1897 	 * the I/O to the IR volume itself.
1898 	 */
1899 	if (sc->WD_valid_config) {
1900 		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
1901 			mpssas_direct_drive_io(sassc, cm, ccb);
1902 		} else {
1903 			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
1904 		}
1905 	}
1906 
1907 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
1908 	if (csio->bio != NULL)
1909 		biotrack(csio->bio, __func__);
1910 #endif
1911 	csio->ccb_h.qos.sim_data = sbinuptime();
1912 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1913 	    mpssas_scsiio_timeout, cm, 0);
1914 
1915 	targ->issued++;
1916 	targ->outstanding++;
1917 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1918 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1919 
1920 	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
1921 	    __func__, cm, ccb, targ->outstanding);
1922 
1923 	mps_map_command(sc, cm);
1924 	return;
1925 }
1926 
1927 /**
1928  * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1929  */
1930 static void
1931 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1932     Mpi2SCSIIOReply_t *mpi_reply)
1933 {
1934 	u32 response_info;
1935 	u8 *response_bytes;
1936 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1937 	    MPI2_IOCSTATUS_MASK;
1938 	u8 scsi_state = mpi_reply->SCSIState;
1939 	u8 scsi_status = mpi_reply->SCSIStatus;
1940 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1941 	const char *desc_ioc_state, *desc_scsi_status;
1942 
1943 	if (log_info == 0x31170000)
1944 		return;
1945 
1946 	desc_ioc_state = mps_describe_table(mps_iocstatus_string,
1947 	    ioc_status);
1948 	desc_scsi_status = mps_describe_table(mps_scsi_status_string,
1949 	    scsi_status);
1950 
1951 	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
1952 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
1953 
1954 	/*
1955 	 *We can add more detail about underflow data here
1956 	 * TO-DO
1957 	 */
1958 	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
1959 	    "scsi_state %b\n", desc_scsi_status, scsi_status,
1960 	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
1961 	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
1962 
1963 	if (sc->mps_debug & MPS_XINFO &&
1964 		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1965 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
1966 		scsi_sense_print(csio);
1967 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
1968 	}
1969 
1970 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
1971 		response_info = le32toh(mpi_reply->ResponseInfo);
1972 		response_bytes = (u8 *)&response_info;
1973 		mps_dprint(sc, MPS_XINFO, "response code(0x%1x): %s\n",
1974 		    response_bytes[0],
1975 		    mps_describe_table(mps_scsi_taskmgmt_string,
1976 		    response_bytes[0]));
1977 	}
1978 }
1979 
/*
 * Completion handler for XPT_SCSI_IO commands issued via
 * mpssas_action_scsiio().  Called with the mps mutex held.  Translates
 * the MPI2 SCSI IO reply (if any) into CAM status, handles Direct Drive
 * I/O retries, copies out autosense data, updates per-target TLR state,
 * and manages SIM/device queue freezing before completing the CCB.
 */
static void
mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpssas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;
	struct mpssas_target *target;
	target_id_t target_id;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* The command completed; cancel its pending timeout handler. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;
	/* rep is NULL for commands that never reached the hardware. */
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Per-target accounting: this command is no longer outstanding. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (ccb->csio.bio != NULL)
		biotrack(ccb->csio.bio, __func__);
#endif

	/*
	 * Log completions that raced with timeout recovery, an active task
	 * management command, or a diagnostic reset.
	 */
	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm, MPS_RECOVERY,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_XINFO, "Error sending command, "
				   "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			/* No reply + no error flags: firmware implies success,
			 * unless a diag reset threw the command away. */
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mpssas_log_command(cm, MPS_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	} else
		ccb->ccb_h.sim_priv.entries[0].field = 0;

	/* Translate the MPI2 IOCStatus into a CAM status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) &
		    MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			/* Copy out sense, clamped to both the returned length
			 * and the space CAM provided; record the residual. */
			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
					returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			      sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* Only scan the portion of the page list that was
			 * actually transferred (allocation length - resid). */
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}

		/*
		 * If this is a SATA direct-access end device, mark it so that
		 * a SCSI StartStopUnit command will be sent to it when the
		 * driver is being shutdown.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
		    ((sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
			target = &sassc->targets[target_id];
			target->supports_SSU = TRUE;
			mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
			    target_id);
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)
;
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * These can sometimes be transient transport-related
		 * errors, and sometimes persistent drive-related errors.
		 * We used to retry these without decrementing the retry
		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
		 * we hit a persistent drive problem that returns one of
		 * these error codes, we would retry indefinitely.  So,
		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
		 * count and avoid infinite retries.  We're taking the
		 * potential risk of flagging false failures in the event
		 * of a topology-related error (e.g. a SAS expander problem
		 * causes a command addressed to a drive to fail), but
		 * avoiding getting into an infinite retry loop.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		mps_dprint(sc, MPS_INFO,
		    "Controller reported %s tgt %u SMID %u loginfo %x\n",
		    mps_describe_table(mps_iocstatus_string,
		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
		    target_id, cm->cm_desc.Default.SMID,
		    le32toh(rep->IOCLogInfo));
		mps_dprint(sc, MPS_XINFO,
		    "SCSIStatus %x SCSIState %x xfercount %u\n",
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mpssas_log_command(cm, MPS_XINFO,
		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

	/* Debug-only logging; filters success noise internally. */
	mps_sc_failed_io_info(sc,csio,rep);

	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_XINFO, "Command completed, "
		    "unfreezing SIM queue\n");
	}

	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		/* Freeze the device queue so recovery replays in order. */
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2392 
2393 /* All Request reached here are Endian safe */
2394 static void
2395 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2396     union ccb *ccb) {
2397 	pMpi2SCSIIORequest_t	pIO_req;
2398 	struct mps_softc	*sc = sassc->sc;
2399 	uint64_t		virtLBA;
2400 	uint32_t		physLBA, stripe_offset, stripe_unit;
2401 	uint32_t		io_size, column;
2402 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2403 
2404 	/*
2405 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2406 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2407 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2408 	 * bit different than the 10/16 CDBs, handle them separately.
2409 	 */
2410 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2411 	CDB = pIO_req->CDB.CDB32;
2412 
2413 	/*
2414 	 * Handle 6 byte CDBs.
2415 	 */
2416 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2417 	    (CDB[0] == WRITE_6))) {
2418 		/*
2419 		 * Get the transfer size in blocks.
2420 		 */
2421 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2422 
2423 		/*
2424 		 * Get virtual LBA given in the CDB.
2425 		 */
2426 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2427 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2428 
2429 		/*
2430 		 * Check that LBA range for I/O does not exceed volume's
2431 		 * MaxLBA.
2432 		 */
2433 		if ((virtLBA + (uint64_t)io_size - 1) <=
2434 		    sc->DD_max_lba) {
2435 			/*
2436 			 * Check if the I/O crosses a stripe boundary.  If not,
2437 			 * translate the virtual LBA to a physical LBA and set
2438 			 * the DevHandle for the PhysDisk to be used.  If it
2439 			 * does cross a boundary, do normal I/O.  To get the
2440 			 * right DevHandle to use, get the map number for the
2441 			 * column, then use that map number to look up the
2442 			 * DevHandle of the PhysDisk.
2443 			 */
2444 			stripe_offset = (uint32_t)virtLBA &
2445 			    (sc->DD_stripe_size - 1);
2446 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2447 				physLBA = (uint32_t)virtLBA >>
2448 				    sc->DD_stripe_exponent;
2449 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2450 				column = physLBA % sc->DD_num_phys_disks;
2451 				pIO_req->DevHandle =
2452 				    htole16(sc->DD_column_map[column].dev_handle);
2453 				/* ???? Is this endian safe*/
2454 				cm->cm_desc.SCSIIO.DevHandle =
2455 				    pIO_req->DevHandle;
2456 
2457 				physLBA = (stripe_unit <<
2458 				    sc->DD_stripe_exponent) + stripe_offset;
2459 				ptrLBA = &pIO_req->CDB.CDB32[1];
2460 				physLBA_byte = (uint8_t)(physLBA >> 16);
2461 				*ptrLBA = physLBA_byte;
2462 				ptrLBA = &pIO_req->CDB.CDB32[2];
2463 				physLBA_byte = (uint8_t)(physLBA >> 8);
2464 				*ptrLBA = physLBA_byte;
2465 				ptrLBA = &pIO_req->CDB.CDB32[3];
2466 				physLBA_byte = (uint8_t)physLBA;
2467 				*ptrLBA = physLBA_byte;
2468 
2469 				/*
2470 				 * Set flag that Direct Drive I/O is
2471 				 * being done.
2472 				 */
2473 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2474 			}
2475 		}
2476 		return;
2477 	}
2478 
2479 	/*
2480 	 * Handle 10, 12 or 16 byte CDBs.
2481 	 */
2482 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2483 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2484 	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2485 	    (CDB[0] == WRITE_12))) {
2486 		/*
2487 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2488 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2489 		 * the else section.  10-byte and 12-byte CDB's are OK.
2490 		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2491 		 * ready to accept 12byte CDB for Direct IOs.
2492 		 */
2493 		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2494 		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2495 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2496 			/*
2497 			 * Get the transfer size in blocks.
2498 			 */
2499 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2500 
2501 			/*
2502 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2503 			 * LBA in the CDB depending on command.
2504 			 */
2505 			lba_idx = ((CDB[0] == READ_12) ||
2506 				(CDB[0] == WRITE_12) ||
2507 				(CDB[0] == READ_10) ||
2508 				(CDB[0] == WRITE_10))? 2 : 6;
2509 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2510 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2511 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2512 			    (uint64_t)CDB[lba_idx + 3];
2513 
2514 			/*
2515 			 * Check that LBA range for I/O does not exceed volume's
2516 			 * MaxLBA.
2517 			 */
2518 			if ((virtLBA + (uint64_t)io_size - 1) <=
2519 			    sc->DD_max_lba) {
2520 				/*
2521 				 * Check if the I/O crosses a stripe boundary.
2522 				 * If not, translate the virtual LBA to a
2523 				 * physical LBA and set the DevHandle for the
2524 				 * PhysDisk to be used.  If it does cross a
2525 				 * boundary, do normal I/O.  To get the right
2526 				 * DevHandle to use, get the map number for the
2527 				 * column, then use that map number to look up
2528 				 * the DevHandle of the PhysDisk.
2529 				 */
2530 				stripe_offset = (uint32_t)virtLBA &
2531 				    (sc->DD_stripe_size - 1);
2532 				if ((stripe_offset + io_size) <=
2533 				    sc->DD_stripe_size) {
2534 					physLBA = (uint32_t)virtLBA >>
2535 					    sc->DD_stripe_exponent;
2536 					stripe_unit = physLBA /
2537 					    sc->DD_num_phys_disks;
2538 					column = physLBA %
2539 					    sc->DD_num_phys_disks;
2540 					pIO_req->DevHandle =
2541 					    htole16(sc->DD_column_map[column].
2542 					    dev_handle);
2543 					cm->cm_desc.SCSIIO.DevHandle =
2544 					    pIO_req->DevHandle;
2545 
2546 					physLBA = (stripe_unit <<
2547 					    sc->DD_stripe_exponent) +
2548 					    stripe_offset;
2549 					ptrLBA =
2550 					    &pIO_req->CDB.CDB32[lba_idx];
2551 					physLBA_byte = (uint8_t)(physLBA >> 24);
2552 					*ptrLBA = physLBA_byte;
2553 					ptrLBA =
2554 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2555 					physLBA_byte = (uint8_t)(physLBA >> 16);
2556 					*ptrLBA = physLBA_byte;
2557 					ptrLBA =
2558 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2559 					physLBA_byte = (uint8_t)(physLBA >> 8);
2560 					*ptrLBA = physLBA_byte;
2561 					ptrLBA =
2562 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2563 					physLBA_byte = (uint8_t)physLBA;
2564 					*ptrLBA = physLBA_byte;
2565 
2566 					/*
2567 					 * Set flag that Direct Drive I/O is
2568 					 * being done.
2569 					 */
2570 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2571 				}
2572 			}
2573 		} else {
2574 			/*
2575 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2576 			 * 0.  Get the transfer size in blocks.
2577 			 */
2578 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2579 
2580 			/*
2581 			 * Get virtual LBA.
2582 			 */
2583 			virtLBA = ((uint64_t)CDB[2] << 54) |
2584 			    ((uint64_t)CDB[3] << 48) |
2585 			    ((uint64_t)CDB[4] << 40) |
2586 			    ((uint64_t)CDB[5] << 32) |
2587 			    ((uint64_t)CDB[6] << 24) |
2588 			    ((uint64_t)CDB[7] << 16) |
2589 			    ((uint64_t)CDB[8] << 8) |
2590 			    (uint64_t)CDB[9];
2591 
2592 			/*
2593 			 * Check that LBA range for I/O does not exceed volume's
2594 			 * MaxLBA.
2595 			 */
2596 			if ((virtLBA + (uint64_t)io_size - 1) <=
2597 			    sc->DD_max_lba) {
2598 				/*
2599 				 * Check if the I/O crosses a stripe boundary.
2600 				 * If not, translate the virtual LBA to a
2601 				 * physical LBA and set the DevHandle for the
2602 				 * PhysDisk to be used.  If it does cross a
2603 				 * boundary, do normal I/O.  To get the right
2604 				 * DevHandle to use, get the map number for the
2605 				 * column, then use that map number to look up
2606 				 * the DevHandle of the PhysDisk.
2607 				 */
2608 				stripe_offset = (uint32_t)virtLBA &
2609 				    (sc->DD_stripe_size - 1);
2610 				if ((stripe_offset + io_size) <=
2611 				    sc->DD_stripe_size) {
2612 					physLBA = (uint32_t)(virtLBA >>
2613 					    sc->DD_stripe_exponent);
2614 					stripe_unit = physLBA /
2615 					    sc->DD_num_phys_disks;
2616 					column = physLBA %
2617 					    sc->DD_num_phys_disks;
2618 					pIO_req->DevHandle =
2619 					    htole16(sc->DD_column_map[column].
2620 					    dev_handle);
2621 					cm->cm_desc.SCSIIO.DevHandle =
2622 					    pIO_req->DevHandle;
2623 
2624 					physLBA = (stripe_unit <<
2625 					    sc->DD_stripe_exponent) +
2626 					    stripe_offset;
2627 
2628 					/*
2629 					 * Set upper 4 bytes of LBA to 0.  We
2630 					 * assume that the phys disks are less
2631 					 * than 2 TB's in size.  Then, set the
2632 					 * lower 4 bytes.
2633 					 */
2634 					pIO_req->CDB.CDB32[2] = 0;
2635 					pIO_req->CDB.CDB32[3] = 0;
2636 					pIO_req->CDB.CDB32[4] = 0;
2637 					pIO_req->CDB.CDB32[5] = 0;
2638 					ptrLBA = &pIO_req->CDB.CDB32[6];
2639 					physLBA_byte = (uint8_t)(physLBA >> 24);
2640 					*ptrLBA = physLBA_byte;
2641 					ptrLBA = &pIO_req->CDB.CDB32[7];
2642 					physLBA_byte = (uint8_t)(physLBA >> 16);
2643 					*ptrLBA = physLBA_byte;
2644 					ptrLBA = &pIO_req->CDB.CDB32[8];
2645 					physLBA_byte = (uint8_t)(physLBA >> 8);
2646 					*ptrLBA = physLBA_byte;
2647 					ptrLBA = &pIO_req->CDB.CDB32[9];
2648 					physLBA_byte = (uint8_t)physLBA;
2649 					*ptrLBA = physLBA_byte;
2650 
2651 					/*
2652 					 * Set flag that Direct Drive I/O is
2653 					 * being done.
2654 					 */
2655 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2656 				}
2657 			}
2658 		}
2659 	}
2660 }
2661 
2662 #if __FreeBSD_version >= 900026
2663 static void
2664 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2665 {
2666 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2667 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2668 	uint64_t sasaddr;
2669 	union ccb *ccb;
2670 
2671 	ccb = cm->cm_complete_data;
2672 
2673 	/*
2674 	 * Currently there should be no way we can hit this case.  It only
2675 	 * happens when we have a failure to allocate chain frames, and SMP
2676 	 * commands require two S/G elements only.  That should be handled
2677 	 * in the standard request size.
2678 	 */
2679 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2680 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2681 			   __func__, cm->cm_flags);
2682 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2683 		goto bailout;
2684         }
2685 
2686 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2687 	if (rpl == NULL) {
2688 		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2689 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2690 		goto bailout;
2691 	}
2692 
2693 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2694 	sasaddr = le32toh(req->SASAddress.Low);
2695 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2696 
2697 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2698 	    MPI2_IOCSTATUS_SUCCESS ||
2699 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2700 		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2701 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2702 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2703 		goto bailout;
2704 	}
2705 
2706 	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2707 		   "%#jx completed successfully\n", __func__,
2708 		   (uintmax_t)sasaddr);
2709 
2710 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2711 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2712 	else
2713 		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2714 
2715 bailout:
2716 	/*
2717 	 * We sync in both directions because we had DMAs in the S/G list
2718 	 * in both directions.
2719 	 */
2720 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2721 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2722 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2723 	mps_free_command(sc, cm);
2724 	xpt_done(ccb);
2725 }
2726 
/*
 * Build and dispatch an SMP passthrough request (XPT_SMP_IO) to the SMP
 * target at 'sasaddr'.  The request and response buffers come from the
 * CCB; only virtual addresses are supported, with at most one S/G
 * segment each for the request and the response (a chip limitation).
 * Completion is asynchronous via mpssas_smpio_complete(), which unloads
 * the DMA map, frees the command, and finishes the CCB.
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
			   "%s: physical addresses not supported\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the request buffer, iovec 1 the response buffer. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;

}
2897 
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address the SMP request
 * should be routed to and hand the CCB off to mpssas_send_smpcmd().
 * If the addressed device is itself an SMP target its own address is
 * used; otherwise the request is directed at the parent expander.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we'll need to figure out what the
	 * expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3033 #endif //__FreeBSD_version >= 900026
3034 
/*
 * Handle an XPT_RESET_DEV CCB by issuing a SCSI task management Target
 * Reset (with a hard link reset) to the device's handle.  Completion is
 * asynchronous via mpssas_resetdev_complete().
 */
static void
mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *tm;
	struct mpssas_target *targ;

	MPS_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_RESET_DEV\n",
	     ccb->ccb_h.target_id));
	sc = sassc->sc;
	tm = mps_alloc_command(sc);
	if (tm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "command alloc failure in mpssas_action_resetdev\n");
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mpssas_resetdev_complete;
	tm->cm_complete_data = ccb;
	tm->cm_targ = targ;
	/* Hold off further I/O to the target while the reset is in flight. */
	targ->flags |= MPSSAS_TARGET_INRESET;

	mps_map_command(sc, tm);
}
3077 
/*
 * Completion handler for the target reset issued by
 * mpssas_action_resetdev().  Translates the task management reply into
 * CCB status, announces the reset to CAM on success, frees the TM
 * command, and finishes the CCB.
 */
static void
mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mps_dprint(sc, MPS_ERROR,
			   "%s: cm_flags = %#x for reset of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   req->DevHandle);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/*
	 * NOTE(review): ResponseCode appears to be an 8-bit field in the
	 * MPI2 task management reply, in which case the le32toh()
	 * conversions below are unnecessary (and would corrupt the value
	 * on big-endian hosts) -- confirm against mpi2_init.h.
	 */
	mps_dprint(sc, MPS_XINFO,
	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		/* Tell CAM a bus device reset was delivered to this target. */
		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

bailout:

	mpssas_free_tm(sc, tm);
	xpt_done(ccb);
}
3125 
3126 static void
3127 mpssas_poll(struct cam_sim *sim)
3128 {
3129 	struct mpssas_softc *sassc;
3130 
3131 	sassc = cam_sim_softc(sim);
3132 
3133 	if (sassc->sc->mps_debug & MPS_TRACE) {
3134 		/* frequent debug messages during a panic just slow
3135 		 * everything down too much.
3136 		 */
3137 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3138 		sassc->sc->mps_debug &= ~MPS_TRACE;
3139 	}
3140 
3141 	mps_intr_locked(sassc->sc);
3142 }
3143 
/*
 * CAM async event callback.  Depending on the FreeBSD version this
 * either reacts to AC_ADVINFO_CHANGED (refreshing cached READ CAPACITY
 * 16 data to track per-LUN EEDP formatting) or to AC_FOUND_DEVICE
 * (probing EEDP support via mpssas_check_eedp()).
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the existing LUN record, if any. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* No record yet: create one so the EEDP state can be kept. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached READ CAPACITY 16 data through the
		 * XPT_DEV_ADVINFO interface.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/* Record whether the LUN is formatted with protection info. */
		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3248 
3249 #if (__FreeBSD_version < 901503) || \
3250     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3251 static void
3252 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3253 		  struct ccb_getdev *cgd)
3254 {
3255 	struct mpssas_softc *sassc = sc->sassc;
3256 	struct ccb_scsiio *csio;
3257 	struct scsi_read_capacity_16 *scsi_cmd;
3258 	struct scsi_read_capacity_eedp *rcap_buf;
3259 	path_id_t pathid;
3260 	target_id_t targetid;
3261 	lun_id_t lunid;
3262 	union ccb *ccb;
3263 	struct cam_path *local_path;
3264 	struct mpssas_target *target;
3265 	struct mpssas_lun *lun;
3266 	uint8_t	found_lun;
3267 	char path_str[64];
3268 
3269 	sassc = sc->sassc;
3270 	pathid = cam_sim_path(sassc->sim);
3271 	targetid = xpt_path_target_id(path);
3272 	lunid = xpt_path_lun_id(path);
3273 
3274 	KASSERT(targetid < sassc->maxtargets,
3275 	    ("Target %d out of bounds in mpssas_check_eedp\n",
3276 	     targetid));
3277 	target = &sassc->targets[targetid];
3278 	if (target->handle == 0x0)
3279 		return;
3280 
3281 	/*
3282 	 * Determine if the device is EEDP capable.
3283 	 *
3284 	 * If this flag is set in the inquiry data,
3285 	 * the device supports protection information,
3286 	 * and must support the 16 byte read
3287 	 * capacity command, otherwise continue without
3288 	 * sending read cap 16
3289 	 */
3290 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3291 		return;
3292 
3293 	/*
3294 	 * Issue a READ CAPACITY 16 command.  This info
3295 	 * is used to determine if the LUN is formatted
3296 	 * for EEDP support.
3297 	 */
3298 	ccb = xpt_alloc_ccb_nowait();
3299 	if (ccb == NULL) {
3300 		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3301 		    "for EEDP support.\n");
3302 		return;
3303 	}
3304 
3305 	if (xpt_create_path(&local_path, xpt_periph,
3306 	    pathid, targetid, lunid) != CAM_REQ_CMP) {
3307 		mps_dprint(sc, MPS_ERROR, "Unable to create "
3308 		    "path for EEDP support\n");
3309 		xpt_free_ccb(ccb);
3310 		return;
3311 	}
3312 
3313 	/*
3314 	 * If LUN is already in list, don't create a new
3315 	 * one.
3316 	 */
3317 	found_lun = FALSE;
3318 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3319 		if (lun->lun_id == lunid) {
3320 			found_lun = TRUE;
3321 			break;
3322 		}
3323 	}
3324 	if (!found_lun) {
3325 		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3326 		    M_NOWAIT | M_ZERO);
3327 		if (lun == NULL) {
3328 			mps_dprint(sc, MPS_ERROR,
3329 			    "Unable to alloc LUN for EEDP support.\n");
3330 			xpt_free_path(local_path);
3331 			xpt_free_ccb(ccb);
3332 			return;
3333 		}
3334 		lun->lun_id = lunid;
3335 		SLIST_INSERT_HEAD(&target->luns, lun,
3336 		    lun_link);
3337 	}
3338 
3339 	xpt_path_string(local_path, path_str, sizeof(path_str));
3340 
3341 	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3342 	    path_str, target->handle);
3343 
3344 	/*
3345 	 * Issue a READ CAPACITY 16 command for the LUN.
3346 	 * The mpssas_read_cap_done function will load
3347 	 * the read cap info into the LUN struct.
3348 	 */
3349 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3350 	    M_MPT2, M_NOWAIT | M_ZERO);
3351 	if (rcap_buf == NULL) {
3352 		mps_dprint(sc, MPS_FAULT,
3353 		    "Unable to alloc read capacity buffer for EEDP support.\n");
3354 		xpt_free_path(ccb->ccb_h.path);
3355 		xpt_free_ccb(ccb);
3356 		return;
3357 	}
3358 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3359 	csio = &ccb->csio;
3360 	csio->ccb_h.func_code = XPT_SCSI_IO;
3361 	csio->ccb_h.flags = CAM_DIR_IN;
3362 	csio->ccb_h.retry_count = 4;
3363 	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3364 	csio->ccb_h.timeout = 60000;
3365 	csio->data_ptr = (uint8_t *)rcap_buf;
3366 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3367 	csio->sense_len = MPS_SENSE_LEN;
3368 	csio->cdb_len = sizeof(*scsi_cmd);
3369 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3370 
3371 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3372 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3373 	scsi_cmd->opcode = 0x9E;
3374 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3375 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3376 
3377 	ccb->ccb_h.ppriv_ptr1 = sassc;
3378 	xpt_action(ccb);
3379 }
3380 
/*
 * Completion handler for the READ CAPACITY 16 issued by
 * mpssas_check_eedp().  Looks up the LUN on the target and records
 * whether it is formatted with protection information, then frees the
 * data buffer, path, and CCB.
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/* Driver needs to release the devq if the SCSI command was
	 * generated by the driver internally.
	 * Currently there is a single place where the driver
	 * calls a SCSI command internally. In future if the driver
	 * calls more SCSI commands internally, it needs to release
	 * the devq internally, since those commands will not go back to
	 * cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mpssas_read_cap_done\n",
	     done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Protection-enable bit in the EEDP read-cap data. */
		if (rcap_buf->protect & 0x01) {
			/*
			 * NOTE(review): target_lun is lun_id_t; printing it
			 * with %d assumes a 32-bit lun_id_t -- verify on
			 * versions where lun_id_t is 64-bit.
			 */
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
 			    "target ID %d is formatted for EEDP "
 			    "support.\n", done_ccb->ccb_h.target_lun,
 			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3451 #endif /* (__FreeBSD_version < 901503) || \
3452           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3453 
3454 void
3455 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3456     struct mpssas_target *target, lun_id_t lun_id)
3457 {
3458 	union ccb *ccb;
3459 	path_id_t path_id;
3460 
3461 	/*
3462 	 * Set the INRESET flag for this target so that no I/O will be sent to
3463 	 * the target until the reset has completed.  If an I/O request does
3464 	 * happen, the devq will be frozen.  The CCB holds the path which is
3465 	 * used to release the devq.  The devq is released and the CCB is freed
3466 	 * when the TM completes.
3467 	 */
3468 	ccb = xpt_alloc_ccb_nowait();
3469 	if (ccb) {
3470 		path_id = cam_sim_path(sc->sassc->sim);
3471 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3472 		    target->tid, lun_id) != CAM_REQ_CMP) {
3473 			xpt_free_ccb(ccb);
3474 		} else {
3475 			tm->cm_ccb = ccb;
3476 			tm->cm_targ = target;
3477 			target->flags |= MPSSAS_TARGET_INRESET;
3478 		}
3479 	}
3480 }
3481 
/*
 * Kick off topology discovery by sending the IOC a port enable request.
 * Always returns 0; a failure from mpssas_send_portenable() is not
 * propagated to the caller.
 */
int
mpssas_startup(struct mps_softc *sc)
{

	/*
	 * Send the port enable message and set the wait_for_port_enable flag.
	 * This flag helps to keep the simq frozen until all discovery events
	 * are processed.
	 */
	sc->wait_for_port_enable = 1;
	mpssas_send_portenable(sc);
	return (0);
}
3495 
3496 static int
3497 mpssas_send_portenable(struct mps_softc *sc)
3498 {
3499 	MPI2_PORT_ENABLE_REQUEST *request;
3500 	struct mps_command *cm;
3501 
3502 	MPS_FUNCTRACE(sc);
3503 
3504 	if ((cm = mps_alloc_command(sc)) == NULL)
3505 		return (EBUSY);
3506 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3507 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3508 	request->MsgFlags = 0;
3509 	request->VP_ID = 0;
3510 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3511 	cm->cm_complete = mpssas_portenable_complete;
3512 	cm->cm_data = NULL;
3513 	cm->cm_sge = NULL;
3514 
3515 	mps_map_command(sc, cm);
3516 	mps_dprint(sc, MPS_XINFO,
3517 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3518 	    cm, cm->cm_req, cm->cm_complete);
3519 	return (0);
3520 }
3521 
3522 static void
3523 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3524 {
3525 	MPI2_PORT_ENABLE_REPLY *reply;
3526 	struct mpssas_softc *sassc;
3527 
3528 	MPS_FUNCTRACE(sc);
3529 	sassc = sc->sassc;
3530 
3531 	/*
3532 	 * Currently there should be no way we can hit this case.  It only
3533 	 * happens when we have a failure to allocate chain frames, and
3534 	 * port enable commands don't have S/G lists.
3535 	 */
3536 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3537 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3538 			   "This should not happen!\n", __func__, cm->cm_flags);
3539 	}
3540 
3541 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3542 	if (reply == NULL)
3543 		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3544 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3545 	    MPI2_IOCSTATUS_SUCCESS)
3546 		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3547 
3548 	mps_free_command(sc, cm);
3549 
3550 	/*
3551 	 * Get WarpDrive info after discovery is complete but before the scan
3552 	 * starts.  At this point, all devices are ready to be exposed to the
3553 	 * OS.  If devices should be hidden instead, take them out of the
3554 	 * 'targets' array before the scan.  The devinfo for a disk will have
3555 	 * some info and a volume's will be 0.  Use that to remove disks.
3556 	 */
3557 	mps_wd_config_pages(sc);
3558 
3559 	/*
3560 	 * Done waiting for port enable to complete.  Decrement the refcount.
3561 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3562 	 * take place.  Since the simq was explicitly frozen before port
3563 	 * enable, it must be explicitly released here to keep the
3564 	 * freeze/release count in sync.
3565 	 */
3566 	sc->wait_for_port_enable = 0;
3567 	sc->port_enable_complete = 1;
3568 	wakeup(&sc->port_enable_complete);
3569 	mpssas_startup_decrement(sassc);
3570 }
3571 
3572 int
3573 mpssas_check_id(struct mpssas_softc *sassc, int id)
3574 {
3575 	struct mps_softc *sc = sassc->sc;
3576 	char *ids;
3577 	char *name;
3578 
3579 	ids = &sc->exclude_ids[0];
3580 	while((name = strsep(&ids, ",")) != NULL) {
3581 		if (name[0] == '\0')
3582 			continue;
3583 		if (strtol(name, NULL, 0) == (long)id)
3584 			return (1);
3585 	}
3586 
3587 	return (0);
3588 }
3589 
3590 void
3591 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3592 {
3593 	struct mpssas_softc *sassc;
3594 	struct mpssas_lun *lun, *lun_tmp;
3595 	struct mpssas_target *targ;
3596 	int i;
3597 
3598 	sassc = sc->sassc;
3599 	/*
3600 	 * The number of targets is based on IOC Facts, so free all of
3601 	 * the allocated LUNs for each target and then the target buffer
3602 	 * itself.
3603 	 */
3604 	for (i=0; i< maxtargets; i++) {
3605 		targ = &sassc->targets[i];
3606 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3607 			free(lun, M_MPT2);
3608 		}
3609 	}
3610 	free(sassc->targets, M_MPT2);
3611 
3612 	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3613 	    M_MPT2, M_WAITOK|M_ZERO);
3614 	if (!sassc->targets) {
3615 		panic("%s failed to alloc targets with error %d\n",
3616 		    __func__, ENOMEM);
3617 	}
3618 }
3619