xref: /freebsd/sys/dev/mps/mps_sas.c (revision 46c1105fbb6fbff6d6ccd0a18571342eb992d637)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2015 Avago Technologies
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
29  *
30  * $FreeBSD$
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /* Communications core for Avago Technologies (LSI) MPT2 */
37 
38 /* TODO Move headers to mpsvar */
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/selinfo.h>
44 #include <sys/module.h>
45 #include <sys/bus.h>
46 #include <sys/conf.h>
47 #include <sys/bio.h>
48 #include <sys/malloc.h>
49 #include <sys/uio.h>
50 #include <sys/sysctl.h>
51 #include <sys/endian.h>
52 #include <sys/queue.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
55 #include <sys/sbuf.h>
56 
57 #include <machine/bus.h>
58 #include <machine/resource.h>
59 #include <sys/rman.h>
60 
61 #include <machine/stdarg.h>
62 
63 #include <cam/cam.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_xpt.h>
66 #include <cam/cam_debug.h>
67 #include <cam/cam_sim.h>
68 #include <cam/cam_xpt_sim.h>
69 #include <cam/cam_xpt_periph.h>
70 #include <cam/cam_periph.h>
71 #include <cam/scsi/scsi_all.h>
72 #include <cam/scsi/scsi_message.h>
73 #if __FreeBSD_version >= 900026
74 #include <cam/scsi/smp_all.h>
75 #endif
76 
77 #include <dev/mps/mpi/mpi2_type.h>
78 #include <dev/mps/mpi/mpi2.h>
79 #include <dev/mps/mpi/mpi2_ioc.h>
80 #include <dev/mps/mpi/mpi2_sas.h>
81 #include <dev/mps/mpi/mpi2_cnfg.h>
82 #include <dev/mps/mpi/mpi2_init.h>
83 #include <dev/mps/mpi/mpi2_tool.h>
84 #include <dev/mps/mps_ioctl.h>
85 #include <dev/mps/mpsvar.h>
86 #include <dev/mps/mps_table.h>
87 #include <dev/mps/mps_sas.h>
88 
89 #define MPSSAS_DISCOVERY_TIMEOUT	20
90 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
91 
92 /*
93  * static array to check SCSI OpCode for EEDP protection bits
94  */
95 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
96 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
97 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * EEDP flag lookup indexed by SCSI opcode (one row per 16 opcodes).
 * Nonzero entries mark commands that carry protection information:
 * reads get check/remove, writes get insert.
 */
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x28 READ(10), 0x2A WRITE(10), 0x2E WRITE AND VERIFY(10), 0x2F VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x88 READ(16), 0x8A WRITE(16), 0x8E WRITE AND VERIFY(16), 0x8F VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA8 READ(12), 0xAA WRITE(12), 0xAE WRITE AND VERIFY(12), 0xAF VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
116 
117 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
118 
119 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mpssas_poll(struct cam_sim *sim);
123 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
124     struct mps_command *cm);
125 static void mpssas_scsiio_timeout(void *data);
126 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
127 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
128     struct mps_command *cm, union ccb *ccb);
129 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
130 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
131 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
132 #if __FreeBSD_version >= 900026
133 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
134 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
135 			       uint64_t sasaddr);
136 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
137 #endif //FreeBSD_version >= 900026
138 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
139 static void mpssas_async(void *callback_arg, uint32_t code,
140 			 struct cam_path *path, void *arg);
141 #if (__FreeBSD_version < 901503) || \
142     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
143 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
144 			      struct ccb_getdev *cgd);
145 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
146 #endif
147 static int mpssas_send_portenable(struct mps_softc *sc);
148 static void mpssas_portenable_complete(struct mps_softc *sc,
149     struct mps_command *cm);
150 
151 struct mpssas_target *
152 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
153 {
154 	struct mpssas_target *target;
155 	int i;
156 
157 	for (i = start; i < sassc->maxtargets; i++) {
158 		target = &sassc->targets[i];
159 		if (target->handle == handle)
160 			return (target);
161 	}
162 
163 	return (NULL);
164 }
165 
166 /* we need to freeze the simq during attach and diag reset, to avoid failing
167  * commands before device handles have been found by discovery.  Since
168  * discovery involves reading config pages and possibly sending commands,
169  * discovery actions may continue even after we receive the end of discovery
170  * event, so refcount discovery actions instead of assuming we can unfreeze
171  * the simq when we get the event.
172  */
void
mpssas_startup_increment(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	/* References are only tracked during the startup/discovery window. */
	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s freezing simq\n", __func__);
#if __FreeBSD_version >= 1000039
			/* Also hold off boot until discovery completes. */
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
192 
193 void
194 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
195 {
196 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
197 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
198 		xpt_release_simq(sassc->sim, 1);
199 		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
200 	}
201 }
202 
void
mpssas_startup_decrement(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	/*
	 * Drop one discovery reference; the simq stays frozen until the
	 * count returns to zero (see mpssas_startup_increment()).
	 */
	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPSSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if __FreeBSD_version >= 1000039
			/* Pairs with xpt_hold_boot() in the increment path. */
			xpt_release_boot();
#else
			mpssas_rescan_target(sassc->sc, NULL);
#endif
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
227 
228 /* The firmware requires us to stop sending commands when we're doing task
229  * management, so refcount the TMs and keep the simq frozen when any are in
230  * use.
231  */
struct mps_command *
mpssas_alloc_tm(struct mps_softc *sc)
{
	/*
	 * Task-management commands come from the dedicated high-priority
	 * pool; may return NULL when the pool is exhausted.
	 */
	return (mps_alloc_high_priority_command(sc));
}
240 
241 void
242 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
243 {
244 	if (tm == NULL)
245 		return;
246 
247 	/*
248 	 * For TM's the devq is frozen for the device.  Unfreeze it here and
249 	 * free the resources used for freezing the devq.  Must clear the
250 	 * INRESET flag as well or scsi I/O will not work.
251 	 */
252 	if (tm->cm_targ != NULL) {
253 		tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
254 	}
255 	if (tm->cm_ccb) {
256 		mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
257 		    tm->cm_targ->tid);
258 		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
259 		xpt_free_path(tm->cm_ccb->ccb_h.path);
260 		xpt_free_ccb(tm->cm_ccb);
261 	}
262 
263 	mps_free_high_priority_command(sc, tm);
264 }
265 
/*
 * Schedule an asynchronous CAM rescan of one target, or of the whole bus
 * when targ is NULL.  The CCB and path are consumed by xpt_rescan().
 */
void
mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
{
	struct mpssas_softc *sassc = sc->sassc;
	path_id_t pathid;
	target_id_t targetid;
	union ccb *ccb;

	MPS_FUNCTRACE(sc);
	pathid = cam_sim_path(sassc->sim);
	if (targ == NULL)
		targetid = CAM_TARGET_WILDCARD;
	else
		/* Target ID is the index of targ within the targets[] array. */
		targetid = targ - sassc->targets;

	/*
	 * Allocate a CCB and schedule a rescan.
	 */
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
		return;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
		xpt_free_ccb(ccb);
		return;
	}

	/* Wildcard target means rescan the entire bus. */
	if (targetid == CAM_TARGET_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else
		ccb->ccb_h.func_code = XPT_SCAN_TGT;

	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
	xpt_rescan(ccb);
}
305 
/*
 * Format and emit a debug message about a command, prefixed with either
 * the CAM path (and CDB, for SCSI I/O) or a "noperiph" sim/target/lun
 * tuple, followed by the SMID.  No-op unless 'level' is enabled in
 * mps_debug.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	/* Fixed-size sbuf backed by str[]; output is truncated, not grown. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			/* Include the decoded CDB and transfer length. */
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB attached; identify the command by sim/target/lun. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));

	va_end(ap);
}
351 
352 
353 static void
354 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
355 {
356 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
357 	struct mpssas_target *targ;
358 	uint16_t handle;
359 
360 	MPS_FUNCTRACE(sc);
361 
362 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
363 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
364 	targ = tm->cm_targ;
365 
366 	if (reply == NULL) {
367 		/* XXX retry the remove after the diag reset completes? */
368 		mps_dprint(sc, MPS_FAULT,
369 		    "%s NULL reply resetting device 0x%04x\n", __func__,
370 		    handle);
371 		mpssas_free_tm(sc, tm);
372 		return;
373 	}
374 
375 	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
376 		mps_dprint(sc, MPS_FAULT,
377 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
378 		   reply->IOCStatus, handle);
379 		mpssas_free_tm(sc, tm);
380 		return;
381 	}
382 
383 	mps_dprint(sc, MPS_XINFO,
384 	    "Reset aborted %u commands\n", reply->TerminationCount);
385 	mps_free_reply(sc, tm->cm_reply_data);
386 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
387 
388 	mps_dprint(sc, MPS_XINFO,
389 	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
390 
391 	/*
392 	 * Don't clear target if remove fails because things will get confusing.
393 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
394 	 * this target id if possible, and so we can assign the same target id
395 	 * to this device if it comes back in the future.
396 	 */
397 	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
398 		targ = tm->cm_targ;
399 		targ->handle = 0x0;
400 		targ->encl_handle = 0x0;
401 		targ->encl_slot = 0x0;
402 		targ->exp_dev_handle = 0x0;
403 		targ->phy_num = 0x0;
404 		targ->linkrate = 0x0;
405 		targ->devinfo = 0x0;
406 		targ->flags = 0x0;
407 	}
408 
409 	mpssas_free_tm(sc, tm);
410 }
411 
412 
413 /*
414  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
415  * Otherwise Volume Delete is same as Bare Drive Removal.
416  */
417 void
418 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
419 {
420 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
421 	struct mps_softc *sc;
422 	struct mps_command *cm;
423 	struct mpssas_target *targ = NULL;
424 
425 	MPS_FUNCTRACE(sassc->sc);
426 	sc = sassc->sc;
427 
428 #ifdef WD_SUPPORT
429 	/*
430 	 * If this is a WD controller, determine if the disk should be exposed
431 	 * to the OS or not.  If disk should be exposed, return from this
432 	 * function without doing anything.
433 	 */
434 	if (sc->WD_available && (sc->WD_hide_expose ==
435 	    MPS_WD_EXPOSE_ALWAYS)) {
436 		return;
437 	}
438 #endif //WD_SUPPORT
439 
440 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
441 	if (targ == NULL) {
442 		/* FIXME: what is the action? */
443 		/* We don't know about this device? */
444 		mps_dprint(sc, MPS_ERROR,
445 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
446 		return;
447 	}
448 
449 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
450 
451 	cm = mpssas_alloc_tm(sc);
452 	if (cm == NULL) {
453 		mps_dprint(sc, MPS_ERROR,
454 		    "%s: command alloc failure\n", __func__);
455 		return;
456 	}
457 
458 	mpssas_rescan_target(sc, targ);
459 
460 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
461 	req->DevHandle = targ->handle;
462 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
463 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
464 
465 	/* SAS Hard Link Reset / SATA Link Reset */
466 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
467 
468 	cm->cm_targ = targ;
469 	cm->cm_data = NULL;
470 	cm->cm_desc.HighPriority.RequestFlags =
471 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
472 	cm->cm_complete = mpssas_remove_volume;
473 	cm->cm_complete_data = (void *)(uintptr_t)handle;
474 
475 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
476 	    __func__, targ->tid);
477 	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
478 
479 	mps_map_command(sc, cm);
480 }
481 
482 /*
483  * The MPT2 firmware performs debounce on the link to avoid transient link
484  * errors and false removals.  When it does decide that link has been lost
485  * and a device need to go away, it expects that the host will perform a
486  * target reset and then an op remove.  The reset has the side-effect of
487  * aborting any outstanding requests for the device, which is required for
488  * the op-remove to succeed.  It's not clear if the host should check for
489  * the device coming back alive after the reset.
490  */
void
mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ = NULL;

	MPS_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mpssas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mps_dprint(sc, MPS_ERROR,
		    "%s : invalid handle 0x%x \n", __func__, handle);
		return;
	}

	targ->flags |= MPSSAS_TARGET_INREMOVAL;

	/* TM allocation failure leaves the target flagged but un-reset;
	 * presumably a later event retries — TODO confirm recovery path. */
	cm = mpssas_alloc_tm(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: command alloc failure\n", __func__);
		return;
	}

	mpssas_rescan_target(sc, targ);

	/* Build the target-reset TM; fields are little-endian on the wire. */
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	/* Completion continues the removal in mpssas_remove_device(). */
	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	cm->cm_complete = mpssas_remove_device;
	cm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);

	mps_map_command(sc, cm);
}
544 
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_remove().  On success, reuses the same command frame to
 * send the MPI2_SAS_OP_REMOVE_DEVICE op-remove, then completes any
 * commands still queued on the target with CAM_DEV_NOT_THERE.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	/* Reply fields are little-endian; convert before comparing. */
	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_FAULT,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	/* Free the reply now; clear cm_reply so it isn't freed again when
	 * the reused command completes. */
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	/* Complete anything still queued on the target: the device is gone.
	 * Note 'tm' is reused as the iterator here; the TM itself has
	 * already been handed to mps_map_command() above. */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mpssas_scsiio_complete(sc, tm);
	}
}
619 
/*
 * Completion handler for the SAS IO unit control "remove device" request.
 * On success, clears the target's identifying state and frees its LUN
 * list; devname/sasaddr are deliberately preserved (see comment below).
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Release the per-LUN bookkeeping for the departed device. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
686 
687 static int
688 mpssas_register_events(struct mps_softc *sc)
689 {
690 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
691 
692 	bzero(events, 16);
693 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
694 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
695 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
696 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
697 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
698 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
699 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
700 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
701 	setbit(events, MPI2_EVENT_IR_VOLUME);
702 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
703 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
704 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
705 
706 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
707 	    &sc->sassc->mpssas_eh);
708 
709 	return (0);
710 }
711 
712 int
713 mps_attach_sas(struct mps_softc *sc)
714 {
715 	struct mpssas_softc *sassc;
716 	cam_status status;
717 	int unit, error = 0;
718 
719 	MPS_FUNCTRACE(sc);
720 
721 	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
722 	if(!sassc) {
723 		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
724 		__func__, __LINE__);
725 		return (ENOMEM);
726 	}
727 
728 	/*
729 	 * XXX MaxTargets could change during a reinit.  Since we don't
730 	 * resize the targets[] array during such an event, cache the value
731 	 * of MaxTargets here so that we don't get into trouble later.  This
732 	 * should move into the reinit logic.
733 	 */
734 	sassc->maxtargets = sc->facts->MaxTargets;
735 	sassc->targets = malloc(sizeof(struct mpssas_target) *
736 	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
737 	if(!sassc->targets) {
738 		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
739 		__func__, __LINE__);
740 		free(sassc, M_MPT2);
741 		return (ENOMEM);
742 	}
743 	sc->sassc = sassc;
744 	sassc->sc = sc;
745 
746 	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
747 		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
748 		error = ENOMEM;
749 		goto out;
750 	}
751 
752 	unit = device_get_unit(sc->mps_dev);
753 	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
754 	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
755 	if (sassc->sim == NULL) {
756 		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
757 		error = EINVAL;
758 		goto out;
759 	}
760 
761 	TAILQ_INIT(&sassc->ev_queue);
762 
763 	/* Initialize taskqueue for Event Handling */
764 	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
765 	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
766 	    taskqueue_thread_enqueue, &sassc->ev_tq);
767 	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
768 	    device_get_nameunit(sc->mps_dev));
769 
770 	mps_lock(sc);
771 
772 	/*
773 	 * XXX There should be a bus for every port on the adapter, but since
774 	 * we're just going to fake the topology for now, we'll pretend that
775 	 * everything is just a target on a single bus.
776 	 */
777 	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
778 		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
779 		    error);
780 		mps_unlock(sc);
781 		goto out;
782 	}
783 
784 	/*
785 	 * Assume that discovery events will start right away.
786 	 *
787 	 * Hold off boot until discovery is complete.
788 	 */
789 	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
790 	sc->sassc->startup_refcount = 0;
791 	mpssas_startup_increment(sassc);
792 
793 	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
794 
795 	/*
796 	 * Register for async events so we can determine the EEDP
797 	 * capabilities of devices.
798 	 */
799 	status = xpt_create_path(&sassc->path, /*periph*/NULL,
800 	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
801 	    CAM_LUN_WILDCARD);
802 	if (status != CAM_REQ_CMP) {
803 		mps_printf(sc, "Error %#x creating sim path\n", status);
804 		sassc->path = NULL;
805 	} else {
806 		int event;
807 
808 #if (__FreeBSD_version >= 1000006) || \
809     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
810 		event = AC_ADVINFO_CHANGED;
811 #else
812 		event = AC_FOUND_DEVICE;
813 #endif
814 		status = xpt_register_async(event, mpssas_async, sc,
815 					    sassc->path);
816 		if (status != CAM_REQ_CMP) {
817 			mps_dprint(sc, MPS_ERROR,
818 			    "Error %#x registering async handler for "
819 			    "AC_ADVINFO_CHANGED events\n", status);
820 			xpt_free_path(sassc->path);
821 			sassc->path = NULL;
822 		}
823 	}
824 	if (status != CAM_REQ_CMP) {
825 		/*
826 		 * EEDP use is the exception, not the rule.
827 		 * Warn the user, but do not fail to attach.
828 		 */
829 		mps_printf(sc, "EEDP capabilities disabled.\n");
830 	}
831 
832 	mps_unlock(sc);
833 
834 	mpssas_register_events(sc);
835 out:
836 	if (error)
837 		mps_detach_sas(sc);
838 	return (error);
839 }
840 
/*
 * Detach the CAM/SAS front-end, undoing mps_attach_sas().  Safe to call
 * on a partially-attached instance (each resource is checked before
 * release).  Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		/* Event mask 0 deregisters the callback on this path. */
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Balance the startup-time freeze if discovery never finished. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	sassc->flags |= MPSSAS_SHUTDOWN;
	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any per-LUN records left on the targets. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
901 
902 void
903 mpssas_discovery_end(struct mpssas_softc *sassc)
904 {
905 	struct mps_softc *sc = sassc->sc;
906 
907 	MPS_FUNCTRACE(sc);
908 
909 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
910 		callout_stop(&sassc->discovery_callout);
911 
912 }
913 
/*
 * CAM action entry point for the mps SIM.  Dispatches on the CCB function
 * code; completes most requests synchronously via xpt_done(), while SCSI
 * I/O, SMP I/O, and device resets are handed to helpers that complete the
 * CCB themselves (those cases return early).  Called with mps_mtx held.
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report HBA capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;
		/* The HBA claims the highest target ID for itself. */
		cpi->initiator_id = sassc->maxtargets - 1;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Avago Tech (LSI)", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXX KDM where does this number come from?
		 */
		cpi->maxio = 256 * 1024;
#endif
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report per-target transport settings (link rate, TQ). */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* handle == 0 means no device occupies this target slot. */
		if (targ->handle == 0x0) {
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Map the MPI link-rate code to a bitrate in kb/s. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		/* Helper completes the CCB asynchronously. */
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Helper completes the CCB asynchronously. */
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		/* Helper completes the CCB asynchronously. */
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1038 
1039 static void
1040 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1041     target_id_t target_id, lun_id_t lun_id)
1042 {
1043 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1044 	struct cam_path *path;
1045 
1046 	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1047 	    ac_code, target_id, (uintmax_t)lun_id);
1048 
1049 	if (xpt_create_path(&path, NULL,
1050 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1051 		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1052 			   "notification\n");
1053 		return;
1054 	}
1055 
1056 	xpt_async(ac_code, path, NULL);
1057 	xpt_free_path(path);
1058 }
1059 
/*
 * Force completion of every allocated command after a diag reset; the
 * hardware will never deliver replies for requests that were in flight.
 * Each command is completed with a NULL reply: its completion callback
 * runs and/or its sleeping waiter is woken, and the active-I/O
 * accounting is rebalanced.  Called with the softc mutex held.
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Polled commands watch for COMPLETE; set it so the poll
		 * loop can terminate. */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		}

		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		/* Adjust the active-I/O counter, guarding against
		 * underflow. */
		if (cm->cm_sc->io_cmds_active != 0) {
			cm->cm_sc->io_cmds_active--;
		} else {
			mps_dprint(cm->cm_sc, MPS_INFO, "Warning: "
			    "io_cmds_active is out of sync - resynching to "
			    "0\n");
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1113 
/*
 * Called after a controller diag reset to put the SAS layer back into
 * startup/discovery mode.  All in-flight commands are force-completed
 * and every target's device handle is invalidated, since the firmware
 * assigns new handles during rediscovery.
 */
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	mpssas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_dprint(sc, MPS_INIT,
	    "%s startup %u after command completion\n", __func__,
	    sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		/* outstanding should be 0 after the force-completion above */
		if (sc->sassc->targets[i].outstanding != 0)
			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		/* mark the target as stale until rediscovery refreshes it */
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1157 
1158 static void
1159 mpssas_tm_timeout(void *data)
1160 {
1161 	struct mps_command *tm = data;
1162 	struct mps_softc *sc = tm->cm_sc;
1163 
1164 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1165 
1166 	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1167 	    "task mgmt %p timed out\n", tm);
1168 	mps_reinit(sc);
1169 }
1170 
1171 static void
1172 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1173 {
1174 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1175 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1176 	unsigned int cm_count = 0;
1177 	struct mps_command *cm;
1178 	struct mpssas_target *targ;
1179 
1180 	callout_stop(&tm->cm_callout);
1181 
1182 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1183 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1184 	targ = tm->cm_targ;
1185 
1186 	/*
1187 	 * Currently there should be no way we can hit this case.  It only
1188 	 * happens when we have a failure to allocate chain frames, and
1189 	 * task management commands don't have S/G lists.
1190 	 * XXXSL So should it be an assertion?
1191 	 */
1192 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1193 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1194 			   "This should not happen!\n", __func__, tm->cm_flags);
1195 		mpssas_free_tm(sc, tm);
1196 		return;
1197 	}
1198 
1199 	if (reply == NULL) {
1200 		mpssas_log_command(tm, MPS_RECOVERY,
1201 		    "NULL reset reply for tm %p\n", tm);
1202 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1203 			/* this completion was due to a reset, just cleanup */
1204 			targ->tm = NULL;
1205 			mpssas_free_tm(sc, tm);
1206 		}
1207 		else {
1208 			/* we should have gotten a reply. */
1209 			mps_reinit(sc);
1210 		}
1211 		return;
1212 	}
1213 
1214 	mpssas_log_command(tm, MPS_RECOVERY,
1215 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1216 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1217 	    le32toh(reply->TerminationCount));
1218 
1219 	/* See if there are any outstanding commands for this LUN.
1220 	 * This could be made more efficient by using a per-LU data
1221 	 * structure of some sort.
1222 	 */
1223 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1224 		if (cm->cm_lun == tm->cm_lun)
1225 			cm_count++;
1226 	}
1227 
1228 	if (cm_count == 0) {
1229 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1230 		    "logical unit %u finished recovery after reset\n",
1231 		    tm->cm_lun, tm);
1232 
1233 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1234 		    tm->cm_lun);
1235 
1236 		/* we've finished recovery for this logical unit.  check and
1237 		 * see if some other logical unit has a timedout command
1238 		 * that needs to be processed.
1239 		 */
1240 		cm = TAILQ_FIRST(&targ->timedout_commands);
1241 		if (cm) {
1242 			mpssas_send_abort(sc, tm, cm);
1243 		}
1244 		else {
1245 			targ->tm = NULL;
1246 			mpssas_free_tm(sc, tm);
1247 		}
1248 	}
1249 	else {
1250 		/* if we still have commands for this LUN, the reset
1251 		 * effectively failed, regardless of the status reported.
1252 		 * Escalate to a target reset.
1253 		 */
1254 		mpssas_log_command(tm, MPS_RECOVERY,
1255 		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1256 		    tm, cm_count);
1257 		mpssas_send_reset(sc, tm,
1258 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1259 	}
1260 }
1261 
/*
 * Completion handler for a TARGET RESET task management request.  If
 * the target has no remaining outstanding commands, recovery is done:
 * announce the BDR to CAM and free the TM.  Otherwise the reset
 * effectively failed and the only remaining escalation is a full
 * controller reinit.
 */
static void
mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* Disarm the TM watchdog now that the request has completed. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "recovery finished after target reset\n");

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "target reset complete for tm %p, but still have %u command(s)\n",
		    tm, targ->outstanding);
		mps_reinit(sc);
	}
}
1331 
1332 #define MPS_RESET_TIMEOUT 30
1333 
1334 int
1335 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1336 {
1337 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1338 	struct mpssas_target *target;
1339 	int err;
1340 
1341 	target = tm->cm_targ;
1342 	if (target->handle == 0) {
1343 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1344 		    __func__, target->tid);
1345 		return -1;
1346 	}
1347 
1348 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1349 	req->DevHandle = htole16(target->handle);
1350 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1351 	req->TaskType = type;
1352 
1353 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1354 		/* XXX Need to handle invalid LUNs */
1355 		MPS_SET_LUN(req->LUN, tm->cm_lun);
1356 		tm->cm_targ->logical_unit_resets++;
1357 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1358 		    "sending logical unit reset\n");
1359 		tm->cm_complete = mpssas_logical_unit_reset_complete;
1360 		mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1361 	}
1362 	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1363 		/*
1364 		 * Target reset method =
1365 		 * 	SAS Hard Link Reset / SATA Link Reset
1366 		 */
1367 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1368 		tm->cm_targ->target_resets++;
1369 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1370 		    "sending target reset\n");
1371 		tm->cm_complete = mpssas_target_reset_complete;
1372 		mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1373 	}
1374 	else {
1375 		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
1376 		return -1;
1377 	}
1378 
1379 	tm->cm_data = NULL;
1380 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1381 	tm->cm_complete_data = (void *)tm;
1382 
1383 	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1384 	    mpssas_tm_timeout, tm);
1385 
1386 	err = mps_map_command(sc, tm);
1387 	if (err)
1388 		mpssas_log_command(tm, MPS_RECOVERY,
1389 		    "error %d sending reset type %u\n",
1390 		    err, type);
1391 
1392 	return err;
1393 }
1394 
1395 
/*
 * Completion handler for an ABORT_TASK task management request.  If no
 * timed-out commands remain on the target, recovery is finished and the
 * TM is freed.  If the head of the timed-out queue is a different
 * command, the abort succeeded and the next abort is sent.  If the same
 * command is still at the head, the abort failed and we escalate to a
 * logical unit reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* Disarm the TM watchdog now that the request has completed. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1477 
1478 #define MPS_ABORT_TIMEOUT 5
1479 
1480 static int
1481 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1482 {
1483 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1484 	struct mpssas_target *targ;
1485 	int err;
1486 
1487 	targ = cm->cm_targ;
1488 	if (targ->handle == 0) {
1489 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1490 		    __func__, cm->cm_ccb->ccb_h.target_id);
1491 		return -1;
1492 	}
1493 
1494 	mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1495 	    "Aborting command %p\n", cm);
1496 
1497 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1498 	req->DevHandle = htole16(targ->handle);
1499 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1500 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1501 
1502 	/* XXX Need to handle invalid LUNs */
1503 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1504 
1505 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1506 
1507 	tm->cm_data = NULL;
1508 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1509 	tm->cm_complete = mpssas_abort_complete;
1510 	tm->cm_complete_data = (void *)tm;
1511 	tm->cm_targ = cm->cm_targ;
1512 	tm->cm_lun = cm->cm_lun;
1513 
1514 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1515 	    mpssas_tm_timeout, tm);
1516 
1517 	targ->aborts++;
1518 
1519 	mps_dprint(sc, MPS_INFO, "Sending reset from %s for target ID %d\n",
1520 	    __func__, targ->tid);
1521 	mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1522 
1523 	err = mps_map_command(sc, tm);
1524 	if (err)
1525 		mpssas_log_command(tm, MPS_RECOVERY,
1526 		    "error %d sending abort for cm %p SMID %u\n",
1527 		    err, cm, req->TaskMID);
1528 	return err;
1529 }
1530 
1531 static void
1532 mpssas_scsiio_timeout(void *data)
1533 {
1534 	struct mps_softc *sc;
1535 	struct mps_command *cm;
1536 	struct mpssas_target *targ;
1537 
1538 	cm = (struct mps_command *)data;
1539 	sc = cm->cm_sc;
1540 
1541 	MPS_FUNCTRACE(sc);
1542 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1543 
1544 	mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1545 
1546 	/*
1547 	 * Run the interrupt handler to make sure it's not pending.  This
1548 	 * isn't perfect because the command could have already completed
1549 	 * and been re-used, though this is unlikely.
1550 	 */
1551 	mps_intr_locked(sc);
1552 	if (cm->cm_state == MPS_CM_STATE_FREE) {
1553 		mpssas_log_command(cm, MPS_XINFO,
1554 		    "SCSI command %p almost timed out\n", cm);
1555 		return;
1556 	}
1557 
1558 	if (cm->cm_ccb == NULL) {
1559 		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1560 		return;
1561 	}
1562 
1563 	mpssas_log_command(cm, MPS_INFO, "command timeout cm %p ccb %p\n",
1564 	    cm, cm->cm_ccb);
1565 
1566 	targ = cm->cm_targ;
1567 	targ->timeouts++;
1568 
1569 	/* XXX first, check the firmware state, to see if it's still
1570 	 * operational.  if not, do a diag reset.
1571 	 */
1572 	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1573 	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1574 	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1575 
1576 	if (targ->tm != NULL) {
1577 		/* target already in recovery, just queue up another
1578 		 * timedout command to be processed later.
1579 		 */
1580 		mps_dprint(sc, MPS_RECOVERY,
1581 		    "queued timedout cm %p for processing by tm %p\n",
1582 		    cm, targ->tm);
1583 	}
1584 	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1585 		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1586 		    cm, targ->tm);
1587 
1588 		/* start recovery by aborting the first timedout command */
1589 		mpssas_send_abort(sc, targ->tm, cm);
1590 	}
1591 	else {
1592 		/* XXX queue this target up for recovery once a TM becomes
1593 		 * available.  The firmware only has a limited number of
1594 		 * HighPriority credits for the high priority requests used
1595 		 * for task management, and we ran out.
1596 		 *
1597 		 * Isilon: don't worry about this for now, since we have
1598 		 * more credits than disks in an enclosure, and limit
1599 		 * ourselves to one TM per target for recovery.
1600 		 */
1601 		mps_dprint(sc, MPS_RECOVERY,
1602 		    "timedout cm %p failed to allocate a tm\n", cm);
1603 	}
1604 
1605 }
1606 
/*
 * Translate an XPT_SCSI_IO CCB into an MPI2 SCSI_IO request and queue it
 * to the controller.  Validates the target, allocates a command frame,
 * fills in the CDB/LUN/EEDP fields, arms the per-command timeout, and
 * submits via mps_map_command().  On any validation or allocation
 * failure the CCB is completed immediately with xpt_done().
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* handle 0 means no device is present at this target id */
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID member disks are not exposed for direct SCSI I/O */
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)
;
		else
			mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/* refuse new I/O while the controller is shutting down */
	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPSSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/* Out of command frames (or mid-diagreset): freeze the simq and
	 * ask CAM to requeue this CCB later. */
	cm = mps_alloc_command(sc);
	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mps_free_command(sc, cm);
		}
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Build the MPI2 SCSI_IO request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* lun is NULL here when the LUN is not in the list */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		/* DMA mapping pulls the data buffer out of the CCB */
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 *
	 * NOTE(review): the condition below attempts direct I/O when the
	 * retry flag IS set, which appears to contradict the comment above —
	 * confirm against mpssas_direct_drive_io()/mpssas_scsiio_complete()
	 * and the MPS_WD_RETRY convention before changing either.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

	/* arm the per-command timeout (ccb_h.timeout is in milliseconds) */
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mpssas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1875 
1876 static void
1877 mps_response_code(struct mps_softc *sc, u8 response_code)
1878 {
1879         char *desc;
1880 
1881         switch (response_code) {
1882         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1883                 desc = "task management request completed";
1884                 break;
1885         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1886                 desc = "invalid frame";
1887                 break;
1888         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1889                 desc = "task management request not supported";
1890                 break;
1891         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1892                 desc = "task management request failed";
1893                 break;
1894         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1895                 desc = "task management request succeeded";
1896                 break;
1897         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1898                 desc = "invalid lun";
1899                 break;
1900         case 0xA:
1901                 desc = "overlapped tag attempted";
1902                 break;
1903         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1904                 desc = "task queued, however not sent to target";
1905                 break;
1906         default:
1907                 desc = "unknown";
1908                 break;
1909         }
1910 		mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1911                 response_code, desc);
1912 }
1913 /**
1914  * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1915  */
static void
mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
    Mpi2SCSIIOReply_t *mpi_reply)
{
	u32 response_info;
	u8 *response_bytes;
	/* IOC status word with the log-info flag bits masked off. */
	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/*
	 * The scsi_state description is built up with strcat() in the
	 * driver-wide scratch buffer; callers are expected to serialize
	 * access (presumably via the mps mutex — NOTE(review): confirm).
	 */
	char *desc_scsi_state = sc->tmp_string;
	u32 log_info = le32toh(mpi_reply->IOCLogInfo);

	/*
	 * Skip this log-info code entirely — presumably a known noisy
	 * firmware event not worth decoding (NOTE(review): confirm
	 * meaning of 0x31170000 against the firmware log-info tables).
	 */
	if (log_info == 0x31170000)
		return;

	/* Map the masked IOC status to a human-readable string. */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	/* Map the raw SCSI status byte to a human-readable string. */
	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/*
	 * SCSIState is a bit field; append one phrase per set bit.  When
	 * no bits are set, point at a literal " " instead — no strcat()
	 * will fire in that case, so the literal is never written to.
	 */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
	/* We can add more detail about underflow data here
	 * TO-DO
	 * */
	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
	    desc_scsi_state, scsi_state);

	/* Dump the autosense data only when extended info logging is on. */
	if (sc->mps_debug & MPS_XINFO &&
		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
		scsi_sense_print(csio);
		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
	}

	/* Decode byte 0 (the reason code) of the SAS response info. */
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32toh(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		mps_response_code(sc,response_bytes[0]);
	}
}
2066 
/*
 * Completion handler for XPT_SCSI_IO commands.  Unloads the data DMA map,
 * updates per-target accounting, translates the MPI2 SCSI IO reply (IOC
 * status / SCSI status / SCSI state) into a CAM CCB status, handles the
 * reply-less fast path, Direct Drive I/O retries, TLR negotiation via
 * INQUIRY snooping, SIM queue unfreezing, and finally completes the CCB.
 * Called with the mps mutex held.
 */
static void
mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpssas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;
	struct mpssas_target *target;
	target_id_t target_id;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* The command is back; cancel its timeout callout. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Per-target accounting; the command leaves the active list here. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

	/*
	 * Log (and for the timed-out case, dequeue) commands that complete
	 * while error recovery is in progress for this target.
	 */
	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount))；
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm, MPS_RECOVERY,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_XINFO, "Error sending command, "
				   "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				/* No reply means the firmware completed it
				 * without error. */
				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mpssas_log_command(cm, MPS_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	} else
		ccb->ccb_h.sim_priv.entries[0].field = 0;

	/* Translate the IOC status into a CAM CCB status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) &
		    MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			/* Copy as much sense data as the CCB can hold and
			 * record the residual. */
			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
					returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			      sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* Allocation length from CDB bytes 3-4, minus any
			 * residual, bounds the returned page list. */
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}

		/*
		 * If this is a SATA direct-access end device, mark it so that
		 * a SCSI StartStopUnit command will be sent to it when the
		 * driver is being shutdown.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
		    ((sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
			target = &sassc->targets[target_id];
			target->supports_SSU = TRUE;
			mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
			    target_id);
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * These can sometimes be transient transport-related
		 * errors, and sometimes persistent drive-related errors.
		 * We used to retry these without decrementing the retry
		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
		 * we hit a persistent drive problem that returns one of
		 * these error codes, we would retry indefinitely.  So,
		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
		 * count and avoid infinite retries.  We're taking the
		 * potential risk of flagging false failures in the event
		 * of a topology-related error (e.g. a SAS expander problem
		 * causes a command addressed to a drive to fail), but
		 * avoiding getting into an infinite retry loop.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		mpssas_log_command(cm, MPS_INFO,
		    "terminated ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mpssas_log_command(cm, MPS_XINFO,
		    "completed ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

	/* Decode the reply fields for debug logging (no-op unless the
	 * relevant debug flags are enabled). */
	mps_sc_failed_io_info(sc,csio,rep);

	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_XINFO, "Command completed, "
		    "unfreezing SIM queue\n");
	}

	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2467 
/* All requests that reach here are endian-safe */
2469 static void
2470 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2471     union ccb *ccb) {
2472 	pMpi2SCSIIORequest_t	pIO_req;
2473 	struct mps_softc	*sc = sassc->sc;
2474 	uint64_t		virtLBA;
2475 	uint32_t		physLBA, stripe_offset, stripe_unit;
2476 	uint32_t		io_size, column;
2477 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2478 
2479 	/*
2480 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2481 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2482 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2483 	 * bit different than the 10/16 CDBs, handle them separately.
2484 	 */
2485 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2486 	CDB = pIO_req->CDB.CDB32;
2487 
2488 	/*
2489 	 * Handle 6 byte CDBs.
2490 	 */
2491 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2492 	    (CDB[0] == WRITE_6))) {
2493 		/*
2494 		 * Get the transfer size in blocks.
2495 		 */
2496 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2497 
2498 		/*
2499 		 * Get virtual LBA given in the CDB.
2500 		 */
2501 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2502 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2503 
2504 		/*
2505 		 * Check that LBA range for I/O does not exceed volume's
2506 		 * MaxLBA.
2507 		 */
2508 		if ((virtLBA + (uint64_t)io_size - 1) <=
2509 		    sc->DD_max_lba) {
2510 			/*
2511 			 * Check if the I/O crosses a stripe boundary.  If not,
2512 			 * translate the virtual LBA to a physical LBA and set
2513 			 * the DevHandle for the PhysDisk to be used.  If it
2514 			 * does cross a boundary, do normal I/O.  To get the
2515 			 * right DevHandle to use, get the map number for the
2516 			 * column, then use that map number to look up the
2517 			 * DevHandle of the PhysDisk.
2518 			 */
2519 			stripe_offset = (uint32_t)virtLBA &
2520 			    (sc->DD_stripe_size - 1);
2521 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2522 				physLBA = (uint32_t)virtLBA >>
2523 				    sc->DD_stripe_exponent;
2524 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2525 				column = physLBA % sc->DD_num_phys_disks;
2526 				pIO_req->DevHandle =
2527 				    htole16(sc->DD_column_map[column].dev_handle);
2528 				/* ???? Is this endian safe*/
2529 				cm->cm_desc.SCSIIO.DevHandle =
2530 				    pIO_req->DevHandle;
2531 
2532 				physLBA = (stripe_unit <<
2533 				    sc->DD_stripe_exponent) + stripe_offset;
2534 				ptrLBA = &pIO_req->CDB.CDB32[1];
2535 				physLBA_byte = (uint8_t)(physLBA >> 16);
2536 				*ptrLBA = physLBA_byte;
2537 				ptrLBA = &pIO_req->CDB.CDB32[2];
2538 				physLBA_byte = (uint8_t)(physLBA >> 8);
2539 				*ptrLBA = physLBA_byte;
2540 				ptrLBA = &pIO_req->CDB.CDB32[3];
2541 				physLBA_byte = (uint8_t)physLBA;
2542 				*ptrLBA = physLBA_byte;
2543 
2544 				/*
2545 				 * Set flag that Direct Drive I/O is
2546 				 * being done.
2547 				 */
2548 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2549 			}
2550 		}
2551 		return;
2552 	}
2553 
2554 	/*
2555 	 * Handle 10, 12 or 16 byte CDBs.
2556 	 */
2557 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2558 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2559 	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2560 	    (CDB[0] == WRITE_12))) {
2561 		/*
2562 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2563 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2564 		 * the else section.  10-byte and 12-byte CDB's are OK.
2565 		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2566 		 * ready to accept 12byte CDB for Direct IOs.
2567 		 */
2568 		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2569 		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2570 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2571 			/*
2572 			 * Get the transfer size in blocks.
2573 			 */
2574 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2575 
2576 			/*
2577 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2578 			 * LBA in the CDB depending on command.
2579 			 */
2580 			lba_idx = ((CDB[0] == READ_12) ||
2581 				(CDB[0] == WRITE_12) ||
2582 				(CDB[0] == READ_10) ||
2583 				(CDB[0] == WRITE_10))? 2 : 6;
2584 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2585 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2586 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2587 			    (uint64_t)CDB[lba_idx + 3];
2588 
2589 			/*
2590 			 * Check that LBA range for I/O does not exceed volume's
2591 			 * MaxLBA.
2592 			 */
2593 			if ((virtLBA + (uint64_t)io_size - 1) <=
2594 			    sc->DD_max_lba) {
2595 				/*
2596 				 * Check if the I/O crosses a stripe boundary.
2597 				 * If not, translate the virtual LBA to a
2598 				 * physical LBA and set the DevHandle for the
2599 				 * PhysDisk to be used.  If it does cross a
2600 				 * boundary, do normal I/O.  To get the right
2601 				 * DevHandle to use, get the map number for the
2602 				 * column, then use that map number to look up
2603 				 * the DevHandle of the PhysDisk.
2604 				 */
2605 				stripe_offset = (uint32_t)virtLBA &
2606 				    (sc->DD_stripe_size - 1);
2607 				if ((stripe_offset + io_size) <=
2608 				    sc->DD_stripe_size) {
2609 					physLBA = (uint32_t)virtLBA >>
2610 					    sc->DD_stripe_exponent;
2611 					stripe_unit = physLBA /
2612 					    sc->DD_num_phys_disks;
2613 					column = physLBA %
2614 					    sc->DD_num_phys_disks;
2615 					pIO_req->DevHandle =
2616 					    htole16(sc->DD_column_map[column].
2617 					    dev_handle);
2618 					cm->cm_desc.SCSIIO.DevHandle =
2619 					    pIO_req->DevHandle;
2620 
2621 					physLBA = (stripe_unit <<
2622 					    sc->DD_stripe_exponent) +
2623 					    stripe_offset;
2624 					ptrLBA =
2625 					    &pIO_req->CDB.CDB32[lba_idx];
2626 					physLBA_byte = (uint8_t)(physLBA >> 24);
2627 					*ptrLBA = physLBA_byte;
2628 					ptrLBA =
2629 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2630 					physLBA_byte = (uint8_t)(physLBA >> 16);
2631 					*ptrLBA = physLBA_byte;
2632 					ptrLBA =
2633 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2634 					physLBA_byte = (uint8_t)(physLBA >> 8);
2635 					*ptrLBA = physLBA_byte;
2636 					ptrLBA =
2637 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2638 					physLBA_byte = (uint8_t)physLBA;
2639 					*ptrLBA = physLBA_byte;
2640 
2641 					/*
2642 					 * Set flag that Direct Drive I/O is
2643 					 * being done.
2644 					 */
2645 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2646 				}
2647 			}
2648 		} else {
2649 			/*
2650 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2651 			 * 0.  Get the transfer size in blocks.
2652 			 */
2653 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2654 
2655 			/*
2656 			 * Get virtual LBA.
2657 			 */
2658 			virtLBA = ((uint64_t)CDB[2] << 54) |
2659 			    ((uint64_t)CDB[3] << 48) |
2660 			    ((uint64_t)CDB[4] << 40) |
2661 			    ((uint64_t)CDB[5] << 32) |
2662 			    ((uint64_t)CDB[6] << 24) |
2663 			    ((uint64_t)CDB[7] << 16) |
2664 			    ((uint64_t)CDB[8] << 8) |
2665 			    (uint64_t)CDB[9];
2666 
2667 			/*
2668 			 * Check that LBA range for I/O does not exceed volume's
2669 			 * MaxLBA.
2670 			 */
2671 			if ((virtLBA + (uint64_t)io_size - 1) <=
2672 			    sc->DD_max_lba) {
2673 				/*
2674 				 * Check if the I/O crosses a stripe boundary.
2675 				 * If not, translate the virtual LBA to a
2676 				 * physical LBA and set the DevHandle for the
2677 				 * PhysDisk to be used.  If it does cross a
2678 				 * boundary, do normal I/O.  To get the right
2679 				 * DevHandle to use, get the map number for the
2680 				 * column, then use that map number to look up
2681 				 * the DevHandle of the PhysDisk.
2682 				 */
2683 				stripe_offset = (uint32_t)virtLBA &
2684 				    (sc->DD_stripe_size - 1);
2685 				if ((stripe_offset + io_size) <=
2686 				    sc->DD_stripe_size) {
2687 					physLBA = (uint32_t)(virtLBA >>
2688 					    sc->DD_stripe_exponent);
2689 					stripe_unit = physLBA /
2690 					    sc->DD_num_phys_disks;
2691 					column = physLBA %
2692 					    sc->DD_num_phys_disks;
2693 					pIO_req->DevHandle =
2694 					    htole16(sc->DD_column_map[column].
2695 					    dev_handle);
2696 					cm->cm_desc.SCSIIO.DevHandle =
2697 					    pIO_req->DevHandle;
2698 
2699 					physLBA = (stripe_unit <<
2700 					    sc->DD_stripe_exponent) +
2701 					    stripe_offset;
2702 
2703 					/*
2704 					 * Set upper 4 bytes of LBA to 0.  We
2705 					 * assume that the phys disks are less
2706 					 * than 2 TB's in size.  Then, set the
2707 					 * lower 4 bytes.
2708 					 */
2709 					pIO_req->CDB.CDB32[2] = 0;
2710 					pIO_req->CDB.CDB32[3] = 0;
2711 					pIO_req->CDB.CDB32[4] = 0;
2712 					pIO_req->CDB.CDB32[5] = 0;
2713 					ptrLBA = &pIO_req->CDB.CDB32[6];
2714 					physLBA_byte = (uint8_t)(physLBA >> 24);
2715 					*ptrLBA = physLBA_byte;
2716 					ptrLBA = &pIO_req->CDB.CDB32[7];
2717 					physLBA_byte = (uint8_t)(physLBA >> 16);
2718 					*ptrLBA = physLBA_byte;
2719 					ptrLBA = &pIO_req->CDB.CDB32[8];
2720 					physLBA_byte = (uint8_t)(physLBA >> 8);
2721 					*ptrLBA = physLBA_byte;
2722 					ptrLBA = &pIO_req->CDB.CDB32[9];
2723 					physLBA_byte = (uint8_t)physLBA;
2724 					*ptrLBA = physLBA_byte;
2725 
2726 					/*
2727 					 * Set flag that Direct Drive I/O is
2728 					 * being done.
2729 					 */
2730 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2731 				}
2732 			}
2733 		}
2734 	}
2735 }
2736 
2737 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough commands.  Translates the MPI2
 * reply (or lack thereof) into a CAM CCB status, tears down the DMA
 * mapping set up by mpssas_send_smpcmd(), and completes the CCB.
 */
static void
mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
			   __func__, cm->cm_flags);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
        }

	/* A NULL reply means the command never completed on the chip. */
	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/* Reassemble the 64-bit SAS address from the little-endian request. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
		   "%#jx completed successfully\n", __func__,
		   (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame is the function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2801 
/*
 * Build and submit an SMP passthrough request to the given SAS address.
 * The CCB's request and response buffers are described with a two-entry
 * uio (one segment per direction), since the chip wants exactly one S/G
 * element for the request and one for the response.  Completion is
 * handled asynchronously by mpssas_smpio_complete().
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
			   "%s: physical addresses not supported\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the request frame, iovec 1 the response buffer. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;

}
2972 
/*
 * Handle XPT_SMP_IO: work out which SAS address the SMP request should be
 * routed to (the target itself if it contains an embedded SMP target,
 * otherwise its parent expander) and hand the CCB off to
 * mpssas_send_smpcmd().
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly; otherwise we have to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3108 #endif //__FreeBSD_version >= 900026
3109 
/*
 * Handle XPT_RESET_DEV: issue a SCSI task management "target reset" (with
 * a hard link reset) to the device addressed by the CCB.  Completion is
 * handled by mpssas_resetdev_complete().
 */
static void
mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *tm;
	struct mpssas_target *targ;

	MPS_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_RESET_DEV\n",
	     ccb->ccb_h.target_id));
	sc = sassc->sc;
	tm = mps_alloc_command(sc);
	if (tm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "command alloc failure in mpssas_action_resetdev\n");
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	/* Task management requests are high priority and carry no data. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mpssas_resetdev_complete;
	tm->cm_complete_data = ccb;
	tm->cm_targ = targ;
	/* Block further I/O to the target until the reset has completed. */
	targ->flags |= MPSSAS_TARGET_INRESET;

	mps_map_command(sc, tm);
}
3152 
3153 static void
3154 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3155 {
3156 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3157 	union ccb *ccb;
3158 
3159 	MPS_FUNCTRACE(sc);
3160 	mtx_assert(&sc->mps_mtx, MA_OWNED);
3161 
3162 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3163 	ccb = tm->cm_complete_data;
3164 
3165 	/*
3166 	 * Currently there should be no way we can hit this case.  It only
3167 	 * happens when we have a failure to allocate chain frames, and
3168 	 * task management commands don't have S/G lists.
3169 	 */
3170 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3171 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3172 
3173 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3174 
3175 		mps_dprint(sc, MPS_ERROR,
3176 			   "%s: cm_flags = %#x for reset of handle %#04x! "
3177 			   "This should not happen!\n", __func__, tm->cm_flags,
3178 			   req->DevHandle);
3179 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3180 		goto bailout;
3181 	}
3182 
3183 	mps_dprint(sc, MPS_XINFO,
3184 	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3185 	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3186 
3187 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3188 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
3189 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3190 		    CAM_LUN_WILDCARD);
3191 	}
3192 	else
3193 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3194 
3195 bailout:
3196 
3197 	mpssas_free_tm(sc, tm);
3198 	xpt_done(ccb);
3199 }
3200 
3201 static void
3202 mpssas_poll(struct cam_sim *sim)
3203 {
3204 	struct mpssas_softc *sassc;
3205 
3206 	sassc = cam_sim_softc(sim);
3207 
3208 	if (sassc->sc->mps_debug & MPS_TRACE) {
3209 		/* frequent debug messages during a panic just slow
3210 		 * everything down too much.
3211 		 */
3212 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3213 		sassc->sc->mps_debug &= ~MPS_TRACE;
3214 	}
3215 
3216 	mps_intr_locked(sassc->sc);
3217 }
3218 
/*
 * CAM async event callback.  On newer kernels we watch AC_ADVINFO_CHANGED
 * for READ CAPACITY(16) data updates and cache each LUN's EEDP
 * (protection information) state; on older kernels we probe EEDP support
 * ourselves when a device is found (AC_FOUND_DEVICE).
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find our per-LUN record for this path, if one exists. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* No record yet; allocate one so the EEDP state can be kept. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the READ CAPACITY(16) data cached by the transport
		 * layer via an advinfo CCB, and record whether the LUN is
		 * formatted with protection information (EEDP).
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		/* Older kernels: probe EEDP with our own READ CAPACITY(16). */
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}
3323 
3324 #if (__FreeBSD_version < 901503) || \
3325     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3326 static void
3327 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3328 		  struct ccb_getdev *cgd)
3329 {
3330 	struct mpssas_softc *sassc = sc->sassc;
3331 	struct ccb_scsiio *csio;
3332 	struct scsi_read_capacity_16 *scsi_cmd;
3333 	struct scsi_read_capacity_eedp *rcap_buf;
3334 	path_id_t pathid;
3335 	target_id_t targetid;
3336 	lun_id_t lunid;
3337 	union ccb *ccb;
3338 	struct cam_path *local_path;
3339 	struct mpssas_target *target;
3340 	struct mpssas_lun *lun;
3341 	uint8_t	found_lun;
3342 	char path_str[64];
3343 
3344 	sassc = sc->sassc;
3345 	pathid = cam_sim_path(sassc->sim);
3346 	targetid = xpt_path_target_id(path);
3347 	lunid = xpt_path_lun_id(path);
3348 
3349 	KASSERT(targetid < sassc->maxtargets,
3350 	    ("Target %d out of bounds in mpssas_check_eedp\n",
3351 	     targetid));
3352 	target = &sassc->targets[targetid];
3353 	if (target->handle == 0x0)
3354 		return;
3355 
3356 	/*
3357 	 * Determine if the device is EEDP capable.
3358 	 *
3359 	 * If this flag is set in the inquiry data,
3360 	 * the device supports protection information,
3361 	 * and must support the 16 byte read
3362 	 * capacity command, otherwise continue without
3363 	 * sending read cap 16
3364 	 */
3365 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3366 		return;
3367 
3368 	/*
3369 	 * Issue a READ CAPACITY 16 command.  This info
3370 	 * is used to determine if the LUN is formatted
3371 	 * for EEDP support.
3372 	 */
3373 	ccb = xpt_alloc_ccb_nowait();
3374 	if (ccb == NULL) {
3375 		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3376 		    "for EEDP support.\n");
3377 		return;
3378 	}
3379 
3380 	if (xpt_create_path(&local_path, xpt_periph,
3381 	    pathid, targetid, lunid) != CAM_REQ_CMP) {
3382 		mps_dprint(sc, MPS_ERROR, "Unable to create "
3383 		    "path for EEDP support\n");
3384 		xpt_free_ccb(ccb);
3385 		return;
3386 	}
3387 
3388 	/*
3389 	 * If LUN is already in list, don't create a new
3390 	 * one.
3391 	 */
3392 	found_lun = FALSE;
3393 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3394 		if (lun->lun_id == lunid) {
3395 			found_lun = TRUE;
3396 			break;
3397 		}
3398 	}
3399 	if (!found_lun) {
3400 		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3401 		    M_NOWAIT | M_ZERO);
3402 		if (lun == NULL) {
3403 			mps_dprint(sc, MPS_ERROR,
3404 			    "Unable to alloc LUN for EEDP support.\n");
3405 			xpt_free_path(local_path);
3406 			xpt_free_ccb(ccb);
3407 			return;
3408 		}
3409 		lun->lun_id = lunid;
3410 		SLIST_INSERT_HEAD(&target->luns, lun,
3411 		    lun_link);
3412 	}
3413 
3414 	xpt_path_string(local_path, path_str, sizeof(path_str));
3415 
3416 	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3417 	    path_str, target->handle);
3418 
3419 	/*
3420 	 * Issue a READ CAPACITY 16 command for the LUN.
3421 	 * The mpssas_read_cap_done function will load
3422 	 * the read cap info into the LUN struct.
3423 	 */
3424 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3425 	    M_MPT2, M_NOWAIT | M_ZERO);
3426 	if (rcap_buf == NULL) {
3427 		mps_dprint(sc, MPS_FAULT,
3428 		    "Unable to alloc read capacity buffer for EEDP support.\n");
3429 		xpt_free_path(ccb->ccb_h.path);
3430 		xpt_free_ccb(ccb);
3431 		return;
3432 	}
3433 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3434 	csio = &ccb->csio;
3435 	csio->ccb_h.func_code = XPT_SCSI_IO;
3436 	csio->ccb_h.flags = CAM_DIR_IN;
3437 	csio->ccb_h.retry_count = 4;
3438 	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3439 	csio->ccb_h.timeout = 60000;
3440 	csio->data_ptr = (uint8_t *)rcap_buf;
3441 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3442 	csio->sense_len = MPS_SENSE_LEN;
3443 	csio->cdb_len = sizeof(*scsi_cmd);
3444 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3445 
3446 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3447 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3448 	scsi_cmd->opcode = 0x9E;
3449 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3450 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3451 
3452 	ccb->ccb_h.ppriv_ptr1 = sassc;
3453 	xpt_action(ccb);
3454 }
3455 
/*
 * Completion handler for the internal READ CAPACITY(16) issued by
 * mpssas_check_eedp().  Records the LUN's EEDP format state and block
 * size, then frees the buffer, path, and CCB allocated for the probe.
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver needs to release the devq itself, since this SCSI
	 * command was generated internally by the driver rather than by a
	 * peripheral.  This is currently the only place where the driver
	 * issues a SCSI command internally; any future internal commands
	 * must release the devq the same way, because they will not go
	 * back through cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mpssas_read_cap_done\n",
	     done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Bit 0 of the PROT byte is PROT_EN (formatted for EEDP). */
		if (rcap_buf->protect & 0x01) {
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
 			    "target ID %d is formatted for EEDP "
 			    "support.\n", done_ccb->ccb_h.target_lun,
 			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3526 #endif /* (__FreeBSD_version < 901503) || \
3527           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3528 
3529 void
3530 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3531     struct mpssas_target *target, lun_id_t lun_id)
3532 {
3533 	union ccb *ccb;
3534 	path_id_t path_id;
3535 
3536 	/*
3537 	 * Set the INRESET flag for this target so that no I/O will be sent to
3538 	 * the target until the reset has completed.  If an I/O request does
3539 	 * happen, the devq will be frozen.  The CCB holds the path which is
3540 	 * used to release the devq.  The devq is released and the CCB is freed
3541 	 * when the TM completes.
3542 	 */
3543 	ccb = xpt_alloc_ccb_nowait();
3544 	if (ccb) {
3545 		path_id = cam_sim_path(sc->sassc->sim);
3546 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3547 		    target->tid, lun_id) != CAM_REQ_CMP) {
3548 			xpt_free_ccb(ccb);
3549 		} else {
3550 			tm->cm_ccb = ccb;
3551 			tm->cm_targ = target;
3552 			target->flags |= MPSSAS_TARGET_INRESET;
3553 		}
3554 	}
3555 }
3556 
3557 int
3558 mpssas_startup(struct mps_softc *sc)
3559 {
3560 
3561 	/*
3562 	 * Send the port enable message and set the wait_for_port_enable flag.
3563 	 * This flag helps to keep the simq frozen until all discovery events
3564 	 * are processed.
3565 	 */
3566 	sc->wait_for_port_enable = 1;
3567 	mpssas_send_portenable(sc);
3568 	return (0);
3569 }
3570 
3571 static int
3572 mpssas_send_portenable(struct mps_softc *sc)
3573 {
3574 	MPI2_PORT_ENABLE_REQUEST *request;
3575 	struct mps_command *cm;
3576 
3577 	MPS_FUNCTRACE(sc);
3578 
3579 	if ((cm = mps_alloc_command(sc)) == NULL)
3580 		return (EBUSY);
3581 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3582 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3583 	request->MsgFlags = 0;
3584 	request->VP_ID = 0;
3585 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3586 	cm->cm_complete = mpssas_portenable_complete;
3587 	cm->cm_data = NULL;
3588 	cm->cm_sge = NULL;
3589 
3590 	mps_map_command(sc, cm);
3591 	mps_dprint(sc, MPS_XINFO,
3592 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3593 	    cm, cm->cm_req, cm->cm_complete);
3594 	return (0);
3595 }
3596 
3597 static void
3598 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3599 {
3600 	MPI2_PORT_ENABLE_REPLY *reply;
3601 	struct mpssas_softc *sassc;
3602 
3603 	MPS_FUNCTRACE(sc);
3604 	sassc = sc->sassc;
3605 
3606 	/*
3607 	 * Currently there should be no way we can hit this case.  It only
3608 	 * happens when we have a failure to allocate chain frames, and
3609 	 * port enable commands don't have S/G lists.
3610 	 */
3611 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3612 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3613 			   "This should not happen!\n", __func__, cm->cm_flags);
3614 	}
3615 
3616 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3617 	if (reply == NULL)
3618 		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3619 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3620 	    MPI2_IOCSTATUS_SUCCESS)
3621 		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3622 
3623 	mps_free_command(sc, cm);
3624 	if (sc->mps_ich.ich_arg != NULL) {
3625 		mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3626 		config_intrhook_disestablish(&sc->mps_ich);
3627 		sc->mps_ich.ich_arg = NULL;
3628 	}
3629 
3630 	/*
3631 	 * Get WarpDrive info after discovery is complete but before the scan
3632 	 * starts.  At this point, all devices are ready to be exposed to the
3633 	 * OS.  If devices should be hidden instead, take them out of the
3634 	 * 'targets' array before the scan.  The devinfo for a disk will have
3635 	 * some info and a volume's will be 0.  Use that to remove disks.
3636 	 */
3637 	mps_wd_config_pages(sc);
3638 
3639 	/*
3640 	 * Done waiting for port enable to complete.  Decrement the refcount.
3641 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3642 	 * take place.  Since the simq was explicitly frozen before port
3643 	 * enable, it must be explicitly released here to keep the
3644 	 * freeze/release count in sync.
3645 	 */
3646 	sc->wait_for_port_enable = 0;
3647 	sc->port_enable_complete = 1;
3648 	wakeup(&sc->port_enable_complete);
3649 	mpssas_startup_decrement(sassc);
3650 }
3651 
3652 int
3653 mpssas_check_id(struct mpssas_softc *sassc, int id)
3654 {
3655 	struct mps_softc *sc = sassc->sc;
3656 	char *ids;
3657 	char *name;
3658 
3659 	ids = &sc->exclude_ids[0];
3660 	while((name = strsep(&ids, ",")) != NULL) {
3661 		if (name[0] == '\0')
3662 			continue;
3663 		if (strtol(name, NULL, 0) == (long)id)
3664 			return (1);
3665 	}
3666 
3667 	return (0);
3668 }
3669 
3670 void
3671 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3672 {
3673 	struct mpssas_softc *sassc;
3674 	struct mpssas_lun *lun, *lun_tmp;
3675 	struct mpssas_target *targ;
3676 	int i;
3677 
3678 	sassc = sc->sassc;
3679 	/*
3680 	 * The number of targets is based on IOC Facts, so free all of
3681 	 * the allocated LUNs for each target and then the target buffer
3682 	 * itself.
3683 	 */
3684 	for (i=0; i< maxtargets; i++) {
3685 		targ = &sassc->targets[i];
3686 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3687 			free(lun, M_MPT2);
3688 		}
3689 	}
3690 	free(sassc->targets, M_MPT2);
3691 
3692 	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3693 	    M_MPT2, M_WAITOK|M_ZERO);
3694 	if (!sassc->targets) {
3695 		panic("%s failed to alloc targets with error %d\n",
3696 		    __func__, ENOMEM);
3697 	}
3698 }
3699