xref: /freebsd/sys/dev/mps/mps_sas.c (revision 03836978bec158bdc0ecee7a4198962f91ce8298)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011, 2012 LSI Corp.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * LSI MPT-Fusion Host Adapter FreeBSD
28  *
29  * $FreeBSD$
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 /* Communications core for LSI MPT2 */
36 
37 /* TODO Move headers to mpsvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
44 #include <sys/bus.h>
45 #include <sys/conf.h>
46 #include <sys/bio.h>
47 #include <sys/malloc.h>
48 #include <sys/uio.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/sbuf.h>
55 
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58 #include <sys/rman.h>
59 
60 #include <machine/stdarg.h>
61 
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_xpt.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #if __FreeBSD_version >= 900026
73 #include <cam/scsi/smp_all.h>
74 #endif
75 
76 #include <dev/mps/mpi/mpi2_type.h>
77 #include <dev/mps/mpi/mpi2.h>
78 #include <dev/mps/mpi/mpi2_ioc.h>
79 #include <dev/mps/mpi/mpi2_sas.h>
80 #include <dev/mps/mpi/mpi2_cnfg.h>
81 #include <dev/mps/mpi/mpi2_init.h>
82 #include <dev/mps/mpi/mpi2_tool.h>
83 #include <dev/mps/mps_ioctl.h>
84 #include <dev/mps/mpsvar.h>
85 #include <dev/mps/mps_table.h>
86 #include <dev/mps/mps_sas.h>
87 
88 #define MPSSAS_DISCOVERY_TIMEOUT	20
89 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
90 
/*
 * static array to check SCSI OpCode for EEDP protection bits
 *
 * Indexed by CDB opcode (0x00-0xFF).  A nonzero entry gives the EEDP
 * flags to apply to that opcode: PRO_R (check/remove) for reads,
 * PRO_W (insert) for writes, PRO_V (insert) for verifies.  Each row
 * below covers 16 consecutive opcodes; the trailing comment gives the
 * row's starting opcode.
 */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x00 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x10 */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,	/* 0x20 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x30 */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x40 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x50 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x60 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x70 */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,	/* 0x80 */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0x90 */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,	/* 0xA0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xB0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xC0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xD0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* 0xE0 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0		/* 0xF0 */
};
115 
116 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
117 
118 static void mpssas_discovery_timeout(void *data);
119 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mpssas_poll(struct cam_sim *sim);
123 static void mpssas_scsiio_timeout(void *data);
124 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
125 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
126     struct mps_command *cm, union ccb *ccb);
127 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
128 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
129 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
130 #if __FreeBSD_version >= 900026
131 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
132 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
133 			       uint64_t sasaddr);
134 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
135 #endif //FreeBSD_version >= 900026
136 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
137 static int  mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
138 static int  mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
139 static void mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb);
140 static void mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb);
141 static void mpssas_scanner_thread(void *arg);
142 #if __FreeBSD_version >= 1000006
143 static void mpssas_async(void *callback_arg, uint32_t code,
144 			 struct cam_path *path, void *arg);
145 #else
146 static void mpssas_check_eedp(struct mpssas_softc *sassc);
147 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
148 #endif
149 static int mpssas_send_portenable(struct mps_softc *sc);
150 static void mpssas_portenable_complete(struct mps_softc *sc,
151     struct mps_command *cm);
152 
153 struct mpssas_target *
154 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
155 {
156 	struct mpssas_target *target;
157 	int i;
158 
159 	for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
160 		target = &sassc->targets[i];
161 		if (target->handle == handle)
162 			return (target);
163 	}
164 
165 	return (NULL);
166 }
167 
168 /* we need to freeze the simq during attach and diag reset, to avoid failing
169  * commands before device handles have been found by discovery.  Since
170  * discovery involves reading config pages and possibly sending commands,
171  * discovery actions may continue even after we receive the end of discovery
172  * event, so refcount discovery actions instead of assuming we can unfreeze
173  * the simq when we get the event.
174  */
175 void
176 mpssas_startup_increment(struct mpssas_softc *sassc)
177 {
178 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
179 		if (sassc->startup_refcount++ == 0) {
180 			/* just starting, freeze the simq */
181 			mps_dprint(sassc->sc, MPS_INFO,
182 			    "%s freezing simq\n", __func__);
183 			xpt_freeze_simq(sassc->sim, 1);
184 		}
185 		mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
186 		    sassc->startup_refcount);
187 	}
188 }
189 
190 void
191 mpssas_startup_decrement(struct mpssas_softc *sassc)
192 {
193 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
194 		if (--sassc->startup_refcount == 0) {
195 			/* finished all discovery-related actions, release
196 			 * the simq and rescan for the latest topology.
197 			 */
198 			mps_dprint(sassc->sc, MPS_INFO,
199 			    "%s releasing simq\n", __func__);
200 			sassc->flags &= ~MPSSAS_IN_STARTUP;
201 			xpt_release_simq(sassc->sim, 1);
202 			mpssas_rescan_target(sassc->sc, NULL);
203 		}
204 		mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
205 		    sassc->startup_refcount);
206 	}
207 }
208 
209 /* LSI's firmware requires us to stop sending commands when we're doing task
210  * management, so refcount the TMs and keep the simq frozen when any are in
211  * use.
212  */
213 struct mps_command *
214 mpssas_alloc_tm(struct mps_softc *sc)
215 {
216 	struct mps_command *tm;
217 
218 	tm = mps_alloc_high_priority_command(sc);
219 	if (tm != NULL) {
220 		if (sc->sassc->tm_count++ == 0) {
221 			mps_printf(sc, "%s freezing simq\n", __func__);
222 			xpt_freeze_simq(sc->sassc->sim, 1);
223 		}
224 		mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
225 		    sc->sassc->tm_count);
226 	}
227 	return tm;
228 }
229 
230 void
231 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
232 {
233 	if (tm == NULL)
234 		return;
235 
236 	/* if there are no TMs in use, we can release the simq.  We use our
237 	 * own refcount so that it's easier for a diag reset to cleanup and
238 	 * release the simq.
239 	 */
240 	if (--sc->sassc->tm_count == 0) {
241 		mps_printf(sc, "%s releasing simq\n", __func__);
242 		xpt_release_simq(sc->sassc->sim, 1);
243 	}
244 	mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
245 	    sc->sassc->tm_count);
246 
247 	mps_free_high_priority_command(sc, tm);
248 }
249 
250 
251 void
252 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
253 {
254 	struct mpssas_softc *sassc = sc->sassc;
255 	path_id_t pathid;
256 	target_id_t targetid;
257 	union ccb *ccb;
258 
259 	pathid = cam_sim_path(sassc->sim);
260 	if (targ == NULL)
261 		targetid = CAM_TARGET_WILDCARD;
262 	else
263 		targetid = targ - sassc->targets;
264 
265 	/*
266 	 * Allocate a CCB and schedule a rescan.
267 	 */
268 	ccb = xpt_alloc_ccb_nowait();
269 	if (ccb == NULL) {
270 		mps_dprint(sc, MPS_FAULT, "unable to alloc CCB for rescan\n");
271 		return;
272 	}
273 
274 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
275 		            targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
276 		mps_dprint(sc, MPS_FAULT, "unable to create path for rescan\n");
277 		xpt_free_ccb(ccb);
278 		return;
279 	}
280 
281 	if (targetid == CAM_TARGET_WILDCARD)
282 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
283 	else
284 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
285 
286 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
287 	mpssas_rescan(sassc, ccb);
288 }
289 
/*
 * Log a formatted message prefixed with per-command context: the CAM path
 * string (plus decoded CDB and transfer length for SCSI I/O) when the
 * command has a CCB attached, otherwise a "noperiph" sim/bus/target/lun
 * tuple, followed by the command's SMID.
 */
static void
mpssas_log_command(struct mps_command *cm, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* Fixed-size sbuf backed by str[]; long output truncates. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			/* Decode the CDB and append the data length. */
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: fall back to sim coordinates.  A command with no
		 * resolved target prints 0xFFFFFFFF for the target id. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	printf("%s", sbuf_data(&sb));

	va_end(ap);
}
331 
332 
333 static void
334 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
335 {
336 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
337 	struct mpssas_target *targ;
338 	uint16_t handle;
339 
340 	mps_dprint(sc, MPS_INFO, "%s\n", __func__);
341 
342 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
343 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
344 	targ = tm->cm_targ;
345 
346 	if (reply == NULL) {
347 		/* XXX retry the remove after the diag reset completes? */
348 		mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
349 		    __func__, handle);
350 		mpssas_free_tm(sc, tm);
351 		return;
352 	}
353 
354 	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
355 		mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
356 		   reply->IOCStatus, handle);
357 		mpssas_free_tm(sc, tm);
358 		return;
359 	}
360 
361 	mps_printf(sc, "Reset aborted %u commands\n", reply->TerminationCount);
362 	mps_free_reply(sc, tm->cm_reply_data);
363 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
364 
365 	mps_printf(sc, "clearing target %u handle 0x%04x\n", targ->tid, handle);
366 
367 	/*
368 	 * Don't clear target if remove fails because things will get confusing.
369 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
370 	 * this target id if possible, and so we can assign the same target id
371 	 * to this device if it comes back in the future.
372 	 */
373 	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
374 		targ = tm->cm_targ;
375 		targ->handle = 0x0;
376 		targ->encl_handle = 0x0;
377 		targ->encl_slot = 0x0;
378 		targ->exp_dev_handle = 0x0;
379 		targ->phy_num = 0x0;
380 		targ->linkrate = 0x0;
381 		targ->devinfo = 0x0;
382 		targ->flags = 0x0;
383 	}
384 
385 	mpssas_free_tm(sc, tm);
386 }
387 
388 
389 /*
390  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
391  * Otherwise Volume Delete is same as Bare Drive Removal.
392  */
393 void
394 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
395 {
396 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
397 	struct mps_softc *sc;
398 	struct mps_command *cm;
399 	struct mpssas_target *targ = NULL;
400 
401 	mps_dprint(sassc->sc, MPS_INFO, "%s\n", __func__);
402 	sc = sassc->sc;
403 
404 #ifdef WD_SUPPORT
405 	/*
406 	 * If this is a WD controller, determine if the disk should be exposed
407 	 * to the OS or not.  If disk should be exposed, return from this
408 	 * function without doing anything.
409 	 */
410 	if (sc->WD_available && (sc->WD_hide_expose ==
411 	    MPS_WD_EXPOSE_ALWAYS)) {
412 		return;
413 	}
414 #endif //WD_SUPPORT
415 
416 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
417 	if (targ == NULL) {
418 		/* FIXME: what is the action? */
419 		/* We don't know about this device? */
420 		printf("%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
421 		return;
422 	}
423 
424 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
425 
426 	cm = mpssas_alloc_tm(sc);
427 	if (cm == NULL) {
428 		mps_printf(sc, "%s: command alloc failure\n", __func__);
429 		return;
430 	}
431 
432 	mpssas_rescan_target(sc, targ);
433 
434 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
435 	req->DevHandle = targ->handle;
436 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
437 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
438 
439 	/* SAS Hard Link Reset / SATA Link Reset */
440 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
441 
442 	cm->cm_targ = targ;
443 	cm->cm_data = NULL;
444 	cm->cm_desc.HighPriority.RequestFlags =
445 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
446 	cm->cm_complete = mpssas_remove_volume;
447 	cm->cm_complete_data = (void *)(uintptr_t)handle;
448 	mps_map_command(sc, cm);
449 }
450 
/*
 * The MPT2 firmware performs debounce on the link to avoid transient link
 * errors and false removals.  When it does decide that link has been lost
 * and a device need to go away, it expects that the host will perform a
 * target reset and then an op remove.  The reset has the side-effect of
 * aborting any outstanding requests for the device, which is required for
 * the op-remove to succeed.  It's not clear if the host should check for
 * the device coming back alive after the reset.
 */
void
mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ = NULL;

	mps_dprint(sassc->sc, MPS_INFO, "%s\n", __func__);

	sc = sassc->sc;

	targ = mpssas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		printf("%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
		return;
	}

	targ->flags |= MPSSAS_TARGET_INREMOVAL;

	/* Allocating a TM freezes the simq until mpssas_free_tm(). */
	cm = mpssas_alloc_tm(sc);
	if (cm == NULL) {
		mps_printf(sc, "%s: command alloc failure\n", __func__);
		return;
	}

	/* Let CAM re-evaluate the target now that removal has started. */
	mpssas_rescan_target(sc, targ);

	/* Build the target-reset TM; request fields are little-endian. */
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	/* The op-remove itself is issued from the completion handler. */
	cm->cm_complete = mpssas_remove_device;
	cm->cm_complete_data = (void *)(uintptr_t)handle;
	mps_map_command(sc, cm);
}
506 
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_remove().  If the reset succeeded, the same command is
 * reused to send the SAS_OP_REMOVE_DEVICE I/O-unit control request, and
 * any commands still queued on the target are completed back to CAM
 * with CAM_DEV_NOT_THERE.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	mps_dprint(sc, MPS_INFO, "%s\n", __func__);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
		    __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	/* Reply fields are little-endian; convert before comparing. */
	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_INFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_INFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	/*
	 * Complete any commands still queued on the target as "device not
	 * there".  Note that 'tm' is deliberately reused as the loop
	 * iterator here; the op-remove command mapped above is no longer
	 * referenced through it.
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_INFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		mpssas_scsiio_complete(sc, tm);
	}
}
579 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request issued by
 * mpssas_remove_device().  On success, clears the target's firmware
 * state and frees its LUN list; on failure the target is left intact
 * (see the comment below).
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	mps_dprint(sc, MPS_INFO, "%s\n", __func__);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_printf(sc, "%s NULL reply removing device 0x%04x\n",
		    __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_printf(sc, "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Discard the per-LUN records for the departed device. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}
644 
645 static int
646 mpssas_register_events(struct mps_softc *sc)
647 {
648 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
649 
650 	bzero(events, 16);
651 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
652 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
653 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
654 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
655 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
656 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
657 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
658 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
659 	setbit(events, MPI2_EVENT_IR_VOLUME);
660 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
661 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
662 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
663 
664 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
665 	    &sc->sassc->mpssas_eh);
666 
667 	return (0);
668 }
669 
670 int
671 mps_attach_sas(struct mps_softc *sc)
672 {
673 	struct mpssas_softc *sassc;
674 #if __FreeBSD_version >= 1000006
675 	cam_status status;
676 #endif
677 	int unit, error = 0;
678 
679 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
680 
681 	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
682 	if(!sassc) {
683 		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
684 		__func__, __LINE__);
685 		return (ENOMEM);
686 	}
687 	sassc->targets = malloc(sizeof(struct mpssas_target) *
688 	    sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
689 	if(!sassc->targets) {
690 		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
691 		__func__, __LINE__);
692 		free(sassc, M_MPT2);
693 		return (ENOMEM);
694 	}
695 	sc->sassc = sassc;
696 	sassc->sc = sc;
697 
698 	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
699 		mps_dprint(sc, MPS_FAULT, "Cannot allocate SIMQ\n");
700 		error = ENOMEM;
701 		goto out;
702 	}
703 
704 	unit = device_get_unit(sc->mps_dev);
705 	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
706 	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
707 	if (sassc->sim == NULL) {
708 		mps_dprint(sc, MPS_FAULT, "Cannot allocate SIM\n");
709 		error = EINVAL;
710 		goto out;
711 	}
712 
713 	TAILQ_INIT(&sassc->ev_queue);
714 
715 	/* Initialize taskqueue for Event Handling */
716 	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
717 	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
718 	    taskqueue_thread_enqueue, &sassc->ev_tq);
719 
720 	/* Run the task queue with lowest priority */
721 	taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
722 	    device_get_nameunit(sc->mps_dev));
723 
724 	TAILQ_INIT(&sassc->ccb_scanq);
725 	error = mps_kproc_create(mpssas_scanner_thread, sassc,
726 	    &sassc->rescan_thread, 0, 0, "mps_scan%d", unit);
727 	if (error) {
728 		mps_printf(sc, "Error %d starting rescan thread\n", error);
729 		goto out;
730 	}
731 
732 	mps_lock(sc);
733 	sassc->flags |= MPSSAS_SCANTHREAD;
734 
735 	/*
736 	 * XXX There should be a bus for every port on the adapter, but since
737 	 * we're just going to fake the topology for now, we'll pretend that
738 	 * everything is just a target on a single bus.
739 	 */
740 	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
741 		mps_dprint(sc, MPS_FAULT, "Error %d registering SCSI bus\n",
742 		    error);
743 		mps_unlock(sc);
744 		goto out;
745 	}
746 
747 	/*
748 	 * Assume that discovery events will start right away.  Freezing
749 	 * the simq will prevent the CAM boottime scanner from running
750 	 * before discovery is complete.
751 	 */
752 	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
753 	xpt_freeze_simq(sassc->sim, 1);
754 	sc->sassc->startup_refcount = 0;
755 
756 	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
757 	sassc->discovery_timeouts = 0;
758 
759 	sassc->tm_count = 0;
760 
761 #if __FreeBSD_version >= 1000006
762 	status = xpt_register_async(AC_ADVINFO_CHANGED, mpssas_async, sc, NULL);
763 	if (status != CAM_REQ_CMP) {
764 		mps_printf(sc, "Error %#x registering async handler for "
765 			   "AC_ADVINFO_CHANGED events\n", status);
766 	}
767 #endif
768 
769 	mps_unlock(sc);
770 
771 	mpssas_register_events(sc);
772 out:
773 	if (error)
774 		mps_detach_sas(sc);
775 	return (error);
776 }
777 
/*
 * Tear down the SAS-side state created by mps_attach_sas().  Safe to
 * call on a partially-initialized softc (it doubles as attach's error
 * path): each resource is checked before being released.  Always
 * returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	mps_dprint(sc, MPS_INFO, "%s\n", __func__);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
#if __FreeBSD_version >= 1000006
	xpt_register_async(0, mpssas_async, sc, NULL);
#endif

	/* Undo the startup-time simq freeze if discovery never finished. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	/*
	 * Ask the scanner thread to exit, then wait up to 30 seconds.
	 * NOTE(review): the re-check assumes the thread clears
	 * MPSSAS_SCANTHREAD on its way out — confirm in
	 * mpssas_scanner_thread().
	 */
	if (sassc->flags & MPSSAS_SCANTHREAD) {
		sassc->flags |= MPSSAS_SHUTDOWN;
		wakeup(&sassc->ccb_scanq);

		if (sassc->flags & MPSSAS_SCANTHREAD) {
			msleep(&sassc->flags, &sc->mps_mtx, PRIBIO,
			       "mps_shutdown", 30 * hz);
		}
	}
	mps_unlock(sc);

	mps_dprint(sc, MPS_INFO, "%s:%d\n", __func__,__LINE__);
	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free the per-LUN records hanging off each target. */
	for(i=0; i< sc->facts->MaxTargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
845 
846 void
847 mpssas_discovery_end(struct mpssas_softc *sassc)
848 {
849 	struct mps_softc *sc = sassc->sc;
850 
851 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
852 
853 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
854 		callout_stop(&sassc->discovery_callout);
855 
856 }
857 
/*
 * Discovery watchdog, fired MPSSAS_DISCOVERY_TIMEOUT seconds after being
 * armed if the end-of-discovery event has not been processed.  Polls the
 * hardware in case interrupts are broken, and re-arms itself up to
 * MPSSAS_MAX_DISCOVERY_TIMEOUTS times before forcing discovery to end.
 */
static void
mpssas_discovery_timeout(void *data)
{
	struct mpssas_softc *sassc = data;
	struct mps_softc *sc;

	sc = sassc->sc;
	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	mps_lock(sc);
	mps_printf(sc,
	    "Timeout waiting for discovery, interrupts may not be working!\n");
	sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;

	/* Poll the hardware for events in case interrupts aren't working */
	mps_intr_locked(sc);

	mps_printf(sassc->sc,
	    "Finished polling after discovery timeout at %d\n", ticks);

	if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
		/* Polling processed the end-of-discovery event. */
		mpssas_discovery_end(sassc);
	} else {
		if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
			/* Still discovering: re-arm the watchdog. */
			sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
			callout_reset(&sassc->discovery_callout,
			    MPSSAS_DISCOVERY_TIMEOUT * hz,
			    mpssas_discovery_timeout, sassc);
			sassc->discovery_timeouts++;
		} else {
			/* Give up waiting and force discovery to complete. */
			mps_dprint(sassc->sc, MPS_FAULT,
			    "Discovery timed out, continuing.\n");
			sassc->flags &= ~MPSSAS_IN_DISCOVERY;
			mpssas_discovery_end(sassc);
		}
	}

	mps_unlock(sc);
}
897 
/*
 * CAM action entry point for the mps SIM.  Must be called with the softc
 * mutex held.  CCBs handled synchronously fall through to xpt_done();
 * the SCSI I/O, SMP I/O, and device-reset paths return early and
 * complete asynchronously.
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	mps_dprint(sassc->sc, MPS_TRACE, "%s func 0x%x\n", __func__,
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Describe the HBA (faked as a single SAS bus) to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->sc->facts->MaxTargets - 1;
		cpi->max_lun = 255;
		/* The initiator occupies the highest target id. */
		cpi->initiator_id = sassc->sc->facts->MaxTargets - 1;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXX KDM where does this number come from?
		 */
		cpi->maxio = 256 * 1024;
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report per-target link speed and tagged queueing. */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		targ = &sassc->targets[cts->ccb_h.target_id];
		if (targ->handle == 0x0) {
			/* No device currently behind this target id. */
			cts->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Translate the firmware link-rate code to a CAM bitrate. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		cts->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_RESET_DEV:
		/* Completes asynchronously via the TM path. */
		mps_printf(sassc->sc, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* Not implemented; report success so CAM can proceed. */
		mps_printf(sassc->sc, "mpssas_action faking success for "
			   "abort or reset\n");
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_SCSI_IO:
		/* Completes asynchronously in mpssas_scsiio_complete(). */
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	}
	xpt_done(ccb);

}
1014 
1015 static void
1016 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1017     target_id_t target_id, lun_id_t lun_id)
1018 {
1019 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1020 	struct cam_path *path;
1021 
1022 	mps_printf(sc, "%s code %x target %d lun %d\n", __func__,
1023 	    ac_code, target_id, lun_id);
1024 
1025 	if (xpt_create_path(&path, NULL,
1026 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1027 		mps_printf(sc, "unable to create path for reset "
1028 			   "notification\n");
1029 		return;
1030 	}
1031 
1032 	xpt_async(ac_code, path, NULL);
1033 	xpt_free_path(path);
1034 }
1035 
/*
 * Force completion of every in-use command with a NULL reply.  Called
 * after a diag reset, when any I/O the firmware had in flight has been
 * lost.  Each command is completed through its cm_complete callback
 * and/or its sleeper is woken; cm_reply is cleared first so consumers
 * can tell the command did not really finish.  Softc mutex required.
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	mps_printf(sc, "%s\n", __func__);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* presumably the polling consumer watches COMPLETE —
		 * set it so the poll loop terminates (TODO confirm) */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		}

		/* wake any thread sleeping on this command */
		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1081 
/*
 * Re-synchronize driver state with the hardware after a controller
 * reinit (diag reset): freeze the simq, announce a bus reset to CAM,
 * force-complete all outstanding commands, and invalidate every cached
 * target handle so rediscovery can repopulate them.
 */
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_printf(sc, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	xpt_freeze_simq(sc->sassc->sim, 1);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_printf(sc, "%s startup %u tm %u after command completion\n",
	    __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);

	/*
	 * The simq was explicitly frozen above, so set the refcount to 0.
	 * The simq will be explicitly released after port enable completes.
	 */
	sc->sassc->startup_refcount = 0;

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->facts->MaxTargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mps_printf(sc, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1130 static void
1131 mpssas_tm_timeout(void *data)
1132 {
1133 	struct mps_command *tm = data;
1134 	struct mps_softc *sc = tm->cm_sc;
1135 
1136 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1137 
1138 	mpssas_log_command(tm, "task mgmt %p timed out\n", tm);
1139 	mps_reinit(sc);
1140 }
1141 
1142 static void
1143 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1144 {
1145 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1146 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1147 	unsigned int cm_count = 0;
1148 	struct mps_command *cm;
1149 	struct mpssas_target *targ;
1150 
1151 	callout_stop(&tm->cm_callout);
1152 
1153 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1154 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1155 	targ = tm->cm_targ;
1156 
1157 	/*
1158 	 * Currently there should be no way we can hit this case.  It only
1159 	 * happens when we have a failure to allocate chain frames, and
1160 	 * task management commands don't have S/G lists.
1161 	 */
1162 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1163 		mps_printf(sc, "%s: cm_flags = %#x for LUN reset! "
1164 			   "This should not happen!\n", __func__, tm->cm_flags);
1165 		mpssas_free_tm(sc, tm);
1166 		return;
1167 	}
1168 
1169 	if (reply == NULL) {
1170 		mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1171 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1172 			/* this completion was due to a reset, just cleanup */
1173 			targ->flags &= ~MPSSAS_TARGET_INRESET;
1174 			targ->tm = NULL;
1175 			mpssas_free_tm(sc, tm);
1176 		}
1177 		else {
1178 			/* we should have gotten a reply. */
1179 			mps_reinit(sc);
1180 		}
1181 		return;
1182 	}
1183 
1184 	mpssas_log_command(tm,
1185 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1186 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1187 	    le32toh(reply->TerminationCount));
1188 
1189 	/* See if there are any outstanding commands for this LUN.
1190 	 * This could be made more efficient by using a per-LU data
1191 	 * structure of some sort.
1192 	 */
1193 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1194 		if (cm->cm_lun == tm->cm_lun)
1195 			cm_count++;
1196 	}
1197 
1198 	if (cm_count == 0) {
1199 		mpssas_log_command(tm,
1200 		    "logical unit %u finished recovery after reset\n",
1201 		    tm->cm_lun, tm);
1202 
1203 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1204 		    tm->cm_lun);
1205 
1206 		/* we've finished recovery for this logical unit.  check and
1207 		 * see if some other logical unit has a timedout command
1208 		 * that needs to be processed.
1209 		 */
1210 		cm = TAILQ_FIRST(&targ->timedout_commands);
1211 		if (cm) {
1212 			mpssas_send_abort(sc, tm, cm);
1213 		}
1214 		else {
1215 			targ->tm = NULL;
1216 			mpssas_free_tm(sc, tm);
1217 		}
1218 	}
1219 	else {
1220 		/* if we still have commands for this LUN, the reset
1221 		 * effectively failed, regardless of the status reported.
1222 		 * Escalate to a target reset.
1223 		 */
1224 		mpssas_log_command(tm,
1225 		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1226 		    tm, cm_count);
1227 		mpssas_send_reset(sc, tm,
1228 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1229 	}
1230 }
1231 
/*
 * Completion handler for a TARGET RESET task management request.
 *
 * If the target has no outstanding commands left, recovery for the
 * whole target (all LUNs) is done: announce the BDR to CAM and release
 * the TM.  If commands are still outstanding, the reset effectively
 * failed; the only remaining escalation level is a controller reinit.
 */
static void
mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed, so its escalation timeout no longer applies. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for target reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->flags &= ~MPSSAS_TARGET_INRESET;
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/* the target is no longer in reset, whatever the outcome */
	targ->flags &= ~MPSSAS_TARGET_INRESET;

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mpssas_log_command(tm,
		    "recovery finished after target reset\n");

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpssas_log_command(tm,
		    "target reset complete for tm %p, but still have %u command(s)\n",
		    tm, targ->outstanding);
		mps_reinit(sc);
	}
}
1303 
1304 #define MPS_RESET_TIMEOUT 30
1305 
1306 static int
1307 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1308 {
1309 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1310 	struct mpssas_target *target;
1311 	int err;
1312 
1313 	target = tm->cm_targ;
1314 	if (target->handle == 0) {
1315 		mps_printf(sc, "%s null devhandle for target_id %d\n",
1316 		    __func__, target->tid);
1317 		return -1;
1318 	}
1319 
1320 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1321 	req->DevHandle = htole16(target->handle);
1322 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1323 	req->TaskType = type;
1324 
1325 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1326 		/* XXX Need to handle invalid LUNs */
1327 		MPS_SET_LUN(req->LUN, tm->cm_lun);
1328 		tm->cm_targ->logical_unit_resets++;
1329 		mpssas_log_command(tm, "sending logical unit reset\n");
1330 		tm->cm_complete = mpssas_logical_unit_reset_complete;
1331 	}
1332 	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1333 		/* Target reset method =  SAS Hard Link Reset / SATA Link Reset */
1334 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1335 		tm->cm_targ->target_resets++;
1336 		tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1337 		mpssas_log_command(tm, "sending target reset\n");
1338 		tm->cm_complete = mpssas_target_reset_complete;
1339 	}
1340 	else {
1341 		mps_printf(sc, "unexpected reset type 0x%x\n", type);
1342 		return -1;
1343 	}
1344 
1345 	tm->cm_data = NULL;
1346 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1347 	tm->cm_complete_data = (void *)tm;
1348 
1349 	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1350 	    mpssas_tm_timeout, tm);
1351 
1352 	err = mps_map_command(sc, tm);
1353 	if (err)
1354 		mpssas_log_command(tm,
1355 		    "error %d sending reset type %u\n",
1356 		    err, type);
1357 
1358 	return err;
1359 }
1360 
1361 
/*
 * Completion handler for an ABORT TASK task management request.
 *
 * On success the aborted command has already been completed, so it will
 * no longer appear on the target's timedout_commands list.  Depending
 * on what remains queued there we either finish recovery, abort the
 * next timed-out command, or — if the same command is still at the head
 * of the list — treat the abort as failed and escalate to a LUN reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed, so its escalation timeout no longer applies. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1443 
1444 #define MPS_ABORT_TIMEOUT 5
1445 
1446 static int
1447 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1448 {
1449 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1450 	struct mpssas_target *targ;
1451 	int err;
1452 
1453 	targ = cm->cm_targ;
1454 	if (targ->handle == 0) {
1455 		mps_printf(sc, "%s null devhandle for target_id %d\n",
1456 		    __func__, cm->cm_ccb->ccb_h.target_id);
1457 		return -1;
1458 	}
1459 
1460 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1461 	req->DevHandle = htole16(targ->handle);
1462 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1463 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1464 
1465 	/* XXX Need to handle invalid LUNs */
1466 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1467 
1468 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1469 
1470 	tm->cm_data = NULL;
1471 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1472 	tm->cm_complete = mpssas_abort_complete;
1473 	tm->cm_complete_data = (void *)tm;
1474 	tm->cm_targ = cm->cm_targ;
1475 	tm->cm_lun = cm->cm_lun;
1476 
1477 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1478 	    mpssas_tm_timeout, tm);
1479 
1480 	targ->aborts++;
1481 
1482 	err = mps_map_command(sc, tm);
1483 	if (err)
1484 		mpssas_log_command(tm,
1485 		    "error %d sending abort for cm %p SMID %u\n",
1486 		    err, cm, req->TaskMID);
1487 	return err;
1488 }
1489 
1490 
/*
 * Callout handler for a SCSI I/O command that exceeded its CCB timeout.
 *
 * First polls the hardware once in case the completion is merely
 * pending.  If the command really is stuck, it is marked TIMEDOUT,
 * queued on the target's recovery list, and — if no task management is
 * already in flight for the target — an ABORT TASK is started for it.
 * Runs with the softc mutex held.
 */
static void
mpssas_scsiio_timeout(void *data)
{
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ;

	cm = (struct mps_command *)data;
	sc = cm->cm_sc;

	mtx_assert(&sc->mps_mtx, MA_OWNED);

	mps_printf(sc, "%s checking sc %p cm %p\n", __func__, sc, cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mps_intr_locked(sc);
	if (cm->cm_state == MPS_CM_STATE_FREE) {
		/* it completed while we were polling -- no recovery needed */
		mps_printf(sc, "SCSI command %p sc %p almost timed out\n", cm, sc);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mps_printf(sc, "command timeout with NULL ccb\n");
		return;
	}

	mpssas_log_command(cm, "command timeout cm %p ccb %p\n",
	    cm, cm->cm_ccb);

	targ = cm->cm_targ;
	targ->timeouts++;

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */

	cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mps_printf(sc, "queued timedout cm %p for processing by tm %p\n",
		    cm, targ->tm);
	}
	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
		mps_printf(sc, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mpssas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mps_printf(sc, "timedout cm %p failed to allocate a tm\n",
		    cm);
	}

}
1564 
/*
 * Handle an XPT_SCSI_IO CCB: translate it into an MPI2 SCSI IO request
 * and hand it to the hardware.
 *
 * Rejects the request early (completing the CCB with an appropriate
 * status) when the target has no handle, is a RAID component, is being
 * removed, or the controller is shutting down; freezes the simq and
 * requeues when no command frame is available.  Otherwise fills in the
 * request (direction, tagging, LUN, CDB, and optional EEDP/protection
 * setup), arms the per-command timeout, and maps the command.  The CCB
 * is completed asynchronously by mpssas_scsiio_complete().
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "%s ccb %p target flag %x\n", __func__, ccb, targ->flags);
	/* a zero handle means no device is present at this target id */
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_TRACE, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}
	/* RAID component members are addressed via the volume, not directly */
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		mps_dprint(sc, MPS_TRACE, "%s Raid component no SCSI IO supported %u\n",
		    __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			csio->ccb_h.status = CAM_REQ_CMP;
		else
			csio->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_TRACE, "%s shutting down\n", __func__);
		csio->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		/* out of command frames: freeze the simq and ask CAM to
		 * requeue this CCB later */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* fill in the MPI2 SCSI IO request frame */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

  if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		ccb->ccb_h.status = CAM_LUN_INVALID;
		xpt_done(ccb);
		return;
	}

	/* the CDB may be supplied by pointer or embedded in the CCB */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* lun is NULL after the loop if the LUN is not on the list */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 in 16-byte CDBs,
				 * byte 2 otherwise */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				/* set the RDPROTECT/WRPROTECT field */
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.status != MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			ccb->ccb_h.status = CAM_REQ_INPROG;
		}
	}

	/* arm the per-command timeout (CCB timeout is in milliseconds) */
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	   mpssas_scsiio_timeout, cm);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);

	if ((sc->mps_debug & MPS_TRACE) != 0)
		mpssas_log_command(cm, "%s cm %p ccb %p outstanding %u\n",
		    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1802 
1803 static void
1804 mps_response_code(struct mps_softc *sc, u8 response_code)
1805 {
1806         char *desc;
1807 
1808         switch (response_code) {
1809         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1810                 desc = "task management request completed";
1811                 break;
1812         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1813                 desc = "invalid frame";
1814                 break;
1815         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1816                 desc = "task management request not supported";
1817                 break;
1818         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1819                 desc = "task management request failed";
1820                 break;
1821         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1822                 desc = "task management request succeeded";
1823                 break;
1824         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1825                 desc = "invalid lun";
1826                 break;
1827         case 0xA:
1828                 desc = "overlapped tag attempted";
1829                 break;
1830         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1831                 desc = "task queued, however not sent to target";
1832                 break;
1833         default:
1834                 desc = "unknown";
1835                 break;
1836         }
1837 		mps_dprint(sc, MPS_INFO, "response_code(0x%01x): %s\n",
1838                 response_code, desc);
1839 }
1840 /**
1841  * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1842  */
1843 static void
1844 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1845     Mpi2SCSIIOReply_t *mpi_reply)
1846 {
1847 	u32 response_info;
1848 	u8 *response_bytes;
1849 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1850 	    MPI2_IOCSTATUS_MASK;
1851 	u8 scsi_state = mpi_reply->SCSIState;
1852 	u8 scsi_status = mpi_reply->SCSIStatus;
1853 	char *desc_ioc_state = NULL;
1854 	char *desc_scsi_status = NULL;
1855 	char *desc_scsi_state = sc->tmp_string;
1856 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1857 
1858 	if (log_info == 0x31170000)
1859 		return;
1860 
1861 	switch (ioc_status) {
1862 	case MPI2_IOCSTATUS_SUCCESS:
1863 		desc_ioc_state = "success";
1864 		break;
1865 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1866 		desc_ioc_state = "invalid function";
1867 		break;
1868 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1869 		desc_ioc_state = "scsi recovered error";
1870 		break;
1871 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1872 		desc_ioc_state = "scsi invalid dev handle";
1873 		break;
1874 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1875 		desc_ioc_state = "scsi device not there";
1876 		break;
1877 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1878 		desc_ioc_state = "scsi data overrun";
1879 		break;
1880 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1881 		desc_ioc_state = "scsi data underrun";
1882 		break;
1883 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1884 		desc_ioc_state = "scsi io data error";
1885 		break;
1886 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1887 		desc_ioc_state = "scsi protocol error";
1888 		break;
1889 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1890 		desc_ioc_state = "scsi task terminated";
1891 		break;
1892 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1893 		desc_ioc_state = "scsi residual mismatch";
1894 		break;
1895 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1896 		desc_ioc_state = "scsi task mgmt failed";
1897 		break;
1898 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1899 		desc_ioc_state = "scsi ioc terminated";
1900 		break;
1901 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1902 		desc_ioc_state = "scsi ext terminated";
1903 		break;
1904 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1905 		desc_ioc_state = "eedp guard error";
1906 		break;
1907 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1908 		desc_ioc_state = "eedp ref tag error";
1909 		break;
1910 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1911 		desc_ioc_state = "eedp app tag error";
1912 		break;
1913 	default:
1914 		desc_ioc_state = "unknown";
1915 		break;
1916 	}
1917 
1918 	switch (scsi_status) {
1919 	case MPI2_SCSI_STATUS_GOOD:
1920 		desc_scsi_status = "good";
1921 		break;
1922 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
1923 		desc_scsi_status = "check condition";
1924 		break;
1925 	case MPI2_SCSI_STATUS_CONDITION_MET:
1926 		desc_scsi_status = "condition met";
1927 		break;
1928 	case MPI2_SCSI_STATUS_BUSY:
1929 		desc_scsi_status = "busy";
1930 		break;
1931 	case MPI2_SCSI_STATUS_INTERMEDIATE:
1932 		desc_scsi_status = "intermediate";
1933 		break;
1934 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
1935 		desc_scsi_status = "intermediate condmet";
1936 		break;
1937 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
1938 		desc_scsi_status = "reservation conflict";
1939 		break;
1940 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
1941 		desc_scsi_status = "command terminated";
1942 		break;
1943 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
1944 		desc_scsi_status = "task set full";
1945 		break;
1946 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
1947 		desc_scsi_status = "aca active";
1948 		break;
1949 	case MPI2_SCSI_STATUS_TASK_ABORTED:
1950 		desc_scsi_status = "task aborted";
1951 		break;
1952 	default:
1953 		desc_scsi_status = "unknown";
1954 		break;
1955 	}
1956 
1957 	desc_scsi_state[0] = '\0';
1958 	if (!scsi_state)
1959 		desc_scsi_state = " ";
1960 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
1961 		strcat(desc_scsi_state, "response info ");
1962 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
1963 		strcat(desc_scsi_state, "state terminated ");
1964 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
1965 		strcat(desc_scsi_state, "no status ");
1966 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
1967 		strcat(desc_scsi_state, "autosense failed ");
1968 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
1969 		strcat(desc_scsi_state, "autosense valid ");
1970 
1971 	mps_dprint(sc, MPS_INFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x), \n",
1972 		le16toh(mpi_reply->DevHandle),
1973 	    desc_ioc_state, ioc_status);
1974 	/* We can add more detail about underflow data here
1975 	 * TO-DO
1976 	 * */
1977 	mps_dprint(sc, MPS_INFO, "\tscsi_status(%s)(0x%02x), "
1978 	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status,
1979 	    scsi_status, desc_scsi_state, scsi_state);
1980 
1981 	if (sc->mps_debug & MPS_INFO &&
1982 		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1983 		mps_dprint(sc, MPS_INFO, "-> Sense Buffer Data : Start :\n");
1984 		scsi_sense_print(csio);
1985 		mps_dprint(sc, MPS_INFO, "-> Sense Buffer Data : End :\n");
1986 	}
1987 
1988 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
1989 		response_info = le32toh(mpi_reply->ResponseInfo);
1990 		response_bytes = (u8 *)&response_info;
1991 		mps_response_code(sc,response_bytes[0]);
1992 	}
1993 }
1994 
/*
 * Completion handler for SCSI I/O commands.  Runs with the softc mutex held.
 * Tears down the data DMA mapping, updates per-target accounting, translates
 * the firmware reply (IOCStatus/SCSIStatus/SCSIState) into a CAM status on
 * the original CCB, and finally frees the command and completes the CCB via
 * xpt_done().  Commands that never reached the hardware (no reply) take a
 * "fast path" that completes them immediately.
 */
static void
mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpssas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;

	mps_dprint(sc, MPS_TRACE,
	    "%s cm %p SMID %u ccb %p reply %p outstanding %u\n",
	    __func__, cm, cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* Cancel the per-command timeout; we are completing this command. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	/* cm_reply is NULL when the command never went out to the hardware. */
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Per-target accounting: this command is no longer outstanding. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);

	/*
	 * Log completions that happen while error recovery is in progress:
	 * either this command itself timed out, or a task management request
	 * is pending for its target, or a diag reset just occurred.
	 */
	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_INFO, "Error sending command, "
				   "freezing SIM queue\n");
		}
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
			else {
				ccb->ccb_h.status = CAM_REQ_CMP;
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_INFO,
					   "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	if (sc->mps_debug & MPS_TRACE)
		mpssas_log_command(cm,
		    "ioc %x scsi %x state %x xfer %u\n",
			le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		ccb->ccb_h.status = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	}

	/* Translate the firmware IOCStatus into a CAM status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			ccb->ccb_h.status = CAM_REQ_ABORTED;
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			ccb->ccb_h.status = CAM_REQ_CMP;
		else
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;

		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			/* Copy the autosense data into the CCB, clamped to
			 * both what the firmware returned and what the CCB
			 * can hold; record any shortfall in sense_resid. */
			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
					returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			      sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
		    T_SEQUENTIAL) && (sc->control_TLR) &&
		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
			    TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* CDB bytes 3-4 of INQUIRY are the allocation length;
			 * don't scan past what the device could have filled. */
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			ccb->ccb_h.status = CAM_REQ_CMP;
		else
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
		else
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * Since these are generally external (i.e. hopefully
		 * transient transport-related) errors, retry these without
		 * decrementing the retry count.
		 */
		ccb->ccb_h.status = CAM_REQUEUE_REQ;
		mpssas_log_command(cm,
		    "terminated ioc %x scsi %x state %x xfer %u\n",
			le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mpssas_log_command(cm,
		    "completed ioc %x scsi %x state %x xfer %u\n",
			le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		break;
	}

	/*
	 * NOTE(review): this is called unconditionally, i.e. also for
	 * successfully completed commands — presumably its output is gated
	 * internally on the debug level; consider calling it only on error.
	 */
	mps_sc_failed_io_info(sc,csio,rep);

	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_INFO, "Command completed, "
			   "unfreezing SIM queue\n");
	}

	/* Any non-successful completion freezes the device queue so that
	 * CAM requeues/retries in order. */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2349 
2350 /* All Request reached here are Endian safe */
2351 static void
2352 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2353     union ccb *ccb) {
2354 	pMpi2SCSIIORequest_t	pIO_req;
2355 	struct mps_softc	*sc = sassc->sc;
2356 	uint64_t		virtLBA;
2357 	uint32_t		physLBA, stripe_offset, stripe_unit;
2358 	uint32_t		io_size, column;
2359 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2360 
2361 	/*
2362 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2363 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2364 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2365 	 * bit different than the 10/16 CDBs, handle them separately.
2366 	 */
2367 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2368 	CDB = pIO_req->CDB.CDB32;
2369 
2370 	/*
2371 	 * Handle 6 byte CDBs.
2372 	 */
2373 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2374 	    (CDB[0] == WRITE_6))) {
2375 		/*
2376 		 * Get the transfer size in blocks.
2377 		 */
2378 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2379 
2380 		/*
2381 		 * Get virtual LBA given in the CDB.
2382 		 */
2383 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2384 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2385 
2386 		/*
2387 		 * Check that LBA range for I/O does not exceed volume's
2388 		 * MaxLBA.
2389 		 */
2390 		if ((virtLBA + (uint64_t)io_size - 1) <=
2391 		    sc->DD_max_lba) {
2392 			/*
2393 			 * Check if the I/O crosses a stripe boundary.  If not,
2394 			 * translate the virtual LBA to a physical LBA and set
2395 			 * the DevHandle for the PhysDisk to be used.  If it
2396 			 * does cross a boundry, do normal I/O.  To get the
2397 			 * right DevHandle to use, get the map number for the
2398 			 * column, then use that map number to look up the
2399 			 * DevHandle of the PhysDisk.
2400 			 */
2401 			stripe_offset = (uint32_t)virtLBA &
2402 			    (sc->DD_stripe_size - 1);
2403 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2404 				physLBA = (uint32_t)virtLBA >>
2405 				    sc->DD_stripe_exponent;
2406 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2407 				column = physLBA % sc->DD_num_phys_disks;
2408 				pIO_req->DevHandle =
2409 				    htole16(sc->DD_column_map[column].dev_handle);
2410 				/* ???? Is this endian safe*/
2411 				cm->cm_desc.SCSIIO.DevHandle =
2412 				    pIO_req->DevHandle;
2413 
2414 				physLBA = (stripe_unit <<
2415 				    sc->DD_stripe_exponent) + stripe_offset;
2416 				ptrLBA = &pIO_req->CDB.CDB32[1];
2417 				physLBA_byte = (uint8_t)(physLBA >> 16);
2418 				*ptrLBA = physLBA_byte;
2419 				ptrLBA = &pIO_req->CDB.CDB32[2];
2420 				physLBA_byte = (uint8_t)(physLBA >> 8);
2421 				*ptrLBA = physLBA_byte;
2422 				ptrLBA = &pIO_req->CDB.CDB32[3];
2423 				physLBA_byte = (uint8_t)physLBA;
2424 				*ptrLBA = physLBA_byte;
2425 
2426 				/*
2427 				 * Set flag that Direct Drive I/O is
2428 				 * being done.
2429 				 */
2430 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2431 			}
2432 		}
2433 		return;
2434 	}
2435 
2436 	/*
2437 	 * Handle 10, 12 or 16 byte CDBs.
2438 	 */
2439 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2440 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2441 	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2442 	    (CDB[0] == WRITE_12))) {
2443 		/*
2444 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2445 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2446 		 * the else section.  10-byte and 12-byte CDB's are OK.
2447 		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2448 		 * ready to accept 12byte CDB for Direct IOs.
2449 		 */
2450 		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2451 		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2452 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2453 			/*
2454 			 * Get the transfer size in blocks.
2455 			 */
2456 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2457 
2458 			/*
2459 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2460 			 * LBA in the CDB depending on command.
2461 			 */
2462 			lba_idx = ((CDB[0] == READ_12) ||
2463 				(CDB[0] == WRITE_12) ||
2464 				(CDB[0] == READ_10) ||
2465 				(CDB[0] == WRITE_10))? 2 : 6;
2466 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2467 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2468 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2469 			    (uint64_t)CDB[lba_idx + 3];
2470 
2471 			/*
2472 			 * Check that LBA range for I/O does not exceed volume's
2473 			 * MaxLBA.
2474 			 */
2475 			if ((virtLBA + (uint64_t)io_size - 1) <=
2476 			    sc->DD_max_lba) {
2477 				/*
2478 				 * Check if the I/O crosses a stripe boundary.
2479 				 * If not, translate the virtual LBA to a
2480 				 * physical LBA and set the DevHandle for the
2481 				 * PhysDisk to be used.  If it does cross a
2482 				 * boundry, do normal I/O.  To get the right
2483 				 * DevHandle to use, get the map number for the
2484 				 * column, then use that map number to look up
2485 				 * the DevHandle of the PhysDisk.
2486 				 */
2487 				stripe_offset = (uint32_t)virtLBA &
2488 				    (sc->DD_stripe_size - 1);
2489 				if ((stripe_offset + io_size) <=
2490 				    sc->DD_stripe_size) {
2491 					physLBA = (uint32_t)virtLBA >>
2492 					    sc->DD_stripe_exponent;
2493 					stripe_unit = physLBA /
2494 					    sc->DD_num_phys_disks;
2495 					column = physLBA %
2496 					    sc->DD_num_phys_disks;
2497 					pIO_req->DevHandle =
2498 					    htole16(sc->DD_column_map[column].
2499 					    dev_handle);
2500 					cm->cm_desc.SCSIIO.DevHandle =
2501 					    pIO_req->DevHandle;
2502 
2503 					physLBA = (stripe_unit <<
2504 					    sc->DD_stripe_exponent) +
2505 					    stripe_offset;
2506 					ptrLBA =
2507 					    &pIO_req->CDB.CDB32[lba_idx];
2508 					physLBA_byte = (uint8_t)(physLBA >> 24);
2509 					*ptrLBA = physLBA_byte;
2510 					ptrLBA =
2511 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2512 					physLBA_byte = (uint8_t)(physLBA >> 16);
2513 					*ptrLBA = physLBA_byte;
2514 					ptrLBA =
2515 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2516 					physLBA_byte = (uint8_t)(physLBA >> 8);
2517 					*ptrLBA = physLBA_byte;
2518 					ptrLBA =
2519 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2520 					physLBA_byte = (uint8_t)physLBA;
2521 					*ptrLBA = physLBA_byte;
2522 
2523 					/*
2524 					 * Set flag that Direct Drive I/O is
2525 					 * being done.
2526 					 */
2527 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2528 				}
2529 			}
2530 		} else {
2531 			/*
2532 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2533 			 * 0.  Get the transfer size in blocks.
2534 			 */
2535 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2536 
2537 			/*
2538 			 * Get virtual LBA.
2539 			 */
2540 			virtLBA = ((uint64_t)CDB[2] << 54) |
2541 			    ((uint64_t)CDB[3] << 48) |
2542 			    ((uint64_t)CDB[4] << 40) |
2543 			    ((uint64_t)CDB[5] << 32) |
2544 			    ((uint64_t)CDB[6] << 24) |
2545 			    ((uint64_t)CDB[7] << 16) |
2546 			    ((uint64_t)CDB[8] << 8) |
2547 			    (uint64_t)CDB[9];
2548 
2549 			/*
2550 			 * Check that LBA range for I/O does not exceed volume's
2551 			 * MaxLBA.
2552 			 */
2553 			if ((virtLBA + (uint64_t)io_size - 1) <=
2554 			    sc->DD_max_lba) {
2555 				/*
2556 				 * Check if the I/O crosses a stripe boundary.
2557 				 * If not, translate the virtual LBA to a
2558 				 * physical LBA and set the DevHandle for the
2559 				 * PhysDisk to be used.  If it does cross a
2560 				 * boundry, do normal I/O.  To get the right
2561 				 * DevHandle to use, get the map number for the
2562 				 * column, then use that map number to look up
2563 				 * the DevHandle of the PhysDisk.
2564 				 */
2565 				stripe_offset = (uint32_t)virtLBA &
2566 				    (sc->DD_stripe_size - 1);
2567 				if ((stripe_offset + io_size) <=
2568 				    sc->DD_stripe_size) {
2569 					physLBA = (uint32_t)(virtLBA >>
2570 					    sc->DD_stripe_exponent);
2571 					stripe_unit = physLBA /
2572 					    sc->DD_num_phys_disks;
2573 					column = physLBA %
2574 					    sc->DD_num_phys_disks;
2575 					pIO_req->DevHandle =
2576 					    htole16(sc->DD_column_map[column].
2577 					    dev_handle);
2578 					cm->cm_desc.SCSIIO.DevHandle =
2579 					    pIO_req->DevHandle;
2580 
2581 					physLBA = (stripe_unit <<
2582 					    sc->DD_stripe_exponent) +
2583 					    stripe_offset;
2584 
2585 					/*
2586 					 * Set upper 4 bytes of LBA to 0.  We
2587 					 * assume that the phys disks are less
2588 					 * than 2 TB's in size.  Then, set the
2589 					 * lower 4 bytes.
2590 					 */
2591 					pIO_req->CDB.CDB32[2] = 0;
2592 					pIO_req->CDB.CDB32[3] = 0;
2593 					pIO_req->CDB.CDB32[4] = 0;
2594 					pIO_req->CDB.CDB32[5] = 0;
2595 					ptrLBA = &pIO_req->CDB.CDB32[6];
2596 					physLBA_byte = (uint8_t)(physLBA >> 24);
2597 					*ptrLBA = physLBA_byte;
2598 					ptrLBA = &pIO_req->CDB.CDB32[7];
2599 					physLBA_byte = (uint8_t)(physLBA >> 16);
2600 					*ptrLBA = physLBA_byte;
2601 					ptrLBA = &pIO_req->CDB.CDB32[8];
2602 					physLBA_byte = (uint8_t)(physLBA >> 8);
2603 					*ptrLBA = physLBA_byte;
2604 					ptrLBA = &pIO_req->CDB.CDB32[9];
2605 					physLBA_byte = (uint8_t)physLBA;
2606 					*ptrLBA = physLBA_byte;
2607 
2608 					/*
2609 					 * Set flag that Direct Drive I/O is
2610 					 * being done.
2611 					 */
2612 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2613 				}
2614 			}
2615 		}
2616 	}
2617 }
2618 
2619 #if __FreeBSD_version >= 900026
2620 static void
2621 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2622 {
2623 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2624 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2625 	uint64_t sasaddr;
2626 	union ccb *ccb;
2627 
2628 	ccb = cm->cm_complete_data;
2629 
2630 	/*
2631 	 * Currently there should be no way we can hit this case.  It only
2632 	 * happens when we have a failure to allocate chain frames, and SMP
2633 	 * commands require two S/G elements only.  That should be handled
2634 	 * in the standard request size.
2635 	 */
2636 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2637 		mps_printf(sc, "%s: cm_flags = %#x on SMP request!\n",
2638 			   __func__, cm->cm_flags);
2639 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2640 		goto bailout;
2641         }
2642 
2643 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2644 	if (rpl == NULL) {
2645 		mps_dprint(sc, MPS_INFO, "%s: NULL cm_reply!\n", __func__);
2646 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2647 		goto bailout;
2648 	}
2649 
2650 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2651 	sasaddr = le32toh(req->SASAddress.Low);
2652 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2653 
2654 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS ||
2655 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2656 		mps_dprint(sc, MPS_INFO, "%s: IOCStatus %04x SASStatus %02x\n",
2657 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2658 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2659 		goto bailout;
2660 	}
2661 
2662 	mps_dprint(sc, MPS_INFO, "%s: SMP request to SAS address "
2663 		   "%#jx completed successfully\n", __func__,
2664 		   (uintmax_t)sasaddr);
2665 
2666 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2667 		ccb->ccb_h.status = CAM_REQ_CMP;
2668 	else
2669 		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2670 
2671 bailout:
2672 	/*
2673 	 * We sync in both directions because we had DMAs in the S/G list
2674 	 * in both directions.
2675 	 */
2676 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2677 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2678 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2679 	mps_free_command(sc, cm);
2680 	xpt_done(ccb);
2681 }
2682 
/*
 * Build and dispatch an SMP passthrough request to the given SAS address.
 * Supports virtual-address request/response buffers (including single-entry
 * S/G lists); physical addresses are rejected.  On any setup failure the CCB
 * is completed immediately with an error status.  On success the command is
 * mapped for DMA and completion continues in mpssas_smpio_complete().
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_printf(sc, "%s: physical addresses not supported\n",
			   __func__);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_printf(sc, "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			/* Single-entry S/G list: use its (virtual) address
			 * directly. */
			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_printf(sc, "%s: cannot allocate command\n", __func__);
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	/* Build the SMP passthrough request frame. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_INFO, "%s: sending SMP request to SAS "
		   "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the SMP request, iovec 1 receives the response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_printf(sc, "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	xpt_done(ccb);
	return;

}
2852 
/*
 * Handle an XPT_SMP_IO CCB: figure out which SAS address the SMP request
 * should be routed to (either the device itself, if it contains an SMP
 * target, or its parent expander) and hand the CCB to mpssas_send_smpcmd().
 * On any failure the CCB is completed here with an error status.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_printf(sc, "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we need to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_printf(sc, "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		/* Old probe code: look the parent up in the target array. */
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_printf(sc, "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		/* The parent must itself be an SMP target (an expander). */
		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_printf(sc, "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/*
		 * Current probe code caches the parent's devinfo and SAS
		 * address on the target itself; validate both before use.
		 */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_printf(sc, "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_printf(sc, "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_printf(sc, "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
2979 #endif //__FreeBSD_version >= 900026
2980 
2981 static void
2982 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
2983 {
2984 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2985 	struct mps_softc *sc;
2986 	struct mps_command *tm;
2987 	struct mpssas_target *targ;
2988 
2989 	mps_dprint(sassc->sc, MPS_TRACE, __func__);
2990 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
2991 
2992 	sc = sassc->sc;
2993 	tm = mps_alloc_command(sc);
2994 	if (tm == NULL) {
2995 		mps_printf(sc, "comand alloc failure in mpssas_action_resetdev\n");
2996 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2997 		xpt_done(ccb);
2998 		return;
2999 	}
3000 
3001 	targ = &sassc->targets[ccb->ccb_h.target_id];
3002 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3003 	req->DevHandle = htole16(targ->handle);
3004 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3005 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3006 
3007 	/* SAS Hard Link Reset / SATA Link Reset */
3008 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3009 
3010 	tm->cm_data = NULL;
3011 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3012 	tm->cm_complete = mpssas_resetdev_complete;
3013 	tm->cm_complete_data = ccb;
3014 	tm->cm_targ = targ;
3015 	mps_map_command(sc, tm);
3016 }
3017 
3018 static void
3019 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3020 {
3021 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3022 	union ccb *ccb;
3023 
3024 	mps_dprint(sc, MPS_TRACE, __func__);
3025 	mtx_assert(&sc->mps_mtx, MA_OWNED);
3026 
3027 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3028 	ccb = tm->cm_complete_data;
3029 
3030 	/*
3031 	 * Currently there should be no way we can hit this case.  It only
3032 	 * happens when we have a failure to allocate chain frames, and
3033 	 * task management commands don't have S/G lists.
3034 	 */
3035 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3036 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3037 
3038 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3039 
3040 		mps_printf(sc, "%s: cm_flags = %#x for reset of handle %#04x! "
3041 			   "This should not happen!\n", __func__, tm->cm_flags,
3042 			   req->DevHandle);
3043 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3044 		goto bailout;
3045 	}
3046 
3047 	printf("%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3048 	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3049 
3050 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3051 		ccb->ccb_h.status = CAM_REQ_CMP;
3052 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3053 		    CAM_LUN_WILDCARD);
3054 	}
3055 	else
3056 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3057 
3058 bailout:
3059 
3060 	mpssas_free_tm(sc, tm);
3061 	xpt_done(ccb);
3062 }
3063 
3064 static void
3065 mpssas_poll(struct cam_sim *sim)
3066 {
3067 	struct mpssas_softc *sassc;
3068 
3069 	sassc = cam_sim_softc(sim);
3070 
3071 	if (sassc->sc->mps_debug & MPS_TRACE) {
3072 		/* frequent debug messages during a panic just slow
3073 		 * everything down too much.
3074 		 */
3075 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3076 		sassc->sc->mps_debug &= ~MPS_TRACE;
3077 	}
3078 
3079 	mps_intr_locked(sassc->sc);
3080 }
3081 
3082 static void
3083 mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
3084 {
3085 	struct mpssas_softc *sassc;
3086 	char path_str[64];
3087 
3088 	if (done_ccb == NULL)
3089 		return;
3090 
3091 	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3092 
3093 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3094 
3095 	xpt_path_string(done_ccb->ccb_h.path, path_str, sizeof(path_str));
3096 	mps_dprint(sassc->sc, MPS_INFO, "Completing rescan for %s\n", path_str);
3097 
3098 	xpt_free_path(done_ccb->ccb_h.path);
3099 	xpt_free_ccb(done_ccb);
3100 
3101 #if __FreeBSD_version < 1000006
3102 	/*
3103 	 * Before completing scan, get EEDP stuff for all of the existing
3104 	 * targets.
3105 	 */
3106 	mpssas_check_eedp(sassc);
3107 #endif
3108 
3109 }
3110 
3111 /* thread to handle bus rescans */
3112 static void
3113 mpssas_scanner_thread(void *arg)
3114 {
3115 	struct mpssas_softc *sassc;
3116 	struct mps_softc *sc;
3117 	union ccb	*ccb;
3118 
3119 	sassc = (struct mpssas_softc *)arg;
3120 	sc = sassc->sc;
3121 
3122 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3123 
3124 	mps_lock(sc);
3125 	for (;;) {
3126 		/* Sleep for 1 second and check the queue status*/
3127 		msleep(&sassc->ccb_scanq, &sc->mps_mtx, PRIBIO,
3128 		       "mps_scanq", 1 * hz);
3129 		if (sassc->flags & MPSSAS_SHUTDOWN) {
3130 			mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
3131 			break;
3132 		}
3133 next_work:
3134 		// Get first work.
3135 		ccb = (union ccb *)TAILQ_FIRST(&sassc->ccb_scanq);
3136 		if (ccb == NULL)
3137 			continue;
3138 		// Got first work.
3139 		TAILQ_REMOVE(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
3140 		xpt_action(ccb);
3141 		if (sassc->flags & MPSSAS_SHUTDOWN) {
3142 			mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
3143 			break;
3144 		}
3145 		goto next_work;
3146 	}
3147 
3148 	sassc->flags &= ~MPSSAS_SCANTHREAD;
3149 	wakeup(&sassc->flags);
3150 	mps_unlock(sc);
3151 	mps_dprint(sc, MPS_TRACE, "Scanner exiting\n");
3152 	mps_kproc_exit(0);
3153 }
3154 
/*
 * Queue a rescan CCB for the scanner thread and wake it up.
 *
 * NOTE on devq freezing: the driver issues one SCSI command internally
 * (READ CAPACITY 16, sent from mpssas_check_eedp() to find the EEDP
 * protection mode; its callback is mpssas_read_cap_done()).  Because that
 * command does not go through a CAM periph, the driver itself must release
 * the devq if CAM_DEV_QFRZN is set; xpt_release_devq() is called from
 * mpssas_read_cap_done().
 *
 * All other commands are handled by the periph layer, which checks
 * CAM_DEV_QFRZN and releases the devq as needed.
 */
static void
mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb)
{
	char path_str[64];

	mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);

	/* The scan queue is protected by the softc lock. */
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	if (ccb == NULL)
		return;

	xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
	mps_dprint(sassc->sc, MPS_INFO, "Queueing rescan for %s\n", path_str);

	/*
	 * Prepare the request: stash the softc for the completion handler,
	 * point the callback at mpssas_rescan_done(), and queue the CCB for
	 * mpssas_scanner_thread(), which is woken below.
	 */
	ccb->ccb_h.ppriv_ptr1 = sassc;
	ccb->ccb_h.cbfcnp = mpssas_rescan_done;
	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, MPS_PRIORITY_XPT);
	TAILQ_INSERT_TAIL(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
	wakeup(&sassc->ccb_scanq);
}
3188 
3189 #if __FreeBSD_version >= 1000006
/*
 * CAM async event callback.  Handles AC_ADVINFO_CHANGED so that when a
 * device's long read-capacity data changes, the per-LUN EEDP (protection
 * information) state cached by the driver is refreshed.
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		/* arg carries the advinfo buffer type for this event. */
		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We're only interested in devices that are attached to
		 * this controller.
		 */
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look for an existing entry for this LUN. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* Not seen before: create and link a new LUN record. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_FAULT, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read-capacity data via a
		 * XPT_DEV_ADVINFO CCB built on the stack.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = 0;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		/* Release the devq if the request froze it. */
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/*
		 * Record whether the LUN is formatted with protection
		 * information, and if so, its logical block length.
		 */
		if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
	default:
		break;
	}
}
3282 #else /* __FreeBSD_version >= 1000006 */
3283 
/*
 * Probe every LUN of every known target for EEDP (protection information)
 * support by issuing an internal READ CAPACITY 16 command to each one.
 * The completion callback, mpssas_read_cap_done(), records the result in
 * the per-LUN state and frees the CCB/path/buffer allocated here.
 * Used only on CAM versions without AC_ADVINFO_CHANGED (see mpssas_async).
 */
static void
mpssas_check_eedp(struct mpssas_softc *sassc)
{
	struct mps_softc *sc = sassc->sc;
	struct ccb_scsiio *csio;
	struct scsi_read_capacity_16 *scsi_cmd;
	struct scsi_read_capacity_eedp *rcap_buf;
	union ccb *ccb;
	path_id_t pathid = cam_sim_path(sassc->sim);
	target_id_t targetid;
	lun_id_t lunid;
	struct cam_periph *found_periph;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	uint8_t	found_lun;
	struct ccb_getdev cgd;
	char path_str[64];

	/*
	 * Issue a READ CAPACITY 16 command to each LUN of each target.  This
	 * info is used to determine if the LUN is formatted for EEDP support.
	 */
	for (targetid = 0; targetid < sc->facts->MaxTargets; targetid++) {
		target = &sassc->targets[targetid];
		if (target->handle == 0x0) {
			/* Slot unused; nothing to probe. */
			continue;
		}

		/* Walk LUN IDs upward until cam_periph_find() stops matching. */
		lunid = 0;
		do {
			ccb = xpt_alloc_ccb_nowait();
			if (ccb == NULL) {
				mps_dprint(sc, MPS_FAULT, "Unable to alloc CCB "
				    "for EEDP support.\n");
				return;
			}

			if (xpt_create_path(&ccb->ccb_h.path, NULL,
			    pathid, targetid, lunid) != CAM_REQ_CMP) {
				mps_dprint(sc, MPS_FAULT, "Unable to create "
				    "path for EEDP support\n");
				xpt_free_ccb(ccb);
				return;
			}

			/*
			 * If a periph is returned, the LUN exists.  Create an
			 * entry in the target's LUN list.
			 */
			if ((found_periph = cam_periph_find(ccb->ccb_h.path,
			    NULL)) != NULL) {
				/*
				 * If LUN is already in list, don't create a new
				 * one.
				 */
				found_lun = FALSE;
				SLIST_FOREACH(lun, &target->luns, lun_link) {
					if (lun->lun_id == lunid) {
						found_lun = TRUE;
						break;
					}
				}
				if (!found_lun) {
					lun = malloc(sizeof(struct mpssas_lun),
					    M_MPT2, M_NOWAIT | M_ZERO);
					if (lun == NULL) {
						mps_dprint(sc, MPS_FAULT,
						    "Unable to alloc LUN for "
						    "EEDP support.\n");
						xpt_free_path(ccb->ccb_h.path);
						xpt_free_ccb(ccb);
						return;
					}
					lun->lun_id = lunid;
					SLIST_INSERT_HEAD(&target->luns, lun,
					    lun_link);
				}
				lunid++;
				/*
				 * Before issuing READ CAPACITY 16, check the
				 * device type via an XPT_GDEV_TYPE request.
				 */
				xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path,
					CAM_PRIORITY_NORMAL);
				cgd.ccb_h.func_code = XPT_GDEV_TYPE;
				xpt_action((union ccb *)&cgd);

				/*
				 * If this flag is set in the inquiry data,
				 * the device supports protection information,
				 * and must support the 16 byte read
				 * capacity command, otherwise continue without
				 * sending read cap 16.
				 */

				xpt_path_string(ccb->ccb_h.path, path_str,
					sizeof(path_str));

				if ((cgd.inq_data.spc3_flags &
				SPC3_SID_PROTECT) == 0) {
					xpt_free_path(ccb->ccb_h.path);
					xpt_free_ccb(ccb);
					continue;
				}

				mps_dprint(sc, MPS_INFO,
				"Sending read cap: path %s"
				" handle %d\n", path_str, target->handle );

				/*
				 * Issue a READ CAPACITY 16 command for the LUN.
				 * The mpssas_read_cap_done function will load
				 * the read cap info into the LUN struct.
				 */
				rcap_buf =
					malloc(sizeof(struct scsi_read_capacity_eedp),
					M_MPT2, M_NOWAIT| M_ZERO);
				if (rcap_buf == NULL) {
					mps_dprint(sc, MPS_FAULT, "Unable to alloc read "
						"capacity buffer for EEDP support.\n");
					xpt_free_path(ccb->ccb_h.path);
					xpt_free_ccb(ccb);
					return;
				}
				csio = &ccb->csio;
				csio->ccb_h.func_code = XPT_SCSI_IO;
				csio->ccb_h.flags = CAM_DIR_IN;
				csio->ccb_h.retry_count = 4;
				csio->ccb_h.cbfcnp = mpssas_read_cap_done;
				/* 60 second command timeout (milliseconds). */
				csio->ccb_h.timeout = 60000;
				csio->data_ptr = (uint8_t *)rcap_buf;
				csio->dxfer_len = sizeof(struct
				    scsi_read_capacity_eedp);
				csio->sense_len = MPS_SENSE_LEN;
				csio->cdb_len = sizeof(*scsi_cmd);
				csio->tag_action = MSG_SIMPLE_Q_TAG;

				/*
				 * Build the READ CAPACITY 16 CDB by hand
				 * (0x9E = SERVICE ACTION IN(16); byte 13 is
				 * the allocation length).
				 */
				scsi_cmd = (struct scsi_read_capacity_16 *)
				    &csio->cdb_io.cdb_bytes;
				bzero(scsi_cmd, sizeof(*scsi_cmd));
				scsi_cmd->opcode = 0x9E;
				scsi_cmd->service_action = SRC16_SERVICE_ACTION;
				((uint8_t *)scsi_cmd)[13] = sizeof(struct
				    scsi_read_capacity_eedp);

				/*
				 * Set the path, target and lun IDs for the READ
				 * CAPACITY request.
				 */
				ccb->ccb_h.path_id =
				    xpt_path_path_id(ccb->ccb_h.path);
				ccb->ccb_h.target_id =
				    xpt_path_target_id(ccb->ccb_h.path);
				ccb->ccb_h.target_lun =
				    xpt_path_lun_id(ccb->ccb_h.path);

				/* Completion needs the softc back. */
				ccb->ccb_h.ppriv_ptr1 = sassc;
				xpt_action(ccb);
			} else {
				/* No periph: past the last LUN; clean up. */
				xpt_free_path(ccb->ccb_h.path);
				xpt_free_ccb(ccb);
			}
		} while (found_periph);
	}
}
3448 
3449 
/*
 * Completion callback for the internal READ CAPACITY 16 commands issued by
 * mpssas_check_eedp().  Records the LUN's EEDP state and frees the buffer,
 * path, and CCB allocated by the sender.
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver must release the devq itself when a SCSI command is
	 * generated internally, since such commands never pass back through
	 * a CAM periph.  This is currently the only place the driver issues
	 * an internal SCSI command; any future internal commands will need
	 * the same treatment.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* PROT_EN bit: the LUN is formatted with protection info. */
		if (rcap_buf->protect & 0x01) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3513 #endif /* __FreeBSD_version >= 1000006 */
3514 
3515 int
3516 mpssas_startup(struct mps_softc *sc)
3517 {
3518 	struct mpssas_softc *sassc;
3519 
3520 	/*
3521 	 * Send the port enable message and set the wait_for_port_enable flag.
3522 	 * This flag helps to keep the simq frozen until all discovery events
3523 	 * are processed.
3524 	 */
3525 	sassc = sc->sassc;
3526 	mpssas_startup_increment(sassc);
3527 	sc->wait_for_port_enable = 1;
3528 	mpssas_send_portenable(sc);
3529 	return (0);
3530 }
3531 
3532 static int
3533 mpssas_send_portenable(struct mps_softc *sc)
3534 {
3535 	MPI2_PORT_ENABLE_REQUEST *request;
3536 	struct mps_command *cm;
3537 
3538 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3539 
3540 	if ((cm = mps_alloc_command(sc)) == NULL)
3541 		return (EBUSY);
3542 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3543 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3544 	request->MsgFlags = 0;
3545 	request->VP_ID = 0;
3546 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3547 	cm->cm_complete = mpssas_portenable_complete;
3548 	cm->cm_data = NULL;
3549 	cm->cm_sge = NULL;
3550 
3551 	mps_map_command(sc, cm);
3552 	mps_dprint(sc, MPS_TRACE,
3553 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3554 	    cm, cm->cm_req, cm->cm_complete);
3555 	return (0);
3556 }
3557 
3558 static void
3559 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3560 {
3561 	MPI2_PORT_ENABLE_REPLY *reply;
3562 	struct mpssas_softc *sassc;
3563 
3564 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3565 	sassc = sc->sassc;
3566 
3567 	/*
3568 	 * Currently there should be no way we can hit this case.  It only
3569 	 * happens when we have a failure to allocate chain frames, and
3570 	 * port enable commands don't have S/G lists.
3571 	 */
3572 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3573 		mps_printf(sc, "%s: cm_flags = %#x for port enable! "
3574 			   "This should not happen!\n", __func__, cm->cm_flags);
3575 	}
3576 
3577 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3578 	if (reply == NULL)
3579 		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3580 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3581 	    MPI2_IOCSTATUS_SUCCESS)
3582 		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3583 
3584 	mps_free_command(sc, cm);
3585 	if (sc->mps_ich.ich_arg != NULL) {
3586 		mps_dprint(sc, MPS_INFO, "disestablish config intrhook\n");
3587 		config_intrhook_disestablish(&sc->mps_ich);
3588 		sc->mps_ich.ich_arg = NULL;
3589 	}
3590 
3591 	/*
3592 	 * Get WarpDrive info after discovery is complete but before the scan
3593 	 * starts.  At this point, all devices are ready to be exposed to the
3594 	 * OS.  If devices should be hidden instead, take them out of the
3595 	 * 'targets' array before the scan.  The devinfo for a disk will have
3596 	 * some info and a volume's will be 0.  Use that to remove disks.
3597 	 */
3598 	mps_wd_config_pages(sc);
3599 
3600 	/*
3601 	 * Done waiting for port enable to complete.  Decrement the refcount.
3602 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3603 	 * take place.  Since the simq was explicitly frozen before port
3604 	 * enable, it must be explicitly released here to keep the
3605 	 * freeze/release count in sync.
3606 	 */
3607 	sc->wait_for_port_enable = 0;
3608 	sc->port_enable_complete = 1;
3609 	wakeup(&sc->port_enable_complete);
3610 	mpssas_startup_decrement(sassc);
3611 	xpt_release_simq(sassc->sim, 1);
3612 }
3613 
3614