xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision 991554f2c46fdbc7e9acbf87fc8da089618c3a19)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2014 LSI Corp.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 /* Communications core for LSI MPT2 */
32 
33 /* TODO Move headers to mprvar */
34 #include <sys/types.h>
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/selinfo.h>
39 #include <sys/module.h>
40 #include <sys/bus.h>
41 #include <sys/conf.h>
42 #include <sys/bio.h>
43 #include <sys/malloc.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/endian.h>
47 #include <sys/queue.h>
48 #include <sys/kthread.h>
49 #include <sys/taskqueue.h>
50 #include <sys/sbuf.h>
51 
52 #include <machine/bus.h>
53 #include <machine/resource.h>
54 #include <sys/rman.h>
55 
56 #include <machine/stdarg.h>
57 
58 #include <cam/cam.h>
59 #include <cam/cam_ccb.h>
60 #include <cam/cam_debug.h>
61 #include <cam/cam_sim.h>
62 #include <cam/cam_xpt_sim.h>
63 #include <cam/cam_xpt_periph.h>
64 #include <cam/cam_periph.h>
65 #include <cam/scsi/scsi_all.h>
66 #include <cam/scsi/scsi_message.h>
67 #if __FreeBSD_version >= 900026
68 #include <cam/scsi/smp_all.h>
69 #endif
70 
71 #include <dev/mpr/mpi/mpi2_type.h>
72 #include <dev/mpr/mpi/mpi2.h>
73 #include <dev/mpr/mpi/mpi2_ioc.h>
74 #include <dev/mpr/mpi/mpi2_sas.h>
75 #include <dev/mpr/mpi/mpi2_cnfg.h>
76 #include <dev/mpr/mpi/mpi2_init.h>
77 #include <dev/mpr/mpi/mpi2_tool.h>
78 #include <dev/mpr/mpr_ioctl.h>
79 #include <dev/mpr/mprvar.h>
80 #include <dev/mpr/mpr_table.h>
81 #include <dev/mpr/mpr_sas.h>
82 
83 #define MPRSAS_DISCOVERY_TIMEOUT	20
84 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
85 
86 /*
87  * static array to check SCSI OpCode for EEDP protection bits
88  */
89 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
90 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
91 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
92 static uint8_t op_code_prot[256] = {
93 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
94 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
95 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
96 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
97 	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
98 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
102 	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
104 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
109 };
110 
111 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
112 
113 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
114 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
115 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
116 static void mprsas_poll(struct cam_sim *sim);
117 static void mprsas_scsiio_timeout(void *data);
118 static void mprsas_abort_complete(struct mpr_softc *sc,
119     struct mpr_command *cm);
120 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
121 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
123 static void mprsas_resetdev_complete(struct mpr_softc *,
124     struct mpr_command *);
125 static int  mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
126     struct mpr_command *cm);
127 static int  mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm,
128     uint8_t type);
129 static void mprsas_async(void *callback_arg, uint32_t code,
130     struct cam_path *path, void *arg);
131 static void mprsas_prepare_ssu(struct mpr_softc *sc, struct cam_path *path,
132     struct ccb_getdev *cgd);
133 #if (__FreeBSD_version < 901503) || \
134     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
135 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
136     struct ccb_getdev *cgd);
137 static void mprsas_read_cap_done(struct cam_periph *periph,
138     union ccb *done_ccb);
139 #endif
140 static int mprsas_send_portenable(struct mpr_softc *sc);
141 static void mprsas_portenable_complete(struct mpr_softc *sc,
142     struct mpr_command *cm);
143 
144 #if __FreeBSD_version >= 900026
145 static void
146 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc,
148 	       	union ccb *ccb, uint64_t sasaddr);
149 static void
150 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
151 #endif
152 
153 struct mprsas_target *
154 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
155     uint16_t handle)
156 {
157 	struct mprsas_target *target;
158 	int i;
159 
160 	for (i = start; i < sassc->maxtargets; i++) {
161 		target = &sassc->targets[i];
162 		if (target->handle == handle)
163 			return (target);
164 	}
165 
166 	return (NULL);
167 }
168 
169 /* we need to freeze the simq during attach and diag reset, to avoid failing
170  * commands before device handles have been found by discovery.  Since
171  * discovery involves reading config pages and possibly sending commands,
172  * discovery actions may continue even after we receive the end of discovery
173  * event, so refcount discovery actions instead of assuming we can unfreeze
174  * the simq when we get the event.
175  */
176 void
177 mprsas_startup_increment(struct mprsas_softc *sassc)
178 {
179 	MPR_FUNCTRACE(sassc->sc);
180 
181 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
182 		if (sassc->startup_refcount++ == 0) {
183 			/* just starting, freeze the simq */
184 			mpr_dprint(sassc->sc, MPR_INIT,
185 			    "%s freezing simq\n", __func__);
186 #if __FreeBSD_version >= 1000039
187 			xpt_hold_boot();
188 #endif
189 			xpt_freeze_simq(sassc->sim, 1);
190 		}
191 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
192 		    sassc->startup_refcount);
193 	}
194 }
195 
196 void
197 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
198 {
199 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
200 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
201 		xpt_release_simq(sassc->sim, 1);
202 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
203 	}
204 }
205 
206 void
207 mprsas_startup_decrement(struct mprsas_softc *sassc)
208 {
209 	MPR_FUNCTRACE(sassc->sc);
210 
211 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
212 		if (--sassc->startup_refcount == 0) {
213 			/* finished all discovery-related actions, release
214 			 * the simq and rescan for the latest topology.
215 			 */
216 			mpr_dprint(sassc->sc, MPR_INIT,
217 			    "%s releasing simq\n", __func__);
218 			sassc->flags &= ~MPRSAS_IN_STARTUP;
219 			xpt_release_simq(sassc->sim, 1);
220 #if __FreeBSD_version >= 1000039
221 			xpt_release_boot();
222 #else
223 			mprsas_rescan_target(sassc->sc, NULL);
224 #endif
225 		}
226 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
227 		    sassc->startup_refcount);
228 	}
229 }
230 
231 /* LSI's firmware requires us to stop sending commands when we're doing task
232  * management, so refcount the TMs and keep the simq frozen when any are in
233  * use.
234  */
235 struct mpr_command *
236 mprsas_alloc_tm(struct mpr_softc *sc)
237 {
238 	struct mpr_command *tm;
239 
240 	MPR_FUNCTRACE(sc);
241 	tm = mpr_alloc_high_priority_command(sc);
242 	if (tm != NULL) {
243 		if (sc->sassc->tm_count++ == 0) {
244 			mpr_dprint(sc, MPR_RECOVERY,
245 			    "%s freezing simq\n", __func__);
246 			xpt_freeze_simq(sc->sassc->sim, 1);
247 		}
248 		mpr_dprint(sc, MPR_RECOVERY, "%s tm_count %u\n", __func__,
249 		    sc->sassc->tm_count);
250 	}
251 	return tm;
252 }
253 
254 void
255 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
256 {
257 	mpr_dprint(sc, MPR_TRACE, "%s", __func__);
258 	if (tm == NULL)
259 		return;
260 
261 	/* if there are no TMs in use, we can release the simq.  We use our
262 	 * own refcount so that it's easier for a diag reset to cleanup and
263 	 * release the simq.
264 	 */
265 	if (--sc->sassc->tm_count == 0) {
266 		mpr_dprint(sc, MPR_RECOVERY, "%s releasing simq\n", __func__);
267 		xpt_release_simq(sc->sassc->sim, 1);
268 	}
269 	mpr_dprint(sc, MPR_RECOVERY, "%s tm_count %u\n", __func__,
270 	    sc->sassc->tm_count);
271 
272 	mpr_free_high_priority_command(sc, tm);
273 }
274 
275 void
276 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
277 {
278 	struct mprsas_softc *sassc = sc->sassc;
279 	path_id_t pathid;
280 	target_id_t targetid;
281 	union ccb *ccb;
282 
283 	MPR_FUNCTRACE(sc);
284 	pathid = cam_sim_path(sassc->sim);
285 	if (targ == NULL)
286 		targetid = CAM_TARGET_WILDCARD;
287 	else
288 		targetid = targ - sassc->targets;
289 
290 	/*
291 	 * Allocate a CCB and schedule a rescan.
292 	 */
293 	ccb = xpt_alloc_ccb_nowait();
294 	if (ccb == NULL) {
295 		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
296 		return;
297 	}
298 
299 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
300 	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
301 		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
302 		xpt_free_ccb(ccb);
303 		return;
304 	}
305 
306 	if (targetid == CAM_TARGET_WILDCARD)
307 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
308 	else
309 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
310 
311 	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
312 	xpt_rescan(ccb);
313 }
314 
315 static void
316 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
317 {
318 	struct sbuf sb;
319 	va_list ap;
320 	char str[192];
321 	char path_str[64];
322 
323 	if (cm == NULL)
324 		return;
325 
326 	/* No need to be in here if debugging isn't enabled */
327 	if ((cm->cm_sc->mpr_debug & level) == 0)
328 		return;
329 
330 	sbuf_new(&sb, str, sizeof(str), 0);
331 
332 	va_start(ap, fmt);
333 
334 	if (cm->cm_ccb != NULL) {
335 		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
336 		    sizeof(path_str));
337 		sbuf_cat(&sb, path_str);
338 		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
339 			scsi_command_string(&cm->cm_ccb->csio, &sb);
340 			sbuf_printf(&sb, "length %d ",
341 			    cm->cm_ccb->csio.dxfer_len);
342 		}
343 	} else {
344 		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
345 		    cam_sim_name(cm->cm_sc->sassc->sim),
346 		    cam_sim_unit(cm->cm_sc->sassc->sim),
347 		    cam_sim_bus(cm->cm_sc->sassc->sim),
348 		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
349 		    cm->cm_lun);
350 	}
351 
352 	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
353 	sbuf_vprintf(&sb, fmt, ap);
354 	sbuf_finish(&sb);
355 	mpr_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));
356 
357 	va_end(ap);
358 }
359 
360 static void
361 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
362 {
363 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
364 	struct mprsas_target *targ;
365 	uint16_t handle;
366 
367 	MPR_FUNCTRACE(sc);
368 
369 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
370 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
371 	targ = tm->cm_targ;
372 
373 	if (reply == NULL) {
374 		/* XXX retry the remove after the diag reset completes? */
375 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
376 		    "0x%04x\n", __func__, handle);
377 		mprsas_free_tm(sc, tm);
378 		return;
379 	}
380 
381 	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
382 		mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
383 		    "device 0x%x\n", reply->IOCStatus, handle);
384 		mprsas_free_tm(sc, tm);
385 		return;
386 	}
387 
388 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
389 	    reply->TerminationCount);
390 	mpr_free_reply(sc, tm->cm_reply_data);
391 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
392 
393 	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
394 	    targ->tid, handle);
395 
396 	/*
397 	 * Don't clear target if remove fails because things will get confusing.
398 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
399 	 * this target id if possible, and so we can assign the same target id
400 	 * to this device if it comes back in the future.
401 	 */
402 	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
403 		targ = tm->cm_targ;
404 		targ->handle = 0x0;
405 		targ->encl_handle = 0x0;
406 		targ->encl_level_valid = 0x0;
407 		targ->encl_level = 0x0;
408 		targ->connector_name[0] = ' ';
409 		targ->connector_name[1] = ' ';
410 		targ->connector_name[2] = ' ';
411 		targ->connector_name[3] = ' ';
412 		targ->encl_slot = 0x0;
413 		targ->exp_dev_handle = 0x0;
414 		targ->phy_num = 0x0;
415 		targ->linkrate = 0x0;
416 		targ->devinfo = 0x0;
417 		targ->flags = 0x0;
418 		targ->scsi_req_desc_type = 0;
419 	}
420 
421 	mprsas_free_tm(sc, tm);
422 }
423 
424 
425 /*
426  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
427  * Otherwise Volume Delete is same as Bare Drive Removal.
428  */
429 void
430 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
431 {
432 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
433 	struct mpr_softc *sc;
434 	struct mpr_command *cm;
435 	struct mprsas_target *targ = NULL;
436 
437 	MPR_FUNCTRACE(sassc->sc);
438 	sc = sassc->sc;
439 
440 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
441 	if (targ == NULL) {
442 		/* FIXME: what is the action? */
443 		/* We don't know about this device? */
444 		mpr_dprint(sc, MPR_ERROR,
445 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
446 		return;
447 	}
448 
449 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
450 
451 	cm = mprsas_alloc_tm(sc);
452 	if (cm == NULL) {
453 		mpr_dprint(sc, MPR_ERROR,
454 		    "%s: command alloc failure\n", __func__);
455 		return;
456 	}
457 
458 	mprsas_rescan_target(sc, targ);
459 
460 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
461 	req->DevHandle = targ->handle;
462 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
463 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
464 
465 	/* SAS Hard Link Reset / SATA Link Reset */
466 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
467 
468 	cm->cm_targ = targ;
469 	cm->cm_data = NULL;
470 	cm->cm_desc.HighPriority.RequestFlags =
471 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
472 	cm->cm_complete = mprsas_remove_volume;
473 	cm->cm_complete_data = (void *)(uintptr_t)handle;
474 	mpr_map_command(sc, cm);
475 }
476 
477 /*
478  * The MPT2 firmware performs debounce on the link to avoid transient link
479  * errors and false removals.  When it does decide that link has been lost
480  * and a device needs to go away, it expects that the host will perform a
481  * target reset and then an op remove.  The reset has the side-effect of
482  * aborting any outstanding requests for the device, which is required for
483  * the op-remove to succeed.  It's not clear if the host should check for
484  * the device coming back alive after the reset.
485  */
486 void
487 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
488 {
489 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
490 	struct mpr_softc *sc;
491 	struct mpr_command *cm;
492 	struct mprsas_target *targ = NULL;
493 
494 	MPR_FUNCTRACE(sassc->sc);
495 
496 	sc = sassc->sc;
497 
498 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
499 	if (targ == NULL) {
500 		/* FIXME: what is the action? */
501 		/* We don't know about this device? */
502 		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
503 		    __func__, handle);
504 		return;
505 	}
506 
507 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
508 
509 	cm = mprsas_alloc_tm(sc);
510 	if (cm == NULL) {
511 		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
512 		    __func__);
513 		return;
514 	}
515 
516 	mprsas_rescan_target(sc, targ);
517 
518 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
519 	memset(req, 0, sizeof(*req));
520 	req->DevHandle = htole16(targ->handle);
521 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
522 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
523 
524 	/* SAS Hard Link Reset / SATA Link Reset */
525 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
526 
527 	cm->cm_targ = targ;
528 	cm->cm_data = NULL;
529 	cm->cm_desc.HighPriority.RequestFlags =
530 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
531 	cm->cm_complete = mprsas_remove_device;
532 	cm->cm_complete_data = (void *)(uintptr_t)handle;
533 	mpr_map_command(sc, cm);
534 }
535 
536 static void
537 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
538 {
539 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
540 	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
541 	struct mprsas_target *targ;
542 	struct mpr_command *next_cm;
543 	uint16_t handle;
544 
545 	MPR_FUNCTRACE(sc);
546 
547 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
548 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
549 	targ = tm->cm_targ;
550 
551 	/*
552 	 * Currently there should be no way we can hit this case.  It only
553 	 * happens when we have a failure to allocate chain frames, and
554 	 * task management commands don't have S/G lists.
555 	 */
556 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
557 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
558 		    "handle %#04x! This should not happen!\n", __func__,
559 		    tm->cm_flags, handle);
560 		mprsas_free_tm(sc, tm);
561 		return;
562 	}
563 
564 	if (reply == NULL) {
565 		/* XXX retry the remove after the diag reset completes? */
566 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
567 		    "0x%04x\n", __func__, handle);
568 		mprsas_free_tm(sc, tm);
569 		return;
570 	}
571 
572 	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
573 		mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
574 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
575 		mprsas_free_tm(sc, tm);
576 		return;
577 	}
578 
579 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
580 	    le32toh(reply->TerminationCount));
581 	mpr_free_reply(sc, tm->cm_reply_data);
582 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
583 
584 	/* Reuse the existing command */
585 	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
586 	memset(req, 0, sizeof(*req));
587 	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
588 	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
589 	req->DevHandle = htole16(handle);
590 	tm->cm_data = NULL;
591 	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
592 	tm->cm_complete = mprsas_remove_complete;
593 	tm->cm_complete_data = (void *)(uintptr_t)handle;
594 
595 	mpr_map_command(sc, tm);
596 
597 	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
598 	    targ->tid, handle);
599 	if (targ->encl_level_valid) {
600 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
601 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
602 		    targ->connector_name);
603 	}
604 	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
605 		union ccb *ccb;
606 
607 		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
608 		ccb = tm->cm_complete_data;
609 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
610 		mprsas_scsiio_complete(sc, tm);
611 	}
612 }
613 
614 static void
615 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
616 {
617 	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
618 	uint16_t handle;
619 	struct mprsas_target *targ;
620 	struct mprsas_lun *lun;
621 
622 	MPR_FUNCTRACE(sc);
623 
624 	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
625 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
626 
627 	/*
628 	 * Currently there should be no way we can hit this case.  It only
629 	 * happens when we have a failure to allocate chain frames, and
630 	 * task management commands don't have S/G lists.
631 	 */
632 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
633 		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
634 		    "handle %#04x! This should not happen!\n", __func__,
635 		    tm->cm_flags, handle);
636 		mprsas_free_tm(sc, tm);
637 		return;
638 	}
639 
640 	if (reply == NULL) {
641 		/* most likely a chip reset */
642 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
643 		    "0x%04x\n", __func__, handle);
644 		mprsas_free_tm(sc, tm);
645 		return;
646 	}
647 
648 	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
649 	    __func__, handle, le16toh(reply->IOCStatus));
650 
651 	/*
652 	 * Don't clear target if remove fails because things will get confusing.
653 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
654 	 * this target id if possible, and so we can assign the same target id
655 	 * to this device if it comes back in the future.
656 	 */
657 	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
658 		targ = tm->cm_targ;
659 		targ->handle = 0x0;
660 		targ->encl_handle = 0x0;
661 		targ->encl_level_valid = 0x0;
662 		targ->encl_level = 0x0;
663 		targ->connector_name[0] = ' ';
664 		targ->connector_name[1] = ' ';
665 		targ->connector_name[2] = ' ';
666 		targ->connector_name[3] = ' ';
667 		targ->encl_slot = 0x0;
668 		targ->exp_dev_handle = 0x0;
669 		targ->phy_num = 0x0;
670 		targ->linkrate = 0x0;
671 		targ->devinfo = 0x0;
672 		targ->flags = 0x0;
673 		targ->scsi_req_desc_type = 0;
674 
675 		while (!SLIST_EMPTY(&targ->luns)) {
676 			lun = SLIST_FIRST(&targ->luns);
677 			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
678 			free(lun, M_MPR);
679 		}
680 	}
681 
682 	mprsas_free_tm(sc, tm);
683 }
684 
685 static int
686 mprsas_register_events(struct mpr_softc *sc)
687 {
688 	uint8_t events[16];
689 
690 	bzero(events, 16);
691 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
692 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
693 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
694 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
695 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
696 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
697 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
698 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
699 	setbit(events, MPI2_EVENT_IR_VOLUME);
700 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
701 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
702 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
703 
704 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
705 	    &sc->sassc->mprsas_eh);
706 
707 	return (0);
708 }
709 
710 int
711 mpr_attach_sas(struct mpr_softc *sc)
712 {
713 	struct mprsas_softc *sassc;
714 	cam_status status;
715 	int unit, error = 0;
716 
717 	MPR_FUNCTRACE(sc);
718 
719 	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
720 	if (!sassc) {
721 		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
722 		    __func__, __LINE__);
723 		return (ENOMEM);
724 	}
725 
726 	/*
727 	 * XXX MaxTargets could change during a reinit.  since we don't
728 	 * resize the targets[] array during such an event, cache the value
729 	 * of MaxTargets here so that we don't get into trouble later.  This
730 	 * should move into the reinit logic.
731 	 */
732 	sassc->maxtargets = sc->facts->MaxTargets;
733 	sassc->targets = malloc(sizeof(struct mprsas_target) *
734 	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
735 	if (!sassc->targets) {
736 		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
737 		    __func__, __LINE__);
738 		free(sassc, M_MPR);
739 		return (ENOMEM);
740 	}
741 	sc->sassc = sassc;
742 	sassc->sc = sc;
743 
744 	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
745 		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n");
746 		error = ENOMEM;
747 		goto out;
748 	}
749 
750 	unit = device_get_unit(sc->mpr_dev);
751 	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
752 	    unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
753 	if (sassc->sim == NULL) {
754 		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n");
755 		error = EINVAL;
756 		goto out;
757 	}
758 
759 	TAILQ_INIT(&sassc->ev_queue);
760 
761 	/* Initialize taskqueue for Event Handling */
762 	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
763 	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
764 	    taskqueue_thread_enqueue, &sassc->ev_tq);
765 
766 	/* Run the task queue with lowest priority */
767 	taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
768 	    device_get_nameunit(sc->mpr_dev));
769 
770 	mpr_lock(sc);
771 
772 	/*
773 	 * XXX There should be a bus for every port on the adapter, but since
774 	 * we're just going to fake the topology for now, we'll pretend that
775 	 * everything is just a target on a single bus.
776 	 */
777 	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
778 		mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n",
779 		    error);
780 		mpr_unlock(sc);
781 		goto out;
782 	}
783 
784 	/*
785 	 * Assume that discovery events will start right away.  Freezing
786 	 *
787 	 * Hold off boot until discovery is complete.
788 	 */
789 	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
790 	sc->sassc->startup_refcount = 0;
791 	mprsas_startup_increment(sassc);
792 
793 	callout_init(&sassc->discovery_callout, 1 /*mprafe*/);
794 
795 	sassc->tm_count = 0;
796 
797 	/*
798 	 * Register for async events so we can determine the EEDP
799 	 * capabilities of devices.
800 	 */
801 	status = xpt_create_path(&sassc->path, /*periph*/NULL,
802 	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
803 	    CAM_LUN_WILDCARD);
804 	if (status != CAM_REQ_CMP) {
805 		mpr_printf(sc, "Error %#x creating sim path\n", status);
806 		sassc->path = NULL;
807 	} else {
808 		int event;
809 
810 #if (__FreeBSD_version >= 1000006) || \
811     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
812 		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
813 #else
814 		event = AC_FOUND_DEVICE;
815 #endif
816 		status = xpt_register_async(event, mprsas_async, sc,
817 					    sassc->path);
818 		if (status != CAM_REQ_CMP) {
819 			mpr_dprint(sc, MPR_ERROR,
820 			    "Error %#x registering async handler for "
821 			    "AC_ADVINFO_CHANGED events\n", status);
822 			xpt_free_path(sassc->path);
823 			sassc->path = NULL;
824 		}
825 	}
826 	if (status != CAM_REQ_CMP) {
827 		/*
828 		 * EEDP use is the exception, not the rule.
829 		 * Warn the user, but do not fail to attach.
830 		 */
831 		mpr_printf(sc, "EEDP capabilities disabled.\n");
832 	}
833 
834 	mpr_unlock(sc);
835 
836 	mprsas_register_events(sc);
837 out:
838 	if (error)
839 		mpr_detach_sas(sc);
840 	return (error);
841 }
842 
843 int
844 mpr_detach_sas(struct mpr_softc *sc)
845 {
846 	struct mprsas_softc *sassc;
847 	struct mprsas_lun *lun, *lun_tmp;
848 	struct mprsas_target *targ;
849 	int i;
850 
851 	MPR_FUNCTRACE(sc);
852 
853 	if (sc->sassc == NULL)
854 		return (0);
855 
856 	sassc = sc->sassc;
857 	mpr_deregister_events(sc, sassc->mprsas_eh);
858 
859 	/*
860 	 * Drain and free the event handling taskqueue with the lock
861 	 * unheld so that any parallel processing tasks drain properly
862 	 * without deadlocking.
863 	 */
864 	if (sassc->ev_tq != NULL)
865 		taskqueue_free(sassc->ev_tq);
866 
867 	/* Make sure CAM doesn't wedge if we had to bail out early. */
868 	mpr_lock(sc);
869 
870 	/* Deregister our async handler */
871 	if (sassc->path != NULL) {
872 		xpt_register_async(0, mprsas_async, sc, sassc->path);
873 		xpt_free_path(sassc->path);
874 		sassc->path = NULL;
875 	}
876 
877 	if (sassc->flags & MPRSAS_IN_STARTUP)
878 		xpt_release_simq(sassc->sim, 1);
879 
880 	if (sassc->sim != NULL) {
881 		xpt_bus_deregister(cam_sim_path(sassc->sim));
882 		cam_sim_free(sassc->sim, FALSE);
883 	}
884 
885 	sassc->flags |= MPRSAS_SHUTDOWN;
886 	mpr_unlock(sc);
887 
888 	if (sassc->devq != NULL)
889 		cam_simq_free(sassc->devq);
890 
891 	for (i = 0; i < sassc->maxtargets; i++) {
892 		targ = &sassc->targets[i];
893 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
894 			free(lun, M_MPR);
895 		}
896 	}
897 	free(sassc->targets, M_MPR);
898 	free(sassc, M_MPR);
899 	sc->sassc = NULL;
900 
901 	return (0);
902 }
903 
904 void
905 mprsas_discovery_end(struct mprsas_softc *sassc)
906 {
907 	struct mpr_softc *sc = sassc->sc;
908 
909 	MPR_FUNCTRACE(sc);
910 
911 	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
912 		callout_stop(&sassc->discovery_callout);
913 
914 }
915 
916 static void
917 mprsas_action(struct cam_sim *sim, union ccb *ccb)
918 {
919 	struct mprsas_softc *sassc;
920 
921 	sassc = cam_sim_softc(sim);
922 
923 	MPR_FUNCTRACE(sassc->sc);
924 	mpr_dprint(sassc->sc, MPR_TRACE, "%s func 0x%x\n", __func__,
925 	    ccb->ccb_h.func_code);
926 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
927 
928 	switch (ccb->ccb_h.func_code) {
929 	case XPT_PATH_INQ:
930 	{
931 		struct ccb_pathinq *cpi = &ccb->cpi;
932 
933 		cpi->version_num = 1;
934 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
935 		cpi->target_sprt = 0;
936 #if __FreeBSD_version >= 1000039
937 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
938 #else
939 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
940 #endif
941 		cpi->hba_eng_cnt = 0;
942 		cpi->max_target = sassc->maxtargets - 1;
943 		cpi->max_lun = 255;
944 		cpi->initiator_id = sassc->maxtargets - 1;
945 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
946 		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
947 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
948 		cpi->unit_number = cam_sim_unit(sim);
949 		cpi->bus_id = cam_sim_bus(sim);
950 		/*
951 		 * XXXSLM-I think this needs to change based on config page or
952 		 * something instead of hardcoded to 150000.
953 		 */
954 		cpi->base_transfer_speed = 150000;
955 		cpi->transport = XPORT_SAS;
956 		cpi->transport_version = 0;
957 		cpi->protocol = PROTO_SCSI;
958 		cpi->protocol_version = SCSI_REV_SPC;
959 #if __FreeBSD_version >= 800001
960 		/*
961 		 * XXXSLM-probably need to base this number on max SGL's and
962 		 * page size.
963 		 */
964 		cpi->maxio = 256 * 1024;
965 #endif
966 		cpi->ccb_h.status = CAM_REQ_CMP;
967 		break;
968 	}
969 	case XPT_GET_TRAN_SETTINGS:
970 	{
971 		struct ccb_trans_settings	*cts;
972 		struct ccb_trans_settings_sas	*sas;
973 		struct ccb_trans_settings_scsi	*scsi;
974 		struct mprsas_target *targ;
975 
976 		cts = &ccb->cts;
977 		sas = &cts->xport_specific.sas;
978 		scsi = &cts->proto_specific.scsi;
979 
980 		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
981 		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
982 		    cts->ccb_h.target_id));
983 		targ = &sassc->targets[cts->ccb_h.target_id];
984 		if (targ->handle == 0x0) {
985 			cts->ccb_h.status = CAM_DEV_NOT_THERE;
986 			break;
987 		}
988 
989 		cts->protocol_version = SCSI_REV_SPC2;
990 		cts->transport = XPORT_SAS;
991 		cts->transport_version = 0;
992 
993 		sas->valid = CTS_SAS_VALID_SPEED;
994 		switch (targ->linkrate) {
995 		case 0x08:
996 			sas->bitrate = 150000;
997 			break;
998 		case 0x09:
999 			sas->bitrate = 300000;
1000 			break;
1001 		case 0x0a:
1002 			sas->bitrate = 600000;
1003 			break;
1004 		default:
1005 			sas->valid = 0;
1006 		}
1007 
1008 		cts->protocol = PROTO_SCSI;
1009 		scsi->valid = CTS_SCSI_VALID_TQ;
1010 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1011 
1012 		cts->ccb_h.status = CAM_REQ_CMP;
1013 		break;
1014 	}
1015 	case XPT_CALC_GEOMETRY:
1016 		cam_calc_geometry(&ccb->ccg, /*extended*/1);
1017 		ccb->ccb_h.status = CAM_REQ_CMP;
1018 		break;
1019 	case XPT_RESET_DEV:
1020 		mpr_dprint(sassc->sc, MPR_XINFO,
1021 		    "mprsas_action XPT_RESET_DEV\n");
1022 		mprsas_action_resetdev(sassc, ccb);
1023 		return;
1024 	case XPT_RESET_BUS:
1025 	case XPT_ABORT:
1026 	case XPT_TERM_IO:
1027 		mpr_dprint(sassc->sc, MPR_XINFO,
1028 		    "mprsas_action faking success for abort or reset\n");
1029 		ccb->ccb_h.status = CAM_REQ_CMP;
1030 		break;
1031 	case XPT_SCSI_IO:
1032 		mprsas_action_scsiio(sassc, ccb);
1033 		return;
1034 #if __FreeBSD_version >= 900026
1035 	case XPT_SMP_IO:
1036 		mprsas_action_smpio(sassc, ccb);
1037 		return;
1038 #endif
1039 	default:
1040 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1041 		break;
1042 	}
1043 	xpt_done(ccb);
1044 
1045 }
1046 
1047 static void
1048 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1049     target_id_t target_id, lun_id_t lun_id)
1050 {
1051 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1052 	struct cam_path *path;
1053 
1054 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1055 	    ac_code, target_id, (uintmax_t)lun_id);
1056 
1057 	if (xpt_create_path(&path, NULL,
1058 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1059 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1060 			   "notification\n");
1061 		return;
1062 	}
1063 
1064 	xpt_async(ac_code, path, NULL);
1065 	xpt_free_path(path);
1066 }
1067 
1068 static void
1069 mprsas_complete_all_commands(struct mpr_softc *sc)
1070 {
1071 	struct mpr_command *cm;
1072 	int i;
1073 	int completed;
1074 
1075 	MPR_FUNCTRACE(sc);
1076 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1077 
1078 	/* complete all commands with a NULL reply */
1079 	for (i = 1; i < sc->num_reqs; i++) {
1080 		cm = &sc->commands[i];
1081 		cm->cm_reply = NULL;
1082 		completed = 0;
1083 
1084 		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
1085 			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
1086 
1087 		if (cm->cm_complete != NULL) {
1088 			mprsas_log_command(cm, MPR_RECOVERY,
1089 			    "completing cm %p state %x ccb %p for diag reset\n",
1090 			    cm, cm->cm_state, cm->cm_ccb);
1091 			cm->cm_complete(sc, cm);
1092 			completed = 1;
1093 		}
1094 
1095 		if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
1096 			mprsas_log_command(cm, MPR_RECOVERY,
1097 			    "waking up cm %p state %x ccb %p for diag reset\n",
1098 			    cm, cm->cm_state, cm->cm_ccb);
1099 			wakeup(cm);
1100 			completed = 1;
1101 		}
1102 
1103 		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
1104 			/* this should never happen, but if it does, log */
1105 			mprsas_log_command(cm, MPR_RECOVERY,
1106 			    "cm %p state %x flags 0x%x ccb %p during diag "
1107 			    "reset\n", cm, cm->cm_state, cm->cm_flags,
1108 			    cm->cm_ccb);
1109 		}
1110 	}
1111 }
1112 
1113 void
1114 mprsas_handle_reinit(struct mpr_softc *sc)
1115 {
1116 	int i;
1117 
1118 	/* Go back into startup mode and freeze the simq, so that CAM
1119 	 * doesn't send any commands until after we've rediscovered all
1120 	 * targets and found the proper device handles for them.
1121 	 *
1122 	 * After the reset, portenable will trigger discovery, and after all
1123 	 * discovery-related activities have finished, the simq will be
1124 	 * released.
1125 	 */
1126 	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1127 	sc->sassc->flags |= MPRSAS_IN_STARTUP;
1128 	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1129 	mprsas_startup_increment(sc->sassc);
1130 
1131 	/* notify CAM of a bus reset */
1132 	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1133 	    CAM_LUN_WILDCARD);
1134 
1135 	/* complete and cleanup after all outstanding commands */
1136 	mprsas_complete_all_commands(sc);
1137 
1138 	mpr_dprint(sc, MPR_INIT, "%s startup %u tm %u after command "
1139 	    "completion\n", __func__, sc->sassc->startup_refcount,
1140 	    sc->sassc->tm_count);
1141 
1142 	/* zero all the target handles, since they may change after the
1143 	 * reset, and we have to rediscover all the targets and use the new
1144 	 * handles.
1145 	 */
1146 	for (i = 0; i < sc->sassc->maxtargets; i++) {
1147 		if (sc->sassc->targets[i].outstanding != 0)
1148 			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1149 			    i, sc->sassc->targets[i].outstanding);
1150 		sc->sassc->targets[i].handle = 0x0;
1151 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1152 		sc->sassc->targets[i].outstanding = 0;
1153 		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
1154 	}
1155 }
1156 static void
1157 mprsas_tm_timeout(void *data)
1158 {
1159 	struct mpr_command *tm = data;
1160 	struct mpr_softc *sc = tm->cm_sc;
1161 
1162 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1163 
1164 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY,
1165 	    "task mgmt %p timed out\n", tm);
1166 	mpr_reinit(sc);
1167 }
1168 
1169 static void
1170 mprsas_logical_unit_reset_complete(struct mpr_softc *sc,
1171     struct mpr_command *tm)
1172 {
1173 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1174 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1175 	unsigned int cm_count = 0;
1176 	struct mpr_command *cm;
1177 	struct mprsas_target *targ;
1178 
1179 	callout_stop(&tm->cm_callout);
1180 
1181 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1182 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1183 	targ = tm->cm_targ;
1184 
1185 	/*
1186 	 * Currently there should be no way we can hit this case.  It only
1187 	 * happens when we have a failure to allocate chain frames, and
1188 	 * task management commands don't have S/G lists.
1189 	 */
1190 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1191 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! "
1192 		    "This should not happen!\n", __func__, tm->cm_flags);
1193 		mprsas_free_tm(sc, tm);
1194 		return;
1195 	}
1196 
1197 	if (reply == NULL) {
1198 		mprsas_log_command(tm, MPR_RECOVERY,
1199 		    "NULL reset reply for tm %p\n", tm);
1200 		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1201 			/* this completion was due to a reset, just cleanup */
1202 			targ->flags &= ~MPRSAS_TARGET_INRESET;
1203 			targ->tm = NULL;
1204 			mprsas_free_tm(sc, tm);
1205 		}
1206 		else {
1207 			/* we should have gotten a reply. */
1208 			mpr_reinit(sc);
1209 		}
1210 		return;
1211 	}
1212 
1213 	mprsas_log_command(tm, MPR_RECOVERY,
1214 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1215 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1216 	    le32toh(reply->TerminationCount));
1217 
1218 	/* See if there are any outstanding commands for this LUN.
1219 	 * This could be made more efficient by using a per-LU data
1220 	 * structure of some sort.
1221 	 */
1222 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1223 		if (cm->cm_lun == tm->cm_lun)
1224 			cm_count++;
1225 	}
1226 
1227 	if (cm_count == 0) {
1228 		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1229 		    "logical unit %u finished recovery after reset\n",
1230 		    tm->cm_lun, tm);
1231 
1232 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1233 		    tm->cm_lun);
1234 
1235 		/* we've finished recovery for this logical unit.  check and
1236 		 * see if some other logical unit has a timedout command
1237 		 * that needs to be processed.
1238 		 */
1239 		cm = TAILQ_FIRST(&targ->timedout_commands);
1240 		if (cm) {
1241 			mprsas_send_abort(sc, tm, cm);
1242 		}
1243 		else {
1244 			targ->tm = NULL;
1245 			mprsas_free_tm(sc, tm);
1246 		}
1247 	}
1248 	else {
1249 		/* if we still have commands for this LUN, the reset
1250 		 * effectively failed, regardless of the status reported.
1251 		 * Escalate to a target reset.
1252 		 */
1253 		mprsas_log_command(tm, MPR_RECOVERY,
1254 		    "logical unit reset complete for tm %p, but still have %u "
1255 		    "command(s)\n", tm, cm_count);
1256 		mprsas_send_reset(sc, tm,
1257 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1258 	}
1259 }
1260 
1261 static void
1262 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1263 {
1264 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1265 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1266 	struct mprsas_target *targ;
1267 
1268 	callout_stop(&tm->cm_callout);
1269 
1270 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1271 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1272 	targ = tm->cm_targ;
1273 
1274 	/*
1275 	 * Currently there should be no way we can hit this case.  It only
1276 	 * happens when we have a failure to allocate chain frames, and
1277 	 * task management commands don't have S/G lists.
1278 	 */
1279 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1280 		mpr_dprint(sc, MPR_ERROR,"%s: cm_flags = %#x for target reset! "
1281 		    "This should not happen!\n", __func__, tm->cm_flags);
1282 		mprsas_free_tm(sc, tm);
1283 		return;
1284 	}
1285 
1286 	if (reply == NULL) {
1287 		mprsas_log_command(tm, MPR_RECOVERY,
1288 		    "NULL reset reply for tm %p\n", tm);
1289 		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1290 			/* this completion was due to a reset, just cleanup */
1291 			targ->flags &= ~MPRSAS_TARGET_INRESET;
1292 			targ->tm = NULL;
1293 			mprsas_free_tm(sc, tm);
1294 		}
1295 		else {
1296 			/* we should have gotten a reply. */
1297 			mpr_reinit(sc);
1298 		}
1299 		return;
1300 	}
1301 
1302 	mprsas_log_command(tm, MPR_RECOVERY,
1303 	    "target reset status 0x%x code 0x%x count %u\n",
1304 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1305 	    le32toh(reply->TerminationCount));
1306 
1307 	targ->flags &= ~MPRSAS_TARGET_INRESET;
1308 
1309 	if (targ->outstanding == 0) {
1310 		/* we've finished recovery for this target and all
1311 		 * of its logical units.
1312 		 */
1313 		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1314 		    "recovery finished after target reset\n");
1315 
1316 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1317 		    CAM_LUN_WILDCARD);
1318 
1319 		targ->tm = NULL;
1320 		mprsas_free_tm(sc, tm);
1321 	}
1322 	else {
1323 		/* after a target reset, if this target still has
1324 		 * outstanding commands, the reset effectively failed,
1325 		 * regardless of the status reported.  escalate.
1326 		 */
1327 		mprsas_log_command(tm, MPR_RECOVERY,
1328 		    "target reset complete for tm %p, but still have %u "
1329 		    "command(s)\n", tm, targ->outstanding);
1330 		mpr_reinit(sc);
1331 	}
1332 }
1333 
1334 #define MPR_RESET_TIMEOUT 30
1335 
1336 static int
1337 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
1338 {
1339 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1340 	struct mprsas_target *target;
1341 	int err;
1342 
1343 	target = tm->cm_targ;
1344 	if (target->handle == 0) {
1345 		mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1346 		    __func__, target->tid);
1347 		return -1;
1348 	}
1349 
1350 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1351 	req->DevHandle = htole16(target->handle);
1352 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1353 	req->TaskType = type;
1354 
1355 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1356 		/* XXX Need to handle invalid LUNs */
1357 		MPR_SET_LUN(req->LUN, tm->cm_lun);
1358 		tm->cm_targ->logical_unit_resets++;
1359 		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1360 		    "sending logical unit reset\n");
1361 		tm->cm_complete = mprsas_logical_unit_reset_complete;
1362 	}
1363 	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1364 		/*
1365 		 * Target reset method =
1366 		 *     SAS Hard Link Reset / SATA Link Reset
1367 		 */
1368 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1369 		tm->cm_targ->target_resets++;
1370 		tm->cm_targ->flags |= MPRSAS_TARGET_INRESET;
1371 		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1372 		    "sending target reset\n");
1373 		tm->cm_complete = mprsas_target_reset_complete;
1374 	}
1375 	else {
1376 		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
1377 		return -1;
1378 	}
1379 
1380 	mpr_dprint(sc, MPR_XINFO, "to target %u handle 0x%04x\n", target->tid,
1381 	    target->handle);
1382 	if (target->encl_level_valid) {
1383 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
1384 		    "connector name (%4s)\n", target->encl_level,
1385 		    target->encl_slot, target->connector_name);
1386 	}
1387 
1388 	tm->cm_data = NULL;
1389 	tm->cm_desc.HighPriority.RequestFlags =
1390 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1391 	tm->cm_complete_data = (void *)tm;
1392 
1393 	callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
1394 	    mprsas_tm_timeout, tm);
1395 
1396 	err = mpr_map_command(sc, tm);
1397 	if (err)
1398 		mprsas_log_command(tm, MPR_RECOVERY,
1399 		    "error %d sending reset type %u\n",
1400 		    err, type);
1401 
1402 	return err;
1403 }
1404 
1405 
1406 static void
1407 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
1408 {
1409 	struct mpr_command *cm;
1410 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1411 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1412 	struct mprsas_target *targ;
1413 
1414 	callout_stop(&tm->cm_callout);
1415 
1416 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1417 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1418 	targ = tm->cm_targ;
1419 
1420 	/*
1421 	 * Currently there should be no way we can hit this case.  It only
1422 	 * happens when we have a failure to allocate chain frames, and
1423 	 * task management commands don't have S/G lists.
1424 	 */
1425 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1426 		mprsas_log_command(tm, MPR_RECOVERY,
1427 		    "cm_flags = %#x for abort %p TaskMID %u!\n",
1428 		    tm->cm_flags, tm, le16toh(req->TaskMID));
1429 		mprsas_free_tm(sc, tm);
1430 		return;
1431 	}
1432 
1433 	if (reply == NULL) {
1434 		mprsas_log_command(tm, MPR_RECOVERY,
1435 		    "NULL abort reply for tm %p TaskMID %u\n",
1436 		    tm, le16toh(req->TaskMID));
1437 		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1438 			/* this completion was due to a reset, just cleanup */
1439 			targ->tm = NULL;
1440 			mprsas_free_tm(sc, tm);
1441 		}
1442 		else {
1443 			/* we should have gotten a reply. */
1444 			mpr_reinit(sc);
1445 		}
1446 		return;
1447 	}
1448 
1449 	mprsas_log_command(tm, MPR_RECOVERY,
1450 	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1451 	    le16toh(req->TaskMID),
1452 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1453 	    le32toh(reply->TerminationCount));
1454 
1455 	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1456 	if (cm == NULL) {
1457 		/* if there are no more timedout commands, we're done with
1458 		 * error recovery for this target.
1459 		 */
1460 		mprsas_log_command(tm, MPR_RECOVERY,
1461 		    "finished recovery after aborting TaskMID %u\n",
1462 		    le16toh(req->TaskMID));
1463 
1464 		targ->tm = NULL;
1465 		mprsas_free_tm(sc, tm);
1466 	}
1467 	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1468 		/* abort success, but we have more timedout commands to abort */
1469 		mprsas_log_command(tm, MPR_RECOVERY,
1470 		    "continuing recovery after aborting TaskMID %u\n",
1471 		    le16toh(req->TaskMID));
1472 
1473 		mprsas_send_abort(sc, tm, cm);
1474 	}
1475 	else {
1476 		/* we didn't get a command completion, so the abort
1477 		 * failed as far as we're concerned.  escalate.
1478 		 */
1479 		mprsas_log_command(tm, MPR_RECOVERY,
1480 		    "abort failed for TaskMID %u tm %p\n",
1481 		    le16toh(req->TaskMID), tm);
1482 
1483 		mprsas_send_reset(sc, tm,
1484 		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1485 	}
1486 }
1487 
1488 #define MPR_ABORT_TIMEOUT 5
1489 
1490 static int
1491 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1492     struct mpr_command *cm)
1493 {
1494 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1495 	struct mprsas_target *targ;
1496 	int err;
1497 
1498 	targ = cm->cm_targ;
1499 	if (targ->handle == 0) {
1500 		mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1501 		    __func__, cm->cm_ccb->ccb_h.target_id);
1502 		return -1;
1503 	}
1504 
1505 	mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1506 	    "Aborting command %p\n", cm);
1507 
1508 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1509 	req->DevHandle = htole16(targ->handle);
1510 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1511 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1512 
1513 	/* XXX Need to handle invalid LUNs */
1514 	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1515 
1516 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1517 
1518 	tm->cm_data = NULL;
1519 	tm->cm_desc.HighPriority.RequestFlags =
1520 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1521 	tm->cm_complete = mprsas_abort_complete;
1522 	tm->cm_complete_data = (void *)tm;
1523 	tm->cm_targ = cm->cm_targ;
1524 	tm->cm_lun = cm->cm_lun;
1525 
1526 	callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1527 	    mprsas_tm_timeout, tm);
1528 
1529 	targ->aborts++;
1530 
1531 	err = mpr_map_command(sc, tm);
1532 	if (err)
1533 		mprsas_log_command(tm, MPR_RECOVERY,
1534 		    "error %d sending abort for cm %p SMID %u\n",
1535 		    err, cm, req->TaskMID);
1536 	return err;
1537 }
1538 
1539 
1540 static void
1541 mprsas_scsiio_timeout(void *data)
1542 {
1543 	struct mpr_softc *sc;
1544 	struct mpr_command *cm;
1545 	struct mprsas_target *targ;
1546 
1547 	cm = (struct mpr_command *)data;
1548 	sc = cm->cm_sc;
1549 
1550 	MPR_FUNCTRACE(sc);
1551 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1552 
1553 	mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm);
1554 
1555 	/*
1556 	 * Run the interrupt handler to make sure it's not pending.  This
1557 	 * isn't perfect because the command could have already completed
1558 	 * and been re-used, though this is unlikely.
1559 	 */
1560 	mpr_intr_locked(sc);
1561 	if (cm->cm_state == MPR_CM_STATE_FREE) {
1562 		mprsas_log_command(cm, MPR_XINFO,
1563 		    "SCSI command %p almost timed out\n", cm);
1564 		return;
1565 	}
1566 
1567 	if (cm->cm_ccb == NULL) {
1568 		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
1569 		return;
1570 	}
1571 
1572 	targ = cm->cm_targ;
1573 	targ->timeouts++;
1574 
1575 	mprsas_log_command(cm, MPR_XINFO, "command timeout cm %p ccb %p "
1576 	    "target %u, handle(0x%04x)\n", cm, cm->cm_ccb, targ->tid,
1577 	    targ->handle);
1578 	if (targ->encl_level_valid) {
1579 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
1580 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
1581 		    targ->connector_name);
1582 	}
1583 
1584 	/* XXX first, check the firmware state, to see if it's still
1585 	 * operational.  if not, do a diag reset.
1586 	 */
1587 
1588 	cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1589 	cm->cm_state = MPR_CM_STATE_TIMEDOUT;
1590 	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1591 
1592 	if (targ->tm != NULL) {
1593 		/* target already in recovery, just queue up another
1594 		 * timedout command to be processed later.
1595 		 */
1596 		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
1597 		    "processing by tm %p\n", cm, targ->tm);
1598 	}
1599 	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
1600 		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
1601 		    cm, targ->tm);
1602 
1603 		/* start recovery by aborting the first timedout command */
1604 		mprsas_send_abort(sc, targ->tm, cm);
1605 	}
1606 	else {
1607 		/* XXX queue this target up for recovery once a TM becomes
1608 		 * available.  The firmware only has a limited number of
1609 		 * HighPriority credits for the high priority requests used
1610 		 * for task management, and we ran out.
1611 		 *
1612 		 * Isilon: don't worry about this for now, since we have
1613 		 * more credits than disks in an enclosure, and limit
1614 		 * ourselves to one TM per target for recovery.
1615 		 */
1616 		mpr_dprint(sc, MPR_RECOVERY,
1617 		    "timedout cm %p failed to allocate a tm\n", cm);
1618 	}
1619 }
1620 
1621 static void
1622 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1623 {
1624 	MPI2_SCSI_IO_REQUEST *req;
1625 	struct ccb_scsiio *csio;
1626 	struct mpr_softc *sc;
1627 	struct mprsas_target *targ;
1628 	struct mprsas_lun *lun;
1629 	struct mpr_command *cm;
1630 	uint8_t i, lba_byte, *ref_tag_addr;
1631 	uint16_t eedp_flags;
1632 	uint32_t mpi_control;
1633 
1634 	sc = sassc->sc;
1635 	MPR_FUNCTRACE(sc);
1636 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1637 
1638 	csio = &ccb->csio;
1639 	targ = &sassc->targets[csio->ccb_h.target_id];
1640 	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
1641 	if (targ->handle == 0x0) {
1642 		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1643 		    __func__, csio->ccb_h.target_id);
1644 		csio->ccb_h.status = CAM_DEV_NOT_THERE;
1645 		xpt_done(ccb);
1646 		return;
1647 	}
1648 	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1649 		mpr_dprint(sc, MPR_TRACE, "%s Raid component no SCSI IO "
1650 		    "supported %u\n", __func__, csio->ccb_h.target_id);
1651 		csio->ccb_h.status = CAM_DEV_NOT_THERE;
1652 		xpt_done(ccb);
1653 		return;
1654 	}
1655 	/*
1656 	 * Sometimes, it is possible to get a command that is not "In
1657 	 * Progress" and was actually aborted by the upper layer.  Check for
1658 	 * this here and complete the command without error.
1659 	 */
1660 	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
1661 		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1662 		    "target %u\n", __func__, csio->ccb_h.target_id);
1663 		xpt_done(ccb);
1664 		return;
1665 	}
1666 	/*
1667 	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
1668 	 * that the volume has timed out.  We want volumes to be enumerated
1669 	 * until they are deleted/removed, not just failed.
1670 	 */
1671 	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1672 		if (targ->devinfo == 0)
1673 			csio->ccb_h.status = CAM_REQ_CMP;
1674 		else
1675 			csio->ccb_h.status = CAM_SEL_TIMEOUT;
1676 		xpt_done(ccb);
1677 		return;
1678 	}
1679 
1680 	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1681 		mpr_dprint(sc, MPR_TRACE, "%s shutting down\n", __func__);
1682 		csio->ccb_h.status = CAM_DEV_NOT_THERE;
1683 		xpt_done(ccb);
1684 		return;
1685 	}
1686 
1687 	cm = mpr_alloc_command(sc);
1688 	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1689 		if (cm != NULL) {
1690 			mpr_free_command(sc, cm);
1691 		}
1692 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
1693 			xpt_freeze_simq(sassc->sim, 1);
1694 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
1695 		}
1696 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1697 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1698 		xpt_done(ccb);
1699 		return;
1700 	}
1701 
1702 	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1703 	bzero(req, sizeof(*req));
1704 	req->DevHandle = htole16(targ->handle);
1705 	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1706 	req->MsgFlags = 0;
1707 	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1708 	req->SenseBufferLength = MPR_SENSE_LEN;
1709 	req->SGLFlags = 0;
1710 	req->ChainOffset = 0;
1711 	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
1712 	req->SGLOffset1= 0;
1713 	req->SGLOffset2= 0;
1714 	req->SGLOffset3= 0;
1715 	req->SkipCount = 0;
1716 	req->DataLength = htole32(csio->dxfer_len);
1717 	req->BidirectionalDataLength = 0;
1718 	req->IoFlags = htole16(csio->cdb_len);
1719 	req->EEDPFlags = 0;
1720 
1721 	/* Note: BiDirectional transfers are not supported */
1722 	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1723 	case CAM_DIR_IN:
1724 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
1725 		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
1726 		break;
1727 	case CAM_DIR_OUT:
1728 		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1729 		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
1730 		break;
1731 	case CAM_DIR_NONE:
1732 	default:
1733 		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1734 		break;
1735 	}
1736 
1737 	if (csio->cdb_len == 32)
1738 		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1739 	/*
1740 	 * It looks like the hardware doesn't require an explicit tag
1741 	 * number for each transaction.  SAM Task Management not supported
1742 	 * at the moment.
1743 	 */
1744 	switch (csio->tag_action) {
1745 	case MSG_HEAD_OF_Q_TAG:
1746 		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1747 		break;
1748 	case MSG_ORDERED_Q_TAG:
1749 		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1750 		break;
1751 	case MSG_ACA_TASK:
1752 		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1753 		break;
1754 	case CAM_TAG_ACTION_NONE:
1755 	case MSG_SIMPLE_Q_TAG:
1756 	default:
1757 		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
1758 		break;
1759 	}
1760 	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1761 	req->Control = htole32(mpi_control);
1762 
1763 	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1764 		mpr_free_command(sc, cm);
1765 		ccb->ccb_h.status = CAM_LUN_INVALID;
1766 		xpt_done(ccb);
1767 		return;
1768 	}
1769 
1770 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
1771 		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1772 	else
1773 		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1774 	req->IoFlags = htole16(csio->cdb_len);
1775 
1776 	/*
1777 	 * Check if EEDP is supported and enabled.  If it is then check if the
1778 	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
1779 	 * is formatted for EEDP support.  If all of this is true, set CDB up
1780 	 * for EEDP transfer.
1781 	 */
1782 	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1783 	if (sc->eedp_enabled && eedp_flags) {
1784 		SLIST_FOREACH(lun, &targ->luns, lun_link) {
1785 			if (lun->lun_id == csio->ccb_h.target_lun) {
1786 				break;
1787 			}
1788 		}
1789 
1790 		if ((lun != NULL) && (lun->eedp_formatted)) {
1791 			req->EEDPBlockSize = htole16(lun->eedp_block_size);
1792 			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1793 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1794 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1795 			req->EEDPFlags = htole16(eedp_flags);
1796 
1797 			/*
1798 			 * If CDB less than 32, fill in Primary Ref Tag with
1799 			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
1800 			 * already there.  Also, set protection bit.  FreeBSD
1801 			 * currently does not support CDBs bigger than 16, but
1802 			 * the code doesn't hurt, and will be here for the
1803 			 * future.
1804 			 */
1805 			if (csio->cdb_len != 32) {
1806 				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1807 				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1808 				    PrimaryReferenceTag;
1809 				for (i = 0; i < 4; i++) {
1810 					*ref_tag_addr =
1811 					    req->CDB.CDB32[lba_byte + i];
1812 					ref_tag_addr++;
1813 				}
1814 				req->CDB.EEDP32.PrimaryReferenceTag =
1815 				    htole32(req->
1816 				    CDB.EEDP32.PrimaryReferenceTag);
1817 				req->CDB.EEDP32.PrimaryApplicationTagMask =
1818 				    0xFFFF;
1819 				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1820 				    0x20;
1821 			} else {
1822 				eedp_flags |=
1823 				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1824 				req->EEDPFlags = htole16(eedp_flags);
1825 				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1826 				    0x1F) | 0x20;
1827 			}
1828 		}
1829 	}
1830 
1831 	cm->cm_length = csio->dxfer_len;
1832 	if (cm->cm_length != 0) {
1833 		cm->cm_data = ccb;
1834 		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
1835 	} else {
1836 		cm->cm_data = NULL;
1837 	}
1838 	cm->cm_sge = &req->SGL;
1839 	cm->cm_sglsize = (32 - 24) * 4;
1840 	cm->cm_complete = mprsas_scsiio_complete;
1841 	cm->cm_complete_data = ccb;
1842 	cm->cm_targ = targ;
1843 	cm->cm_lun = csio->ccb_h.target_lun;
1844 	cm->cm_ccb = ccb;
1845 	/*
1846 	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
1847 	 * and set descriptor type.
1848 	 */
1849 	if (targ->scsi_req_desc_type ==
1850 	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
1851 		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
1852 		cm->cm_desc.FastPathSCSIIO.RequestFlags =
1853 		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
1854 		cm->cm_desc.FastPathSCSIIO.DevHandle = htole16(targ->handle);
1855 	} else {
1856 		cm->cm_desc.SCSIIO.RequestFlags =
1857 		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1858 		cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1859 	}
1860 
1861 	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1862 	   mprsas_scsiio_timeout, cm);
1863 
1864 	targ->issued++;
1865 	targ->outstanding++;
1866 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1867 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1868 
1869 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1870 	    __func__, cm, ccb, targ->outstanding);
1871 
1872 	mpr_map_command(sc, cm);
1873 	return;
1874 }
1875 
1876 static void
1877 mpr_response_code(struct mpr_softc *sc, u8 response_code)
1878 {
1879         char *desc;
1880 
1881         switch (response_code) {
1882         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1883                 desc = "task management request completed";
1884                 break;
1885         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1886                 desc = "invalid frame";
1887                 break;
1888         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1889                 desc = "task management request not supported";
1890                 break;
1891         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1892                 desc = "task management request failed";
1893                 break;
1894         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1895                 desc = "task management request succeeded";
1896                 break;
1897         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1898                 desc = "invalid lun";
1899                 break;
1900         case 0xA:
1901                 desc = "overlapped tag attempted";
1902                 break;
1903         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1904                 desc = "task queued, however not sent to target";
1905                 break;
1906         default:
1907                 desc = "unknown";
1908                 break;
1909         }
1910 	mpr_dprint(sc, MPR_XINFO, "response_code(0x%01x): %s\n", response_code,
1911 	    desc);
1912 }
1913 
1914 /**
1915  * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
1916  */
1917 static void
1918 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
1919     Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
1920 {
1921 	u32 response_info;
1922 	u8 *response_bytes;
1923 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1924 	    MPI2_IOCSTATUS_MASK;
1925 	u8 scsi_state = mpi_reply->SCSIState;
1926 	u8 scsi_status = mpi_reply->SCSIStatus;
1927 	char *desc_ioc_state = NULL;
1928 	char *desc_scsi_status = NULL;
1929 	char *desc_scsi_state = sc->tmp_string;
1930 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1931 
1932 	if (log_info == 0x31170000)
1933 		return;
1934 
1935 	switch (ioc_status) {
1936 	case MPI2_IOCSTATUS_SUCCESS:
1937 		desc_ioc_state = "success";
1938 		break;
1939 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1940 		desc_ioc_state = "invalid function";
1941 		break;
1942 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1943 		desc_ioc_state = "scsi recovered error";
1944 		break;
1945 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1946 		desc_ioc_state = "scsi invalid dev handle";
1947 		break;
1948 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1949 		desc_ioc_state = "scsi device not there";
1950 		break;
1951 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1952 		desc_ioc_state = "scsi data overrun";
1953 		break;
1954 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1955 		desc_ioc_state = "scsi data underrun";
1956 		break;
1957 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1958 		desc_ioc_state = "scsi io data error";
1959 		break;
1960 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1961 		desc_ioc_state = "scsi protocol error";
1962 		break;
1963 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1964 		desc_ioc_state = "scsi task terminated";
1965 		break;
1966 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1967 		desc_ioc_state = "scsi residual mismatch";
1968 		break;
1969 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1970 		desc_ioc_state = "scsi task mgmt failed";
1971 		break;
1972 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1973 		desc_ioc_state = "scsi ioc terminated";
1974 		break;
1975 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1976 		desc_ioc_state = "scsi ext terminated";
1977 		break;
1978 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1979 		desc_ioc_state = "eedp guard error";
1980 		break;
1981 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1982 		desc_ioc_state = "eedp ref tag error";
1983 		break;
1984 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1985 		desc_ioc_state = "eedp app tag error";
1986 		break;
1987 	default:
1988 		desc_ioc_state = "unknown";
1989 		break;
1990 	}
1991 
1992 	switch (scsi_status) {
1993 	case MPI2_SCSI_STATUS_GOOD:
1994 		desc_scsi_status = "good";
1995 		break;
1996 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
1997 		desc_scsi_status = "check condition";
1998 		break;
1999 	case MPI2_SCSI_STATUS_CONDITION_MET:
2000 		desc_scsi_status = "condition met";
2001 		break;
2002 	case MPI2_SCSI_STATUS_BUSY:
2003 		desc_scsi_status = "busy";
2004 		break;
2005 	case MPI2_SCSI_STATUS_INTERMEDIATE:
2006 		desc_scsi_status = "intermediate";
2007 		break;
2008 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2009 		desc_scsi_status = "intermediate condmet";
2010 		break;
2011 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2012 		desc_scsi_status = "reservation conflict";
2013 		break;
2014 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2015 		desc_scsi_status = "command terminated";
2016 		break;
2017 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
2018 		desc_scsi_status = "task set full";
2019 		break;
2020 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
2021 		desc_scsi_status = "aca active";
2022 		break;
2023 	case MPI2_SCSI_STATUS_TASK_ABORTED:
2024 		desc_scsi_status = "task aborted";
2025 		break;
2026 	default:
2027 		desc_scsi_status = "unknown";
2028 		break;
2029 	}
2030 
2031 	desc_scsi_state[0] = '\0';
2032 	if (!scsi_state)
2033 		desc_scsi_state = " ";
2034 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2035 		strcat(desc_scsi_state, "response info ");
2036 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2037 		strcat(desc_scsi_state, "state terminated ");
2038 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2039 		strcat(desc_scsi_state, "no status ");
2040 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2041 		strcat(desc_scsi_state, "autosense failed ");
2042 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2043 		strcat(desc_scsi_state, "autosense valid ");
2044 
2045 	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2046 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2047 	if (targ->encl_level_valid) {
2048 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2049 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2050 		    targ->connector_name);
2051 	}
2052 	/* We can add more detail about underflow data here
2053 	 * TO-DO
2054 	 * */
2055 	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2056 	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2057 	    desc_scsi_state, scsi_state);
2058 
2059 	if (sc->mpr_debug & MPR_XINFO &&
2060 		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2061 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2062 		scsi_sense_print(csio);
2063 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2064 	}
2065 
2066 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2067 		response_info = le32toh(mpi_reply->ResponseInfo);
2068 		response_bytes = (u8 *)&response_info;
2069 		mpr_response_code(sc,response_bytes[0]);
2070 	}
2071 }
2072 
2073 static void
2074 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2075 {
2076 	MPI2_SCSI_IO_REPLY *rep;
2077 	union ccb *ccb;
2078 	struct ccb_scsiio *csio;
2079 	struct mprsas_softc *sassc;
2080 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2081 	u8 *TLR_bits, TLR_on;
2082 	int dir = 0, i;
2083 	u16 alloc_len;
2084 
2085 	MPR_FUNCTRACE(sc);
2086 	mpr_dprint(sc, MPR_TRACE,
2087 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2088 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2089 	    cm->cm_targ->outstanding);
2090 
2091 	callout_stop(&cm->cm_callout);
2092 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2093 
2094 	sassc = sc->sassc;
2095 	ccb = cm->cm_complete_data;
2096 	csio = &ccb->csio;
2097 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2098 	/*
2099 	 * XXX KDM if the chain allocation fails, does it matter if we do
2100 	 * the sync and unload here?  It is simpler to do it in every case,
2101 	 * assuming it doesn't cause problems.
2102 	 */
2103 	if (cm->cm_data != NULL) {
2104 		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2105 			dir = BUS_DMASYNC_POSTREAD;
2106 		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2107 			dir = BUS_DMASYNC_POSTWRITE;
2108 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2109 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2110 	}
2111 
2112 	cm->cm_targ->completed++;
2113 	cm->cm_targ->outstanding--;
2114 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2115 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2116 
2117 	if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2118 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2119 		if (cm->cm_reply != NULL)
2120 			mprsas_log_command(cm, MPR_RECOVERY,
2121 			    "completed timedout cm %p ccb %p during recovery "
2122 			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2123 			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2124 			    rep->SCSIState, le32toh(rep->TransferCount));
2125 		else
2126 			mprsas_log_command(cm, MPR_RECOVERY,
2127 			    "completed timedout cm %p ccb %p during recovery\n",
2128 			    cm, cm->cm_ccb);
2129 	} else if (cm->cm_targ->tm != NULL) {
2130 		if (cm->cm_reply != NULL)
2131 			mprsas_log_command(cm, MPR_RECOVERY,
2132 			    "completed cm %p ccb %p during recovery "
2133 			    "ioc %x scsi %x state %x xfer %u\n",
2134 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2135 			    rep->SCSIStatus, rep->SCSIState,
2136 			    le32toh(rep->TransferCount));
2137 		else
2138 			mprsas_log_command(cm, MPR_RECOVERY,
2139 			    "completed cm %p ccb %p during recovery\n",
2140 			    cm, cm->cm_ccb);
2141 	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2142 		mprsas_log_command(cm, MPR_RECOVERY,
2143 		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2144 	}
2145 
2146 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2147 		/*
2148 		 * We ran into an error after we tried to map the command,
2149 		 * so we're getting a callback without queueing the command
2150 		 * to the hardware.  So we set the status here, and it will
2151 		 * be retained below.  We'll go through the "fast path",
2152 		 * because there can be no reply when we haven't actually
2153 		 * gone out to the hardware.
2154 		 */
2155 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
2156 
2157 		/*
2158 		 * Currently the only error included in the mask is
2159 		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2160 		 * chain frames.  We need to freeze the queue until we get
2161 		 * a command that completed without this error, which will
2162 		 * hopefully have some chain frames attached that we can
2163 		 * use.  If we wanted to get smarter about it, we would
2164 		 * only unfreeze the queue in this condition when we're
2165 		 * sure that we're getting some chain frames back.  That's
2166 		 * probably unnecessary.
2167 		 */
2168 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2169 			xpt_freeze_simq(sassc->sim, 1);
2170 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2171 			mpr_dprint(sc, MPR_INFO, "Error sending command, "
2172 				   "freezing SIM queue\n");
2173 		}
2174 	}
2175 
2176 	/*
2177 	 * If this is a Start Stop Unit command and it was issued by the driver
2178 	 * during shutdown, decrement the refcount to account for all of the
2179 	 * commands that were sent.  All SSU commands should be completed before
2180 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2181 	 * is TRUE.
2182 	 */
2183 	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2184 		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2185 		sc->SSU_refcount--;
2186 	}
2187 
2188 	/* Take the fast path to completion */
2189 	if (cm->cm_reply == NULL) {
2190 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2191 			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2192 				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2193 			else {
2194 				ccb->ccb_h.status = CAM_REQ_CMP;
2195 				ccb->csio.scsi_status = SCSI_STATUS_OK;
2196 			}
2197 			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2198 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2199 				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2200 				mpr_dprint(sc, MPR_XINFO,
2201 				    "Unfreezing SIM queue\n");
2202 			}
2203 		}
2204 
2205 		/*
2206 		 * There are two scenarios where the status won't be
2207 		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2208 		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2209 		 */
2210 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2211 			/*
2212 			 * Freeze the dev queue so that commands are
2213 			 * executed in the correct order with after error
2214 			 * recovery.
2215 			 */
2216 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2217 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2218 		}
2219 		mpr_free_command(sc, cm);
2220 		xpt_done(ccb);
2221 		return;
2222 	}
2223 
2224 	mprsas_log_command(cm, MPR_XINFO,
2225 	    "ioc %x scsi %x state %x xfer %u\n",
2226 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2227 	    le32toh(rep->TransferCount));
2228 
2229 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2230 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2231 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2232 		/* FALLTHROUGH */
2233 	case MPI2_IOCSTATUS_SUCCESS:
2234 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2235 
2236 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2237 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2238 			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2239 
2240 		/* Completion failed at the transport level. */
2241 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2242 		    MPI2_SCSI_STATE_TERMINATED)) {
2243 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2244 			break;
2245 		}
2246 
2247 		/* In a modern packetized environment, an autosense failure
2248 		 * implies that there's not much else that can be done to
2249 		 * recover the command.
2250 		 */
2251 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2252 			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
2253 			break;
2254 		}
2255 
2256 		/*
2257 		 * CAM doesn't care about SAS Response Info data, but if this is
2258 		 * the state check if TLR should be done.  If not, clear the
2259 		 * TLR_bits for the target.
2260 		 */
2261 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2262 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2263 		    == MPR_SCSI_RI_INVALID_FRAME)) {
2264 			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
2265 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2266 		}
2267 
2268 		/*
2269 		 * Intentionally override the normal SCSI status reporting
2270 		 * for these two cases.  These are likely to happen in a
2271 		 * multi-initiator environment, and we want to make sure that
2272 		 * CAM retries these commands rather than fail them.
2273 		 */
2274 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2275 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2276 			ccb->ccb_h.status = CAM_REQ_ABORTED;
2277 			break;
2278 		}
2279 
2280 		/* Handle normal status and sense */
2281 		csio->scsi_status = rep->SCSIStatus;
2282 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2283 			ccb->ccb_h.status = CAM_REQ_CMP;
2284 		else
2285 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2286 
2287 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2288 			int sense_len, returned_sense_len;
2289 
2290 			returned_sense_len = min(le32toh(rep->SenseCount),
2291 			    sizeof(struct scsi_sense_data));
2292 			if (returned_sense_len < csio->sense_len)
2293 				csio->sense_resid = csio->sense_len -
2294 				    returned_sense_len;
2295 			else
2296 				csio->sense_resid = 0;
2297 
2298 			sense_len = min(returned_sense_len,
2299 			    csio->sense_len - csio->sense_resid);
2300 			bzero(&csio->sense_data, sizeof(csio->sense_data));
2301 			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2302 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2303 		}
2304 
2305 		/*
2306 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2307 		 * and it's page code 0 (Supported Page List), and there is
2308 		 * inquiry data, and this is for a sequential access device, and
2309 		 * the device is an SSP target, and TLR is supported by the
2310 		 * controller, turn the TLR_bits value ON if page 0x90 is
2311 		 * supported.
2312 		 */
2313 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2314 		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2315 		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2316 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2317 		    (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
2318 		    T_SEQUENTIAL) && (sc->control_TLR) &&
2319 		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
2320 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2321 			vpd_list = (struct scsi_vpd_supported_page_list *)
2322 			    csio->data_ptr;
2323 			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
2324 			    TLR_bits;
2325 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2326 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2327 			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2328 			    csio->cdb_io.cdb_bytes[4];
2329 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2330 				if (vpd_list->list[i] == 0x90) {
2331 					*TLR_bits = TLR_on;
2332 					break;
2333 				}
2334 			}
2335 		}
2336 		break;
2337 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2338 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2339 		/*
2340 		 * If devinfo is 0 this will be a volume.  In that case don't
2341 		 * tell CAM that the volume is not there.  We want volumes to
2342 		 * be enumerated until they are deleted/removed, not just
2343 		 * failed.
2344 		 */
2345 		if (cm->cm_targ->devinfo == 0)
2346 			ccb->ccb_h.status = CAM_REQ_CMP;
2347 		else
2348 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2349 		break;
2350 	case MPI2_IOCSTATUS_INVALID_SGL:
2351 		mpr_print_scsiio_cmd(sc, cm);
2352 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2353 		break;
2354 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2355 		/*
2356 		 * This is one of the responses that comes back when an I/O
2357 		 * has been aborted.  If it is because of a timeout that we
2358 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2359 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2360 		 * command is the same (it gets retried, subject to the
2361 		 * retry counter), the only difference is what gets printed
2362 		 * on the console.
2363 		 */
2364 		if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2365 			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
2366 		else
2367 			ccb->ccb_h.status = CAM_REQ_ABORTED;
2368 		break;
2369 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2370 		/* resid is ignored for this condition */
2371 		csio->resid = 0;
2372 		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
2373 		break;
2374 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2375 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2376 		/*
2377 		 * Since these are generally external (i.e. hopefully
2378 		 * transient transport-related) errors, retry these without
2379 		 * decrementing the retry count.
2380 		 */
2381 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
2382 		mprsas_log_command(cm, MPR_INFO,
2383 		    "terminated ioc %x scsi %x state %x xfer %u\n",
2384 		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2385 		    le32toh(rep->TransferCount));
2386 		break;
2387 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2388 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2389 	case MPI2_IOCSTATUS_INVALID_VPID:
2390 	case MPI2_IOCSTATUS_INVALID_FIELD:
2391 	case MPI2_IOCSTATUS_INVALID_STATE:
2392 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2393 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2394 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2395 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2396 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2397 	default:
2398 		mprsas_log_command(cm, MPR_XINFO,
2399 		    "completed ioc %x scsi %x state %x xfer %u\n",
2400 		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2401 		    le32toh(rep->TransferCount));
2402 		csio->resid = cm->cm_length;
2403 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2404 		break;
2405 	}
2406 
2407 	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2408 
2409 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2410 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2411 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2412 		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2413 		    "queue\n");
2414 	}
2415 
2416 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2417 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2418 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2419 	}
2420 
2421 	mpr_free_command(sc, cm);
2422 	xpt_done(ccb);
2423 }
2424 
2425 #if __FreeBSD_version >= 900026
2426 static void
2427 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2428 {
2429 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2430 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2431 	uint64_t sasaddr;
2432 	union ccb *ccb;
2433 
2434 	ccb = cm->cm_complete_data;
2435 
2436 	/*
2437 	 * Currently there should be no way we can hit this case.  It only
2438 	 * happens when we have a failure to allocate chain frames, and SMP
2439 	 * commands require two S/G elements only.  That should be handled
2440 	 * in the standard request size.
2441 	 */
2442 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2443 		mpr_dprint(sc, MPR_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2444 		    __func__, cm->cm_flags);
2445 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2446 		goto bailout;
2447         }
2448 
2449 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2450 	if (rpl == NULL) {
2451 		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2452 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2453 		goto bailout;
2454 	}
2455 
2456 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2457 	sasaddr = le32toh(req->SASAddress.Low);
2458 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2459 
2460 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2461 	    MPI2_IOCSTATUS_SUCCESS ||
2462 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2463 		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2464 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2465 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2466 		goto bailout;
2467 	}
2468 
2469 	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address "
2470 	    "%#jx completed successfully\n", __func__, (uintmax_t)sasaddr);
2471 
2472 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2473 		ccb->ccb_h.status = CAM_REQ_CMP;
2474 	else
2475 		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2476 
2477 bailout:
2478 	/*
2479 	 * We sync in both directions because we had DMAs in the S/G list
2480 	 * in both directions.
2481 	 */
2482 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2483 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2484 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2485 	mpr_free_command(sc, cm);
2486 	xpt_done(ccb);
2487 }
2488 
2489 static void
2490 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
2491     uint64_t sasaddr)
2492 {
2493 	struct mpr_command *cm;
2494 	uint8_t *request, *response;
2495 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2496 	struct mpr_softc *sc;
2497 	struct sglist *sg;
2498 	int error;
2499 
2500 	sc = sassc->sc;
2501 	sg = NULL;
2502 	error = 0;
2503 
2504 #if __FreeBSD_version >= 1000029
2505 	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2506 	case CAM_DATA_PADDR:
2507 	case CAM_DATA_SG_PADDR:
2508 		/*
2509 		 * XXX We don't yet support physical addresses here.
2510 		 */
2511 		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2512 		    "supported\n", __func__);
2513 		ccb->ccb_h.status = CAM_REQ_INVALID;
2514 		xpt_done(ccb);
2515 		return;
2516 	case CAM_DATA_SG:
2517 		/*
2518 		 * The chip does not support more than one buffer for the
2519 		 * request or response.
2520 		 */
2521 		if ((ccb->smpio.smp_request_sglist_cnt > 1)
2522 		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2523 			mpr_dprint(sc, MPR_ERROR,
2524 			    "%s: multiple request or response buffer segments "
2525 			    "not supported for SMP\n", __func__);
2526 			ccb->ccb_h.status = CAM_REQ_INVALID;
2527 			xpt_done(ccb);
2528 			return;
2529 		}
2530 
2531 		/*
2532 		 * The CAM_SCATTER_VALID flag was originally implemented
2533 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2534 		 * We have two.  So, just take that flag to mean that we
2535 		 * might have S/G lists, and look at the S/G segment count
2536 		 * to figure out whether that is the case for each individual
2537 		 * buffer.
2538 		 */
2539 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2540 			bus_dma_segment_t *req_sg;
2541 
2542 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2543 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2544 		} else
2545 			request = ccb->smpio.smp_request;
2546 
2547 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2548 			bus_dma_segment_t *rsp_sg;
2549 
2550 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2551 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2552 		} else
2553 			response = ccb->smpio.smp_response;
2554 		break;
2555 	case CAM_DATA_VADDR:
2556 		request = ccb->smpio.smp_request;
2557 		response = ccb->smpio.smp_response;
2558 		break;
2559 	default:
2560 		ccb->ccb_h.status = CAM_REQ_INVALID;
2561 		xpt_done(ccb);
2562 		return;
2563 	}
2564 #else //__FreeBSD_version < 1000029
2565 	/*
2566 	 * XXX We don't yet support physical addresses here.
2567 	 */
2568 	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
2569 		mpr_printf(sc, "%s: physical addresses not supported\n",
2570 			   __func__);
2571 		ccb->ccb_h.status = CAM_REQ_INVALID;
2572 		xpt_done(ccb);
2573 		return;
2574 	}
2575 
2576 	/*
2577 	 * If the user wants to send an S/G list, check to make sure they
2578 	 * have single buffers.
2579 	 */
2580 	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
2581 		/*
2582 		 * The chip does not support more than one buffer for the
2583 		 * request or response.
2584 		 */
2585 	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
2586 		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2587 			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
2588 			    "response buffer segments not supported for SMP\n",
2589 			    __func__);
2590 			ccb->ccb_h.status = CAM_REQ_INVALID;
2591 			xpt_done(ccb);
2592 			return;
2593 		}
2594 
2595 		/*
2596 		 * The CAM_SCATTER_VALID flag was originally implemented
2597 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2598 		 * We have two.  So, just take that flag to mean that we
2599 		 * might have S/G lists, and look at the S/G segment count
2600 		 * to figure out whether that is the case for each individual
2601 		 * buffer.
2602 		 */
2603 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2604 			bus_dma_segment_t *req_sg;
2605 
2606 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2607 			request = (uint8_t *)req_sg[0].ds_addr;
2608 		} else
2609 			request = ccb->smpio.smp_request;
2610 
2611 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2612 			bus_dma_segment_t *rsp_sg;
2613 
2614 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2615 			response = (uint8_t *)rsp_sg[0].ds_addr;
2616 		} else
2617 			response = ccb->smpio.smp_response;
2618 	} else {
2619 		request = ccb->smpio.smp_request;
2620 		response = ccb->smpio.smp_response;
2621 	}
2622 #endif //__FreeBSD_version >= 1000029
2623 
2624 	cm = mpr_alloc_command(sc);
2625 	if (cm == NULL) {
2626 		mpr_dprint(sc, MPR_ERROR,
2627 		    "%s: cannot allocate command\n", __func__);
2628 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2629 		xpt_done(ccb);
2630 		return;
2631 	}
2632 
2633 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2634 	bzero(req, sizeof(*req));
2635 	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2636 
2637 	/* Allow the chip to use any route to this SAS address. */
2638 	req->PhysicalPort = 0xff;
2639 
2640 	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2641 	req->SGLFlags =
2642 	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2643 
2644 	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
2645 	    "%#jx\n", __func__, (uintmax_t)sasaddr);
2646 
2647 	mpr_init_sge(cm, req, &req->SGL);
2648 
2649 	/*
2650 	 * Set up a uio to pass into mpr_map_command().  This allows us to
2651 	 * do one map command, and one busdma call in there.
2652 	 */
2653 	cm->cm_uio.uio_iov = cm->cm_iovec;
2654 	cm->cm_uio.uio_iovcnt = 2;
2655 	cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2656 
2657 	/*
2658 	 * The read/write flag isn't used by busdma, but set it just in
2659 	 * case.  This isn't exactly accurate, either, since we're going in
2660 	 * both directions.
2661 	 */
2662 	cm->cm_uio.uio_rw = UIO_WRITE;
2663 
2664 	cm->cm_iovec[0].iov_base = request;
2665 	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2666 	cm->cm_iovec[1].iov_base = response;
2667 	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2668 
2669 	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2670 			       cm->cm_iovec[1].iov_len;
2671 
2672 	/*
2673 	 * Trigger a warning message in mpr_data_cb() for the user if we
2674 	 * wind up exceeding two S/G segments.  The chip expects one
2675 	 * segment for the request and another for the response.
2676 	 */
2677 	cm->cm_max_segs = 2;
2678 
2679 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2680 	cm->cm_complete = mprsas_smpio_complete;
2681 	cm->cm_complete_data = ccb;
2682 
2683 	/*
2684 	 * Tell the mapping code that we're using a uio, and that this is
2685 	 * an SMP passthrough request.  There is a little special-case
2686 	 * logic there (in mpr_data_cb()) to handle the bidirectional
2687 	 * transfer.
2688 	 */
2689 	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
2690 			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
2691 
2692 	/* The chip data format is little endian. */
2693 	req->SASAddress.High = htole32(sasaddr >> 32);
2694 	req->SASAddress.Low = htole32(sasaddr);
2695 
2696 	/*
2697 	 * XXX Note that we don't have a timeout/abort mechanism here.
2698 	 * From the manual, it looks like task management requests only
2699 	 * work for SCSI IO and SATA passthrough requests.  We may need to
2700 	 * have a mechanism to retry requests in the event of a chip reset
2701 	 * at least.  Hopefully the chip will insure that any errors short
2702 	 * of that are relayed back to the driver.
2703 	 */
2704 	error = mpr_map_command(sc, cm);
2705 	if ((error != 0) && (error != EINPROGRESS)) {
2706 		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
2707 		    "mpr_map_command()\n", __func__, error);
2708 		goto bailout_error;
2709 	}
2710 
2711 	return;
2712 
2713 bailout_error:
2714 	mpr_free_command(sc, cm);
2715 	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2716 	xpt_done(ccb);
2717 	return;
2718 }
2719 
2720 static void
2721 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
2722 {
2723 	struct mpr_softc *sc;
2724 	struct mprsas_target *targ;
2725 	uint64_t sasaddr = 0;
2726 
2727 	sc = sassc->sc;
2728 
2729 	/*
2730 	 * Make sure the target exists.
2731 	 */
2732 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2733 	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
2734 	targ = &sassc->targets[ccb->ccb_h.target_id];
2735 	if (targ->handle == 0x0) {
2736 		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
2737 		    __func__, ccb->ccb_h.target_id);
2738 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2739 		xpt_done(ccb);
2740 		return;
2741 	}
2742 
2743 	/*
2744 	 * If this device has an embedded SMP target, we'll talk to it
2745 	 * directly.
2746 	 * figure out what the expander's address is.
2747 	 */
2748 	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2749 		sasaddr = targ->sasaddr;
2750 
2751 	/*
2752 	 * If we don't have a SAS address for the expander yet, try
2753 	 * grabbing it from the page 0x83 information cached in the
2754 	 * transport layer for this target.  LSI expanders report the
2755 	 * expander SAS address as the port-associated SAS address in
2756 	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
2757 	 * 0x83.
2758 	 *
2759 	 * XXX KDM disable this for now, but leave it commented out so that
2760 	 * it is obvious that this is another possible way to get the SAS
2761 	 * address.
2762 	 *
2763 	 * The parent handle method below is a little more reliable, and
2764 	 * the other benefit is that it works for devices other than SES
2765 	 * devices.  So you can send a SMP request to a da(4) device and it
2766 	 * will get routed to the expander that device is attached to.
2767 	 * (Assuming the da(4) device doesn't contain an SMP target...)
2768 	 */
2769 #if 0
2770 	if (sasaddr == 0)
2771 		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2772 #endif
2773 
2774 	/*
2775 	 * If we still don't have a SAS address for the expander, look for
2776 	 * the parent device of this device, which is probably the expander.
2777 	 */
2778 	if (sasaddr == 0) {
2779 #ifdef OLD_MPR_PROBE
2780 		struct mprsas_target *parent_target;
2781 #endif
2782 
2783 		if (targ->parent_handle == 0x0) {
2784 			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
2785 			    "a valid parent handle!\n", __func__, targ->handle);
2786 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2787 			goto bailout;
2788 		}
2789 #ifdef OLD_MPR_PROBE
2790 		parent_target = mprsas_find_target_by_handle(sassc, 0,
2791 		    targ->parent_handle);
2792 
2793 		if (parent_target == NULL) {
2794 			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
2795 			    "a valid parent target!\n", __func__, targ->handle);
2796 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2797 			goto bailout;
2798 		}
2799 
2800 		if ((parent_target->devinfo &
2801 		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2802 			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
2803 			    "does not have an SMP target!\n", __func__,
2804 			    targ->handle, parent_target->handle);
2805 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2806 			goto bailout;
2807 
2808 		}
2809 
2810 		sasaddr = parent_target->sasaddr;
2811 #else /* OLD_MPR_PROBE */
2812 		if ((targ->parent_devinfo &
2813 		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2814 			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
2815 			    "does not have an SMP target!\n", __func__,
2816 			    targ->handle, targ->parent_handle);
2817 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2818 			goto bailout;
2819 
2820 		}
2821 		if (targ->parent_sasaddr == 0x0) {
2822 			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
2823 			    "%d does not have a valid SAS address!\n", __func__,
2824 			    targ->handle, targ->parent_handle);
2825 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2826 			goto bailout;
2827 		}
2828 
2829 		sasaddr = targ->parent_sasaddr;
2830 #endif /* OLD_MPR_PROBE */
2831 
2832 	}
2833 
2834 	if (sasaddr == 0) {
2835 		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
2836 		    "handle %d\n", __func__, targ->handle);
2837 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2838 		goto bailout;
2839 	}
2840 	mprsas_send_smpcmd(sassc, ccb, sasaddr);
2841 
2842 	return;
2843 
2844 bailout:
2845 	xpt_done(ccb);
2846 
2847 }
2848 #endif //__FreeBSD_version >= 900026
2849 
2850 static void
2851 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
2852 {
2853 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2854 	struct mpr_softc *sc;
2855 	struct mpr_command *tm;
2856 	struct mprsas_target *targ;
2857 
2858 	MPR_FUNCTRACE(sassc->sc);
2859 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
2860 
2861 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2862 	    ("Target %d out of bounds in XPT_RESET_DEV\n",
2863 	    ccb->ccb_h.target_id));
2864 	sc = sassc->sc;
2865 	tm = mpr_alloc_command(sc);
2866 	if (tm == NULL) {
2867 		mpr_dprint(sc, MPR_ERROR,
2868 		    "command alloc failure in mprsas_action_resetdev\n");
2869 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2870 		xpt_done(ccb);
2871 		return;
2872 	}
2873 
2874 	targ = &sassc->targets[ccb->ccb_h.target_id];
2875 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2876 	req->DevHandle = htole16(targ->handle);
2877 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2878 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2879 
2880 	/* SAS Hard Link Reset / SATA Link Reset */
2881 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
2882 
2883 	tm->cm_data = NULL;
2884 	tm->cm_desc.HighPriority.RequestFlags =
2885 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2886 	tm->cm_complete = mprsas_resetdev_complete;
2887 	tm->cm_complete_data = ccb;
2888 	tm->cm_targ = targ;
2889 	mpr_map_command(sc, tm);
2890 }
2891 
2892 static void
2893 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
2894 {
2895 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2896 	union ccb *ccb;
2897 
2898 	MPR_FUNCTRACE(sc);
2899 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2900 
2901 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
2902 	ccb = tm->cm_complete_data;
2903 
2904 	/*
2905 	 * Currently there should be no way we can hit this case.  It only
2906 	 * happens when we have a failure to allocate chain frames, and
2907 	 * task management commands don't have S/G lists.
2908 	 */
2909 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2910 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2911 
2912 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2913 
2914 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
2915 		    "handle %#04x! This should not happen!\n", __func__,
2916 		    tm->cm_flags, req->DevHandle);
2917 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2918 		goto bailout;
2919 	}
2920 
2921 	mpr_dprint(sc, MPR_XINFO,
2922 	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
2923 	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
2924 
2925 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
2926 		ccb->ccb_h.status = CAM_REQ_CMP;
2927 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
2928 		    CAM_LUN_WILDCARD);
2929 	}
2930 	else
2931 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2932 
2933 bailout:
2934 
2935 	mprsas_free_tm(sc, tm);
2936 	xpt_done(ccb);
2937 }
2938 
2939 static void
2940 mprsas_poll(struct cam_sim *sim)
2941 {
2942 	struct mprsas_softc *sassc;
2943 
2944 	sassc = cam_sim_softc(sim);
2945 
2946 	if (sassc->sc->mpr_debug & MPR_TRACE) {
2947 		/* frequent debug messages during a panic just slow
2948 		 * everything down too much.
2949 		 */
2950 		mpr_printf(sassc->sc, "%s clearing MPR_TRACE\n", __func__);
2951 		sassc->sc->mpr_debug &= ~MPR_TRACE;
2952 	}
2953 
2954 	mpr_intr_locked(sassc->sc);
2955 }
2956 
2957 static void
2958 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
2959     void *arg)
2960 {
2961 	struct mpr_softc *sc;
2962 
2963 	sc = (struct mpr_softc *)callback_arg;
2964 
2965 	switch (code) {
2966 #if (__FreeBSD_version >= 1000006) || \
2967     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
2968 	case AC_ADVINFO_CHANGED: {
2969 		struct mprsas_target *target;
2970 		struct mprsas_softc *sassc;
2971 		struct scsi_read_capacity_data_long rcap_buf;
2972 		struct ccb_dev_advinfo cdai;
2973 		struct mprsas_lun *lun;
2974 		lun_id_t lunid;
2975 		int found_lun;
2976 		uintptr_t buftype;
2977 
2978 		buftype = (uintptr_t)arg;
2979 
2980 		found_lun = 0;
2981 		sassc = sc->sassc;
2982 
2983 		/*
2984 		 * We're only interested in read capacity data changes.
2985 		 */
2986 		if (buftype != CDAI_TYPE_RCAPLONG)
2987 			break;
2988 
2989 		/*
2990 		 * We should have a handle for this, but check to make sure.
2991 		 */
2992 		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
2993 		    ("Target %d out of bounds in mprsas_async\n",
2994 		    xpt_path_target_id(path)));
2995 		target = &sassc->targets[xpt_path_target_id(path)];
2996 		if (target->handle == 0)
2997 			break;
2998 
2999 		lunid = xpt_path_lun_id(path);
3000 
3001 		SLIST_FOREACH(lun, &target->luns, lun_link) {
3002 			if (lun->lun_id == lunid) {
3003 				found_lun = 1;
3004 				break;
3005 			}
3006 		}
3007 
3008 		if (found_lun == 0) {
3009 			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3010 			    M_NOWAIT | M_ZERO);
3011 			if (lun == NULL) {
3012 				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
3013 				    "LUN for EEDP support.\n");
3014 				break;
3015 			}
3016 			lun->lun_id = lunid;
3017 			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3018 		}
3019 
3020 		bzero(&rcap_buf, sizeof(rcap_buf));
3021 		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3022 		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3023 		cdai.ccb_h.flags = CAM_DIR_IN;
3024 		cdai.buftype = CDAI_TYPE_RCAPLONG;
3025 		cdai.flags = 0;
3026 		cdai.bufsiz = sizeof(rcap_buf);
3027 		cdai.buf = (uint8_t *)&rcap_buf;
3028 		xpt_action((union ccb *)&cdai);
3029 		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3030 			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
3031 
3032 		if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
3033 		    && (rcap_buf.prot & SRC16_PROT_EN)) {
3034 			lun->eedp_formatted = TRUE;
3035 			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3036 		} else {
3037 			lun->eedp_formatted = FALSE;
3038 			lun->eedp_block_size = 0;
3039 		}
3040 		break;
3041 	}
3042 #endif
3043 	case AC_FOUND_DEVICE: {
3044 		struct ccb_getdev *cgd;
3045 
3046 		cgd = arg;
3047 		mprsas_prepare_ssu(sc, path, cgd);
3048 #if (__FreeBSD_version < 901503) || \
3049     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3050 		mprsas_check_eedp(sc, path, cgd);
3051 #endif
3052 		break;
3053 	}
3054 	default:
3055 		break;
3056 	}
3057 }
3058 
3059 static void
3060 mprsas_prepare_ssu(struct mpr_softc *sc, struct cam_path *path,
3061     struct ccb_getdev *cgd)
3062 {
3063 	struct mprsas_softc *sassc = sc->sassc;
3064 	path_id_t pathid;
3065 	target_id_t targetid;
3066 	lun_id_t lunid;
3067 	struct mprsas_target *target;
3068 	struct mprsas_lun *lun;
3069 	uint8_t	found_lun;
3070 
3071 	sassc = sc->sassc;
3072 	pathid = cam_sim_path(sassc->sim);
3073 	targetid = xpt_path_target_id(path);
3074 	lunid = xpt_path_lun_id(path);
3075 
3076 	KASSERT(targetid < sassc->maxtargets,
3077 	    ("Target %d out of bounds in mprsas_prepare_ssu\n", targetid));
3078 	target = &sassc->targets[targetid];
3079 	if (target->handle == 0x0)
3080 		return;
3081 
3082 	/*
3083 	 * If LUN is already in list, don't create a new one.
3084 	 */
3085 	found_lun = FALSE;
3086 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3087 		if (lun->lun_id == lunid) {
3088 			found_lun = TRUE;
3089 			break;
3090 		}
3091 	}
3092 	if (!found_lun) {
3093 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3094 		    M_NOWAIT | M_ZERO);
3095 		if (lun == NULL) {
3096 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3097 			    "preparing SSU.\n");
3098 			return;
3099 		}
3100 		lun->lun_id = lunid;
3101 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3102 	}
3103 
3104 	/*
3105 	 * If this is a SATA direct-access end device, mark it so that a SCSI
3106 	 * StartStopUnit command will be sent to it when the driver is being
3107 	 * shutdown.
3108 	 */
3109 	if (((cgd->inq_data.device & 0x1F) == T_DIRECT) &&
3110 	    (target->devinfo & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
3111 	    ((target->devinfo & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
3112 	    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
3113 		lun->stop_at_shutdown = TRUE;
3114 	}
3115 }
3116 
3117 #if (__FreeBSD_version < 901503) || \
3118     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3119 static void
3120 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3121     struct ccb_getdev *cgd)
3122 {
3123 	struct mprsas_softc *sassc = sc->sassc;
3124 	struct ccb_scsiio *csio;
3125 	struct scsi_read_capacity_16 *scsi_cmd;
3126 	struct scsi_read_capacity_eedp *rcap_buf;
3127 	path_id_t pathid;
3128 	target_id_t targetid;
3129 	lun_id_t lunid;
3130 	union ccb *ccb;
3131 	struct cam_path *local_path;
3132 	struct mprsas_target *target;
3133 	struct mprsas_lun *lun;
3134 	uint8_t	found_lun;
3135 	char path_str[64];
3136 
3137 	sassc = sc->sassc;
3138 	pathid = cam_sim_path(sassc->sim);
3139 	targetid = xpt_path_target_id(path);
3140 	lunid = xpt_path_lun_id(path);
3141 
3142 	KASSERT(targetid < sassc->maxtargets,
3143 	    ("Target %d out of bounds in mprsas_check_eedp\n", targetid));
3144 	target = &sassc->targets[targetid];
3145 	if (target->handle == 0x0)
3146 		return;
3147 
3148 	/*
3149 	 * Determine if the device is EEDP capable.
3150 	 *
3151 	 * If this flag is set in the inquiry data, the device supports
3152 	 * protection information, and must support the 16 byte read capacity
3153 	 * command, otherwise continue without sending read cap 16
3154 	 */
3155 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3156 		return;
3157 
3158 	/*
3159 	 * Issue a READ CAPACITY 16 command.  This info is used to determine if
3160 	 * the LUN is formatted for EEDP support.
3161 	 */
3162 	ccb = xpt_alloc_ccb_nowait();
3163 	if (ccb == NULL) {
3164 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3165 		    "support.\n");
3166 		return;
3167 	}
3168 
3169 	if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid)
3170 	    != CAM_REQ_CMP) {
3171 		mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3172 		    "support\n");
3173 		xpt_free_ccb(ccb);
3174 		return;
3175 	}
3176 
3177 	/*
3178 	 * If LUN is already in list, don't create a new one.
3179 	 */
3180 	found_lun = FALSE;
3181 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3182 		if (lun->lun_id == lunid) {
3183 			found_lun = TRUE;
3184 			break;
3185 		}
3186 	}
3187 	if (!found_lun) {
3188 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3189 		    M_NOWAIT | M_ZERO);
3190 		if (lun == NULL) {
3191 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3192 			    "EEDP support.\n");
3193 			xpt_free_path(local_path);
3194 			xpt_free_ccb(ccb);
3195 			return;
3196 		}
3197 		lun->lun_id = lunid;
3198 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3199 	}
3200 
3201 	xpt_path_string(local_path, path_str, sizeof(path_str));
3202 	mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3203 	    path_str, target->handle);
3204 
3205 	/*
3206 	 * Issue a READ CAPACITY 16 command for the LUN.  The
3207 	 * mprsas_read_cap_done function will load the read cap info into the
3208 	 * LUN struct.
3209 	 */
3210 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3211 	    M_NOWAIT | M_ZERO);
3212 	if (rcap_buf == NULL) {
3213 		mpr_dprint(sc, MPR_FAULT, "Unable to alloc read capacity "
3214 		    "buffer for EEDP support.\n");
3215 		xpt_free_path(ccb->ccb_h.path);
3216 		xpt_free_ccb(ccb);
3217 		return;
3218 	}
3219 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3220 	csio = &ccb->csio;
3221 	csio->ccb_h.func_code = XPT_SCSI_IO;
3222 	csio->ccb_h.flags = CAM_DIR_IN;
3223 	csio->ccb_h.retry_count = 4;
3224 	csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3225 	csio->ccb_h.timeout = 60000;
3226 	csio->data_ptr = (uint8_t *)rcap_buf;
3227 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3228 	csio->sense_len = MPR_SENSE_LEN;
3229 	csio->cdb_len = sizeof(*scsi_cmd);
3230 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3231 
3232 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3233 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3234 	scsi_cmd->opcode = 0x9E;
3235 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3236 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3237 
3238 	ccb->ccb_h.ppriv_ptr1 = sassc;
3239 	xpt_action(ccb);
3240 }
3241 
3242 static void
3243 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3244 {
3245 	struct mprsas_softc *sassc;
3246 	struct mprsas_target *target;
3247 	struct mprsas_lun *lun;
3248 	struct scsi_read_capacity_eedp *rcap_buf;
3249 
3250 	if (done_ccb == NULL)
3251 		return;
3252 
3253 	/* Driver need to release devq, it Scsi command is
3254 	 * generated by driver internally.
3255 	 * Currently there is a single place where driver
3256 	 * calls scsi command internally. In future if driver
3257 	 * calls more scsi command internally, it needs to release
3258 	 * devq internally, since those command will not go back to
3259 	 * cam_periph.
3260 	 */
3261 	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3262         	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3263 		xpt_release_devq(done_ccb->ccb_h.path,
3264 			       	/*count*/ 1, /*run_queue*/TRUE);
3265 	}
3266 
3267 	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3268 
3269 	/*
3270 	 * Get the LUN ID for the path and look it up in the LUN list for the
3271 	 * target.
3272 	 */
3273 	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3274 	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
3275 	    ("Target %d out of bounds in mprsas_read_cap_done\n",
3276 	    done_ccb->ccb_h.target_id));
3277 	target = &sassc->targets[done_ccb->ccb_h.target_id];
3278 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3279 		if (lun->lun_id != done_ccb->ccb_h.target_lun)
3280 			continue;
3281 
3282 		/*
3283 		 * Got the LUN in the target's LUN list.  Fill it in with EEDP
3284 		 * info.  If the READ CAP 16 command had some SCSI error (common
3285 		 * if command is not supported), mark the lun as not supporting
3286 		 * EEDP and set the block size to 0.
3287 		 */
3288 		if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3289 		    || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3290 			lun->eedp_formatted = FALSE;
3291 			lun->eedp_block_size = 0;
3292 			break;
3293 		}
3294 
3295 		if (rcap_buf->protect & 0x01) {
3296 			mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for "
3297 			    "target ID %d is formatted for EEDP "
3298 			    "support.\n", done_ccb->ccb_h.target_lun,
3299 			    done_ccb->ccb_h.target_id);
3300 			lun->eedp_formatted = TRUE;
3301 			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3302 		}
3303 		break;
3304 	}
3305 
3306 	// Finished with this CCB and path.
3307 	free(rcap_buf, M_MPR);
3308 	xpt_free_path(done_ccb->ccb_h.path);
3309 	xpt_free_ccb(done_ccb);
3310 }
3311 #endif /* (__FreeBSD_version < 901503) || \
3312           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3313 
3314 int
3315 mprsas_startup(struct mpr_softc *sc)
3316 {
3317 	/*
3318 	 * Send the port enable message and set the wait_for_port_enable flag.
3319 	 * This flag helps to keep the simq frozen until all discovery events
3320 	 * are processed.
3321 	 */
3322 	sc->wait_for_port_enable = 1;
3323 	mprsas_send_portenable(sc);
3324 	return (0);
3325 }
3326 
3327 static int
3328 mprsas_send_portenable(struct mpr_softc *sc)
3329 {
3330 	MPI2_PORT_ENABLE_REQUEST *request;
3331 	struct mpr_command *cm;
3332 
3333 	MPR_FUNCTRACE(sc);
3334 
3335 	if ((cm = mpr_alloc_command(sc)) == NULL)
3336 		return (EBUSY);
3337 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3338 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3339 	request->MsgFlags = 0;
3340 	request->VP_ID = 0;
3341 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3342 	cm->cm_complete = mprsas_portenable_complete;
3343 	cm->cm_data = NULL;
3344 	cm->cm_sge = NULL;
3345 
3346 	mpr_map_command(sc, cm);
3347 	mpr_dprint(sc, MPR_XINFO,
3348 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3349 	    cm, cm->cm_req, cm->cm_complete);
3350 	return (0);
3351 }
3352 
3353 static void
3354 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3355 {
3356 	MPI2_PORT_ENABLE_REPLY *reply;
3357 	struct mprsas_softc *sassc;
3358 
3359 	MPR_FUNCTRACE(sc);
3360 	sassc = sc->sassc;
3361 
3362 	/*
3363 	 * Currently there should be no way we can hit this case.  It only
3364 	 * happens when we have a failure to allocate chain frames, and
3365 	 * port enable commands don't have S/G lists.
3366 	 */
3367 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3368 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3369 		    "This should not happen!\n", __func__, cm->cm_flags);
3370 	}
3371 
3372 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3373 	if (reply == NULL)
3374 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3375 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3376 	    MPI2_IOCSTATUS_SUCCESS)
3377 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3378 
3379 	mpr_free_command(sc, cm);
3380 	if (sc->mpr_ich.ich_arg != NULL) {
3381 		mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n");
3382 		config_intrhook_disestablish(&sc->mpr_ich);
3383 		sc->mpr_ich.ich_arg = NULL;
3384 	}
3385 
3386 	/*
3387 	 * Done waiting for port enable to complete.  Decrement the refcount.
3388 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3389 	 * take place.
3390 	 */
3391 	sc->wait_for_port_enable = 0;
3392 	sc->port_enable_complete = 1;
3393 	wakeup(&sc->port_enable_complete);
3394 	mprsas_startup_decrement(sassc);
3395 }
3396 
3397 int
3398 mprsas_check_id(struct mprsas_softc *sassc, int id)
3399 {
3400 	struct mpr_softc *sc = sassc->sc;
3401 	char *ids;
3402 	char *name;
3403 
3404 	ids = &sc->exclude_ids[0];
3405 	while((name = strsep(&ids, ",")) != NULL) {
3406 		if (name[0] == '\0')
3407 			continue;
3408 		if (strtol(name, NULL, 0) == (long)id)
3409 			return (1);
3410 	}
3411 
3412 	return (0);
3413 }
3414