xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision cc3f4b99653c34ae64f8a1fddea370abefef680e)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2014 LSI Corp.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 /* Communications core for LSI MPT3 */
32 
33 /* TODO Move headers to mprvar */
34 #include <sys/types.h>
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/selinfo.h>
39 #include <sys/module.h>
40 #include <sys/bus.h>
41 #include <sys/conf.h>
42 #include <sys/bio.h>
43 #include <sys/malloc.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/endian.h>
47 #include <sys/queue.h>
48 #include <sys/kthread.h>
49 #include <sys/taskqueue.h>
50 #include <sys/sbuf.h>
51 
52 #include <machine/bus.h>
53 #include <machine/resource.h>
54 #include <sys/rman.h>
55 
56 #include <machine/stdarg.h>
57 
58 #include <cam/cam.h>
59 #include <cam/cam_ccb.h>
60 #include <cam/cam_debug.h>
61 #include <cam/cam_sim.h>
62 #include <cam/cam_xpt_sim.h>
63 #include <cam/cam_xpt_periph.h>
64 #include <cam/cam_periph.h>
65 #include <cam/scsi/scsi_all.h>
66 #include <cam/scsi/scsi_message.h>
67 #if __FreeBSD_version >= 900026
68 #include <cam/scsi/smp_all.h>
69 #endif
70 
71 #include <dev/mpr/mpi/mpi2_type.h>
72 #include <dev/mpr/mpi/mpi2.h>
73 #include <dev/mpr/mpi/mpi2_ioc.h>
74 #include <dev/mpr/mpi/mpi2_sas.h>
75 #include <dev/mpr/mpi/mpi2_cnfg.h>
76 #include <dev/mpr/mpi/mpi2_init.h>
77 #include <dev/mpr/mpi/mpi2_tool.h>
78 #include <dev/mpr/mpr_ioctl.h>
79 #include <dev/mpr/mprvar.h>
80 #include <dev/mpr/mpr_table.h>
81 #include <dev/mpr/mpr_sas.h>
82 
83 #define MPRSAS_DISCOVERY_TIMEOUT	20
84 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
85 
86 /*
87  * static array to check SCSI OpCode for EEDP protection bits
88  */
89 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
90 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
91 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
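/*
 * The table below is indexed by the SCSI opcode (CDB byte 0).  Opcodes that
 * can carry protection information map to the EEDP check/insert flags, e.g.
 * READ(10)/(12)/(16) (0x28/0xa8/0x88) map to PRO_R and WRITE(10)/(12)/(16)
 * (0x2a/0xaa/0x8a) map to PRO_W; everything else is zero.  The SCSI I/O path
 * uses this to pick the EEDP flags for a request with a single lookup.
 */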
92 static uint8_t op_code_prot[256] = {
93 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
94 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
95 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
96 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
97 	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
98 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
102 	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
104 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
109 };
110 
111 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
112 
113 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
114 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
115 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
116 static void mprsas_poll(struct cam_sim *sim);
117 static void mprsas_scsiio_timeout(void *data);
118 static void mprsas_abort_complete(struct mpr_softc *sc,
119     struct mpr_command *cm);
120 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
121 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
123 static void mprsas_resetdev_complete(struct mpr_softc *,
124     struct mpr_command *);
125 static int  mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
126     struct mpr_command *cm);
127 static int  mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm,
128     uint8_t type);
129 static void mprsas_async(void *callback_arg, uint32_t code,
130     struct cam_path *path, void *arg);
131 static void mprsas_prepare_ssu(struct mpr_softc *sc, struct cam_path *path,
132     struct ccb_getdev *cgd);
133 #if (__FreeBSD_version < 901503) || \
134     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
135 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
136     struct ccb_getdev *cgd);
137 static void mprsas_read_cap_done(struct cam_periph *periph,
138     union ccb *done_ccb);
139 #endif
140 static int mprsas_send_portenable(struct mpr_softc *sc);
141 static void mprsas_portenable_complete(struct mpr_softc *sc,
142     struct mpr_command *cm);
143 
144 #if __FreeBSD_version >= 900026
145 static void
146 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc,
148     union ccb *ccb, uint64_t sasaddr);
149 static void
150 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
151 #endif
152 
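/*
 * Find the target, starting at index 'start', whose cached firmware device
 * handle matches 'handle'.  Returns NULL if no target owns that handle.
 */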
153 struct mprsas_target *
154 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
155     uint16_t handle)
156 {
157 	struct mprsas_target *target;
158 	int i;
159 
160 	for (i = start; i < sassc->maxtargets; i++) {
161 		target = &sassc->targets[i];
162 		if (target->handle == handle)
163 			return (target);
164 	}
165 
166 	return (NULL);
167 }
168 
169 /* we need to freeze the simq during attach and diag reset, to avoid failing
170  * commands before device handles have been found by discovery.  Since
171  * discovery involves reading config pages and possibly sending commands,
172  * discovery actions may continue even after we receive the end of discovery
173  * event, so refcount discovery actions instead of assuming we can unfreeze
174  * the simq when we get the event.
175  */
176 void
177 mprsas_startup_increment(struct mprsas_softc *sassc)
178 {
179 	MPR_FUNCTRACE(sassc->sc);
180 
181 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
182 		if (sassc->startup_refcount++ == 0) {
183 			/* just starting, freeze the simq */
184 			mpr_dprint(sassc->sc, MPR_INIT,
185 			    "%s freezing simq\n", __func__);
186 #if (__FreeBSD_version >= 1000039) || \
187     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
188 			xpt_hold_boot();
189 #endif
190 			xpt_freeze_simq(sassc->sim, 1);
191 		}
192 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
193 		    sassc->startup_refcount);
194 	}
195 }
196 
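/*
 * If the SIM queue was frozen because we ran out of commands
 * (MPRSAS_QUEUE_FROZEN), release it so a reinit does not leave CAM wedged.
 */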
197 void
198 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
199 {
200 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
201 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
202 		xpt_release_simq(sassc->sim, 1);
203 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
204 	}
205 }
206 
207 void
208 mprsas_startup_decrement(struct mprsas_softc *sassc)
209 {
210 	MPR_FUNCTRACE(sassc->sc);
211 
212 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
213 		if (--sassc->startup_refcount == 0) {
214 			/* finished all discovery-related actions, release
215 			 * the simq and rescan for the latest topology.
216 			 */
217 			mpr_dprint(sassc->sc, MPR_INIT,
218 			    "%s releasing simq\n", __func__);
219 			sassc->flags &= ~MPRSAS_IN_STARTUP;
220 			xpt_release_simq(sassc->sim, 1);
221 #if (__FreeBSD_version >= 1000039) || \
222     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
223 			xpt_release_boot();
224 #else
225 			mprsas_rescan_target(sassc->sc, NULL);
226 #endif
227 		}
228 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
229 		    sassc->startup_refcount);
230 	}
231 }
232 
233 /* LSI's firmware requires us to stop sending commands when we're doing task
234  * management, so refcount the TMs and keep the simq frozen when any are in
235  * use.
236  */
237 struct mpr_command *
238 mprsas_alloc_tm(struct mpr_softc *sc)
239 {
240 	struct mpr_command *tm;
241 
242 	MPR_FUNCTRACE(sc);
243 	tm = mpr_alloc_high_priority_command(sc);
244 	if (tm != NULL) {
245 		if (sc->sassc->tm_count++ == 0) {
246 			mpr_dprint(sc, MPR_RECOVERY,
247 			    "%s freezing simq\n", __func__);
248 			xpt_freeze_simq(sc->sassc->sim, 1);
249 		}
250 		mpr_dprint(sc, MPR_RECOVERY, "%s tm_count %u\n", __func__,
251 		    sc->sassc->tm_count);
252 	}
253 	return tm;
254 }
255 
256 void
257 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
258 {
259 	mpr_dprint(sc, MPR_TRACE, "%s", __func__);
260 	if (tm == NULL)
261 		return;
262 
263 	/* if there are no TMs in use, we can release the simq.  We use our
264 	 * own refcount so that it's easier for a diag reset to clean up and
265 	 * release the simq.
266 	 */
267 	if (--sc->sassc->tm_count == 0) {
268 		mpr_dprint(sc, MPR_RECOVERY, "%s releasing simq\n", __func__);
269 		xpt_release_simq(sc->sassc->sim, 1);
270 	}
271 	mpr_dprint(sc, MPR_RECOVERY, "%s tm_count %u\n", __func__,
272 	    sc->sassc->tm_count);
273 
274 	mpr_free_high_priority_command(sc, tm);
275 }
276 
277 void
278 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
279 {
280 	struct mprsas_softc *sassc = sc->sassc;
281 	path_id_t pathid;
282 	target_id_t targetid;
283 	union ccb *ccb;
284 
285 	MPR_FUNCTRACE(sc);
286 	pathid = cam_sim_path(sassc->sim);
287 	if (targ == NULL)
288 		targetid = CAM_TARGET_WILDCARD;
289 	else
290 		targetid = targ - sassc->targets;
291 
292 	/*
293 	 * Allocate a CCB and schedule a rescan.
294 	 */
295 	ccb = xpt_alloc_ccb_nowait();
296 	if (ccb == NULL) {
297 		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
298 		return;
299 	}
300 
301 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
302 	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
303 		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
304 		xpt_free_ccb(ccb);
305 		return;
306 	}
307 
308 	if (targetid == CAM_TARGET_WILDCARD)
309 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
310 	else
311 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
312 
313 	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
314 	xpt_rescan(ccb);
315 }
316 
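/*
 * Log a message for a command, prefixed with its CAM path (or with the
 * SIM/target/LUN if no CCB is attached) and its request SMID.  Nothing is
 * printed unless the given debug level is enabled.
 */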
317 static void
318 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
319 {
320 	struct sbuf sb;
321 	va_list ap;
322 	char str[192];
323 	char path_str[64];
324 
325 	if (cm == NULL)
326 		return;
327 
328 	/* No need to be in here if debugging isn't enabled */
329 	if ((cm->cm_sc->mpr_debug & level) == 0)
330 		return;
331 
332 	sbuf_new(&sb, str, sizeof(str), 0);
333 
334 	va_start(ap, fmt);
335 
336 	if (cm->cm_ccb != NULL) {
337 		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
338 		    sizeof(path_str));
339 		sbuf_cat(&sb, path_str);
340 		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
341 			scsi_command_string(&cm->cm_ccb->csio, &sb);
342 			sbuf_printf(&sb, "length %d ",
343 			    cm->cm_ccb->csio.dxfer_len);
344 		}
345 	} else {
346 		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
347 		    cam_sim_name(cm->cm_sc->sassc->sim),
348 		    cam_sim_unit(cm->cm_sc->sassc->sim),
349 		    cam_sim_bus(cm->cm_sc->sassc->sim),
350 		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
351 		    cm->cm_lun);
352 	}
353 
354 	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
355 	sbuf_vprintf(&sb, fmt, ap);
356 	sbuf_finish(&sb);
357 	mpr_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));
358 
359 	va_end(ap);
360 }
361 
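/*
 * Completion handler for the target reset issued by
 * mprsas_prepare_volume_remove().  On success the target's cached state is
 * cleared; unlike bare-drive removal, no REMOVE_DEVICE operation follows.
 */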
362 static void
363 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
364 {
365 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
366 	struct mprsas_target *targ;
367 	uint16_t handle;
368 
369 	MPR_FUNCTRACE(sc);
370 
371 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
372 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
373 	targ = tm->cm_targ;
374 
375 	if (reply == NULL) {
376 		/* XXX retry the remove after the diag reset completes? */
377 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
378 		    "0x%04x\n", __func__, handle);
379 		mprsas_free_tm(sc, tm);
380 		return;
381 	}
382 
383 	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
384 		mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
385 		    "device 0x%x\n", reply->IOCStatus, handle);
386 		mprsas_free_tm(sc, tm);
387 		return;
388 	}
389 
390 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
391 	    reply->TerminationCount);
392 	mpr_free_reply(sc, tm->cm_reply_data);
393 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
394 
395 	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
396 	    targ->tid, handle);
397 
398 	/*
399 	 * Don't clear target if remove fails because things will get confusing.
400 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
401 	 * this target id if possible, and so we can assign the same target id
402 	 * to this device if it comes back in the future.
403 	 */
404 	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
405 		targ = tm->cm_targ;
406 		targ->handle = 0x0;
407 		targ->encl_handle = 0x0;
408 		targ->encl_level_valid = 0x0;
409 		targ->encl_level = 0x0;
410 		targ->connector_name[0] = ' ';
411 		targ->connector_name[1] = ' ';
412 		targ->connector_name[2] = ' ';
413 		targ->connector_name[3] = ' ';
414 		targ->encl_slot = 0x0;
415 		targ->exp_dev_handle = 0x0;
416 		targ->phy_num = 0x0;
417 		targ->linkrate = 0x0;
418 		targ->devinfo = 0x0;
419 		targ->flags = 0x0;
420 		targ->scsi_req_desc_type = 0;
421 	}
422 
423 	mprsas_free_tm(sc, tm);
424 }
425 
426 
427 /*
428  * There is no need to send MPI2_SAS_OP_REMOVE_DEVICE for volume removal.
429  * Otherwise, volume deletion is handled the same way as bare drive removal.
430  */
431 void
432 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
433 {
434 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
435 	struct mpr_softc *sc;
436 	struct mpr_command *cm;
437 	struct mprsas_target *targ = NULL;
438 
439 	MPR_FUNCTRACE(sassc->sc);
440 	sc = sassc->sc;
441 
442 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
443 	if (targ == NULL) {
444 		/* FIXME: what is the action? */
445 		/* We don't know about this device? */
446 		mpr_dprint(sc, MPR_ERROR,
447 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
448 		return;
449 	}
450 
451 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
452 
453 	cm = mprsas_alloc_tm(sc);
454 	if (cm == NULL) {
455 		mpr_dprint(sc, MPR_ERROR,
456 		    "%s: command alloc failure\n", __func__);
457 		return;
458 	}
459 
460 	mprsas_rescan_target(sc, targ);
461 
462 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
463 	req->DevHandle = targ->handle;
464 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
465 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
466 
467 	/* SAS Hard Link Reset / SATA Link Reset */
468 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
469 
470 	cm->cm_targ = targ;
471 	cm->cm_data = NULL;
472 	cm->cm_desc.HighPriority.RequestFlags =
473 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
474 	cm->cm_complete = mprsas_remove_volume;
475 	cm->cm_complete_data = (void *)(uintptr_t)handle;
476 	mpr_map_command(sc, cm);
477 }
478 
479 /*
480  * The MPT2 firmware performs debounce on the link to avoid transient link
481  * errors and false removals.  When it does decide that link has been lost
482  * and a device needs to go away, it expects that the host will perform a
483  * target reset and then an op remove.  The reset has the side-effect of
484  * aborting any outstanding requests for the device, which is required for
485  * the op-remove to succeed.  It's not clear if the host should check for
486  * the device coming back alive after the reset.
487  */
488 void
489 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
490 {
491 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
492 	struct mpr_softc *sc;
493 	struct mpr_command *cm;
494 	struct mprsas_target *targ = NULL;
495 
496 	MPR_FUNCTRACE(sassc->sc);
497 
498 	sc = sassc->sc;
499 
500 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
501 	if (targ == NULL) {
502 		/* FIXME: what is the action? */
503 		/* We don't know about this device? */
504 		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
505 		    __func__, handle);
506 		return;
507 	}
508 
509 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
510 
511 	cm = mprsas_alloc_tm(sc);
512 	if (cm == NULL) {
513 		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
514 		    __func__);
515 		return;
516 	}
517 
518 	mprsas_rescan_target(sc, targ);
519 
520 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
521 	memset(req, 0, sizeof(*req));
522 	req->DevHandle = htole16(targ->handle);
523 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
524 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
525 
526 	/* SAS Hard Link Reset / SATA Link Reset */
527 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
528 
529 	cm->cm_targ = targ;
530 	cm->cm_data = NULL;
531 	cm->cm_desc.HighPriority.RequestFlags =
532 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
533 	cm->cm_complete = mprsas_remove_device;
534 	cm->cm_complete_data = (void *)(uintptr_t)handle;
535 	mpr_map_command(sc, cm);
536 }
537 
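/*
 * Completion handler for the target reset issued by mprsas_prepare_remove().
 * The same command is reused to send the SAS IO unit control REMOVE_DEVICE
 * operation, and any commands still queued to the target are completed with
 * CAM_DEV_NOT_THERE.
 */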
538 static void
539 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
540 {
541 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
542 	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
543 	struct mprsas_target *targ;
544 	struct mpr_command *next_cm;
545 	uint16_t handle;
546 
547 	MPR_FUNCTRACE(sc);
548 
549 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
550 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
551 	targ = tm->cm_targ;
552 
553 	/*
554 	 * Currently there should be no way we can hit this case.  It only
555 	 * happens when we have a failure to allocate chain frames, and
556 	 * task management commands don't have S/G lists.
557 	 */
558 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
559 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
560 		    "handle %#04x! This should not happen!\n", __func__,
561 		    tm->cm_flags, handle);
562 		mprsas_free_tm(sc, tm);
563 		return;
564 	}
565 
566 	if (reply == NULL) {
567 		/* XXX retry the remove after the diag reset completes? */
568 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
569 		    "0x%04x\n", __func__, handle);
570 		mprsas_free_tm(sc, tm);
571 		return;
572 	}
573 
574 	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
575 		mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
576 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
577 		mprsas_free_tm(sc, tm);
578 		return;
579 	}
580 
581 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
582 	    le32toh(reply->TerminationCount));
583 	mpr_free_reply(sc, tm->cm_reply_data);
584 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
585 
586 	/* Reuse the existing command */
587 	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
588 	memset(req, 0, sizeof(*req));
589 	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
590 	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
591 	req->DevHandle = htole16(handle);
592 	tm->cm_data = NULL;
593 	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
594 	tm->cm_complete = mprsas_remove_complete;
595 	tm->cm_complete_data = (void *)(uintptr_t)handle;
596 
597 	mpr_map_command(sc, tm);
598 
599 	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
600 	    targ->tid, handle);
601 	if (targ->encl_level_valid) {
602 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
603 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
604 		    targ->connector_name);
605 	}
606 	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
607 		union ccb *ccb;
608 
609 		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
610 		ccb = tm->cm_complete_data;
611 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
612 		mprsas_scsiio_complete(sc, tm);
613 	}
614 }
615 
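/*
 * Final step of device removal: the REMOVE_DEVICE operation has completed.
 * On success, clear the target's cached state and free its LUN list; the
 * devname and sasaddr are left intact so the same target id can be given to
 * the device if it returns.
 */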
616 static void
617 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
618 {
619 	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
620 	uint16_t handle;
621 	struct mprsas_target *targ;
622 	struct mprsas_lun *lun;
623 
624 	MPR_FUNCTRACE(sc);
625 
626 	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
627 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
628 
629 	/*
630 	 * Currently there should be no way we can hit this case.  It only
631 	 * happens when we have a failure to allocate chain frames, and
632 	 * task management commands don't have S/G lists.
633 	 */
634 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
635 		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
636 		    "handle %#04x! This should not happen!\n", __func__,
637 		    tm->cm_flags, handle);
638 		mprsas_free_tm(sc, tm);
639 		return;
640 	}
641 
642 	if (reply == NULL) {
643 		/* most likely a chip reset */
644 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
645 		    "0x%04x\n", __func__, handle);
646 		mprsas_free_tm(sc, tm);
647 		return;
648 	}
649 
650 	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
651 	    __func__, handle, le16toh(reply->IOCStatus));
652 
653 	/*
654 	 * Don't clear target if remove fails because things will get confusing.
655 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
656 	 * this target id if possible, and so we can assign the same target id
657 	 * to this device if it comes back in the future.
658 	 */
659 	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
660 		targ = tm->cm_targ;
661 		targ->handle = 0x0;
662 		targ->encl_handle = 0x0;
663 		targ->encl_level_valid = 0x0;
664 		targ->encl_level = 0x0;
665 		targ->connector_name[0] = ' ';
666 		targ->connector_name[1] = ' ';
667 		targ->connector_name[2] = ' ';
668 		targ->connector_name[3] = ' ';
669 		targ->encl_slot = 0x0;
670 		targ->exp_dev_handle = 0x0;
671 		targ->phy_num = 0x0;
672 		targ->linkrate = 0x0;
673 		targ->devinfo = 0x0;
674 		targ->flags = 0x0;
675 		targ->scsi_req_desc_type = 0;
676 
677 		while (!SLIST_EMPTY(&targ->luns)) {
678 			lun = SLIST_FIRST(&targ->luns);
679 			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
680 			free(lun, M_MPR);
681 		}
682 	}
683 
684 	mprsas_free_tm(sc, tm);
685 }
686 
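/*
 * Subscribe to the firmware events the SAS layer needs to track device,
 * discovery, topology, IR (RAID) and temperature state, and install
 * mprsas_evt_handler as the callback for them.
 */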
687 static int
688 mprsas_register_events(struct mpr_softc *sc)
689 {
690 	uint8_t events[16];
691 
692 	bzero(events, 16);
693 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
694 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
695 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
696 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
697 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
698 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
699 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
700 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
701 	setbit(events, MPI2_EVENT_IR_VOLUME);
702 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
703 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
704 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
705 
706 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
707 	    &sc->sassc->mprsas_eh);
708 
709 	return (0);
710 }
711 
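/*
 * Attach the CAM side of the driver: allocate the SIM and target array,
 * register the bus, start the firmware event taskqueue, and freeze the SIM
 * queue until initial discovery completes.  An async callback is also
 * registered so EEDP capabilities can be probed as devices are found.
 */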
712 int
713 mpr_attach_sas(struct mpr_softc *sc)
714 {
715 	struct mprsas_softc *sassc;
716 	cam_status status;
717 	int unit, error = 0;
718 
719 	MPR_FUNCTRACE(sc);
720 
721 	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
722 	if (!sassc) {
723 		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
724 		    __func__, __LINE__);
725 		return (ENOMEM);
726 	}
727 
728 	/*
729 	 * XXX MaxTargets could change during a reinit.  Since we don't
730 	 * resize the targets[] array during such an event, cache the value
731 	 * of MaxTargets here so that we don't get into trouble later.  This
732 	 * should move into the reinit logic.
733 	 */
734 	sassc->maxtargets = sc->facts->MaxTargets;
735 	sassc->targets = malloc(sizeof(struct mprsas_target) *
736 	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
737 	if (!sassc->targets) {
738 		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
739 		    __func__, __LINE__);
740 		free(sassc, M_MPR);
741 		return (ENOMEM);
742 	}
743 	sc->sassc = sassc;
744 	sassc->sc = sc;
745 
746 	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
747 		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n");
748 		error = ENOMEM;
749 		goto out;
750 	}
751 
752 	unit = device_get_unit(sc->mpr_dev);
753 	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
754 	    unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
755 	if (sassc->sim == NULL) {
756 		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n");
757 		error = EINVAL;
758 		goto out;
759 	}
760 
761 	TAILQ_INIT(&sassc->ev_queue);
762 
763 	/* Initialize taskqueue for Event Handling */
764 	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
765 	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
766 	    taskqueue_thread_enqueue, &sassc->ev_tq);
767 
768 	/* Run the task queue with lowest priority */
769 	taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
770 	    device_get_nameunit(sc->mpr_dev));
771 
772 	mpr_lock(sc);
773 
774 	/*
775 	 * XXX There should be a bus for every port on the adapter, but since
776 	 * we're just going to fake the topology for now, we'll pretend that
777 	 * everything is just a target on a single bus.
778 	 */
779 	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
780 		mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n",
781 		    error);
782 		mpr_unlock(sc);
783 		goto out;
784 	}
785 
786 	/*
787 	 * Assume that discovery events will start right away, so freeze the
788 	 * SIM queue while discovery is in progress.  Hold off boot until
789 	 * discovery is complete.
790 	 */
791 	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
792 	sc->sassc->startup_refcount = 0;
793 	mprsas_startup_increment(sassc);
794 
795 	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
796 
797 	sassc->tm_count = 0;
798 
799 	/*
800 	 * Register for async events so we can determine the EEDP
801 	 * capabilities of devices.
802 	 */
803 	status = xpt_create_path(&sassc->path, /*periph*/NULL,
804 	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
805 	    CAM_LUN_WILDCARD);
806 	if (status != CAM_REQ_CMP) {
807 		mpr_printf(sc, "Error %#x creating sim path\n", status);
808 		sassc->path = NULL;
809 	} else {
810 		int event;
811 
812 #if (__FreeBSD_version >= 1000006) || \
813     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
814 		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
815 #else
816 		event = AC_FOUND_DEVICE;
817 #endif
818 
819 		/*
820 		 * On kernels prior to the CAM locking improvements, we can't
821 		 * call xpt_register_async() with a particular path specified.
822 		 *
823 		 * If a path isn't specified, xpt_register_async() will
824 		 * generate a wildcard path and acquire the XPT lock while
825 		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
826 		 * It will then drop the XPT lock once that is done.
827 		 *
828 		 * If a path is specified for xpt_register_async(), it will
829 		 * not acquire and drop the XPT lock around the call to
830 		 * xpt_action().  xpt_action() asserts that the caller
831 		 * holds the SIM lock, so the SIM lock has to be held when
832 		 * calling xpt_register_async() when the path is specified.
833 		 *
834 		 * But xpt_register_async calls xpt_for_all_devices(),
835 		 * which calls xptbustraverse(), which will acquire each
836 		 * SIM lock.  When it traverses our particular bus, it will
837 		 * necessarily acquire the SIM lock, which will lead to a
838 		 * recursive lock acquisition.
839 		 *
840 		 * The CAM locking changes fix this problem by acquiring
841 		 * the XPT topology lock around bus traversal in
842 		 * xptbustraverse(), so the caller can hold the SIM lock
843 		 * and it does not cause a recursive lock acquisition.
844 		 *
845 		 * These __FreeBSD_version values are approximate, especially
846 		 * for stable/10, which is two months later than the actual
847 		 * change.
848 		 */
849 
850 #if (__FreeBSD_version < 1000703) || \
851     ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
852 		mpr_unlock(sc);
853 		status = xpt_register_async(event, mprsas_async, sc,
854 					    NULL);
855 		mpr_lock(sc);
856 #else
857 		status = xpt_register_async(event, mprsas_async, sc,
858 					    sassc->path);
859 #endif
860 
861 		if (status != CAM_REQ_CMP) {
862 			mpr_dprint(sc, MPR_ERROR,
863 			    "Error %#x registering async handler for "
864 			    "AC_ADVINFO_CHANGED events\n", status);
865 			xpt_free_path(sassc->path);
866 			sassc->path = NULL;
867 		}
868 	}
869 	if (status != CAM_REQ_CMP) {
870 		/*
871 		 * EEDP use is the exception, not the rule.
872 		 * Warn the user, but do not fail to attach.
873 		 */
874 		mpr_printf(sc, "EEDP capabilities disabled.\n");
875 	}
876 
877 	mpr_unlock(sc);
878 
879 	mprsas_register_events(sc);
880 out:
881 	if (error)
882 		mpr_detach_sas(sc);
883 	return (error);
884 }
885 
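/*
 * Tear down the CAM attachment: deregister the event and async handlers,
 * free the event taskqueue, deregister the bus, free the SIM and SIMQ, and
 * release all per-target LUN state.
 */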
886 int
887 mpr_detach_sas(struct mpr_softc *sc)
888 {
889 	struct mprsas_softc *sassc;
890 	struct mprsas_lun *lun, *lun_tmp;
891 	struct mprsas_target *targ;
892 	int i;
893 
894 	MPR_FUNCTRACE(sc);
895 
896 	if (sc->sassc == NULL)
897 		return (0);
898 
899 	sassc = sc->sassc;
900 	mpr_deregister_events(sc, sassc->mprsas_eh);
901 
902 	/*
903 	 * Drain and free the event handling taskqueue without holding the
904 	 * lock, so that any parallel processing tasks drain properly
905 	 * without deadlocking.
906 	 */
907 	if (sassc->ev_tq != NULL)
908 		taskqueue_free(sassc->ev_tq);
909 
910 	/* Make sure CAM doesn't wedge if we had to bail out early. */
911 	mpr_lock(sc);
912 
913 	/* Deregister our async handler */
914 	if (sassc->path != NULL) {
915 		xpt_register_async(0, mprsas_async, sc, sassc->path);
916 		xpt_free_path(sassc->path);
917 		sassc->path = NULL;
918 	}
919 
920 	if (sassc->flags & MPRSAS_IN_STARTUP)
921 		xpt_release_simq(sassc->sim, 1);
922 
923 	if (sassc->sim != NULL) {
924 		xpt_bus_deregister(cam_sim_path(sassc->sim));
925 		cam_sim_free(sassc->sim, FALSE);
926 	}
927 
928 	sassc->flags |= MPRSAS_SHUTDOWN;
929 	mpr_unlock(sc);
930 
931 	if (sassc->devq != NULL)
932 		cam_simq_free(sassc->devq);
933 
934 	for (i = 0; i < sassc->maxtargets; i++) {
935 		targ = &sassc->targets[i];
936 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
937 			free(lun, M_MPR);
938 		}
939 	}
940 	free(sassc->targets, M_MPR);
941 	free(sassc, M_MPR);
942 	sc->sassc = NULL;
943 
944 	return (0);
945 }
946 
947 void
948 mprsas_discovery_end(struct mprsas_softc *sassc)
949 {
950 	struct mpr_softc *sc = sassc->sc;
951 
952 	MPR_FUNCTRACE(sc);
953 
954 	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
955 		callout_stop(&sassc->discovery_callout);
956 
957 }
958 
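/*
 * CAM action entry point for the SIM.  Immediate requests (path inquiry,
 * transport settings, geometry) are completed inline; SCSI I/O, SMP I/O and
 * device resets are dispatched to their own handlers.
 */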
959 static void
960 mprsas_action(struct cam_sim *sim, union ccb *ccb)
961 {
962 	struct mprsas_softc *sassc;
963 
964 	sassc = cam_sim_softc(sim);
965 
966 	MPR_FUNCTRACE(sassc->sc);
967 	mpr_dprint(sassc->sc, MPR_TRACE, "%s func 0x%x\n", __func__,
968 	    ccb->ccb_h.func_code);
969 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
970 
971 	switch (ccb->ccb_h.func_code) {
972 	case XPT_PATH_INQ:
973 	{
974 		struct ccb_pathinq *cpi = &ccb->cpi;
975 
976 		cpi->version_num = 1;
977 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
978 		cpi->target_sprt = 0;
979 #if (__FreeBSD_version >= 1000039) || \
980     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
981 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
982 #else
983 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
984 #endif
985 		cpi->hba_eng_cnt = 0;
986 		cpi->max_target = sassc->maxtargets - 1;
987 		cpi->max_lun = 255;
988 		cpi->initiator_id = sassc->maxtargets - 1;
989 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
990 		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
991 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
992 		cpi->unit_number = cam_sim_unit(sim);
993 		cpi->bus_id = cam_sim_bus(sim);
994 		/*
995 		 * XXXSLM-I think this needs to change based on config page or
996 		 * something instead of hardcoded to 150000.
997 		 */
998 		cpi->base_transfer_speed = 150000;
999 		cpi->transport = XPORT_SAS;
1000 		cpi->transport_version = 0;
1001 		cpi->protocol = PROTO_SCSI;
1002 		cpi->protocol_version = SCSI_REV_SPC;
1003 #if __FreeBSD_version >= 800001
1004 		/*
1005 		 * XXXSLM-probably need to base this number on max SGL's and
1006 		 * page size.
1007 		 */
1008 		cpi->maxio = 256 * 1024;
1009 #endif
1010 		cpi->ccb_h.status = CAM_REQ_CMP;
1011 		break;
1012 	}
1013 	case XPT_GET_TRAN_SETTINGS:
1014 	{
1015 		struct ccb_trans_settings	*cts;
1016 		struct ccb_trans_settings_sas	*sas;
1017 		struct ccb_trans_settings_scsi	*scsi;
1018 		struct mprsas_target *targ;
1019 
1020 		cts = &ccb->cts;
1021 		sas = &cts->xport_specific.sas;
1022 		scsi = &cts->proto_specific.scsi;
1023 
1024 		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1025 		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
1026 		    cts->ccb_h.target_id));
1027 		targ = &sassc->targets[cts->ccb_h.target_id];
1028 		if (targ->handle == 0x0) {
1029 			cts->ccb_h.status = CAM_DEV_NOT_THERE;
1030 			break;
1031 		}
1032 
1033 		cts->protocol_version = SCSI_REV_SPC2;
1034 		cts->transport = XPORT_SAS;
1035 		cts->transport_version = 0;
1036 
1037 		sas->valid = CTS_SAS_VALID_SPEED;
1038 		switch (targ->linkrate) {
1039 		case 0x08:
1040 			sas->bitrate = 150000;
1041 			break;
1042 		case 0x09:
1043 			sas->bitrate = 300000;
1044 			break;
1045 		case 0x0a:
1046 			sas->bitrate = 600000;
1047 			break;
1048 		default:
1049 			sas->valid = 0;
1050 		}
1051 
1052 		cts->protocol = PROTO_SCSI;
1053 		scsi->valid = CTS_SCSI_VALID_TQ;
1054 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1055 
1056 		cts->ccb_h.status = CAM_REQ_CMP;
1057 		break;
1058 	}
1059 	case XPT_CALC_GEOMETRY:
1060 		cam_calc_geometry(&ccb->ccg, /*extended*/1);
1061 		ccb->ccb_h.status = CAM_REQ_CMP;
1062 		break;
1063 	case XPT_RESET_DEV:
1064 		mpr_dprint(sassc->sc, MPR_XINFO,
1065 		    "mprsas_action XPT_RESET_DEV\n");
1066 		mprsas_action_resetdev(sassc, ccb);
1067 		return;
1068 	case XPT_RESET_BUS:
1069 	case XPT_ABORT:
1070 	case XPT_TERM_IO:
1071 		mpr_dprint(sassc->sc, MPR_XINFO,
1072 		    "mprsas_action faking success for abort or reset\n");
1073 		ccb->ccb_h.status = CAM_REQ_CMP;
1074 		break;
1075 	case XPT_SCSI_IO:
1076 		mprsas_action_scsiio(sassc, ccb);
1077 		return;
1078 #if __FreeBSD_version >= 900026
1079 	case XPT_SMP_IO:
1080 		mprsas_action_smpio(sassc, ccb);
1081 		return;
1082 #endif
1083 	default:
1084 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1085 		break;
1086 	}
1087 	xpt_done(ccb);
1088 
1089 }
1090 
1091 static void
1092 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1093     target_id_t target_id, lun_id_t lun_id)
1094 {
1095 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1096 	struct cam_path *path;
1097 
1098 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1099 	    ac_code, target_id, (uintmax_t)lun_id);
1100 
1101 	if (xpt_create_path(&path, NULL,
1102 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1103 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1104 			   "notification\n");
1105 		return;
1106 	}
1107 
1108 	xpt_async(ac_code, path, NULL);
1109 	xpt_free_path(path);
1110 }
1111 
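/*
 * Force completion of every active command with a NULL reply.  Used on the
 * diag reset path, where outstanding requests will never be answered by the
 * re-initialized firmware.
 */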
1112 static void
1113 mprsas_complete_all_commands(struct mpr_softc *sc)
1114 {
1115 	struct mpr_command *cm;
1116 	int i;
1117 	int completed;
1118 
1119 	MPR_FUNCTRACE(sc);
1120 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1121 
1122 	/* complete all commands with a NULL reply */
1123 	for (i = 1; i < sc->num_reqs; i++) {
1124 		cm = &sc->commands[i];
1125 		cm->cm_reply = NULL;
1126 		completed = 0;
1127 
1128 		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
1129 			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
1130 
1131 		if (cm->cm_complete != NULL) {
1132 			mprsas_log_command(cm, MPR_RECOVERY,
1133 			    "completing cm %p state %x ccb %p for diag reset\n",
1134 			    cm, cm->cm_state, cm->cm_ccb);
1135 			cm->cm_complete(sc, cm);
1136 			completed = 1;
1137 		}
1138 
1139 		if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
1140 			mprsas_log_command(cm, MPR_RECOVERY,
1141 			    "waking up cm %p state %x ccb %p for diag reset\n",
1142 			    cm, cm->cm_state, cm->cm_ccb);
1143 			wakeup(cm);
1144 			completed = 1;
1145 		}
1146 
1147 		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
1148 			/* this should never happen, but if it does, log */
1149 			mprsas_log_command(cm, MPR_RECOVERY,
1150 			    "cm %p state %x flags 0x%x ccb %p during diag "
1151 			    "reset\n", cm, cm->cm_state, cm->cm_flags,
1152 			    cm->cm_ccb);
1153 		}
1154 	}
1155 }
1156 
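/*
 * Called after a diag reset: go back into startup/discovery mode, tell CAM
 * the bus was reset, complete everything that was outstanding, and zero all
 * cached device handles so they are re-learned during rediscovery.
 */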
1157 void
1158 mprsas_handle_reinit(struct mpr_softc *sc)
1159 {
1160 	int i;
1161 
1162 	/* Go back into startup mode and freeze the simq, so that CAM
1163 	 * doesn't send any commands until after we've rediscovered all
1164 	 * targets and found the proper device handles for them.
1165 	 *
1166 	 * After the reset, portenable will trigger discovery, and after all
1167 	 * discovery-related activities have finished, the simq will be
1168 	 * released.
1169 	 */
1170 	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1171 	sc->sassc->flags |= MPRSAS_IN_STARTUP;
1172 	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1173 	mprsas_startup_increment(sc->sassc);
1174 
1175 	/* notify CAM of a bus reset */
1176 	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1177 	    CAM_LUN_WILDCARD);
1178 
1179 	/* complete and cleanup after all outstanding commands */
1180 	mprsas_complete_all_commands(sc);
1181 
1182 	mpr_dprint(sc, MPR_INIT, "%s startup %u tm %u after command "
1183 	    "completion\n", __func__, sc->sassc->startup_refcount,
1184 	    sc->sassc->tm_count);
1185 
1186 	/* zero all the target handles, since they may change after the
1187 	 * reset, and we have to rediscover all the targets and use the new
1188 	 * handles.
1189 	 */
1190 	for (i = 0; i < sc->sassc->maxtargets; i++) {
1191 		if (sc->sassc->targets[i].outstanding != 0)
1192 			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1193 			    i, sc->sassc->targets[i].outstanding);
1194 		sc->sassc->targets[i].handle = 0x0;
1195 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1196 		sc->sassc->targets[i].outstanding = 0;
1197 		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
1198 	}
1199 }
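
/*
 * A task management request itself timed out.  There is no further level of
 * recovery to escalate to, so reinitialize the controller.
 */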
1200 static void
1201 mprsas_tm_timeout(void *data)
1202 {
1203 	struct mpr_command *tm = data;
1204 	struct mpr_softc *sc = tm->cm_sc;
1205 
1206 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1207 
1208 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY,
1209 	    "task mgmt %p timed out\n", tm);
1210 	mpr_reinit(sc);
1211 }
1212 
1213 static void
1214 mprsas_logical_unit_reset_complete(struct mpr_softc *sc,
1215     struct mpr_command *tm)
1216 {
1217 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1218 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1219 	unsigned int cm_count = 0;
1220 	struct mpr_command *cm;
1221 	struct mprsas_target *targ;
1222 
1223 	callout_stop(&tm->cm_callout);
1224 
1225 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1226 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1227 	targ = tm->cm_targ;
1228 
1229 	/*
1230 	 * Currently there should be no way we can hit this case.  It only
1231 	 * happens when we have a failure to allocate chain frames, and
1232 	 * task management commands don't have S/G lists.
1233 	 */
1234 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1235 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! "
1236 		    "This should not happen!\n", __func__, tm->cm_flags);
1237 		mprsas_free_tm(sc, tm);
1238 		return;
1239 	}
1240 
1241 	if (reply == NULL) {
1242 		mprsas_log_command(tm, MPR_RECOVERY,
1243 		    "NULL reset reply for tm %p\n", tm);
1244 		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1245 			/* this completion was due to a reset, just cleanup */
1246 			targ->flags &= ~MPRSAS_TARGET_INRESET;
1247 			targ->tm = NULL;
1248 			mprsas_free_tm(sc, tm);
1249 		}
1250 		else {
1251 			/* we should have gotten a reply. */
1252 			mpr_reinit(sc);
1253 		}
1254 		return;
1255 	}
1256 
1257 	mprsas_log_command(tm, MPR_RECOVERY,
1258 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1259 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1260 	    le32toh(reply->TerminationCount));
1261 
1262 	/* See if there are any outstanding commands for this LUN.
1263 	 * This could be made more efficient by using a per-LU data
1264 	 * structure of some sort.
1265 	 */
1266 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1267 		if (cm->cm_lun == tm->cm_lun)
1268 			cm_count++;
1269 	}
1270 
1271 	if (cm_count == 0) {
1272 		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1273 		    "logical unit %u finished recovery after reset\n",
1274 		    tm->cm_lun);
1275 
1276 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1277 		    tm->cm_lun);
1278 
1279 		/* we've finished recovery for this logical unit.  check and
1280 		 * see if some other logical unit has a timedout command
1281 		 * that needs to be processed.
1282 		 */
1283 		cm = TAILQ_FIRST(&targ->timedout_commands);
1284 		if (cm) {
1285 			mprsas_send_abort(sc, tm, cm);
1286 		}
1287 		else {
1288 			targ->tm = NULL;
1289 			mprsas_free_tm(sc, tm);
1290 		}
1291 	}
1292 	else {
1293 		/* if we still have commands for this LUN, the reset
1294 		 * effectively failed, regardless of the status reported.
1295 		 * Escalate to a target reset.
1296 		 */
1297 		mprsas_log_command(tm, MPR_RECOVERY,
1298 		    "logical unit reset complete for tm %p, but still have %u "
1299 		    "command(s)\n", tm, cm_count);
1300 		mprsas_send_reset(sc, tm,
1301 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1302 	}
1303 }
1304 
1305 static void
1306 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1307 {
1308 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1309 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1310 	struct mprsas_target *targ;
1311 
1312 	callout_stop(&tm->cm_callout);
1313 
1314 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1315 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1316 	targ = tm->cm_targ;
1317 
1318 	/*
1319 	 * Currently there should be no way we can hit this case.  It only
1320 	 * happens when we have a failure to allocate chain frames, and
1321 	 * task management commands don't have S/G lists.
1322 	 */
1323 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1324 		mpr_dprint(sc, MPR_ERROR,"%s: cm_flags = %#x for target reset! "
1325 		    "This should not happen!\n", __func__, tm->cm_flags);
1326 		mprsas_free_tm(sc, tm);
1327 		return;
1328 	}
1329 
1330 	if (reply == NULL) {
1331 		mprsas_log_command(tm, MPR_RECOVERY,
1332 		    "NULL reset reply for tm %p\n", tm);
1333 		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1334 			/* this completion was due to a reset, just cleanup */
1335 			targ->flags &= ~MPRSAS_TARGET_INRESET;
1336 			targ->tm = NULL;
1337 			mprsas_free_tm(sc, tm);
1338 		}
1339 		else {
1340 			/* we should have gotten a reply. */
1341 			mpr_reinit(sc);
1342 		}
1343 		return;
1344 	}
1345 
1346 	mprsas_log_command(tm, MPR_RECOVERY,
1347 	    "target reset status 0x%x code 0x%x count %u\n",
1348 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1349 	    le32toh(reply->TerminationCount));
1350 
1351 	targ->flags &= ~MPRSAS_TARGET_INRESET;
1352 
1353 	if (targ->outstanding == 0) {
1354 		/* we've finished recovery for this target and all
1355 		 * of its logical units.
1356 		 */
1357 		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1358 		    "recovery finished after target reset\n");
1359 
1360 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1361 		    CAM_LUN_WILDCARD);
1362 
1363 		targ->tm = NULL;
1364 		mprsas_free_tm(sc, tm);
1365 	}
1366 	else {
1367 		/* after a target reset, if this target still has
1368 		 * outstanding commands, the reset effectively failed,
1369 		 * regardless of the status reported.  escalate.
1370 		 */
1371 		mprsas_log_command(tm, MPR_RECOVERY,
1372 		    "target reset complete for tm %p, but still have %u "
1373 		    "command(s)\n", tm, targ->outstanding);
1374 		mpr_reinit(sc);
1375 	}
1376 }
1377 
1378 #define MPR_RESET_TIMEOUT 30
1379 
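/*
 * Send a SCSI task management reset (logical unit reset or target reset) for
 * the target associated with 'tm'.  The completion handler continues the
 * recovery escalation.  Returns 0 on success, or non-zero if the type is
 * unknown or the request could not be issued.
 */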
1380 static int
1381 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
1382 {
1383 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1384 	struct mprsas_target *target;
1385 	int err;
1386 
1387 	target = tm->cm_targ;
1388 	if (target->handle == 0) {
1389 		mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1390 		    __func__, target->tid);
1391 		return -1;
1392 	}
1393 
1394 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1395 	req->DevHandle = htole16(target->handle);
1396 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1397 	req->TaskType = type;
1398 
1399 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1400 		/* XXX Need to handle invalid LUNs */
1401 		MPR_SET_LUN(req->LUN, tm->cm_lun);
1402 		tm->cm_targ->logical_unit_resets++;
1403 		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1404 		    "sending logical unit reset\n");
1405 		tm->cm_complete = mprsas_logical_unit_reset_complete;
1406 	}
1407 	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1408 		/*
1409 		 * Target reset method =
1410 		 *     SAS Hard Link Reset / SATA Link Reset
1411 		 */
1412 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1413 		tm->cm_targ->target_resets++;
1414 		tm->cm_targ->flags |= MPRSAS_TARGET_INRESET;
1415 		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1416 		    "sending target reset\n");
1417 		tm->cm_complete = mprsas_target_reset_complete;
1418 	}
1419 	else {
1420 		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
1421 		return -1;
1422 	}
1423 
1424 	mpr_dprint(sc, MPR_XINFO, "to target %u handle 0x%04x\n", target->tid,
1425 	    target->handle);
1426 	if (target->encl_level_valid) {
1427 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
1428 		    "connector name (%4s)\n", target->encl_level,
1429 		    target->encl_slot, target->connector_name);
1430 	}
1431 
1432 	tm->cm_data = NULL;
1433 	tm->cm_desc.HighPriority.RequestFlags =
1434 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1435 	tm->cm_complete_data = (void *)tm;
1436 
1437 	callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
1438 	    mprsas_tm_timeout, tm);
1439 
1440 	err = mpr_map_command(sc, tm);
1441 	if (err)
1442 		mprsas_log_command(tm, MPR_RECOVERY,
1443 		    "error %d sending reset type %u\n",
1444 		    err, type);
1445 
1446 	return err;
1447 }
1448 
1449 
1450 static void
1451 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
1452 {
1453 	struct mpr_command *cm;
1454 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1455 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1456 	struct mprsas_target *targ;
1457 
1458 	callout_stop(&tm->cm_callout);
1459 
1460 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1461 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1462 	targ = tm->cm_targ;
1463 
1464 	/*
1465 	 * Currently there should be no way we can hit this case.  It only
1466 	 * happens when we have a failure to allocate chain frames, and
1467 	 * task management commands don't have S/G lists.
1468 	 */
1469 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1470 		mprsas_log_command(tm, MPR_RECOVERY,
1471 		    "cm_flags = %#x for abort %p TaskMID %u!\n",
1472 		    tm->cm_flags, tm, le16toh(req->TaskMID));
1473 		mprsas_free_tm(sc, tm);
1474 		return;
1475 	}
1476 
1477 	if (reply == NULL) {
1478 		mprsas_log_command(tm, MPR_RECOVERY,
1479 		    "NULL abort reply for tm %p TaskMID %u\n",
1480 		    tm, le16toh(req->TaskMID));
1481 		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1482 			/* this completion was due to a reset, just cleanup */
1483 			targ->tm = NULL;
1484 			mprsas_free_tm(sc, tm);
1485 		}
1486 		else {
1487 			/* we should have gotten a reply. */
1488 			mpr_reinit(sc);
1489 		}
1490 		return;
1491 	}
1492 
1493 	mprsas_log_command(tm, MPR_RECOVERY,
1494 	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1495 	    le16toh(req->TaskMID),
1496 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1497 	    le32toh(reply->TerminationCount));
1498 
1499 	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1500 	if (cm == NULL) {
1501 		/* if there are no more timedout commands, we're done with
1502 		 * error recovery for this target.
1503 		 */
1504 		mprsas_log_command(tm, MPR_RECOVERY,
1505 		    "finished recovery after aborting TaskMID %u\n",
1506 		    le16toh(req->TaskMID));
1507 
1508 		targ->tm = NULL;
1509 		mprsas_free_tm(sc, tm);
1510 	}
1511 	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1512 		/* abort success, but we have more timedout commands to abort */
1513 		mprsas_log_command(tm, MPR_RECOVERY,
1514 		    "continuing recovery after aborting TaskMID %u\n",
1515 		    le16toh(req->TaskMID));
1516 
1517 		mprsas_send_abort(sc, tm, cm);
1518 	}
1519 	else {
1520 		/* we didn't get a command completion, so the abort
1521 		 * failed as far as we're concerned.  escalate.
1522 		 */
1523 		mprsas_log_command(tm, MPR_RECOVERY,
1524 		    "abort failed for TaskMID %u tm %p\n",
1525 		    le16toh(req->TaskMID), tm);
1526 
1527 		mprsas_send_reset(sc, tm,
1528 		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1529 	}
1530 }
1531 
1532 #define MPR_ABORT_TIMEOUT 5
1533 
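/*
 * Abort a single timed-out command by sending an ABORT TASK TM that names
 * the command's SMID.  mprsas_abort_complete() then decides whether to keep
 * aborting further timed-out commands or to escalate to a LUN reset.
 */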
1534 static int
1535 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1536     struct mpr_command *cm)
1537 {
1538 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1539 	struct mprsas_target *targ;
1540 	int err;
1541 
1542 	targ = cm->cm_targ;
1543 	if (targ->handle == 0) {
1544 		mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1545 		    __func__, cm->cm_ccb->ccb_h.target_id);
1546 		return -1;
1547 	}
1548 
1549 	mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1550 	    "Aborting command %p\n", cm);
1551 
1552 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1553 	req->DevHandle = htole16(targ->handle);
1554 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1555 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1556 
1557 	/* XXX Need to handle invalid LUNs */
1558 	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1559 
1560 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1561 
1562 	tm->cm_data = NULL;
1563 	tm->cm_desc.HighPriority.RequestFlags =
1564 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1565 	tm->cm_complete = mprsas_abort_complete;
1566 	tm->cm_complete_data = (void *)tm;
1567 	tm->cm_targ = cm->cm_targ;
1568 	tm->cm_lun = cm->cm_lun;
1569 
1570 	callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1571 	    mprsas_tm_timeout, tm);
1572 
1573 	targ->aborts++;
1574 
1575 	err = mpr_map_command(sc, tm);
1576 	if (err)
1577 		mprsas_log_command(tm, MPR_RECOVERY,
1578 		    "error %d sending abort for cm %p SMID %u\n",
1579 		    err, cm, req->TaskMID);
1580 	return err;
1581 }
1582 
1583 
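/*
 * Error recovery escalation ladder: a timed-out SCSI command is first
 * aborted (mprsas_send_abort); if commands remain outstanding, the logical
 * unit is reset; if that still leaves commands outstanding, the target is
 * reset; and if even that fails, mpr_reinit() performs a full diag reset.
 */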
1584 static void
1585 mprsas_scsiio_timeout(void *data)
1586 {
1587 	struct mpr_softc *sc;
1588 	struct mpr_command *cm;
1589 	struct mprsas_target *targ;
1590 
1591 	cm = (struct mpr_command *)data;
1592 	sc = cm->cm_sc;
1593 
1594 	MPR_FUNCTRACE(sc);
1595 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1596 
1597 	mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm);
1598 
1599 	/*
1600 	 * Run the interrupt handler to make sure it's not pending.  This
1601 	 * isn't perfect because the command could have already completed
1602 	 * and been re-used, though this is unlikely.
1603 	 */
1604 	mpr_intr_locked(sc);
1605 	if (cm->cm_state == MPR_CM_STATE_FREE) {
1606 		mprsas_log_command(cm, MPR_XINFO,
1607 		    "SCSI command %p almost timed out\n", cm);
1608 		return;
1609 	}
1610 
1611 	if (cm->cm_ccb == NULL) {
1612 		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
1613 		return;
1614 	}
1615 
1616 	targ = cm->cm_targ;
1617 	targ->timeouts++;
1618 
1619 	mprsas_log_command(cm, MPR_XINFO, "command timeout cm %p ccb %p "
1620 	    "target %u, handle(0x%04x)\n", cm, cm->cm_ccb, targ->tid,
1621 	    targ->handle);
1622 	if (targ->encl_level_valid) {
1623 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
1624 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
1625 		    targ->connector_name);
1626 	}
1627 
1628 	/* XXX first, check the firmware state, to see if it's still
1629 	 * operational.  if not, do a diag reset.
1630 	 */
1631 
1632 	cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1633 	cm->cm_state = MPR_CM_STATE_TIMEDOUT;
1634 	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1635 
1636 	if (targ->tm != NULL) {
1637 		/* target already in recovery, just queue up another
1638 		 * timedout command to be processed later.
1639 		 */
1640 		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
1641 		    "processing by tm %p\n", cm, targ->tm);
1642 	}
1643 	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
1644 		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
1645 		    cm, targ->tm);
1646 
1647 		/* start recovery by aborting the first timedout command */
1648 		mprsas_send_abort(sc, targ->tm, cm);
1649 	}
1650 	else {
1651 		/* XXX queue this target up for recovery once a TM becomes
1652 		 * available.  The firmware only has a limited number of
1653 		 * HighPriority credits for the high priority requests used
1654 		 * for task management, and we ran out.
1655 		 *
1656 		 * Isilon: don't worry about this for now, since we have
1657 		 * more credits than disks in an enclosure, and limit
1658 		 * ourselves to one TM per target for recovery.
1659 		 */
1660 		mpr_dprint(sc, MPR_RECOVERY,
1661 		    "timedout cm %p failed to allocate a tm\n", cm);
1662 	}
1663 }
1664 
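/*
 * Translate an XPT_SCSI_IO CCB into an MPI2 SCSI IO request: validate the
 * target, set the data direction and task attributes, copy in the CDB, and
 * queue the request to the controller.
 */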
1665 static void
1666 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1667 {
1668 	MPI2_SCSI_IO_REQUEST *req;
1669 	struct ccb_scsiio *csio;
1670 	struct mpr_softc *sc;
1671 	struct mprsas_target *targ;
1672 	struct mprsas_lun *lun;
1673 	struct mpr_command *cm;
1674 	uint8_t i, lba_byte, *ref_tag_addr;
1675 	uint16_t eedp_flags;
1676 	uint32_t mpi_control;
1677 
1678 	sc = sassc->sc;
1679 	MPR_FUNCTRACE(sc);
1680 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1681 
1682 	csio = &ccb->csio;
1683 	targ = &sassc->targets[csio->ccb_h.target_id];
1684 	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
1685 	if (targ->handle == 0x0) {
1686 		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1687 		    __func__, csio->ccb_h.target_id);
1688 		csio->ccb_h.status = CAM_DEV_NOT_THERE;
1689 		xpt_done(ccb);
1690 		return;
1691 	}
1692 	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1693 		mpr_dprint(sc, MPR_TRACE, "%s Raid component no SCSI IO "
1694 		    "supported %u\n", __func__, csio->ccb_h.target_id);
1695 		csio->ccb_h.status = CAM_DEV_NOT_THERE;
1696 		xpt_done(ccb);
1697 		return;
1698 	}
1699 	/*
1700 	 * Sometimes, it is possible to get a command that is not "In
1701 	 * Progress" and was actually aborted by the upper layer.  Check for
1702 	 * this here and complete the command without error.
1703 	 */
1704 	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
1705 		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1706 		    "target %u\n", __func__, csio->ccb_h.target_id);
1707 		xpt_done(ccb);
1708 		return;
1709 	}
1710 	/*
1711 	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
1712 	 * that the volume has timed out.  We want volumes to be enumerated
1713 	 * until they are deleted/removed, not just failed.
1714 	 */
1715 	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1716 		if (targ->devinfo == 0)
1717 			csio->ccb_h.status = CAM_REQ_CMP;
1718 		else
1719 			csio->ccb_h.status = CAM_SEL_TIMEOUT;
1720 		xpt_done(ccb);
1721 		return;
1722 	}
1723 
1724 	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1725 		mpr_dprint(sc, MPR_TRACE, "%s shutting down\n", __func__);
1726 		csio->ccb_h.status = CAM_DEV_NOT_THERE;
1727 		xpt_done(ccb);
1728 		return;
1729 	}
1730 
1731 	cm = mpr_alloc_command(sc);
1732 	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1733 		if (cm != NULL) {
1734 			mpr_free_command(sc, cm);
1735 		}
1736 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
1737 			xpt_freeze_simq(sassc->sim, 1);
1738 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
1739 		}
1740 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1741 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1742 		xpt_done(ccb);
1743 		return;
1744 	}
1745 
1746 	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1747 	bzero(req, sizeof(*req));
1748 	req->DevHandle = htole16(targ->handle);
1749 	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1750 	req->MsgFlags = 0;
1751 	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1752 	req->SenseBufferLength = MPR_SENSE_LEN;
1753 	req->SGLFlags = 0;
1754 	req->ChainOffset = 0;
1755 	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
1756 	req->SGLOffset1 = 0;
1757 	req->SGLOffset2 = 0;
1758 	req->SGLOffset3 = 0;
1759 	req->SkipCount = 0;
1760 	req->DataLength = htole32(csio->dxfer_len);
1761 	req->BidirectionalDataLength = 0;
1762 	req->IoFlags = htole16(csio->cdb_len);
1763 	req->EEDPFlags = 0;
1764 
1765 	/* Note: BiDirectional transfers are not supported */
1766 	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1767 	case CAM_DIR_IN:
1768 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
1769 		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
1770 		break;
1771 	case CAM_DIR_OUT:
1772 		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1773 		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
1774 		break;
1775 	case CAM_DIR_NONE:
1776 	default:
1777 		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1778 		break;
1779 	}
1780 
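	/*
	 * The additional-CDB-length field in Control is expressed in 4-byte
	 * words beyond the standard 16-byte CDB, so a 32-byte CDB needs a
	 * value of 4 here.
	 */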
1781 	if (csio->cdb_len == 32)
1782 		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1783 	/*
1784 	 * It looks like the hardware doesn't require an explicit tag
1785 	 * number for each transaction.  SAM Task Management not supported
1786 	 * at the moment.
1787 	 */
1788 	switch (csio->tag_action) {
1789 	case MSG_HEAD_OF_Q_TAG:
1790 		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1791 		break;
1792 	case MSG_ORDERED_Q_TAG:
1793 		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1794 		break;
1795 	case MSG_ACA_TASK:
1796 		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1797 		break;
1798 	case CAM_TAG_ACTION_NONE:
1799 	case MSG_SIMPLE_Q_TAG:
1800 	default:
1801 		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
1802 		break;
1803 	}
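	/*
	 * Merge in the per-target Transport Layer Retries (TLR) setting; it
	 * is kept in the mapping table and updated at I/O completion based
	 * on whether the device reports support for VPD page 0x90.
	 */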
1804 	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1805 	req->Control = htole32(mpi_control);
1806 
1807 	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1808 		mpr_free_command(sc, cm);
1809 		ccb->ccb_h.status = CAM_LUN_INVALID;
1810 		xpt_done(ccb);
1811 		return;
1812 	}
1813 
1814 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
1815 		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1816 	else
1817 		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1818 	req->IoFlags = htole16(csio->cdb_len);
1819 
1820 	/*
1821 	 * Check if EEDP is supported and enabled.  If it is then check if the
1822 	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
1823 	 * is formatted for EEDP support.  If all of this is true, set CDB up
1824 	 * for EEDP transfer.
1825 	 */
1826 	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1827 	if (sc->eedp_enabled && eedp_flags) {
1828 		SLIST_FOREACH(lun, &targ->luns, lun_link) {
1829 			if (lun->lun_id == csio->ccb_h.target_lun) {
1830 				break;
1831 			}
1832 		}
1833 
1834 		if ((lun != NULL) && (lun->eedp_formatted)) {
1835 			req->EEDPBlockSize = htole16(lun->eedp_block_size);
1836 			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1837 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1838 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1839 			req->EEDPFlags = htole16(eedp_flags);
1840 
1841 			/*
1842 			 * If the CDB is less than 32 bytes, fill in the
1843 			 * Primary Ref Tag with the low 4 bytes of the LBA.
1844 			 * If the CDB is 32 bytes, the tag fields are already
1845 			 * in place.  Also set the protection bit.  FreeBSD
1846 			 * does not yet support CDBs larger than 16 bytes, but
1847 			 * this code is harmless and is here for the future.
1848 			 */
1849 			if (csio->cdb_len != 32) {
1850 				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1851 				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1852 				    PrimaryReferenceTag;
1853 				for (i = 0; i < 4; i++) {
1854 					*ref_tag_addr =
1855 					    req->CDB.CDB32[lba_byte + i];
1856 					ref_tag_addr++;
1857 				}
1858 				req->CDB.EEDP32.PrimaryReferenceTag =
1859 				    htole32(req->
1860 				    CDB.EEDP32.PrimaryReferenceTag);
1861 				req->CDB.EEDP32.PrimaryApplicationTagMask =
1862 				    0xFFFF;
1863 				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1864 				    0x20;
1865 			} else {
1866 				eedp_flags |=
1867 				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1868 				req->EEDPFlags = htole16(eedp_flags);
1869 				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1870 				    0x1F) | 0x20;
1871 			}
1872 		}
1873 	}
1874 
1875 	cm->cm_length = csio->dxfer_len;
1876 	if (cm->cm_length != 0) {
1877 		cm->cm_data = ccb;
1878 		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
1879 	} else {
1880 		cm->cm_data = NULL;
1881 	}
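	/*
	 * The embedded SGL starts at 32-bit word offset 24 of the request
	 * (SGLOffset0 above), so (32 - 24) * 4 bytes are available here for
	 * SGL entries.
	 */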
1882 	cm->cm_sge = &req->SGL;
1883 	cm->cm_sglsize = (32 - 24) * 4;
1884 	cm->cm_complete = mprsas_scsiio_complete;
1885 	cm->cm_complete_data = ccb;
1886 	cm->cm_targ = targ;
1887 	cm->cm_lun = csio->ccb_h.target_lun;
1888 	cm->cm_ccb = ccb;
1889 	/*
1890 	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
1891 	 * and set descriptor type.
1892 	 */
1893 	if (targ->scsi_req_desc_type ==
1894 	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
1895 		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
1896 		cm->cm_desc.FastPathSCSIIO.RequestFlags =
1897 		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
1898 		cm->cm_desc.FastPathSCSIIO.DevHandle = htole16(targ->handle);
1899 	} else {
1900 		cm->cm_desc.SCSIIO.RequestFlags =
1901 		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1902 		cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1903 	}
1904 
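	/*
	 * Arm the per-command timeout handler, converting the CAM timeout
	 * from milliseconds to ticks.
	 */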
1905 	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1906 	   mprsas_scsiio_timeout, cm);
1907 
1908 	targ->issued++;
1909 	targ->outstanding++;
1910 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1911 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1912 
1913 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1914 	    __func__, cm, ccb, targ->outstanding);
1915 
1916 	mpr_map_command(sc, cm);
1917 	return;
1918 }
1919 
1920 static void
1921 mpr_response_code(struct mpr_softc *sc, u8 response_code)
1922 {
1923 	char *desc;
1924 
1925 	switch (response_code) {
1926 	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1927 		desc = "task management request completed";
1928 		break;
1929 	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1930 		desc = "invalid frame";
1931 		break;
1932 	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1933 		desc = "task management request not supported";
1934 		break;
1935 	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1936 		desc = "task management request failed";
1937 		break;
1938 	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1939 		desc = "task management request succeeded";
1940 		break;
1941 	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1942 		desc = "invalid lun";
1943 		break;
1944 	case 0xA:
1945 		desc = "overlapped tag attempted";
1946 		break;
1947 	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1948 		desc = "task queued, however not sent to target";
1949 		break;
1950 	default:
1951 		desc = "unknown";
1952 		break;
1953 	}
1954 	mpr_dprint(sc, MPR_XINFO, "response_code(0x%01x): %s\n", response_code,
1955 	    desc);
1956 }
1957 
1958 /**
1959  * mpr_sc_failed_io_info - translate a non-successful SCSI_IO request
1960  */
1961 static void
1962 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
1963     Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
1964 {
1965 	u32 response_info;
1966 	u8 *response_bytes;
1967 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1968 	    MPI2_IOCSTATUS_MASK;
1969 	u8 scsi_state = mpi_reply->SCSIState;
1970 	u8 scsi_status = mpi_reply->SCSIStatus;
1971 	char *desc_ioc_state = NULL;
1972 	char *desc_scsi_status = NULL;
1973 	char *desc_scsi_state = sc->tmp_string;
1974 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1975 
1976 	if (log_info == 0x31170000)
1977 		return;
1978 
1979 	switch (ioc_status) {
1980 	case MPI2_IOCSTATUS_SUCCESS:
1981 		desc_ioc_state = "success";
1982 		break;
1983 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1984 		desc_ioc_state = "invalid function";
1985 		break;
1986 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1987 		desc_ioc_state = "scsi recovered error";
1988 		break;
1989 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1990 		desc_ioc_state = "scsi invalid dev handle";
1991 		break;
1992 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1993 		desc_ioc_state = "scsi device not there";
1994 		break;
1995 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1996 		desc_ioc_state = "scsi data overrun";
1997 		break;
1998 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1999 		desc_ioc_state = "scsi data underrun";
2000 		break;
2001 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2002 		desc_ioc_state = "scsi io data error";
2003 		break;
2004 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2005 		desc_ioc_state = "scsi protocol error";
2006 		break;
2007 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2008 		desc_ioc_state = "scsi task terminated";
2009 		break;
2010 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2011 		desc_ioc_state = "scsi residual mismatch";
2012 		break;
2013 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2014 		desc_ioc_state = "scsi task mgmt failed";
2015 		break;
2016 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2017 		desc_ioc_state = "scsi ioc terminated";
2018 		break;
2019 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2020 		desc_ioc_state = "scsi ext terminated";
2021 		break;
2022 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2023 		desc_ioc_state = "eedp guard error";
2024 		break;
2025 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2026 		desc_ioc_state = "eedp ref tag error";
2027 		break;
2028 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2029 		desc_ioc_state = "eedp app tag error";
2030 		break;
2031 	default:
2032 		desc_ioc_state = "unknown";
2033 		break;
2034 	}
2035 
2036 	switch (scsi_status) {
2037 	case MPI2_SCSI_STATUS_GOOD:
2038 		desc_scsi_status = "good";
2039 		break;
2040 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
2041 		desc_scsi_status = "check condition";
2042 		break;
2043 	case MPI2_SCSI_STATUS_CONDITION_MET:
2044 		desc_scsi_status = "condition met";
2045 		break;
2046 	case MPI2_SCSI_STATUS_BUSY:
2047 		desc_scsi_status = "busy";
2048 		break;
2049 	case MPI2_SCSI_STATUS_INTERMEDIATE:
2050 		desc_scsi_status = "intermediate";
2051 		break;
2052 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2053 		desc_scsi_status = "intermediate condmet";
2054 		break;
2055 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2056 		desc_scsi_status = "reservation conflict";
2057 		break;
2058 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2059 		desc_scsi_status = "command terminated";
2060 		break;
2061 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
2062 		desc_scsi_status = "task set full";
2063 		break;
2064 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
2065 		desc_scsi_status = "aca active";
2066 		break;
2067 	case MPI2_SCSI_STATUS_TASK_ABORTED:
2068 		desc_scsi_status = "task aborted";
2069 		break;
2070 	default:
2071 		desc_scsi_status = "unknown";
2072 		break;
2073 	}
2074 
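	/*
	 * Build a human-readable scsi_state description (in sc->tmp_string)
	 * by concatenating the names of the state flags that are set.
	 */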
2075 	desc_scsi_state[0] = '\0';
2076 	if (!scsi_state)
2077 		desc_scsi_state = " ";
2078 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2079 		strcat(desc_scsi_state, "response info ");
2080 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2081 		strcat(desc_scsi_state, "state terminated ");
2082 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2083 		strcat(desc_scsi_state, "no status ");
2084 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2085 		strcat(desc_scsi_state, "autosense failed ");
2086 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2087 		strcat(desc_scsi_state, "autosense valid ");
2088 
2089 	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2090 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2091 	if (targ->encl_level_valid) {
2092 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2093 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2094 		    targ->connector_name);
2095 	}
2096 	/*
2097 	 * TODO: add more detail about underflow data here.
2098 	 */
2099 	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2100 	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2101 	    desc_scsi_state, scsi_state);
2102 
2103 	if ((sc->mpr_debug & MPR_XINFO) &&
2104 	    (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)) {
2105 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2106 		scsi_sense_print(csio);
2107 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2108 	}
2109 
2110 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2111 		response_info = le32toh(mpi_reply->ResponseInfo);
2112 		response_bytes = (u8 *)&response_info;
2113 		mpr_response_code(sc, response_bytes[0]);
2114 	}
2115 }
2116 
2117 static void
2118 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2119 {
2120 	MPI2_SCSI_IO_REPLY *rep;
2121 	union ccb *ccb;
2122 	struct ccb_scsiio *csio;
2123 	struct mprsas_softc *sassc;
2124 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2125 	u8 *TLR_bits, TLR_on;
2126 	int dir = 0, i;
2127 	u16 alloc_len;
2128 
2129 	MPR_FUNCTRACE(sc);
2130 	mpr_dprint(sc, MPR_TRACE,
2131 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2132 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2133 	    cm->cm_targ->outstanding);
2134 
2135 	callout_stop(&cm->cm_callout);
2136 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2137 
2138 	sassc = sc->sassc;
2139 	ccb = cm->cm_complete_data;
2140 	csio = &ccb->csio;
2141 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2142 	/*
2143 	 * XXX KDM if the chain allocation fails, does it matter if we do
2144 	 * the sync and unload here?  It is simpler to do it in every case,
2145 	 * assuming it doesn't cause problems.
2146 	 */
2147 	if (cm->cm_data != NULL) {
2148 		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2149 			dir = BUS_DMASYNC_POSTREAD;
2150 		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2151 			dir = BUS_DMASYNC_POSTWRITE;
2152 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2153 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2154 	}
2155 
2156 	cm->cm_targ->completed++;
2157 	cm->cm_targ->outstanding--;
2158 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
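	/*
	 * Clear the previous CAM status and the SIM-queued flag; the final
	 * status is filled in below.
	 */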
2159 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2160 
2161 	if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2162 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2163 		if (cm->cm_reply != NULL)
2164 			mprsas_log_command(cm, MPR_RECOVERY,
2165 			    "completed timedout cm %p ccb %p during recovery "
2166 			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2167 			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2168 			    rep->SCSIState, le32toh(rep->TransferCount));
2169 		else
2170 			mprsas_log_command(cm, MPR_RECOVERY,
2171 			    "completed timedout cm %p ccb %p during recovery\n",
2172 			    cm, cm->cm_ccb);
2173 	} else if (cm->cm_targ->tm != NULL) {
2174 		if (cm->cm_reply != NULL)
2175 			mprsas_log_command(cm, MPR_RECOVERY,
2176 			    "completed cm %p ccb %p during recovery "
2177 			    "ioc %x scsi %x state %x xfer %u\n",
2178 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2179 			    rep->SCSIStatus, rep->SCSIState,
2180 			    le32toh(rep->TransferCount));
2181 		else
2182 			mprsas_log_command(cm, MPR_RECOVERY,
2183 			    "completed cm %p ccb %p during recovery\n",
2184 			    cm, cm->cm_ccb);
2185 	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2186 		mprsas_log_command(cm, MPR_RECOVERY,
2187 		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2188 	}
2189 
2190 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2191 		/*
2192 		 * We ran into an error after we tried to map the command,
2193 		 * so we're getting a callback without queueing the command
2194 		 * to the hardware.  So we set the status here, and it will
2195 		 * be retained below.  We'll go through the "fast path",
2196 		 * because there can be no reply when we haven't actually
2197 		 * gone out to the hardware.
2198 		 */
2199 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
2200 
2201 		/*
2202 		 * Currently the only error included in the mask is
2203 		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2204 		 * chain frames.  We need to freeze the queue until we get
2205 		 * a command that completed without this error, which will
2206 		 * hopefully have some chain frames attached that we can
2207 		 * use.  If we wanted to get smarter about it, we would
2208 		 * only unfreeze the queue in this condition when we're
2209 		 * sure that we're getting some chain frames back.  That's
2210 		 * probably unnecessary.
2211 		 */
2212 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2213 			xpt_freeze_simq(sassc->sim, 1);
2214 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2215 			mpr_dprint(sc, MPR_INFO, "Error sending command, "
2216 				   "freezing SIM queue\n");
2217 		}
2218 	}
2219 
2220 	/*
2221 	 * If this is a Start Stop Unit command and it was issued by the driver
2222 	 * during shutdown, decrement the refcount to account for all of the
2223 	 * commands that were sent.  All SSU commands should be completed before
2224 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2225 	 * is TRUE.
2226 	 */
2227 	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2228 		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2229 		sc->SSU_refcount--;
2230 	}
2231 
2232 	/* Take the fast path to completion */
2233 	if (cm->cm_reply == NULL) {
2234 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2235 			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2236 				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2237 			else {
2238 				ccb->ccb_h.status = CAM_REQ_CMP;
2239 				ccb->csio.scsi_status = SCSI_STATUS_OK;
2240 			}
2241 			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2242 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2243 				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2244 				mpr_dprint(sc, MPR_XINFO,
2245 				    "Unfreezing SIM queue\n");
2246 			}
2247 		}
2248 
2249 		/*
2250 		 * There are two scenarios where the status won't be
2251 		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2252 		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2253 		 */
2254 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2255 			/*
2256 			 * Freeze the dev queue so that commands are
2257 			 * executed in the correct order after error
2258 			 * recovery.
2259 			 */
2260 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2261 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2262 		}
2263 		mpr_free_command(sc, cm);
2264 		xpt_done(ccb);
2265 		return;
2266 	}
2267 
2268 	mprsas_log_command(cm, MPR_XINFO,
2269 	    "ioc %x scsi %x state %x xfer %u\n",
2270 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2271 	    le32toh(rep->TransferCount));
2272 
2273 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2274 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2275 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2276 		/* FALLTHROUGH */
2277 	case MPI2_IOCSTATUS_SUCCESS:
2278 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2279 
2280 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2281 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2282 			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2283 
2284 		/* Completion failed at the transport level. */
2285 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2286 		    MPI2_SCSI_STATE_TERMINATED)) {
2287 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2288 			break;
2289 		}
2290 
2291 		/* In a modern packetized environment, an autosense failure
2292 		 * implies that there's not much else that can be done to
2293 		 * recover the command.
2294 		 */
2295 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2296 			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
2297 			break;
2298 		}
2299 
2300 		/*
2301 		 * CAM doesn't care about SAS Response Info data, but if this
2302 		 * is the state, check whether TLR should be done.  If not,
2303 		 * clear the TLR_bits for the target.
2304 		 */
2305 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2306 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2307 		    == MPR_SCSI_RI_INVALID_FRAME)) {
2308 			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
2309 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2310 		}
2311 
2312 		/*
2313 		 * Intentionally override the normal SCSI status reporting
2314 		 * for these two cases.  These are likely to happen in a
2315 		 * multi-initiator environment, and we want to make sure that
2316 		 * CAM retries these commands rather than fail them.
2317 		 */
2318 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2319 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2320 			ccb->ccb_h.status = CAM_REQ_ABORTED;
2321 			break;
2322 		}
2323 
2324 		/* Handle normal status and sense */
2325 		csio->scsi_status = rep->SCSIStatus;
2326 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2327 			ccb->ccb_h.status = CAM_REQ_CMP;
2328 		else
2329 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2330 
2331 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2332 			int sense_len, returned_sense_len;
2333 
2334 			returned_sense_len = min(le32toh(rep->SenseCount),
2335 			    sizeof(struct scsi_sense_data));
2336 			if (returned_sense_len < csio->sense_len)
2337 				csio->sense_resid = csio->sense_len -
2338 				    returned_sense_len;
2339 			else
2340 				csio->sense_resid = 0;
2341 
2342 			sense_len = min(returned_sense_len,
2343 			    csio->sense_len - csio->sense_resid);
2344 			bzero(&csio->sense_data, sizeof(csio->sense_data));
2345 			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2346 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2347 		}
2348 
2349 		/*
2350 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2351 		 * and it's page code 0 (Supported Page List), and there is
2352 		 * inquiry data, and this is for a sequential access device, and
2353 		 * the device is an SSP target, and TLR is supported by the
2354 		 * controller, turn the TLR_bits value ON if page 0x90 is
2355 		 * supported.
2356 		 */
2357 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2358 		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2359 		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2360 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2361 		    (csio->data_ptr != NULL) &&
2362 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2363 		    (sc->control_TLR) &&
2364 		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
2365 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2366 			vpd_list = (struct scsi_vpd_supported_page_list *)
2367 			    csio->data_ptr;
2368 			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
2369 			    TLR_bits;
2370 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2371 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
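			/*
			 * The INQUIRY allocation length is in CDB bytes 3
			 * and 4 (big-endian); subtract the residual to get
			 * the number of bytes actually returned.
			 */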
2372 			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2373 			    csio->cdb_io.cdb_bytes[4];
2374 			alloc_len -= csio->resid;
2375 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2376 				if (vpd_list->list[i] == 0x90) {
2377 					*TLR_bits = TLR_on;
2378 					break;
2379 				}
2380 			}
2381 		}
2382 		break;
2383 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2384 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2385 		/*
2386 		 * If devinfo is 0 this will be a volume.  In that case don't
2387 		 * tell CAM that the volume is not there.  We want volumes to
2388 		 * be enumerated until they are deleted/removed, not just
2389 		 * failed.
2390 		 */
2391 		if (cm->cm_targ->devinfo == 0)
2392 			ccb->ccb_h.status = CAM_REQ_CMP;
2393 		else
2394 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2395 		break;
2396 	case MPI2_IOCSTATUS_INVALID_SGL:
2397 		mpr_print_scsiio_cmd(sc, cm);
2398 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2399 		break;
2400 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2401 		/*
2402 		 * This is one of the responses that comes back when an I/O
2403 		 * has been aborted.  If it is because of a timeout that we
2404 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2405 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2406 		 * command is the same (it gets retried, subject to the
2407 		 * retry counter), the only difference is what gets printed
2408 		 * on the console.
2409 		 */
2410 		if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2411 			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
2412 		else
2413 			ccb->ccb_h.status = CAM_REQ_ABORTED;
2414 		break;
2415 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2416 		/* resid is ignored for this condition */
2417 		csio->resid = 0;
2418 		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
2419 		break;
2420 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2421 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2422 		/*
2423 		 * Since these are generally external (i.e. hopefully
2424 		 * transient transport-related) errors, retry these without
2425 		 * decrementing the retry count.
2426 		 */
2427 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
2428 		mprsas_log_command(cm, MPR_INFO,
2429 		    "terminated ioc %x scsi %x state %x xfer %u\n",
2430 		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2431 		    le32toh(rep->TransferCount));
2432 		break;
2433 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2434 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2435 	case MPI2_IOCSTATUS_INVALID_VPID:
2436 	case MPI2_IOCSTATUS_INVALID_FIELD:
2437 	case MPI2_IOCSTATUS_INVALID_STATE:
2438 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2439 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2440 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2441 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2442 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2443 	default:
2444 		mprsas_log_command(cm, MPR_XINFO,
2445 		    "completed ioc %x scsi %x state %x xfer %u\n",
2446 		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2447 		    le32toh(rep->TransferCount));
2448 		csio->resid = cm->cm_length;
2449 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2450 		break;
2451 	}
2452 
2453 	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2454 
2455 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2456 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2457 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2458 		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2459 		    "queue\n");
2460 	}
2461 
2462 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2463 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2464 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2465 	}
2466 
2467 	mpr_free_command(sc, cm);
2468 	xpt_done(ccb);
2469 }
2470 
2471 #if __FreeBSD_version >= 900026
2472 static void
2473 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2474 {
2475 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2476 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2477 	uint64_t sasaddr;
2478 	union ccb *ccb;
2479 
2480 	ccb = cm->cm_complete_data;
2481 
2482 	/*
2483 	 * Currently there should be no way we can hit this case.  It only
2484 	 * happens when we have a failure to allocate chain frames, and SMP
2485 	 * commands require two S/G elements only.  That should be handled
2486 	 * in the standard request size.
2487 	 */
2488 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2489 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2490 		    "request!\n", __func__, cm->cm_flags);
2491 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2492 		goto bailout;
2493 	}
2494 
2495 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2496 	if (rpl == NULL) {
2497 		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2498 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2499 		goto bailout;
2500 	}
2501 
2502 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
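	/*
	 * Reassemble the 64-bit SAS address from the little-endian low and
	 * high halves in the request, for use in the messages below.
	 */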
2503 	sasaddr = le32toh(req->SASAddress.Low);
2504 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2505 
2506 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2507 	    MPI2_IOCSTATUS_SUCCESS ||
2508 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2509 		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2510 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2511 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2512 		goto bailout;
2513 	}
2514 
2515 	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address "
2516 	    "%#jx completed successfully\n", __func__, (uintmax_t)sasaddr);
2517 
2518 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2519 		ccb->ccb_h.status = CAM_REQ_CMP;
2520 	else
2521 		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2522 
2523 bailout:
2524 	/*
2525 	 * We sync in both directions because we had DMAs in the S/G list
2526 	 * in both directions.
2527 	 */
2528 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2529 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2530 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2531 	mpr_free_command(sc, cm);
2532 	xpt_done(ccb);
2533 }
2534 
2535 static void
2536 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
2537     uint64_t sasaddr)
2538 {
2539 	struct mpr_command *cm;
2540 	uint8_t *request, *response;
2541 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2542 	struct mpr_softc *sc;
2543 	struct sglist *sg;
2544 	int error;
2545 
2546 	sc = sassc->sc;
2547 	sg = NULL;
2548 	error = 0;
2549 
2550 #if (__FreeBSD_version >= 1000028) || \
2551     ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
2552 	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2553 	case CAM_DATA_PADDR:
2554 	case CAM_DATA_SG_PADDR:
2555 		/*
2556 		 * XXX We don't yet support physical addresses here.
2557 		 */
2558 		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2559 		    "supported\n", __func__);
2560 		ccb->ccb_h.status = CAM_REQ_INVALID;
2561 		xpt_done(ccb);
2562 		return;
2563 	case CAM_DATA_SG:
2564 		/*
2565 		 * The chip does not support more than one buffer for the
2566 		 * request or response.
2567 		 */
2568 		if ((ccb->smpio.smp_request_sglist_cnt > 1)
2569 		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2570 			mpr_dprint(sc, MPR_ERROR,
2571 			    "%s: multiple request or response buffer segments "
2572 			    "not supported for SMP\n", __func__);
2573 			ccb->ccb_h.status = CAM_REQ_INVALID;
2574 			xpt_done(ccb);
2575 			return;
2576 		}
2577 
2578 		/*
2579 		 * The CAM_SCATTER_VALID flag was originally implemented
2580 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2581 		 * We have two.  So, just take that flag to mean that we
2582 		 * might have S/G lists, and look at the S/G segment count
2583 		 * to figure out whether that is the case for each individual
2584 		 * buffer.
2585 		 */
2586 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2587 			bus_dma_segment_t *req_sg;
2588 
2589 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2590 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2591 		} else
2592 			request = ccb->smpio.smp_request;
2593 
2594 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2595 			bus_dma_segment_t *rsp_sg;
2596 
2597 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2598 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2599 		} else
2600 			response = ccb->smpio.smp_response;
2601 		break;
2602 	case CAM_DATA_VADDR:
2603 		request = ccb->smpio.smp_request;
2604 		response = ccb->smpio.smp_response;
2605 		break;
2606 	default:
2607 		ccb->ccb_h.status = CAM_REQ_INVALID;
2608 		xpt_done(ccb);
2609 		return;
2610 	}
2611 #else /* __FreeBSD_version < 1000028 */
2612 	/*
2613 	 * XXX We don't yet support physical addresses here.
2614 	 */
2615 	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
2616 		mpr_printf(sc, "%s: physical addresses not supported\n",
2617 			   __func__);
2618 		ccb->ccb_h.status = CAM_REQ_INVALID;
2619 		xpt_done(ccb);
2620 		return;
2621 	}
2622 
2623 	/*
2624 	 * If the user wants to send an S/G list, check to make sure they
2625 	 * have single buffers.
2626 	 */
2627 	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
2628 		/*
2629 		 * The chip does not support more than one buffer for the
2630 		 * request or response.
2631 		 */
2632 	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
2633 		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2634 			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
2635 			    "response buffer segments not supported for SMP\n",
2636 			    __func__);
2637 			ccb->ccb_h.status = CAM_REQ_INVALID;
2638 			xpt_done(ccb);
2639 			return;
2640 		}
2641 
2642 		/*
2643 		 * The CAM_SCATTER_VALID flag was originally implemented
2644 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2645 		 * We have two.  So, just take that flag to mean that we
2646 		 * might have S/G lists, and look at the S/G segment count
2647 		 * to figure out whether that is the case for each individual
2648 		 * buffer.
2649 		 */
2650 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2651 			bus_dma_segment_t *req_sg;
2652 
2653 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2654 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2655 		} else
2656 			request = ccb->smpio.smp_request;
2657 
2658 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2659 			bus_dma_segment_t *rsp_sg;
2660 
2661 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2662 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2663 		} else
2664 			response = ccb->smpio.smp_response;
2665 	} else {
2666 		request = ccb->smpio.smp_request;
2667 		response = ccb->smpio.smp_response;
2668 	}
2669 #endif /* __FreeBSD_version < 1000028 */
2670 
2671 	cm = mpr_alloc_command(sc);
2672 	if (cm == NULL) {
2673 		mpr_dprint(sc, MPR_ERROR,
2674 		    "%s: cannot allocate command\n", __func__);
2675 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2676 		xpt_done(ccb);
2677 		return;
2678 	}
2679 
2680 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2681 	bzero(req, sizeof(*req));
2682 	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2683 
2684 	/* Allow the chip to use any route to this SAS address. */
2685 	req->PhysicalPort = 0xff;
2686 
2687 	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2688 	req->SGLFlags =
2689 	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2690 
2691 	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
2692 	    "%#jx\n", __func__, (uintmax_t)sasaddr);
2693 
2694 	mpr_init_sge(cm, req, &req->SGL);
2695 
2696 	/*
2697 	 * Set up a uio to pass into mpr_map_command().  This allows us to
2698 	 * do one map command, and one busdma call in there.
2699 	 */
2700 	cm->cm_uio.uio_iov = cm->cm_iovec;
2701 	cm->cm_uio.uio_iovcnt = 2;
2702 	cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2703 
2704 	/*
2705 	 * The read/write flag isn't used by busdma, but set it just in
2706 	 * case.  This isn't exactly accurate, either, since we're going in
2707 	 * both directions.
2708 	 */
2709 	cm->cm_uio.uio_rw = UIO_WRITE;
2710 
2711 	cm->cm_iovec[0].iov_base = request;
2712 	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2713 	cm->cm_iovec[1].iov_base = response;
2714 	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2715 
2716 	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2717 			       cm->cm_iovec[1].iov_len;
2718 
2719 	/*
2720 	 * Trigger a warning message in mpr_data_cb() for the user if we
2721 	 * wind up exceeding two S/G segments.  The chip expects one
2722 	 * segment for the request and another for the response.
2723 	 */
2724 	cm->cm_max_segs = 2;
2725 
2726 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2727 	cm->cm_complete = mprsas_smpio_complete;
2728 	cm->cm_complete_data = ccb;
2729 
2730 	/*
2731 	 * Tell the mapping code that we're using a uio, and that this is
2732 	 * an SMP passthrough request.  There is a little special-case
2733 	 * logic there (in mpr_data_cb()) to handle the bidirectional
2734 	 * transfer.
2735 	 */
2736 	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
2737 			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
2738 
2739 	/* The chip data format is little endian. */
2740 	req->SASAddress.High = htole32(sasaddr >> 32);
2741 	req->SASAddress.Low = htole32(sasaddr);
2742 
2743 	/*
2744 	 * XXX Note that we don't have a timeout/abort mechanism here.
2745 	 * From the manual, it looks like task management requests only
2746 	 * work for SCSI IO and SATA passthrough requests.  We may need to
2747 	 * have a mechanism to retry requests in the event of a chip reset
2748 	 * at least.  Hopefully the chip will ensure that any errors short
2749 	 * of that are relayed back to the driver.
2750 	 */
2751 	error = mpr_map_command(sc, cm);
2752 	if ((error != 0) && (error != EINPROGRESS)) {
2753 		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
2754 		    "mpr_map_command()\n", __func__, error);
2755 		goto bailout_error;
2756 	}
2757 
2758 	return;
2759 
2760 bailout_error:
2761 	mpr_free_command(sc, cm);
2762 	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2763 	xpt_done(ccb);
2764 	return;
2765 }
2766 
2767 static void
2768 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
2769 {
2770 	struct mpr_softc *sc;
2771 	struct mprsas_target *targ;
2772 	uint64_t sasaddr = 0;
2773 
2774 	sc = sassc->sc;
2775 
2776 	/*
2777 	 * Make sure the target exists.
2778 	 */
2779 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2780 	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
2781 	targ = &sassc->targets[ccb->ccb_h.target_id];
2782 	if (targ->handle == 0x0) {
2783 		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
2784 		    __func__, ccb->ccb_h.target_id);
2785 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2786 		xpt_done(ccb);
2787 		return;
2788 	}
2789 
2790 	/*
2791 	 * If this device has an embedded SMP target, we'll talk to it
2792 	 * directly.  Otherwise, we need to figure out what the expander's
2793 	 * address is.
2794 	 */
2795 	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2796 		sasaddr = targ->sasaddr;
2797 
2798 	/*
2799 	 * If we don't have a SAS address for the expander yet, try
2800 	 * grabbing it from the page 0x83 information cached in the
2801 	 * transport layer for this target.  LSI expanders report the
2802 	 * expander SAS address as the port-associated SAS address in
2803 	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
2804 	 * 0x83.
2805 	 *
2806 	 * XXX KDM disable this for now, but leave it commented out so that
2807 	 * it is obvious that this is another possible way to get the SAS
2808 	 * address.
2809 	 *
2810 	 * The parent handle method below is a little more reliable, and
2811 	 * the other benefit is that it works for devices other than SES
2812 	 * devices.  So you can send a SMP request to a da(4) device and it
2813 	 * will get routed to the expander that device is attached to.
2814 	 * (Assuming the da(4) device doesn't contain an SMP target...)
2815 	 */
2816 #if 0
2817 	if (sasaddr == 0)
2818 		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2819 #endif
2820 
2821 	/*
2822 	 * If we still don't have a SAS address for the expander, look for
2823 	 * the parent device of this device, which is probably the expander.
2824 	 */
2825 	if (sasaddr == 0) {
2826 #ifdef OLD_MPR_PROBE
2827 		struct mprsas_target *parent_target;
2828 #endif
2829 
2830 		if (targ->parent_handle == 0x0) {
2831 			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
2832 			    "a valid parent handle!\n", __func__, targ->handle);
2833 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2834 			goto bailout;
2835 		}
2836 #ifdef OLD_MPR_PROBE
2837 		parent_target = mprsas_find_target_by_handle(sassc, 0,
2838 		    targ->parent_handle);
2839 
2840 		if (parent_target == NULL) {
2841 			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
2842 			    "a valid parent target!\n", __func__, targ->handle);
2843 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2844 			goto bailout;
2845 		}
2846 
2847 		if ((parent_target->devinfo &
2848 		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2849 			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
2850 			    "does not have an SMP target!\n", __func__,
2851 			    targ->handle, parent_target->handle);
2852 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2853 			goto bailout;
2854 
2855 		}
2856 
2857 		sasaddr = parent_target->sasaddr;
2858 #else /* OLD_MPR_PROBE */
2859 		if ((targ->parent_devinfo &
2860 		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2861 			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
2862 			    "does not have an SMP target!\n", __func__,
2863 			    targ->handle, targ->parent_handle);
2864 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2865 			goto bailout;
2866 
2867 		}
2868 		if (targ->parent_sasaddr == 0x0) {
2869 			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
2870 			    "%d does not have a valid SAS address!\n", __func__,
2871 			    targ->handle, targ->parent_handle);
2872 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2873 			goto bailout;
2874 		}
2875 
2876 		sasaddr = targ->parent_sasaddr;
2877 #endif /* OLD_MPR_PROBE */
2878 
2879 	}
2880 
2881 	if (sasaddr == 0) {
2882 		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
2883 		    "handle %d\n", __func__, targ->handle);
2884 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2885 		goto bailout;
2886 	}
2887 	mprsas_send_smpcmd(sassc, ccb, sasaddr);
2888 
2889 	return;
2890 
2891 bailout:
2892 	xpt_done(ccb);
2893 
2894 }
2895 #endif /* __FreeBSD_version >= 900026 */
2896 
2897 static void
2898 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
2899 {
2900 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2901 	struct mpr_softc *sc;
2902 	struct mpr_command *tm;
2903 	struct mprsas_target *targ;
2904 
2905 	MPR_FUNCTRACE(sassc->sc);
2906 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
2907 
2908 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2909 	    ("Target %d out of bounds in XPT_RESET_DEV\n",
2910 	    ccb->ccb_h.target_id));
2911 	sc = sassc->sc;
2912 	tm = mpr_alloc_command(sc);
2913 	if (tm == NULL) {
2914 		mpr_dprint(sc, MPR_ERROR,
2915 		    "command alloc failure in mprsas_action_resetdev\n");
2916 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2917 		xpt_done(ccb);
2918 		return;
2919 	}
2920 
2921 	targ = &sassc->targets[ccb->ccb_h.target_id];
2922 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2923 	req->DevHandle = htole16(targ->handle);
2924 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2925 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2926 
2927 	/* SAS Hard Link Reset / SATA Link Reset */
2928 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
2929 
2930 	tm->cm_data = NULL;
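	/* Task management requests go on the high-priority queue. */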
2931 	tm->cm_desc.HighPriority.RequestFlags =
2932 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2933 	tm->cm_complete = mprsas_resetdev_complete;
2934 	tm->cm_complete_data = ccb;
2935 	tm->cm_targ = targ;
2936 	mpr_map_command(sc, tm);
2937 }
2938 
2939 static void
2940 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
2941 {
2942 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2943 	union ccb *ccb;
2944 
2945 	MPR_FUNCTRACE(sc);
2946 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2947 
2948 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
2949 	ccb = tm->cm_complete_data;
2950 
2951 	/*
2952 	 * Currently there should be no way we can hit this case.  It only
2953 	 * happens when we have a failure to allocate chain frames, and
2954 	 * task management commands don't have S/G lists.
2955 	 */
2956 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2957 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2958 
2959 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2960 
2961 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
2962 		    "handle %#04x! This should not happen!\n", __func__,
2963 		    tm->cm_flags, req->DevHandle);
2964 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2965 		goto bailout;
2966 	}
2967 
2968 	mpr_dprint(sc, MPR_XINFO,
2969 	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
2970 	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
2971 
2972 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
2973 		ccb->ccb_h.status = CAM_REQ_CMP;
2974 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
2975 		    CAM_LUN_WILDCARD);
2976 	}
2977 	else
2978 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2979 
2980 bailout:
2981 
2982 	mprsas_free_tm(sc, tm);
2983 	xpt_done(ccb);
2984 }
2985 
2986 static void
2987 mprsas_poll(struct cam_sim *sim)
2988 {
2989 	struct mprsas_softc *sassc;
2990 
2991 	sassc = cam_sim_softc(sim);
2992 
2993 	if (sassc->sc->mpr_debug & MPR_TRACE) {
2994 		/* frequent debug messages during a panic just slow
2995 		 * everything down too much.
2996 		 */
2997 		mpr_printf(sassc->sc, "%s clearing MPR_TRACE\n", __func__);
2998 		sassc->sc->mpr_debug &= ~MPR_TRACE;
2999 	}
3000 
3001 	mpr_intr_locked(sassc->sc);
3002 }
3003 
3004 static void
3005 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3006     void *arg)
3007 {
3008 	struct mpr_softc *sc;
3009 
3010 	sc = (struct mpr_softc *)callback_arg;
3011 
3012 	switch (code) {
3013 #if (__FreeBSD_version >= 1000006) || \
3014     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3015 	case AC_ADVINFO_CHANGED: {
3016 		struct mprsas_target *target;
3017 		struct mprsas_softc *sassc;
3018 		struct scsi_read_capacity_data_long rcap_buf;
3019 		struct ccb_dev_advinfo cdai;
3020 		struct mprsas_lun *lun;
3021 		lun_id_t lunid;
3022 		int found_lun;
3023 		uintptr_t buftype;
3024 
3025 		buftype = (uintptr_t)arg;
3026 
3027 		found_lun = 0;
3028 		sassc = sc->sassc;
3029 
3030 		/*
3031 		 * We're only interested in read capacity data changes.
3032 		 */
3033 		if (buftype != CDAI_TYPE_RCAPLONG)
3034 			break;
3035 
3036 		/*
3037 		 * See the comment in mpr_attach_sas() for a detailed
3038 		 * explanation.  In these versions of FreeBSD we register
3039 		 * for all events and filter out the events that don't
3040 		 * apply to us.
3041 		 */
3042 #if (__FreeBSD_version < 1000703) || \
3043     ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3044 		if (xpt_path_path_id(path) != sassc->sim->path_id)
3045 			break;
3046 #endif
3047 
3048 		/*
3049 		 * We should have a handle for this, but check to make sure.
3050 		 */
3051 		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3052 		    ("Target %d out of bounds in mprsas_async\n",
3053 		    xpt_path_target_id(path)));
3054 		target = &sassc->targets[xpt_path_target_id(path)];
3055 		if (target->handle == 0)
3056 			break;
3057 
3058 		lunid = xpt_path_lun_id(path);
3059 
3060 		SLIST_FOREACH(lun, &target->luns, lun_link) {
3061 			if (lun->lun_id == lunid) {
3062 				found_lun = 1;
3063 				break;
3064 			}
3065 		}
3066 
3067 		if (found_lun == 0) {
3068 			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3069 			    M_NOWAIT | M_ZERO);
3070 			if (lun == NULL) {
3071 				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
3072 				    "LUN for EEDP support.\n");
3073 				break;
3074 			}
3075 			lun->lun_id = lunid;
3076 			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3077 		}
3078 
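		/*
		 * Fetch the long READ CAPACITY data that CAM has cached for
		 * this path via an XPT_DEV_ADVINFO CCB; its protection bit
		 * tells us whether the LUN is formatted for EEDP.
		 */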
3079 		bzero(&rcap_buf, sizeof(rcap_buf));
3080 		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3081 		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3082 		cdai.ccb_h.flags = CAM_DIR_IN;
3083 		cdai.buftype = CDAI_TYPE_RCAPLONG;
3084 		cdai.flags = 0;
3085 		cdai.bufsiz = sizeof(rcap_buf);
3086 		cdai.buf = (uint8_t *)&rcap_buf;
3087 		xpt_action((union ccb *)&cdai);
3088 		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3089 			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
3090 
3091 		if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
3092 		    && (rcap_buf.prot & SRC16_PROT_EN)) {
3093 			lun->eedp_formatted = TRUE;
3094 			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3095 		} else {
3096 			lun->eedp_formatted = FALSE;
3097 			lun->eedp_block_size = 0;
3098 		}
3099 		break;
3100 	}
3101 #endif
3102 	case AC_FOUND_DEVICE: {
3103 		struct ccb_getdev *cgd;
3104 
3105 		/*
3106 		 * See the comment in mpr_attach_sas() for a detailed
3107 		 * explanation.  In these versions of FreeBSD we register
3108 		 * for all events and filter out the events that don't
3109 		 * apply to us.
3110 		 */
3111 #if (__FreeBSD_version < 1000703) || \
3112     ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3113 		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
3114 			break;
3115 #endif
3116 
3117 		cgd = arg;
3118 		mprsas_prepare_ssu(sc, path, cgd);
3119 
3120 #if (__FreeBSD_version < 901503) || \
3121     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3122 		mprsas_check_eedp(sc, path, cgd);
3123 #endif
3124 		break;
3125 	}
3126 	default:
3127 		break;
3128 	}
3129 }
3130 
3131 static void
3132 mprsas_prepare_ssu(struct mpr_softc *sc, struct cam_path *path,
3133     struct ccb_getdev *cgd)
3134 {
3135 	struct mprsas_softc *sassc = sc->sassc;
3136 	path_id_t pathid;
3137 	target_id_t targetid;
3138 	lun_id_t lunid;
3139 	struct mprsas_target *target;
3140 	struct mprsas_lun *lun;
3141 	uint8_t	found_lun;
3142 
3143 	sassc = sc->sassc;
3144 	pathid = cam_sim_path(sassc->sim);
3145 	targetid = xpt_path_target_id(path);
3146 	lunid = xpt_path_lun_id(path);
3147 
3148 	KASSERT(targetid < sassc->maxtargets,
3149 	    ("Target %d out of bounds in mprsas_prepare_ssu\n", targetid));
3150 	target = &sassc->targets[targetid];
3151 	if (target->handle == 0x0)
3152 		return;
3153 
3154 	/*
3155 	 * If LUN is already in list, don't create a new one.
3156 	 */
3157 	found_lun = FALSE;
3158 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3159 		if (lun->lun_id == lunid) {
3160 			found_lun = TRUE;
3161 			break;
3162 		}
3163 	}
3164 	if (!found_lun) {
3165 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3166 		    M_NOWAIT | M_ZERO);
3167 		if (lun == NULL) {
3168 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3169 			    "preparing SSU.\n");
3170 			return;
3171 		}
3172 		lun->lun_id = lunid;
3173 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3174 	}
3175 
3176 	/*
3177 	 * If this is a SATA direct-access end device, mark it so that a SCSI
3178 	 * StartStopUnit command will be sent to it when the driver is being
3179 	 * shutdown.
3180 	 */
3181 	if (((cgd->inq_data.device & 0x1F) == T_DIRECT) &&
3182 	    (target->devinfo & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
3183 	    ((target->devinfo & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
3184 	    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
3185 		lun->stop_at_shutdown = TRUE;
3186 	}
3187 }
3188 
3189 #if (__FreeBSD_version < 901503) || \
3190     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3191 static void
3192 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3193     struct ccb_getdev *cgd)
3194 {
3195 	struct mprsas_softc *sassc = sc->sassc;
3196 	struct ccb_scsiio *csio;
3197 	struct scsi_read_capacity_16 *scsi_cmd;
3198 	struct scsi_read_capacity_eedp *rcap_buf;
3199 	path_id_t pathid;
3200 	target_id_t targetid;
3201 	lun_id_t lunid;
3202 	union ccb *ccb;
3203 	struct cam_path *local_path;
3204 	struct mprsas_target *target;
3205 	struct mprsas_lun *lun;
3206 	uint8_t	found_lun;
3207 	char path_str[64];
3208 
3209 	sassc = sc->sassc;
3210 	pathid = cam_sim_path(sassc->sim);
3211 	targetid = xpt_path_target_id(path);
3212 	lunid = xpt_path_lun_id(path);
3213 
3214 	KASSERT(targetid < sassc->maxtargets,
3215 	    ("Target %d out of bounds in mprsas_check_eedp\n", targetid));
3216 	target = &sassc->targets[targetid];
3217 	if (target->handle == 0x0)
3218 		return;
3219 
3220 	/*
3221 	 * Determine if the device is EEDP capable.
3222 	 *
3223 	 * If this flag is set in the inquiry data, the device supports
3224 	 * protection information, and must support the 16 byte read capacity
3225 	 * command, otherwise continue without sending read cap 16
3226 	 */
3227 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3228 		return;
3229 
3230 	/*
3231 	 * Issue a READ CAPACITY 16 command.  This info is used to determine if
3232 	 * the LUN is formatted for EEDP support.
3233 	 */
3234 	ccb = xpt_alloc_ccb_nowait();
3235 	if (ccb == NULL) {
3236 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3237 		    "support.\n");
3238 		return;
3239 	}
3240 
3241 	if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid)
3242 	    != CAM_REQ_CMP) {
3243 		mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3244 		    "support\n");
3245 		xpt_free_ccb(ccb);
3246 		return;
3247 	}
3248 
3249 	/*
3250 	 * If LUN is already in list, don't create a new one.
3251 	 */
3252 	found_lun = FALSE;
3253 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3254 		if (lun->lun_id == lunid) {
3255 			found_lun = TRUE;
3256 			break;
3257 		}
3258 	}
3259 	if (!found_lun) {
3260 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3261 		    M_NOWAIT | M_ZERO);
3262 		if (lun == NULL) {
3263 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3264 			    "EEDP support.\n");
3265 			xpt_free_path(local_path);
3266 			xpt_free_ccb(ccb);
3267 			return;
3268 		}
3269 		lun->lun_id = lunid;
3270 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3271 	}
3272 
3273 	xpt_path_string(local_path, path_str, sizeof(path_str));
3274 	mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3275 	    path_str, target->handle);
3276 
3277 	/*
3278 	 * Issue a READ CAPACITY 16 command for the LUN.  The
3279 	 * mprsas_read_cap_done function will load the read cap info into the
3280 	 * LUN struct.
3281 	 */
3282 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3283 	    M_NOWAIT | M_ZERO);
3284 	if (rcap_buf == NULL) {
3285 		mpr_dprint(sc, MPR_FAULT, "Unable to alloc read capacity "
3286 		    "buffer for EEDP support.\n");
3287 		xpt_free_path(ccb->ccb_h.path);
3288 		xpt_free_ccb(ccb);
3289 		return;
3290 	}
3291 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3292 	csio = &ccb->csio;
3293 	csio->ccb_h.func_code = XPT_SCSI_IO;
3294 	csio->ccb_h.flags = CAM_DIR_IN;
3295 	csio->ccb_h.retry_count = 4;
3296 	csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3297 	csio->ccb_h.timeout = 60000;
3298 	csio->data_ptr = (uint8_t *)rcap_buf;
3299 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3300 	csio->sense_len = MPR_SENSE_LEN;
3301 	csio->cdb_len = sizeof(*scsi_cmd);
3302 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3303 
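	/*
	 * Opcode 0x9E is SERVICE ACTION IN(16); with SRC16_SERVICE_ACTION
	 * this is a READ CAPACITY(16).  Byte 13 holds the low byte of the
	 * allocation length.
	 */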
3304 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3305 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3306 	scsi_cmd->opcode = 0x9E;
3307 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3308 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3309 
3310 	ccb->ccb_h.ppriv_ptr1 = sassc;
3311 	xpt_action(ccb);
3312 }
3313 
3314 static void
3315 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3316 {
3317 	struct mprsas_softc *sassc;
3318 	struct mprsas_target *target;
3319 	struct mprsas_lun *lun;
3320 	struct scsi_read_capacity_eedp *rcap_buf;
3321 
3322 	if (done_ccb == NULL)
3323 		return;
3324 
3325 	/*
3326 	 * The driver needs to release the devq because this SCSI command was
3327 	 * generated internally by the driver.  Currently there is only one
3328 	 * place where the driver issues a SCSI command internally.  If more
3329 	 * internal commands are added in the future, the driver must release
3330 	 * the devq for those as well, since they will not go back through
3331 	 * cam_periph.
3332 	 */
3333 	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
3334 		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3335 		xpt_release_devq(done_ccb->ccb_h.path,
3336 		    /*count*/ 1, /*run_queue*/ TRUE);
3337 	}
3338 
3339 	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3340 
3341 	/*
3342 	 * Get the LUN ID for the path and look it up in the LUN list for the
3343 	 * target.
3344 	 */
3345 	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3346 	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
3347 	    ("Target %d out of bounds in mprsas_read_cap_done\n",
3348 	    done_ccb->ccb_h.target_id));
3349 	target = &sassc->targets[done_ccb->ccb_h.target_id];
3350 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3351 		if (lun->lun_id != done_ccb->ccb_h.target_lun)
3352 			continue;
3353 
3354 		/*
3355 		 * Got the LUN in the target's LUN list.  Fill it in with EEDP
3356 		 * info.  If the READ CAP 16 command had some SCSI error (common
3357 		 * if command is not supported), mark the lun as not supporting
3358 		 * EEDP and set the block size to 0.
3359 		 */
3360 		if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3361 		    || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3362 			lun->eedp_formatted = FALSE;
3363 			lun->eedp_block_size = 0;
3364 			break;
3365 		}
3366 
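		/*
		 * Bit 0 of the protect byte is the PROT_EN flag from the
		 * READ CAPACITY(16) parameter data; when it is set the
		 * medium has been formatted with protection information.
		 */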
3367 		if (rcap_buf->protect & 0x01) {
3368 			mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for "
3369 			    "target ID %d is formatted for EEDP "
3370 			    "support.\n", done_ccb->ccb_h.target_lun,
3371 			    done_ccb->ccb_h.target_id);
3372 			lun->eedp_formatted = TRUE;
3373 			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3374 		}
3375 		break;
3376 	}
3377 
3378 	/* Finished with this CCB and path. */
3379 	free(rcap_buf, M_MPR);
3380 	xpt_free_path(done_ccb->ccb_h.path);
3381 	xpt_free_ccb(done_ccb);
3382 }
3383 #endif /* (__FreeBSD_version < 901503) || \
3384           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3385 
3386 int
3387 mprsas_startup(struct mpr_softc *sc)
3388 {
3389 	/*
3390 	 * Send the port enable message and set the wait_for_port_enable flag.
3391 	 * This flag helps to keep the simq frozen until all discovery events
3392 	 * are processed.
3393 	 */
3394 	sc->wait_for_port_enable = 1;
3395 	mprsas_send_portenable(sc);
3396 	return (0);
3397 }
3398 
3399 static int
3400 mprsas_send_portenable(struct mpr_softc *sc)
3401 {
3402 	MPI2_PORT_ENABLE_REQUEST *request;
3403 	struct mpr_command *cm;
3404 
3405 	MPR_FUNCTRACE(sc);
3406 
3407 	if ((cm = mpr_alloc_command(sc)) == NULL)
3408 		return (EBUSY);
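	/*
	 * Build the PORT_ENABLE request.  It carries no payload or S/G
	 * list; the firmware reply is processed asynchronously by
	 * mprsas_portenable_complete().
	 */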
3409 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3410 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3411 	request->MsgFlags = 0;
3412 	request->VP_ID = 0;
3413 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3414 	cm->cm_complete = mprsas_portenable_complete;
3415 	cm->cm_data = NULL;
3416 	cm->cm_sge = NULL;
3417 
3418 	mpr_map_command(sc, cm);
3419 	mpr_dprint(sc, MPR_XINFO,
3420 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3421 	    cm, cm->cm_req, cm->cm_complete);
3422 	return (0);
3423 }
3424 
3425 static void
3426 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3427 {
3428 	MPI2_PORT_ENABLE_REPLY *reply;
3429 	struct mprsas_softc *sassc;
3430 
3431 	MPR_FUNCTRACE(sc);
3432 	sassc = sc->sassc;
3433 
3434 	/*
3435 	 * Currently there should be no way we can hit this case.  The error
3436 	 * flags are only set when chain frame allocation fails, and port
3437 	 * enable commands carry no S/G lists, so they never need chains.
3438 	 */
3439 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3440 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3441 		    "This should not happen!\n", __func__, cm->cm_flags);
3442 	}
3443 
3444 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3445 	if (reply == NULL)
3446 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3447 	else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
3448 	    MPI2_IOCSTATUS_SUCCESS)
3449 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3450 
3451 	mpr_free_command(sc, cm);
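	/*
	 * Disestablish the config intrhook that was set up earlier during
	 * attach; this lets the rest of the boot (e.g. root mount) stop
	 * waiting on this controller.
	 */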
3452 	if (sc->mpr_ich.ich_arg != NULL) {
3453 		mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n");
3454 		config_intrhook_disestablish(&sc->mpr_ich);
3455 		sc->mpr_ich.ich_arg = NULL;
3456 	}
3457 
3458 	/*
3459 	 * Done waiting for port enable to complete.  Decrement the refcount.
3460 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3461 	 * take place.
3462 	 */
3463 	sc->wait_for_port_enable = 0;
3464 	sc->port_enable_complete = 1;
3465 	wakeup(&sc->port_enable_complete);
3466 	mprsas_startup_decrement(sassc);
3467 }
3468 
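/*
 * Return 1 if the given target id appears in the driver's comma-separated
 * exclude_ids string, 0 otherwise, so that callers can skip excluded
 * target ids.
 */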
3469 int
3470 mprsas_check_id(struct mprsas_softc *sassc, int id)
3471 {
3472 	struct mpr_softc *sc = sassc->sc;
3473 	char *ids;
3474 	char *name;
3475 
3476 	ids = &sc->exclude_ids[0];
3477 	while((name = strsep(&ids, ",")) != NULL) {
3478 		if (name[0] == '\0')
3479 			continue;
3480 		if (strtol(name, NULL, 0) == (long)id)
3481 			return (1);
3482 	}
3483 
3484 	return (0);
3485 }
3486