xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision fcb560670601b2a4d87bb31d7531c8dcc37ee71b)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2014 LSI Corp.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 /* Communications core for LSI MPT3 */
32 
33 /* TODO Move headers to mprvar */
34 #include <sys/types.h>
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/selinfo.h>
39 #include <sys/module.h>
40 #include <sys/bus.h>
41 #include <sys/conf.h>
42 #include <sys/bio.h>
43 #include <sys/malloc.h>
44 #include <sys/uio.h>
45 #include <sys/sysctl.h>
46 #include <sys/endian.h>
47 #include <sys/queue.h>
48 #include <sys/kthread.h>
49 #include <sys/taskqueue.h>
50 #include <sys/sbuf.h>
51 
52 #include <machine/bus.h>
53 #include <machine/resource.h>
54 #include <sys/rman.h>
55 
56 #include <machine/stdarg.h>
57 
58 #include <cam/cam.h>
59 #include <cam/cam_ccb.h>
60 #include <cam/cam_debug.h>
61 #include <cam/cam_sim.h>
62 #include <cam/cam_xpt_sim.h>
63 #include <cam/cam_xpt_periph.h>
64 #include <cam/cam_periph.h>
65 #include <cam/scsi/scsi_all.h>
66 #include <cam/scsi/scsi_message.h>
67 #if __FreeBSD_version >= 900026
68 #include <cam/scsi/smp_all.h>
69 #endif
70 
71 #include <dev/mpr/mpi/mpi2_type.h>
72 #include <dev/mpr/mpi/mpi2.h>
73 #include <dev/mpr/mpi/mpi2_ioc.h>
74 #include <dev/mpr/mpi/mpi2_sas.h>
75 #include <dev/mpr/mpi/mpi2_cnfg.h>
76 #include <dev/mpr/mpi/mpi2_init.h>
77 #include <dev/mpr/mpi/mpi2_tool.h>
78 #include <dev/mpr/mpr_ioctl.h>
79 #include <dev/mpr/mprvar.h>
80 #include <dev/mpr/mpr_table.h>
81 #include <dev/mpr/mpr_sas.h>
82 
83 #define MPRSAS_DISCOVERY_TIMEOUT	20
84 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
85 
86 /*
87  * static array to check SCSI OpCode for EEDP protection bits
88  */
89 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
90 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
91 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
92 static uint8_t op_code_prot[256] = {
93 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
94 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
95 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
96 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
97 	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
98 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
102 	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
104 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
109 };
110 
111 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
112 
113 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
114 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
115 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
116 static void mprsas_poll(struct cam_sim *sim);
117 static void mprsas_scsiio_timeout(void *data);
118 static void mprsas_abort_complete(struct mpr_softc *sc,
119     struct mpr_command *cm);
120 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
121 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
123 static void mprsas_resetdev_complete(struct mpr_softc *,
124     struct mpr_command *);
125 static int  mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
126     struct mpr_command *cm);
127 static int  mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm,
128     uint8_t type);
129 static void mprsas_async(void *callback_arg, uint32_t code,
130     struct cam_path *path, void *arg);
131 static void mprsas_prepare_ssu(struct mpr_softc *sc, struct cam_path *path,
132     struct ccb_getdev *cgd);
133 #if (__FreeBSD_version < 901503) || \
134     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
135 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
136     struct ccb_getdev *cgd);
137 static void mprsas_read_cap_done(struct cam_periph *periph,
138     union ccb *done_ccb);
139 #endif
140 static int mprsas_send_portenable(struct mpr_softc *sc);
141 static void mprsas_portenable_complete(struct mpr_softc *sc,
142     struct mpr_command *cm);
143 
144 #if __FreeBSD_version >= 900026
145 static void
146 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc,
148 	       	union ccb *ccb, uint64_t sasaddr);
149 static void
150 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
151 #endif
152 
153 struct mprsas_target *
154 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
155     uint16_t handle)
156 {
157 	struct mprsas_target *target;
158 	int i;
159 
160 	for (i = start; i < sassc->maxtargets; i++) {
161 		target = &sassc->targets[i];
162 		if (target->handle == handle)
163 			return (target);
164 	}
165 
166 	return (NULL);
167 }
168 
169 /* we need to freeze the simq during attach and diag reset, to avoid failing
170  * commands before device handles have been found by discovery.  Since
171  * discovery involves reading config pages and possibly sending commands,
172  * discovery actions may continue even after we receive the end of discovery
173  * event, so refcount discovery actions instead of assuming we can unfreeze
174  * the simq when we get the event.
175  */
176 void
177 mprsas_startup_increment(struct mprsas_softc *sassc)
178 {
179 	MPR_FUNCTRACE(sassc->sc);
180 
181 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
182 		if (sassc->startup_refcount++ == 0) {
183 			/* just starting, freeze the simq */
184 			mpr_dprint(sassc->sc, MPR_INIT,
185 			    "%s freezing simq\n", __func__);
186 #if (__FreeBSD_version >= 1000039) || \
187     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
188 			xpt_hold_boot();
189 #endif
190 			xpt_freeze_simq(sassc->sim, 1);
191 		}
192 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
193 		    sassc->startup_refcount);
194 	}
195 }
196 
197 void
198 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
199 {
200 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
201 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
202 		xpt_release_simq(sassc->sim, 1);
203 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
204 	}
205 }
206 
207 void
208 mprsas_startup_decrement(struct mprsas_softc *sassc)
209 {
210 	MPR_FUNCTRACE(sassc->sc);
211 
212 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
213 		if (--sassc->startup_refcount == 0) {
214 			/* finished all discovery-related actions, release
215 			 * the simq and rescan for the latest topology.
216 			 */
217 			mpr_dprint(sassc->sc, MPR_INIT,
218 			    "%s releasing simq\n", __func__);
219 			sassc->flags &= ~MPRSAS_IN_STARTUP;
220 			xpt_release_simq(sassc->sim, 1);
221 #if (__FreeBSD_version >= 1000039) || \
222     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
223 			xpt_release_boot();
224 #else
225 			mprsas_rescan_target(sassc->sc, NULL);
226 #endif
227 		}
228 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
229 		    sassc->startup_refcount);
230 	}
231 }
232 
233 /* LSI's firmware requires us to stop sending commands when we're doing task
234  * management, so refcount the TMs and keep the simq frozen when any are in
235  * use.
236  */
237 struct mpr_command *
238 mprsas_alloc_tm(struct mpr_softc *sc)
239 {
240 	struct mpr_command *tm;
241 
242 	MPR_FUNCTRACE(sc);
243 	tm = mpr_alloc_high_priority_command(sc);
244 	if (tm != NULL) {
245 		if (sc->sassc->tm_count++ == 0) {
246 			mpr_dprint(sc, MPR_RECOVERY,
247 			    "%s freezing simq\n", __func__);
248 			xpt_freeze_simq(sc->sassc->sim, 1);
249 		}
250 		mpr_dprint(sc, MPR_RECOVERY, "%s tm_count %u\n", __func__,
251 		    sc->sassc->tm_count);
252 	}
253 	return tm;
254 }
255 
256 void
257 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
258 {
259 	mpr_dprint(sc, MPR_TRACE, "%s", __func__);
260 	if (tm == NULL)
261 		return;
262 
263 	/* if there are no TMs in use, we can release the simq.  We use our
264 	 * own refcount so that it's easier for a diag reset to cleanup and
265 	 * release the simq.
266 	 */
267 	if (--sc->sassc->tm_count == 0) {
268 		mpr_dprint(sc, MPR_RECOVERY, "%s releasing simq\n", __func__);
269 		xpt_release_simq(sc->sassc->sim, 1);
270 	}
271 	mpr_dprint(sc, MPR_RECOVERY, "%s tm_count %u\n", __func__,
272 	    sc->sassc->tm_count);
273 
274 	mpr_free_high_priority_command(sc, tm);
275 }
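
/*
 * Typical use of the TM pair above, as a sketch modeled on
 * mprsas_prepare_remove() below:
 *
 *	tm = mprsas_alloc_tm(sc);
 *	if (tm == NULL)
 *		return;
 *	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
 *	req->DevHandle = htole16(targ->handle);
 *	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
 *	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
 *	tm->cm_complete = <handler that finishes by calling mprsas_free_tm()>;
 *	mpr_map_command(sc, tm);
 *
 * The simq stays frozen from the first mprsas_alloc_tm() until the last
 * outstanding TM is returned through mprsas_free_tm().
 */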
276 
277 void
278 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
279 {
280 	struct mprsas_softc *sassc = sc->sassc;
281 	path_id_t pathid;
282 	target_id_t targetid;
283 	union ccb *ccb;
284 
285 	MPR_FUNCTRACE(sc);
286 	pathid = cam_sim_path(sassc->sim);
287 	if (targ == NULL)
288 		targetid = CAM_TARGET_WILDCARD;
289 	else
290 		targetid = targ - sassc->targets;
291 
292 	/*
293 	 * Allocate a CCB and schedule a rescan.
294 	 */
295 	ccb = xpt_alloc_ccb_nowait();
296 	if (ccb == NULL) {
297 		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
298 		return;
299 	}
300 
301 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
302 	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
303 		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
304 		xpt_free_ccb(ccb);
305 		return;
306 	}
307 
308 	if (targetid == CAM_TARGET_WILDCARD)
309 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
310 	else
311 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
312 
313 	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
314 	xpt_rescan(ccb);
315 }
316 
317 static void
318 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
319 {
320 	struct sbuf sb;
321 	va_list ap;
322 	char str[192];
323 	char path_str[64];
324 
325 	if (cm == NULL)
326 		return;
327 
328 	/* No need to be in here if debugging isn't enabled */
329 	if ((cm->cm_sc->mpr_debug & level) == 0)
330 		return;
331 
332 	sbuf_new(&sb, str, sizeof(str), 0);
333 
334 	va_start(ap, fmt);
335 
336 	if (cm->cm_ccb != NULL) {
337 		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
338 		    sizeof(path_str));
339 		sbuf_cat(&sb, path_str);
340 		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
341 			scsi_command_string(&cm->cm_ccb->csio, &sb);
342 			sbuf_printf(&sb, "length %d ",
343 			    cm->cm_ccb->csio.dxfer_len);
344 		}
345 	} else {
346 		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
347 		    cam_sim_name(cm->cm_sc->sassc->sim),
348 		    cam_sim_unit(cm->cm_sc->sassc->sim),
349 		    cam_sim_bus(cm->cm_sc->sassc->sim),
350 		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
351 		    cm->cm_lun);
352 	}
353 
354 	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
355 	sbuf_vprintf(&sb, fmt, ap);
356 	sbuf_finish(&sb);
357 	mpr_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));
358 
359 	va_end(ap);
360 }
361 
362 static void
363 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
364 {
365 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
366 	struct mprsas_target *targ;
367 	uint16_t handle;
368 
369 	MPR_FUNCTRACE(sc);
370 
371 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
372 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
373 	targ = tm->cm_targ;
374 
375 	if (reply == NULL) {
376 		/* XXX retry the remove after the diag reset completes? */
377 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
378 		    "0x%04x\n", __func__, handle);
379 		mprsas_free_tm(sc, tm);
380 		return;
381 	}
382 
383 	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
384 		mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
385 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
386 		mprsas_free_tm(sc, tm);
387 		return;
388 	}
389 
390 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
391 	    le32toh(reply->TerminationCount));
392 	mpr_free_reply(sc, tm->cm_reply_data);
393 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
394 
395 	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
396 	    targ->tid, handle);
397 
398 	/*
399 	 * Don't clear target if remove fails because things will get confusing.
400 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
401 	 * this target id if possible, and so we can assign the same target id
402 	 * to this device if it comes back in the future.
403 	 */
404 	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
405 		targ = tm->cm_targ;
406 		targ->handle = 0x0;
407 		targ->encl_handle = 0x0;
408 		targ->encl_level_valid = 0x0;
409 		targ->encl_level = 0x0;
410 		targ->connector_name[0] = ' ';
411 		targ->connector_name[1] = ' ';
412 		targ->connector_name[2] = ' ';
413 		targ->connector_name[3] = ' ';
414 		targ->encl_slot = 0x0;
415 		targ->exp_dev_handle = 0x0;
416 		targ->phy_num = 0x0;
417 		targ->linkrate = 0x0;
418 		targ->devinfo = 0x0;
419 		targ->flags = 0x0;
420 		targ->scsi_req_desc_type = 0;
421 	}
422 
423 	mprsas_free_tm(sc, tm);
424 }
425 
426 
427 /*
428  * There is no need to send MPI2_SAS_OP_REMOVE_DEVICE for volume removal.
429  * Otherwise, volume deletion is handled the same way as bare-drive removal.
430  */
431 void
432 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
433 {
434 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
435 	struct mpr_softc *sc;
436 	struct mpr_command *cm;
437 	struct mprsas_target *targ = NULL;
438 
439 	MPR_FUNCTRACE(sassc->sc);
440 	sc = sassc->sc;
441 
442 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
443 	if (targ == NULL) {
444 		/* FIXME: what is the right action here? */
445 		/* We don't know about this device. */
446 		mpr_dprint(sc, MPR_ERROR,
447 		    "%s %d: invalid handle 0x%x\n", __func__, __LINE__, handle);
448 		return;
449 	}
450 
451 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
452 
453 	cm = mprsas_alloc_tm(sc);
454 	if (cm == NULL) {
455 		mpr_dprint(sc, MPR_ERROR,
456 		    "%s: command alloc failure\n", __func__);
457 		return;
458 	}
459 
460 	mprsas_rescan_target(sc, targ);
461 
462 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
463 	req->DevHandle = htole16(targ->handle);
464 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
465 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
466 
467 	/* SAS Hard Link Reset / SATA Link Reset */
468 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
469 
470 	cm->cm_targ = targ;
471 	cm->cm_data = NULL;
472 	cm->cm_desc.HighPriority.RequestFlags =
473 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
474 	cm->cm_complete = mprsas_remove_volume;
475 	cm->cm_complete_data = (void *)(uintptr_t)handle;
476 	mpr_map_command(sc, cm);
477 }
478 
479 /*
480  * The MPT3 firmware performs debounce on the link to avoid transient link
481  * errors and false removals.  When it does decide that link has been lost
482  * and a device needs to go away, it expects that the host will perform a
483  * target reset and then an op remove.  The reset has the side-effect of
484  * aborting any outstanding requests for the device, which is required for
485  * the op-remove to succeed.  It's not clear if the host should check for
486  * the device coming back alive after the reset.
487  */
488 void
489 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
490 {
491 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
492 	struct mpr_softc *sc;
493 	struct mpr_command *cm;
494 	struct mprsas_target *targ = NULL;
495 
496 	MPR_FUNCTRACE(sassc->sc);
497 
498 	sc = sassc->sc;
499 
500 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
501 	if (targ == NULL) {
502 		/* FIXME: what is the right action here? */
503 		/* We don't know about this device. */
504 		mpr_dprint(sc, MPR_ERROR, "%s: invalid handle 0x%x\n",
505 		    __func__, handle);
506 		return;
507 	}
508 
509 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
510 
511 	cm = mprsas_alloc_tm(sc);
512 	if (cm == NULL) {
513 		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
514 		    __func__);
515 		return;
516 	}
517 
518 	mprsas_rescan_target(sc, targ);
519 
520 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
521 	memset(req, 0, sizeof(*req));
522 	req->DevHandle = htole16(targ->handle);
523 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
524 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
525 
526 	/* SAS Hard Link Reset / SATA Link Reset */
527 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
528 
529 	cm->cm_targ = targ;
530 	cm->cm_data = NULL;
531 	cm->cm_desc.HighPriority.RequestFlags =
532 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
533 	cm->cm_complete = mprsas_remove_device;
534 	cm->cm_complete_data = (void *)(uintptr_t)handle;
535 	mpr_map_command(sc, cm);
536 }
537 
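/*
 * Device removal runs in three stages: mprsas_prepare_remove() queues a
 * target reset TM, mprsas_remove_device() below (the TM's completion
 * handler) reuses the same command to issue a SAS_OP_REMOVE_DEVICE IO unit
 * control request, and mprsas_remove_complete() clears the target state
 * once the firmware acknowledges the removal.
 */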
538 static void
539 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
540 {
541 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
542 	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
543 	struct mprsas_target *targ;
544 	struct mpr_command *next_cm;
545 	uint16_t handle;
546 
547 	MPR_FUNCTRACE(sc);
548 
549 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
550 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
551 	targ = tm->cm_targ;
552 
553 	/*
554 	 * Currently there should be no way we can hit this case.  It only
555 	 * happens when we have a failure to allocate chain frames, and
556 	 * task management commands don't have S/G lists.
557 	 */
558 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
559 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
560 		    "handle %#04x! This should not happen!\n", __func__,
561 		    tm->cm_flags, handle);
562 		mprsas_free_tm(sc, tm);
563 		return;
564 	}
565 
566 	if (reply == NULL) {
567 		/* XXX retry the remove after the diag reset completes? */
568 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
569 		    "0x%04x\n", __func__, handle);
570 		mprsas_free_tm(sc, tm);
571 		return;
572 	}
573 
574 	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
575 		mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
576 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
577 		mprsas_free_tm(sc, tm);
578 		return;
579 	}
580 
581 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
582 	    le32toh(reply->TerminationCount));
583 	mpr_free_reply(sc, tm->cm_reply_data);
584 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
585 
586 	/* Reuse the existing command */
587 	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
588 	memset(req, 0, sizeof(*req));
589 	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
590 	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
591 	req->DevHandle = htole16(handle);
592 	tm->cm_data = NULL;
593 	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
594 	tm->cm_complete = mprsas_remove_complete;
595 	tm->cm_complete_data = (void *)(uintptr_t)handle;
596 
597 	mpr_map_command(sc, tm);
598 
599 	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
600 	    targ->tid, handle);
601 	if (targ->encl_level_valid) {
602 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
603 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
604 		    targ->connector_name);
605 	}
606 	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
607 		union ccb *ccb;
608 
609 		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
610 		ccb = tm->cm_complete_data;
611 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
612 		mprsas_scsiio_complete(sc, tm);
613 	}
614 }
615 
616 static void
617 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
618 {
619 	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
620 	uint16_t handle;
621 	struct mprsas_target *targ;
622 	struct mprsas_lun *lun;
623 
624 	MPR_FUNCTRACE(sc);
625 
626 	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
627 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
628 
629 	/*
630 	 * Currently there should be no way we can hit this case.  It only
631 	 * happens when we have a failure to allocate chain frames, and
632 	 * task management commands don't have S/G lists.
633 	 */
634 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
635 		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
636 		    "handle %#04x! This should not happen!\n", __func__,
637 		    tm->cm_flags, handle);
638 		mprsas_free_tm(sc, tm);
639 		return;
640 	}
641 
642 	if (reply == NULL) {
643 		/* most likely a chip reset */
644 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
645 		    "0x%04x\n", __func__, handle);
646 		mprsas_free_tm(sc, tm);
647 		return;
648 	}
649 
650 	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
651 	    __func__, handle, le16toh(reply->IOCStatus));
652 
653 	/*
654 	 * Don't clear target if remove fails because things will get confusing.
655 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
656 	 * this target id if possible, and so we can assign the same target id
657 	 * to this device if it comes back in the future.
658 	 */
659 	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
660 		targ = tm->cm_targ;
661 		targ->handle = 0x0;
662 		targ->encl_handle = 0x0;
663 		targ->encl_level_valid = 0x0;
664 		targ->encl_level = 0x0;
665 		targ->connector_name[0] = ' ';
666 		targ->connector_name[1] = ' ';
667 		targ->connector_name[2] = ' ';
668 		targ->connector_name[3] = ' ';
669 		targ->encl_slot = 0x0;
670 		targ->exp_dev_handle = 0x0;
671 		targ->phy_num = 0x0;
672 		targ->linkrate = 0x0;
673 		targ->devinfo = 0x0;
674 		targ->flags = 0x0;
675 		targ->scsi_req_desc_type = 0;
676 
677 		while (!SLIST_EMPTY(&targ->luns)) {
678 			lun = SLIST_FIRST(&targ->luns);
679 			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
680 			free(lun, M_MPR);
681 		}
682 	}
683 
684 	mprsas_free_tm(sc, tm);
685 }
686 
687 static int
688 mprsas_register_events(struct mpr_softc *sc)
689 {
690 	uint8_t events[16];
691 
692 	bzero(events, 16);
693 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
694 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
695 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
696 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
697 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
698 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
699 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
700 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
701 	setbit(events, MPI2_EVENT_IR_VOLUME);
702 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
703 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
704 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
705 
706 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
707 	    &sc->sassc->mprsas_eh);
708 
709 	return (0);
710 }
711 
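/*
 * Attach-time setup for the SAS/CAM side of the driver: allocate the
 * per-controller mprsas_softc and target array, create the simq and SIM,
 * start the firmware-event taskqueue, register the bus with CAM, freeze
 * the simq until discovery completes, and register the async and firmware
 * event handlers (the async handler is what drives EEDP detection).
 */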
712 int
713 mpr_attach_sas(struct mpr_softc *sc)
714 {
715 	struct mprsas_softc *sassc;
716 	cam_status status;
717 	int unit, error = 0;
718 
719 	MPR_FUNCTRACE(sc);
720 
721 	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
722 	if (!sassc) {
723 		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
724 		    __func__, __LINE__);
725 		return (ENOMEM);
726 	}
727 
728 	/*
729 	 * XXX MaxTargets could change during a reinit.  Since we don't
730 	 * resize the targets[] array during such an event, cache the value
731 	 * of MaxTargets here so that we don't get into trouble later.  This
732 	 * should move into the reinit logic.
733 	 */
734 	sassc->maxtargets = sc->facts->MaxTargets;
735 	sassc->targets = malloc(sizeof(struct mprsas_target) *
736 	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
737 	if (!sassc->targets) {
738 		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
739 		    __func__, __LINE__);
740 		free(sassc, M_MPR);
741 		return (ENOMEM);
742 	}
743 	sc->sassc = sassc;
744 	sassc->sc = sc;
745 
746 	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
747 		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n");
748 		error = ENOMEM;
749 		goto out;
750 	}
751 
752 	unit = device_get_unit(sc->mpr_dev);
753 	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
754 	    unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
755 	if (sassc->sim == NULL) {
756 		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n");
757 		error = EINVAL;
758 		goto out;
759 	}
760 
761 	TAILQ_INIT(&sassc->ev_queue);
762 
763 	/* Initialize taskqueue for Event Handling */
764 	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
765 	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
766 	    taskqueue_thread_enqueue, &sassc->ev_tq);
767 	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
768 	    device_get_nameunit(sc->mpr_dev));
769 
770 	mpr_lock(sc);
771 
772 	/*
773 	 * XXX There should be a bus for every port on the adapter, but since
774 	 * we're just going to fake the topology for now, we'll pretend that
775 	 * everything is just a target on a single bus.
776 	 */
777 	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
778 		mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n",
779 		    error);
780 		mpr_unlock(sc);
781 		goto out;
782 	}
783 
784 	/*
785 	 * Assume that discovery events will start right away.  Freezing
786 	 * the simq keeps CAM from sending commands before discovery has
787 	 * found the devices.  Hold off boot until discovery is complete.
788 	 */
789 	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
790 	sc->sassc->startup_refcount = 0;
791 	mprsas_startup_increment(sassc);
792 
793 	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
794 
795 	sassc->tm_count = 0;
796 
797 	/*
798 	 * Register for async events so we can determine the EEDP
799 	 * capabilities of devices.
800 	 */
801 	status = xpt_create_path(&sassc->path, /*periph*/NULL,
802 	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
803 	    CAM_LUN_WILDCARD);
804 	if (status != CAM_REQ_CMP) {
805 		mpr_printf(sc, "Error %#x creating sim path\n", status);
806 		sassc->path = NULL;
807 	} else {
808 		int event;
809 
810 #if (__FreeBSD_version >= 1000006) || \
811     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
812 		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
813 #else
814 		event = AC_FOUND_DEVICE;
815 #endif
816 
817 		/*
818 		 * Prior to the CAM locking improvements, we can't call
819 		 * xpt_register_async() with a particular path specified.
820 		 *
821 		 * If a path isn't specified, xpt_register_async() will
822 		 * generate a wildcard path and acquire the XPT lock while
823 		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
824 		 * It will then drop the XPT lock once that is done.
825 		 *
826 		 * If a path is specified for xpt_register_async(), it will
827 		 * not acquire and drop the XPT lock around the call to
828 		 * xpt_action().  xpt_action() asserts that the caller
829 		 * holds the SIM lock, so the SIM lock has to be held when
830 		 * calling xpt_register_async() when the path is specified.
831 		 *
832 		 * But xpt_register_async calls xpt_for_all_devices(),
833 		 * which calls xptbustraverse(), which will acquire each
834 		 * SIM lock.  When it traverses our particular bus, it will
835 		 * necessarily acquire the SIM lock, which will lead to a
836 		 * recursive lock acquisition.
837 		 *
838 		 * The CAM locking changes fix this problem by acquiring
839 		 * the XPT topology lock around bus traversal in
840 		 * xptbustraverse(), so the caller can hold the SIM lock
841 		 * and it does not cause a recursive lock acquisition.
842 		 *
843 		 * These __FreeBSD_version values are approximate, especially
844 		 * for stable/10, which is two months later than the actual
845 		 * change.
846 		 */
847 
848 #if (__FreeBSD_version < 1000703) || \
849     ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
850 		mpr_unlock(sc);
851 		status = xpt_register_async(event, mprsas_async, sc,
852 					    NULL);
853 		mpr_lock(sc);
854 #else
855 		status = xpt_register_async(event, mprsas_async, sc,
856 					    sassc->path);
857 #endif
858 
859 		if (status != CAM_REQ_CMP) {
860 			mpr_dprint(sc, MPR_ERROR,
861 			    "Error %#x registering async handler for "
862 			    "AC_ADVINFO_CHANGED events\n", status);
863 			xpt_free_path(sassc->path);
864 			sassc->path = NULL;
865 		}
866 	}
867 	if (status != CAM_REQ_CMP) {
868 		/*
869 		 * EEDP use is the exception, not the rule.
870 		 * Warn the user, but do not fail to attach.
871 		 */
872 		mpr_printf(sc, "EEDP capabilities disabled.\n");
873 	}
874 
875 	mpr_unlock(sc);
876 
877 	mprsas_register_events(sc);
878 out:
879 	if (error)
880 		mpr_detach_sas(sc);
881 	return (error);
882 }
883 
884 int
885 mpr_detach_sas(struct mpr_softc *sc)
886 {
887 	struct mprsas_softc *sassc;
888 	struct mprsas_lun *lun, *lun_tmp;
889 	struct mprsas_target *targ;
890 	int i;
891 
892 	MPR_FUNCTRACE(sc);
893 
894 	if (sc->sassc == NULL)
895 		return (0);
896 
897 	sassc = sc->sassc;
898 	mpr_deregister_events(sc, sassc->mprsas_eh);
899 
900 	/*
901 	 * Drain and free the event handling taskqueue with the lock
902 	 * unheld so that any parallel processing tasks drain properly
903 	 * without deadlocking.
904 	 */
905 	if (sassc->ev_tq != NULL)
906 		taskqueue_free(sassc->ev_tq);
907 
908 	/* Make sure CAM doesn't wedge if we had to bail out early. */
909 	mpr_lock(sc);
910 
911 	/* Deregister our async handler */
912 	if (sassc->path != NULL) {
913 		xpt_register_async(0, mprsas_async, sc, sassc->path);
914 		xpt_free_path(sassc->path);
915 		sassc->path = NULL;
916 	}
917 
918 	if (sassc->flags & MPRSAS_IN_STARTUP)
919 		xpt_release_simq(sassc->sim, 1);
920 
921 	if (sassc->sim != NULL) {
922 		xpt_bus_deregister(cam_sim_path(sassc->sim));
923 		cam_sim_free(sassc->sim, FALSE);
924 	}
925 
926 	sassc->flags |= MPRSAS_SHUTDOWN;
927 	mpr_unlock(sc);
928 
929 	if (sassc->devq != NULL)
930 		cam_simq_free(sassc->devq);
931 
932 	for (i = 0; i < sassc->maxtargets; i++) {
933 		targ = &sassc->targets[i];
934 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
935 			free(lun, M_MPR);
936 		}
937 	}
938 	free(sassc->targets, M_MPR);
939 	free(sassc, M_MPR);
940 	sc->sassc = NULL;
941 
942 	return (0);
943 }
944 
945 void
946 mprsas_discovery_end(struct mprsas_softc *sassc)
947 {
948 	struct mpr_softc *sc = sassc->sc;
949 
950 	MPR_FUNCTRACE(sc);
951 
952 	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
953 		callout_stop(&sassc->discovery_callout);
954 
955 }
956 
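/*
 * CAM action entry point.  Most CCB types are completed inline and fall
 * through to the xpt_done() at the bottom of the switch; XPT_RESET_DEV,
 * XPT_SCSI_IO, and XPT_SMP_IO return early because their handlers either
 * complete the CCB themselves or defer completion to a firmware command's
 * callback.
 */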
957 static void
958 mprsas_action(struct cam_sim *sim, union ccb *ccb)
959 {
960 	struct mprsas_softc *sassc;
961 
962 	sassc = cam_sim_softc(sim);
963 
964 	MPR_FUNCTRACE(sassc->sc);
965 	mpr_dprint(sassc->sc, MPR_TRACE, "%s func 0x%x\n", __func__,
966 	    ccb->ccb_h.func_code);
967 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
968 
969 	switch (ccb->ccb_h.func_code) {
970 	case XPT_PATH_INQ:
971 	{
972 		struct ccb_pathinq *cpi = &ccb->cpi;
973 
974 		cpi->version_num = 1;
975 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
976 		cpi->target_sprt = 0;
977 #if (__FreeBSD_version >= 1000039) || \
978     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
979 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
980 #else
981 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
982 #endif
983 		cpi->hba_eng_cnt = 0;
984 		cpi->max_target = sassc->maxtargets - 1;
985 		cpi->max_lun = 255;
986 		cpi->initiator_id = sassc->maxtargets - 1;
987 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
988 		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
989 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
990 		cpi->unit_number = cam_sim_unit(sim);
991 		cpi->bus_id = cam_sim_bus(sim);
992 		/*
993 		 * XXXSLM-I think this needs to change based on config page or
994 		 * something instead of hardcoded to 150000.
995 		 */
996 		cpi->base_transfer_speed = 150000;
997 		cpi->transport = XPORT_SAS;
998 		cpi->transport_version = 0;
999 		cpi->protocol = PROTO_SCSI;
1000 		cpi->protocol_version = SCSI_REV_SPC;
1001 #if __FreeBSD_version >= 800001
1002 		/*
1003 		 * XXXSLM-probably need to base this number on max SGL's and
1004 		 * page size.
1005 		 */
1006 		cpi->maxio = 256 * 1024;
1007 #endif
1008 		cpi->ccb_h.status = CAM_REQ_CMP;
1009 		break;
1010 	}
1011 	case XPT_GET_TRAN_SETTINGS:
1012 	{
1013 		struct ccb_trans_settings	*cts;
1014 		struct ccb_trans_settings_sas	*sas;
1015 		struct ccb_trans_settings_scsi	*scsi;
1016 		struct mprsas_target *targ;
1017 
1018 		cts = &ccb->cts;
1019 		sas = &cts->xport_specific.sas;
1020 		scsi = &cts->proto_specific.scsi;
1021 
1022 		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1023 		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
1024 		    cts->ccb_h.target_id));
1025 		targ = &sassc->targets[cts->ccb_h.target_id];
1026 		if (targ->handle == 0x0) {
1027 			cts->ccb_h.status = CAM_DEV_NOT_THERE;
1028 			break;
1029 		}
1030 
1031 		cts->protocol_version = SCSI_REV_SPC2;
1032 		cts->transport = XPORT_SAS;
1033 		cts->transport_version = 0;
1034 
1035 		sas->valid = CTS_SAS_VALID_SPEED;
1036 		switch (targ->linkrate) {
1037 		case 0x08:
1038 			sas->bitrate = 150000;
1039 			break;
1040 		case 0x09:
1041 			sas->bitrate = 300000;
1042 			break;
1043 		case 0x0a:
1044 			sas->bitrate = 600000;
1045 			break;
1046 		case 0x0b:
1047 			sas->bitrate = 1200000;
1048 			break;
1049 		default:
1050 			sas->valid = 0;
1051 		}
1052 
1053 		cts->protocol = PROTO_SCSI;
1054 		scsi->valid = CTS_SCSI_VALID_TQ;
1055 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1056 
1057 		cts->ccb_h.status = CAM_REQ_CMP;
1058 		break;
1059 	}
1060 	case XPT_CALC_GEOMETRY:
1061 		cam_calc_geometry(&ccb->ccg, /*extended*/1);
1062 		ccb->ccb_h.status = CAM_REQ_CMP;
1063 		break;
1064 	case XPT_RESET_DEV:
1065 		mpr_dprint(sassc->sc, MPR_XINFO,
1066 		    "mprsas_action XPT_RESET_DEV\n");
1067 		mprsas_action_resetdev(sassc, ccb);
1068 		return;
1069 	case XPT_RESET_BUS:
1070 	case XPT_ABORT:
1071 	case XPT_TERM_IO:
1072 		mpr_dprint(sassc->sc, MPR_XINFO,
1073 		    "mprsas_action faking success for abort or reset\n");
1074 		ccb->ccb_h.status = CAM_REQ_CMP;
1075 		break;
1076 	case XPT_SCSI_IO:
1077 		mprsas_action_scsiio(sassc, ccb);
1078 		return;
1079 #if __FreeBSD_version >= 900026
1080 	case XPT_SMP_IO:
1081 		mprsas_action_smpio(sassc, ccb);
1082 		return;
1083 #endif
1084 	default:
1085 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1086 		break;
1087 	}
1088 	xpt_done(ccb);
1089 
1090 }
1091 
1092 static void
1093 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1094     target_id_t target_id, lun_id_t lun_id)
1095 {
1096 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1097 	struct cam_path *path;
1098 
1099 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1100 	    ac_code, target_id, (uintmax_t)lun_id);
1101 
1102 	if (xpt_create_path(&path, NULL,
1103 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1104 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1105 			   "notification\n");
1106 		return;
1107 	}
1108 
1109 	xpt_async(ac_code, path, NULL);
1110 	xpt_free_path(path);
1111 }
1112 
1113 static void
1114 mprsas_complete_all_commands(struct mpr_softc *sc)
1115 {
1116 	struct mpr_command *cm;
1117 	int i;
1118 	int completed;
1119 
1120 	MPR_FUNCTRACE(sc);
1121 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1122 
1123 	/* complete all commands with a NULL reply */
1124 	for (i = 1; i < sc->num_reqs; i++) {
1125 		cm = &sc->commands[i];
1126 		cm->cm_reply = NULL;
1127 		completed = 0;
1128 
1129 		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
1130 			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
1131 
1132 		if (cm->cm_complete != NULL) {
1133 			mprsas_log_command(cm, MPR_RECOVERY,
1134 			    "completing cm %p state %x ccb %p for diag reset\n",
1135 			    cm, cm->cm_state, cm->cm_ccb);
1136 			cm->cm_complete(sc, cm);
1137 			completed = 1;
1138 		}
1139 
1140 		if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
1141 			mprsas_log_command(cm, MPR_RECOVERY,
1142 			    "waking up cm %p state %x ccb %p for diag reset\n",
1143 			    cm, cm->cm_state, cm->cm_ccb);
1144 			wakeup(cm);
1145 			completed = 1;
1146 		}
1147 
1148 		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
1149 			/* this should never happen, but if it does, log */
1150 			mprsas_log_command(cm, MPR_RECOVERY,
1151 			    "cm %p state %x flags 0x%x ccb %p during diag "
1152 			    "reset\n", cm, cm->cm_state, cm->cm_flags,
1153 			    cm->cm_ccb);
1154 		}
1155 	}
1156 }
1157 
1158 void
1159 mprsas_handle_reinit(struct mpr_softc *sc)
1160 {
1161 	int i;
1162 
1163 	/* Go back into startup mode and freeze the simq, so that CAM
1164 	 * doesn't send any commands until after we've rediscovered all
1165 	 * targets and found the proper device handles for them.
1166 	 *
1167 	 * After the reset, portenable will trigger discovery, and after all
1168 	 * discovery-related activities have finished, the simq will be
1169 	 * released.
1170 	 */
1171 	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1172 	sc->sassc->flags |= MPRSAS_IN_STARTUP;
1173 	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1174 	mprsas_startup_increment(sc->sassc);
1175 
1176 	/* notify CAM of a bus reset */
1177 	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1178 	    CAM_LUN_WILDCARD);
1179 
1180 	/* complete and cleanup after all outstanding commands */
1181 	mprsas_complete_all_commands(sc);
1182 
1183 	mpr_dprint(sc, MPR_INIT, "%s startup %u tm %u after command "
1184 	    "completion\n", __func__, sc->sassc->startup_refcount,
1185 	    sc->sassc->tm_count);
1186 
1187 	/* zero all the target handles, since they may change after the
1188 	 * reset, and we have to rediscover all the targets and use the new
1189 	 * handles.
1190 	 */
1191 	for (i = 0; i < sc->sassc->maxtargets; i++) {
1192 		if (sc->sassc->targets[i].outstanding != 0)
1193 			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1194 			    i, sc->sassc->targets[i].outstanding);
1195 		sc->sassc->targets[i].handle = 0x0;
1196 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1197 		sc->sassc->targets[i].outstanding = 0;
1198 		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
1199 	}
1200 }
1201 static void
1202 mprsas_tm_timeout(void *data)
1203 {
1204 	struct mpr_command *tm = data;
1205 	struct mpr_softc *sc = tm->cm_sc;
1206 
1207 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1208 
1209 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY,
1210 	    "task mgmt %p timed out\n", tm);
1211 	mpr_reinit(sc);
1212 }
1213 
1214 static void
1215 mprsas_logical_unit_reset_complete(struct mpr_softc *sc,
1216     struct mpr_command *tm)
1217 {
1218 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1219 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1220 	unsigned int cm_count = 0;
1221 	struct mpr_command *cm;
1222 	struct mprsas_target *targ;
1223 
1224 	callout_stop(&tm->cm_callout);
1225 
1226 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1227 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1228 	targ = tm->cm_targ;
1229 
1230 	/*
1231 	 * Currently there should be no way we can hit this case.  It only
1232 	 * happens when we have a failure to allocate chain frames, and
1233 	 * task management commands don't have S/G lists.
1234 	 */
1235 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1236 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! "
1237 		    "This should not happen!\n", __func__, tm->cm_flags);
1238 		mprsas_free_tm(sc, tm);
1239 		return;
1240 	}
1241 
1242 	if (reply == NULL) {
1243 		mprsas_log_command(tm, MPR_RECOVERY,
1244 		    "NULL reset reply for tm %p\n", tm);
1245 		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1246 			/* this completion was due to a reset, just cleanup */
1247 			targ->flags &= ~MPRSAS_TARGET_INRESET;
1248 			targ->tm = NULL;
1249 			mprsas_free_tm(sc, tm);
1250 		}
1251 		else {
1252 			/* we should have gotten a reply. */
1253 			mpr_reinit(sc);
1254 		}
1255 		return;
1256 	}
1257 
1258 	mprsas_log_command(tm, MPR_RECOVERY,
1259 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1260 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1261 	    le32toh(reply->TerminationCount));
1262 
1263 	/* See if there are any outstanding commands for this LUN.
1264 	 * This could be made more efficient by using a per-LU data
1265 	 * structure of some sort.
1266 	 */
1267 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1268 		if (cm->cm_lun == tm->cm_lun)
1269 			cm_count++;
1270 	}
1271 
1272 	if (cm_count == 0) {
1273 		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1274 		    "logical unit %u finished recovery after reset\n",
1275 		    tm->cm_lun);
1276 
1277 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1278 		    tm->cm_lun);
1279 
1280 		/* we've finished recovery for this logical unit.  check and
1281 		 * see if some other logical unit has a timedout command
1282 		 * that needs to be processed.
1283 		 */
1284 		cm = TAILQ_FIRST(&targ->timedout_commands);
1285 		if (cm) {
1286 			mprsas_send_abort(sc, tm, cm);
1287 		}
1288 		else {
1289 			targ->tm = NULL;
1290 			mprsas_free_tm(sc, tm);
1291 		}
1292 	}
1293 	else {
1294 		/* if we still have commands for this LUN, the reset
1295 		 * effectively failed, regardless of the status reported.
1296 		 * Escalate to a target reset.
1297 		 */
1298 		mprsas_log_command(tm, MPR_RECOVERY,
1299 		    "logical unit reset complete for tm %p, but still have %u "
1300 		    "command(s)\n", tm, cm_count);
1301 		mprsas_send_reset(sc, tm,
1302 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1303 	}
1304 }
1305 
1306 static void
1307 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1308 {
1309 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1310 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1311 	struct mprsas_target *targ;
1312 
1313 	callout_stop(&tm->cm_callout);
1314 
1315 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1316 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1317 	targ = tm->cm_targ;
1318 
1319 	/*
1320 	 * Currently there should be no way we can hit this case.  It only
1321 	 * happens when we have a failure to allocate chain frames, and
1322 	 * task management commands don't have S/G lists.
1323 	 */
1324 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1325 		mpr_dprint(sc, MPR_ERROR,"%s: cm_flags = %#x for target reset! "
1326 		    "This should not happen!\n", __func__, tm->cm_flags);
1327 		mprsas_free_tm(sc, tm);
1328 		return;
1329 	}
1330 
1331 	if (reply == NULL) {
1332 		mprsas_log_command(tm, MPR_RECOVERY,
1333 		    "NULL reset reply for tm %p\n", tm);
1334 		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1335 			/* this completion was due to a reset, just cleanup */
1336 			targ->flags &= ~MPRSAS_TARGET_INRESET;
1337 			targ->tm = NULL;
1338 			mprsas_free_tm(sc, tm);
1339 		}
1340 		else {
1341 			/* we should have gotten a reply. */
1342 			mpr_reinit(sc);
1343 		}
1344 		return;
1345 	}
1346 
1347 	mprsas_log_command(tm, MPR_RECOVERY,
1348 	    "target reset status 0x%x code 0x%x count %u\n",
1349 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1350 	    le32toh(reply->TerminationCount));
1351 
1352 	targ->flags &= ~MPRSAS_TARGET_INRESET;
1353 
1354 	if (targ->outstanding == 0) {
1355 		/* we've finished recovery for this target and all
1356 		 * of its logical units.
1357 		 */
1358 		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1359 		    "recovery finished after target reset\n");
1360 
1361 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1362 		    CAM_LUN_WILDCARD);
1363 
1364 		targ->tm = NULL;
1365 		mprsas_free_tm(sc, tm);
1366 	}
1367 	else {
1368 		/* after a target reset, if this target still has
1369 		 * outstanding commands, the reset effectively failed,
1370 		 * regardless of the status reported.  escalate.
1371 		 */
1372 		mprsas_log_command(tm, MPR_RECOVERY,
1373 		    "target reset complete for tm %p, but still have %u "
1374 		    "command(s)\n", tm, targ->outstanding);
1375 		mpr_reinit(sc);
1376 	}
1377 }
1378 
1379 #define MPR_RESET_TIMEOUT 30
1380 
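/*
 * Send a logical unit or target reset TM for the target attached to tm.
 * The caller supplies a command obtained from mprsas_alloc_tm(); the
 * matching *_complete handler either continues recovery or releases the
 * TM with mprsas_free_tm().
 */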
1381 static int
1382 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
1383 {
1384 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1385 	struct mprsas_target *target;
1386 	int err;
1387 
1388 	target = tm->cm_targ;
1389 	if (target->handle == 0) {
1390 		mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1391 		    __func__, target->tid);
1392 		return -1;
1393 	}
1394 
1395 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1396 	req->DevHandle = htole16(target->handle);
1397 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1398 	req->TaskType = type;
1399 
1400 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1401 		/* XXX Need to handle invalid LUNs */
1402 		MPR_SET_LUN(req->LUN, tm->cm_lun);
1403 		tm->cm_targ->logical_unit_resets++;
1404 		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1405 		    "sending logical unit reset\n");
1406 		tm->cm_complete = mprsas_logical_unit_reset_complete;
1407 	}
1408 	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1409 		/*
1410 		 * Target reset method =
1411 		 *     SAS Hard Link Reset / SATA Link Reset
1412 		 */
1413 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1414 		tm->cm_targ->target_resets++;
1415 		tm->cm_targ->flags |= MPRSAS_TARGET_INRESET;
1416 		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1417 		    "sending target reset\n");
1418 		tm->cm_complete = mprsas_target_reset_complete;
1419 	}
1420 	else {
1421 		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
1422 		return -1;
1423 	}
1424 
1425 	mpr_dprint(sc, MPR_XINFO, "to target %u handle 0x%04x\n", target->tid,
1426 	    target->handle);
1427 	if (target->encl_level_valid) {
1428 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
1429 		    "connector name (%4s)\n", target->encl_level,
1430 		    target->encl_slot, target->connector_name);
1431 	}
1432 
1433 	tm->cm_data = NULL;
1434 	tm->cm_desc.HighPriority.RequestFlags =
1435 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1436 	tm->cm_complete_data = (void *)tm;
1437 
1438 	callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
1439 	    mprsas_tm_timeout, tm);
1440 
1441 	err = mpr_map_command(sc, tm);
1442 	if (err)
1443 		mprsas_log_command(tm, MPR_RECOVERY,
1444 		    "error %d sending reset type %u\n",
1445 		    err, type);
1446 
1447 	return err;
1448 }
1449 
1450 
1451 static void
1452 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
1453 {
1454 	struct mpr_command *cm;
1455 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1456 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1457 	struct mprsas_target *targ;
1458 
1459 	callout_stop(&tm->cm_callout);
1460 
1461 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1462 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1463 	targ = tm->cm_targ;
1464 
1465 	/*
1466 	 * Currently there should be no way we can hit this case.  It only
1467 	 * happens when we have a failure to allocate chain frames, and
1468 	 * task management commands don't have S/G lists.
1469 	 */
1470 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1471 		mprsas_log_command(tm, MPR_RECOVERY,
1472 		    "cm_flags = %#x for abort %p TaskMID %u!\n",
1473 		    tm->cm_flags, tm, le16toh(req->TaskMID));
1474 		mprsas_free_tm(sc, tm);
1475 		return;
1476 	}
1477 
1478 	if (reply == NULL) {
1479 		mprsas_log_command(tm, MPR_RECOVERY,
1480 		    "NULL abort reply for tm %p TaskMID %u\n",
1481 		    tm, le16toh(req->TaskMID));
1482 		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1483 			/* this completion was due to a reset, just cleanup */
1484 			targ->tm = NULL;
1485 			mprsas_free_tm(sc, tm);
1486 		}
1487 		else {
1488 			/* we should have gotten a reply. */
1489 			mpr_reinit(sc);
1490 		}
1491 		return;
1492 	}
1493 
1494 	mprsas_log_command(tm, MPR_RECOVERY,
1495 	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1496 	    le16toh(req->TaskMID),
1497 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1498 	    le32toh(reply->TerminationCount));
1499 
1500 	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1501 	if (cm == NULL) {
1502 		/* if there are no more timedout commands, we're done with
1503 		 * error recovery for this target.
1504 		 */
1505 		mprsas_log_command(tm, MPR_RECOVERY,
1506 		    "finished recovery after aborting TaskMID %u\n",
1507 		    le16toh(req->TaskMID));
1508 
1509 		targ->tm = NULL;
1510 		mprsas_free_tm(sc, tm);
1511 	}
1512 	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1513 		/* abort success, but we have more timedout commands to abort */
1514 		mprsas_log_command(tm, MPR_RECOVERY,
1515 		    "continuing recovery after aborting TaskMID %u\n",
1516 		    le16toh(req->TaskMID));
1517 
1518 		mprsas_send_abort(sc, tm, cm);
1519 	}
1520 	else {
1521 		/* we didn't get a command completion, so the abort
1522 		 * failed as far as we're concerned.  escalate.
1523 		 */
1524 		mprsas_log_command(tm, MPR_RECOVERY,
1525 		    "abort failed for TaskMID %u tm %p\n",
1526 		    le16toh(req->TaskMID), tm);
1527 
1528 		mprsas_send_reset(sc, tm,
1529 		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1530 	}
1531 }
1532 
1533 #define MPR_ABORT_TIMEOUT 5
1534 
1535 static int
1536 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1537     struct mpr_command *cm)
1538 {
1539 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1540 	struct mprsas_target *targ;
1541 	int err;
1542 
1543 	targ = cm->cm_targ;
1544 	if (targ->handle == 0) {
1545 		mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1546 		    __func__, cm->cm_ccb->ccb_h.target_id);
1547 		return -1;
1548 	}
1549 
1550 	mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1551 	    "Aborting command %p\n", cm);
1552 
1553 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1554 	req->DevHandle = htole16(targ->handle);
1555 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1556 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1557 
1558 	/* XXX Need to handle invalid LUNs */
1559 	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1560 
1561 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1562 
1563 	tm->cm_data = NULL;
1564 	tm->cm_desc.HighPriority.RequestFlags =
1565 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1566 	tm->cm_complete = mprsas_abort_complete;
1567 	tm->cm_complete_data = (void *)tm;
1568 	tm->cm_targ = cm->cm_targ;
1569 	tm->cm_lun = cm->cm_lun;
1570 
1571 	callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1572 	    mprsas_tm_timeout, tm);
1573 
1574 	targ->aborts++;
1575 
1576 	err = mpr_map_command(sc, tm);
1577 	if (err)
1578 		mprsas_log_command(tm, MPR_RECOVERY,
1579 		    "error %d sending abort for cm %p SMID %u\n",
1580 		    err, cm, req->TaskMID);
1581 	return err;
1582 }
1583 
1584 
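/*
 * Error recovery escalates in stages: a SCSI I/O timeout first issues an
 * ABORT_TASK TM via mprsas_send_abort(); if the timed-out command still
 * doesn't complete, mprsas_abort_complete() escalates to a logical unit
 * reset; if commands remain after that, mprsas_logical_unit_reset_complete()
 * escalates to a target reset; and if the target still has outstanding
 * commands after the target reset (or a TM itself times out), the driver
 * falls back to a full diag reset via mpr_reinit().
 */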
1585 static void
1586 mprsas_scsiio_timeout(void *data)
1587 {
1588 	struct mpr_softc *sc;
1589 	struct mpr_command *cm;
1590 	struct mprsas_target *targ;
1591 
1592 	cm = (struct mpr_command *)data;
1593 	sc = cm->cm_sc;
1594 
1595 	MPR_FUNCTRACE(sc);
1596 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1597 
1598 	mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm);
1599 
1600 	/*
1601 	 * Run the interrupt handler to make sure it's not pending.  This
1602 	 * isn't perfect because the command could have already completed
1603 	 * and been re-used, though this is unlikely.
1604 	 */
1605 	mpr_intr_locked(sc);
1606 	if (cm->cm_state == MPR_CM_STATE_FREE) {
1607 		mprsas_log_command(cm, MPR_XINFO,
1608 		    "SCSI command %p almost timed out\n", cm);
1609 		return;
1610 	}
1611 
1612 	if (cm->cm_ccb == NULL) {
1613 		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
1614 		return;
1615 	}
1616 
1617 	targ = cm->cm_targ;
1618 	targ->timeouts++;
1619 
1620 	mprsas_log_command(cm, MPR_XINFO, "command timeout cm %p ccb %p "
1621 	    "target %u, handle(0x%04x)\n", cm, cm->cm_ccb, targ->tid,
1622 	    targ->handle);
1623 	if (targ->encl_level_valid) {
1624 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
1625 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
1626 		    targ->connector_name);
1627 	}
1628 
1629 	/* XXX first, check the firmware state, to see if it's still
1630 	 * operational.  if not, do a diag reset.
1631 	 */
1632 
1633 	cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1634 	cm->cm_state = MPR_CM_STATE_TIMEDOUT;
1635 	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1636 
1637 	if (targ->tm != NULL) {
1638 		/* target already in recovery, just queue up another
1639 		 * timedout command to be processed later.
1640 		 */
1641 		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
1642 		    "processing by tm %p\n", cm, targ->tm);
1643 	}
1644 	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
1645 		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
1646 		    cm, targ->tm);
1647 
1648 		/* start recovery by aborting the first timedout command */
1649 		mprsas_send_abort(sc, targ->tm, cm);
1650 	}
1651 	else {
1652 		/* XXX queue this target up for recovery once a TM becomes
1653 		 * available.  The firmware only has a limited number of
1654 		 * HighPriority credits for the high priority requests used
1655 		 * for task management, and we ran out.
1656 		 *
1657 		 * Isilon: don't worry about this for now, since we have
1658 		 * more credits than disks in an enclosure, and limit
1659 		 * ourselves to one TM per target for recovery.
1660 		 */
1661 		mpr_dprint(sc, MPR_RECOVERY,
1662 		    "timedout cm %p failed to allocate a tm\n", cm);
1663 	}
1664 }
1665 
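/*
 * Translate an XPT_SCSI_IO CCB into an MPI2 SCSI IO request: validate the
 * target, allocate a command (requeueing the CCB and freezing the simq if
 * none are available), and fill in the data direction, tag action, LUN,
 * and CDB.  The EEDP flags (see op_code_prot[] above) are set up further
 * down in the function before the command is mapped to the hardware.
 */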
1666 static void
1667 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1668 {
1669 	MPI2_SCSI_IO_REQUEST *req;
1670 	struct ccb_scsiio *csio;
1671 	struct mpr_softc *sc;
1672 	struct mprsas_target *targ;
1673 	struct mprsas_lun *lun;
1674 	struct mpr_command *cm;
1675 	uint8_t i, lba_byte, *ref_tag_addr;
1676 	uint16_t eedp_flags;
1677 	uint32_t mpi_control;
1678 
1679 	sc = sassc->sc;
1680 	MPR_FUNCTRACE(sc);
1681 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1682 
1683 	csio = &ccb->csio;
1684 	targ = &sassc->targets[csio->ccb_h.target_id];
1685 	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
1686 	if (targ->handle == 0x0) {
1687 		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1688 		    __func__, csio->ccb_h.target_id);
1689 		csio->ccb_h.status = CAM_DEV_NOT_THERE;
1690 		xpt_done(ccb);
1691 		return;
1692 	}
1693 	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1694 		mpr_dprint(sc, MPR_TRACE, "%s Raid component no SCSI IO "
1695 		    "supported %u\n", __func__, csio->ccb_h.target_id);
1696 		csio->ccb_h.status = CAM_DEV_NOT_THERE;
1697 		xpt_done(ccb);
1698 		return;
1699 	}
1700 	/*
1701 	 * Sometimes, it is possible to get a command that is not "In
1702 	 * Progress" and was actually aborted by the upper layer.  Check for
1703 	 * this here and complete the command without error.
1704 	 */
1705 	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
1706 		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1707 		    "target %u\n", __func__, csio->ccb_h.target_id);
1708 		xpt_done(ccb);
1709 		return;
1710 	}
1711 	/*
1712 	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
1713 	 * that the volume has timed out.  We want volumes to be enumerated
1714 	 * until they are deleted/removed, not just failed.
1715 	 */
1716 	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1717 		if (targ->devinfo == 0)
1718 			csio->ccb_h.status = CAM_REQ_CMP;
1719 		else
1720 			csio->ccb_h.status = CAM_SEL_TIMEOUT;
1721 		xpt_done(ccb);
1722 		return;
1723 	}
1724 
1725 	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1726 		mpr_dprint(sc, MPR_TRACE, "%s shutting down\n", __func__);
1727 		csio->ccb_h.status = CAM_DEV_NOT_THERE;
1728 		xpt_done(ccb);
1729 		return;
1730 	}
1731 
1732 	cm = mpr_alloc_command(sc);
1733 	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1734 		if (cm != NULL) {
1735 			mpr_free_command(sc, cm);
1736 		}
1737 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
1738 			xpt_freeze_simq(sassc->sim, 1);
1739 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
1740 		}
1741 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1742 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1743 		xpt_done(ccb);
1744 		return;
1745 	}
1746 
1747 	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1748 	bzero(req, sizeof(*req));
1749 	req->DevHandle = htole16(targ->handle);
1750 	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1751 	req->MsgFlags = 0;
1752 	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1753 	req->SenseBufferLength = MPR_SENSE_LEN;
1754 	req->SGLFlags = 0;
1755 	req->ChainOffset = 0;
1756 	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1 = 0;
	req->SGLOffset2 = 0;
	req->SGLOffset3 = 0;
1760 	req->SkipCount = 0;
1761 	req->DataLength = htole32(csio->dxfer_len);
1762 	req->BidirectionalDataLength = 0;
1763 	req->IoFlags = htole16(csio->cdb_len);
1764 	req->EEDPFlags = 0;
1765 
1766 	/* Note: BiDirectional transfers are not supported */
1767 	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1768 	case CAM_DIR_IN:
1769 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
1770 		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
1771 		break;
1772 	case CAM_DIR_OUT:
1773 		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1774 		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
1775 		break;
1776 	case CAM_DIR_NONE:
1777 	default:
1778 		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1779 		break;
1780 	}
1781 
1782 	if (csio->cdb_len == 32)
1783 		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1784 	/*
1785 	 * It looks like the hardware doesn't require an explicit tag
1786 	 * number for each transaction.  SAM Task Management not supported
1787 	 * at the moment.
1788 	 */
1789 	switch (csio->tag_action) {
1790 	case MSG_HEAD_OF_Q_TAG:
1791 		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1792 		break;
1793 	case MSG_ORDERED_Q_TAG:
1794 		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1795 		break;
1796 	case MSG_ACA_TASK:
1797 		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1798 		break;
1799 	case CAM_TAG_ACTION_NONE:
1800 	case MSG_SIMPLE_Q_TAG:
1801 	default:
1802 		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
1803 		break;
1804 	}
1805 	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1806 	req->Control = htole32(mpi_control);
1807 
1808 	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1809 		mpr_free_command(sc, cm);
1810 		ccb->ccb_h.status = CAM_LUN_INVALID;
1811 		xpt_done(ccb);
1812 		return;
1813 	}
1814 
1815 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
1816 		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1817 	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],
		    csio->cdb_len);
1819 	req->IoFlags = htole16(csio->cdb_len);
1820 
1821 	/*
1822 	 * Check if EEDP is supported and enabled.  If it is then check if the
1823 	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
1824 	 * is formatted for EEDP support.  If all of this is true, set CDB up
1825 	 * for EEDP transfer.
1826 	 */
1827 	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1828 	if (sc->eedp_enabled && eedp_flags) {
1829 		SLIST_FOREACH(lun, &targ->luns, lun_link) {
1830 			if (lun->lun_id == csio->ccb_h.target_lun) {
1831 				break;
1832 			}
1833 		}
1834 
1835 		if ((lun != NULL) && (lun->eedp_formatted)) {
1836 			req->EEDPBlockSize = htole16(lun->eedp_block_size);
1837 			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1838 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1839 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1840 			req->EEDPFlags = htole16(eedp_flags);
1841 
1842 			/*
1843 			 * If CDB less than 32, fill in Primary Ref Tag with
1844 			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
1845 			 * already there.  Also, set protection bit.  FreeBSD
1846 			 * currently does not support CDBs bigger than 16, but
			 * the code is harmless and will be here for the
			 * future.
1849 			 */
1850 			if (csio->cdb_len != 32) {
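				/*
				 * The 4-byte LBA starts at CDB byte 2 for
				 * 10- and 12-byte CDBs; for 16-byte CDBs the
				 * low 4 bytes of the 8-byte LBA start at
				 * byte 6.
				 */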
1851 				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1852 				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1853 				    PrimaryReferenceTag;
1854 				for (i = 0; i < 4; i++) {
1855 					*ref_tag_addr =
1856 					    req->CDB.CDB32[lba_byte + i];
1857 					ref_tag_addr++;
1858 				}
1859 				req->CDB.EEDP32.PrimaryReferenceTag =
1860 				    htole32(req->
1861 				    CDB.EEDP32.PrimaryReferenceTag);
1862 				req->CDB.EEDP32.PrimaryApplicationTagMask =
1863 				    0xFFFF;
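				/*
				 * Set the RDPROTECT/WRPROTECT field (upper
				 * three bits of CDB byte 1) to 001b so the
				 * target transfers and checks protection
				 * information.
				 */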
1864 				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1865 				    0x20;
1866 			} else {
1867 				eedp_flags |=
1868 				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1869 				req->EEDPFlags = htole16(eedp_flags);
1870 				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1871 				    0x1F) | 0x20;
1872 			}
1873 		}
1874 	}
1875 
1876 	cm->cm_length = csio->dxfer_len;
1877 	if (cm->cm_length != 0) {
1878 		cm->cm_data = ccb;
1879 		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
1880 	} else {
1881 		cm->cm_data = NULL;
1882 	}
1883 	cm->cm_sge = &req->SGL;
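	/*
	 * The SGL is embedded in the request frame: SGLOffset0 above puts it
	 * at 32-bit word 24, so (32 - 24) * 4 bytes of the frame are left
	 * for SG elements before chaining is needed.
	 */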
1884 	cm->cm_sglsize = (32 - 24) * 4;
1885 	cm->cm_complete = mprsas_scsiio_complete;
1886 	cm->cm_complete_data = ccb;
1887 	cm->cm_targ = targ;
1888 	cm->cm_lun = csio->ccb_h.target_lun;
1889 	cm->cm_ccb = ccb;
1890 	/*
1891 	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
1892 	 * and set descriptor type.
1893 	 */
1894 	if (targ->scsi_req_desc_type ==
1895 	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
1896 		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
1897 		cm->cm_desc.FastPathSCSIIO.RequestFlags =
1898 		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
1899 		cm->cm_desc.FastPathSCSIIO.DevHandle = htole16(targ->handle);
1900 	} else {
1901 		cm->cm_desc.SCSIIO.RequestFlags =
1902 		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1903 		cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1904 	}
1905 
1906 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1907 	   mprsas_scsiio_timeout, cm, 0);
1908 
1909 	targ->issued++;
1910 	targ->outstanding++;
1911 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1912 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1913 
1914 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1915 	    __func__, cm, ccb, targ->outstanding);
1916 
1917 	mpr_map_command(sc, cm);
1918 	return;
1919 }
1920 
1921 static void
1922 mpr_response_code(struct mpr_softc *sc, u8 response_code)
1923 {
1924         char *desc;
1925 
1926         switch (response_code) {
1927         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1928                 desc = "task management request completed";
1929                 break;
1930         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1931                 desc = "invalid frame";
1932                 break;
1933         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1934                 desc = "task management request not supported";
1935                 break;
1936         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1937                 desc = "task management request failed";
1938                 break;
1939         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1940                 desc = "task management request succeeded";
1941                 break;
1942         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1943                 desc = "invalid lun";
1944                 break;
1945         case 0xA:
1946                 desc = "overlapped tag attempted";
1947                 break;
1948         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1949                 desc = "task queued, however not sent to target";
1950                 break;
1951         default:
1952                 desc = "unknown";
1953                 break;
1954         }
1955 	mpr_dprint(sc, MPR_XINFO, "response_code(0x%01x): %s\n", response_code,
1956 	    desc);
1957 }
1958 
1959 /**
 * mpr_sc_failed_io_info - translate a non-successful SCSI_IO request
1961  */
1962 static void
1963 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
1964     Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
1965 {
1966 	u32 response_info;
1967 	u8 *response_bytes;
1968 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1969 	    MPI2_IOCSTATUS_MASK;
1970 	u8 scsi_state = mpi_reply->SCSIState;
1971 	u8 scsi_status = mpi_reply->SCSIStatus;
1972 	char *desc_ioc_state = NULL;
1973 	char *desc_scsi_status = NULL;
1974 	char *desc_scsi_state = sc->tmp_string;
1975 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1976 
1977 	if (log_info == 0x31170000)
1978 		return;
1979 
1980 	switch (ioc_status) {
1981 	case MPI2_IOCSTATUS_SUCCESS:
1982 		desc_ioc_state = "success";
1983 		break;
1984 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1985 		desc_ioc_state = "invalid function";
1986 		break;
1987 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1988 		desc_ioc_state = "scsi recovered error";
1989 		break;
1990 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1991 		desc_ioc_state = "scsi invalid dev handle";
1992 		break;
1993 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1994 		desc_ioc_state = "scsi device not there";
1995 		break;
1996 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1997 		desc_ioc_state = "scsi data overrun";
1998 		break;
1999 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2000 		desc_ioc_state = "scsi data underrun";
2001 		break;
2002 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2003 		desc_ioc_state = "scsi io data error";
2004 		break;
2005 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2006 		desc_ioc_state = "scsi protocol error";
2007 		break;
2008 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2009 		desc_ioc_state = "scsi task terminated";
2010 		break;
2011 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2012 		desc_ioc_state = "scsi residual mismatch";
2013 		break;
2014 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2015 		desc_ioc_state = "scsi task mgmt failed";
2016 		break;
2017 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2018 		desc_ioc_state = "scsi ioc terminated";
2019 		break;
2020 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2021 		desc_ioc_state = "scsi ext terminated";
2022 		break;
2023 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2024 		desc_ioc_state = "eedp guard error";
2025 		break;
2026 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2027 		desc_ioc_state = "eedp ref tag error";
2028 		break;
2029 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2030 		desc_ioc_state = "eedp app tag error";
2031 		break;
2032 	default:
2033 		desc_ioc_state = "unknown";
2034 		break;
2035 	}
2036 
2037 	switch (scsi_status) {
2038 	case MPI2_SCSI_STATUS_GOOD:
2039 		desc_scsi_status = "good";
2040 		break;
2041 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
2042 		desc_scsi_status = "check condition";
2043 		break;
2044 	case MPI2_SCSI_STATUS_CONDITION_MET:
2045 		desc_scsi_status = "condition met";
2046 		break;
2047 	case MPI2_SCSI_STATUS_BUSY:
2048 		desc_scsi_status = "busy";
2049 		break;
2050 	case MPI2_SCSI_STATUS_INTERMEDIATE:
2051 		desc_scsi_status = "intermediate";
2052 		break;
2053 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2054 		desc_scsi_status = "intermediate condmet";
2055 		break;
2056 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2057 		desc_scsi_status = "reservation conflict";
2058 		break;
2059 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2060 		desc_scsi_status = "command terminated";
2061 		break;
2062 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
2063 		desc_scsi_status = "task set full";
2064 		break;
2065 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
2066 		desc_scsi_status = "aca active";
2067 		break;
2068 	case MPI2_SCSI_STATUS_TASK_ABORTED:
2069 		desc_scsi_status = "task aborted";
2070 		break;
2071 	default:
2072 		desc_scsi_status = "unknown";
2073 		break;
2074 	}
2075 
2076 	desc_scsi_state[0] = '\0';
2077 	if (!scsi_state)
2078 		desc_scsi_state = " ";
2079 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2080 		strcat(desc_scsi_state, "response info ");
2081 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2082 		strcat(desc_scsi_state, "state terminated ");
2083 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2084 		strcat(desc_scsi_state, "no status ");
2085 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2086 		strcat(desc_scsi_state, "autosense failed ");
2087 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2088 		strcat(desc_scsi_state, "autosense valid ");
2089 
2090 	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2091 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2092 	if (targ->encl_level_valid) {
2093 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2094 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2095 		    targ->connector_name);
2096 	}
	/*
	 * TODO: We can add more detail about underflow data here.
	 */
2100 	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2101 	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2102 	    desc_scsi_state, scsi_state);
2103 
	if ((sc->mpr_debug & MPR_XINFO) &&
	    (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)) {
2106 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2107 		scsi_sense_print(csio);
2108 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2109 	}
2110 
2111 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2112 		response_info = le32toh(mpi_reply->ResponseInfo);
2113 		response_bytes = (u8 *)&response_info;
		mpr_response_code(sc, response_bytes[0]);
2115 	}
2116 }
2117 
2118 static void
2119 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2120 {
2121 	MPI2_SCSI_IO_REPLY *rep;
2122 	union ccb *ccb;
2123 	struct ccb_scsiio *csio;
2124 	struct mprsas_softc *sassc;
2125 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2126 	u8 *TLR_bits, TLR_on;
2127 	int dir = 0, i;
2128 	u16 alloc_len;
2129 
2130 	MPR_FUNCTRACE(sc);
2131 	mpr_dprint(sc, MPR_TRACE,
2132 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2133 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2134 	    cm->cm_targ->outstanding);
2135 
2136 	callout_stop(&cm->cm_callout);
2137 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2138 
2139 	sassc = sc->sassc;
2140 	ccb = cm->cm_complete_data;
2141 	csio = &ccb->csio;
2142 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2143 	/*
2144 	 * XXX KDM if the chain allocation fails, does it matter if we do
2145 	 * the sync and unload here?  It is simpler to do it in every case,
2146 	 * assuming it doesn't cause problems.
2147 	 */
2148 	if (cm->cm_data != NULL) {
2149 		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2150 			dir = BUS_DMASYNC_POSTREAD;
2151 		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2152 			dir = BUS_DMASYNC_POSTWRITE;
2153 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2154 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2155 	}
2156 
2157 	cm->cm_targ->completed++;
2158 	cm->cm_targ->outstanding--;
2159 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
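	/* Clear any stale CAM status bits before filling in the result. */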
2160 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2161 
2162 	if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2163 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2164 		if (cm->cm_reply != NULL)
2165 			mprsas_log_command(cm, MPR_RECOVERY,
2166 			    "completed timedout cm %p ccb %p during recovery "
2167 			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2168 			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2169 			    rep->SCSIState, le32toh(rep->TransferCount));
2170 		else
2171 			mprsas_log_command(cm, MPR_RECOVERY,
2172 			    "completed timedout cm %p ccb %p during recovery\n",
2173 			    cm, cm->cm_ccb);
2174 	} else if (cm->cm_targ->tm != NULL) {
2175 		if (cm->cm_reply != NULL)
2176 			mprsas_log_command(cm, MPR_RECOVERY,
2177 			    "completed cm %p ccb %p during recovery "
2178 			    "ioc %x scsi %x state %x xfer %u\n",
2179 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2180 			    rep->SCSIStatus, rep->SCSIState,
2181 			    le32toh(rep->TransferCount));
2182 		else
2183 			mprsas_log_command(cm, MPR_RECOVERY,
2184 			    "completed cm %p ccb %p during recovery\n",
2185 			    cm, cm->cm_ccb);
2186 	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2187 		mprsas_log_command(cm, MPR_RECOVERY,
2188 		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2189 	}
2190 
2191 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2192 		/*
2193 		 * We ran into an error after we tried to map the command,
2194 		 * so we're getting a callback without queueing the command
2195 		 * to the hardware.  So we set the status here, and it will
2196 		 * be retained below.  We'll go through the "fast path",
2197 		 * because there can be no reply when we haven't actually
2198 		 * gone out to the hardware.
2199 		 */
2200 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
2201 
2202 		/*
2203 		 * Currently the only error included in the mask is
2204 		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2205 		 * chain frames.  We need to freeze the queue until we get
2206 		 * a command that completed without this error, which will
2207 		 * hopefully have some chain frames attached that we can
2208 		 * use.  If we wanted to get smarter about it, we would
2209 		 * only unfreeze the queue in this condition when we're
2210 		 * sure that we're getting some chain frames back.  That's
2211 		 * probably unnecessary.
2212 		 */
2213 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2214 			xpt_freeze_simq(sassc->sim, 1);
2215 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2216 			mpr_dprint(sc, MPR_INFO, "Error sending command, "
2217 				   "freezing SIM queue\n");
2218 		}
2219 	}
2220 
2221 	/*
2222 	 * If this is a Start Stop Unit command and it was issued by the driver
2223 	 * during shutdown, decrement the refcount to account for all of the
2224 	 * commands that were sent.  All SSU commands should be completed before
2225 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2226 	 * is TRUE.
2227 	 */
2228 	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2229 		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2230 		sc->SSU_refcount--;
2231 	}
2232 
2233 	/* Take the fast path to completion */
2234 	if (cm->cm_reply == NULL) {
2235 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2236 			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2237 				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2238 			else {
2239 				ccb->ccb_h.status = CAM_REQ_CMP;
2240 				ccb->csio.scsi_status = SCSI_STATUS_OK;
2241 			}
2242 			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2243 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2244 				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2245 				mpr_dprint(sc, MPR_XINFO,
2246 				    "Unfreezing SIM queue\n");
2247 			}
2248 		}
2249 
2250 		/*
2251 		 * There are two scenarios where the status won't be
2252 		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2253 		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2254 		 */
2255 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2256 			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
2260 			 */
2261 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2262 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2263 		}
2264 		mpr_free_command(sc, cm);
2265 		xpt_done(ccb);
2266 		return;
2267 	}
2268 
2269 	mprsas_log_command(cm, MPR_XINFO,
2270 	    "ioc %x scsi %x state %x xfer %u\n",
2271 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2272 	    le32toh(rep->TransferCount));
2273 
2274 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2275 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
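		/*
		 * On an underrun the IOC reports how much data actually
		 * moved; record the residual and fall through to the normal
		 * status handling below.
		 */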
2276 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2277 		/* FALLTHROUGH */
2278 	case MPI2_IOCSTATUS_SUCCESS:
2279 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2280 
2281 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2282 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2283 			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2284 
2285 		/* Completion failed at the transport level. */
2286 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2287 		    MPI2_SCSI_STATE_TERMINATED)) {
2288 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2289 			break;
2290 		}
2291 
2292 		/* In a modern packetized environment, an autosense failure
2293 		 * implies that there's not much else that can be done to
2294 		 * recover the command.
2295 		 */
2296 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2297 			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
2298 			break;
2299 		}
2300 
2301 		/*
		 * CAM doesn't care about SAS Response Info data, but if it is
		 * present, check whether TLR should be disabled.  If the
		 * response reports an invalid frame, clear the TLR_bits for
		 * the target.
2305 		 */
2306 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2307 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2308 		    == MPR_SCSI_RI_INVALID_FRAME)) {
2309 			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
2310 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2311 		}
2312 
2313 		/*
2314 		 * Intentionally override the normal SCSI status reporting
2315 		 * for these two cases.  These are likely to happen in a
2316 		 * multi-initiator environment, and we want to make sure that
2317 		 * CAM retries these commands rather than fail them.
2318 		 */
2319 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2320 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2321 			ccb->ccb_h.status = CAM_REQ_ABORTED;
2322 			break;
2323 		}
2324 
2325 		/* Handle normal status and sense */
2326 		csio->scsi_status = rep->SCSIStatus;
2327 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2328 			ccb->ccb_h.status = CAM_REQ_CMP;
2329 		else
2330 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2331 
2332 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2333 			int sense_len, returned_sense_len;
2334 
2335 			returned_sense_len = min(le32toh(rep->SenseCount),
2336 			    sizeof(struct scsi_sense_data));
2337 			if (returned_sense_len < csio->sense_len)
2338 				csio->sense_resid = csio->sense_len -
2339 				    returned_sense_len;
2340 			else
2341 				csio->sense_resid = 0;
2342 
2343 			sense_len = min(returned_sense_len,
2344 			    csio->sense_len - csio->sense_resid);
2345 			bzero(&csio->sense_data, sizeof(csio->sense_data));
2346 			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2347 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2348 		}
2349 
2350 		/*
2351 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2352 		 * and it's page code 0 (Supported Page List), and there is
2353 		 * inquiry data, and this is for a sequential access device, and
2354 		 * the device is an SSP target, and TLR is supported by the
2355 		 * controller, turn the TLR_bits value ON if page 0x90 is
2356 		 * supported.
2357 		 */
2358 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2359 		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2360 		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2361 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2362 		    (csio->data_ptr != NULL) &&
2363 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2364 		    (sc->control_TLR) &&
2365 		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
2366 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2367 			vpd_list = (struct scsi_vpd_supported_page_list *)
2368 			    csio->data_ptr;
2369 			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
2370 			    TLR_bits;
2371 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2372 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
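			/*
			 * The INQUIRY allocation length is a big-endian
			 * 16-bit value in CDB bytes 3 and 4; subtract the
			 * residual to get the amount of VPD data actually
			 * returned.
			 */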
2373 			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2374 			    csio->cdb_io.cdb_bytes[4];
2375 			alloc_len -= csio->resid;
2376 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2377 				if (vpd_list->list[i] == 0x90) {
2378 					*TLR_bits = TLR_on;
2379 					break;
2380 				}
2381 			}
2382 		}
2383 		break;
2384 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2385 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2386 		/*
2387 		 * If devinfo is 0 this will be a volume.  In that case don't
2388 		 * tell CAM that the volume is not there.  We want volumes to
2389 		 * be enumerated until they are deleted/removed, not just
2390 		 * failed.
2391 		 */
2392 		if (cm->cm_targ->devinfo == 0)
2393 			ccb->ccb_h.status = CAM_REQ_CMP;
2394 		else
2395 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2396 		break;
2397 	case MPI2_IOCSTATUS_INVALID_SGL:
2398 		mpr_print_scsiio_cmd(sc, cm);
2399 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2400 		break;
2401 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2402 		/*
2403 		 * This is one of the responses that comes back when an I/O
2404 		 * has been aborted.  If it is because of a timeout that we
2405 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2406 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2407 		 * command is the same (it gets retried, subject to the
2408 		 * retry counter), the only difference is what gets printed
2409 		 * on the console.
2410 		 */
2411 		if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2412 			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
2413 		else
2414 			ccb->ccb_h.status = CAM_REQ_ABORTED;
2415 		break;
2416 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2417 		/* resid is ignored for this condition */
2418 		csio->resid = 0;
2419 		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
2420 		break;
2421 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2422 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2423 		/*
2424 		 * Since these are generally external (i.e. hopefully
2425 		 * transient transport-related) errors, retry these without
2426 		 * decrementing the retry count.
2427 		 */
2428 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
2429 		mprsas_log_command(cm, MPR_INFO,
2430 		    "terminated ioc %x scsi %x state %x xfer %u\n",
2431 		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2432 		    le32toh(rep->TransferCount));
2433 		break;
2434 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2435 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2436 	case MPI2_IOCSTATUS_INVALID_VPID:
2437 	case MPI2_IOCSTATUS_INVALID_FIELD:
2438 	case MPI2_IOCSTATUS_INVALID_STATE:
2439 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2440 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2441 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2442 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2443 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2444 	default:
2445 		mprsas_log_command(cm, MPR_XINFO,
2446 		    "completed ioc %x scsi %x state %x xfer %u\n",
2447 		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2448 		    le32toh(rep->TransferCount));
2449 		csio->resid = cm->cm_length;
2450 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2451 		break;
2452 	}
2453 
2454 	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2455 
2456 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2457 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2458 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2459 		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2460 		    "queue\n");
2461 	}
2462 
2463 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2464 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2465 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2466 	}
2467 
2468 	mpr_free_command(sc, cm);
2469 	xpt_done(ccb);
2470 }
2471 
2472 #if __FreeBSD_version >= 900026
2473 static void
2474 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2475 {
2476 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2477 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2478 	uint64_t sasaddr;
2479 	union ccb *ccb;
2480 
2481 	ccb = cm->cm_complete_data;
2482 
2483 	/*
2484 	 * Currently there should be no way we can hit this case.  It only
2485 	 * happens when we have a failure to allocate chain frames, and SMP
2486 	 * commands require two S/G elements only.  That should be handled
2487 	 * in the standard request size.
2488 	 */
2489 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2490 		mpr_dprint(sc, MPR_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2491 		    __func__, cm->cm_flags);
2492 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2493 		goto bailout;
2494         }
2495 
2496 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2497 	if (rpl == NULL) {
2498 		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2499 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2500 		goto bailout;
2501 	}
2502 
2503 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2504 	sasaddr = le32toh(req->SASAddress.Low);
2505 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2506 
2507 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2508 	    MPI2_IOCSTATUS_SUCCESS ||
2509 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2510 		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2511 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2512 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2513 		goto bailout;
2514 	}
2515 
2516 	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address "
2517 	    "%#jx completed successfully\n", __func__, (uintmax_t)sasaddr);
2518 
2519 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2520 		ccb->ccb_h.status = CAM_REQ_CMP;
2521 	else
2522 		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2523 
2524 bailout:
2525 	/*
2526 	 * We sync in both directions because we had DMAs in the S/G list
2527 	 * in both directions.
2528 	 */
2529 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2530 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2531 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2532 	mpr_free_command(sc, cm);
2533 	xpt_done(ccb);
2534 }
2535 
2536 static void
2537 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
2538     uint64_t sasaddr)
2539 {
2540 	struct mpr_command *cm;
2541 	uint8_t *request, *response;
2542 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2543 	struct mpr_softc *sc;
2544 	struct sglist *sg;
2545 	int error;
2546 
2547 	sc = sassc->sc;
2548 	sg = NULL;
2549 	error = 0;
2550 
2551 #if (__FreeBSD_version >= 1000028) || \
2552     ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
2553 	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2554 	case CAM_DATA_PADDR:
2555 	case CAM_DATA_SG_PADDR:
2556 		/*
2557 		 * XXX We don't yet support physical addresses here.
2558 		 */
2559 		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2560 		    "supported\n", __func__);
2561 		ccb->ccb_h.status = CAM_REQ_INVALID;
2562 		xpt_done(ccb);
2563 		return;
2564 	case CAM_DATA_SG:
2565 		/*
2566 		 * The chip does not support more than one buffer for the
2567 		 * request or response.
2568 		 */
2569 		if ((ccb->smpio.smp_request_sglist_cnt > 1)
2570 		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2571 			mpr_dprint(sc, MPR_ERROR,
2572 			    "%s: multiple request or response buffer segments "
2573 			    "not supported for SMP\n", __func__);
2574 			ccb->ccb_h.status = CAM_REQ_INVALID;
2575 			xpt_done(ccb);
2576 			return;
2577 		}
2578 
2579 		/*
2580 		 * The CAM_SCATTER_VALID flag was originally implemented
2581 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2582 		 * We have two.  So, just take that flag to mean that we
2583 		 * might have S/G lists, and look at the S/G segment count
2584 		 * to figure out whether that is the case for each individual
2585 		 * buffer.
2586 		 */
2587 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2588 			bus_dma_segment_t *req_sg;
2589 
2590 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2591 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2592 		} else
2593 			request = ccb->smpio.smp_request;
2594 
2595 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2596 			bus_dma_segment_t *rsp_sg;
2597 
2598 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2599 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2600 		} else
2601 			response = ccb->smpio.smp_response;
2602 		break;
2603 	case CAM_DATA_VADDR:
2604 		request = ccb->smpio.smp_request;
2605 		response = ccb->smpio.smp_response;
2606 		break;
2607 	default:
2608 		ccb->ccb_h.status = CAM_REQ_INVALID;
2609 		xpt_done(ccb);
2610 		return;
2611 	}
2612 #else /* __FreeBSD_version < 1000028 */
2613 	/*
2614 	 * XXX We don't yet support physical addresses here.
2615 	 */
2616 	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
2617 		mpr_printf(sc, "%s: physical addresses not supported\n",
2618 			   __func__);
2619 		ccb->ccb_h.status = CAM_REQ_INVALID;
2620 		xpt_done(ccb);
2621 		return;
2622 	}
2623 
2624 	/*
2625 	 * If the user wants to send an S/G list, check to make sure they
2626 	 * have single buffers.
2627 	 */
2628 	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
2629 		/*
2630 		 * The chip does not support more than one buffer for the
2631 		 * request or response.
2632 		 */
2633 	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
2634 		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2635 			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
2636 			    "response buffer segments not supported for SMP\n",
2637 			    __func__);
2638 			ccb->ccb_h.status = CAM_REQ_INVALID;
2639 			xpt_done(ccb);
2640 			return;
2641 		}
2642 
2643 		/*
2644 		 * The CAM_SCATTER_VALID flag was originally implemented
2645 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2646 		 * We have two.  So, just take that flag to mean that we
2647 		 * might have S/G lists, and look at the S/G segment count
2648 		 * to figure out whether that is the case for each individual
2649 		 * buffer.
2650 		 */
2651 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2652 			bus_dma_segment_t *req_sg;
2653 
2654 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2655 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2656 		} else
2657 			request = ccb->smpio.smp_request;
2658 
2659 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2660 			bus_dma_segment_t *rsp_sg;
2661 
2662 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2663 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2664 		} else
2665 			response = ccb->smpio.smp_response;
2666 	} else {
2667 		request = ccb->smpio.smp_request;
2668 		response = ccb->smpio.smp_response;
2669 	}
2670 #endif /* __FreeBSD_version < 1000028 */
2671 
2672 	cm = mpr_alloc_command(sc);
2673 	if (cm == NULL) {
2674 		mpr_dprint(sc, MPR_ERROR,
2675 		    "%s: cannot allocate command\n", __func__);
2676 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2677 		xpt_done(ccb);
2678 		return;
2679 	}
2680 
2681 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2682 	bzero(req, sizeof(*req));
2683 	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2684 
2685 	/* Allow the chip to use any route to this SAS address. */
2686 	req->PhysicalPort = 0xff;
2687 
2688 	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2689 	req->SGLFlags =
2690 	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2691 
2692 	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
2693 	    "%#jx\n", __func__, (uintmax_t)sasaddr);
2694 
2695 	mpr_init_sge(cm, req, &req->SGL);
2696 
2697 	/*
2698 	 * Set up a uio to pass into mpr_map_command().  This allows us to
2699 	 * do one map command, and one busdma call in there.
2700 	 */
2701 	cm->cm_uio.uio_iov = cm->cm_iovec;
2702 	cm->cm_uio.uio_iovcnt = 2;
2703 	cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2704 
2705 	/*
2706 	 * The read/write flag isn't used by busdma, but set it just in
2707 	 * case.  This isn't exactly accurate, either, since we're going in
2708 	 * both directions.
2709 	 */
2710 	cm->cm_uio.uio_rw = UIO_WRITE;
2711 
2712 	cm->cm_iovec[0].iov_base = request;
2713 	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2714 	cm->cm_iovec[1].iov_base = response;
2715 	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2716 
2717 	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2718 			       cm->cm_iovec[1].iov_len;
2719 
2720 	/*
2721 	 * Trigger a warning message in mpr_data_cb() for the user if we
2722 	 * wind up exceeding two S/G segments.  The chip expects one
2723 	 * segment for the request and another for the response.
2724 	 */
2725 	cm->cm_max_segs = 2;
2726 
2727 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2728 	cm->cm_complete = mprsas_smpio_complete;
2729 	cm->cm_complete_data = ccb;
2730 
2731 	/*
2732 	 * Tell the mapping code that we're using a uio, and that this is
2733 	 * an SMP passthrough request.  There is a little special-case
2734 	 * logic there (in mpr_data_cb()) to handle the bidirectional
2735 	 * transfer.
2736 	 */
2737 	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
2738 			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
2739 
2740 	/* The chip data format is little endian. */
2741 	req->SASAddress.High = htole32(sasaddr >> 32);
2742 	req->SASAddress.Low = htole32(sasaddr);
2743 
2744 	/*
2745 	 * XXX Note that we don't have a timeout/abort mechanism here.
2746 	 * From the manual, it looks like task management requests only
2747 	 * work for SCSI IO and SATA passthrough requests.  We may need to
2748 	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will ensure that any errors short
2750 	 * of that are relayed back to the driver.
2751 	 */
2752 	error = mpr_map_command(sc, cm);
2753 	if ((error != 0) && (error != EINPROGRESS)) {
2754 		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
2755 		    "mpr_map_command()\n", __func__, error);
2756 		goto bailout_error;
2757 	}
2758 
2759 	return;
2760 
2761 bailout_error:
2762 	mpr_free_command(sc, cm);
2763 	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2764 	xpt_done(ccb);
2765 	return;
2766 }
2767 
2768 static void
2769 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
2770 {
2771 	struct mpr_softc *sc;
2772 	struct mprsas_target *targ;
2773 	uint64_t sasaddr = 0;
2774 
2775 	sc = sassc->sc;
2776 
2777 	/*
2778 	 * Make sure the target exists.
2779 	 */
2780 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2781 	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
2782 	targ = &sassc->targets[ccb->ccb_h.target_id];
2783 	if (targ->handle == 0x0) {
2784 		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
2785 		    __func__, ccb->ccb_h.target_id);
2786 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2787 		xpt_done(ccb);
2788 		return;
2789 	}
2790 
2791 	/*
2792 	 * If this device has an embedded SMP target, we'll talk to it
2793 	 * directly.
2794 	 * figure out what the expander's address is.
2795 	 */
2796 	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2797 		sasaddr = targ->sasaddr;
2798 
2799 	/*
2800 	 * If we don't have a SAS address for the expander yet, try
2801 	 * grabbing it from the page 0x83 information cached in the
2802 	 * transport layer for this target.  LSI expanders report the
2803 	 * expander SAS address as the port-associated SAS address in
2804 	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
2805 	 * 0x83.
2806 	 *
2807 	 * XXX KDM disable this for now, but leave it commented out so that
2808 	 * it is obvious that this is another possible way to get the SAS
2809 	 * address.
2810 	 *
2811 	 * The parent handle method below is a little more reliable, and
2812 	 * the other benefit is that it works for devices other than SES
2813 	 * devices.  So you can send a SMP request to a da(4) device and it
2814 	 * will get routed to the expander that device is attached to.
2815 	 * (Assuming the da(4) device doesn't contain an SMP target...)
2816 	 */
2817 #if 0
2818 	if (sasaddr == 0)
2819 		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2820 #endif
2821 
2822 	/*
2823 	 * If we still don't have a SAS address for the expander, look for
2824 	 * the parent device of this device, which is probably the expander.
2825 	 */
2826 	if (sasaddr == 0) {
2827 #ifdef OLD_MPR_PROBE
2828 		struct mprsas_target *parent_target;
2829 #endif
2830 
2831 		if (targ->parent_handle == 0x0) {
2832 			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
2833 			    "a valid parent handle!\n", __func__, targ->handle);
2834 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2835 			goto bailout;
2836 		}
2837 #ifdef OLD_MPR_PROBE
2838 		parent_target = mprsas_find_target_by_handle(sassc, 0,
2839 		    targ->parent_handle);
2840 
2841 		if (parent_target == NULL) {
2842 			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
2843 			    "a valid parent target!\n", __func__, targ->handle);
2844 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2845 			goto bailout;
2846 		}
2847 
2848 		if ((parent_target->devinfo &
2849 		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2850 			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
2851 			    "does not have an SMP target!\n", __func__,
2852 			    targ->handle, parent_target->handle);
2853 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			goto bailout;
		}
2857 
2858 		sasaddr = parent_target->sasaddr;
2859 #else /* OLD_MPR_PROBE */
2860 		if ((targ->parent_devinfo &
2861 		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2862 			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
2863 			    "does not have an SMP target!\n", __func__,
2864 			    targ->handle, targ->parent_handle);
2865 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			goto bailout;
		}
2869 		if (targ->parent_sasaddr == 0x0) {
2870 			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
2871 			    "%d does not have a valid SAS address!\n", __func__,
2872 			    targ->handle, targ->parent_handle);
2873 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2874 			goto bailout;
2875 		}
2876 
2877 		sasaddr = targ->parent_sasaddr;
2878 #endif /* OLD_MPR_PROBE */
2879 
2880 	}
2881 
2882 	if (sasaddr == 0) {
2883 		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
2884 		    "handle %d\n", __func__, targ->handle);
2885 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2886 		goto bailout;
2887 	}
2888 	mprsas_send_smpcmd(sassc, ccb, sasaddr);
2889 
2890 	return;
2891 
2892 bailout:
2893 	xpt_done(ccb);
2894 
2895 }
2896 #endif //__FreeBSD_version >= 900026
2897 
2898 static void
2899 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
2900 {
2901 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2902 	struct mpr_softc *sc;
2903 	struct mpr_command *tm;
2904 	struct mprsas_target *targ;
2905 
2906 	MPR_FUNCTRACE(sassc->sc);
2907 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
2908 
2909 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2910 	    ("Target %d out of bounds in XPT_RESET_DEV\n",
2911 	    ccb->ccb_h.target_id));
2912 	sc = sassc->sc;
2913 	tm = mpr_alloc_command(sc);
2914 	if (tm == NULL) {
2915 		mpr_dprint(sc, MPR_ERROR,
2916 		    "command alloc failure in mprsas_action_resetdev\n");
2917 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2918 		xpt_done(ccb);
2919 		return;
2920 	}
2921 
2922 	targ = &sassc->targets[ccb->ccb_h.target_id];
2923 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2924 	req->DevHandle = htole16(targ->handle);
2925 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2926 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2927 
2928 	/* SAS Hard Link Reset / SATA Link Reset */
2929 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
2930 
2931 	tm->cm_data = NULL;
2932 	tm->cm_desc.HighPriority.RequestFlags =
2933 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2934 	tm->cm_complete = mprsas_resetdev_complete;
2935 	tm->cm_complete_data = ccb;
2936 	tm->cm_targ = targ;
2937 	mpr_map_command(sc, tm);
2938 }
2939 
2940 static void
2941 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
2942 {
2943 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2944 	union ccb *ccb;
2945 
2946 	MPR_FUNCTRACE(sc);
2947 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2948 
2949 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
2950 	ccb = tm->cm_complete_data;
2951 
2952 	/*
2953 	 * Currently there should be no way we can hit this case.  It only
2954 	 * happens when we have a failure to allocate chain frames, and
2955 	 * task management commands don't have S/G lists.
2956 	 */
2957 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2958 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2959 
2960 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2961 
2962 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
2963 		    "handle %#04x! This should not happen!\n", __func__,
2964 		    tm->cm_flags, req->DevHandle);
2965 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2966 		goto bailout;
2967 	}
2968 
2969 	mpr_dprint(sc, MPR_XINFO,
2970 	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
2971 	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
2972 
2973 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
2974 		ccb->ccb_h.status = CAM_REQ_CMP;
2975 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
2976 		    CAM_LUN_WILDCARD);
2977 	}
2978 	else
2979 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2980 
2981 bailout:
2982 
2983 	mprsas_free_tm(sc, tm);
2984 	xpt_done(ccb);
2985 }
2986 
2987 static void
2988 mprsas_poll(struct cam_sim *sim)
2989 {
2990 	struct mprsas_softc *sassc;
2991 
2992 	sassc = cam_sim_softc(sim);
2993 
2994 	if (sassc->sc->mpr_debug & MPR_TRACE) {
2995 		/* frequent debug messages during a panic just slow
2996 		 * everything down too much.
2997 		 */
2998 		mpr_printf(sassc->sc, "%s clearing MPR_TRACE\n", __func__);
2999 		sassc->sc->mpr_debug &= ~MPR_TRACE;
3000 	}
3001 
3002 	mpr_intr_locked(sassc->sc);
3003 }
3004 
3005 static void
3006 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3007     void *arg)
3008 {
3009 	struct mpr_softc *sc;
3010 
3011 	sc = (struct mpr_softc *)callback_arg;
3012 
3013 	switch (code) {
3014 #if (__FreeBSD_version >= 1000006) || \
3015     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3016 	case AC_ADVINFO_CHANGED: {
3017 		struct mprsas_target *target;
3018 		struct mprsas_softc *sassc;
3019 		struct scsi_read_capacity_data_long rcap_buf;
3020 		struct ccb_dev_advinfo cdai;
3021 		struct mprsas_lun *lun;
3022 		lun_id_t lunid;
3023 		int found_lun;
3024 		uintptr_t buftype;
3025 
3026 		buftype = (uintptr_t)arg;
3027 
3028 		found_lun = 0;
3029 		sassc = sc->sassc;
3030 
3031 		/*
3032 		 * We're only interested in read capacity data changes.
3033 		 */
3034 		if (buftype != CDAI_TYPE_RCAPLONG)
3035 			break;
3036 
3037 		/*
3038 		 * See the comment in mpr_attach_sas() for a detailed
3039 		 * explanation.  In these versions of FreeBSD we register
3040 		 * for all events and filter out the events that don't
3041 		 * apply to us.
3042 		 */
3043 #if (__FreeBSD_version < 1000703) || \
3044     ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3045 		if (xpt_path_path_id(path) != sassc->sim->path_id)
3046 			break;
3047 #endif
3048 
3049 		/*
3050 		 * We should have a handle for this, but check to make sure.
3051 		 */
3052 		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3053 		    ("Target %d out of bounds in mprsas_async\n",
3054 		    xpt_path_target_id(path)));
3055 		target = &sassc->targets[xpt_path_target_id(path)];
3056 		if (target->handle == 0)
3057 			break;
3058 
3059 		lunid = xpt_path_lun_id(path);
3060 
3061 		SLIST_FOREACH(lun, &target->luns, lun_link) {
3062 			if (lun->lun_id == lunid) {
3063 				found_lun = 1;
3064 				break;
3065 			}
3066 		}
3067 
3068 		if (found_lun == 0) {
3069 			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3070 			    M_NOWAIT | M_ZERO);
3071 			if (lun == NULL) {
3072 				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
3073 				    "LUN for EEDP support.\n");
3074 				break;
3075 			}
3076 			lun->lun_id = lunid;
3077 			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3078 		}
3079 
3080 		bzero(&rcap_buf, sizeof(rcap_buf));
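		/*
		 * Fetch the cached long read capacity data for this LUN via
		 * XPT_DEV_ADVINFO so the protection enable bit can be
		 * checked below.
		 */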
3081 		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3082 		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3083 		cdai.ccb_h.flags = CAM_DIR_IN;
3084 		cdai.buftype = CDAI_TYPE_RCAPLONG;
3085 		cdai.flags = 0;
3086 		cdai.bufsiz = sizeof(rcap_buf);
3087 		cdai.buf = (uint8_t *)&rcap_buf;
3088 		xpt_action((union ccb *)&cdai);
3089 		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3090 			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
3091 
3092 		if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
3093 		    && (rcap_buf.prot & SRC16_PROT_EN)) {
3094 			lun->eedp_formatted = TRUE;
3095 			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3096 		} else {
3097 			lun->eedp_formatted = FALSE;
3098 			lun->eedp_block_size = 0;
3099 		}
3100 		break;
3101 	}
3102 #endif
3103 	case AC_FOUND_DEVICE: {
3104 		struct ccb_getdev *cgd;
3105 
3106 		/*
3107 		 * See the comment in mpr_attach_sas() for a detailed
3108 		 * explanation.  In these versions of FreeBSD we register
3109 		 * for all events and filter out the events that don't
3110 		 * apply to us.
3111 		 */
3112 #if (__FreeBSD_version < 1000703) || \
3113     ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3114 		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
3115 			break;
3116 #endif
3117 
3118 		cgd = arg;
3119 		mprsas_prepare_ssu(sc, path, cgd);
3120 
3121 #if (__FreeBSD_version < 901503) || \
3122     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3123 		mprsas_check_eedp(sc, path, cgd);
3124 #endif
3125 		break;
3126 	}
3127 	default:
3128 		break;
3129 	}
3130 }
3131 
3132 static void
3133 mprsas_prepare_ssu(struct mpr_softc *sc, struct cam_path *path,
3134     struct ccb_getdev *cgd)
3135 {
3136 	struct mprsas_softc *sassc = sc->sassc;
3137 	path_id_t pathid;
3138 	target_id_t targetid;
3139 	lun_id_t lunid;
3140 	struct mprsas_target *target;
3141 	struct mprsas_lun *lun;
3142 	uint8_t	found_lun;
3143 
3144 	sassc = sc->sassc;
3145 	pathid = cam_sim_path(sassc->sim);
3146 	targetid = xpt_path_target_id(path);
3147 	lunid = xpt_path_lun_id(path);
3148 
3149 	KASSERT(targetid < sassc->maxtargets,
3150 	    ("Target %d out of bounds in mprsas_prepare_ssu\n", targetid));
3151 	target = &sassc->targets[targetid];
3152 	if (target->handle == 0x0)
3153 		return;
3154 
3155 	/*
3156 	 * If LUN is already in list, don't create a new one.
3157 	 */
3158 	found_lun = FALSE;
3159 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3160 		if (lun->lun_id == lunid) {
3161 			found_lun = TRUE;
3162 			break;
3163 		}
3164 	}
3165 	if (!found_lun) {
3166 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3167 		    M_NOWAIT | M_ZERO);
3168 		if (lun == NULL) {
3169 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3170 			    "preparing SSU.\n");
3171 			return;
3172 		}
3173 		lun->lun_id = lunid;
3174 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3175 	}
3176 
3177 	/*
3178 	 * If this is a SATA direct-access end device, mark it so that a SCSI
3179 	 * StartStopUnit command will be sent to it when the driver is being
3180 	 * shutdown.
3181 	 */
3182 	if (((cgd->inq_data.device & 0x1F) == T_DIRECT) &&
3183 	    (target->devinfo & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
3184 	    ((target->devinfo & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
3185 	    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
3186 		lun->stop_at_shutdown = TRUE;
3187 	}
3188 }
3189 
3190 #if (__FreeBSD_version < 901503) || \
3191     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3192 static void
3193 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3194     struct ccb_getdev *cgd)
3195 {
3196 	struct mprsas_softc *sassc = sc->sassc;
3197 	struct ccb_scsiio *csio;
3198 	struct scsi_read_capacity_16 *scsi_cmd;
3199 	struct scsi_read_capacity_eedp *rcap_buf;
3200 	path_id_t pathid;
3201 	target_id_t targetid;
3202 	lun_id_t lunid;
3203 	union ccb *ccb;
3204 	struct cam_path *local_path;
3205 	struct mprsas_target *target;
3206 	struct mprsas_lun *lun;
3207 	uint8_t	found_lun;
3208 	char path_str[64];
3209 
3210 	sassc = sc->sassc;
3211 	pathid = cam_sim_path(sassc->sim);
3212 	targetid = xpt_path_target_id(path);
3213 	lunid = xpt_path_lun_id(path);
3214 
3215 	KASSERT(targetid < sassc->maxtargets,
3216 	    ("Target %d out of bounds in mprsas_check_eedp\n", targetid));
3217 	target = &sassc->targets[targetid];
3218 	if (target->handle == 0x0)
3219 		return;
3220 
3221 	/*
3222 	 * Determine if the device is EEDP capable.
3223 	 *
	 * If this flag is set in the inquiry data, the device supports
	 * protection information and must support the 16-byte READ CAPACITY
	 * command; otherwise continue without sending READ CAPACITY(16).
3227 	 */
3228 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3229 		return;
3230 
3231 	/*
3232 	 * Issue a READ CAPACITY 16 command.  This info is used to determine if
3233 	 * the LUN is formatted for EEDP support.
3234 	 */
3235 	ccb = xpt_alloc_ccb_nowait();
3236 	if (ccb == NULL) {
3237 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3238 		    "support.\n");
3239 		return;
3240 	}
3241 
3242 	if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid)
3243 	    != CAM_REQ_CMP) {
3244 		mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3245 		    "support\n");
3246 		xpt_free_ccb(ccb);
3247 		return;
3248 	}
3249 
3250 	/*
3251 	 * If LUN is already in list, don't create a new one.
3252 	 */
3253 	found_lun = FALSE;
3254 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3255 		if (lun->lun_id == lunid) {
3256 			found_lun = TRUE;
3257 			break;
3258 		}
3259 	}
3260 	if (!found_lun) {
3261 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3262 		    M_NOWAIT | M_ZERO);
3263 		if (lun == NULL) {
3264 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3265 			    "EEDP support.\n");
3266 			xpt_free_path(local_path);
3267 			xpt_free_ccb(ccb);
3268 			return;
3269 		}
3270 		lun->lun_id = lunid;
3271 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3272 	}
3273 
3274 	xpt_path_string(local_path, path_str, sizeof(path_str));
3275 	mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3276 	    path_str, target->handle);
3277 
3278 	/*
3279 	 * Issue a READ CAPACITY 16 command for the LUN.  The
3280 	 * mprsas_read_cap_done function will load the read cap info into the
3281 	 * LUN struct.
3282 	 */
3283 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3284 	    M_NOWAIT | M_ZERO);
3285 	if (rcap_buf == NULL) {
3286 		mpr_dprint(sc, MPR_FAULT, "Unable to alloc read capacity "
3287 		    "buffer for EEDP support.\n");
3288 		xpt_free_path(ccb->ccb_h.path);
3289 		xpt_free_ccb(ccb);
3290 		return;
3291 	}
3292 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3293 	csio = &ccb->csio;
3294 	csio->ccb_h.func_code = XPT_SCSI_IO;
3295 	csio->ccb_h.flags = CAM_DIR_IN;
3296 	csio->ccb_h.retry_count = 4;
3297 	csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3298 	csio->ccb_h.timeout = 60000;
3299 	csio->data_ptr = (uint8_t *)rcap_buf;
3300 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3301 	csio->sense_len = MPR_SENSE_LEN;
3302 	csio->cdb_len = sizeof(*scsi_cmd);
3303 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3304 
3305 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3306 	bzero(scsi_cmd, sizeof(*scsi_cmd));
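	/*
	 * 0x9E is the SERVICE ACTION IN(16) opcode; together with the
	 * READ CAPACITY(16) service action this forms the READ CAPACITY(16)
	 * command, with the low byte of the allocation length set at CDB
	 * byte 13 below.
	 */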
3307 	scsi_cmd->opcode = 0x9E;
3308 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3309 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3310 
3311 	ccb->ccb_h.ppriv_ptr1 = sassc;
3312 	xpt_action(ccb);
3313 }
3314 
3315 static void
3316 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3317 {
3318 	struct mprsas_softc *sassc;
3319 	struct mprsas_target *target;
3320 	struct mprsas_lun *lun;
3321 	struct scsi_read_capacity_eedp *rcap_buf;
3322 
3323 	if (done_ccb == NULL)
3324 		return;
3325 
	/* The driver needs to release the devq itself when the SCSI command
	 * was generated internally by the driver.  Currently there is a
	 * single place where the driver issues a SCSI command internally.
	 * If the driver issues more internal SCSI commands in the future,
	 * it will need to release the devq for those as well, since they
	 * will not go back through cam_periph.
	 */
3334 	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
3335 		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3336 		xpt_release_devq(done_ccb->ccb_h.path,
3337 		    /*count*/ 1, /*run_queue*/ TRUE);
3338 	}
3339 
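	/*
	 * The READ CAPACITY(16) parameter data was returned in the buffer
	 * that was allocated when the command was issued.
	 */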
3340 	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3341 
3342 	/*
3343 	 * Get the LUN ID for the path and look it up in the LUN list for the
3344 	 * target.
3345 	 */
3346 	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3347 	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
3348 	    ("Target %d out of bounds in mprsas_read_cap_done\n",
3349 	    done_ccb->ccb_h.target_id));
3350 	target = &sassc->targets[done_ccb->ccb_h.target_id];
3351 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3352 		if (lun->lun_id != done_ccb->ccb_h.target_lun)
3353 			continue;
3354 
3355 		/*
3356 		 * Got the LUN in the target's LUN list.  Fill it in with EEDP
3357 		 * info.  If the READ CAP 16 command had some SCSI error (common
3358 		 * if command is not supported), mark the lun as not supporting
3359 		 * EEDP and set the block size to 0.
3360 		 */
3361 		if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3362 		    || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3363 			lun->eedp_formatted = FALSE;
3364 			lun->eedp_block_size = 0;
3365 			break;
3366 		}
3367 
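		/*
		 * Bit 0 of the protect field in the READ CAPACITY(16)
		 * parameter data is PROT_EN, which is set when the LUN has
		 * been formatted with protection information.
		 */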
3368 		if (rcap_buf->protect & 0x01) {
3369 			mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for "
3370 			    "target ID %d is formatted for EEDP "
3371 			    "support.\n", done_ccb->ccb_h.target_lun,
3372 			    done_ccb->ccb_h.target_id);
3373 			lun->eedp_formatted = TRUE;
3374 			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3375 		}
3376 		break;
3377 	}
3378 
3379 	/* Finished with this CCB and path. */
3380 	free(rcap_buf, M_MPR);
3381 	xpt_free_path(done_ccb->ccb_h.path);
3382 	xpt_free_ccb(done_ccb);
3383 }
3384 #endif /* (__FreeBSD_version < 901503) || \
3385           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3386 
3387 int
3388 mprsas_startup(struct mpr_softc *sc)
3389 {
3390 	/*
3391 	 * Send the port enable message and set the wait_for_port_enable flag.
3392 	 * This flag helps to keep the simq frozen until all discovery events
3393 	 * are processed.
3394 	 */
3395 	sc->wait_for_port_enable = 1;
3396 	mprsas_send_portenable(sc);
3397 	return (0);
3398 }
3399 
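/*
 * Build an MPI2 Port Enable request and queue it to the IOC.  The reply is
 * handled asynchronously by mprsas_portenable_complete().
 */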
3400 static int
3401 mprsas_send_portenable(struct mpr_softc *sc)
3402 {
3403 	MPI2_PORT_ENABLE_REQUEST *request;
3404 	struct mpr_command *cm;
3405 
3406 	MPR_FUNCTRACE(sc);
3407 
3408 	if ((cm = mpr_alloc_command(sc)) == NULL)
3409 		return (EBUSY);
3410 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3411 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3412 	request->MsgFlags = 0;
3413 	request->VP_ID = 0;
3414 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3415 	cm->cm_complete = mprsas_portenable_complete;
3416 	cm->cm_data = NULL;
3417 	cm->cm_sge = NULL;
3418 
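	/*
	 * Port enable has no data payload (cm_data is NULL), so mapping the
	 * command just queues the request to the IOC.
	 */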
3419 	mpr_map_command(sc, cm);
3420 	mpr_dprint(sc, MPR_XINFO,
3421 	    "mprsas_send_portenable finished cm %p req %p complete %p\n",
3422 	    cm, cm->cm_req, cm->cm_complete);
3423 	return (0);
3424 }
3425 
3426 static void
3427 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3428 {
3429 	MPI2_PORT_ENABLE_REPLY *reply;
3430 	struct mprsas_softc *sassc;
3431 
3432 	MPR_FUNCTRACE(sc);
3433 	sassc = sc->sassc;
3434 
3435 	/*
3436 	 * Currently there should be no way we can hit this case; it only
3437 	 * happens on a failure to allocate chain frames, and port enable
3438 	 * commands never need chain frames because they have no S/G lists.
3439 	 */
3440 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3441 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3442 		    "This should not happen!\n", __func__, cm->cm_flags);
3443 	}
3444 
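	/*
	 * The reply frame can be NULL if the command was terminated without
	 * a reply (for example, during a controller reset), so check for
	 * that before examining the IOCStatus.
	 */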
3445 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3446 	if (reply == NULL)
3447 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3448 	else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
3449 	    MPI2_IOCSTATUS_SUCCESS)
3450 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3451 
3452 	mpr_free_command(sc, cm);
3453 	if (sc->mpr_ich.ich_arg != NULL) {
3454 		mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n");
3455 		config_intrhook_disestablish(&sc->mpr_ich);
3456 		sc->mpr_ich.ich_arg = NULL;
3457 	}
3458 
3459 	/*
3460 	 * Done waiting for port enable to complete.  Decrement the refcount.
3461 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3462 	 * take place.
3463 	 */
3464 	sc->wait_for_port_enable = 0;
3465 	sc->port_enable_complete = 1;
3466 	wakeup(&sc->port_enable_complete);
3467 	mprsas_startup_decrement(sassc);
3468 }
3469 
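/*
 * Return 1 if the given target ID appears in the comma-separated exclude_ids
 * string kept in the softc, 0 otherwise.
 */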
3470 int
3471 mprsas_check_id(struct mprsas_softc *sassc, int id)
3472 {
3473 	struct mpr_softc *sc = sassc->sc;
3474 	char *ids;
3475 	char *name;
3476 
3477 	ids = &sc->exclude_ids[0];
3478 	while ((name = strsep(&ids, ",")) != NULL) {
3479 		if (name[0] == '\0')
3480 			continue;
3481 		if (strtol(name, NULL, 0) == (long)id)
3482 			return (1);
3483 	}
3484 
3485 	return (0);
3486 }
3487