xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision f4b37ed0f8b307b1f3f0f630ca725d68f1dff30d)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2015 Avago Technologies
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
29  *
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 /* Communications core for Avago Technologies (LSI) MPT3 */
36 
37 /* TODO Move headers to mprvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
44 #include <sys/bus.h>
45 #include <sys/conf.h>
46 #include <sys/bio.h>
47 #include <sys/malloc.h>
48 #include <sys/uio.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/sbuf.h>
55 
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58 #include <sys/rman.h>
59 
60 #include <machine/stdarg.h>
61 
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_xpt_periph.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
71 #if __FreeBSD_version >= 900026
72 #include <cam/scsi/smp_all.h>
73 #endif
74 
75 #include <dev/mpr/mpi/mpi2_type.h>
76 #include <dev/mpr/mpi/mpi2.h>
77 #include <dev/mpr/mpi/mpi2_ioc.h>
78 #include <dev/mpr/mpi/mpi2_sas.h>
79 #include <dev/mpr/mpi/mpi2_cnfg.h>
80 #include <dev/mpr/mpi/mpi2_init.h>
81 #include <dev/mpr/mpi/mpi2_tool.h>
82 #include <dev/mpr/mpr_ioctl.h>
83 #include <dev/mpr/mprvar.h>
84 #include <dev/mpr/mpr_table.h>
85 #include <dev/mpr/mpr_sas.h>
86 
87 #define MPRSAS_DISCOVERY_TIMEOUT	20
88 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
89 
90 /*
91  * static array to check SCSI OpCode for EEDP protection bits
92  */
93 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
94 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
95 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
96 static uint8_t op_code_prot[256] = {
97 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
98 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
100 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
106 	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
108 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
113 };
114 
115 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
116 
117 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
118 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
119 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
120 static void mprsas_poll(struct cam_sim *sim);
121 static void mprsas_scsiio_timeout(void *data);
122 static void mprsas_abort_complete(struct mpr_softc *sc,
123     struct mpr_command *cm);
124 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
125 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
126 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
127 static void mprsas_resetdev_complete(struct mpr_softc *,
128     struct mpr_command *);
129 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
130     struct mpr_command *cm);
131 static void mprsas_async(void *callback_arg, uint32_t code,
132     struct cam_path *path, void *arg);
133 #if (__FreeBSD_version < 901503) || \
134     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
135 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
136     struct ccb_getdev *cgd);
137 static void mprsas_read_cap_done(struct cam_periph *periph,
138     union ccb *done_ccb);
139 #endif
140 static int mprsas_send_portenable(struct mpr_softc *sc);
141 static void mprsas_portenable_complete(struct mpr_softc *sc,
142     struct mpr_command *cm);
143 
144 #if __FreeBSD_version >= 900026
145 static void mprsas_smpio_complete(struct mpr_softc *sc,
146     struct mpr_command *cm);
147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc,
148     union ccb *ccb, uint64_t sasaddr);
149 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
150 #endif //FreeBSD_version >= 900026
151 
152 struct mprsas_target *
153 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
154     uint16_t handle)
155 {
156 	struct mprsas_target *target;
157 	int i;
158 
159 	for (i = start; i < sassc->maxtargets; i++) {
160 		target = &sassc->targets[i];
161 		if (target->handle == handle)
162 			return (target);
163 	}
164 
165 	return (NULL);
166 }
167 
168 /* we need to freeze the simq during attach and diag reset, to avoid failing
169  * commands before device handles have been found by discovery.  Since
170  * discovery involves reading config pages and possibly sending commands,
171  * discovery actions may continue even after we receive the end of discovery
172  * event, so refcount discovery actions instead of assuming we can unfreeze
173  * the simq when we get the event.
174  */
/*
 * Bump the discovery reference count.  On the 0 -> 1 transition the SIM
 * queue is frozen (and, on CAM versions that support it, boot is held)
 * so no I/O can be started before discovery has found device handles.
 * Only effective while MPRSAS_IN_STARTUP is set; see the block comment
 * above for why a refcount is needed instead of a simple flag.
 */
void
mprsas_startup_increment(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s freezing simq\n", __func__);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
195 
196 void
197 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
198 {
199 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
200 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
201 		xpt_release_simq(sassc->sim, 1);
202 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
203 	}
204 }
205 
/*
 * Drop the discovery reference count taken by mprsas_startup_increment().
 * On the 1 -> 0 transition all discovery-related work is done: clear the
 * startup flag, release the SIM queue, and either release boot (newer
 * CAM) or trigger a full-bus rescan (older CAM) to pick up the final
 * topology.
 */
void
mprsas_startup_decrement(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPRSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			xpt_release_boot();
#else
			mprsas_rescan_target(sassc->sc, NULL);
#endif
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
231 
232 /* The firmware requires us to stop sending commands when we're doing task
233  * management, so refcount the TMs and keep the simq frozen when any are in
234  * use.
235  */
/*
 * Allocate a command frame for a task management request.  TMs must
 * bypass the normal I/O path, so they come from the high-priority
 * command pool.  Returns NULL when the pool is exhausted.
 */
struct mpr_command *
mprsas_alloc_tm(struct mpr_softc *sc)
{
	MPR_FUNCTRACE(sc);
	return (mpr_alloc_high_priority_command(sc));
}
245 
246 void
247 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
248 {
249 	MPR_FUNCTRACE(sc);
250 	if (tm == NULL)
251 		return;
252 
253 	/*
254 	 * For TM's the devq is frozen for the device.  Unfreeze it here and
255 	 * free the resources used for freezing the devq.  Must clear the
256 	 * INRESET flag as well or scsi I/O will not work.
257 	 */
258 	if (tm->cm_targ != NULL) {
259 		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
260 	}
261 	if (tm->cm_ccb) {
262 		mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
263 		    tm->cm_targ->tid);
264 		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
265 		xpt_free_path(tm->cm_ccb->ccb_h.path);
266 		xpt_free_ccb(tm->cm_ccb);
267 	}
268 
269 	mpr_free_high_priority_command(sc, tm);
270 }
271 
272 void
273 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
274 {
275 	struct mprsas_softc *sassc = sc->sassc;
276 	path_id_t pathid;
277 	target_id_t targetid;
278 	union ccb *ccb;
279 
280 	MPR_FUNCTRACE(sc);
281 	pathid = cam_sim_path(sassc->sim);
282 	if (targ == NULL)
283 		targetid = CAM_TARGET_WILDCARD;
284 	else
285 		targetid = targ - sassc->targets;
286 
287 	/*
288 	 * Allocate a CCB and schedule a rescan.
289 	 */
290 	ccb = xpt_alloc_ccb_nowait();
291 	if (ccb == NULL) {
292 		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
293 		return;
294 	}
295 
296 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
297 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
298 		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
299 		xpt_free_ccb(ccb);
300 		return;
301 	}
302 
303 	if (targetid == CAM_TARGET_WILDCARD)
304 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
305 	else
306 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
307 
308 	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
309 	xpt_rescan(ccb);
310 }
311 
/*
 * printf-style debug logging for a command.  When the command has a CCB
 * attached, the message is prefixed with the CAM path and (for SCSI I/O)
 * the decoded CDB and transfer length; otherwise a synthetic
 * "(noperiph:...)" prefix is built from the SIM and target.  Emits
 * nothing unless 'level' is enabled in the softc's debug mask.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];		/* fixed backing store; sbuf truncates overflow */
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mpr_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));

	va_end(ap);
}
356 
357 static void
358 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
359 {
360 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
361 	struct mprsas_target *targ;
362 	uint16_t handle;
363 
364 	MPR_FUNCTRACE(sc);
365 
366 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
367 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
368 	targ = tm->cm_targ;
369 
370 	if (reply == NULL) {
371 		/* XXX retry the remove after the diag reset completes? */
372 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
373 		    "0x%04x\n", __func__, handle);
374 		mprsas_free_tm(sc, tm);
375 		return;
376 	}
377 
378 	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
379 		mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
380 		    "device 0x%x\n", reply->IOCStatus, handle);
381 		mprsas_free_tm(sc, tm);
382 		return;
383 	}
384 
385 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
386 	    reply->TerminationCount);
387 	mpr_free_reply(sc, tm->cm_reply_data);
388 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
389 
390 	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
391 	    targ->tid, handle);
392 
393 	/*
394 	 * Don't clear target if remove fails because things will get confusing.
395 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
396 	 * this target id if possible, and so we can assign the same target id
397 	 * to this device if it comes back in the future.
398 	 */
399 	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
400 		targ = tm->cm_targ;
401 		targ->handle = 0x0;
402 		targ->encl_handle = 0x0;
403 		targ->encl_level_valid = 0x0;
404 		targ->encl_level = 0x0;
405 		targ->connector_name[0] = ' ';
406 		targ->connector_name[1] = ' ';
407 		targ->connector_name[2] = ' ';
408 		targ->connector_name[3] = ' ';
409 		targ->encl_slot = 0x0;
410 		targ->exp_dev_handle = 0x0;
411 		targ->phy_num = 0x0;
412 		targ->linkrate = 0x0;
413 		targ->devinfo = 0x0;
414 		targ->flags = 0x0;
415 		targ->scsi_req_desc_type = 0;
416 	}
417 
418 	mprsas_free_tm(sc, tm);
419 }
420 
421 
422 /*
423  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
424  * Otherwise Volume Delete is same as Bare Drive Removal.
425  */
426 void
427 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
428 {
429 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
430 	struct mpr_softc *sc;
431 	struct mpr_command *cm;
432 	struct mprsas_target *targ = NULL;
433 
434 	MPR_FUNCTRACE(sassc->sc);
435 	sc = sassc->sc;
436 
437 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
438 	if (targ == NULL) {
439 		/* FIXME: what is the action? */
440 		/* We don't know about this device? */
441 		mpr_dprint(sc, MPR_ERROR,
442 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
443 		return;
444 	}
445 
446 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
447 
448 	cm = mprsas_alloc_tm(sc);
449 	if (cm == NULL) {
450 		mpr_dprint(sc, MPR_ERROR,
451 		    "%s: command alloc failure\n", __func__);
452 		return;
453 	}
454 
455 	mprsas_rescan_target(sc, targ);
456 
457 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
458 	req->DevHandle = targ->handle;
459 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
460 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
461 
462 	/* SAS Hard Link Reset / SATA Link Reset */
463 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
464 
465 	cm->cm_targ = targ;
466 	cm->cm_data = NULL;
467 	cm->cm_desc.HighPriority.RequestFlags =
468 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
469 	cm->cm_complete = mprsas_remove_volume;
470 	cm->cm_complete_data = (void *)(uintptr_t)handle;
471 
472 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
473 	    __func__, targ->tid);
474 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
475 
476 	mpr_map_command(sc, cm);
477 }
478 
479 /*
480  * The MPT3 firmware performs debounce on the link to avoid transient link
481  * errors and false removals.  When it does decide that link has been lost
482  * and a device needs to go away, it expects that the host will perform a
483  * target reset and then an op remove.  The reset has the side-effect of
484  * aborting any outstanding requests for the device, which is required for
485  * the op-remove to succeed.  It's not clear if the host should check for
486  * the device coming back alive after the reset.
487  */
/*
 * Begin removal of a bare device whose link the firmware has declared
 * lost: send a target reset TM (aborting all outstanding I/O), then the
 * completion handler mprsas_remove_device() issues the op-remove.
 * A rescan is queued up front so CAM notices the departure.
 */
void
mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ = NULL;

	MPR_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mprsas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
		    __func__, handle);
		return;
	}

	targ->flags |= MPRSAS_TARGET_INREMOVAL;

	/* High-priority command; if none are free, rely on a later event
	 * to retry the removal. */
	cm = mprsas_alloc_tm(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
		    __func__);
		return;
	}

	mprsas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	cm->cm_complete = mprsas_remove_device;
	/* Stash the handle for the completion handler (not a pointer). */
	cm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);

	mpr_map_command(sc, cm);
}
542 
/*
 * Completion handler for the target-reset TM sent by
 * mprsas_prepare_remove().  If the reset succeeded, the same command
 * frame is reused to send the SAS_IO_UNIT_CONTROL op-remove (completing
 * in mprsas_remove_complete()), and any commands still queued on the
 * target are failed with CAM_DEV_NOT_THERE.
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	struct mpr_command *next_cm;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	/* A NULL reply means the firmware was reset while the TM was
	 * outstanding. */
	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	/* Free the TM reply now; clearing cm_reply prevents a double free
	 * when the command is reused below. */
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_map_command(sc, tm);

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
	/* The reset should have aborted everything; complete any stragglers
	 * still queued on the target as "device not there". */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mprsas_scsiio_complete(sc, tm);
	}
}
620 
/*
 * Completion handler for the SAS_IO_UNIT_CONTROL op-remove sent by
 * mprsas_remove_device().  On success, clears the target's discovery
 * state and frees its LUN list; devname/sasaddr are deliberately kept
 * (see comment below).
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		/* Free all LUN records attached to this target. */
		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}
691 
692 static int
693 mprsas_register_events(struct mpr_softc *sc)
694 {
695 	uint8_t events[16];
696 
697 	bzero(events, 16);
698 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
699 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
700 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
701 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
702 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
703 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
704 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
705 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
706 	setbit(events, MPI2_EVENT_IR_VOLUME);
707 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
708 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
709 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
710 
711 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
712 	    &sc->sassc->mprsas_eh);
713 
714 	return (0);
715 }
716 
/*
 * Attach the SAS/CAM side of the driver: allocate the sassc and target
 * table, create the CAM devq/SIM and register the bus, start the
 * firmware-event taskqueue, freeze the SIM queue until discovery
 * finishes, and register an async handler used for EEDP detection.
 * Returns 0 or an errno; on error mpr_detach_sas() unwinds whatever
 * was set up.
 */
int
mpr_attach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	cam_status status;
	int unit, error = 0;

	MPR_FUNCTRACE(sc);

	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
	/* NOTE(review): malloc(9) with M_WAITOK cannot return NULL, so
	 * this check is purely defensive. */
	if (!sassc) {
		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
		    __func__, __LINE__);
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets;
	sassc->targets = malloc(sizeof(struct mprsas_target) *
	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
	if (!sassc->targets) {
		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
		    __func__, __LINE__);
		free(sassc, M_MPR);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* One devq slot per firmware request frame. */
	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpr_dev);
	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
	    unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mpr_dev));

	mpr_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
		mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n",
		    error);
		mpr_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	/* Freezes the SIM queue until mprsas_startup_decrement() hits 0. */
	mprsas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mpr_printf(sc, "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
#else
		event = AC_FOUND_DEVICE;
#endif

		/*
		 * Prior to the CAM locking improvements, we can't call
		 * xpt_register_async() with a particular path specified.
		 *
		 * If a path isn't specified, xpt_register_async() will
		 * generate a wildcard path and acquire the XPT lock while
		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
		 * It will then drop the XPT lock once that is done.
		 *
		 * If a path is specified for xpt_register_async(), it will
		 * not acquire and drop the XPT lock around the call to
		 * xpt_action().  xpt_action() asserts that the caller
		 * holds the SIM lock, so the SIM lock has to be held when
		 * calling xpt_register_async() when the path is specified.
		 *
		 * But xpt_register_async calls xpt_for_all_devices(),
		 * which calls xptbustraverse(), which will acquire each
		 * SIM lock.  When it traverses our particular bus, it will
		 * necessarily acquire the SIM lock, which will lead to a
		 * recursive lock acquisition.
		 *
		 * The CAM locking changes fix this problem by acquiring
		 * the XPT topology lock around bus traversal in
		 * xptbustraverse(), so the caller can hold the SIM lock
		 * and it does not cause a recursive lock acquisition.
		 *
		 * These __FreeBSD_version values are approximate, especially
		 * for stable/10, which is two months later than the actual
		 * change.
		 */

#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		mpr_unlock(sc);
		status = xpt_register_async(event, mprsas_async, sc,
					    NULL);
		mpr_lock(sc);
#else
		status = xpt_register_async(event, mprsas_async, sc,
					    sassc->path);
#endif

		if (status != CAM_REQ_CMP) {
			mpr_dprint(sc, MPR_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpr_printf(sc, "EEDP capabilities disabled.\n");
	}

	mpr_unlock(sc);

	mprsas_register_events(sc);
out:
	if (error)
		mpr_detach_sas(sc);
	return (error);
}
886 
/*
 * Tear down the SAS/CAM layer, undoing mpr_attach_sas().  Safe to call
 * on a partially attached softc (used as the attach error path), so
 * every resource is checked before being released.  The event taskqueue
 * is drained/freed before the softc lock is taken to avoid deadlock
 * with in-flight event tasks.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		/* event mask 0 == deregister for this path */
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* If attach never finished, the simq is still frozen; release it
	 * so CAM can drain. */
	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	sassc->flags |= MPRSAS_SHUTDOWN;
	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any LUN records still attached to targets. */
	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}
947 
948 void
949 mprsas_discovery_end(struct mprsas_softc *sassc)
950 {
951 	struct mpr_softc *sc = sassc->sc;
952 
953 	MPR_FUNCTRACE(sc);
954 
955 	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
956 		callout_stop(&sassc->discovery_callout);
957 
958 }
959 
/*
 * CAM action entry point for the SIM.  Dispatches on the CCB function
 * code; must be called with the mpr lock held.  CCBs that complete
 * synchronously fall through to the xpt_done() at the bottom; the
 * asynchronous paths (XPT_RESET_DEV, XPT_SCSI_IO, XPT_SMP_IO) return
 * early and complete from their own handlers.
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report controller/bus capabilities back to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;
		/* Claim the highest target ID as the initiator's own. */
		cpi->initiator_id = sassc->maxtargets - 1;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Avago Tech (LSI)", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXXSLM-probably need to base this number on max SGL's and
		 * page size.
		 */
		cpi->maxio = 256 * 1024;
#endif
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report per-target negotiated transport settings. */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* A zero device handle means no device at this target ID. */
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Translate the firmware link rate code into a bit rate. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			/* Unknown rate code; report the speed as invalid. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mpr_dprint(sassc->sc, MPR_XINFO,
		    "mprsas_action XPT_RESET_DEV\n");
		mprsas_action_resetdev(sassc, ccb);
		/* Completed asynchronously by the reset path. */
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* Not supported by the hardware; pretend they succeeded. */
		mpr_dprint(sassc->sc, MPR_XINFO,
		    "mprsas_action faking success for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mprsas_action_scsiio(sassc, ccb);
		/* Completed asynchronously by mprsas_scsiio_complete(). */
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mprsas_action_smpio(sassc, ccb);
		/* Completed asynchronously by the SMP path. */
		return;
#endif
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1094 
1095 static void
1096 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1097     target_id_t target_id, lun_id_t lun_id)
1098 {
1099 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1100 	struct cam_path *path;
1101 
1102 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1103 	    ac_code, target_id, (uintmax_t)lun_id);
1104 
1105 	if (xpt_create_path(&path, NULL,
1106 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1107 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1108 		    "notification\n");
1109 		return;
1110 	}
1111 
1112 	xpt_async(ac_code, path, NULL);
1113 	xpt_free_path(path);
1114 }
1115 
/*
 * Force completion of every active command with a NULL reply, as part
 * of recovering from a diag reset.  Each command is completed via its
 * completion callback, woken if a thread is sleeping on it, or logged
 * if it was active but had neither.  Called with the mpr lock held.
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Polled commands check COMPLETE themselves; set it here. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag "
			    "reset\n", cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		}

		/* Wake any thread sleeping on this command. */
		if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		/* Keep the active-command counter from going negative. */
		if (cm->cm_sc->io_cmds_active != 0) {
			cm->cm_sc->io_cmds_active--;
		} else {
			mpr_dprint(cm->cm_sc, MPR_INFO, "Warning: "
			    "io_cmds_active is out of sync - resynching to "
			    "0\n");
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1168 
/*
 * Called after a controller diag reset: put the driver back into
 * startup/discovery mode, announce a bus reset to CAM, flush all
 * outstanding commands, and invalidate every target's firmware handle
 * so rediscovery can assign fresh ones.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
	    __func__, sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		/* Outstanding counts should be zero after the flush above. */
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}
1211 static void
1212 mprsas_tm_timeout(void *data)
1213 {
1214 	struct mpr_command *tm = data;
1215 	struct mpr_softc *sc = tm->cm_sc;
1216 
1217 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1218 
1219 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY,
1220 	    "task mgmt %p timed out\n", tm);
1221 	mpr_reinit(sc);
1222 }
1223 
1224 static void
1225 mprsas_logical_unit_reset_complete(struct mpr_softc *sc,
1226     struct mpr_command *tm)
1227 {
1228 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1229 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1230 	unsigned int cm_count = 0;
1231 	struct mpr_command *cm;
1232 	struct mprsas_target *targ;
1233 
1234 	callout_stop(&tm->cm_callout);
1235 
1236 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1237 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1238 	targ = tm->cm_targ;
1239 
1240 	/*
1241 	 * Currently there should be no way we can hit this case.  It only
1242 	 * happens when we have a failure to allocate chain frames, and
1243 	 * task management commands don't have S/G lists.
1244 	 */
1245 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1246 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! "
1247 		    "This should not happen!\n", __func__, tm->cm_flags);
1248 		mprsas_free_tm(sc, tm);
1249 		return;
1250 	}
1251 
1252 	if (reply == NULL) {
1253 		mprsas_log_command(tm, MPR_RECOVERY,
1254 		    "NULL reset reply for tm %p\n", tm);
1255 		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1256 			/* this completion was due to a reset, just cleanup */
1257 			targ->tm = NULL;
1258 			mprsas_free_tm(sc, tm);
1259 		}
1260 		else {
1261 			/* we should have gotten a reply. */
1262 			mpr_reinit(sc);
1263 		}
1264 		return;
1265 	}
1266 
1267 	mprsas_log_command(tm, MPR_RECOVERY,
1268 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1269 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1270 	    le32toh(reply->TerminationCount));
1271 
1272 	/* See if there are any outstanding commands for this LUN.
1273 	 * This could be made more efficient by using a per-LU data
1274 	 * structure of some sort.
1275 	 */
1276 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1277 		if (cm->cm_lun == tm->cm_lun)
1278 			cm_count++;
1279 	}
1280 
1281 	if (cm_count == 0) {
1282 		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1283 		    "logical unit %u finished recovery after reset\n",
1284 		    tm->cm_lun, tm);
1285 
1286 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1287 		    tm->cm_lun);
1288 
1289 		/* we've finished recovery for this logical unit.  check and
1290 		 * see if some other logical unit has a timedout command
1291 		 * that needs to be processed.
1292 		 */
1293 		cm = TAILQ_FIRST(&targ->timedout_commands);
1294 		if (cm) {
1295 			mprsas_send_abort(sc, tm, cm);
1296 		}
1297 		else {
1298 			targ->tm = NULL;
1299 			mprsas_free_tm(sc, tm);
1300 		}
1301 	}
1302 	else {
1303 		/* if we still have commands for this LUN, the reset
1304 		 * effectively failed, regardless of the status reported.
1305 		 * Escalate to a target reset.
1306 		 */
1307 		mprsas_log_command(tm, MPR_RECOVERY,
1308 		    "logical unit reset complete for tm %p, but still have %u "
1309 		    "command(s)\n", tm, cm_count);
1310 		mprsas_send_reset(sc, tm,
1311 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1312 	}
1313 }
1314 
/*
 * Completion handler for a TARGET_RESET task management request.  If
 * the target has no outstanding commands left, recovery succeeded:
 * announce AC_SENT_BDR to CAM and free the TM.  If commands are still
 * outstanding the reset effectively failed, and the only remaining
 * escalation is a full controller diag reset.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed, so its escalation timeout no longer applies. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "NULL reset reply for tm %p\n", tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_reinit(sc);
		}
		return;
	}

	mprsas_log_command(tm, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "recovery finished after target reset\n");

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "target reset complete for tm %p, but still have %u "
		    "command(s)\n", tm, targ->outstanding);
		mpr_reinit(sc);
	}
}
1384 
1385 #define MPR_RESET_TIMEOUT 30
1386 
/*
 * Build and send a SCSI task management reset request of the given
 * type (LOGICAL_UNIT_RESET or TARGET_RESET) for the target attached
 * to "tm".  Sets the appropriate completion handler, freezes the
 * target's devq via mprsas_prepare_for_tm(), and arms a callout that
 * escalates to a diag reset if the TM itself times out.
 *
 * Returns 0 on success, -1 for a missing handle or unknown type, or
 * the error from mpr_map_command().
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err;

	target = tm->cm_targ;
	/* A zero handle means the target is gone; nothing to reset. */
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "sending logical unit reset\n");
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	}
	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "sending target reset\n");
		tm->cm_complete = mprsas_target_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	mpr_dprint(sc, MPR_INFO, "to target %u handle 0x%04x\n", target->tid,
	    target->handle);
	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", target->encl_level,
		    target->encl_slot, target->connector_name);
	}

	tm->cm_data = NULL;
	/* TMs go out via the high-priority request queue. */
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	/* Escalate to a diag reset if this TM never completes. */
	callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mprsas_log_command(tm, MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
1455 
1456 
/*
 * Completion handler for an ABORT_TASK task management request.  Three
 * outcomes: no timed-out commands remain (recovery done, free the TM);
 * the head of the timed-out list is a different command (this abort
 * worked, move on to the next one); or the same command is still at
 * the head (the abort failed, escalate to a logical unit reset).
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed, so its escalation timeout no longer applies. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_reinit(sc);
		}
		return;
	}

	mprsas_log_command(tm, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mprsas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1538 
1539 #define MPR_ABORT_TIMEOUT 5
1540 
1541 static int
1542 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1543     struct mpr_command *cm)
1544 {
1545 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1546 	struct mprsas_target *targ;
1547 	int err;
1548 
1549 	targ = cm->cm_targ;
1550 	if (targ->handle == 0) {
1551 		mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1552 		    __func__, cm->cm_ccb->ccb_h.target_id);
1553 		return -1;
1554 	}
1555 
1556 	mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1557 	    "Aborting command %p\n", cm);
1558 
1559 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1560 	req->DevHandle = htole16(targ->handle);
1561 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1562 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1563 
1564 	/* XXX Need to handle invalid LUNs */
1565 	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1566 
1567 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1568 
1569 	tm->cm_data = NULL;
1570 	tm->cm_desc.HighPriority.RequestFlags =
1571 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1572 	tm->cm_complete = mprsas_abort_complete;
1573 	tm->cm_complete_data = (void *)tm;
1574 	tm->cm_targ = cm->cm_targ;
1575 	tm->cm_lun = cm->cm_lun;
1576 
1577 	callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1578 	    mprsas_tm_timeout, tm);
1579 
1580 	targ->aborts++;
1581 
1582 	mpr_dprint(sc, MPR_INFO, "Sending reset from %s for target ID %d\n",
1583 	    __func__, targ->tid);
1584 	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1585 
1586 	err = mpr_map_command(sc, tm);
1587 	if (err)
1588 		mprsas_log_command(tm, MPR_RECOVERY,
1589 		    "error %d sending abort for cm %p SMID %u\n",
1590 		    err, cm, req->TaskMID);
1591 	return err;
1592 }
1593 
/*
 * Callout handler for a SCSI IO that exceeded its CAM timeout.  First
 * polls the interrupt handler in case the completion is merely
 * pending; otherwise marks the command timed out, queues it on the
 * target's recovery list, and starts (or joins) per-target TM-based
 * error recovery by aborting the command.
 */
static void
mprsas_scsiio_timeout(void *data)
{
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_state == MPR_CM_STATE_FREE) {
		/* Completion raced the timeout; no recovery needed. */
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	mprsas_log_command(cm, MPR_ERROR, "command timeout cm %p ccb %p "
	    "target %u, handle(0x%04x)\n", cm, cm->cm_ccb, targ->tid,
	    targ->handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_ERROR, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_state = MPR_CM_STATE_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}
1673 
/*
 * Handle an XPT_SCSI_IO CCB from CAM: validate the target, allocate a
 * driver command, translate the CCB into an MPI2 SCSI IO request
 * (direction, tagging, LUN, CDB, optional EEDP protection fields),
 * arm the per-command timeout, and hand the request to the hardware.
 * The CCB is completed asynchronously by mprsas_scsiio_complete().
 * Called with the mpr lock held.
 */
static void
mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;
	struct mpr_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero device handle means no device at this target ID. */
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members are owned by the volume, not CAM. */
	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPRSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	cm = mpr_alloc_command(sc);
	/* Out of commands, or a diag reset is in flight: ask CAM to retry. */
	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpr_free_command(sc, cm);
		}
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Build the MPI2 SCSI IO request from the CCB. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPR_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* 32-byte CDBs carry 4 extra 32-bit words beyond the base 16. */
	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Merge in per-target Transport Layer Retry settings. */
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);

	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mpr_free_command(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB whether CAM passed it inline or by pointer. */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* Find the LUN record for this IO; NULL if not tracked. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 (16-byte) or 2. */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->
				    CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				/* Set RDPROTECT/WRPROTECT in CDB byte 1. */
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		/* Busdma maps the data buffer straight from the CCB. */
		cm->cm_data = ccb;
		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	/* Inline SGL space: request frame bytes past the 24-word header. */
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;
	/*
	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
	 * and set descriptor type.
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		cm->cm_desc.FastPathSCSIIO.DevHandle = htole16(targ->handle);
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

	/* Arm the per-command timeout (CCB timeout is in milliseconds). */
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_map_command(sc, cm);
	return;
}
1944 
1945 static void
1946 mpr_response_code(struct mpr_softc *sc, u8 response_code)
1947 {
1948         char *desc;
1949 
1950         switch (response_code) {
1951         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1952                 desc = "task management request completed";
1953                 break;
1954         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1955                 desc = "invalid frame";
1956                 break;
1957         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1958                 desc = "task management request not supported";
1959                 break;
1960         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1961                 desc = "task management request failed";
1962                 break;
1963         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1964                 desc = "task management request succeeded";
1965                 break;
1966         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1967                 desc = "invalid lun";
1968                 break;
1969         case 0xA:
1970                 desc = "overlapped tag attempted";
1971                 break;
1972         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1973                 desc = "task queued, however not sent to target";
1974                 break;
1975         default:
1976                 desc = "unknown";
1977                 break;
1978         }
1979 	mpr_dprint(sc, MPR_XINFO, "response_code(0x%01x): %s\n", response_code,
1980 	    desc);
1981 }
1982 
1983 /**
1984  * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
1985  */
1986 static void
1987 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
1988     Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
1989 {
1990 	u32 response_info;
1991 	u8 *response_bytes;
1992 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1993 	    MPI2_IOCSTATUS_MASK;
1994 	u8 scsi_state = mpi_reply->SCSIState;
1995 	u8 scsi_status = mpi_reply->SCSIStatus;
1996 	char *desc_ioc_state = NULL;
1997 	char *desc_scsi_status = NULL;
1998 	char *desc_scsi_state = sc->tmp_string;
1999 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2000 
2001 	if (log_info == 0x31170000)
2002 		return;
2003 
2004 	switch (ioc_status) {
2005 	case MPI2_IOCSTATUS_SUCCESS:
2006 		desc_ioc_state = "success";
2007 		break;
2008 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2009 		desc_ioc_state = "invalid function";
2010 		break;
2011 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2012 		desc_ioc_state = "scsi recovered error";
2013 		break;
2014 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2015 		desc_ioc_state = "scsi invalid dev handle";
2016 		break;
2017 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2018 		desc_ioc_state = "scsi device not there";
2019 		break;
2020 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2021 		desc_ioc_state = "scsi data overrun";
2022 		break;
2023 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2024 		desc_ioc_state = "scsi data underrun";
2025 		break;
2026 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2027 		desc_ioc_state = "scsi io data error";
2028 		break;
2029 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2030 		desc_ioc_state = "scsi protocol error";
2031 		break;
2032 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2033 		desc_ioc_state = "scsi task terminated";
2034 		break;
2035 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2036 		desc_ioc_state = "scsi residual mismatch";
2037 		break;
2038 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2039 		desc_ioc_state = "scsi task mgmt failed";
2040 		break;
2041 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2042 		desc_ioc_state = "scsi ioc terminated";
2043 		break;
2044 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2045 		desc_ioc_state = "scsi ext terminated";
2046 		break;
2047 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2048 		desc_ioc_state = "eedp guard error";
2049 		break;
2050 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2051 		desc_ioc_state = "eedp ref tag error";
2052 		break;
2053 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2054 		desc_ioc_state = "eedp app tag error";
2055 		break;
2056 	default:
2057 		desc_ioc_state = "unknown";
2058 		break;
2059 	}
2060 
2061 	switch (scsi_status) {
2062 	case MPI2_SCSI_STATUS_GOOD:
2063 		desc_scsi_status = "good";
2064 		break;
2065 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
2066 		desc_scsi_status = "check condition";
2067 		break;
2068 	case MPI2_SCSI_STATUS_CONDITION_MET:
2069 		desc_scsi_status = "condition met";
2070 		break;
2071 	case MPI2_SCSI_STATUS_BUSY:
2072 		desc_scsi_status = "busy";
2073 		break;
2074 	case MPI2_SCSI_STATUS_INTERMEDIATE:
2075 		desc_scsi_status = "intermediate";
2076 		break;
2077 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2078 		desc_scsi_status = "intermediate condmet";
2079 		break;
2080 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2081 		desc_scsi_status = "reservation conflict";
2082 		break;
2083 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2084 		desc_scsi_status = "command terminated";
2085 		break;
2086 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
2087 		desc_scsi_status = "task set full";
2088 		break;
2089 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
2090 		desc_scsi_status = "aca active";
2091 		break;
2092 	case MPI2_SCSI_STATUS_TASK_ABORTED:
2093 		desc_scsi_status = "task aborted";
2094 		break;
2095 	default:
2096 		desc_scsi_status = "unknown";
2097 		break;
2098 	}
2099 
2100 	desc_scsi_state[0] = '\0';
2101 	if (!scsi_state)
2102 		desc_scsi_state = " ";
2103 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2104 		strcat(desc_scsi_state, "response info ");
2105 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2106 		strcat(desc_scsi_state, "state terminated ");
2107 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2108 		strcat(desc_scsi_state, "no status ");
2109 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2110 		strcat(desc_scsi_state, "autosense failed ");
2111 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2112 		strcat(desc_scsi_state, "autosense valid ");
2113 
2114 	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2115 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2116 	if (targ->encl_level_valid) {
2117 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2118 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2119 		    targ->connector_name);
2120 	}
2121 	/* We can add more detail about underflow data here
2122 	 * TO-DO
2123 	 * */
2124 	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2125 	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2126 	    desc_scsi_state, scsi_state);
2127 
2128 	if (sc->mpr_debug & MPR_XINFO &&
2129 	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2130 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2131 		scsi_sense_print(csio);
2132 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2133 	}
2134 
2135 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2136 		response_info = le32toh(mpi_reply->ResponseInfo);
2137 		response_bytes = (u8 *)&response_info;
2138 		mpr_response_code(sc,response_bytes[0]);
2139 	}
2140 }
2141 
/*
 * Completion handler for SCSI IO commands.  Runs with the mpr mutex held.
 * Performs DMA sync/unload, retires the command from the target's
 * outstanding list, translates the firmware reply (if any) into CAM
 * status, and completes the CCB via xpt_done().  Commands that failed
 * before reaching the hardware (no reply) take a fast path.
 */
static void
mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mprsas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;
	struct mprsas_target *target;
	target_id_t target_id;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* Cancel the pending command timeout; the mpr lock must be held. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Bookkeeping: this command is no longer outstanding on the target. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

	/* Log completions that happened during timeout/TM error recovery. */
	if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		if (cm->cm_reply != NULL)
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus,
			    rep->SCSIState, le32toh(rep->TransferCount));
		else
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
		mprsas_log_command(cm, MPR_RECOVERY,
		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
			mpr_dprint(sc, MPR_INFO, "Error sending command, "
			    "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
				csio->scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
				mpr_dprint(sc, MPR_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
		 */
		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mpr_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mprsas_log_command(cm, MPR_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/* Translate the IOC status from the reply into CAM status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
		    == MPR_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		/* Copy firmware-supplied autosense data into the CCB. */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < csio->sense_len)
				csio->sense_resid = csio->sense_len -
				    returned_sense_len;
			else
				csio->sense_resid = 0;

			sense_len = min(returned_sense_len,
			    csio->sense_len - csio->sense_resid);
			bzero(&csio->sense_data, sizeof(csio->sense_data));
			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}

		/*
		 * If this is a SATA direct-access end device, mark it so that
		 * a SCSI StartStopUnit command will be sent to it when the
		 * driver is being shutdown.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
		    ((sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
			target = &sassc->targets[target_id];
			target->supports_SSU = TRUE;
			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
			    target_id);
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP)
;
		else
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mpr_print_scsiio_cmd(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * Since these are generally external (i.e. hopefully
		 * transient transport-related) errors, retry these without
		 * decrementing the retry count.
		 */
		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
		mprsas_log_command(cm, MPR_INFO,
		    "terminated ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mprsas_log_command(cm, MPR_XINFO,
		    "completed ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

	/* Print reply diagnostics at the MPR_XINFO debug level. */
	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);

	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
		    "queue\n");
	}

	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mpr_free_command(sc, cm);
	xpt_done(ccb);
}
2515 
2516 #if __FreeBSD_version >= 900026
2517 static void
2518 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2519 {
2520 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2521 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2522 	uint64_t sasaddr;
2523 	union ccb *ccb;
2524 
2525 	ccb = cm->cm_complete_data;
2526 
2527 	/*
2528 	 * Currently there should be no way we can hit this case.  It only
2529 	 * happens when we have a failure to allocate chain frames, and SMP
2530 	 * commands require two S/G elements only.  That should be handled
2531 	 * in the standard request size.
2532 	 */
2533 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2534 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2535 		    "request!\n", __func__, cm->cm_flags);
2536 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2537 		goto bailout;
2538         }
2539 
2540 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2541 	if (rpl == NULL) {
2542 		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2543 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2544 		goto bailout;
2545 	}
2546 
2547 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2548 	sasaddr = le32toh(req->SASAddress.Low);
2549 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2550 
2551 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2552 	    MPI2_IOCSTATUS_SUCCESS ||
2553 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2554 		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2555 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2556 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2557 		goto bailout;
2558 	}
2559 
2560 	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
2561 	    "completed successfully\n", __func__, (uintmax_t)sasaddr);
2562 
2563 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2564 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2565 	else
2566 		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2567 
2568 bailout:
2569 	/*
2570 	 * We sync in both directions because we had DMAs in the S/G list
2571 	 * in both directions.
2572 	 */
2573 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2574 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2575 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2576 	mpr_free_command(sc, cm);
2577 	xpt_done(ccb);
2578 }
2579 
/*
 * Build and dispatch an SMP passthrough request to the given SAS address.
 * The request and response buffers come from the CCB; each must be a
 * single segment, and both are handed to the chip via a two-element uio.
 * On any validation or mapping failure the CCB is completed immediately
 * with an appropriate error status.
 */
static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
    uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

	/*
	 * Newer FreeBSD versions encode the CCB data location in
	 * CAM_DATA_MASK; older versions use separate flag bits.  Both
	 * branches extract single request/response buffer pointers.
	 */
#if (__FreeBSD_version >= 1000028) || \
    ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR,
			    "%s: multiple request or response buffer segments "
			    "not supported for SMP\n", __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}
#else /* __FreeBSD_version < 1000028 */
	/*
	 * XXX We don't yet support physical addresses here.
	 */
	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	/*
	 * If the user wants to send an S/G list, check to make sure they
	 * have single buffers.
	 */
	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
	} else {
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
	}
#endif /* __FreeBSD_version < 1000028 */

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
	    "%#jx\n", __func__, (uintmax_t)sasaddr);

	mpr_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mpr_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mpr_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mprsas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mpr_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
		    "mpr_map_command()\n", __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mpr_free_command(sc, cm);
	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;
}
2811 
/*
 * Handle an XPT_SMP_IO CCB: work out which SAS address the SMP request
 * must be routed to -- either the device itself, if it contains an
 * embedded SMP target, or its parent device (normally the expander it is
 * attached to) -- and hand the CCB off to mprsas_send_smpcmd().  On any
 * failure the CCB is completed here with an appropriate error status.
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		/* A zero device handle means the target is not present. */
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly; otherwise we have to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		/*
		 * Old probe: look the parent up in the driver's target list
		 * and take its SAS address, after verifying it really is an
		 * SMP target.
		 */
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/*
		 * New probe: the parent's devinfo and SAS address were
		 * cached on the target when it was discovered, so no
		 * target-list lookup is needed.
		 */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */

	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
2939 #endif //__FreeBSD_version >= 900026
2940 
/*
 * Handle an XPT_RESET_DEV CCB by sending a SCSI task management Target
 * Reset to the device.  The CCB is completed asynchronously by
 * mprsas_resetdev_complete() when the task management reply arrives.
 * Must be called with the mpr mutex held.
 */
static void
mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *tm;
	struct mprsas_target *targ;

	MPR_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_RESET_DEV\n",
	    ccb->ccb_h.target_id));
	sc = sassc->sc;
	tm = mpr_alloc_command(sc);
	if (tm == NULL) {
		/* No free command frames; fail the CCB so CAM can retry. */
		mpr_dprint(sc, MPR_ERROR,
		    "command alloc failure in mprsas_action_resetdev\n");
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	/* Build the SCSI task management Target Reset request. */
	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	/* Task management requests carry no data and go high priority. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mprsas_resetdev_complete;
	tm->cm_complete_data = ccb;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	tm->cm_targ = targ;
	/* Block further I/O to the target until the reset completes. */
	targ->flags |= MPRSAS_TARGET_INRESET;

	mpr_map_command(sc, tm);
}
2987 
2988 static void
2989 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
2990 {
2991 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2992 	union ccb *ccb;
2993 
2994 	MPR_FUNCTRACE(sc);
2995 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2996 
2997 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
2998 	ccb = tm->cm_complete_data;
2999 
3000 	/*
3001 	 * Currently there should be no way we can hit this case.  It only
3002 	 * happens when we have a failure to allocate chain frames, and
3003 	 * task management commands don't have S/G lists.
3004 	 */
3005 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3006 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3007 
3008 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3009 
3010 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3011 		    "handle %#04x! This should not happen!\n", __func__,
3012 		    tm->cm_flags, req->DevHandle);
3013 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3014 		goto bailout;
3015 	}
3016 
3017 	mpr_dprint(sc, MPR_XINFO,
3018 	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3019 	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3020 
3021 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3022 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3023 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3024 		    CAM_LUN_WILDCARD);
3025 	}
3026 	else
3027 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3028 
3029 bailout:
3030 
3031 	mprsas_free_tm(sc, tm);
3032 	xpt_done(ccb);
3033 }
3034 
3035 static void
3036 mprsas_poll(struct cam_sim *sim)
3037 {
3038 	struct mprsas_softc *sassc;
3039 
3040 	sassc = cam_sim_softc(sim);
3041 
3042 	if (sassc->sc->mpr_debug & MPR_TRACE) {
3043 		/* frequent debug messages during a panic just slow
3044 		 * everything down too much.
3045 		 */
3046 		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3047 		    __func__);
3048 		sassc->sc->mpr_debug &= ~MPR_TRACE;
3049 	}
3050 
3051 	mpr_intr_locked(sassc->sc);
3052 }
3053 
/*
 * CAM asynchronous event callback.  Two events are of interest:
 * AC_ADVINFO_CHANGED (on FreeBSD versions that support it) to pick up
 * changes to a LUN's long read capacity data for EEDP bookkeeping, and
 * AC_FOUND_DEVICE (on older versions) to probe new devices for EEDP
 * support via mprsas_check_eedp().
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;
#endif

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look for an existing record for this LUN. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* Not seen before; create a LUN record to hold EEDP state. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read capacity data via an advinfo
		 * CCB so we can tell whether the LUN is protection
		 * (EEDP) formatted.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/* Record whether the LUN is formatted with protection info. */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#endif
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
			break;
#endif

		cgd = arg;
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
		/* Older CAM: probe the new device for EEDP support here. */
		mprsas_check_eedp(sc, path, cgd);
#endif
		break;
	}
	default:
		break;
	}
}
3183 
3184 #if (__FreeBSD_version < 901503) || \
3185     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3186 static void
3187 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3188     struct ccb_getdev *cgd)
3189 {
3190 	struct mprsas_softc *sassc = sc->sassc;
3191 	struct ccb_scsiio *csio;
3192 	struct scsi_read_capacity_16 *scsi_cmd;
3193 	struct scsi_read_capacity_eedp *rcap_buf;
3194 	path_id_t pathid;
3195 	target_id_t targetid;
3196 	lun_id_t lunid;
3197 	union ccb *ccb;
3198 	struct cam_path *local_path;
3199 	struct mprsas_target *target;
3200 	struct mprsas_lun *lun;
3201 	uint8_t	found_lun;
3202 	char path_str[64];
3203 
3204 	pathid = cam_sim_path(sassc->sim);
3205 	targetid = xpt_path_target_id(path);
3206 	lunid = xpt_path_lun_id(path);
3207 
3208 	KASSERT(targetid < sassc->maxtargets,
3209 	    ("Target %d out of bounds in mprsas_check_eedp\n", targetid));
3210 	target = &sassc->targets[targetid];
3211 	if (target->handle == 0x0)
3212 		return;
3213 
3214 	/*
3215 	 * Determine if the device is EEDP capable.
3216 	 *
3217 	 * If this flag is set in the inquiry data, the device supports
3218 	 * protection information, and must support the 16 byte read capacity
3219 	 * command, otherwise continue without sending read cap 16
3220 	 */
3221 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3222 		return;
3223 
3224 	/*
3225 	 * Issue a READ CAPACITY 16 command.  This info is used to determine if
3226 	 * the LUN is formatted for EEDP support.
3227 	 */
3228 	ccb = xpt_alloc_ccb_nowait();
3229 	if (ccb == NULL) {
3230 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3231 		    "support.\n");
3232 		return;
3233 	}
3234 
3235 	if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid)
3236 	    != CAM_REQ_CMP) {
3237 		mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3238 		    "support\n");
3239 		xpt_free_ccb(ccb);
3240 		return;
3241 	}
3242 
3243 	/*
3244 	 * If LUN is already in list, don't create a new one.
3245 	 */
3246 	found_lun = FALSE;
3247 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3248 		if (lun->lun_id == lunid) {
3249 			found_lun = TRUE;
3250 			break;
3251 		}
3252 	}
3253 	if (!found_lun) {
3254 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3255 		    M_NOWAIT | M_ZERO);
3256 		if (lun == NULL) {
3257 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3258 			    "EEDP support.\n");
3259 			xpt_free_path(local_path);
3260 			xpt_free_ccb(ccb);
3261 			return;
3262 		}
3263 		lun->lun_id = lunid;
3264 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3265 	}
3266 
3267 	xpt_path_string(local_path, path_str, sizeof(path_str));
3268 	mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3269 	    path_str, target->handle);
3270 
3271 	/*
3272 	 * Issue a READ CAPACITY 16 command for the LUN.  The
3273 	 * mprsas_read_cap_done function will load the read cap info into the
3274 	 * LUN struct.
3275 	 */
3276 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3277 	    M_NOWAIT | M_ZERO);
3278 	if (rcap_buf == NULL) {
3279 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3280 		    "buffer for EEDP support.\n");
3281 		xpt_free_path(ccb->ccb_h.path);
3282 		xpt_free_ccb(ccb);
3283 		return;
3284 	}
3285 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3286 	csio = &ccb->csio;
3287 	csio->ccb_h.func_code = XPT_SCSI_IO;
3288 	csio->ccb_h.flags = CAM_DIR_IN;
3289 	csio->ccb_h.retry_count = 4;
3290 	csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3291 	csio->ccb_h.timeout = 60000;
3292 	csio->data_ptr = (uint8_t *)rcap_buf;
3293 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3294 	csio->sense_len = MPR_SENSE_LEN;
3295 	csio->cdb_len = sizeof(*scsi_cmd);
3296 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3297 
3298 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3299 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3300 	scsi_cmd->opcode = 0x9E;
3301 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3302 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3303 
3304 	ccb->ccb_h.ppriv_ptr1 = sassc;
3305 	xpt_action(ccb);
3306 }
3307 
/*
 * Completion handler for the READ CAPACITY(16) issued by
 * mprsas_check_eedp().  Looks up the LUN the command was sent to and
 * records whether it is formatted with protection information (EEDP),
 * then releases the buffer, path, and CCB allocated by the submitter.
 */
static void
mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mprsas_softc *sassc;
	struct mprsas_target *target;
	struct mprsas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver must release the devq itself here because this SCSI
	 * command was generated internally and will never be handed back
	 * to a cam_periph that would otherwise do the release.  This is
	 * currently the only internally generated command; if more are
	 * added in the future, they need to release the devq the same way.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mprsas_read_cap_done\n",
	    done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in with EEDP
		 * info. If the READ CAP 16 command had some SCSI error (common
		 * if command is not supported), mark the lun as not supporting
		 * EEDP and set the block size to 0.
		 */
		if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
		    (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Bit 0 of the PROT byte is P_EN: protection enabled. */
		if (rcap_buf->protect & 0x01) {
			mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
			    "%d is formatted for EEDP support.\n",
			    done_ccb->ccb_h.target_lun,
			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	// Finished with this CCB and path.
	free(rcap_buf, M_MPR);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3377 #endif /* (__FreeBSD_version < 901503) || \
3378           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3379 
3380 void
3381 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3382     struct mprsas_target *target, lun_id_t lun_id)
3383 {
3384 	union ccb *ccb;
3385 	path_id_t path_id;
3386 
3387 	/*
3388 	 * Set the INRESET flag for this target so that no I/O will be sent to
3389 	 * the target until the reset has completed.  If an I/O request does
3390 	 * happen, the devq will be frozen.  The CCB holds the path which is
3391 	 * used to release the devq.  The devq is released and the CCB is freed
3392 	 * when the TM completes.
3393 	 */
3394 	ccb = xpt_alloc_ccb_nowait();
3395 	if (ccb) {
3396 		path_id = cam_sim_path(sc->sassc->sim);
3397 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3398 		    target->tid, lun_id) != CAM_REQ_CMP) {
3399 			xpt_free_ccb(ccb);
3400 		} else {
3401 			tm->cm_ccb = ccb;
3402 			tm->cm_targ = target;
3403 			target->flags |= MPRSAS_TARGET_INRESET;
3404 		}
3405 	}
3406 }
3407 
3408 int
3409 mprsas_startup(struct mpr_softc *sc)
3410 {
3411 	/*
3412 	 * Send the port enable message and set the wait_for_port_enable flag.
3413 	 * This flag helps to keep the simq frozen until all discovery events
3414 	 * are processed.
3415 	 */
3416 	sc->wait_for_port_enable = 1;
3417 	mprsas_send_portenable(sc);
3418 	return (0);
3419 }
3420 
3421 static int
3422 mprsas_send_portenable(struct mpr_softc *sc)
3423 {
3424 	MPI2_PORT_ENABLE_REQUEST *request;
3425 	struct mpr_command *cm;
3426 
3427 	MPR_FUNCTRACE(sc);
3428 
3429 	if ((cm = mpr_alloc_command(sc)) == NULL)
3430 		return (EBUSY);
3431 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3432 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3433 	request->MsgFlags = 0;
3434 	request->VP_ID = 0;
3435 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3436 	cm->cm_complete = mprsas_portenable_complete;
3437 	cm->cm_data = NULL;
3438 	cm->cm_sge = NULL;
3439 
3440 	mpr_map_command(sc, cm);
3441 	mpr_dprint(sc, MPR_XINFO,
3442 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3443 	    cm, cm->cm_req, cm->cm_complete);
3444 	return (0);
3445 }
3446 
3447 static void
3448 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3449 {
3450 	MPI2_PORT_ENABLE_REPLY *reply;
3451 	struct mprsas_softc *sassc;
3452 
3453 	MPR_FUNCTRACE(sc);
3454 	sassc = sc->sassc;
3455 
3456 	/*
3457 	 * Currently there should be no way we can hit this case.  It only
3458 	 * happens when we have a failure to allocate chain frames, and
3459 	 * port enable commands don't have S/G lists.
3460 	 */
3461 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3462 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3463 		    "This should not happen!\n", __func__, cm->cm_flags);
3464 	}
3465 
3466 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3467 	if (reply == NULL)
3468 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3469 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3470 	    MPI2_IOCSTATUS_SUCCESS)
3471 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3472 
3473 	mpr_free_command(sc, cm);
3474 	if (sc->mpr_ich.ich_arg != NULL) {
3475 		mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n");
3476 		config_intrhook_disestablish(&sc->mpr_ich);
3477 		sc->mpr_ich.ich_arg = NULL;
3478 	}
3479 
3480 	/*
3481 	 * Done waiting for port enable to complete.  Decrement the refcount.
3482 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3483 	 * take place.
3484 	 */
3485 	sc->wait_for_port_enable = 0;
3486 	sc->port_enable_complete = 1;
3487 	wakeup(&sc->port_enable_complete);
3488 	mprsas_startup_decrement(sassc);
3489 }
3490 
3491 int
3492 mprsas_check_id(struct mprsas_softc *sassc, int id)
3493 {
3494 	struct mpr_softc *sc = sassc->sc;
3495 	char *ids;
3496 	char *name;
3497 
3498 	ids = &sc->exclude_ids[0];
3499 	while((name = strsep(&ids, ",")) != NULL) {
3500 		if (name[0] == '\0')
3501 			continue;
3502 		if (strtol(name, NULL, 0) == (long)id)
3503 			return (1);
3504 	}
3505 
3506 	return (0);
3507 }
3508 
3509 void
3510 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3511 {
3512 	struct mprsas_softc *sassc;
3513 	struct mprsas_lun *lun, *lun_tmp;
3514 	struct mprsas_target *targ;
3515 	int i;
3516 
3517 	sassc = sc->sassc;
3518 	/*
3519 	 * The number of targets is based on IOC Facts, so free all of
3520 	 * the allocated LUNs for each target and then the target buffer
3521 	 * itself.
3522 	 */
3523 	for (i=0; i< maxtargets; i++) {
3524 		targ = &sassc->targets[i];
3525 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3526 			free(lun, M_MPR);
3527 		}
3528 	}
3529 	free(sassc->targets, M_MPR);
3530 
3531 	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3532 	    M_MPR, M_WAITOK|M_ZERO);
3533 	if (!sassc->targets) {
3534 		panic("%s failed to alloc targets with error %d\n",
3535 		    __func__, ENOMEM);
3536 	}
3537 }
3538