1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2009 Yahoo! Inc.
5  * Copyright (c) 2011-2015 LSI Corp.
6  * Copyright (c) 2013-2015 Avago Technologies
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
31  */
32 
33 #include <sys/cdefs.h>
34 /* Communications core for Avago Technologies (LSI) MPT2 */
35 
36 /* TODO Move headers to mpsvar */
37 #include <sys/types.h>
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/selinfo.h>
42 #include <sys/module.h>
43 #include <sys/bus.h>
44 #include <sys/conf.h>
45 #include <sys/bio.h>
46 #include <sys/malloc.h>
47 #include <sys/uio.h>
48 #include <sys/sysctl.h>
49 #include <sys/endian.h>
50 #include <sys/queue.h>
51 #include <sys/kthread.h>
52 #include <sys/taskqueue.h>
53 #include <sys/sbuf.h>
54 
55 #include <machine/bus.h>
56 #include <machine/resource.h>
57 #include <sys/rman.h>
58 
59 #include <machine/stdarg.h>
60 
61 #include <cam/cam.h>
62 #include <cam/cam_ccb.h>
63 #include <cam/cam_xpt.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_xpt_periph.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
71 #include <cam/scsi/smp_all.h>
72 
73 #include <dev/mps/mpi/mpi2_type.h>
74 #include <dev/mps/mpi/mpi2.h>
75 #include <dev/mps/mpi/mpi2_ioc.h>
76 #include <dev/mps/mpi/mpi2_sas.h>
77 #include <dev/mps/mpi/mpi2_cnfg.h>
78 #include <dev/mps/mpi/mpi2_init.h>
79 #include <dev/mps/mpi/mpi2_tool.h>
80 #include <dev/mps/mps_ioctl.h>
81 #include <dev/mps/mpsvar.h>
82 #include <dev/mps/mps_table.h>
83 #include <dev/mps/mps_sas.h>
84 
85 /*
86  * static array to check SCSI OpCode for EEDP protection bits
87  */
88 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
89 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
90 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
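/*
 * Reading aid for the table below (assuming the standard SPC/SBC opcode
 * assignments): it is indexed by CDB opcode, and the nonzero entries are
 * PRO_R for READ(10/12/16) at 0x28/0xa8/0x88, PRO_W for WRITE(10/12/16),
 * WRITE AND VERIFY(10/12/16) and WRITE SAME(10/16) at 0x2a/0xaa/0x8a,
 * 0x2e/0xae/0x8e and 0x41/0x93, and PRO_V for VERIFY(10/12/16) at
 * 0x2f/0xaf/0x8f.
 */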
91 static uint8_t op_code_prot[256] = {
92 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
93 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
94 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
95 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
96 	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
97 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
98 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
101 	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
103 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
108 };
109 
110 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
111 
112 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
113 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
114 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
115 static void mpssas_poll(struct cam_sim *sim);
116 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
117     struct mps_command *cm);
118 static void mpssas_scsiio_timeout(void *data);
119 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
120 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
121     struct mps_command *cm, union ccb *ccb);
122 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
123 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
124 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
125 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
126 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
127 			       uint64_t sasaddr);
128 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
129 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
130 static void mpssas_async(void *callback_arg, uint32_t code,
131 			 struct cam_path *path, void *arg);
132 static int mpssas_send_portenable(struct mps_softc *sc);
133 static void mpssas_portenable_complete(struct mps_softc *sc,
134     struct mps_command *cm);
135 
136 struct mpssas_target *
137 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
138 {
139 	struct mpssas_target *target;
140 	int i;
141 
142 	for (i = start; i < sassc->maxtargets; i++) {
143 		target = &sassc->targets[i];
144 		if (target->handle == handle)
145 			return (target);
146 	}
147 
148 	return (NULL);
149 }
150 
151 /* we need to freeze the simq during attach and diag reset, to avoid failing
152  * commands before device handles have been found by discovery.  Since
153  * discovery involves reading config pages and possibly sending commands,
154  * discovery actions may continue even after we receive the end of discovery
155  * event, so refcount discovery actions instead of assuming we can unfreeze
156  * the simq when we get the event.
157  */
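/*
 * A minimal usage sketch (hypothetical caller, for illustration only):
 * discovery-related work in the event handlers brackets itself with
 *
 *	mpssas_startup_increment(sassc);
 *	... read config pages / issue follow-up requests ...
 *	mpssas_startup_decrement(sassc);
 *
 * so the simq is frozen on the first increment and released only when the
 * last outstanding discovery action performs the matching decrement.
 */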
158 void
159 mpssas_startup_increment(struct mpssas_softc *sassc)
160 {
161 	MPS_FUNCTRACE(sassc->sc);
162 
163 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
164 		if (sassc->startup_refcount++ == 0) {
165 			/* just starting, freeze the simq */
166 			mps_dprint(sassc->sc, MPS_INIT,
167 			    "%s freezing simq\n", __func__);
168 			xpt_hold_boot();
169 			xpt_freeze_simq(sassc->sim, 1);
170 		}
171 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
172 		    sassc->startup_refcount);
173 	}
174 }
175 
176 void
177 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
178 {
179 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
180 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
181 		xpt_release_simq(sassc->sim, 1);
182 		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
183 	}
184 }
185 
186 void
187 mpssas_startup_decrement(struct mpssas_softc *sassc)
188 {
189 	MPS_FUNCTRACE(sassc->sc);
190 
191 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
192 		if (--sassc->startup_refcount == 0) {
193 			/* finished all discovery-related actions, release
194 			 * the simq and rescan for the latest topology.
195 			 */
196 			mps_dprint(sassc->sc, MPS_INIT,
197 			    "%s releasing simq\n", __func__);
198 			sassc->flags &= ~MPSSAS_IN_STARTUP;
199 			xpt_release_simq(sassc->sim, 1);
200 			xpt_release_boot();
201 		}
202 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
203 		    sassc->startup_refcount);
204 	}
205 }
206 
207 /*
208  * The firmware requires us to stop sending commands when we're doing task
209  * management.
210  * XXX The logic for serializing the device has been made lazy and moved to
211  * mpssas_prepare_for_tm().
212  */
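/*
 * Note: the command returned here comes from the reserved high-priority
 * pool (mps_alloc_high_priority_command), so callers must eventually hand
 * it back through mpssas_free_tm() rather than mps_free_command().
 */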
213 struct mps_command *
214 mpssas_alloc_tm(struct mps_softc *sc)
215 {
216 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
217 	struct mps_command *tm;
218 
219 	tm = mps_alloc_high_priority_command(sc);
220 	if (tm == NULL)
221 		return (NULL);
222 
223 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
224 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
225 	return tm;
226 }
227 
228 void
229 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
230 {
231 	if (tm == NULL)
232 		return;
233 
234 	/*
235 	 * For TM's the devq is frozen for the device.  Unfreeze it here and
236 	 * free the resources used for freezing the devq.  Must clear the
237 	 * INRESET flag as well or scsi I/O will not work.
238 	 */
239 	if (tm->cm_ccb) {
240 		mps_dprint(sc, MPS_XINFO | MPS_RECOVERY,
241 		    "Unfreezing devq for target ID %d\n",
242 		    tm->cm_targ->tid);
243 		tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
244 		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
245 		xpt_free_path(tm->cm_ccb->ccb_h.path);
246 		xpt_free_ccb(tm->cm_ccb);
247 	}
248 
249 	mps_free_high_priority_command(sc, tm);
250 }
251 
252 void
253 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
254 {
255 	struct mpssas_softc *sassc = sc->sassc;
256 	path_id_t pathid;
257 	target_id_t targetid;
258 	union ccb *ccb;
259 
260 	MPS_FUNCTRACE(sc);
261 	pathid = cam_sim_path(sassc->sim);
262 	if (targ == NULL)
263 		targetid = CAM_TARGET_WILDCARD;
264 	else
265 		targetid = targ - sassc->targets;
266 
267 	/*
268 	 * Allocate a CCB and schedule a rescan.
269 	 */
270 	ccb = xpt_alloc_ccb_nowait();
271 	if (ccb == NULL) {
272 		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
273 		return;
274 	}
275 
276 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
277 	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
278 		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
279 		xpt_free_ccb(ccb);
280 		return;
281 	}
282 
283 	if (targetid == CAM_TARGET_WILDCARD)
284 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
285 	else
286 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
287 
288 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
289 	xpt_rescan(ccb);
290 }
291 
292 static void
293 mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
294 {
295 	struct sbuf sb;
296 	va_list ap;
297 	char str[224];
298 
299 	if (cm == NULL)
300 		return;
301 
302 	/* No need to be in here if debugging isn't enabled */
303 	if ((cm->cm_sc->mps_debug & level) == 0)
304 		return;
305 
306 	sbuf_new(&sb, str, sizeof(str), 0);
307 
308 	va_start(ap, fmt);
309 
310 	if (cm->cm_ccb != NULL) {
311 		xpt_path_sbuf(cm->cm_ccb->csio.ccb_h.path, &sb);
312 		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
313 			scsi_command_string(&cm->cm_ccb->csio, &sb);
314 			sbuf_printf(&sb, "length %d ",
315 				    cm->cm_ccb->csio.dxfer_len);
316 		}
317 	}
318 	else {
319 		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
320 		    cam_sim_name(cm->cm_sc->sassc->sim),
321 		    cam_sim_unit(cm->cm_sc->sassc->sim),
322 		    cam_sim_bus(cm->cm_sc->sassc->sim),
323 		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
324 		    cm->cm_lun);
325 	}
326 
327 	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
328 	sbuf_vprintf(&sb, fmt, ap);
329 	sbuf_finish(&sb);
330 	mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
331 
332 	va_end(ap);
333 }
334 
335 static void
336 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
337 {
338 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
339 	struct mpssas_target *targ;
340 	uint16_t handle;
341 
342 	MPS_FUNCTRACE(sc);
343 
344 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
345 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
346 	targ = tm->cm_targ;
347 
348 	if (reply == NULL) {
349 		/* XXX retry the remove after the diag reset completes? */
350 		mps_dprint(sc, MPS_FAULT,
351 		    "%s NULL reply resetting device 0x%04x\n", __func__,
352 		    handle);
353 		mpssas_free_tm(sc, tm);
354 		return;
355 	}
356 
357 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
358 	    MPI2_IOCSTATUS_SUCCESS) {
359 		mps_dprint(sc, MPS_ERROR,
360 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
361 		   le16toh(reply->IOCStatus), handle);
362 	}
363 
364 	mps_dprint(sc, MPS_XINFO,
365 	    "Reset aborted %u commands\n", le32toh(reply->TerminationCount));
366 	mps_free_reply(sc, tm->cm_reply_data);
367 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
368 
369 	mps_dprint(sc, MPS_XINFO,
370 	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
371 
372 	/*
373 	 * Don't clear target if remove fails because things will get confusing.
374 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
375 	 * this target id if possible, and so we can assign the same target id
376 	 * to this device if it comes back in the future.
377 	 */
378 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
379 	    MPI2_IOCSTATUS_SUCCESS) {
380 		targ = tm->cm_targ;
381 		targ->handle = 0x0;
382 		targ->encl_handle = 0x0;
383 		targ->encl_slot = 0x0;
384 		targ->exp_dev_handle = 0x0;
385 		targ->phy_num = 0x0;
386 		targ->linkrate = 0x0;
387 		targ->devinfo = 0x0;
388 		targ->flags = 0x0;
389 	}
390 
391 	mpssas_free_tm(sc, tm);
392 }
393 
394 /*
395  * No need to call "MPI2_SAS_OP_REMOVE_DEVICE" for volume removal.
396  * Otherwise, volume deletion is handled the same as bare drive removal.
397  */
398 void
399 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
400 {
401 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
402 	struct mps_softc *sc;
403 	struct mps_command *tm;
404 	struct mpssas_target *targ = NULL;
405 
406 	MPS_FUNCTRACE(sassc->sc);
407 	sc = sassc->sc;
408 
409 #ifdef WD_SUPPORT
410 	/*
411 	 * If this is a WD controller, determine if the disk should be exposed
412 	 * to the OS or not.  If disk should be exposed, return from this
413 	 * function without doing anything.
414 	 */
415 	if (sc->WD_available && (sc->WD_hide_expose ==
416 	    MPS_WD_EXPOSE_ALWAYS)) {
417 		return;
418 	}
419 #endif //WD_SUPPORT
420 
421 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
422 	if (targ == NULL) {
423 		/* FIXME: what is the action? */
424 		/* We don't know about this device? */
425 		mps_dprint(sc, MPS_ERROR,
426 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
427 		return;
428 	}
429 
430 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
431 
432 	tm = mpssas_alloc_tm(sc);
433 	if (tm == NULL) {
434 		mps_dprint(sc, MPS_ERROR,
435 		    "%s: command alloc failure\n", __func__);
436 		return;
437 	}
438 
439 	mpssas_rescan_target(sc, targ);
440 
441 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
442 	req->DevHandle = targ->handle;
443 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
444 
445 	/* SAS Hard Link Reset / SATA Link Reset */
446 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
447 
448 	tm->cm_targ = targ;
449 	tm->cm_data = NULL;
450 	tm->cm_complete = mpssas_remove_volume;
451 	tm->cm_complete_data = (void *)(uintptr_t)handle;
452 
453 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
454 	    __func__, targ->tid);
455 	mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
456 
457 	mps_map_command(sc, tm);
458 }
459 
460 /*
461  * The MPT2 firmware performs debounce on the link to avoid transient link
462  * errors and false removals.  When it does decide that the link has been lost
463  * and a device needs to go away, it expects that the host will perform a
464  * target reset and then an op remove.  The reset has the side-effect of
465  * aborting any outstanding requests for the device, which is required for
466  * the op-remove to succeed.  It's not clear if the host should check for
467  * the device coming back alive after the reset.
468  */
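/*
 * In this driver that sequence plays out roughly as:
 *
 *	mpssas_prepare_remove()   - queue a TARGET_RESET task management request
 *	mpssas_remove_device()    - TM completion; reuse the command for a
 *	                            SAS_IO_UNIT_CONTROL / REMOVE_DEVICE, deferred
 *	                            until the target's outstanding I/O drains
 *	mpssas_remove_complete()  - clear the target's handle and LUN list
 */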
469 void
470 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
471 {
472 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
473 	struct mps_softc *sc;
474 	struct mps_command *cm;
475 	struct mpssas_target *targ = NULL;
476 
477 	MPS_FUNCTRACE(sassc->sc);
478 
479 	sc = sassc->sc;
480 
481 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
482 	if (targ == NULL) {
483 		/* FIXME: what is the action? */
484 		/* We don't know about this device? */
485 		mps_dprint(sc, MPS_ERROR,
486 		    "%s : invalid handle 0x%x \n", __func__, handle);
487 		return;
488 	}
489 
490 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
491 
492 	cm = mpssas_alloc_tm(sc);
493 	if (cm == NULL) {
494 		mps_dprint(sc, MPS_ERROR,
495 		    "%s: command alloc failure\n", __func__);
496 		return;
497 	}
498 
499 	mpssas_rescan_target(sc, targ);
500 
501 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
502 	req->DevHandle = htole16(targ->handle);
503 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
504 
505 	/* SAS Hard Link Reset / SATA Link Reset */
506 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
507 
508 	cm->cm_targ = targ;
509 	cm->cm_data = NULL;
510 	cm->cm_complete = mpssas_remove_device;
511 	cm->cm_complete_data = (void *)(uintptr_t)handle;
512 
513 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
514 	    __func__, targ->tid);
515 	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
516 
517 	mps_map_command(sc, cm);
518 }
519 
520 static void
521 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
522 {
523 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
524 	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
525 	struct mpssas_target *targ;
526 	uint16_t handle;
527 
528 	MPS_FUNCTRACE(sc);
529 
530 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
531 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
532 	targ = tm->cm_targ;
533 
534 	/*
535 	 * Currently there should be no way we can hit this case.  It only
536 	 * happens when we have a failure to allocate chain frames, and
537 	 * task management commands don't have S/G lists.
538 	 */
539 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
540 		mps_dprint(sc, MPS_ERROR,
541 		    "%s: cm_flags = %#x for remove of handle %#04x! "
542 		    "This should not happen!\n", __func__, tm->cm_flags,
543 		    handle);
544 	}
545 
546 	if (reply == NULL) {
547 		/* XXX retry the remove after the diag reset completes? */
548 		mps_dprint(sc, MPS_FAULT,
549 		    "%s NULL reply resetting device 0x%04x\n", __func__,
550 		    handle);
551 		mpssas_free_tm(sc, tm);
552 		return;
553 	}
554 
555 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
556 	    MPI2_IOCSTATUS_SUCCESS) {
557 		mps_dprint(sc, MPS_ERROR,
558 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
559 		   le16toh(reply->IOCStatus), handle);
560 	}
561 
562 	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
563 	    le32toh(reply->TerminationCount));
564 	mps_free_reply(sc, tm->cm_reply_data);
565 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
566 
567 	/* Reuse the existing command */
568 	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
569 	memset(req, 0, sizeof(*req));
570 	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
571 	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
572 	req->DevHandle = htole16(handle);
573 	tm->cm_data = NULL;
574 	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
575 	tm->cm_complete = mpssas_remove_complete;
576 	tm->cm_complete_data = (void *)(uintptr_t)handle;
577 
578 	/*
579 	 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
580 	 * They should be aborted or time out, and we'll kick this off from there
581 	 * if so.
582 	 */
583 	if (TAILQ_FIRST(&targ->commands) == NULL) {
584 		mps_dprint(sc, MPS_INFO,
585 		    "No pending commands: starting remove_device target %u handle 0x%04x\n",
586 		    targ->tid, handle);
587 		mps_map_command(sc, tm);
588 		targ->pending_remove_tm = NULL;
589 	} else {
590 		targ->pending_remove_tm = tm;
591 	}
592 
593 	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
594 		   targ->tid, handle);
595 }
596 
597 static void
598 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
599 {
600 	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
601 	uint16_t handle;
602 	struct mpssas_target *targ;
603 	struct mpssas_lun *lun;
604 
605 	MPS_FUNCTRACE(sc);
606 
607 	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
608 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
609 	targ = tm->cm_targ;
610 
611 	/*
612 	 * At this point, we should have no pending commands for the target.
613 	 * The target removal has just completed.
614 	 */
615 	KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
616 	    ("%s: no commands should be pending\n", __func__));
617 
618 	/*
619 	 * Currently there should be no way we can hit this case.  It only
620 	 * happens when we have a failure to allocate chain frames, and
621 	 * task management commands don't have S/G lists.
622 	 */
623 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
624 		mps_dprint(sc, MPS_XINFO,
625 			   "%s: cm_flags = %#x for remove of handle %#04x! "
626 			   "This should not happen!\n", __func__, tm->cm_flags,
627 			   handle);
628 		mpssas_free_tm(sc, tm);
629 		return;
630 	}
631 
632 	if (reply == NULL) {
633 		/* most likely a chip reset */
634 		mps_dprint(sc, MPS_FAULT,
635 		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
636 		mpssas_free_tm(sc, tm);
637 		return;
638 	}
639 
640 	mps_dprint(sc, MPS_XINFO,
641 	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
642 	    handle, le16toh(reply->IOCStatus));
643 
644 	/*
645 	 * Don't clear target if remove fails because things will get confusing.
646 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
647 	 * this target id if possible, and so we can assign the same target id
648 	 * to this device if it comes back in the future.
649 	 */
650 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
651 	    MPI2_IOCSTATUS_SUCCESS) {
652 		targ->handle = 0x0;
653 		targ->encl_handle = 0x0;
654 		targ->encl_slot = 0x0;
655 		targ->exp_dev_handle = 0x0;
656 		targ->phy_num = 0x0;
657 		targ->linkrate = 0x0;
658 		targ->devinfo = 0x0;
659 		targ->flags = 0x0;
660 
661 		while (!SLIST_EMPTY(&targ->luns)) {
662 			lun = SLIST_FIRST(&targ->luns);
663 			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
664 			free(lun, M_MPT2);
665 		}
666 	}
667 
668 	mpssas_free_tm(sc, tm);
669 }
670 
671 static int
672 mpssas_register_events(struct mps_softc *sc)
673 {
674 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
675 
676 	bzero(events, 16);
677 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
678 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
679 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
680 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
681 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
682 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
683 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
684 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
685 	setbit(events, MPI2_EVENT_IR_VOLUME);
686 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
687 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
688 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
689 
690 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
691 	    &sc->sassc->mpssas_eh);
692 
693 	return (0);
694 }
695 
696 int
697 mps_attach_sas(struct mps_softc *sc)
698 {
699 	struct mpssas_softc *sassc;
700 	cam_status status;
701 	int unit, error = 0, reqs;
702 
703 	MPS_FUNCTRACE(sc);
704 	mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
705 
706 	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
707 
708 	/*
709 	 * XXX MaxTargets could change during a reinit.  Since we don't
710 	 * resize the targets[] array during such an event, cache the value
711 	 * of MaxTargets here so that we don't get into trouble later.  This
712 	 * should move into the reinit logic.
713 	 */
714 	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
715 	sassc->targets = malloc(sizeof(struct mpssas_target) *
716 	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
717 	sc->sassc = sassc;
718 	sassc->sc = sc;
719 
720 	reqs = sc->num_reqs - sc->num_prireqs - 1;
721 	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
722 		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
723 		error = ENOMEM;
724 		goto out;
725 	}
726 
727 	unit = device_get_unit(sc->mps_dev);
728 	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
729 	    unit, &sc->mps_mtx, reqs, reqs, sassc->devq);
730 	if (sassc->sim == NULL) {
731 		mps_dprint(sc, MPS_INIT|MPS_ERROR, "Cannot allocate SIM\n");
732 		error = EINVAL;
733 		goto out;
734 	}
735 
736 	TAILQ_INIT(&sassc->ev_queue);
737 
738 	/* Initialize taskqueue for Event Handling */
739 	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
740 	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
741 	    taskqueue_thread_enqueue, &sassc->ev_tq);
742 	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
743 	    device_get_nameunit(sc->mps_dev));
744 
745 	mps_lock(sc);
746 
747 	/*
748 	 * XXX There should be a bus for every port on the adapter, but since
749 	 * we're just going to fake the topology for now, we'll pretend that
750 	 * everything is just a target on a single bus.
751 	 */
752 	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
753 		mps_dprint(sc, MPS_INIT|MPS_ERROR,
754 		    "Error %d registering SCSI bus\n", error);
755 		mps_unlock(sc);
756 		goto out;
757 	}
758 
759 	/*
760 	 * Assume that discovery events will start right away.
761 	 *
762 	 * Hold off boot until discovery is complete.
763 	 */
764 	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
765 	sc->sassc->startup_refcount = 0;
766 	mpssas_startup_increment(sassc);
767 
768 	mps_unlock(sc);
769 
770 	/*
771 	 * Register for async events so we can determine the EEDP
772 	 * capabilities of devices.
773 	 */
774 	status = xpt_create_path(&sassc->path, /*periph*/NULL,
775 	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
776 	    CAM_LUN_WILDCARD);
777 	if (status != CAM_REQ_CMP) {
778 		mps_dprint(sc, MPS_ERROR|MPS_INIT,
779 		    "Error %#x creating sim path\n", status);
780 		sassc->path = NULL;
781 	} else {
782 		int event;
783 
784 		event = AC_ADVINFO_CHANGED;
785 		status = xpt_register_async(event, mpssas_async, sc,
786 					    sassc->path);
787 		if (status != CAM_REQ_CMP) {
788 			mps_dprint(sc, MPS_ERROR,
789 			    "Error %#x registering async handler for "
790 			    "AC_ADVINFO_CHANGED events\n", status);
791 			xpt_free_path(sassc->path);
792 			sassc->path = NULL;
793 		}
794 	}
795 	if (status != CAM_REQ_CMP) {
796 		/*
797 		 * EEDP use is the exception, not the rule.
798 		 * Warn the user, but do not fail to attach.
799 		 */
800 		mps_printf(sc, "EEDP capabilities disabled.\n");
801 	}
802 
803 	mpssas_register_events(sc);
804 out:
805 	if (error)
806 		mps_detach_sas(sc);
807 
808 	mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
809 	return (error);
810 }
811 
812 int
813 mps_detach_sas(struct mps_softc *sc)
814 {
815 	struct mpssas_softc *sassc;
816 	struct mpssas_lun *lun, *lun_tmp;
817 	struct mpssas_target *targ;
818 	int i;
819 
820 	MPS_FUNCTRACE(sc);
821 
822 	if (sc->sassc == NULL)
823 		return (0);
824 
825 	sassc = sc->sassc;
826 	mps_deregister_events(sc, sassc->mpssas_eh);
827 
828 	/*
829 	 * Drain and free the event handling taskqueue with the lock
830 	 * unheld so that any parallel processing tasks drain properly
831 	 * without deadlocking.
832 	 */
833 	if (sassc->ev_tq != NULL)
834 		taskqueue_free(sassc->ev_tq);
835 
836 	/* Deregister our async handler */
837 	if (sassc->path != NULL) {
838 		xpt_register_async(0, mpssas_async, sc, sassc->path);
839 		xpt_free_path(sassc->path);
840 		sassc->path = NULL;
841 	}
842 
843 	/* Make sure CAM doesn't wedge if we had to bail out early. */
844 	mps_lock(sc);
845 
846 	while (sassc->startup_refcount != 0)
847 		mpssas_startup_decrement(sassc);
848 
849 	if (sassc->flags & MPSSAS_IN_STARTUP)
850 		xpt_release_simq(sassc->sim, 1);
851 
852 	if (sassc->sim != NULL) {
853 		xpt_bus_deregister(cam_sim_path(sassc->sim));
854 		cam_sim_free(sassc->sim, FALSE);
855 	}
856 
857 	mps_unlock(sc);
858 
859 	if (sassc->devq != NULL)
860 		cam_simq_free(sassc->devq);
861 
862 	for (i = 0; i < sassc->maxtargets; i++) {
863 		targ = &sassc->targets[i];
864 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
865 			free(lun, M_MPT2);
866 		}
867 	}
868 	free(sassc->targets, M_MPT2);
869 	free(sassc, M_MPT2);
870 	sc->sassc = NULL;
871 
872 	return (0);
873 }
874 
875 void
876 mpssas_discovery_end(struct mpssas_softc *sassc)
877 {
878 	struct mps_softc *sc = sassc->sc;
879 
880 	MPS_FUNCTRACE(sc);
881 
882 	/*
883 	 * After discovery has completed, check the mapping table for any
884 	 * missing devices and update their missing counts. Only do this once
885 	 * whenever the driver is initialized so that missing counts aren't
886 	 * updated unnecessarily. Note that just because discovery has
887 	 * completed doesn't mean that events have been processed yet. The
888 	 * check_devices function is a callout timer that checks if ALL devices
889 	 * are missing. If so, it will wait a little longer for events to
890 	 * complete and keep resetting itself until some device in the mapping
891 	 * table is not missing, meaning that event processing has started.
892 	 */
893 	if (sc->track_mapping_events) {
894 		mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
895 		    "completed. Check for missing devices in the mapping "
896 		    "table.\n");
897 		callout_reset(&sc->device_check_callout,
898 		    MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
899 		    sc);
900 	}
901 }
902 
903 static void
904 mpssas_action(struct cam_sim *sim, union ccb *ccb)
905 {
906 	struct mpssas_softc *sassc;
907 
908 	sassc = cam_sim_softc(sim);
909 
910 	MPS_FUNCTRACE(sassc->sc);
911 	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
912 	    ccb->ccb_h.func_code);
913 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
914 
915 	switch (ccb->ccb_h.func_code) {
916 	case XPT_PATH_INQ:
917 	{
918 		struct ccb_pathinq *cpi = &ccb->cpi;
919 		struct mps_softc *sc = sassc->sc;
920 
921 		cpi->version_num = 1;
922 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
923 		cpi->target_sprt = 0;
924 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
925 		cpi->hba_eng_cnt = 0;
926 		cpi->max_target = sassc->maxtargets - 1;
927 		cpi->max_lun = 255;
928 
929 		/*
930 		 * initiator_id is set here to an ID outside the set of valid
931 		 * target IDs (including volumes).
932 		 */
933 		cpi->initiator_id = sassc->maxtargets;
934 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
935 		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
936 		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
937 		cpi->unit_number = cam_sim_unit(sim);
938 		cpi->bus_id = cam_sim_bus(sim);
939 		cpi->base_transfer_speed = 150000;
940 		cpi->transport = XPORT_SAS;
941 		cpi->transport_version = 0;
942 		cpi->protocol = PROTO_SCSI;
943 		cpi->protocol_version = SCSI_REV_SPC;
944 		cpi->maxio = sc->maxio;
945 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
946 		break;
947 	}
948 	case XPT_GET_TRAN_SETTINGS:
949 	{
950 		struct ccb_trans_settings	*cts;
951 		struct ccb_trans_settings_sas	*sas;
952 		struct ccb_trans_settings_scsi	*scsi;
953 		struct mpssas_target *targ;
954 
955 		cts = &ccb->cts;
956 		sas = &cts->xport_specific.sas;
957 		scsi = &cts->proto_specific.scsi;
958 
959 		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
960 		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
961 		    cts->ccb_h.target_id));
962 		targ = &sassc->targets[cts->ccb_h.target_id];
963 		if (targ->handle == 0x0) {
964 			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
965 			break;
966 		}
967 
968 		cts->protocol_version = SCSI_REV_SPC2;
969 		cts->transport = XPORT_SAS;
970 		cts->transport_version = 0;
971 
972 		sas->valid = CTS_SAS_VALID_SPEED;
973 		switch (targ->linkrate) {
974 		case 0x08:
975 			sas->bitrate = 150000;
976 			break;
977 		case 0x09:
978 			sas->bitrate = 300000;
979 			break;
980 		case 0x0a:
981 			sas->bitrate = 600000;
982 			break;
983 		default:
984 			sas->valid = 0;
985 		}
986 
987 		cts->protocol = PROTO_SCSI;
988 		scsi->valid = CTS_SCSI_VALID_TQ;
989 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
990 
991 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
992 		break;
993 	}
994 	case XPT_CALC_GEOMETRY:
995 		cam_calc_geometry(&ccb->ccg, /*extended*/1);
996 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
997 		break;
998 	case XPT_RESET_DEV:
999 		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
1000 		mpssas_action_resetdev(sassc, ccb);
1001 		return;
1002 	case XPT_RESET_BUS:
1003 	case XPT_ABORT:
1004 	case XPT_TERM_IO:
1005 		mps_dprint(sassc->sc, MPS_XINFO,
1006 		    "mpssas_action faking success for abort or reset\n");
1007 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1008 		break;
1009 	case XPT_SCSI_IO:
1010 		mpssas_action_scsiio(sassc, ccb);
1011 		return;
1012 	case XPT_SMP_IO:
1013 		mpssas_action_smpio(sassc, ccb);
1014 		return;
1015 	default:
1016 		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
1017 		break;
1018 	}
1019 	xpt_done(ccb);
1020 
1021 }
1022 
1023 static void
1024 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1025     target_id_t target_id, lun_id_t lun_id)
1026 {
1027 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1028 	struct cam_path *path;
1029 
1030 	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1031 	    ac_code, target_id, (uintmax_t)lun_id);
1032 
1033 	if (xpt_create_path(&path, NULL,
1034 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1035 		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1036 			   "notification\n");
1037 		return;
1038 	}
1039 
1040 	xpt_async(ac_code, path, NULL);
1041 	xpt_free_path(path);
1042 }
1043 
1044 static void
1045 mpssas_complete_all_commands(struct mps_softc *sc)
1046 {
1047 	struct mps_command *cm;
1048 	int i;
1049 	int completed;
1050 
1051 	MPS_FUNCTRACE(sc);
1052 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1053 
1054 	/* complete all commands with a NULL reply */
1055 	for (i = 1; i < sc->num_reqs; i++) {
1056 		cm = &sc->commands[i];
1057 		if (cm->cm_state == MPS_CM_STATE_FREE)
1058 			continue;
1059 
1060 		cm->cm_state = MPS_CM_STATE_BUSY;
1061 		cm->cm_reply = NULL;
1062 		completed = 0;
1063 
1064 		if (cm->cm_flags & MPS_CM_FLAGS_SATA_ID_TIMEOUT) {
1065 			MPASS(cm->cm_data);
1066 			free(cm->cm_data, M_MPT2);
1067 			cm->cm_data = NULL;
1068 		}
1069 
1070 		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1071 			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1072 
1073 		if (cm->cm_complete != NULL) {
1074 			mpssas_log_command(cm, MPS_RECOVERY,
1075 			    "completing cm %p state %x ccb %p for diag reset\n",
1076 			    cm, cm->cm_state, cm->cm_ccb);
1077 
1078 			cm->cm_complete(sc, cm);
1079 			completed = 1;
1080 		} else if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1081 			mpssas_log_command(cm, MPS_RECOVERY,
1082 			    "waking up cm %p state %x ccb %p for diag reset\n",
1083 			    cm, cm->cm_state, cm->cm_ccb);
1084 			wakeup(cm);
1085 			completed = 1;
1086 		}
1087 
1088 		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1089 			/* this should never happen, but if it does, log */
1090 			mpssas_log_command(cm, MPS_RECOVERY,
1091 			    "cm %p state %x flags 0x%x ccb %p during diag "
1092 			    "reset\n", cm, cm->cm_state, cm->cm_flags,
1093 			    cm->cm_ccb);
1094 		}
1095 	}
1096 
1097 	sc->io_cmds_active = 0;
1098 }
1099 
1100 void
1101 mpssas_handle_reinit(struct mps_softc *sc)
1102 {
1103 	int i;
1104 
1105 	/* Go back into startup mode and freeze the simq, so that CAM
1106 	 * doesn't send any commands until after we've rediscovered all
1107 	 * targets and found the proper device handles for them.
1108 	 *
1109 	 * After the reset, portenable will trigger discovery, and after all
1110 	 * discovery-related activities have finished, the simq will be
1111 	 * released.
1112 	 */
1113 	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1114 	sc->sassc->flags |= MPSSAS_IN_STARTUP;
1115 	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1116 	mpssas_startup_increment(sc->sassc);
1117 
1118 	/* notify CAM of a bus reset */
1119 	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1120 	    CAM_LUN_WILDCARD);
1121 
1122 	/* complete and cleanup after all outstanding commands */
1123 	mpssas_complete_all_commands(sc);
1124 
1125 	mps_dprint(sc, MPS_INIT,
1126 	    "%s startup %u after command completion\n", __func__,
1127 	    sc->sassc->startup_refcount);
1128 
1129 	/* zero all the target handles, since they may change after the
1130 	 * reset, and we have to rediscover all the targets and use the new
1131 	 * handles.
1132 	 */
1133 	for (i = 0; i < sc->sassc->maxtargets; i++) {
1134 		if (sc->sassc->targets[i].outstanding != 0)
1135 			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1136 			    i, sc->sassc->targets[i].outstanding);
1137 		sc->sassc->targets[i].handle = 0x0;
1138 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1139 		sc->sassc->targets[i].outstanding = 0;
1140 		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
1141 	}
1142 }
1143 
1144 static void
1145 mpssas_tm_timeout(void *data)
1146 {
1147 	struct mps_command *tm = data;
1148 	struct mps_softc *sc = tm->cm_sc;
1149 
1150 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1151 
1152 	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1153 	    "task mgmt %p timed out\n", tm);
1154 
1155 	KASSERT(tm->cm_state == MPS_CM_STATE_INQUEUE,
1156 	    ("command not inqueue, state = %u\n", tm->cm_state));
1157 
1158 	tm->cm_state = MPS_CM_STATE_BUSY;
1159 	mps_reinit(sc);
1160 }
1161 
1162 static void
1163 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1164 {
1165 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1166 	unsigned int cm_count = 0;
1167 	struct mps_command *cm;
1168 	struct mpssas_target *targ;
1169 
1170 	callout_stop(&tm->cm_callout);
1171 
1172 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1173 	targ = tm->cm_targ;
1174 
1175 	/*
1176 	 * Currently there should be no way we can hit this case.  It only
1177 	 * happens when we have a failure to allocate chain frames, and
1178 	 * task management commands don't have S/G lists.
1179 	 * XXXSL So should it be an assertion?
1180 	 */
1181 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1182 		mps_dprint(sc, MPS_RECOVERY|MPS_ERROR,
1183 		    "%s: cm_flags = %#x for LUN reset! "
1184 		   "This should not happen!\n", __func__, tm->cm_flags);
1185 		mpssas_free_tm(sc, tm);
1186 		return;
1187 	}
1188 
1189 	if (reply == NULL) {
1190 		mps_dprint(sc, MPS_RECOVERY, "NULL reset reply for tm %p\n",
1191 		    tm);
1192 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1193 			/* this completion was due to a reset, just cleanup */
1194 			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1195 			    "reset, ignoring NULL LUN reset reply\n");
1196 			targ->tm = NULL;
1197 			mpssas_free_tm(sc, tm);
1198 		}
1199 		else {
1200 			/* we should have gotten a reply. */
1201 			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1202 			    "LUN reset attempt, resetting controller\n");
1203 			mps_reinit(sc);
1204 		}
1205 		return;
1206 	}
1207 
1208 	mps_dprint(sc, MPS_RECOVERY,
1209 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1210 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1211 	    le32toh(reply->TerminationCount));
1212 
1213 	/*
1214 	 * See if there are any outstanding commands for this LUN.
1215 	 * This could be made more efficient by using a per-LU data
1216 	 * structure of some sort.
1217 	 */
1218 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1219 		if (cm->cm_lun == tm->cm_lun)
1220 			cm_count++;
1221 	}
1222 
1223 	if (cm_count == 0) {
1224 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1225 		    "Finished recovery after LUN reset for target %u\n",
1226 		    targ->tid);
1227 
1228 		mpssas_announce_reset(sc, AC_SENT_BDR, targ->tid, tm->cm_lun);
1229 
1230 		/*
1231 		 * We've finished recovery for this logical unit.  Check and
1232 		 * see if some other logical unit has a timedout command
1233 		 * that needs to be processed.
1234 		 */
1235 		cm = TAILQ_FIRST(&targ->timedout_commands);
1236 		if (cm) {
1237 			mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1238 			    "More commands to abort for target %u\n",
1239 			    targ->tid);
1240 			mpssas_send_abort(sc, tm, cm);
1241 		} else {
1242 			targ->tm = NULL;
1243 			mpssas_free_tm(sc, tm);
1244 		}
1245 	} else {
1246 		/*
1247 		 * If we still have commands for this LUN, the reset
1248 		 * effectively failed, regardless of the status reported.
1249 		 * Escalate to a target reset.
1250 		 */
1251 		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1252 		    "logical unit reset complete for target %u, but still "
1253 		    "have %u command(s), sending target reset\n", targ->tid,
1254 		    cm_count);
1255 		mpssas_send_reset(sc, tm,
1256 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1257 	}
1258 }
1259 
1260 static void
1261 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1262 {
1263 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1264 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1265 	struct mpssas_target *targ;
1266 
1267 	callout_stop(&tm->cm_callout);
1268 
1269 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1270 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1271 	targ = tm->cm_targ;
1272 
1273 	/*
1274 	 * Currently there should be no way we can hit this case.  It only
1275 	 * happens when we have a failure to allocate chain frames, and
1276 	 * task management commands don't have S/G lists.
1277 	 */
1278 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1279 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1280 			   "This should not happen!\n", __func__, tm->cm_flags);
1281 		mpssas_free_tm(sc, tm);
1282 		return;
1283 	}
1284 
1285 	if (reply == NULL) {
1286 		mps_dprint(sc, MPS_RECOVERY,
1287 		    "NULL target reset reply for tm %p TaskMID %u\n",
1288 		    tm, le16toh(req->TaskMID));
1289 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1290 			/* this completion was due to a reset, just cleanup */
1291 			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1292 			    "reset, ignoring NULL target reset reply\n");
1293 			targ->tm = NULL;
1294 			mpssas_free_tm(sc, tm);
1295 		} else {
1296 			/* we should have gotten a reply. */
1297 			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1298 			    "target reset attempt, resetting controller\n");
1299 			mps_reinit(sc);
1300 		}
1301 		return;
1302 	}
1303 
1304 	mps_dprint(sc, MPS_RECOVERY,
1305 	    "target reset status 0x%x code 0x%x count %u\n",
1306 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1307 	    le32toh(reply->TerminationCount));
1308 
1309 	if (targ->outstanding == 0) {
1310 		/* we've finished recovery for this target and all
1311 		 * of its logical units.
1312 		 */
1313 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1314 		    "Finished reset recovery for target %u\n", targ->tid);
1315 
1316 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1317 		    CAM_LUN_WILDCARD);
1318 
1319 		targ->tm = NULL;
1320 		mpssas_free_tm(sc, tm);
1321 	} else {
1322 		/*
1323 		 * After a target reset, if this target still has
1324 		 * outstanding commands, the reset effectively failed,
1325 		 * regardless of the status reported.  Escalate.
1326 		 */
1327 		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1328 		    "Target reset complete for target %u, but still have %u "
1329 		    "command(s), resetting controller\n", targ->tid,
1330 		    targ->outstanding);
1331 		mps_reinit(sc);
1332 	}
1333 }
1334 
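/*
 * Error-recovery escalation implemented below (each step runs only if the
 * previous one still leaves commands outstanding): ABORT_TASK per timed-out
 * command -> LOGICAL_UNIT_RESET -> TARGET_RESET -> mps_reinit() of the whole
 * controller (the reinit path is also taken when a task-management request
 * itself times out or completes with no reply outside of a diag reset).
 */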
1335 #define MPS_RESET_TIMEOUT 30
1336 
1337 int
1338 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1339 {
1340 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1341 	struct mpssas_target *target;
1342 	int err;
1343 
1344 	target = tm->cm_targ;
1345 	if (target->handle == 0) {
1346 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1347 		    __func__, target->tid);
1348 		return -1;
1349 	}
1350 
1351 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1352 	req->DevHandle = htole16(target->handle);
1353 	req->TaskType = type;
1354 
1355 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1356 		/* XXX Need to handle invalid LUNs */
1357 		MPS_SET_LUN(req->LUN, tm->cm_lun);
1358 		tm->cm_targ->logical_unit_resets++;
1359 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1360 		    "Sending logical unit reset to target %u lun %d\n",
1361 		    target->tid, tm->cm_lun);
1362 		tm->cm_complete = mpssas_logical_unit_reset_complete;
1363 		mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1364 	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1365 		/*
1366 		 * Target reset method =
1367 		 * 	SAS Hard Link Reset / SATA Link Reset
1368 		 */
1369 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1370 		tm->cm_targ->target_resets++;
1371 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1372 		    "Sending target reset to target %u\n", target->tid);
1373 		tm->cm_complete = mpssas_target_reset_complete;
1374 		mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1375 	} else {
1376 		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
1377 		return -1;
1378 	}
1379 
1380 	tm->cm_data = NULL;
1381 	tm->cm_complete_data = (void *)tm;
1382 
1383 	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1384 	    mpssas_tm_timeout, tm);
1385 
1386 	err = mps_map_command(sc, tm);
1387 	if (err)
1388 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1389 		    "error %d sending reset type %u\n",
1390 		    err, type);
1391 
1392 	return err;
1393 }
1394 
1395 static void
1396 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1397 {
1398 	struct mps_command *cm;
1399 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1400 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1401 	struct mpssas_target *targ;
1402 
1403 	callout_stop(&tm->cm_callout);
1404 
1405 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1406 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1407 	targ = tm->cm_targ;
1408 
1409 	/*
1410 	 * Currently there should be no way we can hit this case.  It only
1411 	 * happens when we have a failure to allocate chain frames, and
1412 	 * task management commands don't have S/G lists.
1413 	 */
1414 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1415 		mps_dprint(sc, MPS_RECOVERY,
1416 		    "cm_flags = %#x for abort %p TaskMID %u!\n",
1417 		    tm->cm_flags, tm, le16toh(req->TaskMID));
1418 		mpssas_free_tm(sc, tm);
1419 		return;
1420 	}
1421 
1422 	if (reply == NULL) {
1423 		mps_dprint(sc, MPS_RECOVERY,
1424 		    "NULL abort reply for tm %p TaskMID %u\n",
1425 		    tm, le16toh(req->TaskMID));
1426 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1427 			/* this completion was due to a reset, just cleanup */
1428 			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1429 			    "reset, ignoring NULL abort reply\n");
1430 			targ->tm = NULL;
1431 			mpssas_free_tm(sc, tm);
1432 		} else {
1433 			/* we should have gotten a reply. */
1434 			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1435 			    "abort attempt, resetting controller\n");
1436 			mps_reinit(sc);
1437 		}
1438 		return;
1439 	}
1440 
1441 	mps_dprint(sc, MPS_RECOVERY,
1442 	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1443 	    le16toh(req->TaskMID),
1444 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1445 	    le32toh(reply->TerminationCount));
1446 
1447 	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1448 	if (cm == NULL) {
1449 		/*
1450 		 * If there are no more timedout commands, we're done with
1451 		 * error recovery for this target.
1452 		 */
1453 		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1454 		    "Finished abort recovery for target %u\n", targ->tid);
1455 
1456 		targ->tm = NULL;
1457 		mpssas_free_tm(sc, tm);
1458 	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1459 		/* abort success, but we have more timedout commands to abort */
1460 		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1461 		    "Continuing abort recovery for target %u\n", targ->tid);
1462 
1463 		mpssas_send_abort(sc, tm, cm);
1464 	} else {
1465 		/* we didn't get a command completion, so the abort
1466 		 * failed as far as we're concerned.  escalate.
1467 		 */
1468 		mps_dprint(sc, MPS_RECOVERY,
1469 		    "Abort failed for target %u, sending logical unit reset\n",
1470 		    targ->tid);
1471 
1472 		mpssas_send_reset(sc, tm,
1473 		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1474 	}
1475 }
1476 
1477 #define MPS_ABORT_TIMEOUT 5
1478 
1479 static int
1480 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1481 {
1482 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1483 	struct mpssas_target *targ;
1484 	int err;
1485 
1486 	targ = cm->cm_targ;
1487 	if (targ->handle == 0) {
1488 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1489 		    "%s null devhandle for target_id %d\n",
1490 		    __func__, cm->cm_ccb->ccb_h.target_id);
1491 		return -1;
1492 	}
1493 
1494 	mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1495 	    "Aborting command %p\n", cm);
1496 
1497 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1498 	req->DevHandle = htole16(targ->handle);
1499 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1500 
1501 	/* XXX Need to handle invalid LUNs */
1502 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1503 
1504 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1505 
1506 	tm->cm_data = NULL;
1507 	tm->cm_complete = mpssas_abort_complete;
1508 	tm->cm_complete_data = (void *)tm;
1509 	tm->cm_targ = cm->cm_targ;
1510 	tm->cm_lun = cm->cm_lun;
1511 
1512 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1513 	    mpssas_tm_timeout, tm);
1514 
1515 	targ->aborts++;
1516 
1517 	mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1518 
1519 	err = mps_map_command(sc, tm);
1520 	if (err)
1521 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1522 		    "error %d sending abort for cm %p SMID %u\n",
1523 		    err, cm, req->TaskMID);
1524 	return err;
1525 }
1526 
1527 static void
1528 mpssas_scsiio_timeout(void *data)
1529 {
1530 	sbintime_t elapsed, now;
1531 	union ccb *ccb;
1532 	struct mps_softc *sc;
1533 	struct mps_command *cm;
1534 	struct mpssas_target *targ;
1535 
1536 	cm = (struct mps_command *)data;
1537 	sc = cm->cm_sc;
1538 	ccb = cm->cm_ccb;
1539 	now = sbinuptime();
1540 
1541 	MPS_FUNCTRACE(sc);
1542 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1543 
1544 	mps_dprint(sc, MPS_XINFO|MPS_RECOVERY, "Timeout checking cm %p\n", cm);
1545 
1546 	/*
1547 	 * Run the interrupt handler to make sure it's not pending.  This
1548 	 * isn't perfect because the command could have already completed
1549 	 * and been re-used, though this is unlikely.
1550 	 */
1551 	mps_intr_locked(sc);
1552 	if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
1553 		mpssas_log_command(cm, MPS_XINFO,
1554 		    "SCSI command %p almost timed out\n", cm);
1555 		return;
1556 	}
1557 
1558 	if (cm->cm_ccb == NULL) {
1559 		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1560 		return;
1561 	}
1562 
1563 	targ = cm->cm_targ;
1564 	targ->timeouts++;
1565 
1566 	elapsed = now - ccb->ccb_h.qos.sim_data;
1567 	mpssas_log_command(cm, MPS_INFO|MPS_RECOVERY,
1568 	    "Command timeout on target %u(0x%04x) %d set, %d.%d elapsed\n",
1569 	    targ->tid, targ->handle, ccb->ccb_h.timeout,
1570 	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
1571 
1572 	/* XXX first, check the firmware state, to see if it's still
1573 	 * operational.  if not, do a diag reset.
1574 	 */
1575 	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1576 	cm->cm_flags |= MPS_CM_FLAGS_ON_RECOVERY | MPS_CM_FLAGS_TIMEDOUT;
1577 	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1578 
1579 	if (targ->tm != NULL) {
1580 		/* target already in recovery, just queue up another
1581 		 * timedout command to be processed later.
1582 		 */
1583 		mps_dprint(sc, MPS_RECOVERY,
1584 		    "queued timedout cm %p for processing by tm %p\n",
1585 		    cm, targ->tm);
1586 	} else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1587 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1588 		    "Sending abort to target %u for SMID %d\n", targ->tid,
1589 		    cm->cm_desc.Default.SMID);
1590 		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1591 		    cm, targ->tm);
1592 
1593 		/* start recovery by aborting the first timedout command */
1594 		mpssas_send_abort(sc, targ->tm, cm);
1595 	} else {
1596 		/* XXX queue this target up for recovery once a TM becomes
1597 		 * available.  The firmware only has a limited number of
1598 		 * HighPriority credits for the high priority requests used
1599 		 * for task management, and we ran out.
1600 		 *
1601 		 * Isilon: don't worry about this for now, since we have
1602 		 * more credits than disks in an enclosure, and limit
1603 		 * ourselves to one TM per target for recovery.
1604 		 */
1605 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1606 		    "timedout cm %p failed to allocate a tm\n", cm);
1607 	}
1608 
1609 }
1610 
1611 static void
1612 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1613 {
1614 	MPI2_SCSI_IO_REQUEST *req;
1615 	struct ccb_scsiio *csio;
1616 	struct mps_softc *sc;
1617 	struct mpssas_target *targ;
1618 	struct mpssas_lun *lun;
1619 	struct mps_command *cm;
1620 	uint8_t i, lba_byte, *ref_tag_addr;
1621 	uint16_t eedp_flags;
1622 	uint32_t mpi_control;
1623 
1624 	sc = sassc->sc;
1625 	MPS_FUNCTRACE(sc);
1626 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1627 
1628 	csio = &ccb->csio;
1629 	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1630 	    ("Target %d out of bounds in XPT_SCSI_IO\n",
1631 	     csio->ccb_h.target_id));
1632 	targ = &sassc->targets[csio->ccb_h.target_id];
1633 	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
1634 	if (targ->handle == 0x0) {
1635 		if (targ->flags & MPSSAS_TARGET_INDIAGRESET) {
1636 			mps_dprint(sc, MPS_ERROR,
1637 			    "%s NULL handle for target %u in diag reset freezing queue\n",
1638 			    __func__, csio->ccb_h.target_id);
1639 			ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
1640 			xpt_freeze_devq(ccb->ccb_h.path, 1);
1641 			xpt_done(ccb);
1642 			return;
1643 		}
1644 		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
1645 		    __func__, csio->ccb_h.target_id);
1646 		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1647 		xpt_done(ccb);
1648 		return;
1649 	}
1650 	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
1651 		mps_dprint(sc, MPS_ERROR, "%s SCSI IO not supported for RAID "
1652 		    "component target %u\n", __func__, csio->ccb_h.target_id);
1653 		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1654 		xpt_done(ccb);
1655 		return;
1656 	}
1657 	/*
1658 	 * Sometimes, it is possible to get a command that is not "In
1659 	 * Progress" and was actually aborted by the upper layer.  Check for
1660 	 * this here and complete the command without error.
1661 	 */
1662 	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1663 		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
1664 		    "target %u\n", __func__, csio->ccb_h.target_id);
1665 		xpt_done(ccb);
1666 		return;
1667 	}
1668 	/*
1669 	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
1670 	 * that the volume has timed out.  We want volumes to be enumerated
1671 	 * until they are deleted/removed, not just failed. In either event,
1672 	 * we're removing the target due to a firmware event telling us
1673 	 * the device is now gone (as opposed to some transient event). Since
1674 	 * we're opting to remove failed devices from the OS's view, we need
1675 	 * to propagate that status up the stack.
1676 	 */
1677 	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1678 		if (targ->devinfo == 0)
1679 			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1680 		else
1681 			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1682 		xpt_done(ccb);
1683 		return;
1684 	}
1685 
1686 	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1687 		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
1688 		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1689 		xpt_done(ccb);
1690 		return;
1691 	}
1692 
1693 	/*
1694 	 * If target has a reset in progress, the devq should be frozen.
1695 	 * Getting here we likely hit a race, so just requeue.
1696 	 */
1697 	if (targ->flags & MPSSAS_TARGET_INRESET) {
1698 		ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
1699 		mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
1700 		    __func__, targ->tid);
1701 		xpt_freeze_devq(ccb->ccb_h.path, 1);
1702 		xpt_done(ccb);
1703 		return;
1704 	}
1705 
1706 	cm = mps_alloc_command(sc);
1707 	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
1708 		if (cm != NULL) {
1709 			mps_free_command(sc, cm);
1710 		}
1711 		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1712 			xpt_freeze_simq(sassc->sim, 1);
1713 			sassc->flags |= MPSSAS_QUEUE_FROZEN;
1714 		}
1715 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1716 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1717 		xpt_done(ccb);
1718 		return;
1719 	}
1720 
1721 	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1722 	bzero(req, sizeof(*req));
1723 	req->DevHandle = htole16(targ->handle);
1724 	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1725 	req->MsgFlags = 0;
1726 	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1727 	req->SenseBufferLength = MPS_SENSE_LEN;
1728 	req->SGLFlags = 0;
1729 	req->ChainOffset = 0;
1730 	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
1731 	req->SGLOffset1 = 0;
1732 	req->SGLOffset2 = 0;
1733 	req->SGLOffset3 = 0;
1734 	req->SkipCount = 0;
1735 	req->DataLength = htole32(csio->dxfer_len);
1736 	req->BidirectionalDataLength = 0;
1737 	req->IoFlags = htole16(csio->cdb_len);
1738 	req->EEDPFlags = 0;
1739 
1740 	/* Note: BiDirectional transfers are not supported */
1741 	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1742 	case CAM_DIR_IN:
1743 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
1744 		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1745 		break;
1746 	case CAM_DIR_OUT:
1747 		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1748 		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1749 		break;
1750 	case CAM_DIR_NONE:
1751 	default:
1752 		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1753 		break;
1754 	}
1755 
1756 	if (csio->cdb_len == 32)
1757 		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1758 	/*
1759 	 * It looks like the hardware doesn't require an explicit tag
1760 	 * number for each transaction.  SAM Task Management not supported
1761 	 * at the moment.
1762 	 */
1763 	switch (csio->tag_action) {
1764 	case MSG_HEAD_OF_Q_TAG:
1765 		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1766 		break;
1767 	case MSG_ORDERED_Q_TAG:
1768 		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1769 		break;
1770 	case MSG_ACA_TASK:
1771 		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1772 		break;
1773 	case CAM_TAG_ACTION_NONE:
1774 	case MSG_SIMPLE_Q_TAG:
1775 	default:
1776 		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
1777 		break;
1778 	}
1779 	mpi_control |= (csio->priority << MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT) &
1780 	    MPI2_SCSIIO_CONTROL_TASKPRI_MASK;
1781 	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1782 	req->Control = htole32(mpi_control);
1783 	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1784 		mps_free_command(sc, cm);
1785 		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
1786 		xpt_done(ccb);
1787 		return;
1788 	}
1789 
1790 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
1791 		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1792 	else
1793 		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0], csio->cdb_len);
1794 	req->IoFlags = htole16(csio->cdb_len);
1795 
1796 	/*
1797 	 * Check if EEDP is supported and enabled.  If it is then check if the
1798 	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
1799 	 * is formatted for EEDP support.  If all of this is true, set CDB up
1800 	 * for EEDP transfer.
1801 	 */
1802 	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1803 	if (sc->eedp_enabled && eedp_flags) {
1804 		SLIST_FOREACH(lun, &targ->luns, lun_link) {
1805 			if (lun->lun_id == csio->ccb_h.target_lun) {
1806 				break;
1807 			}
1808 		}
1809 
1810 		if ((lun != NULL) && (lun->eedp_formatted)) {
1811 			req->EEDPBlockSize = htole16(lun->eedp_block_size);
1812 			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1813 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1814 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1815 			req->EEDPFlags = htole16(eedp_flags);
1816 
1817 			/*
1818 			 * If CDB less than 32, fill in Primary Ref Tag with
1819 			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
1820 			 * already there.  Also, set protection bit.  FreeBSD
1821 			 * currently does not support CDBs bigger than 16, but
1822 			 * the code doesn't hurt, and will be here for the
1823 			 * future.
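			 * As a concrete example: READ(10)/WRITE(10) carry
			 * their LBA in CDB bytes 2-5 (lba_byte 2 below),
			 * while a 16-byte CDB keeps the low 4 LBA bytes at
			 * 6-9 (lba_byte 6).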
1824 			 */
1825 			if (csio->cdb_len != 32) {
1826 				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1827 				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1828 				    PrimaryReferenceTag;
1829 				for (i = 0; i < 4; i++) {
1830 					*ref_tag_addr =
1831 					    req->CDB.CDB32[lba_byte + i];
1832 					ref_tag_addr++;
1833 				}
1834 				req->CDB.EEDP32.PrimaryReferenceTag =
1835 					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
1836 				req->CDB.EEDP32.PrimaryApplicationTagMask =
1837 				    0xFFFF;
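				/*
				 * Setting bits 5-7 of CDB byte 1 to 001 turns
				 * on the RDPROTECT/WRPROTECT field so the
				 * drive checks protection information.
				 */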
1838 				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1839 				    0x20;
1840 			} else {
1841 				eedp_flags |=
1842 				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1843 				req->EEDPFlags = htole16(eedp_flags);
1844 				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1845 				    0x1F) | 0x20;
1846 			}
1847 		}
1848 	}
1849 
1850 	cm->cm_length = csio->dxfer_len;
1851 	if (cm->cm_length != 0) {
1852 		cm->cm_data = ccb;
1853 		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
1854 	} else {
1855 		cm->cm_data = NULL;
1856 	}
1857 	cm->cm_sge = &req->SGL;
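	/*
	 * The embedded SGL begins at 32-bit word 24 of the request
	 * (SGLOffset0 above); (32 - 24) * 4 is the space in bytes left for
	 * it in a 32-word SCSI IO frame.
	 */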
1858 	cm->cm_sglsize = (32 - 24) * 4;
1859 	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1860 	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1861 	cm->cm_complete = mpssas_scsiio_complete;
1862 	cm->cm_complete_data = ccb;
1863 	cm->cm_targ = targ;
1864 	cm->cm_lun = csio->ccb_h.target_lun;
1865 	cm->cm_ccb = ccb;
1866 
1867 	/*
1868 	 * If HBA is a WD and the command is not for a retry, try to build a
1869 	 * direct I/O message. If failed, or the command is for a retry, send
1870 	 * the I/O to the IR volume itself.
1871 	 */
1872 	if (sc->WD_valid_config) {
1873 		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
1874 			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
1875 		} else {
1876 			mpssas_direct_drive_io(sassc, cm, ccb);
1877 		}
1878 	}
1879 
1880 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
1881 	if (csio->bio != NULL)
1882 		biotrack(csio->bio, __func__);
1883 #endif
1884 	csio->ccb_h.qos.sim_data = sbinuptime();
1885 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1886 	    mpssas_scsiio_timeout, cm, 0);
1887 
1888 	targ->issued++;
1889 	targ->outstanding++;
1890 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1891 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1892 
1893 	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
1894 	    __func__, cm, ccb, targ->outstanding);
1895 
1896 	mps_map_command(sc, cm);
1897 	return;
1898 }
1899 
1900 /**
1901  * mps_sc_failed_io_info - translate a non-successful SCSI_IO request
1902  */
1903 static void
1904 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1905     Mpi2SCSIIOReply_t *mpi_reply)
1906 {
1907 	u32 response_info;
1908 	u8 *response_bytes;
1909 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1910 	    MPI2_IOCSTATUS_MASK;
1911 	u8 scsi_state = mpi_reply->SCSIState;
1912 	u8 scsi_status = mpi_reply->SCSIStatus;
1913 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1914 	const char *desc_ioc_state, *desc_scsi_status;
1915 
1916 	if (log_info == 0x31170000)
1917 		return;
1918 
1919 	desc_ioc_state = mps_describe_table(mps_iocstatus_string,
1920 	    ioc_status);
1921 	desc_scsi_status = mps_describe_table(mps_scsi_status_string,
1922 	    scsi_status);
1923 
1924 	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
1925 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
1926 
1927 	/*
1928 	 * We can add more detail about underflow data here
1929 	 * TO-DO
1930 	 */
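	/*
	 * The %b format prints the named bits from the "\20..." list that
	 * are set in scsi_state.
	 */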
1931 	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
1932 	    "scsi_state %b\n", desc_scsi_status, scsi_status,
1933 	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
1934 	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
1935 
1936 	if (sc->mps_debug & MPS_XINFO &&
1937 		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1938 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
1939 		scsi_sense_print(csio);
1940 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
1941 	}
1942 
1943 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
1944 		response_info = le32toh(mpi_reply->ResponseInfo);
1945 		response_bytes = (u8 *)&response_info;
1946 		mps_dprint(sc, MPS_XINFO, "response code(0x%1x): %s\n",
1947 		    response_bytes[0],
1948 		    mps_describe_table(mps_scsi_taskmgmt_string,
1949 		    response_bytes[0]));
1950 	}
1951 }
1952 
1953 static void
1954 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1955 {
1956 	MPI2_SCSI_IO_REPLY *rep;
1957 	union ccb *ccb;
1958 	struct ccb_scsiio *csio;
1959 	struct mpssas_softc *sassc;
1960 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
1961 	u8 *TLR_bits, TLR_on;
1962 	int dir = 0, i;
1963 	u16 alloc_len;
1964 	struct mpssas_target *target;
1965 	target_id_t target_id;
1966 
1967 	MPS_FUNCTRACE(sc);
1968 	mps_dprint(sc, MPS_TRACE,
1969 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
1970 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
1971 	    cm->cm_targ->outstanding);
1972 
1973 	callout_stop(&cm->cm_callout);
1974 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1975 
1976 	sassc = sc->sassc;
1977 	ccb = cm->cm_complete_data;
1978 	csio = &ccb->csio;
1979 	target_id = csio->ccb_h.target_id;
1980 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
1981 	/*
1982 	 * XXX KDM if the chain allocation fails, does it matter if we do
1983 	 * the sync and unload here?  It is simpler to do it in every case,
1984 	 * assuming it doesn't cause problems.
1985 	 */
1986 	if (cm->cm_data != NULL) {
1987 		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
1988 			dir = BUS_DMASYNC_POSTREAD;
1989 		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
1990 			dir = BUS_DMASYNC_POSTWRITE;
1991 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
1992 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
1993 	}
1994 
1995 	cm->cm_targ->completed++;
1996 	cm->cm_targ->outstanding--;
1997 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
1998 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
1999 
2000 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
2001 	if (ccb->csio.bio != NULL)
2002 		biotrack(ccb->csio.bio, __func__);
2003 #endif
2004 
2005 	if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
2006 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2007 		KASSERT(cm->cm_state == MPS_CM_STATE_BUSY,
2008 		    ("Not busy for CM_FLAGS_TIMEDOUT: %u\n", cm->cm_state));
2009 		cm->cm_flags &= ~MPS_CM_FLAGS_ON_RECOVERY;
2010 		if (cm->cm_reply != NULL)
2011 			mpssas_log_command(cm, MPS_RECOVERY,
2012 			    "completed timedout cm %p ccb %p during recovery "
2013 			    "ioc %x scsi %x state %x xfer %u\n",
2014 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2015 			    rep->SCSIStatus, rep->SCSIState,
2016 			    le32toh(rep->TransferCount));
2017 		else
2018 			mpssas_log_command(cm, MPS_RECOVERY,
2019 			    "completed timedout cm %p ccb %p during recovery\n",
2020 			    cm, cm->cm_ccb);
2021 	} else if (cm->cm_targ->tm != NULL) {
2022 		if (cm->cm_reply != NULL)
2023 			mpssas_log_command(cm, MPS_RECOVERY,
2024 			    "completed cm %p ccb %p during recovery "
2025 			    "ioc %x scsi %x state %x xfer %u\n",
2026 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2027 			    rep->SCSIStatus, rep->SCSIState,
2028 			    le32toh(rep->TransferCount));
2029 		else
2030 			mpssas_log_command(cm, MPS_RECOVERY,
2031 			    "completed cm %p ccb %p during recovery\n",
2032 			    cm, cm->cm_ccb);
2033 	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2034 		mpssas_log_command(cm, MPS_RECOVERY,
2035 		    "reset completed cm %p ccb %p\n",
2036 		    cm, cm->cm_ccb);
2037 	}
2038 
2039 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2040 		/*
2041 		 * We ran into an error after we tried to map the command,
2042 		 * so we're getting a callback without queueing the command
2043 		 * to the hardware.  So we set the status here, and it will
2044 		 * be retained below.  We'll go through the "fast path",
2045 		 * because there can be no reply when we haven't actually
2046 		 * gone out to the hardware.
2047 		 */
2048 		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2049 
2050 		/*
2051 		 * Currently the only error included in the mask is
2052 		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2053 		 * chain frames.  We need to freeze the queue until we get
2054 		 * a command that completed without this error, which will
2055 		 * hopefully have some chain frames attached that we can
2056 		 * use.  If we wanted to get smarter about it, we would
2057 		 * only unfreeze the queue in this condition when we're
2058 		 * sure that we're getting some chain frames back.  That's
2059 		 * probably unnecessary.
2060 		 */
2061 		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2062 			xpt_freeze_simq(sassc->sim, 1);
2063 			sassc->flags |= MPSSAS_QUEUE_FROZEN;
2064 			mps_dprint(sc, MPS_XINFO, "Error sending command, "
2065 				   "freezing SIM queue\n");
2066 		}
2067 	}
2068 
2069 	/*
2070 	 * If this is a Start Stop Unit command and it was issued by the driver
2071 	 * during shutdown, decrement the refcount to account for all of the
2072 	 * commands that were sent.  All SSU commands should be completed before
2073 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2074 	 * is TRUE.
2075 	 */
2076 	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2077 		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
2078 		sc->SSU_refcount--;
2079 	}
2080 
2081 	/* Take the fast path to completion */
2082 	if (cm->cm_reply == NULL) {
2083 		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2084 			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2085 				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2086 			else {
2087 				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2088 				ccb->csio.scsi_status = SCSI_STATUS_OK;
2089 			}
2090 			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2091 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2092 				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2093 				mps_dprint(sc, MPS_XINFO,
2094 				    "Unfreezing SIM queue\n");
2095 			}
2096 		}
2097 
2098 		/*
2099 		 * There are two scenarios where the status won't be
2100 		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
2101 		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2102 		 */
2103 		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2104 			/*
2105 			 * Freeze the dev queue so that commands are
2106 			 * executed in the correct order after error
2107 			 * recovery.
2108 			 */
2109 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2110 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2111 		}
2112 		mps_free_command(sc, cm);
2113 		xpt_done(ccb);
2114 		return;
2115 	}
2116 
2117 	mpssas_log_command(cm, MPS_XINFO,
2118 	    "ioc %x scsi %x state %x xfer %u\n",
2119 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2120 	    le32toh(rep->TransferCount));
2121 
2122 	/*
2123 	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2124 	 * Volume if an error occurred (normal I/O retry).  Use the original
2125 	 * CCB, but set a flag that this will be a retry so that it's sent to
2126 	 * the original volume.  Free the command but reuse the CCB.
2127 	 */
2128 	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2129 		mps_free_command(sc, cm);
2130 		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
2131 		mpssas_action_scsiio(sassc, ccb);
2132 		return;
2133 	} else
2134 		ccb->ccb_h.sim_priv.entries[0].field = 0;
2135 
2136 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2137 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2138 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2139 		/* FALLTHROUGH */
2140 	case MPI2_IOCSTATUS_SUCCESS:
2141 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2142 
2143 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2144 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2145 			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2146 
2147 		/* Completion failed at the transport level. */
2148 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2149 		    MPI2_SCSI_STATE_TERMINATED)) {
2150 			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2151 			break;
2152 		}
2153 
2154 		/* In a modern packetized environment, an autosense failure
2155 		 * implies that there's not much else that can be done to
2156 		 * recover the command.
2157 		 */
2158 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2159 			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2160 			break;
2161 		}
2162 
2163 		/*
2164 		 * CAM doesn't care about SAS Response Info data, but if this
2165 		 * state is reported, check whether TLR should be done.  If
2166 		 * not, clear the TLR_bits for the target.
2167 		 */
2168 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2169 		    ((le32toh(rep->ResponseInfo) &
2170 		    MPI2_SCSI_RI_MASK_REASONCODE) ==
2171 		    MPS_SCSI_RI_INVALID_FRAME)) {
2172 			sc->mapping_table[target_id].TLR_bits =
2173 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2174 		}
2175 
2176 		/*
2177 		 * Intentionally override the normal SCSI status reporting
2178 		 * for these two cases.  These are likely to happen in a
2179 		 * multi-initiator environment, and we want to make sure that
2180 		 * CAM retries these commands rather than fail them.
2181 		 */
2182 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2183 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2184 			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2185 			break;
2186 		}
2187 
2188 		/* Handle normal status and sense */
2189 		csio->scsi_status = rep->SCSIStatus;
2190 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2191 			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2192 		else
2193 			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2194 
2195 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2196 			int sense_len, returned_sense_len;
2197 
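			/*
			 * Clamp the returned sense to our buffer, then report
			 * any shortfall against the requested length as
			 * sense_resid.
			 */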
2198 			returned_sense_len = min(le32toh(rep->SenseCount),
2199 			    sizeof(struct scsi_sense_data));
2200 			if (returned_sense_len < ccb->csio.sense_len)
2201 				ccb->csio.sense_resid = ccb->csio.sense_len -
2202 					returned_sense_len;
2203 			else
2204 				ccb->csio.sense_resid = 0;
2205 
2206 			sense_len = min(returned_sense_len,
2207 			    ccb->csio.sense_len - ccb->csio.sense_resid);
2208 			bzero(&ccb->csio.sense_data,
2209 			      sizeof(ccb->csio.sense_data));
2210 			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2211 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2212 		}
2213 
2214 		/*
2215 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2216 		 * and it's page code 0 (Supported Page List), and there is
2217 		 * inquiry data, and this is for a sequential access device, and
2218 		 * the device is an SSP target, and TLR is supported by the
2219 		 * controller, turn the TLR_bits value ON if page 0x90 is
2220 		 * supported.
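		 * (In the check below, INQUIRY CDB byte 1 bit 0 is the EVPD
		 * bit, byte 2 is the page code, and bytes 3-4 are the
		 * allocation length used to compute alloc_len.)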
2221 		 */
2222 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2223 		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2224 		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2225 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2226 		    (csio->data_ptr != NULL) &&
2227 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2228 		    (sc->control_TLR) &&
2229 		    (sc->mapping_table[target_id].device_info &
2230 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2231 			vpd_list = (struct scsi_vpd_supported_page_list *)
2232 			    csio->data_ptr;
2233 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2234 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2235 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2236 			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2237 			    csio->cdb_io.cdb_bytes[4];
2238 			alloc_len -= csio->resid;
2239 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2240 				if (vpd_list->list[i] == 0x90) {
2241 					*TLR_bits = TLR_on;
2242 					break;
2243 				}
2244 			}
2245 		}
2246 
2247 		/*
2248 		 * If this is a SATA direct-access end device, mark it so that
2249 		 * a SCSI StartStopUnit command will be sent to it when the
2250 		 * driver is being shutdown.
2251 		 */
2252 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2253 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2254 		    (sc->mapping_table[target_id].device_info &
2255 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2256 		    ((sc->mapping_table[target_id].device_info &
2257 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2258 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2259 			target = &sassc->targets[target_id];
2260 			target->supports_SSU = TRUE;
2261 			mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
2262 			    target_id);
2263 		}
2264 		break;
2265 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2266 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2267 		/*
2268 		 * If devinfo is 0 this will be a volume.  In that case don't
2269 		 * tell CAM that the volume is not there.  We want volumes to
2270 		 * be enumerated until they are deleted/removed, not just
2271 		 * failed.
2272 		 */
2273 		if (cm->cm_targ->devinfo == 0)
2274 			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2275 		else
2276 			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2277 		break;
2278 	case MPI2_IOCSTATUS_INVALID_SGL:
2279 		mps_print_scsiio_cmd(sc, cm);
2280 		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2281 		break;
2282 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2283 		/*
2284 		 * This is one of the responses that comes back when an I/O
2285 		 * has been aborted.  If it is because of a timeout that we
2286 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2287 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2288 		 * command is the same (it gets retried, subject to the
2289 		 * retry counter), the only difference is what gets printed
2290 		 * on the console.
2291 		 */
2292 		if (cm->cm_flags & MPS_CM_FLAGS_TIMEDOUT)
2293 			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2294 		else
2295 			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2296 		break;
2297 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2298 		/* resid is ignored for this condition */
2299 		csio->resid = 0;
2300 		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2301 		break;
2302 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2303 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2304 		/*
2305 		 * These can sometimes be transient transport-related
2306 		 * errors, and sometimes persistent drive-related errors.
2307 		 * We used to retry these without decrementing the retry
2308 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2309 		 * we hit a persistent drive problem that returns one of
2310 		 * these error codes, we would retry indefinitely.  So,
2311 		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2312 		 * count and avoid infinite retries.  We're taking the
2313 		 * potential risk of flagging false failures in the event
2314 		 * of a topology-related error (e.g. a SAS expander problem
2315 		 * causes a command addressed to a drive to fail), but
2316 		 * avoiding getting into an infinite retry loop. However,
2317 		 * if we get them while we're moving a device, we should
2318 		 * fail the request as 'not there' because the device
2319 		 * is effectively gone.
2320 		 */
2321 		if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL)
2322 			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2323 		else
2324 			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2325 		mps_dprint(sc, MPS_INFO,
2326 		    "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
2327 		    mps_describe_table(mps_iocstatus_string,
2328 		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2329 		    target_id, cm->cm_desc.Default.SMID,
2330 		    le32toh(rep->IOCLogInfo),
2331 		    (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) ? " departing" : "");
2332 		mps_dprint(sc, MPS_XINFO,
2333 		    "SCSIStatus %x SCSIState %x xfercount %u\n",
2334 		    rep->SCSIStatus, rep->SCSIState,
2335 		    le32toh(rep->TransferCount));
2336 		break;
2337 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2338 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2339 	case MPI2_IOCSTATUS_INVALID_VPID:
2340 	case MPI2_IOCSTATUS_INVALID_FIELD:
2341 	case MPI2_IOCSTATUS_INVALID_STATE:
2342 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2343 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2344 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2345 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2346 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2347 	default:
2348 		mpssas_log_command(cm, MPS_XINFO,
2349 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2350 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2351 		    rep->SCSIStatus, rep->SCSIState,
2352 		    le32toh(rep->TransferCount));
2353 		csio->resid = cm->cm_length;
2354 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2355 		break;
2356 	}
2357 
2358 	mps_sc_failed_io_info(sc, csio, rep);
2359 
2360 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2361 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2362 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2363 		mps_dprint(sc, MPS_XINFO, "Command completed, "
2364 		    "unfreezing SIM queue\n");
2365 	}
2366 
2367 	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2368 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2369 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2370 	}
2371 
2372 	/*
2373 	 * Check to see if we're removing the device. If so, and this is the
2374 	 * last command on the queue, proceed with the deferred removal of the
2375 	 * device.  Note, for removing a volume, this won't trigger because
2376 	 * pending_remove_tm will be NULL.
2377 	 */
2378 	if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) {
2379 		if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
2380 		    cm->cm_targ->pending_remove_tm != NULL) {
2381 			mps_dprint(sc, MPS_INFO,
2382 			    "Last pending command complete: starting remove_device target %u handle 0x%04x\n",
2383 			    cm->cm_targ->tid, cm->cm_targ->handle);
2384 			mps_map_command(sc, cm->cm_targ->pending_remove_tm);
2385 			cm->cm_targ->pending_remove_tm = NULL;
2386 		}
2387 	}
2388 
2389 	mps_free_command(sc, cm);
2390 	xpt_done(ccb);
2391 }
2392 
2393 /* All Request reached here are Endian safe */
2394 static void
2395 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2396     union ccb *ccb) {
2397 	pMpi2SCSIIORequest_t	pIO_req;
2398 	struct mps_softc	*sc = sassc->sc;
2399 	uint64_t		virtLBA;
2400 	uint32_t		physLBA, stripe_offset, stripe_unit;
2401 	uint32_t		io_size, column;
2402 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2403 
2404 	/*
2405 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2406 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2407 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2408 	 * bit different than the 10/16 CDBs, handle them separately.
2409 	 */
2410 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2411 	CDB = pIO_req->CDB.CDB32;
2412 
2413 	/*
2414 	 * Handle 6 byte CDBs.
2415 	 */
2416 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2417 	    (CDB[0] == WRITE_6))) {
2418 		/*
2419 		 * Get the transfer size in blocks.
2420 		 */
2421 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2422 
2423 		/*
2424 		 * Get virtual LBA given in the CDB.
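		 * READ(6)/WRITE(6) carry a 21-bit LBA: bits 0-4 of CDB byte 1
		 * plus bytes 2 and 3, hence the 0x1F mask below.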
2425 		 */
2426 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2427 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2428 
2429 		/*
2430 		 * Check that LBA range for I/O does not exceed volume's
2431 		 * MaxLBA.
2432 		 */
2433 		if ((virtLBA + (uint64_t)io_size - 1) <=
2434 		    sc->DD_max_lba) {
2435 			/*
2436 			 * Check if the I/O crosses a stripe boundary.  If not,
2437 			 * translate the virtual LBA to a physical LBA and set
2438 			 * the DevHandle for the PhysDisk to be used.  If it
2439 			 * does cross a boundary, do normal I/O.  To get the
2440 			 * right DevHandle to use, get the map number for the
2441 			 * column, then use that map number to look up the
2442 			 * DevHandle of the PhysDisk.
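			 * As an illustration (hypothetical geometry): with a
			 * 128-block stripe (DD_stripe_exponent 7) and 4
			 * member disks, virtual LBA 300 gives stripe_offset
			 * 44 and physLBA 2, so column 2, stripe_unit 0, and a
			 * physical LBA of (0 << 7) + 44 = 44 on that member.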
2443 			 */
2444 			stripe_offset = (uint32_t)virtLBA &
2445 			    (sc->DD_stripe_size - 1);
2446 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2447 				physLBA = (uint32_t)virtLBA >>
2448 				    sc->DD_stripe_exponent;
2449 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2450 				column = physLBA % sc->DD_num_phys_disks;
2451 				pIO_req->DevHandle =
2452 				    htole16(sc->DD_column_map[column].dev_handle);
2453 				/* XXX Is this endian safe? */
2454 				cm->cm_desc.SCSIIO.DevHandle =
2455 				    pIO_req->DevHandle;
2456 
2457 				physLBA = (stripe_unit <<
2458 				    sc->DD_stripe_exponent) + stripe_offset;
2459 				ptrLBA = &pIO_req->CDB.CDB32[1];
2460 				physLBA_byte = (uint8_t)(physLBA >> 16);
2461 				*ptrLBA = physLBA_byte;
2462 				ptrLBA = &pIO_req->CDB.CDB32[2];
2463 				physLBA_byte = (uint8_t)(physLBA >> 8);
2464 				*ptrLBA = physLBA_byte;
2465 				ptrLBA = &pIO_req->CDB.CDB32[3];
2466 				physLBA_byte = (uint8_t)physLBA;
2467 				*ptrLBA = physLBA_byte;
2468 
2469 				/*
2470 				 * Set flag that Direct Drive I/O is
2471 				 * being done.
2472 				 */
2473 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2474 			}
2475 		}
2476 		return;
2477 	}
2478 
2479 	/*
2480 	 * Handle 10, 12 or 16 byte CDBs.
2481 	 */
2482 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2483 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2484 	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2485 	    (CDB[0] == WRITE_12))) {
2486 		/*
2487 		 * For 16-byte CDBs, verify that the upper 4 bytes of the CDB
2488 		 * are 0.  If not, the access is beyond 2TB, so handle it in
2489 		 * the else section.  10-byte and 12-byte CDBs are OK.
2490 		 * FreeBSD only rarely sends 12-byte READ/WRITE commands, but
2491 		 * the driver is ready to accept 12-byte CDBs for direct I/O.
2492 		 */
2493 		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2494 		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2495 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2496 			/*
2497 			 * Get the transfer size in blocks.
2498 			 */
2499 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2500 
2501 			/*
2502 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2503 			 * LBA in the CDB depending on command.
2504 			 */
2505 			lba_idx = ((CDB[0] == READ_12) ||
2506 				(CDB[0] == WRITE_12) ||
2507 				(CDB[0] == READ_10) ||
2508 				(CDB[0] == WRITE_10))? 2 : 6;
2509 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2510 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2511 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2512 			    (uint64_t)CDB[lba_idx + 3];
2513 
2514 			/*
2515 			 * Check that LBA range for I/O does not exceed volume's
2516 			 * MaxLBA.
2517 			 */
2518 			if ((virtLBA + (uint64_t)io_size - 1) <=
2519 			    sc->DD_max_lba) {
2520 				/*
2521 				 * Check if the I/O crosses a stripe boundary.
2522 				 * If not, translate the virtual LBA to a
2523 				 * physical LBA and set the DevHandle for the
2524 				 * PhysDisk to be used.  If it does cross a
2525 				 * boundary, do normal I/O.  To get the right
2526 				 * DevHandle to use, get the map number for the
2527 				 * column, then use that map number to look up
2528 				 * the DevHandle of the PhysDisk.
2529 				 */
2530 				stripe_offset = (uint32_t)virtLBA &
2531 				    (sc->DD_stripe_size - 1);
2532 				if ((stripe_offset + io_size) <=
2533 				    sc->DD_stripe_size) {
2534 					physLBA = (uint32_t)virtLBA >>
2535 					    sc->DD_stripe_exponent;
2536 					stripe_unit = physLBA /
2537 					    sc->DD_num_phys_disks;
2538 					column = physLBA %
2539 					    sc->DD_num_phys_disks;
2540 					pIO_req->DevHandle =
2541 					    htole16(sc->DD_column_map[column].
2542 					    dev_handle);
2543 					cm->cm_desc.SCSIIO.DevHandle =
2544 					    pIO_req->DevHandle;
2545 
2546 					physLBA = (stripe_unit <<
2547 					    sc->DD_stripe_exponent) +
2548 					    stripe_offset;
2549 					ptrLBA =
2550 					    &pIO_req->CDB.CDB32[lba_idx];
2551 					physLBA_byte = (uint8_t)(physLBA >> 24);
2552 					*ptrLBA = physLBA_byte;
2553 					ptrLBA =
2554 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2555 					physLBA_byte = (uint8_t)(physLBA >> 16);
2556 					*ptrLBA = physLBA_byte;
2557 					ptrLBA =
2558 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2559 					physLBA_byte = (uint8_t)(physLBA >> 8);
2560 					*ptrLBA = physLBA_byte;
2561 					ptrLBA =
2562 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2563 					physLBA_byte = (uint8_t)physLBA;
2564 					*ptrLBA = physLBA_byte;
2565 
2566 					/*
2567 					 * Set flag that Direct Drive I/O is
2568 					 * being done.
2569 					 */
2570 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2571 				}
2572 			}
2573 		} else {
2574 			/*
2575 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2576 			 * 0.  Get the transfer size in blocks.
2577 			 */
2578 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2579 
2580 			/*
2581 			 * Get virtual LBA.
2582 			 */
2583 			virtLBA = ((uint64_t)CDB[2] << 56) |
2584 			    ((uint64_t)CDB[3] << 48) |
2585 			    ((uint64_t)CDB[4] << 40) |
2586 			    ((uint64_t)CDB[5] << 32) |
2587 			    ((uint64_t)CDB[6] << 24) |
2588 			    ((uint64_t)CDB[7] << 16) |
2589 			    ((uint64_t)CDB[8] << 8) |
2590 			    (uint64_t)CDB[9];
2591 
2592 			/*
2593 			 * Check that LBA range for I/O does not exceed volume's
2594 			 * MaxLBA.
2595 			 */
2596 			if ((virtLBA + (uint64_t)io_size - 1) <=
2597 			    sc->DD_max_lba) {
2598 				/*
2599 				 * Check if the I/O crosses a stripe boundary.
2600 				 * If not, translate the virtual LBA to a
2601 				 * physical LBA and set the DevHandle for the
2602 				 * PhysDisk to be used.  If it does cross a
2603 				 * boundary, do normal I/O.  To get the right
2604 				 * DevHandle to use, get the map number for the
2605 				 * column, then use that map number to look up
2606 				 * the DevHandle of the PhysDisk.
2607 				 */
2608 				stripe_offset = (uint32_t)virtLBA &
2609 				    (sc->DD_stripe_size - 1);
2610 				if ((stripe_offset + io_size) <=
2611 				    sc->DD_stripe_size) {
2612 					physLBA = (uint32_t)(virtLBA >>
2613 					    sc->DD_stripe_exponent);
2614 					stripe_unit = physLBA /
2615 					    sc->DD_num_phys_disks;
2616 					column = physLBA %
2617 					    sc->DD_num_phys_disks;
2618 					pIO_req->DevHandle =
2619 					    htole16(sc->DD_column_map[column].
2620 					    dev_handle);
2621 					cm->cm_desc.SCSIIO.DevHandle =
2622 					    pIO_req->DevHandle;
2623 
2624 					physLBA = (stripe_unit <<
2625 					    sc->DD_stripe_exponent) +
2626 					    stripe_offset;
2627 
2628 					/*
2629 					 * Set upper 4 bytes of LBA to 0.  We
2630 					 * assume that the phys disks are less
2631 					 * than 2 TB in size.  Then, set the
2632 					 * lower 4 bytes.
2633 					 */
2634 					pIO_req->CDB.CDB32[2] = 0;
2635 					pIO_req->CDB.CDB32[3] = 0;
2636 					pIO_req->CDB.CDB32[4] = 0;
2637 					pIO_req->CDB.CDB32[5] = 0;
2638 					ptrLBA = &pIO_req->CDB.CDB32[6];
2639 					physLBA_byte = (uint8_t)(physLBA >> 24);
2640 					*ptrLBA = physLBA_byte;
2641 					ptrLBA = &pIO_req->CDB.CDB32[7];
2642 					physLBA_byte = (uint8_t)(physLBA >> 16);
2643 					*ptrLBA = physLBA_byte;
2644 					ptrLBA = &pIO_req->CDB.CDB32[8];
2645 					physLBA_byte = (uint8_t)(physLBA >> 8);
2646 					*ptrLBA = physLBA_byte;
2647 					ptrLBA = &pIO_req->CDB.CDB32[9];
2648 					physLBA_byte = (uint8_t)physLBA;
2649 					*ptrLBA = physLBA_byte;
2650 
2651 					/*
2652 					 * Set flag that Direct Drive I/O is
2653 					 * being done.
2654 					 */
2655 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2656 				}
2657 			}
2658 		}
2659 	}
2660 }
2661 
2662 static void
2663 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2664 {
2665 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2666 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2667 	uint64_t sasaddr;
2668 	union ccb *ccb;
2669 
2670 	ccb = cm->cm_complete_data;
2671 
2672 	/*
2673 	 * Currently there should be no way we can hit this case.  It only
2674 	 * happens when we have a failure to allocate chain frames, and SMP
2675 	 * commands require two S/G elements only.  That should be handled
2676 	 * in the standard request size.
2677 	 */
2678 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2679 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2680 			   __func__, cm->cm_flags);
2681 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2682 		goto bailout;
2683 	}
2684 
2685 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2686 	if (rpl == NULL) {
2687 		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2688 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2689 		goto bailout;
2690 	}
2691 
2692 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2693 	sasaddr = le32toh(req->SASAddress.Low);
2694 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2695 
2696 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2697 	    MPI2_IOCSTATUS_SUCCESS ||
2698 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2699 		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2700 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2701 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2702 		goto bailout;
2703 	}
2704 
2705 	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2706 		   "%#jx completed successfully\n", __func__,
2707 		   (uintmax_t)sasaddr);
2708 
2709 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2710 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2711 	else
2712 		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2713 
2714 bailout:
2715 	/*
2716 	 * We sync in both directions because we had DMAs in the S/G list
2717 	 * in both directions.
2718 	 */
2719 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2720 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2721 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2722 	mps_free_command(sc, cm);
2723 	xpt_done(ccb);
2724 }
2725 
2726 static void
2727 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2728 {
2729 	struct mps_command *cm;
2730 	uint8_t *request, *response;
2731 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2732 	struct mps_softc *sc;
2733 	int error;
2734 
2735 	sc = sassc->sc;
2736 	error = 0;
2737 
2738 	/*
2739 	 * XXX We don't yet support physical addresses here.
2740 	 */
2741 	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2742 	case CAM_DATA_PADDR:
2743 	case CAM_DATA_SG_PADDR:
2744 		mps_dprint(sc, MPS_ERROR,
2745 			   "%s: physical addresses not supported\n", __func__);
2746 		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2747 		xpt_done(ccb);
2748 		return;
2749 	case CAM_DATA_SG:
2750 		/*
2751 		 * The chip does not support more than one buffer for the
2752 		 * request or response.
2753 		 */
2754 	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
2755 		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2756 			mps_dprint(sc, MPS_ERROR,
2757 				   "%s: multiple request or response "
2758 				   "buffer segments not supported for SMP\n",
2759 				   __func__);
2760 			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2761 			xpt_done(ccb);
2762 			return;
2763 		}
2764 
2765 		/*
2766 		 * The CAM_SCATTER_VALID flag was originally implemented
2767 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2768 		 * We have two.  So, just take that flag to mean that we
2769 		 * might have S/G lists, and look at the S/G segment count
2770 		 * to figure out whether that is the case for each individual
2771 		 * buffer.
2772 		 */
2773 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2774 			bus_dma_segment_t *req_sg;
2775 
2776 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2777 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2778 		} else
2779 			request = ccb->smpio.smp_request;
2780 
2781 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2782 			bus_dma_segment_t *rsp_sg;
2783 
2784 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2785 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2786 		} else
2787 			response = ccb->smpio.smp_response;
2788 		break;
2789 	case CAM_DATA_VADDR:
2790 		request = ccb->smpio.smp_request;
2791 		response = ccb->smpio.smp_response;
2792 		break;
2793 	default:
2794 		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2795 		xpt_done(ccb);
2796 		return;
2797 	}
2798 
2799 	cm = mps_alloc_command(sc);
2800 	if (cm == NULL) {
2801 		mps_dprint(sc, MPS_ERROR,
2802 		    "%s: cannot allocate command\n", __func__);
2803 		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2804 		xpt_done(ccb);
2805 		return;
2806 	}
2807 
2808 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2809 	bzero(req, sizeof(*req));
2810 	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2811 
2812 	/* Allow the chip to use any route to this SAS address. */
2813 	req->PhysicalPort = 0xff;
2814 
2815 	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2816 	req->SGLFlags =
2817 	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2818 
2819 	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
2820 	    "address %#jx\n", __func__, (uintmax_t)sasaddr);
2821 
2822 	mpi_init_sge(cm, req, &req->SGL);
2823 
2824 	/*
2825 	 * Set up a uio to pass into mps_map_command().  This allows us to
2826 	 * do one map command, and one busdma call in there.
2827 	 */
2828 	cm->cm_uio.uio_iov = cm->cm_iovec;
2829 	cm->cm_uio.uio_iovcnt = 2;
2830 	cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2831 
2832 	/*
2833 	 * The read/write flag isn't used by busdma, but set it just in
2834 	 * case.  This isn't exactly accurate, either, since we're going in
2835 	 * both directions.
2836 	 */
2837 	cm->cm_uio.uio_rw = UIO_WRITE;
2838 
2839 	cm->cm_iovec[0].iov_base = request;
2840 	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2841 	cm->cm_iovec[1].iov_base = response;
2842 	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2843 
2844 	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2845 			       cm->cm_iovec[1].iov_len;
2846 
2847 	/*
2848 	 * Trigger a warning message in mps_data_cb() for the user if we
2849 	 * wind up exceeding two S/G segments.  The chip expects one
2850 	 * segment for the request and another for the response.
2851 	 */
2852 	cm->cm_max_segs = 2;
2853 
2854 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2855 	cm->cm_complete = mpssas_smpio_complete;
2856 	cm->cm_complete_data = ccb;
2857 
2858 	/*
2859 	 * Tell the mapping code that we're using a uio, and that this is
2860 	 * an SMP passthrough request.  There is a little special-case
2861 	 * logic there (in mps_data_cb()) to handle the bidirectional
2862 	 * transfer.
2863 	 */
2864 	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2865 			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2866 
2867 	/* The chip data format is little endian. */
2868 	req->SASAddress.High = htole32(sasaddr >> 32);
2869 	req->SASAddress.Low = htole32(sasaddr);
2870 
2871 	/*
2872 	 * XXX Note that we don't have a timeout/abort mechanism here.
2873 	 * From the manual, it looks like task management requests only
2874 	 * work for SCSI IO and SATA passthrough requests.  We may need to
2875 	 * have a mechanism to retry requests in the event of a chip reset
2876 	 * at least.  Hopefully the chip will ensure that any errors short
2877 	 * of that are relayed back to the driver.
2878 	 */
2879 	error = mps_map_command(sc, cm);
2880 	if ((error != 0) && (error != EINPROGRESS)) {
2881 		mps_dprint(sc, MPS_ERROR,
2882 			   "%s: error %d returned from mps_map_command()\n",
2883 			   __func__, error);
2884 		goto bailout_error;
2885 	}
2886 
2887 	return;
2888 
2889 bailout_error:
2890 	mps_free_command(sc, cm);
2891 	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2892 	xpt_done(ccb);
2893 	return;
2894 
2895 }
2896 
2897 static void
2898 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
2899 {
2900 	struct mps_softc *sc;
2901 	struct mpssas_target *targ;
2902 	uint64_t sasaddr = 0;
2903 
2904 	sc = sassc->sc;
2905 
2906 	/*
2907 	 * Make sure the target exists.
2908 	 */
2909 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2910 	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
2911 	targ = &sassc->targets[ccb->ccb_h.target_id];
2912 	if (targ->handle == 0x0) {
2913 		mps_dprint(sc, MPS_ERROR,
2914 			   "%s: target %d does not exist!\n", __func__,
2915 			   ccb->ccb_h.target_id);
2916 		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
2917 		xpt_done(ccb);
2918 		return;
2919 	}
2920 
2921 	/*
2922 	 * If this device has an embedded SMP target, we'll talk to it
2923 	 * directly; otherwise we need to figure out what the expander's
2924 	 * address is.
2925 	 */
2926 	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2927 		sasaddr = targ->sasaddr;
2928 
2929 	/*
2930 	 * If we don't have a SAS address for the expander yet, try
2931 	 * grabbing it from the page 0x83 information cached in the
2932 	 * transport layer for this target.  LSI expanders report the
2933 	 * expander SAS address as the port-associated SAS address in
2934 	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
2935 	 * 0x83.
2936 	 *
2937 	 * XXX KDM disable this for now, but leave it commented out so that
2938 	 * it is obvious that this is another possible way to get the SAS
2939 	 * address.
2940 	 *
2941 	 * The parent handle method below is a little more reliable, and
2942 	 * the other benefit is that it works for devices other than SES
2943 	 * devices.  So you can send a SMP request to a da(4) device and it
2944 	 * will get routed to the expander that device is attached to.
2945 	 * (Assuming the da(4) device doesn't contain an SMP target...)
2946 	 */
2947 #if 0
2948 	if (sasaddr == 0)
2949 		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2950 #endif
2951 
2952 	/*
2953 	 * If we still don't have a SAS address for the expander, look for
2954 	 * the parent device of this device, which is probably the expander.
2955 	 */
2956 	if (sasaddr == 0) {
2957 #ifdef OLD_MPS_PROBE
2958 		struct mpssas_target *parent_target;
2959 #endif
2960 
2961 		if (targ->parent_handle == 0x0) {
2962 			mps_dprint(sc, MPS_ERROR,
2963 				   "%s: handle %d does not have a valid "
2964 				   "parent handle!\n", __func__, targ->handle);
2965 			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2966 			goto bailout;
2967 		}
2968 #ifdef OLD_MPS_PROBE
2969 		parent_target = mpssas_find_target_by_handle(sassc, 0,
2970 			targ->parent_handle);
2971 
2972 		if (parent_target == NULL) {
2973 			mps_dprint(sc, MPS_ERROR,
2974 				   "%s: handle %d does not have a valid "
2975 				   "parent target!\n", __func__, targ->handle);
2976 			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2977 			goto bailout;
2978 		}
2979 
2980 		if ((parent_target->devinfo &
2981 		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2982 			mps_dprint(sc, MPS_ERROR,
2983 				   "%s: handle %d parent %d does not "
2984 				   "have an SMP target!\n", __func__,
2985 				   targ->handle, parent_target->handle);
2986 			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2987 			goto bailout;
2988 		}
2989 
2990 		sasaddr = parent_target->sasaddr;
2991 #else /* OLD_MPS_PROBE */
2992 		if ((targ->parent_devinfo &
2993 		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2994 			mps_dprint(sc, MPS_ERROR,
2995 				   "%s: handle %d parent %d does not "
2996 				   "have an SMP target!\n", __func__,
2997 				   targ->handle, targ->parent_handle);
2998 			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2999 			goto bailout;
3000 		}
3001 		if (targ->parent_sasaddr == 0x0) {
3002 			mps_dprint(sc, MPS_ERROR,
3003 				   "%s: handle %d parent handle %d does "
3004 				   "not have a valid SAS address!\n",
3005 				   __func__, targ->handle, targ->parent_handle);
3006 			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3007 			goto bailout;
3008 		}
3009 
3010 		sasaddr = targ->parent_sasaddr;
3011 #endif /* OLD_MPS_PROBE */
3012 	}
3013 
3014 	if (sasaddr == 0) {
3015 		mps_dprint(sc, MPS_INFO,
3016 			   "%s: unable to find SAS address for handle %d\n",
3017 			   __func__, targ->handle);
3018 		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3019 		goto bailout;
3020 	}
3021 	mpssas_send_smpcmd(sassc, ccb, sasaddr);
3022 
3023 	return;
3024 
3025 bailout:
3026 	xpt_done(ccb);
3027 
3028 }
3029 
3030 static void
3031 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3032 {
3033 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3034 	struct mps_softc *sc;
3035 	struct mps_command *tm;
3036 	struct mpssas_target *targ;
3037 
3038 	MPS_FUNCTRACE(sassc->sc);
3039 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3040 
3041 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3042 	    ("Target %d out of bounds in XPT_RESET_DEV\n",
3043 	     ccb->ccb_h.target_id));
3044 	sc = sassc->sc;
3045 	tm = mpssas_alloc_tm(sc);
3046 	if (tm == NULL) {
3047 		mps_dprint(sc, MPS_ERROR,
3048 		    "command alloc failure in mpssas_action_resetdev\n");
3049 		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3050 		xpt_done(ccb);
3051 		return;
3052 	}
3053 
3054 	targ = &sassc->targets[ccb->ccb_h.target_id];
3055 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3056 	req->DevHandle = htole16(targ->handle);
3057 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3058 
3059 	/* SAS Hard Link Reset / SATA Link Reset */
3060 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3061 
3062 	tm->cm_data = NULL;
3063 	tm->cm_complete = mpssas_resetdev_complete;
3064 	tm->cm_complete_data = ccb;
3065 	tm->cm_targ = targ;
3066 
3067 	mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
3068 	mps_map_command(sc, tm);
3069 }
3070 
3071 static void
3072 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3073 {
3074 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3075 	union ccb *ccb;
3076 
3077 	MPS_FUNCTRACE(sc);
3078 	mtx_assert(&sc->mps_mtx, MA_OWNED);
3079 
3080 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3081 	ccb = tm->cm_complete_data;
3082 
3083 	/*
3084 	 * Currently there should be no way we can hit this case.  It only
3085 	 * happens when we have a failure to allocate chain frames, and
3086 	 * task management commands don't have S/G lists.
3087 	 */
3088 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3089 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3090 
3091 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3092 
3093 		mps_dprint(sc, MPS_ERROR,
3094 			   "%s: cm_flags = %#x for reset of handle %#04x! "
3095 			   "This should not happen!\n", __func__, tm->cm_flags,
3096 			   req->DevHandle);
3097 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3098 		goto bailout;
3099 	}
3100 
3101 	mps_dprint(sc, MPS_XINFO,
3102 	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3103 	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3104 
3105 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3106 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
3107 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3108 		    CAM_LUN_WILDCARD);
3109 	}
3110 	else
3111 		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3112 
3113 bailout:
3114 
3115 	mpssas_free_tm(sc, tm);
3116 	xpt_done(ccb);
3117 }
3118 
3119 static void
3120 mpssas_poll(struct cam_sim *sim)
3121 {
3122 	struct mpssas_softc *sassc;
3123 
3124 	sassc = cam_sim_softc(sim);
3125 
3126 	if (sassc->sc->mps_debug & MPS_TRACE) {
3127 		/* frequent debug messages during a panic just slow
3128 		 * everything down too much.
3129 		 */
3130 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3131 		sassc->sc->mps_debug &= ~MPS_TRACE;
3132 	}
3133 
3134 	mps_intr_locked(sassc->sc);
3135 }
3136 
3137 static void
3138 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3139 	     void *arg)
3140 {
3141 	struct mps_softc *sc;
3142 
3143 	sc = (struct mps_softc *)callback_arg;
3144 
3145 	mps_lock(sc);
3146 	switch (code) {
3147 	case AC_ADVINFO_CHANGED: {
3148 		struct mpssas_target *target;
3149 		struct mpssas_softc *sassc;
3150 		struct scsi_read_capacity_data_long rcap_buf;
3151 		struct ccb_dev_advinfo cdai;
3152 		struct mpssas_lun *lun;
3153 		lun_id_t lunid;
3154 		int found_lun;
3155 		uintptr_t buftype;
3156 
3157 		buftype = (uintptr_t)arg;
3158 
3159 		found_lun = 0;
3160 		sassc = sc->sassc;
3161 
3162 		/*
3163 		 * We're only interested in read capacity data changes.
3164 		 */
3165 		if (buftype != CDAI_TYPE_RCAPLONG)
3166 			break;
3167 
3168 		/*
3169 		 * We should have a handle for this, but check to make sure.
3170 		 */
3171 		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3172 		    ("Target %d out of bounds in mpssas_async\n",
3173 		    xpt_path_target_id(path)));
3174 		target = &sassc->targets[xpt_path_target_id(path)];
3175 		if (target->handle == 0)
3176 			break;
3177 
3178 		lunid = xpt_path_lun_id(path);
3179 
3180 		SLIST_FOREACH(lun, &target->luns, lun_link) {
3181 			if (lun->lun_id == lunid) {
3182 				found_lun = 1;
3183 				break;
3184 			}
3185 		}
3186 
3187 		if (found_lun == 0) {
3188 			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3189 				     M_NOWAIT | M_ZERO);
3190 			if (lun == NULL) {
3191 				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
3192 					   "LUN for EEDP support.\n");
3193 				break;
3194 			}
3195 			lun->lun_id = lunid;
3196 			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3197 		}
3198 
3199 		bzero(&rcap_buf, sizeof(rcap_buf));
3200 		bzero(&cdai, sizeof(cdai));
3201 		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3202 		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3203 		cdai.ccb_h.flags = CAM_DIR_IN;
3204 		cdai.buftype = CDAI_TYPE_RCAPLONG;
3205 		cdai.flags = CDAI_FLAG_NONE;
3206 		cdai.bufsiz = sizeof(rcap_buf);
3207 		cdai.buf = (uint8_t *)&rcap_buf;
3208 		xpt_action((union ccb *)&cdai);
3209 		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3210 			cam_release_devq(cdai.ccb_h.path,
3211 					 0, 0, 0, FALSE);
3212 
3213 		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3214 		 && (rcap_buf.prot & SRC16_PROT_EN)) {
3215 			switch (rcap_buf.prot & SRC16_P_TYPE) {
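			/*
			 * Only protection types 1 and 3 are handled by the
			 * EEDP code in this driver; type 2 (and anything
			 * unrecognized) is treated like an unprotected LUN.
			 */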
3216 			case SRC16_PTYPE_1:
3217 			case SRC16_PTYPE_3:
3218 				lun->eedp_formatted = TRUE;
3219 				lun->eedp_block_size =
3220 				    scsi_4btoul(rcap_buf.length);
3221 				break;
3222 			case SRC16_PTYPE_2:
3223 			default:
3224 				lun->eedp_formatted = FALSE;
3225 				lun->eedp_block_size = 0;
3226 				break;
3227 			}
3228 		} else {
3229 			lun->eedp_formatted = FALSE;
3230 			lun->eedp_block_size = 0;
3231 		}
3232 		break;
3233 	}
3234 	default:
3235 		break;
3236 	}
3237 	mps_unlock(sc);
3238 }
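
/*
 * A rough sketch, not a verbatim copy of this driver's code (the actual
 * hookup lives in the attach path, not here), of how the async callback
 * above gets registered: the driver asks CAM for AC_ADVINFO_CHANGED
 * notifications so it can fetch the cached long READ CAPACITY data via
 * XPT_DEV_ADVINFO whenever it changes.  Approximately:
 *
 *	xpt_register_async(AC_ADVINFO_CHANGED, mpssas_async, sc, sassc->path);
 */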
3239 
3240 /*
3241  * Freeze the devq and set the INRESET flag so that no I/O will be sent to
3242  * the target until the reset has completed.  The CCB holds the path which
3243  * is used to release the devq.  The devq is released and the CCB is freed
3244  * when the TM completes.
3245  * We only need to do this when entering reset, not each time we need to
3246  * send an abort (which will happen if multiple commands time out while
3247  * we're sending the abort).  We do not release the queue for each
3248  * command we complete (just at the end when we free the tm), so freezing
3249  * it each time doesn't make sense.
3250  */
3251 void
3252 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3253     struct mpssas_target *target, lun_id_t lun_id)
3254 {
3255 	union ccb *ccb;
3256 	path_id_t path_id;
3257 
3258 	ccb = xpt_alloc_ccb_nowait();
3259 	if (ccb) {
3260 		path_id = cam_sim_path(sc->sassc->sim);
3261 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3262 		    target->tid, lun_id) != CAM_REQ_CMP) {
3263 			xpt_free_ccb(ccb);
3264 		} else {
3265 			tm->cm_ccb = ccb;
3266 			tm->cm_targ = target;
3267 			if ((target->flags & MPSSAS_TARGET_INRESET) == 0) {
3268 				mps_dprint(sc, MPS_XINFO | MPS_RECOVERY,
3269 				    "%s: Freezing devq for target ID %d\n",
3270 				    __func__, target->tid);
3271 				xpt_freeze_devq(ccb->ccb_h.path, 1);
3272 				target->flags |= MPSSAS_TARGET_INRESET;
3273 			}
3274 		}
3275 	}
3276 }
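
/*
 * A rough usage sketch, not a verbatim caller: the recovery paths in this
 * file allocate a task-management command, call mpssas_prepare_for_tm() to
 * freeze the devq and stash the CCB/target, fill in the TM request, and then
 * hand the command to the hardware.  Roughly:
 *
 *	tm = mpssas_alloc_tm(sc);
 *	if (tm != NULL) {
 *		mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
 *		(fill in the MPI2_SCSI_TASK_MANAGE_REQUEST fields)
 *		mps_map_command(sc, tm);
 *	}
 *
 * The exact fields filled in depend on the TM type (abort, LUN reset,
 * target reset) being issued.
 */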
3277 
3278 int
3279 mpssas_startup(struct mps_softc *sc)
3280 {
3281 
3282 	/*
3283 	 * Send the port enable message and set the wait_for_port_enable flag.
3284 	 * This flag helps to keep the simq frozen until all discovery events
3285 	 * are processed.
3286 	 */
3287 	sc->wait_for_port_enable = 1;
3288 	mpssas_send_portenable(sc);
3289 	return (0);
3290 }
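
/*
 * The matching release happens in mpssas_portenable_complete() below, which
 * calls mpssas_startup_decrement() once the firmware finishes port enable;
 * until then the simq stays frozen so no probe or I/O CCBs reach the targets
 * while discovery events are still being processed.
 */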
3291 
3292 static int
3293 mpssas_send_portenable(struct mps_softc *sc)
3294 {
3295 	MPI2_PORT_ENABLE_REQUEST *request;
3296 	struct mps_command *cm;
3297 
3298 	MPS_FUNCTRACE(sc);
3299 
3300 	if ((cm = mps_alloc_command(sc)) == NULL)
3301 		return (EBUSY);
3302 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3303 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3304 	request->MsgFlags = 0;
3305 	request->VP_ID = 0;
3306 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3307 	cm->cm_complete = mpssas_portenable_complete;
3308 	cm->cm_data = NULL;
3309 	cm->cm_sge = NULL;
3310 
3311 	mps_map_command(sc, cm);
3312 	mps_dprint(sc, MPS_XINFO,
3313 	    "mpssas_send_portenable finished cm %p req %p complete %p\n",
3314 	    cm, cm->cm_req, cm->cm_complete);
3315 	return (0);
3316 }
3317 
3318 static void
3319 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3320 {
3321 	MPI2_PORT_ENABLE_REPLY *reply;
3322 	struct mpssas_softc *sassc;
3323 
3324 	MPS_FUNCTRACE(sc);
3325 	sassc = sc->sassc;
3326 
3327 	/*
3328 	 * Currently there should be no way we can hit this case.  It only
3329 	 * happens when we have a failure to allocate chain frames, and
3330 	 * port enable commands don't have S/G lists.
3331 	 */
3332 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3333 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3334 			   "This should not happen!\n", __func__, cm->cm_flags);
3335 	}
3336 
3337 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3338 	if (reply == NULL)
3339 		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3340 	else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
3341 	    MPI2_IOCSTATUS_SUCCESS)
3342 		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3343 
3344 	mps_free_command(sc, cm);
3345 
3346 	/*
3347 	 * Get WarpDrive info after discovery is complete but before the scan
3348 	 * starts.  At this point, all devices are ready to be exposed to the
3349 	 * OS.  If devices should be hidden instead, take them out of the
3350 	 * 'targets' array before the scan.  A disk's devinfo will be non-zero
3351 	 * while a volume's will be 0; use that to pick out the disks to remove.
3352 	 */
3353 	mps_wd_config_pages(sc);
3354 
3355 	/*
3356 	 * Done waiting for port enable to complete.  Decrement the refcount.
3357 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3358 	 * take place.  Since the simq was explicitly frozen before port
3359 	 * enable, it must be explicitly released here to keep the
3360 	 * freeze/release count in sync.
3361 	 */
3362 	sc->wait_for_port_enable = 0;
3363 	sc->port_enable_complete = 1;
3364 	wakeup(&sc->port_enable_complete);
3365 	mpssas_startup_decrement(sassc);
3366 }
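
/*
 * Note: the wakeup() above pairs with code elsewhere in the driver that
 * sleeps on port_enable_complete while waiting for port enable (and thus
 * discovery) to finish before it proceeds.
 */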
3367 
3368 int
3369 mpssas_check_id(struct mpssas_softc *sassc, int id)
3370 {
3371 	struct mps_softc *sc = sassc->sc;
3372 	char *ids;
3373 	char *name;
3374 
3375 	ids = &sc->exclude_ids[0];
3376 	while((name = strsep(&ids, ",")) != NULL) {
3377 		if (name[0] == '\0')
3378 			continue;
3379 		if (strtol(name, NULL, 0) == (long)id)
3380 			return (1);
3381 	}
3382 
3383 	return (0);
3384 }
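
/*
 * Usage note (the tunable plumbing lives in the core driver, not in this
 * file): exclude_ids is a comma-separated list of target IDs, e.g. "3,7,12",
 * typically loaded from a loader tunable.  Any target whose ID matches an
 * entry in the list is skipped when devices are announced to CAM.
 */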
3385 
3386 void
3387 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3388 {
3389 	struct mpssas_softc *sassc;
3390 	struct mpssas_lun *lun, *lun_tmp;
3391 	struct mpssas_target *targ;
3392 	int i;
3393 
3394 	sassc = sc->sassc;
3395 	/*
3396 	 * The number of targets is based on IOC Facts, so free all of
3397 	 * the allocated LUNs for each target and then the target buffer
3398 	 * itself.
3399 	 */
3400 	for (i=0; i< maxtargets; i++) {
3401 		targ = &sassc->targets[i];
3402 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3403 			free(lun, M_MPT2);
3404 		}
3405 	}
3406 	free(sassc->targets, M_MPT2);
3407 
3408 	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3409 	    M_MPT2, M_WAITOK|M_ZERO);
3410 }
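
/*
 * Note: this is used on the controller re-initialization path, where a fresh
 * IOC Facts reply may report a different maximum target count than the one
 * the driver was originally sized for.  All previous per-target and per-LUN
 * state is discarded here and rebuilt as devices are rediscovered after the
 * reset.
 */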
3411