xref: /freebsd/sys/dev/mps/mps_sas.c (revision e453e498cbb88570a3ff7b3679de65c88707da95)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2009 Yahoo! Inc.
5  * Copyright (c) 2011-2015 LSI Corp.
6  * Copyright (c) 2013-2015 Avago Technologies
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
31  */
32 
33 #include <sys/cdefs.h>
34 /* Communications core for Avago Technologies (LSI) MPT2 */
35 
36 /* TODO Move headers to mpsvar */
37 #include <sys/types.h>
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/selinfo.h>
42 #include <sys/module.h>
43 #include <sys/bus.h>
44 #include <sys/conf.h>
45 #include <sys/bio.h>
46 #include <sys/malloc.h>
47 #include <sys/uio.h>
48 #include <sys/sysctl.h>
49 #include <sys/endian.h>
50 #include <sys/queue.h>
51 #include <sys/kthread.h>
52 #include <sys/taskqueue.h>
53 #include <sys/sbuf.h>
54 #include <sys/stdarg.h>
55 
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58 #include <sys/rman.h>
59 
60 #include <cam/cam.h>
61 #include <cam/cam_ccb.h>
62 #include <cam/cam_xpt.h>
63 #include <cam/cam_debug.h>
64 #include <cam/cam_sim.h>
65 #include <cam/cam_xpt_sim.h>
66 #include <cam/cam_xpt_periph.h>
67 #include <cam/cam_periph.h>
68 #include <cam/scsi/scsi_all.h>
69 #include <cam/scsi/scsi_message.h>
70 #include <cam/scsi/smp_all.h>
71 
72 #include <dev/mps/mpi/mpi2_type.h>
73 #include <dev/mps/mpi/mpi2.h>
74 #include <dev/mps/mpi/mpi2_ioc.h>
75 #include <dev/mps/mpi/mpi2_sas.h>
76 #include <dev/mps/mpi/mpi2_cnfg.h>
77 #include <dev/mps/mpi/mpi2_init.h>
78 #include <dev/mps/mpi/mpi2_tool.h>
79 #include <dev/mps/mps_ioctl.h>
80 #include <dev/mps/mpsvar.h>
81 #include <dev/mps/mps_table.h>
82 #include <dev/mps/mps_sas.h>
83 
84 /*
85  * static array to check SCSI OpCode for EEDP protection bits
86  */
87 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
88 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
89 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
90 static uint8_t op_code_prot[256] = {
91 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
92 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
93 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
94 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
95 	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
96 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
97 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
98 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
100 	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
102 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
107 };
108 
109 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
110 
111 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
112 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
113 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
114 static void mpssas_poll(struct cam_sim *sim);
115 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
116     struct mps_command *cm);
117 static void mpssas_scsiio_timeout(void *data);
118 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
119 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
120     struct mps_command *cm, union ccb *ccb);
121 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
122 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
123 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
124 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
125 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
126 			       uint64_t sasaddr);
127 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
128 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
129 static void mpssas_async(void *callback_arg, uint32_t code,
130 			 struct cam_path *path, void *arg);
131 static int mpssas_send_portenable(struct mps_softc *sc);
132 static void mpssas_portenable_complete(struct mps_softc *sc,
133     struct mps_command *cm);
134 
135 struct mpssas_target *
mpssas_find_target_by_handle(struct mpssas_softc * sassc,int start,uint16_t handle)136 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
137 {
138 	struct mpssas_target *target;
139 	int i;
140 
141 	for (i = start; i < sassc->maxtargets; i++) {
142 		target = &sassc->targets[i];
143 		if (target->handle == handle)
144 			return (target);
145 	}
146 
147 	return (NULL);
148 }
149 
150 /* we need to freeze the simq during attach and diag reset, to avoid failing
151  * commands before device handles have been found by discovery.  Since
152  * discovery involves reading config pages and possibly sending commands,
153  * discovery actions may continue even after we receive the end of discovery
154  * event, so refcount discovery actions instead of assuming we can unfreeze
155  * the simq when we get the event.
156  */
157 void
mpssas_startup_increment(struct mpssas_softc * sassc)158 mpssas_startup_increment(struct mpssas_softc *sassc)
159 {
160 	MPS_FUNCTRACE(sassc->sc);
161 
162 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
163 		if (sassc->startup_refcount++ == 0) {
164 			/* just starting, freeze the simq */
165 			mps_dprint(sassc->sc, MPS_INIT,
166 			    "%s freezing simq\n", __func__);
167 			xpt_hold_boot();
168 			xpt_freeze_simq(sassc->sim, 1);
169 		}
170 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
171 		    sassc->startup_refcount);
172 	}
173 }
174 
175 void
mpssas_release_simq_reinit(struct mpssas_softc * sassc)176 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
177 {
178 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
179 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
180 		xpt_release_simq(sassc->sim, 1);
181 		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
182 	}
183 }
184 
185 void
mpssas_startup_decrement(struct mpssas_softc * sassc)186 mpssas_startup_decrement(struct mpssas_softc *sassc)
187 {
188 	MPS_FUNCTRACE(sassc->sc);
189 
190 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
191 		if (--sassc->startup_refcount == 0) {
192 			/* finished all discovery-related actions, release
193 			 * the simq and rescan for the latest topology.
194 			 */
195 			mps_dprint(sassc->sc, MPS_INIT,
196 			    "%s releasing simq\n", __func__);
197 			sassc->flags &= ~MPSSAS_IN_STARTUP;
198 			xpt_release_simq(sassc->sim, 1);
199 			xpt_release_boot();
200 		}
201 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
202 		    sassc->startup_refcount);
203 	}
204 }
205 
206 /*
207  * The firmware requires us to stop sending commands when we're doing task
208  * management.
209  * XXX The logic for serializing the device has been made lazy and moved to
210  * mpssas_prepare_for_tm().
211  */
212 struct mps_command *
mpssas_alloc_tm(struct mps_softc * sc)213 mpssas_alloc_tm(struct mps_softc *sc)
214 {
215 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
216 	struct mps_command *tm;
217 
218 	tm = mps_alloc_high_priority_command(sc);
219 	if (tm == NULL)
220 		return (NULL);
221 
222 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
223 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
224 	return tm;
225 }
226 
/*
 * Return a task management command to the high-priority free list.  If a
 * CCB was attached when the TM was prepared, the target's devq was frozen;
 * undo that freeze and release the CCB resources before freeing the
 * command.  NULL is accepted and ignored.
 */
void
mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
{
	if (tm == NULL)
		return;

	/*
	 * For TM's the devq is frozen for the device.  Unfreeze it here and
	 * free the resources used for freezing the devq.  Must clear the
	 * INRESET flag as well or scsi I/O will not work.
	 */
	if (tm->cm_ccb) {
		mps_dprint(sc, MPS_XINFO | MPS_RECOVERY,
		    "Unfreezing devq for target ID %d\n",
		    tm->cm_targ->tid);
		tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
		/* Release the devq first, then tear down the path/CCB pair. */
		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
		xpt_free_path(tm->cm_ccb->ccb_h.path);
		xpt_free_ccb(tm->cm_ccb);
	}

	mps_free_high_priority_command(sc, tm);
}
250 
251 void
mpssas_rescan_target(struct mps_softc * sc,struct mpssas_target * targ)252 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
253 {
254 	struct mpssas_softc *sassc = sc->sassc;
255 	path_id_t pathid;
256 	target_id_t targetid;
257 	union ccb *ccb;
258 
259 	MPS_FUNCTRACE(sc);
260 	pathid = cam_sim_path(sassc->sim);
261 	if (targ == NULL)
262 		targetid = CAM_TARGET_WILDCARD;
263 	else
264 		targetid = targ - sassc->targets;
265 
266 	/*
267 	 * Allocate a CCB and schedule a rescan.
268 	 */
269 	ccb = xpt_alloc_ccb_nowait();
270 	if (ccb == NULL) {
271 		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
272 		return;
273 	}
274 
275 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
276 	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
277 		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
278 		xpt_free_ccb(ccb);
279 		return;
280 	}
281 
282 	if (targetid == CAM_TARGET_WILDCARD)
283 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
284 	else
285 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
286 
287 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
288 	xpt_rescan(ccb);
289 }
290 
/*
 * Debug logging helper: emit a printf-style message for a command,
 * prefixed with its CAM path (plus CDB and transfer length for SCSI I/O)
 * or, when no CCB is attached, a "noperiph" sim:bus:target:lun tuple.
 * Output is suppressed unless one of the 'level' bits is enabled in the
 * controller's mps_debug mask.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[224];	/* fixed on-stack buffer backing the sbuf */

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		/* Prefix with the CAM path the command is attached to. */
		xpt_path_sbuf(cm->cm_ccb->csio.ccb_h.path, &sb);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: synthesize a path-like prefix from the sim. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
333 
334 static void
mpssas_remove_volume(struct mps_softc * sc,struct mps_command * tm)335 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
336 {
337 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
338 	struct mpssas_target *targ;
339 	uint16_t handle;
340 
341 	MPS_FUNCTRACE(sc);
342 
343 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
344 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
345 	targ = tm->cm_targ;
346 
347 	if (reply == NULL) {
348 		/* XXX retry the remove after the diag reset completes? */
349 		mps_dprint(sc, MPS_FAULT,
350 		    "%s NULL reply resetting device 0x%04x\n", __func__,
351 		    handle);
352 		mpssas_free_tm(sc, tm);
353 		return;
354 	}
355 
356 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
357 	    MPI2_IOCSTATUS_SUCCESS) {
358 		mps_dprint(sc, MPS_ERROR,
359 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
360 		   le16toh(reply->IOCStatus), handle);
361 	}
362 
363 	mps_dprint(sc, MPS_XINFO,
364 	    "Reset aborted %u commands\n", reply->TerminationCount);
365 	mps_free_reply(sc, tm->cm_reply_data);
366 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
367 
368 	mps_dprint(sc, MPS_XINFO,
369 	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
370 
371 	/*
372 	 * Don't clear target if remove fails because things will get confusing.
373 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
374 	 * this target id if possible, and so we can assign the same target id
375 	 * to this device if it comes back in the future.
376 	 */
377 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
378 	    MPI2_IOCSTATUS_SUCCESS) {
379 		targ = tm->cm_targ;
380 		targ->handle = 0x0;
381 		targ->encl_handle = 0x0;
382 		targ->encl_slot = 0x0;
383 		targ->exp_dev_handle = 0x0;
384 		targ->phy_num = 0x0;
385 		targ->linkrate = 0x0;
386 		targ->devinfo = 0x0;
387 		targ->flags = 0x0;
388 	}
389 
390 	mpssas_free_tm(sc, tm);
391 }
392 
393 /*
394  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
395  * Otherwise Volume Delete is same as Bare Drive Removal.
396  */
397 void
mpssas_prepare_volume_remove(struct mpssas_softc * sassc,uint16_t handle)398 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
399 {
400 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
401 	struct mps_softc *sc;
402 	struct mps_command *tm;
403 	struct mpssas_target *targ = NULL;
404 
405 	MPS_FUNCTRACE(sassc->sc);
406 	sc = sassc->sc;
407 
408 #ifdef WD_SUPPORT
409 	/*
410 	 * If this is a WD controller, determine if the disk should be exposed
411 	 * to the OS or not.  If disk should be exposed, return from this
412 	 * function without doing anything.
413 	 */
414 	if (sc->WD_available && (sc->WD_hide_expose ==
415 	    MPS_WD_EXPOSE_ALWAYS)) {
416 		return;
417 	}
418 #endif //WD_SUPPORT
419 
420 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
421 	if (targ == NULL) {
422 		/* FIXME: what is the action? */
423 		/* We don't know about this device? */
424 		mps_dprint(sc, MPS_ERROR,
425 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
426 		return;
427 	}
428 
429 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
430 
431 	tm = mpssas_alloc_tm(sc);
432 	if (tm == NULL) {
433 		mps_dprint(sc, MPS_ERROR,
434 		    "%s: command alloc failure\n", __func__);
435 		return;
436 	}
437 
438 	mpssas_rescan_target(sc, targ);
439 
440 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
441 	req->DevHandle = targ->handle;
442 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
443 
444 	/* SAS Hard Link Reset / SATA Link Reset */
445 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
446 
447 	tm->cm_targ = targ;
448 	tm->cm_data = NULL;
449 	tm->cm_complete = mpssas_remove_volume;
450 	tm->cm_complete_data = (void *)(uintptr_t)handle;
451 
452 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
453 	    __func__, targ->tid);
454 	mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
455 
456 	mps_map_command(sc, tm);
457 }
458 
459 /*
460  * The MPT2 firmware performs debounce on the link to avoid transient link
461  * errors and false removals.  When it does decide that link has been lost
462  * and a device need to go away, it expects that the host will perform a
463  * target reset and then an op remove.  The reset has the side-effect of
464  * aborting any outstanding requests for the device, which is required for
465  * the op-remove to succeed.  It's not clear if the host should check for
466  * the device coming back alive after the reset.
467  */
468 void
mpssas_prepare_remove(struct mpssas_softc * sassc,uint16_t handle)469 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
470 {
471 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
472 	struct mps_softc *sc;
473 	struct mps_command *cm;
474 	struct mpssas_target *targ = NULL;
475 
476 	MPS_FUNCTRACE(sassc->sc);
477 
478 	sc = sassc->sc;
479 
480 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
481 	if (targ == NULL) {
482 		/* FIXME: what is the action? */
483 		/* We don't know about this device? */
484 		mps_dprint(sc, MPS_ERROR,
485 		    "%s : invalid handle 0x%x \n", __func__, handle);
486 		return;
487 	}
488 
489 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
490 
491 	cm = mpssas_alloc_tm(sc);
492 	if (cm == NULL) {
493 		mps_dprint(sc, MPS_ERROR,
494 		    "%s: command alloc failure\n", __func__);
495 		return;
496 	}
497 
498 	mpssas_rescan_target(sc, targ);
499 
500 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
501 	req->DevHandle = htole16(targ->handle);
502 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
503 
504 	/* SAS Hard Link Reset / SATA Link Reset */
505 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
506 
507 	cm->cm_targ = targ;
508 	cm->cm_data = NULL;
509 	cm->cm_complete = mpssas_remove_device;
510 	cm->cm_complete_data = (void *)(uintptr_t)handle;
511 
512 	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
513 	    __func__, targ->tid);
514 	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
515 
516 	mps_map_command(sc, cm);
517 }
518 
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_remove().  Reuses the same command to send the
 * SAS_IO_UNIT_CONTROL REMOVE_DEVICE operation, either immediately (no
 * commands outstanding for the target) or deferred via
 * targ->pending_remove_tm until the target's command list drains.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply resetting device 0x%04x\n", __func__,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_ERROR,
		   "IOCStatus = 0x%x while resetting device 0x%x\n",
		   le16toh(reply->IOCStatus), handle);
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	/* Free the reply frame now; the request buffer is reused below. */
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	/*
	 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
	 * They should be aborted or time out and we'll kick thus off there
	 * if so.
	 */
	if (TAILQ_FIRST(&targ->commands) == NULL) {
		mps_dprint(sc, MPS_INFO,
		    "No pending commands: starting remove_device target %u handle 0x%04x\n",
		    targ->tid, handle);
		mps_map_command(sc, tm);
		targ->pending_remove_tm = NULL;
	} else {
		targ->pending_remove_tm = tm;
	}

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
}
595 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE issued by
 * mpssas_remove_device().  On success, clears the target's firmware
 * state and frees its LUN list; on failure the target entry is left
 * intact (see comment below).  Always frees the TM command.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * At this point, we should have no pending commands for the target.
	 * The remove target has just completed.
	 */
	KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
	    ("%s: no commands should be pending\n", __func__));

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
			   "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Drop every LUN record attached to the departed target. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}

	mpssas_free_tm(sc, tm);
}
669 
670 static int
mpssas_register_events(struct mps_softc * sc)671 mpssas_register_events(struct mps_softc *sc)
672 {
673 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
674 
675 	bzero(events, 16);
676 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
677 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
678 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
679 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
680 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
681 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
682 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
683 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
684 	setbit(events, MPI2_EVENT_IR_VOLUME);
685 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
686 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
687 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
688 
689 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
690 	    &sc->sassc->mpssas_eh);
691 
692 	return (0);
693 }
694 
/*
 * Attach the SAS/CAM layer of the driver: allocate the mpssas softc and
 * target array, create the CAM SIM and simq, start the firmware-event
 * taskqueue, register the (single, faked) SCSI bus, freeze the simq
 * until discovery completes, and register async/EEDP and firmware event
 * handlers.  On any error, mps_detach_sas() unwinds partial state.
 *
 * Returns 0 on success or an errno on failure.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);

	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	sc->sassc = sassc;
	sassc->sc = sc;

	/* Size the simq below the request pool, reserving the
	 * high-priority requests and one spare. */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_INIT|MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_INIT|MPS_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mpssas_startup_increment(sassc);

	mps_unlock(sc);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_ERROR|MPS_INIT,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

		event = AC_ADVINFO_CHANGED;
		status = xpt_register_async(event, mpssas_async, sc,
					    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mpssas_register_events(sc);
out:
	/* mps_detach_sas() tolerates partially-initialized state. */
	if (error)
		mps_detach_sas(sc);

	mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
	return (error);
}
810 
/*
 * Tear down the SAS/CAM layer: deregister events and async handlers,
 * drain the event taskqueue (lock not held, to avoid deadlocking with
 * running tasks), unwind any startup simq freezes, deregister the bus,
 * and free the SIM, simq, LUN records, target array and softc.  Safe to
 * call on a partially-attached instance (used from the attach error
 * path).  Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	while (sassc->startup_refcount != 0)
		mpssas_startup_decrement(sassc);

	/* Drop the freeze taken at attach if discovery never finished. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free per-target LUN records before the target array itself. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
873 
874 void
mpssas_discovery_end(struct mpssas_softc * sassc)875 mpssas_discovery_end(struct mpssas_softc *sassc)
876 {
877 	struct mps_softc *sc = sassc->sc;
878 
879 	MPS_FUNCTRACE(sc);
880 
881 	/*
882 	 * After discovery has completed, check the mapping table for any
883 	 * missing devices and update their missing counts. Only do this once
884 	 * whenever the driver is initialized so that missing counts aren't
885 	 * updated unnecessarily. Note that just because discovery has
886 	 * completed doesn't mean that events have been processed yet. The
887 	 * check_devices function is a callout timer that checks if ALL devices
888 	 * are missing. If so, it will wait a little longer for events to
889 	 * complete and keep resetting itself until some device in the mapping
890 	 * table is not missing, meaning that event processing has started.
891 	 */
892 	if (sc->track_mapping_events) {
893 		mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
894 		    "completed. Check for missing devices in the mapping "
895 		    "table.\n");
896 		callout_reset(&sc->device_check_callout,
897 		    MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
898 		    sc);
899 	}
900 }
901 
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	/* CAM calls us with the SIM lock (the softc mutex) already held. */
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	/*
	 * Dispatch on the CCB function code.  Cases that "break" fall
	 * through to the common xpt_done() at the bottom; cases that
	 * "return" hand the CCB to a helper that presumably completes it
	 * itself (TODO confirm against the helpers' definitions).
	 */
	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report controller capabilities and transport info to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mps_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report per-target transport settings (SAS link rate). */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* handle == 0 means no device is currently at this target. */
		if (targ->handle == 0x0) {
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Translate the firmware link-rate code to a bitrate in kb/s. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			/* Unknown rate code: mark the speed field invalid. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* These are not implemented; report success to CAM anyway. */
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mpssas_action_scsiio(sassc, ccb);
		return;
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1021 
1022 static void
mpssas_announce_reset(struct mps_softc * sc,uint32_t ac_code,target_id_t target_id,lun_id_t lun_id)1023 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1024     target_id_t target_id, lun_id_t lun_id)
1025 {
1026 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1027 	struct cam_path *path;
1028 
1029 	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1030 	    ac_code, target_id, (uintmax_t)lun_id);
1031 
1032 	if (xpt_create_path(&path, NULL,
1033 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1034 		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1035 			   "notification\n");
1036 		return;
1037 	}
1038 
1039 	xpt_async(ac_code, path, NULL);
1040 	xpt_free_path(path);
1041 }
1042 
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPS_CM_STATE_FREE)
			continue;

		/* Force the command back to a known state with no reply. */
		cm->cm_state = MPS_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/*
		 * A SATA-identify command that already timed out owns its
		 * data buffer; release it here before completing.
		 */
		if (cm->cm_flags & MPS_CM_FLAGS_SATA_ID_TIMEOUT) {
			MPASS(cm->cm_data);
			free(cm->cm_data, M_MPT2);
			cm->cm_data = NULL;
		}

		/* Polled commands spin on COMPLETE; set it so they exit. */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			/* Run the completion callback with the NULL reply. */
			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			/* Wake a thread sleeping on this command. */
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	/* All outstanding I/O has been reaped. */
	sc->io_cmds_active = 0;
}
1098 
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	mpssas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_dprint(sc, MPS_INIT,
	    "%s startup %u after command completion\n", __func__,
	    sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		/* Outstanding counts should have been drained by the sweep
		 * above; log any that remain before clearing. */
		if (sc->sassc->targets[i].outstanding != 0)
			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		/* Mark the target as being in a diag reset so new I/O is
		 * requeued until it is rediscovered. */
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1142 
1143 static void
mpssas_tm_timeout(void * data)1144 mpssas_tm_timeout(void *data)
1145 {
1146 	struct mps_command *tm = data;
1147 	struct mps_softc *sc = tm->cm_sc;
1148 
1149 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1150 
1151 	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1152 	    "task mgmt %p timed out\n", tm);
1153 
1154 	KASSERT(tm->cm_state == MPS_CM_STATE_INQUEUE,
1155 	    ("command not inqueue, state = %u\n", tm->cm_state));
1156 
1157 	tm->cm_state = MPS_CM_STATE_BUSY;
1158 	mps_reinit(sc);
1159 }
1160 
/*
 * Completion handler for a LUN-reset task-management command.  If the
 * reset cleaned out all outstanding commands for the LUN, recovery for
 * this logical unit is finished; otherwise escalate to a target reset.
 */
static void
mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	unsigned int cm_count = 0;
	struct mps_command *cm;
	struct mpssas_target *targ;

	/* The TM completed, so cancel its timeout callout. */
	callout_stop(&tm->cm_callout);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 * XXXSL So should it be an assertion?
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_RECOVERY|MPS_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mps_dprint(sc, MPS_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mps_reinit(sc);
		}
		return;
	}

	mps_dprint(sc, MPS_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		/* Tell CAM a BDR was delivered to this target/LUN. */
		mpssas_announce_reset(sc, AC_SENT_BDR, targ->tid, tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
			    "More commands to abort for target %u\n",
			    targ->tid);
			/* Reuse this TM to abort the next timed-out command. */
			mpssas_send_abort(sc, tm, cm);
		} else {
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
	} else {
		/*
		 * If we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
	}
}
1258 
1259 static void
mpssas_target_reset_complete(struct mps_softc * sc,struct mps_command * tm)1260 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1261 {
1262 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1263 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1264 	struct mpssas_target *targ;
1265 
1266 	callout_stop(&tm->cm_callout);
1267 
1268 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1269 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1270 	targ = tm->cm_targ;
1271 
1272 	/*
1273 	 * Currently there should be no way we can hit this case.  It only
1274 	 * happens when we have a failure to allocate chain frames, and
1275 	 * task management commands don't have S/G lists.
1276 	 */
1277 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1278 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1279 			   "This should not happen!\n", __func__, tm->cm_flags);
1280 		mpssas_free_tm(sc, tm);
1281 		return;
1282 	}
1283 
1284 	if (reply == NULL) {
1285 		mps_dprint(sc, MPS_RECOVERY,
1286 		    "NULL target reset reply for tm %pi TaskMID %u\n",
1287 		    tm, le16toh(req->TaskMID));
1288 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1289 			/* this completion was due to a reset, just cleanup */
1290 			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1291 			    "reset, ignoring NULL target reset reply\n");
1292 			targ->tm = NULL;
1293 			mpssas_free_tm(sc, tm);
1294 		} else {
1295 			/* we should have gotten a reply. */
1296 			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1297 			    "target reset attempt, resetting controller\n");
1298 			mps_reinit(sc);
1299 		}
1300 		return;
1301 	}
1302 
1303 	mps_dprint(sc, MPS_RECOVERY,
1304 	    "target reset status 0x%x code 0x%x count %u\n",
1305 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1306 	    le32toh(reply->TerminationCount));
1307 
1308 	if (targ->outstanding == 0) {
1309 		/* we've finished recovery for this target and all
1310 		 * of its logical units.
1311 		 */
1312 		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1313 		    "Finished reset recovery for target %u\n", targ->tid);
1314 
1315 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1316 		    CAM_LUN_WILDCARD);
1317 
1318 		targ->tm = NULL;
1319 		mpssas_free_tm(sc, tm);
1320 	} else {
1321 		/*
1322 		 * After a target reset, if this target still has
1323 		 * outstanding commands, the reset effectively failed,
1324 		 * regardless of the status reported.  escalate.
1325 		 */
1326 		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1327 		    "Target reset complete for target %u, but still have %u "
1328 		    "command(s), resetting controller\n", targ->tid,
1329 		    targ->outstanding);
1330 		mps_reinit(sc);
1331 	}
1332 }
1333 
1334 #define MPS_RESET_TIMEOUT 30
1335 
/*
 * Issue a LUN-reset or target-reset task-management request for the
 * target attached to 'tm'.  Arms a MPS_RESET_TIMEOUT-second callout
 * (mpssas_tm_timeout) before mapping the command.  Returns 0 on
 * success, -1 for a missing handle or unknown reset type, or the
 * mps_map_command() error.
 */
int
mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *target;
	int err;

	target = tm->cm_targ;
	if (target->handle == 0) {
		/* No firmware device handle: nothing to send the TM to. */
		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
		    __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPS_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mpssas_logical_unit_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 * 	SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mpssas_target_reset_complete;
		/* Target reset covers every LUN on the target. */
		mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	} else {
		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* TM requests carry no data; completion data points back at the TM. */
	tm->cm_data = NULL;
	tm->cm_complete_data = (void *)tm;

	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
	    mpssas_tm_timeout, tm);

	err = mps_map_command(sc, tm);
	if (err)
		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
		    "error %d sending reset type %u\n",
		    err, type);

	return err;
}
1393 
/*
 * Completion handler for an ABORT_TASK task-management command.  Walks
 * the target's timed-out command list: if empty, recovery is done; if
 * the aborted command is gone but others remain, abort the next one; if
 * the same command is still at the head, the abort failed and we
 * escalate to a LUN reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed, so cancel its timeout callout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mps_dprint(sc, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mps_reinit(sc);
		}
		return;
	}

	mps_dprint(sc, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * If there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);

		/* Reuse the same TM command for the next abort. */
		mpssas_send_abort(sc, tm, cm);
	} else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mps_dprint(sc, MPS_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1475 
1476 #define MPS_ABORT_TIMEOUT 5
1477 
1478 static int
mpssas_send_abort(struct mps_softc * sc,struct mps_command * tm,struct mps_command * cm)1479 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1480 {
1481 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1482 	struct mpssas_target *targ;
1483 	int err;
1484 
1485 	targ = cm->cm_targ;
1486 	if (targ->handle == 0) {
1487 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1488 		    "%s null devhandle for target_id %d\n",
1489 		    __func__, cm->cm_ccb->ccb_h.target_id);
1490 		return -1;
1491 	}
1492 
1493 	mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1494 	    "Aborting command %p\n", cm);
1495 
1496 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1497 	req->DevHandle = htole16(targ->handle);
1498 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1499 
1500 	/* XXX Need to handle invalid LUNs */
1501 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1502 
1503 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1504 
1505 	tm->cm_data = NULL;
1506 	tm->cm_complete = mpssas_abort_complete;
1507 	tm->cm_complete_data = (void *)tm;
1508 	tm->cm_targ = cm->cm_targ;
1509 	tm->cm_lun = cm->cm_lun;
1510 
1511 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1512 	    mpssas_tm_timeout, tm);
1513 
1514 	targ->aborts++;
1515 
1516 	mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1517 
1518 	err = mps_map_command(sc, tm);
1519 	if (err)
1520 		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1521 		    "error %d sending abort for cm %p SMID %u\n",
1522 		    err, cm, req->TaskMID);
1523 	return err;
1524 }
1525 
/*
 * Callout handler fired when a SCSI I/O command exceeds its CAM timeout.
 * Moves the command onto the target's timed-out list and starts abort
 * recovery if a task-management slot is available.
 */
static void
mpssas_scsiio_timeout(void *data)
{
	sbintime_t elapsed, now;
	union ccb *ccb;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ;

	cm = (struct mps_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
	now = sbinuptime();

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	mps_dprint(sc, MPS_XINFO|MPS_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mps_intr_locked(sc);
	if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
		/* The interrupt pass completed it; no recovery needed. */
		mpssas_log_command(cm, MPS_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	/* qos.sim_data was stamped with sbinuptime() at submission. */
	elapsed = now - ccb->ccb_h.qos.sim_data;
	mpssas_log_command(cm, MPS_INFO|MPS_RECOVERY,
	    "Command timeout on target %u(0x%04x) %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_flags |= MPS_CM_FLAGS_ON_RECOVERY | MPS_CM_FLAGS_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mps_dprint(sc, MPS_RECOVERY,
		    "queued timedout cm %p for processing by tm %p\n",
		    cm, targ->tm);
	} else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
		mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mpssas_send_abort(sc, targ->tm, cm);
	} else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}

}
1609 
/*
 * Handle an XPT_SCSI_IO CCB from CAM: validate the target, build an
 * MPI2 SCSI_IO request (including optional EEDP protection-information
 * setup), arm the per-command timeout, and hand the command to the
 * firmware.  Completes the CCB immediately on any validation failure.
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	if (targ->handle == 0x0) {
		if (targ->flags & MPSSAS_TARGET_INDIAGRESET) {
			/*
			 * Target is mid diag-reset: freeze the devq and
			 * requeue so the I/O is retried after rediscovery.
			 */
			mps_dprint(sc, MPS_ERROR,
			    "%s NULL handle for target %u in diag reset freezing queue\n",
			    __func__, csio->ccb_h.target_id);
			ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			xpt_done(ccb);
			return;
		}
		/* No device handle and no reset in progress: device is gone. */
		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		/* Direct SCSI I/O to RAID member disks is not allowed. */
		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed. In either event,
	 * we're removing the target due to a firmware event telling us
	 * the device is now gone (as opposed to some transient event). Since
	 * we're opting to remove failed devices from the OS's view, we need
	 * to propagate that status up the stack.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, the devq should be frozen.
	 * Geting here we likely hit a race, so just requeue.
	 */
	if (targ->flags & MPSSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
		mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
		/* Out of commands (or diag reset underway): freeze the simq
		 * and ask CAM to requeue this I/O later. */
		if (cm != NULL) {
			mps_free_command(sc, cm);
		}
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Build the MPI2 SCSI_IO request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* 32-byte CDBs need the additional-CDB-length field set. */
	if (csio->cdb_len == 32)
                mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= (csio->priority << MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT) &
	    MPI2_SCSIIO_CONTROL_TASKPRI_MASK;
	/* Merge in the per-target Transport Layer Retries setting. */
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB from wherever CAM put it. */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 in 16-byte CDBs,
				 * byte 2 otherwise. */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				/* Set RDPROTECT/WRPROTECT bits in CDB byte 1. */
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		/* The busdma load path pulls the data pointers from the CCB. */
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (csio->bio != NULL)
		biotrack(csio->bio, __func__);
#endif
	/* Stamp submission time for the timeout handler's elapsed report. */
	csio->ccb_h.qos.sim_data = sbinuptime();
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mpssas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1898 
1899 /**
1900  * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
1901  */
1902 static void
mps_sc_failed_io_info(struct mps_softc * sc,struct ccb_scsiio * csio,Mpi2SCSIIOReply_t * mpi_reply)1903 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1904     Mpi2SCSIIOReply_t *mpi_reply)
1905 {
1906 	u32 response_info;
1907 	u8 *response_bytes;
1908 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1909 	    MPI2_IOCSTATUS_MASK;
1910 	u8 scsi_state = mpi_reply->SCSIState;
1911 	u8 scsi_status = mpi_reply->SCSIStatus;
1912 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1913 	const char *desc_ioc_state, *desc_scsi_status;
1914 
1915 	if (log_info == 0x31170000)
1916 		return;
1917 
1918 	desc_ioc_state = mps_describe_table(mps_iocstatus_string,
1919 	    ioc_status);
1920 	desc_scsi_status = mps_describe_table(mps_scsi_status_string,
1921 	    scsi_status);
1922 
1923 	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
1924 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
1925 
1926 	/*
1927 	 *We can add more detail about underflow data here
1928 	 * TO-DO
1929 	 */
1930 	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
1931 	    "scsi_state %b\n", desc_scsi_status, scsi_status,
1932 	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
1933 	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
1934 
1935 	if (sc->mps_debug & MPS_XINFO &&
1936 		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1937 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
1938 		scsi_sense_print(csio);
1939 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
1940 	}
1941 
1942 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
1943 		response_info = le32toh(mpi_reply->ResponseInfo);
1944 		response_bytes = (u8 *)&response_info;
1945 		mps_dprint(sc, MPS_XINFO, "response code(0x%1x): %s\n",
1946 		    response_bytes[0],
1947 		    mps_describe_table(mps_scsi_taskmgmt_string,
1948 		    response_bytes[0]));
1949 	}
1950 }
1951 
/*
 * Completion handler for SCSI I/O commands (installed as cm_complete when
 * the command is issued).  Called with the softc mutex held.  Cancels the
 * I/O timeout, syncs/unloads DMA, retires the command from the target's
 * accounting, translates the firmware reply into a CAM status, and
 * completes the CCB via xpt_done().
 */
static void
mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpssas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;
	struct mpssas_target *target;
	target_id_t target_id;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* Cancel the per-command timeout now that the command is back. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;
	/* rep may be NULL (fast path below) when no reply frame was posted. */
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Retire the command from the target's accounting and queues. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	/* Clear any prior CAM status and the SIM-queued flag. */
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (ccb->csio.bio != NULL)
		biotrack(ccb->csio.bio, __func__);
#endif

	/*
	 * If the command timed out and is on the recovery queue, take it
	 * off and log how it completed.  Commands completing while a task
	 * management request or a diag reset is in flight are also logged.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		KASSERT(cm->cm_state == MPS_CM_STATE_BUSY,
		    ("Not busy for CM_FLAGS_TIMEDOUT: %u\n", cm->cm_state));
		cm->cm_flags &= ~MPS_CM_FLAGS_ON_RECOVERY;
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm, MPS_RECOVERY,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_XINFO, "Error sending command, "
				   "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				/* No reply frame means success with no data. */
				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mpssas_log_command(cm, MPS_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	} else
		ccb->ccb_h.sim_priv.entries[0].field = 0;

	/* Slow path: translate the IOC status in the reply frame. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) &
		    MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		/* Copy any autosense data into the CCB, clamped to its size. */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
					returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			      sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* Allocation length is CDB bytes 3-4, big-endian. */
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}

		/*
		 * If this is a SATA direct-access end device, mark it so that
		 * a SCSI StartStopUnit command will be sent to it when the
		 * driver is being shutdown.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
		    ((sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
			target = &sassc->targets[target_id];
			target->supports_SSU = TRUE;
			mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
			    target_id);
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)
;
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_flags & MPS_CM_FLAGS_TIMEDOUT)
			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * These can sometimes be transient transport-related
		 * errors, and sometimes persistent drive-related errors.
		 * We used to retry these without decrementing the retry
		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
		 * we hit a persistent drive problem that returns one of
		 * these error codes, we would retry indefinitely.  So,
		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
		 * count and avoid infinite retries.  We're taking the
		 * potential risk of flagging false failures in the event
		 * of a topology-related error (e.g. a SAS expander problem
		 * causes a command addressed to a drive to fail), but
		 * avoiding getting into an infinite retry loop. However,
		 * if we get them while were moving a device, we should
		 * fail the request as 'not there' because the device
		 * is effectively gone.
		 */
		if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL)
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		mps_dprint(sc, MPS_INFO,
		    "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
		    mps_describe_table(mps_iocstatus_string,
		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
		    target_id, cm->cm_desc.Default.SMID,
		    le32toh(rep->IOCLogInfo),
		    (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) ? " departing" : "");
		mps_dprint(sc, MPS_XINFO,
		    "SCSIStatus %x SCSIState %x xfercount %u\n",
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mpssas_log_command(cm, MPS_XINFO,
		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

	/* Emit verbose (MPS_XINFO) details about the completion status. */
	mps_sc_failed_io_info(sc,csio,rep);

	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_XINFO, "Command completed, "
		    "unfreezing SIM queue\n");
	}

	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	/*
	 * Check to see if we're removing the device. If so, and this is the
	 * last command on the queue, proceed with the deferred removal of the
	 * device.  Note, for removing a volume, this won't trigger because
	 * pending_remove_tm will be NULL.
	 */
	if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
		    cm->cm_targ->pending_remove_tm != NULL) {
			mps_dprint(sc, MPS_INFO,
			    "Last pending command complete: starting remove_device target %u handle 0x%04x\n",
			    cm->cm_targ->tid, cm->cm_targ->handle);
			mps_map_command(sc, cm->cm_targ->pending_remove_tm);
			cm->cm_targ->pending_remove_tm = NULL;
		}
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2391 
2392 /* All Request reached here are Endian safe */
2393 static void
mpssas_direct_drive_io(struct mpssas_softc * sassc,struct mps_command * cm,union ccb * ccb)2394 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2395     union ccb *ccb) {
2396 	pMpi2SCSIIORequest_t	pIO_req;
2397 	struct mps_softc	*sc = sassc->sc;
2398 	uint64_t		virtLBA;
2399 	uint32_t		physLBA, stripe_offset, stripe_unit;
2400 	uint32_t		io_size, column;
2401 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2402 
2403 	/*
2404 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2405 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2406 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2407 	 * bit different than the 10/16 CDBs, handle them separately.
2408 	 */
2409 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2410 	CDB = pIO_req->CDB.CDB32;
2411 
2412 	/*
2413 	 * Handle 6 byte CDBs.
2414 	 */
2415 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2416 	    (CDB[0] == WRITE_6))) {
2417 		/*
2418 		 * Get the transfer size in blocks.
2419 		 */
2420 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2421 
2422 		/*
2423 		 * Get virtual LBA given in the CDB.
2424 		 */
2425 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2426 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2427 
2428 		/*
2429 		 * Check that LBA range for I/O does not exceed volume's
2430 		 * MaxLBA.
2431 		 */
2432 		if ((virtLBA + (uint64_t)io_size - 1) <=
2433 		    sc->DD_max_lba) {
2434 			/*
2435 			 * Check if the I/O crosses a stripe boundary.  If not,
2436 			 * translate the virtual LBA to a physical LBA and set
2437 			 * the DevHandle for the PhysDisk to be used.  If it
2438 			 * does cross a boundary, do normal I/O.  To get the
2439 			 * right DevHandle to use, get the map number for the
2440 			 * column, then use that map number to look up the
2441 			 * DevHandle of the PhysDisk.
2442 			 */
2443 			stripe_offset = (uint32_t)virtLBA &
2444 			    (sc->DD_stripe_size - 1);
2445 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2446 				physLBA = (uint32_t)virtLBA >>
2447 				    sc->DD_stripe_exponent;
2448 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2449 				column = physLBA % sc->DD_num_phys_disks;
2450 				pIO_req->DevHandle =
2451 				    htole16(sc->DD_column_map[column].dev_handle);
2452 				/* ???? Is this endian safe*/
2453 				cm->cm_desc.SCSIIO.DevHandle =
2454 				    pIO_req->DevHandle;
2455 
2456 				physLBA = (stripe_unit <<
2457 				    sc->DD_stripe_exponent) + stripe_offset;
2458 				ptrLBA = &pIO_req->CDB.CDB32[1];
2459 				physLBA_byte = (uint8_t)(physLBA >> 16);
2460 				*ptrLBA = physLBA_byte;
2461 				ptrLBA = &pIO_req->CDB.CDB32[2];
2462 				physLBA_byte = (uint8_t)(physLBA >> 8);
2463 				*ptrLBA = physLBA_byte;
2464 				ptrLBA = &pIO_req->CDB.CDB32[3];
2465 				physLBA_byte = (uint8_t)physLBA;
2466 				*ptrLBA = physLBA_byte;
2467 
2468 				/*
2469 				 * Set flag that Direct Drive I/O is
2470 				 * being done.
2471 				 */
2472 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2473 			}
2474 		}
2475 		return;
2476 	}
2477 
2478 	/*
2479 	 * Handle 10, 12 or 16 byte CDBs.
2480 	 */
2481 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2482 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2483 	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2484 	    (CDB[0] == WRITE_12))) {
2485 		/*
2486 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2487 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2488 		 * the else section.  10-byte and 12-byte CDB's are OK.
2489 		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2490 		 * ready to accept 12byte CDB for Direct IOs.
2491 		 */
2492 		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2493 		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2494 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2495 			/*
2496 			 * Get the transfer size in blocks.
2497 			 */
2498 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2499 
2500 			/*
2501 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2502 			 * LBA in the CDB depending on command.
2503 			 */
2504 			lba_idx = ((CDB[0] == READ_12) ||
2505 				(CDB[0] == WRITE_12) ||
2506 				(CDB[0] == READ_10) ||
2507 				(CDB[0] == WRITE_10))? 2 : 6;
2508 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2509 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2510 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2511 			    (uint64_t)CDB[lba_idx + 3];
2512 
2513 			/*
2514 			 * Check that LBA range for I/O does not exceed volume's
2515 			 * MaxLBA.
2516 			 */
2517 			if ((virtLBA + (uint64_t)io_size - 1) <=
2518 			    sc->DD_max_lba) {
2519 				/*
2520 				 * Check if the I/O crosses a stripe boundary.
2521 				 * If not, translate the virtual LBA to a
2522 				 * physical LBA and set the DevHandle for the
2523 				 * PhysDisk to be used.  If it does cross a
2524 				 * boundary, do normal I/O.  To get the right
2525 				 * DevHandle to use, get the map number for the
2526 				 * column, then use that map number to look up
2527 				 * the DevHandle of the PhysDisk.
2528 				 */
2529 				stripe_offset = (uint32_t)virtLBA &
2530 				    (sc->DD_stripe_size - 1);
2531 				if ((stripe_offset + io_size) <=
2532 				    sc->DD_stripe_size) {
2533 					physLBA = (uint32_t)virtLBA >>
2534 					    sc->DD_stripe_exponent;
2535 					stripe_unit = physLBA /
2536 					    sc->DD_num_phys_disks;
2537 					column = physLBA %
2538 					    sc->DD_num_phys_disks;
2539 					pIO_req->DevHandle =
2540 					    htole16(sc->DD_column_map[column].
2541 					    dev_handle);
2542 					cm->cm_desc.SCSIIO.DevHandle =
2543 					    pIO_req->DevHandle;
2544 
2545 					physLBA = (stripe_unit <<
2546 					    sc->DD_stripe_exponent) +
2547 					    stripe_offset;
2548 					ptrLBA =
2549 					    &pIO_req->CDB.CDB32[lba_idx];
2550 					physLBA_byte = (uint8_t)(physLBA >> 24);
2551 					*ptrLBA = physLBA_byte;
2552 					ptrLBA =
2553 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2554 					physLBA_byte = (uint8_t)(physLBA >> 16);
2555 					*ptrLBA = physLBA_byte;
2556 					ptrLBA =
2557 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2558 					physLBA_byte = (uint8_t)(physLBA >> 8);
2559 					*ptrLBA = physLBA_byte;
2560 					ptrLBA =
2561 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2562 					physLBA_byte = (uint8_t)physLBA;
2563 					*ptrLBA = physLBA_byte;
2564 
2565 					/*
2566 					 * Set flag that Direct Drive I/O is
2567 					 * being done.
2568 					 */
2569 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2570 				}
2571 			}
2572 		} else {
2573 			/*
2574 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2575 			 * 0.  Get the transfer size in blocks.
2576 			 */
2577 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2578 
2579 			/*
2580 			 * Get virtual LBA.
2581 			 */
2582 			virtLBA = ((uint64_t)CDB[2] << 54) |
2583 			    ((uint64_t)CDB[3] << 48) |
2584 			    ((uint64_t)CDB[4] << 40) |
2585 			    ((uint64_t)CDB[5] << 32) |
2586 			    ((uint64_t)CDB[6] << 24) |
2587 			    ((uint64_t)CDB[7] << 16) |
2588 			    ((uint64_t)CDB[8] << 8) |
2589 			    (uint64_t)CDB[9];
2590 
2591 			/*
2592 			 * Check that LBA range for I/O does not exceed volume's
2593 			 * MaxLBA.
2594 			 */
2595 			if ((virtLBA + (uint64_t)io_size - 1) <=
2596 			    sc->DD_max_lba) {
2597 				/*
2598 				 * Check if the I/O crosses a stripe boundary.
2599 				 * If not, translate the virtual LBA to a
2600 				 * physical LBA and set the DevHandle for the
2601 				 * PhysDisk to be used.  If it does cross a
2602 				 * boundary, do normal I/O.  To get the right
2603 				 * DevHandle to use, get the map number for the
2604 				 * column, then use that map number to look up
2605 				 * the DevHandle of the PhysDisk.
2606 				 */
2607 				stripe_offset = (uint32_t)virtLBA &
2608 				    (sc->DD_stripe_size - 1);
2609 				if ((stripe_offset + io_size) <=
2610 				    sc->DD_stripe_size) {
2611 					physLBA = (uint32_t)(virtLBA >>
2612 					    sc->DD_stripe_exponent);
2613 					stripe_unit = physLBA /
2614 					    sc->DD_num_phys_disks;
2615 					column = physLBA %
2616 					    sc->DD_num_phys_disks;
2617 					pIO_req->DevHandle =
2618 					    htole16(sc->DD_column_map[column].
2619 					    dev_handle);
2620 					cm->cm_desc.SCSIIO.DevHandle =
2621 					    pIO_req->DevHandle;
2622 
2623 					physLBA = (stripe_unit <<
2624 					    sc->DD_stripe_exponent) +
2625 					    stripe_offset;
2626 
2627 					/*
2628 					 * Set upper 4 bytes of LBA to 0.  We
2629 					 * assume that the phys disks are less
2630 					 * than 2 TB's in size.  Then, set the
2631 					 * lower 4 bytes.
2632 					 */
2633 					pIO_req->CDB.CDB32[2] = 0;
2634 					pIO_req->CDB.CDB32[3] = 0;
2635 					pIO_req->CDB.CDB32[4] = 0;
2636 					pIO_req->CDB.CDB32[5] = 0;
2637 					ptrLBA = &pIO_req->CDB.CDB32[6];
2638 					physLBA_byte = (uint8_t)(physLBA >> 24);
2639 					*ptrLBA = physLBA_byte;
2640 					ptrLBA = &pIO_req->CDB.CDB32[7];
2641 					physLBA_byte = (uint8_t)(physLBA >> 16);
2642 					*ptrLBA = physLBA_byte;
2643 					ptrLBA = &pIO_req->CDB.CDB32[8];
2644 					physLBA_byte = (uint8_t)(physLBA >> 8);
2645 					*ptrLBA = physLBA_byte;
2646 					ptrLBA = &pIO_req->CDB.CDB32[9];
2647 					physLBA_byte = (uint8_t)physLBA;
2648 					*ptrLBA = physLBA_byte;
2649 
2650 					/*
2651 					 * Set flag that Direct Drive I/O is
2652 					 * being done.
2653 					 */
2654 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2655 				}
2656 			}
2657 		}
2658 	}
2659 }
2660 
/*
 * Completion handler for an SMP passthrough command issued by
 * mpssas_send_smpcmd().  Translates the MPI2 reply (or its absence) into a
 * CAM status on the originating XPT_SMP_IO CCB, then tears down the DMA
 * mapping, frees the command, and completes the CCB.
 */
static void
mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	/* The XPT_SMP_IO CCB was stashed here when the command was built. */
	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
			   __func__, cm->cm_flags);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
        }

	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/*
	 * Reassemble the 64-bit SAS address from the two little-endian
	 * 32-bit halves stored in the request frame; used for logging only.
	 */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
		   "%#jx completed successfully\n", __func__,
		   (uintmax_t)sasaddr);

	/*
	 * Byte 2 of an SMP response frame holds the SMP function result.
	 * NOTE(review): this reads the DMA'd response buffer before the
	 * POSTREAD sync below; fine on cache-coherent platforms, but worth
	 * confirming on platforms with non-coherent DMA.
	 */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2724 
/*
 * Build and issue an MPI2 SMP passthrough request for the XPT_SMP_IO CCB
 * 'ccb', addressed to the SMP target at 'sasaddr'.  The request and
 * response buffers are described by a two-element uio so a single
 * mps_map_command() call maps both; completion is handled asynchronously
 * by mpssas_smpio_complete().  On any setup failure the CCB is completed
 * here with an appropriate error status.
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
			   "%s: physical addresses not supported\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			/* Single-entry S/G list: use its (virtual) address. */
			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/*
	 * iovec[0] is the outbound request, iovec[1] receives the response.
	 * (le16toh() undoes the htole16() above; a no-op on little-endian.)
	 */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;

}
2895 
/*
 * Handle an XPT_SMP_IO CCB: determine which SAS address should receive the
 * SMP request.  If the target itself contains an SMP target (e.g. an
 * expander or SES device) its own address is used; otherwise the request
 * is routed to the target's parent (normally the attached expander).  On
 * success the request is handed to mpssas_send_smpcmd(); on failure the
 * CCB is completed here with an error status.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
			   "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		/* Old probe code: look the parent up by its device handle. */
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/* Current probe code caches parent info on the target. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
				   "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */
	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
			   "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3028 
3029 static void
mpssas_action_resetdev(struct mpssas_softc * sassc,union ccb * ccb)3030 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3031 {
3032 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3033 	struct mps_softc *sc;
3034 	struct mps_command *tm;
3035 	struct mpssas_target *targ;
3036 
3037 	MPS_FUNCTRACE(sassc->sc);
3038 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3039 
3040 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3041 	    ("Target %d out of bounds in XPT_RESET_DEV\n",
3042 	     ccb->ccb_h.target_id));
3043 	sc = sassc->sc;
3044 	tm = mpssas_alloc_tm(sc);
3045 	if (tm == NULL) {
3046 		mps_dprint(sc, MPS_ERROR,
3047 		    "command alloc failure in mpssas_action_resetdev\n");
3048 		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3049 		xpt_done(ccb);
3050 		return;
3051 	}
3052 
3053 	targ = &sassc->targets[ccb->ccb_h.target_id];
3054 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3055 	req->DevHandle = htole16(targ->handle);
3056 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3057 
3058 	/* SAS Hard Link Reset / SATA Link Reset */
3059 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3060 
3061 	tm->cm_data = NULL;
3062 	tm->cm_complete = mpssas_resetdev_complete;
3063 	tm->cm_complete_data = ccb;
3064 	tm->cm_targ = targ;
3065 
3066 	mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
3067 	mps_map_command(sc, tm);
3068 }
3069 
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_action_resetdev().  Translates the TM reply into a CAM status on
 * the original XPT_RESET_DEV CCB and announces a bus-device reset to CAM
 * on success.
 */
static void
mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mps_dprint(sc, MPS_ERROR,
			   "%s: cm_flags = %#x for reset of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   req->DevHandle);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/*
	 * NOTE(review): ResponseCode is a single-byte field in the MPI2 TM
	 * reply, so the le32toh() conversions applied to it here look
	 * suspect for big-endian hosts (they are no-ops on little-endian).
	 * Confirm against the mpi2_init.h reply layout.
	 */
	mps_dprint(sc, MPS_XINFO,
	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		/* Tell CAM a BDR was sent so peripherals can re-sync state. */
		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

bailout:

	mpssas_free_tm(sc, tm);
	xpt_done(ccb);
}
3117 
3118 static void
mpssas_poll(struct cam_sim * sim)3119 mpssas_poll(struct cam_sim *sim)
3120 {
3121 	struct mpssas_softc *sassc;
3122 
3123 	sassc = cam_sim_softc(sim);
3124 
3125 	if (sassc->sc->mps_debug & MPS_TRACE) {
3126 		/* frequent debug messages during a panic just slow
3127 		 * everything down too much.
3128 		 */
3129 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3130 		sassc->sc->mps_debug &= ~MPS_TRACE;
3131 	}
3132 
3133 	mps_intr_locked(sassc->sc);
3134 }
3135 
/*
 * CAM async event callback.  The driver uses AC_ADVINFO_CHANGED with the
 * long read-capacity buffer to detect whether a LUN is formatted with
 * SCSI protection information (EEDP), recording the result (and block
 * size) on the per-target LUN list for later use when building I/Os.
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	mps_lock(sc);
	switch (code) {
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		/* For AC_ADVINFO_CHANGED, 'arg' carries the advinfo buffer type. */
		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the LUN record for this path, if we already have one. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read-capacity data via an
		 * XPT_DEV_ADVINFO CCB issued on the stack.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		bzero(&cdai, sizeof(cdai));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_NONE;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/*
		 * Protection types 1 and 3 are usable for EEDP; type 2
		 * (and anything unknown) is treated as unformatted.
		 */
		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
	default:
		break;
	}
	mps_unlock(sc);
}
3238 
3239 /*
3240  * Freeze the devq and set the INRESET flag so that no I/O will be sent to
3241  * the target until the reset has completed.  The CCB holds the path which
3242  * is used to release the devq.  The devq is released and the CCB is freed
3243  * when the TM completes.
3244  * We only need to do this when we're entering reset, not at each time we
3245  * need to send an abort (which will happen if multiple commands timeout
3246  * while we're sending the abort). We do not release the queue for each
3247  * command we complete (just at the end when we free the tm), so freezing
3248  * it each time doesn't make sense.
3249  */
3250 void
mpssas_prepare_for_tm(struct mps_softc * sc,struct mps_command * tm,struct mpssas_target * target,lun_id_t lun_id)3251 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3252     struct mpssas_target *target, lun_id_t lun_id)
3253 {
3254 	union ccb *ccb;
3255 	path_id_t path_id;
3256 
3257 	ccb = xpt_alloc_ccb_nowait();
3258 	if (ccb) {
3259 		path_id = cam_sim_path(sc->sassc->sim);
3260 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3261 		    target->tid, lun_id) != CAM_REQ_CMP) {
3262 			xpt_free_ccb(ccb);
3263 		} else {
3264 			tm->cm_ccb = ccb;
3265 			tm->cm_targ = target;
3266 			if ((target->flags & MPSSAS_TARGET_INRESET) == 0) {
3267 				mps_dprint(sc, MPS_XINFO | MPS_RECOVERY,
3268 				    "%s: Freezing devq for target ID %d\n",
3269 				    __func__, target->tid);
3270 				xpt_freeze_devq(ccb->ccb_h.path, 1);
3271 				target->flags |= MPSSAS_TARGET_INRESET;
3272 			}
3273 		}
3274 	}
3275 }
3276 
/*
 * Kick off topology discovery by issuing the IOC port enable request.
 * Always returns 0.
 * NOTE(review): mpssas_send_portenable() can return EBUSY (command
 * allocation failure) which is silently dropped here — confirm whether
 * callers should see that error.
 */
int
mpssas_startup(struct mps_softc *sc)
{

	/*
	 * Send the port enable message and set the wait_for_port_enable flag.
	 * This flag helps to keep the simq frozen until all discovery events
	 * are processed.
	 */
	sc->wait_for_port_enable = 1;
	mpssas_send_portenable(sc);
	return (0);
}
3290 
3291 static int
mpssas_send_portenable(struct mps_softc * sc)3292 mpssas_send_portenable(struct mps_softc *sc)
3293 {
3294 	MPI2_PORT_ENABLE_REQUEST *request;
3295 	struct mps_command *cm;
3296 
3297 	MPS_FUNCTRACE(sc);
3298 
3299 	if ((cm = mps_alloc_command(sc)) == NULL)
3300 		return (EBUSY);
3301 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3302 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3303 	request->MsgFlags = 0;
3304 	request->VP_ID = 0;
3305 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3306 	cm->cm_complete = mpssas_portenable_complete;
3307 	cm->cm_data = NULL;
3308 	cm->cm_sge = NULL;
3309 
3310 	mps_map_command(sc, cm);
3311 	mps_dprint(sc, MPS_XINFO,
3312 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3313 	    cm, cm->cm_req, cm->cm_complete);
3314 	return (0);
3315 }
3316 
3317 static void
mpssas_portenable_complete(struct mps_softc * sc,struct mps_command * cm)3318 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3319 {
3320 	MPI2_PORT_ENABLE_REPLY *reply;
3321 	struct mpssas_softc *sassc;
3322 
3323 	MPS_FUNCTRACE(sc);
3324 	sassc = sc->sassc;
3325 
3326 	/*
3327 	 * Currently there should be no way we can hit this case.  It only
3328 	 * happens when we have a failure to allocate chain frames, and
3329 	 * port enable commands don't have S/G lists.
3330 	 */
3331 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3332 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3333 			   "This should not happen!\n", __func__, cm->cm_flags);
3334 	}
3335 
3336 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3337 	if (reply == NULL)
3338 		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3339 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3340 	    MPI2_IOCSTATUS_SUCCESS)
3341 		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3342 
3343 	mps_free_command(sc, cm);
3344 
3345 	/*
3346 	 * Get WarpDrive info after discovery is complete but before the scan
3347 	 * starts.  At this point, all devices are ready to be exposed to the
3348 	 * OS.  If devices should be hidden instead, take them out of the
3349 	 * 'targets' array before the scan.  The devinfo for a disk will have
3350 	 * some info and a volume's will be 0.  Use that to remove disks.
3351 	 */
3352 	mps_wd_config_pages(sc);
3353 
3354 	/*
3355 	 * Done waiting for port enable to complete.  Decrement the refcount.
3356 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3357 	 * take place.  Since the simq was explicitly frozen before port
3358 	 * enable, it must be explicitly released here to keep the
3359 	 * freeze/release count in sync.
3360 	 */
3361 	sc->wait_for_port_enable = 0;
3362 	sc->port_enable_complete = 1;
3363 	wakeup(&sc->port_enable_complete);
3364 	mpssas_startup_decrement(sassc);
3365 }
3366 
3367 int
mpssas_check_id(struct mpssas_softc * sassc,int id)3368 mpssas_check_id(struct mpssas_softc *sassc, int id)
3369 {
3370 	struct mps_softc *sc = sassc->sc;
3371 	char *ids;
3372 	char *name;
3373 
3374 	ids = &sc->exclude_ids[0];
3375 	while((name = strsep(&ids, ",")) != NULL) {
3376 		if (name[0] == '\0')
3377 			continue;
3378 		if (strtol(name, NULL, 0) == (long)id)
3379 			return (1);
3380 	}
3381 
3382 	return (0);
3383 }
3384 
3385 void
mpssas_realloc_targets(struct mps_softc * sc,int maxtargets)3386 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3387 {
3388 	struct mpssas_softc *sassc;
3389 	struct mpssas_lun *lun, *lun_tmp;
3390 	struct mpssas_target *targ;
3391 	int i;
3392 
3393 	sassc = sc->sassc;
3394 	/*
3395 	 * The number of targets is based on IOC Facts, so free all of
3396 	 * the allocated LUNs for each target and then the target buffer
3397 	 * itself.
3398 	 */
3399 	for (i=0; i< maxtargets; i++) {
3400 		targ = &sassc->targets[i];
3401 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3402 			free(lun, M_MPT2);
3403 		}
3404 	}
3405 	free(sassc->targets, M_MPT2);
3406 
3407 	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3408 	    M_MPT2, M_WAITOK|M_ZERO);
3409 }
3410