1 /*- 2 * Copyright (c) 2009 Yahoo! Inc. 3 * Copyright (c) 2011-2015 LSI Corp. 4 * Copyright (c) 2013-2015 Avago Technologies 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
 *
 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* Communications core for Avago Technologies (LSI) MPT2 */

/* TODO Move headers to mpsvar */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/selinfo.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/queue.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/sbuf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <machine/stdarg.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#if __FreeBSD_version >= 900026
#include <cam/scsi/smp_all.h>
#endif

#include <dev/mps/mpi/mpi2_type.h>
#include <dev/mps/mpi/mpi2.h>
#include <dev/mps/mpi/mpi2_ioc.h>
#include <dev/mps/mpi/mpi2_sas.h>
#include <dev/mps/mpi/mpi2_cnfg.h>
#include <dev/mps/mpi/mpi2_init.h>
#include <dev/mps/mpi/mpi2_tool.h>
#include <dev/mps/mps_ioctl.h>
#include <dev/mps/mpsvar.h>
#include <dev/mps/mps_table.h>
#include <dev/mps/mps_sas.h>

#define MPSSAS_DISCOVERY_TIMEOUT	20
#define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */

/*
 * static array to check SCSI OpCode for EEDP protection bits.
 * Indexed by SCSI opcode (0x00-0xFF); a non-zero entry gives the
 * MPI2 EEDP flag to apply for that opcode (reads check/remove,
 * writes insert protection information).
 */
#define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x28 READ(10), 0x2a WRITE(10), 0x2e WRITE&VERIFY(10), 0x2f VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x88 READ(16), 0x8a WRITE(16), 0x8e WRITE&VERIFY(16), 0x8f VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xa8 READ(12), 0xaa WRITE(12), 0xae WRITE&VERIFY(12), 0xaf VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");

static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
static void mpssas_poll(struct cam_sim *sim);
static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
    struct mps_command *cm);
static void mpssas_scsiio_timeout(void *data);
static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
    struct mps_command *cm, union ccb *ccb);
static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
#if __FreeBSD_version >= 900026
static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
    uint64_t sasaddr);
static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
#endif //FreeBSD_version >= 900026
static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
static void mpssas_async(void *callback_arg, uint32_t code,
    struct cam_path *path, void *arg);
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
    struct ccb_getdev *cgd);
static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
#endif
static int mpssas_send_portenable(struct mps_softc *sc);
static void mpssas_portenable_complete(struct mps_softc *sc,
    struct mps_command *cm);

/*
 * Linear-scan the target array, beginning at index 'start', for the target
 * whose firmware device handle matches 'handle'.  Returns NULL when no
 * target in [start, maxtargets) has that handle.
 */
struct mpssas_target *
mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
{
	struct mpssas_target *target;
	int i;

	for (i = start; i < sassc->maxtargets; i++) {
		target = &sassc->targets[i];
		if (target->handle == handle)
			return (target);
	}

	return (NULL);
}

/* we need to freeze the simq during attach and diag reset, to avoid failing
 * commands before device handles have been found by discovery.  Since
 * discovery involves reading config pages and possibly sending commands,
 * discovery actions may continue even after we receive the end of discovery
 * event, so refcount discovery actions instead of assuming we can unfreeze
 * the simq when we get the event.
 */
/*
 * Take a discovery-action reference.  The first reference taken while in
 * startup mode freezes the simq (and, where available, holds boot).
 */
void
mpssas_startup_increment(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s freezing simq\n", __func__);
#if __FreeBSD_version >= 1000039
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}

/*
 * Release the simq if it was frozen by the TM path (MPSSAS_QUEUE_FROZEN);
 * used when re-initializing after a diag reset.
 */
void
mpssas_release_simq_reinit(struct mpssas_softc *sassc)
{
	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		xpt_release_simq(sassc->sim, 1);
		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
	}
}

/*
 * Drop a discovery-action reference.  When the last reference is released,
 * leave startup mode, release the simq and either release boot (new CAM)
 * or trigger a full rescan (old CAM) so the latest topology is picked up.
 */
void
mpssas_startup_decrement(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPSSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if __FreeBSD_version >= 1000039
			xpt_release_boot();
#else
			mpssas_rescan_target(sassc->sc, NULL);
#endif
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}

/* The firmware requires us to stop sending commands when we're doing task
 * management, so refcount the TMs and keep the simq frozen when any are in
 * use.
231 */ 232 struct mps_command * 233 mpssas_alloc_tm(struct mps_softc *sc) 234 { 235 struct mps_command *tm; 236 237 tm = mps_alloc_high_priority_command(sc); 238 return tm; 239 } 240 241 void 242 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm) 243 { 244 int target_id = 0xFFFFFFFF; 245 246 if (tm == NULL) 247 return; 248 249 /* 250 * For TM's the devq is frozen for the device. Unfreeze it here and 251 * free the resources used for freezing the devq. Must clear the 252 * INRESET flag as well or scsi I/O will not work. 253 */ 254 if (tm->cm_targ != NULL) { 255 tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET; 256 target_id = tm->cm_targ->tid; 257 } 258 if (tm->cm_ccb) { 259 mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n", 260 target_id); 261 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE); 262 xpt_free_path(tm->cm_ccb->ccb_h.path); 263 xpt_free_ccb(tm->cm_ccb); 264 } 265 266 mps_free_high_priority_command(sc, tm); 267 } 268 269 void 270 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ) 271 { 272 struct mpssas_softc *sassc = sc->sassc; 273 path_id_t pathid; 274 target_id_t targetid; 275 union ccb *ccb; 276 277 MPS_FUNCTRACE(sc); 278 pathid = cam_sim_path(sassc->sim); 279 if (targ == NULL) 280 targetid = CAM_TARGET_WILDCARD; 281 else 282 targetid = targ - sassc->targets; 283 284 /* 285 * Allocate a CCB and schedule a rescan. 
286 */ 287 ccb = xpt_alloc_ccb_nowait(); 288 if (ccb == NULL) { 289 mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n"); 290 return; 291 } 292 293 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, 294 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 295 mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n"); 296 xpt_free_ccb(ccb); 297 return; 298 } 299 300 if (targetid == CAM_TARGET_WILDCARD) 301 ccb->ccb_h.func_code = XPT_SCAN_BUS; 302 else 303 ccb->ccb_h.func_code = XPT_SCAN_TGT; 304 305 mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid); 306 xpt_rescan(ccb); 307 } 308 309 static void 310 mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...) 311 { 312 struct sbuf sb; 313 va_list ap; 314 char str[192]; 315 char path_str[64]; 316 317 if (cm == NULL) 318 return; 319 320 /* No need to be in here if debugging isn't enabled */ 321 if ((cm->cm_sc->mps_debug & level) == 0) 322 return; 323 324 sbuf_new(&sb, str, sizeof(str), 0); 325 326 va_start(ap, fmt); 327 328 if (cm->cm_ccb != NULL) { 329 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str, 330 sizeof(path_str)); 331 sbuf_cat(&sb, path_str); 332 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) { 333 scsi_command_string(&cm->cm_ccb->csio, &sb); 334 sbuf_printf(&sb, "length %d ", 335 cm->cm_ccb->csio.dxfer_len); 336 } 337 } 338 else { 339 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ", 340 cam_sim_name(cm->cm_sc->sassc->sim), 341 cam_sim_unit(cm->cm_sc->sassc->sim), 342 cam_sim_bus(cm->cm_sc->sassc->sim), 343 cm->cm_targ ? 
cm->cm_targ->tid : 0xFFFFFFFF, 344 cm->cm_lun); 345 } 346 347 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID); 348 sbuf_vprintf(&sb, fmt, ap); 349 sbuf_finish(&sb); 350 mps_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb)); 351 352 va_end(ap); 353 } 354 355 356 static void 357 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm) 358 { 359 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 360 struct mpssas_target *targ; 361 uint16_t handle; 362 363 MPS_FUNCTRACE(sc); 364 365 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 366 handle = (uint16_t)(uintptr_t)tm->cm_complete_data; 367 targ = tm->cm_targ; 368 369 if (reply == NULL) { 370 /* XXX retry the remove after the diag reset completes? */ 371 mps_dprint(sc, MPS_FAULT, 372 "%s NULL reply resetting device 0x%04x\n", __func__, 373 handle); 374 mpssas_free_tm(sc, tm); 375 return; 376 } 377 378 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) != 379 MPI2_IOCSTATUS_SUCCESS) { 380 mps_dprint(sc, MPS_ERROR, 381 "IOCStatus = 0x%x while resetting device 0x%x\n", 382 le16toh(reply->IOCStatus), handle); 383 } 384 385 mps_dprint(sc, MPS_XINFO, 386 "Reset aborted %u commands\n", reply->TerminationCount); 387 mps_free_reply(sc, tm->cm_reply_data); 388 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */ 389 390 mps_dprint(sc, MPS_XINFO, 391 "clearing target %u handle 0x%04x\n", targ->tid, handle); 392 393 /* 394 * Don't clear target if remove fails because things will get confusing. 395 * Leave the devname and sasaddr intact so that we know to avoid reusing 396 * this target id if possible, and so we can assign the same target id 397 * to this device if it comes back in the future. 
398 */ 399 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) == 400 MPI2_IOCSTATUS_SUCCESS) { 401 targ = tm->cm_targ; 402 targ->handle = 0x0; 403 targ->encl_handle = 0x0; 404 targ->encl_slot = 0x0; 405 targ->exp_dev_handle = 0x0; 406 targ->phy_num = 0x0; 407 targ->linkrate = 0x0; 408 targ->devinfo = 0x0; 409 targ->flags = 0x0; 410 } 411 412 mpssas_free_tm(sc, tm); 413 } 414 415 416 /* 417 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal. 418 * Otherwise Volume Delete is same as Bare Drive Removal. 419 */ 420 void 421 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle) 422 { 423 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 424 struct mps_softc *sc; 425 struct mps_command *cm; 426 struct mpssas_target *targ = NULL; 427 428 MPS_FUNCTRACE(sassc->sc); 429 sc = sassc->sc; 430 431 #ifdef WD_SUPPORT 432 /* 433 * If this is a WD controller, determine if the disk should be exposed 434 * to the OS or not. If disk should be exposed, return from this 435 * function without doing anything. 436 */ 437 if (sc->WD_available && (sc->WD_hide_expose == 438 MPS_WD_EXPOSE_ALWAYS)) { 439 return; 440 } 441 #endif //WD_SUPPORT 442 443 targ = mpssas_find_target_by_handle(sassc, 0, handle); 444 if (targ == NULL) { 445 /* FIXME: what is the action? */ 446 /* We don't know about this device? 
*/ 447 mps_dprint(sc, MPS_ERROR, 448 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle); 449 return; 450 } 451 452 targ->flags |= MPSSAS_TARGET_INREMOVAL; 453 454 cm = mpssas_alloc_tm(sc); 455 if (cm == NULL) { 456 mps_dprint(sc, MPS_ERROR, 457 "%s: command alloc failure\n", __func__); 458 return; 459 } 460 461 mpssas_rescan_target(sc, targ); 462 463 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req; 464 req->DevHandle = targ->handle; 465 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 466 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 467 468 /* SAS Hard Link Reset / SATA Link Reset */ 469 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 470 471 cm->cm_targ = targ; 472 cm->cm_data = NULL; 473 cm->cm_desc.HighPriority.RequestFlags = 474 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 475 cm->cm_complete = mpssas_remove_volume; 476 cm->cm_complete_data = (void *)(uintptr_t)handle; 477 478 mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n", 479 __func__, targ->tid); 480 mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD); 481 482 mps_map_command(sc, cm); 483 } 484 485 /* 486 * The MPT2 firmware performs debounce on the link to avoid transient link 487 * errors and false removals. When it does decide that link has been lost 488 * and a device need to go away, it expects that the host will perform a 489 * target reset and then an op remove. The reset has the side-effect of 490 * aborting any outstanding requests for the device, which is required for 491 * the op-remove to succeed. It's not clear if the host should check for 492 * the device coming back alive after the reset. 
 */
/*
 * First stage of bare-drive removal: mark the target as being removed,
 * schedule a rescan, and send a target-reset TM.  The completion handler
 * (mpssas_remove_device) then issues the op-remove.
 */
void
mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ = NULL;

	MPS_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mpssas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mps_dprint(sc, MPS_ERROR,
		    "%s : invalid handle 0x%x \n", __func__, handle);
		return;
	}

	targ->flags |= MPSSAS_TARGET_INREMOVAL;

	cm = mpssas_alloc_tm(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: command alloc failure\n", __func__);
		return;
	}

	mpssas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	cm->cm_complete = mpssas_remove_device;
	cm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);

	mps_map_command(sc, cm);
}

/*
 * Second stage of removal: runs as the target-reset TM's completion.
 * Reuses the same command to send MPI2_SAS_OP_REMOVE_DEVICE, then fails
 * any commands still queued on the target with CAM_DEV_NOT_THERE.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply resetting device 0x%04x\n", __func__,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_ERROR,
		    "IOCStatus = 0x%x while resetting device 0x%x\n",
		    le16toh(reply->IOCStatus), handle);
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	/*
	 * NOTE: 'tm' is reused as the loop iterator below; the TM itself
	 * has already been handed to the hardware via mps_map_command().
	 * cm_complete_data for queued SCSI I/O commands holds their CCB.
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mpssas_scsiio_complete(sc, tm);
	}
}

/*
 * Final stage of removal: completion of the op-remove.  On success, clears
 * the target's handle/enclosure state and frees its LUN list; on failure,
 * leaves the target intact (see comment below).
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Free all per-LUN records for the departed target. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}

/*
 * Subscribe to the firmware event classes this driver cares about
 * (device status, discovery, topology, IR, log entries).  Always
 * returns 0.
 */
static int
mpssas_register_events(struct mps_softc *sc)
{
	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];

	/*
	 * NOTE(review): 16 == sizeof(events) only while
	 * MPI2_EVENT_NOTIFY_EVENTMASK_WORDS is 4; bzero(events,
	 * sizeof(events)) would be more robust — confirm before changing.
	 */
	bzero(events, 16);
	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	setbit(events, MPI2_EVENT_IR_VOLUME);
	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);

	mps_register_events(sc, events, mpssas_evt_handler, NULL,
	    &sc->sassc->mpssas_eh);

	return (0);
}

/*
 * Attach the SAS/CAM half of the driver: allocate the per-SAS softc and
 * target array, create the SIMQ/SIM, spin up the event taskqueue, register
 * the bus with CAM, enter startup (simq frozen) mode, and register async
 * and firmware event handlers.  On any error, mps_detach_sas() unwinds
 * whatever was set up.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0;

	MPS_FUNCTRACE(sc);

	/* M_WAITOK allocation; the NULL check below is belt-and-suspenders. */
	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		    __func__, __LINE__);
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc->targets) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		    __func__, __LINE__);
		free(sassc, M_MPT2);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
		    error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mpssas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED;
#else
		event = AC_FOUND_DEVICE;
#endif
		status = xpt_register_async(event, mpssas_async, sc,
		    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);
	return (error);
}

/*
 * Tear down the SAS/CAM half of the driver: deregister events, drain the
 * event taskqueue (lock unheld), deregister from CAM, and free the SIM,
 * SIMQ, LUN records and target array.  Idempotent when already detached.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Release the simq if attach left it frozen (startup mode). */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any per-LUN records hanging off each target. */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}

/*
 * Called when discovery finishes: cancel the pending discovery-timeout
 * callout, if any.
 */
void
mpssas_discovery_end(struct mpssas_softc *sassc)
{
	struct mps_softc *sc = sassc->sc;

	MPS_FUNCTRACE(sc);

	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
		callout_stop(&sassc->discovery_callout);

}

/*
 * CAM action entry point for the SIM.  Dispatches on the CCB function
 * code; cases that complete asynchronously (reset-dev, scsi-io, smp-io)
 * return directly, all others fall through to xpt_done().
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mps_softc *sc = sassc->sc;
		uint8_t sges_per_frame;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;
		/* Initiator is reported as the highest target id. */
		cpi->initiator_id = sassc->maxtargets - 1;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Avago Tech (LSI)", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;

		/*
		 * Max IO Size is Page Size * the following:
		 * ((SGEs per frame - 1 for chain element) *
		 * Max Chain Depth) + 1 for no chain needed in last frame
		 *
		 * If user suggests a Max IO size to use, use the smaller of the
		 * user's value and the calculated value as long as the user's
		 * value is larger than 0. The user's value is in pages.
		 */
		sges_per_frame = ((sc->facts->IOCRequestFrameSize * 4) /
		    sizeof(MPI2_SGE_SIMPLE64)) - 1;
		cpi->maxio = (sges_per_frame * sc->facts->MaxChainDepth) + 1;
		cpi->maxio *= PAGE_SIZE;
		if ((sc->max_io_pages > 0) && (sc->max_io_pages * PAGE_SIZE <
		    cpi->maxio))
			cpi->maxio = sc->max_io_pages * PAGE_SIZE;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;
		struct ccb_trans_settings_sas *sas;
		struct ccb_trans_settings_scsi *scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		if (targ->handle == 0x0) {
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Map the firmware link-rate code to a bitrate in kb/s. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}

/*
 * Send an async notification (e.g. AC_BUS_RESET, AC_SENT_BDR) to CAM for
 * the given target/lun on this SIM's bus.
 */
static void
mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
    target_id_t target_id, lun_id_t lun_id)
{
	path_id_t path_id = cam_sim_path(sc->sassc->sim);
	struct cam_path *path;

	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
	    ac_code, target_id, (uintmax_t)lun_id);

	if (xpt_create_path(&path, NULL,
	    path_id, target_id, lun_id) != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
		    "notification\n");
		return;
	}

	xpt_async(ac_code, path, NULL);
	xpt_free_path(path);
}

/*
 * Fail every outstanding command with a NULL reply during a diag reset,
 * invoking completion handlers and waking sleepers as appropriate.
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
1077 struct mps_command *cm; 1078 int i; 1079 int completed; 1080 1081 MPS_FUNCTRACE(sc); 1082 mtx_assert(&sc->mps_mtx, MA_OWNED); 1083 1084 /* complete all commands with a NULL reply */ 1085 for (i = 1; i < sc->num_reqs; i++) { 1086 cm = &sc->commands[i]; 1087 cm->cm_reply = NULL; 1088 completed = 0; 1089 1090 if (cm->cm_flags & MPS_CM_FLAGS_POLLED) 1091 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE; 1092 1093 if (cm->cm_complete != NULL) { 1094 mpssas_log_command(cm, MPS_RECOVERY, 1095 "completing cm %p state %x ccb %p for diag reset\n", 1096 cm, cm->cm_state, cm->cm_ccb); 1097 1098 cm->cm_complete(sc, cm); 1099 completed = 1; 1100 } 1101 1102 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) { 1103 mpssas_log_command(cm, MPS_RECOVERY, 1104 "waking up cm %p state %x ccb %p for diag reset\n", 1105 cm, cm->cm_state, cm->cm_ccb); 1106 wakeup(cm); 1107 completed = 1; 1108 } 1109 1110 if (cm->cm_sc->io_cmds_active != 0) { 1111 cm->cm_sc->io_cmds_active--; 1112 } else { 1113 mps_dprint(cm->cm_sc, MPS_INFO, "Warning: " 1114 "io_cmds_active is out of sync - resynching to " 1115 "0\n"); 1116 } 1117 1118 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) { 1119 /* this should never happen, but if it does, log */ 1120 mpssas_log_command(cm, MPS_RECOVERY, 1121 "cm %p state %x flags 0x%x ccb %p during diag " 1122 "reset\n", cm, cm->cm_state, cm->cm_flags, 1123 cm->cm_ccb); 1124 } 1125 } 1126 } 1127 1128 void 1129 mpssas_handle_reinit(struct mps_softc *sc) 1130 { 1131 int i; 1132 1133 /* Go back into startup mode and freeze the simq, so that CAM 1134 * doesn't send any commands until after we've rediscovered all 1135 * targets and found the proper device handles for them. 1136 * 1137 * After the reset, portenable will trigger discovery, and after all 1138 * discovery-related activities have finished, the simq will be 1139 * released. 
1140 */ 1141 mps_dprint(sc, MPS_INIT, "%s startup\n", __func__); 1142 sc->sassc->flags |= MPSSAS_IN_STARTUP; 1143 sc->sassc->flags |= MPSSAS_IN_DISCOVERY; 1144 mpssas_startup_increment(sc->sassc); 1145 1146 /* notify CAM of a bus reset */ 1147 mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD, 1148 CAM_LUN_WILDCARD); 1149 1150 /* complete and cleanup after all outstanding commands */ 1151 mpssas_complete_all_commands(sc); 1152 1153 mps_dprint(sc, MPS_INIT, 1154 "%s startup %u after command completion\n", __func__, 1155 sc->sassc->startup_refcount); 1156 1157 /* zero all the target handles, since they may change after the 1158 * reset, and we have to rediscover all the targets and use the new 1159 * handles. 1160 */ 1161 for (i = 0; i < sc->sassc->maxtargets; i++) { 1162 if (sc->sassc->targets[i].outstanding != 0) 1163 mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n", 1164 i, sc->sassc->targets[i].outstanding); 1165 sc->sassc->targets[i].handle = 0x0; 1166 sc->sassc->targets[i].exp_dev_handle = 0x0; 1167 sc->sassc->targets[i].outstanding = 0; 1168 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET; 1169 } 1170 } 1171 1172 static void 1173 mpssas_tm_timeout(void *data) 1174 { 1175 struct mps_command *tm = data; 1176 struct mps_softc *sc = tm->cm_sc; 1177 1178 mtx_assert(&sc->mps_mtx, MA_OWNED); 1179 1180 mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY, 1181 "task mgmt %p timed out\n", tm); 1182 mps_reinit(sc); 1183 } 1184 1185 static void 1186 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm) 1187 { 1188 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 1189 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 1190 unsigned int cm_count = 0; 1191 struct mps_command *cm; 1192 struct mpssas_target *targ; 1193 1194 callout_stop(&tm->cm_callout); 1195 1196 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 1197 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 1198 targ = tm->cm_targ; 1199 1200 /* 1201 * Currently there should be no way we can hit 
this case. It only 1202 * happens when we have a failure to allocate chain frames, and 1203 * task management commands don't have S/G lists. 1204 * XXXSL So should it be an assertion? 1205 */ 1206 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { 1207 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! " 1208 "This should not happen!\n", __func__, tm->cm_flags); 1209 mpssas_free_tm(sc, tm); 1210 return; 1211 } 1212 1213 if (reply == NULL) { 1214 mpssas_log_command(tm, MPS_RECOVERY, 1215 "NULL reset reply for tm %p\n", tm); 1216 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) { 1217 /* this completion was due to a reset, just cleanup */ 1218 targ->tm = NULL; 1219 mpssas_free_tm(sc, tm); 1220 } 1221 else { 1222 /* we should have gotten a reply. */ 1223 mps_reinit(sc); 1224 } 1225 return; 1226 } 1227 1228 mpssas_log_command(tm, MPS_RECOVERY, 1229 "logical unit reset status 0x%x code 0x%x count %u\n", 1230 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), 1231 le32toh(reply->TerminationCount)); 1232 1233 /* See if there are any outstanding commands for this LUN. 1234 * This could be made more efficient by using a per-LU data 1235 * structure of some sort. 1236 */ 1237 TAILQ_FOREACH(cm, &targ->commands, cm_link) { 1238 if (cm->cm_lun == tm->cm_lun) 1239 cm_count++; 1240 } 1241 1242 if (cm_count == 0) { 1243 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO, 1244 "logical unit %u finished recovery after reset\n", 1245 tm->cm_lun, tm); 1246 1247 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, 1248 tm->cm_lun); 1249 1250 /* we've finished recovery for this logical unit. check and 1251 * see if some other logical unit has a timedout command 1252 * that needs to be processed. 
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			/* reuse this tm to abort the next timed-out command */
			mpssas_send_abort(sc, tm, cm);
		}
		else {
			/* no more recovery work: release the tm */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
	}
	else {
		/* if we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
		    tm, cm_count);
		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
	}
}

/*
 * Completion handler for a TARGET_RESET task management request.  If the
 * target still has outstanding commands afterwards, the reset failed and
 * the last escalation step is a full controller reinit.
 */
static void
mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
		    "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "recovery finished after target reset\n");

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "target reset complete for tm %p, but still have %u command(s)\n",
		    tm, targ->outstanding);
		mps_reinit(sc);
	}
}

/* seconds before a pending task management request triggers a reinit */
#define MPS_RESET_TIMEOUT 30

/*
 * Build and send a LOGICAL_UNIT_RESET or TARGET_RESET task management
 * request for tm's target.  Returns 0 on success, -1 on bad arguments,
 * or the mps_map_command() error.  The completion handler is chosen per
 * reset type and a timeout callout is armed.
 */
int
mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *target;
	int err;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
		    __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPS_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "sending logical unit reset\n");
		tm->cm_complete = mpssas_logical_unit_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target,
		    tm->cm_lun);
	}
	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 *	SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "sending target reset\n");
		tm->cm_complete = mpssas_target_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	tm->cm_data = NULL;
	/* task management requests go out on the high-priority queue */
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
	    mpssas_tm_timeout, tm);

	err = mps_map_command(sc, tm);
	if (err)
		mpssas_log_command(tm, MPS_RECOVERY,
		    "error %d sending reset type %u\n",
		    err, type);

	return err;
}


/*
 * Completion handler for an ABORT_TASK task management request.  Either
 * finishes recovery, continues by aborting the next timed-out command,
 * or escalates to a logical unit reset if the aborted command never
 * completed.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
1482 */ 1483 mpssas_log_command(tm, MPS_RECOVERY, 1484 "abort failed for TaskMID %u tm %p\n", 1485 le16toh(req->TaskMID), tm); 1486 1487 mpssas_send_reset(sc, tm, 1488 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET); 1489 } 1490 } 1491 1492 #define MPS_ABORT_TIMEOUT 5 1493 1494 static int 1495 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm) 1496 { 1497 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 1498 struct mpssas_target *targ; 1499 int err; 1500 1501 targ = cm->cm_targ; 1502 if (targ->handle == 0) { 1503 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n", 1504 __func__, cm->cm_ccb->ccb_h.target_id); 1505 return -1; 1506 } 1507 1508 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO, 1509 "Aborting command %p\n", cm); 1510 1511 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 1512 req->DevHandle = htole16(targ->handle); 1513 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 1514 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK; 1515 1516 /* XXX Need to handle invalid LUNs */ 1517 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun); 1518 1519 req->TaskMID = htole16(cm->cm_desc.Default.SMID); 1520 1521 tm->cm_data = NULL; 1522 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 1523 tm->cm_complete = mpssas_abort_complete; 1524 tm->cm_complete_data = (void *)tm; 1525 tm->cm_targ = cm->cm_targ; 1526 tm->cm_lun = cm->cm_lun; 1527 1528 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz, 1529 mpssas_tm_timeout, tm); 1530 1531 targ->aborts++; 1532 1533 mps_dprint(sc, MPS_INFO, "Sending reset from %s for target ID %d\n", 1534 __func__, targ->tid); 1535 mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun); 1536 1537 err = mps_map_command(sc, tm); 1538 if (err) 1539 mpssas_log_command(tm, MPS_RECOVERY, 1540 "error %d sending abort for cm %p SMID %u\n", 1541 err, cm, req->TaskMID); 1542 return err; 1543 } 1544 1545 static void 1546 mpssas_scsiio_timeout(void *data) 1547 { 1548 struct mps_softc *sc; 1549 struct 
mps_command *cm; 1550 struct mpssas_target *targ; 1551 1552 cm = (struct mps_command *)data; 1553 sc = cm->cm_sc; 1554 1555 MPS_FUNCTRACE(sc); 1556 mtx_assert(&sc->mps_mtx, MA_OWNED); 1557 1558 mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc); 1559 1560 /* 1561 * Run the interrupt handler to make sure it's not pending. This 1562 * isn't perfect because the command could have already completed 1563 * and been re-used, though this is unlikely. 1564 */ 1565 mps_intr_locked(sc); 1566 if (cm->cm_state == MPS_CM_STATE_FREE) { 1567 mpssas_log_command(cm, MPS_XINFO, 1568 "SCSI command %p almost timed out\n", cm); 1569 return; 1570 } 1571 1572 if (cm->cm_ccb == NULL) { 1573 mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n"); 1574 return; 1575 } 1576 1577 mpssas_log_command(cm, MPS_INFO, "command timeout cm %p ccb %p\n", 1578 cm, cm->cm_ccb); 1579 1580 targ = cm->cm_targ; 1581 targ->timeouts++; 1582 1583 /* XXX first, check the firmware state, to see if it's still 1584 * operational. if not, do a diag reset. 1585 */ 1586 mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT); 1587 cm->cm_state = MPS_CM_STATE_TIMEDOUT; 1588 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery); 1589 1590 if (targ->tm != NULL) { 1591 /* target already in recovery, just queue up another 1592 * timedout command to be processed later. 1593 */ 1594 mps_dprint(sc, MPS_RECOVERY, 1595 "queued timedout cm %p for processing by tm %p\n", 1596 cm, targ->tm); 1597 } 1598 else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) { 1599 mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n", 1600 cm, targ->tm); 1601 1602 /* start recovery by aborting the first timedout command */ 1603 mpssas_send_abort(sc, targ->tm, cm); 1604 } 1605 else { 1606 /* XXX queue this target up for recovery once a TM becomes 1607 * available. The firmware only has a limited number of 1608 * HighPriority credits for the high priority requests used 1609 * for task management, and we ran out. 
1610 * 1611 * Isilon: don't worry about this for now, since we have 1612 * more credits than disks in an enclosure, and limit 1613 * ourselves to one TM per target for recovery. 1614 */ 1615 mps_dprint(sc, MPS_RECOVERY, 1616 "timedout cm %p failed to allocate a tm\n", cm); 1617 } 1618 1619 } 1620 1621 static void 1622 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb) 1623 { 1624 MPI2_SCSI_IO_REQUEST *req; 1625 struct ccb_scsiio *csio; 1626 struct mps_softc *sc; 1627 struct mpssas_target *targ; 1628 struct mpssas_lun *lun; 1629 struct mps_command *cm; 1630 uint8_t i, lba_byte, *ref_tag_addr; 1631 uint16_t eedp_flags; 1632 uint32_t mpi_control; 1633 1634 sc = sassc->sc; 1635 MPS_FUNCTRACE(sc); 1636 mtx_assert(&sc->mps_mtx, MA_OWNED); 1637 1638 csio = &ccb->csio; 1639 KASSERT(csio->ccb_h.target_id < sassc->maxtargets, 1640 ("Target %d out of bounds in XPT_SCSI_IO\n", 1641 csio->ccb_h.target_id)); 1642 targ = &sassc->targets[csio->ccb_h.target_id]; 1643 mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags); 1644 if (targ->handle == 0x0) { 1645 mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n", 1646 __func__, csio->ccb_h.target_id); 1647 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 1648 xpt_done(ccb); 1649 return; 1650 } 1651 if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) { 1652 mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO " 1653 "supported %u\n", __func__, csio->ccb_h.target_id); 1654 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 1655 xpt_done(ccb); 1656 return; 1657 } 1658 /* 1659 * Sometimes, it is possible to get a command that is not "In 1660 * Progress" and was actually aborted by the upper layer. Check for 1661 * this here and complete the command without error. 
	 */
	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPSSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
		/* out of commands (or resetting): freeze the simq and requeue */
		if (cm != NULL) {
			mps_free_command(sc, cm);
		}
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Fill out the MPI2 SCSI I/O request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);
	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		/* lun is NULL here if the LUN was not found in the list */
		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 of a 16-byte CDB, byte 2 otherwise */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				/* set RDPROTECT/WRPROTECT bits in CDB byte 1 */
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;	/* SGL space after the 24-word header */
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message.  If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (csio->bio != NULL)
		biotrack(csio->bio, __func__);
#endif
	/* arm the per-command timeout (ccb timeout is in milliseconds) */
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mpssas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}

/*
 * Log a human-readable description of a task management response code.
 */
static void
mps_response_code(struct mps_softc *sc, u8 response_code)
{
	char *desc;

	switch (response_code) {
	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
		desc = "task management request completed";
		break;
	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
		desc = "invalid frame";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
		desc = "task management request not supported";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
		desc = "task management request failed";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
		desc = "task management request succeeded";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
		desc = "invalid lun";
		break;
	case 0xA:
		/* no MPI2 symbolic constant for this value */
		desc = "overlapped tag attempted";
		break;
	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
		desc = "task queued, however not sent to target";
		break;
	default:
		desc = "unknown";
		break;
	}
	mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
	    response_code, desc);
}
/**
 * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
 */
static void
mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
    Mpi2SCSIIOReply_t *mpi_reply)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	char *desc_scsi_state = sc->tmp_string;
	u32 log_info = le32toh(mpi_reply->IOCLogInfo);

	/* log_info 0x31170000 is suppressed (firmware-specific noise) */
	if (log_info == 0x31170000)
		return;

	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/* build the scsi_state description in sc->tmp_string */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		/* no bits set: point at a literal; no strcat below will fire */
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
	/* We can add more detail about underflow data here
	 * TO-DO
	 * */
	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
	    desc_scsi_state, scsi_state);

	if (sc->mps_debug & MPS_XINFO &&
	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
		scsi_sense_print(csio);
		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
	}

	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		/* first byte of the response info carries the response code */
		response_info = le32toh(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		mps_response_code(sc,response_bytes[0]);
	}
}

/*
 * Completion handler for SCSI I/O commands (continues past this chunk).
 */
static void
mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mpssas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;
	struct mpssas_target *target;
	target_id_t target_id;

	MPS_FUNCTRACE(sc);
	mps_dprint(sc, MPS_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* This command is done; pull it off the per-target active list. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (ccb->csio.bio != NULL)
		biotrack(ccb->csio.bio, __func__);
#endif

	/* Log completions that arrive while recovery is in progress. */
	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm, MPS_RECOVERY,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_XINFO, "Error sending command, "
			    "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				/* No reply means the firmware considers the
				 * command a clean success. */
				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mpssas_log_command(cm, MPS_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	} else
		ccb->ccb_h.sim_priv.entries[0].field = 0;

	/* Map the firmware's IOC status onto a CAM CCB status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) &
		    MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		/* Copy autosense data into the CCB, clamped to both the
		 * amount the IOC returned and the space CAM provided. */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
				    returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			    sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* CDB bytes 3-4 hold the INQUIRY allocation length;
			 * subtract the residual to get bytes actually
			 * transferred. */
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}

		/*
		 * If this is a SATA direct-access end device, mark it so that
		 * a SCSI StartStopUnit command will be sent to it when the
		 * driver is being shutdown.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
		    ((sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
			target = &sassc->targets[target_id];
			target->supports_SSU = TRUE;
			mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
			    target_id);
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * These can sometimes be transient transport-related
		 * errors, and sometimes persistent drive-related errors.
		 * We used to retry these without decrementing the retry
		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
		 * we hit a persistent drive problem that returns one of
		 * these error codes, we would retry indefinitely.  So,
		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
		 * count and avoid infinite retries.  We're taking the
		 * potential risk of flagging false failures in the event
		 * of a topology-related error (e.g. a SAS expander problem
		 * causes a command addressed to a drive to fail), but
		 * avoiding getting into an infinite retry loop.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		mpssas_log_command(cm, MPS_INFO,
		    "terminated ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mpssas_log_command(cm, MPS_XINFO,
		    "completed ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

	mps_sc_failed_io_info(sc,csio,rep);

	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_XINFO, "Command completed, "
		    "unfreezing SIM queue\n");
	}

	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}

/* All Request reached here are Endian safe */
static void
mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
    union ccb *ccb) {
	pMpi2SCSIIORequest_t pIO_req;
	struct mps_softc *sc = sassc->sc;
	uint64_t virtLBA;
	uint32_t physLBA, stripe_offset, stripe_unit;
	uint32_t io_size, column;
	uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;

	/*
	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
	 *
Write10, or Write16), build a direct I/O message. Otherwise, the I/O 2505 * will be sent to the IR volume itself. Since Read6 and Write6 are a 2506 * bit different than the 10/16 CDBs, handle them separately. 2507 */ 2508 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req; 2509 CDB = pIO_req->CDB.CDB32; 2510 2511 /* 2512 * Handle 6 byte CDBs. 2513 */ 2514 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) || 2515 (CDB[0] == WRITE_6))) { 2516 /* 2517 * Get the transfer size in blocks. 2518 */ 2519 io_size = (cm->cm_length >> sc->DD_block_exponent); 2520 2521 /* 2522 * Get virtual LBA given in the CDB. 2523 */ 2524 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) | 2525 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3]; 2526 2527 /* 2528 * Check that LBA range for I/O does not exceed volume's 2529 * MaxLBA. 2530 */ 2531 if ((virtLBA + (uint64_t)io_size - 1) <= 2532 sc->DD_max_lba) { 2533 /* 2534 * Check if the I/O crosses a stripe boundary. If not, 2535 * translate the virtual LBA to a physical LBA and set 2536 * the DevHandle for the PhysDisk to be used. If it 2537 * does cross a boundary, do normal I/O. To get the 2538 * right DevHandle to use, get the map number for the 2539 * column, then use that map number to look up the 2540 * DevHandle of the PhysDisk. 2541 */ 2542 stripe_offset = (uint32_t)virtLBA & 2543 (sc->DD_stripe_size - 1); 2544 if ((stripe_offset + io_size) <= sc->DD_stripe_size) { 2545 physLBA = (uint32_t)virtLBA >> 2546 sc->DD_stripe_exponent; 2547 stripe_unit = physLBA / sc->DD_num_phys_disks; 2548 column = physLBA % sc->DD_num_phys_disks; 2549 pIO_req->DevHandle = 2550 htole16(sc->DD_column_map[column].dev_handle); 2551 /* ???? 
Is this endian safe*/ 2552 cm->cm_desc.SCSIIO.DevHandle = 2553 pIO_req->DevHandle; 2554 2555 physLBA = (stripe_unit << 2556 sc->DD_stripe_exponent) + stripe_offset; 2557 ptrLBA = &pIO_req->CDB.CDB32[1]; 2558 physLBA_byte = (uint8_t)(physLBA >> 16); 2559 *ptrLBA = physLBA_byte; 2560 ptrLBA = &pIO_req->CDB.CDB32[2]; 2561 physLBA_byte = (uint8_t)(physLBA >> 8); 2562 *ptrLBA = physLBA_byte; 2563 ptrLBA = &pIO_req->CDB.CDB32[3]; 2564 physLBA_byte = (uint8_t)physLBA; 2565 *ptrLBA = physLBA_byte; 2566 2567 /* 2568 * Set flag that Direct Drive I/O is 2569 * being done. 2570 */ 2571 cm->cm_flags |= MPS_CM_FLAGS_DD_IO; 2572 } 2573 } 2574 return; 2575 } 2576 2577 /* 2578 * Handle 10, 12 or 16 byte CDBs. 2579 */ 2580 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) || 2581 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) || 2582 (CDB[0] == WRITE_16) || (CDB[0] == READ_12) || 2583 (CDB[0] == WRITE_12))) { 2584 /* 2585 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB 2586 * are 0. If not, this is accessing beyond 2TB so handle it in 2587 * the else section. 10-byte and 12-byte CDB's are OK. 2588 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is 2589 * ready to accept 12byte CDB for Direct IOs. 2590 */ 2591 if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) || 2592 (CDB[0] == READ_12 || CDB[0] == WRITE_12) || 2593 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) { 2594 /* 2595 * Get the transfer size in blocks. 2596 */ 2597 io_size = (cm->cm_length >> sc->DD_block_exponent); 2598 2599 /* 2600 * Get virtual LBA. Point to correct lower 4 bytes of 2601 * LBA in the CDB depending on command. 2602 */ 2603 lba_idx = ((CDB[0] == READ_12) || 2604 (CDB[0] == WRITE_12) || 2605 (CDB[0] == READ_10) || 2606 (CDB[0] == WRITE_10))? 
2 : 6; 2607 virtLBA = ((uint64_t)CDB[lba_idx] << 24) | 2608 ((uint64_t)CDB[lba_idx + 1] << 16) | 2609 ((uint64_t)CDB[lba_idx + 2] << 8) | 2610 (uint64_t)CDB[lba_idx + 3]; 2611 2612 /* 2613 * Check that LBA range for I/O does not exceed volume's 2614 * MaxLBA. 2615 */ 2616 if ((virtLBA + (uint64_t)io_size - 1) <= 2617 sc->DD_max_lba) { 2618 /* 2619 * Check if the I/O crosses a stripe boundary. 2620 * If not, translate the virtual LBA to a 2621 * physical LBA and set the DevHandle for the 2622 * PhysDisk to be used. If it does cross a 2623 * boundary, do normal I/O. To get the right 2624 * DevHandle to use, get the map number for the 2625 * column, then use that map number to look up 2626 * the DevHandle of the PhysDisk. 2627 */ 2628 stripe_offset = (uint32_t)virtLBA & 2629 (sc->DD_stripe_size - 1); 2630 if ((stripe_offset + io_size) <= 2631 sc->DD_stripe_size) { 2632 physLBA = (uint32_t)virtLBA >> 2633 sc->DD_stripe_exponent; 2634 stripe_unit = physLBA / 2635 sc->DD_num_phys_disks; 2636 column = physLBA % 2637 sc->DD_num_phys_disks; 2638 pIO_req->DevHandle = 2639 htole16(sc->DD_column_map[column]. 2640 dev_handle); 2641 cm->cm_desc.SCSIIO.DevHandle = 2642 pIO_req->DevHandle; 2643 2644 physLBA = (stripe_unit << 2645 sc->DD_stripe_exponent) + 2646 stripe_offset; 2647 ptrLBA = 2648 &pIO_req->CDB.CDB32[lba_idx]; 2649 physLBA_byte = (uint8_t)(physLBA >> 24); 2650 *ptrLBA = physLBA_byte; 2651 ptrLBA = 2652 &pIO_req->CDB.CDB32[lba_idx + 1]; 2653 physLBA_byte = (uint8_t)(physLBA >> 16); 2654 *ptrLBA = physLBA_byte; 2655 ptrLBA = 2656 &pIO_req->CDB.CDB32[lba_idx + 2]; 2657 physLBA_byte = (uint8_t)(physLBA >> 8); 2658 *ptrLBA = physLBA_byte; 2659 ptrLBA = 2660 &pIO_req->CDB.CDB32[lba_idx + 3]; 2661 physLBA_byte = (uint8_t)physLBA; 2662 *ptrLBA = physLBA_byte; 2663 2664 /* 2665 * Set flag that Direct Drive I/O is 2666 * being done. 
2667 */ 2668 cm->cm_flags |= MPS_CM_FLAGS_DD_IO; 2669 } 2670 } 2671 } else { 2672 /* 2673 * 16-byte CDB and the upper 4 bytes of the CDB are not 2674 * 0. Get the transfer size in blocks. 2675 */ 2676 io_size = (cm->cm_length >> sc->DD_block_exponent); 2677 2678 /* 2679 * Get virtual LBA. 2680 */ 2681 virtLBA = ((uint64_t)CDB[2] << 54) | 2682 ((uint64_t)CDB[3] << 48) | 2683 ((uint64_t)CDB[4] << 40) | 2684 ((uint64_t)CDB[5] << 32) | 2685 ((uint64_t)CDB[6] << 24) | 2686 ((uint64_t)CDB[7] << 16) | 2687 ((uint64_t)CDB[8] << 8) | 2688 (uint64_t)CDB[9]; 2689 2690 /* 2691 * Check that LBA range for I/O does not exceed volume's 2692 * MaxLBA. 2693 */ 2694 if ((virtLBA + (uint64_t)io_size - 1) <= 2695 sc->DD_max_lba) { 2696 /* 2697 * Check if the I/O crosses a stripe boundary. 2698 * If not, translate the virtual LBA to a 2699 * physical LBA and set the DevHandle for the 2700 * PhysDisk to be used. If it does cross a 2701 * boundary, do normal I/O. To get the right 2702 * DevHandle to use, get the map number for the 2703 * column, then use that map number to look up 2704 * the DevHandle of the PhysDisk. 2705 */ 2706 stripe_offset = (uint32_t)virtLBA & 2707 (sc->DD_stripe_size - 1); 2708 if ((stripe_offset + io_size) <= 2709 sc->DD_stripe_size) { 2710 physLBA = (uint32_t)(virtLBA >> 2711 sc->DD_stripe_exponent); 2712 stripe_unit = physLBA / 2713 sc->DD_num_phys_disks; 2714 column = physLBA % 2715 sc->DD_num_phys_disks; 2716 pIO_req->DevHandle = 2717 htole16(sc->DD_column_map[column]. 2718 dev_handle); 2719 cm->cm_desc.SCSIIO.DevHandle = 2720 pIO_req->DevHandle; 2721 2722 physLBA = (stripe_unit << 2723 sc->DD_stripe_exponent) + 2724 stripe_offset; 2725 2726 /* 2727 * Set upper 4 bytes of LBA to 0. We 2728 * assume that the phys disks are less 2729 * than 2 TB's in size. Then, set the 2730 * lower 4 bytes. 
2731 */ 2732 pIO_req->CDB.CDB32[2] = 0; 2733 pIO_req->CDB.CDB32[3] = 0; 2734 pIO_req->CDB.CDB32[4] = 0; 2735 pIO_req->CDB.CDB32[5] = 0; 2736 ptrLBA = &pIO_req->CDB.CDB32[6]; 2737 physLBA_byte = (uint8_t)(physLBA >> 24); 2738 *ptrLBA = physLBA_byte; 2739 ptrLBA = &pIO_req->CDB.CDB32[7]; 2740 physLBA_byte = (uint8_t)(physLBA >> 16); 2741 *ptrLBA = physLBA_byte; 2742 ptrLBA = &pIO_req->CDB.CDB32[8]; 2743 physLBA_byte = (uint8_t)(physLBA >> 8); 2744 *ptrLBA = physLBA_byte; 2745 ptrLBA = &pIO_req->CDB.CDB32[9]; 2746 physLBA_byte = (uint8_t)physLBA; 2747 *ptrLBA = physLBA_byte; 2748 2749 /* 2750 * Set flag that Direct Drive I/O is 2751 * being done. 2752 */ 2753 cm->cm_flags |= MPS_CM_FLAGS_DD_IO; 2754 } 2755 } 2756 } 2757 } 2758 } 2759 2760 #if __FreeBSD_version >= 900026 2761 static void 2762 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm) 2763 { 2764 MPI2_SMP_PASSTHROUGH_REPLY *rpl; 2765 MPI2_SMP_PASSTHROUGH_REQUEST *req; 2766 uint64_t sasaddr; 2767 union ccb *ccb; 2768 2769 ccb = cm->cm_complete_data; 2770 2771 /* 2772 * Currently there should be no way we can hit this case. It only 2773 * happens when we have a failure to allocate chain frames, and SMP 2774 * commands require two S/G elements only. That should be handled 2775 * in the standard request size. 
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
		    __func__, cm->cm_flags);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/* Reassemble the 64-bit SAS address from the little-endian request,
	 * for logging only. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
	    "%#jx completed successfully\n", __func__,
	    (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame is the function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mps_free_command(sc, cm);
	xpt_done(ccb);
}

/*
 * Build and submit an MPI2 SMP passthrough request for the given CCB,
 * targeted at the expander/device at 'sasaddr'.  Completes the CCB
 * immediately on any setup failure; otherwise completion happens in
 * mpssas_smpio_complete().
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
		    "%s: physical addresses not supported\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: multiple request or response "
			    "buffer segments not supported for SMP\n",
			    __func__);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the outbound SMP request, iovec 1 receives the
	 * SMP response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
	    cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
	    MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: error %d returned from mps_map_command()\n",
		    __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;

}

/*
 * Handle an XPT_SMP_IO CCB: resolve the SAS address of the SMP target
 * (the device itself if it embeds an SMP target, otherwise its parent
 * expander) and hand the CCB to mpssas_send_smpcmd().
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: target %d does not exist!\n", __func__,
		    ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: handle %d does not have a valid "
			    "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		parent_target = mpssas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: handle %d does not have a valid "
			    "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: handle %d parent %d does not "
			    "have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		if ((targ->parent_devinfo &
		    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: handle %d parent %d does not "
			    "have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: handle %d parent handle %d does "
			    "not have a valid SAS address!\n",
			    __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
		    "%s: unable to find SAS address for handle %d\n",
		    __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
#endif //__FreeBSD_version >= 900026

/*
 * Handle an XPT_RESET_DEV CCB by issuing a Target Reset task management
 * request (hard link reset) to the target's device handle.  Completion
 * is reported via mpssas_resetdev_complete().
 */
static void
mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *tm;
	struct mpssas_target *targ;

	MPS_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_RESET_DEV\n",
	    ccb->ccb_h.target_id));
	sc = sassc->sc;
	tm = mps_alloc_command(sc);
	if (tm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "command alloc failure in mpssas_action_resetdev\n");
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mpssas_resetdev_complete;
	tm->cm_complete_data = ccb;
	tm->cm_targ = targ;
	targ->flags |= MPSSAS_TARGET_INRESET;

	mps_map_command(sc, tm);
}

/*
 * Completion handler for the Target Reset issued by
 * mpssas_action_resetdev(): translate the task management response code
 * into a CCB status and, on success, announce the reset to CAM.
 */
static void
mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for reset of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    req->DevHandle);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		/* Tell CAM peripherals a bus device reset was delivered. */
		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

bailout:

	mpssas_free_tm(sc, tm);
	xpt_done(ccb);
}

static void
mpssas_poll(struct cam_sim *sim)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	if (sassc->sc->mps_debug & MPS_TRACE) {
		/* frequent debug messages during a panic just slow
		 * everything down too much.
3234 */ 3235 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__); 3236 sassc->sc->mps_debug &= ~MPS_TRACE; 3237 } 3238 3239 mps_intr_locked(sassc->sc); 3240 } 3241 3242 static void 3243 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path, 3244 void *arg) 3245 { 3246 struct mps_softc *sc; 3247 3248 sc = (struct mps_softc *)callback_arg; 3249 3250 switch (code) { 3251 #if (__FreeBSD_version >= 1000006) || \ 3252 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000)) 3253 case AC_ADVINFO_CHANGED: { 3254 struct mpssas_target *target; 3255 struct mpssas_softc *sassc; 3256 struct scsi_read_capacity_data_long rcap_buf; 3257 struct ccb_dev_advinfo cdai; 3258 struct mpssas_lun *lun; 3259 lun_id_t lunid; 3260 int found_lun; 3261 uintptr_t buftype; 3262 3263 buftype = (uintptr_t)arg; 3264 3265 found_lun = 0; 3266 sassc = sc->sassc; 3267 3268 /* 3269 * We're only interested in read capacity data changes. 3270 */ 3271 if (buftype != CDAI_TYPE_RCAPLONG) 3272 break; 3273 3274 /* 3275 * We should have a handle for this, but check to make sure. 
3276 */ 3277 KASSERT(xpt_path_target_id(path) < sassc->maxtargets, 3278 ("Target %d out of bounds in mpssas_async\n", 3279 xpt_path_target_id(path))); 3280 target = &sassc->targets[xpt_path_target_id(path)]; 3281 if (target->handle == 0) 3282 break; 3283 3284 lunid = xpt_path_lun_id(path); 3285 3286 SLIST_FOREACH(lun, &target->luns, lun_link) { 3287 if (lun->lun_id == lunid) { 3288 found_lun = 1; 3289 break; 3290 } 3291 } 3292 3293 if (found_lun == 0) { 3294 lun = malloc(sizeof(struct mpssas_lun), M_MPT2, 3295 M_NOWAIT | M_ZERO); 3296 if (lun == NULL) { 3297 mps_dprint(sc, MPS_ERROR, "Unable to alloc " 3298 "LUN for EEDP support.\n"); 3299 break; 3300 } 3301 lun->lun_id = lunid; 3302 SLIST_INSERT_HEAD(&target->luns, lun, lun_link); 3303 } 3304 3305 bzero(&rcap_buf, sizeof(rcap_buf)); 3306 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); 3307 cdai.ccb_h.func_code = XPT_DEV_ADVINFO; 3308 cdai.ccb_h.flags = CAM_DIR_IN; 3309 cdai.buftype = CDAI_TYPE_RCAPLONG; 3310 #if (__FreeBSD_version >= 1100061) || \ 3311 ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000)) 3312 cdai.flags = CDAI_FLAG_NONE; 3313 #else 3314 cdai.flags = 0; 3315 #endif 3316 cdai.bufsiz = sizeof(rcap_buf); 3317 cdai.buf = (uint8_t *)&rcap_buf; 3318 xpt_action((union ccb *)&cdai); 3319 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) 3320 cam_release_devq(cdai.ccb_h.path, 3321 0, 0, 0, FALSE); 3322 3323 if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP) 3324 && (rcap_buf.prot & SRC16_PROT_EN)) { 3325 lun->eedp_formatted = TRUE; 3326 lun->eedp_block_size = scsi_4btoul(rcap_buf.length); 3327 } else { 3328 lun->eedp_formatted = FALSE; 3329 lun->eedp_block_size = 0; 3330 } 3331 break; 3332 } 3333 #else 3334 case AC_FOUND_DEVICE: { 3335 struct ccb_getdev *cgd; 3336 3337 cgd = arg; 3338 mpssas_check_eedp(sc, path, cgd); 3339 break; 3340 } 3341 #endif 3342 default: 3343 break; 3344 } 3345 } 3346 3347 #if (__FreeBSD_version < 901503) || \ 3348 ((__FreeBSD_version >= 1000000) && 
(__FreeBSD_version < 1000006)) 3349 static void 3350 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path, 3351 struct ccb_getdev *cgd) 3352 { 3353 struct mpssas_softc *sassc = sc->sassc; 3354 struct ccb_scsiio *csio; 3355 struct scsi_read_capacity_16 *scsi_cmd; 3356 struct scsi_read_capacity_eedp *rcap_buf; 3357 path_id_t pathid; 3358 target_id_t targetid; 3359 lun_id_t lunid; 3360 union ccb *ccb; 3361 struct cam_path *local_path; 3362 struct mpssas_target *target; 3363 struct mpssas_lun *lun; 3364 uint8_t found_lun; 3365 char path_str[64]; 3366 3367 sassc = sc->sassc; 3368 pathid = cam_sim_path(sassc->sim); 3369 targetid = xpt_path_target_id(path); 3370 lunid = xpt_path_lun_id(path); 3371 3372 KASSERT(targetid < sassc->maxtargets, 3373 ("Target %d out of bounds in mpssas_check_eedp\n", 3374 targetid)); 3375 target = &sassc->targets[targetid]; 3376 if (target->handle == 0x0) 3377 return; 3378 3379 /* 3380 * Determine if the device is EEDP capable. 3381 * 3382 * If this flag is set in the inquiry data, 3383 * the device supports protection information, 3384 * and must support the 16 byte read 3385 * capacity command, otherwise continue without 3386 * sending read cap 16 3387 */ 3388 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0) 3389 return; 3390 3391 /* 3392 * Issue a READ CAPACITY 16 command. This info 3393 * is used to determine if the LUN is formatted 3394 * for EEDP support. 3395 */ 3396 ccb = xpt_alloc_ccb_nowait(); 3397 if (ccb == NULL) { 3398 mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB " 3399 "for EEDP support.\n"); 3400 return; 3401 } 3402 3403 if (xpt_create_path(&local_path, xpt_periph, 3404 pathid, targetid, lunid) != CAM_REQ_CMP) { 3405 mps_dprint(sc, MPS_ERROR, "Unable to create " 3406 "path for EEDP support\n"); 3407 xpt_free_ccb(ccb); 3408 return; 3409 } 3410 3411 /* 3412 * If LUN is already in list, don't create a new 3413 * one. 
3414 */ 3415 found_lun = FALSE; 3416 SLIST_FOREACH(lun, &target->luns, lun_link) { 3417 if (lun->lun_id == lunid) { 3418 found_lun = TRUE; 3419 break; 3420 } 3421 } 3422 if (!found_lun) { 3423 lun = malloc(sizeof(struct mpssas_lun), M_MPT2, 3424 M_NOWAIT | M_ZERO); 3425 if (lun == NULL) { 3426 mps_dprint(sc, MPS_ERROR, 3427 "Unable to alloc LUN for EEDP support.\n"); 3428 xpt_free_path(local_path); 3429 xpt_free_ccb(ccb); 3430 return; 3431 } 3432 lun->lun_id = lunid; 3433 SLIST_INSERT_HEAD(&target->luns, lun, 3434 lun_link); 3435 } 3436 3437 xpt_path_string(local_path, path_str, sizeof(path_str)); 3438 3439 mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n", 3440 path_str, target->handle); 3441 3442 /* 3443 * Issue a READ CAPACITY 16 command for the LUN. 3444 * The mpssas_read_cap_done function will load 3445 * the read cap info into the LUN struct. 3446 */ 3447 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), 3448 M_MPT2, M_NOWAIT | M_ZERO); 3449 if (rcap_buf == NULL) { 3450 mps_dprint(sc, MPS_FAULT, 3451 "Unable to alloc read capacity buffer for EEDP support.\n"); 3452 xpt_free_path(ccb->ccb_h.path); 3453 xpt_free_ccb(ccb); 3454 return; 3455 } 3456 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT); 3457 csio = &ccb->csio; 3458 csio->ccb_h.func_code = XPT_SCSI_IO; 3459 csio->ccb_h.flags = CAM_DIR_IN; 3460 csio->ccb_h.retry_count = 4; 3461 csio->ccb_h.cbfcnp = mpssas_read_cap_done; 3462 csio->ccb_h.timeout = 60000; 3463 csio->data_ptr = (uint8_t *)rcap_buf; 3464 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp); 3465 csio->sense_len = MPS_SENSE_LEN; 3466 csio->cdb_len = sizeof(*scsi_cmd); 3467 csio->tag_action = MSG_SIMPLE_Q_TAG; 3468 3469 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes; 3470 bzero(scsi_cmd, sizeof(*scsi_cmd)); 3471 scsi_cmd->opcode = 0x9E; 3472 scsi_cmd->service_action = SRC16_SERVICE_ACTION; 3473 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp); 3474 3475 
ccb->ccb_h.ppriv_ptr1 = sassc; 3476 xpt_action(ccb); 3477 } 3478 3479 static void 3480 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb) 3481 { 3482 struct mpssas_softc *sassc; 3483 struct mpssas_target *target; 3484 struct mpssas_lun *lun; 3485 struct scsi_read_capacity_eedp *rcap_buf; 3486 3487 if (done_ccb == NULL) 3488 return; 3489 3490 /* Driver need to release devq, it Scsi command is 3491 * generated by driver internally. 3492 * Currently there is a single place where driver 3493 * calls scsi command internally. In future if driver 3494 * calls more scsi command internally, it needs to release 3495 * devq internally, since those command will not go back to 3496 * cam_periph. 3497 */ 3498 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) { 3499 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 3500 xpt_release_devq(done_ccb->ccb_h.path, 3501 /*count*/ 1, /*run_queue*/TRUE); 3502 } 3503 3504 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr; 3505 3506 /* 3507 * Get the LUN ID for the path and look it up in the LUN list for the 3508 * target. 3509 */ 3510 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1; 3511 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, 3512 ("Target %d out of bounds in mpssas_read_cap_done\n", 3513 done_ccb->ccb_h.target_id)); 3514 target = &sassc->targets[done_ccb->ccb_h.target_id]; 3515 SLIST_FOREACH(lun, &target->luns, lun_link) { 3516 if (lun->lun_id != done_ccb->ccb_h.target_lun) 3517 continue; 3518 3519 /* 3520 * Got the LUN in the target's LUN list. Fill it in 3521 * with EEDP info. If the READ CAP 16 command had some 3522 * SCSI error (common if command is not supported), mark 3523 * the lun as not supporting EEDP and set the block size 3524 * to 0. 
3525 */ 3526 if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) 3527 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) { 3528 lun->eedp_formatted = FALSE; 3529 lun->eedp_block_size = 0; 3530 break; 3531 } 3532 3533 if (rcap_buf->protect & 0x01) { 3534 mps_dprint(sassc->sc, MPS_INFO, "LUN %d for " 3535 "target ID %d is formatted for EEDP " 3536 "support.\n", done_ccb->ccb_h.target_lun, 3537 done_ccb->ccb_h.target_id); 3538 lun->eedp_formatted = TRUE; 3539 lun->eedp_block_size = scsi_4btoul(rcap_buf->length); 3540 } 3541 break; 3542 } 3543 3544 // Finished with this CCB and path. 3545 free(rcap_buf, M_MPT2); 3546 xpt_free_path(done_ccb->ccb_h.path); 3547 xpt_free_ccb(done_ccb); 3548 } 3549 #endif /* (__FreeBSD_version < 901503) || \ 3550 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */ 3551 3552 void 3553 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm, 3554 struct mpssas_target *target, lun_id_t lun_id) 3555 { 3556 union ccb *ccb; 3557 path_id_t path_id; 3558 3559 /* 3560 * Set the INRESET flag for this target so that no I/O will be sent to 3561 * the target until the reset has completed. If an I/O request does 3562 * happen, the devq will be frozen. The CCB holds the path which is 3563 * used to release the devq. The devq is released and the CCB is freed 3564 * when the TM completes. 3565 */ 3566 ccb = xpt_alloc_ccb_nowait(); 3567 if (ccb) { 3568 path_id = cam_sim_path(sc->sassc->sim); 3569 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id, 3570 target->tid, lun_id) != CAM_REQ_CMP) { 3571 xpt_free_ccb(ccb); 3572 } else { 3573 tm->cm_ccb = ccb; 3574 tm->cm_targ = target; 3575 target->flags |= MPSSAS_TARGET_INRESET; 3576 } 3577 } 3578 } 3579 3580 int 3581 mpssas_startup(struct mps_softc *sc) 3582 { 3583 3584 /* 3585 * Send the port enable message and set the wait_for_port_enable flag. 3586 * This flag helps to keep the simq frozen until all discovery events 3587 * are processed. 
3588 */ 3589 sc->wait_for_port_enable = 1; 3590 mpssas_send_portenable(sc); 3591 return (0); 3592 } 3593 3594 static int 3595 mpssas_send_portenable(struct mps_softc *sc) 3596 { 3597 MPI2_PORT_ENABLE_REQUEST *request; 3598 struct mps_command *cm; 3599 3600 MPS_FUNCTRACE(sc); 3601 3602 if ((cm = mps_alloc_command(sc)) == NULL) 3603 return (EBUSY); 3604 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req; 3605 request->Function = MPI2_FUNCTION_PORT_ENABLE; 3606 request->MsgFlags = 0; 3607 request->VP_ID = 0; 3608 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 3609 cm->cm_complete = mpssas_portenable_complete; 3610 cm->cm_data = NULL; 3611 cm->cm_sge = NULL; 3612 3613 mps_map_command(sc, cm); 3614 mps_dprint(sc, MPS_XINFO, 3615 "mps_send_portenable finished cm %p req %p complete %p\n", 3616 cm, cm->cm_req, cm->cm_complete); 3617 return (0); 3618 } 3619 3620 static void 3621 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm) 3622 { 3623 MPI2_PORT_ENABLE_REPLY *reply; 3624 struct mpssas_softc *sassc; 3625 3626 MPS_FUNCTRACE(sc); 3627 sassc = sc->sassc; 3628 3629 /* 3630 * Currently there should be no way we can hit this case. It only 3631 * happens when we have a failure to allocate chain frames, and 3632 * port enable commands don't have S/G lists. 3633 */ 3634 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { 3635 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! 
" 3636 "This should not happen!\n", __func__, cm->cm_flags); 3637 } 3638 3639 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply; 3640 if (reply == NULL) 3641 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n"); 3642 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) != 3643 MPI2_IOCSTATUS_SUCCESS) 3644 mps_dprint(sc, MPS_FAULT, "Portenable failed\n"); 3645 3646 mps_free_command(sc, cm); 3647 if (sc->mps_ich.ich_arg != NULL) { 3648 mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n"); 3649 config_intrhook_disestablish(&sc->mps_ich); 3650 sc->mps_ich.ich_arg = NULL; 3651 } 3652 3653 /* 3654 * Get WarpDrive info after discovery is complete but before the scan 3655 * starts. At this point, all devices are ready to be exposed to the 3656 * OS. If devices should be hidden instead, take them out of the 3657 * 'targets' array before the scan. The devinfo for a disk will have 3658 * some info and a volume's will be 0. Use that to remove disks. 3659 */ 3660 mps_wd_config_pages(sc); 3661 3662 /* 3663 * Done waiting for port enable to complete. Decrement the refcount. 3664 * If refcount is 0, discovery is complete and a rescan of the bus can 3665 * take place. Since the simq was explicitly frozen before port 3666 * enable, it must be explicitly released here to keep the 3667 * freeze/release count in sync. 
3668 */ 3669 sc->wait_for_port_enable = 0; 3670 sc->port_enable_complete = 1; 3671 wakeup(&sc->port_enable_complete); 3672 mpssas_startup_decrement(sassc); 3673 } 3674 3675 int 3676 mpssas_check_id(struct mpssas_softc *sassc, int id) 3677 { 3678 struct mps_softc *sc = sassc->sc; 3679 char *ids; 3680 char *name; 3681 3682 ids = &sc->exclude_ids[0]; 3683 while((name = strsep(&ids, ",")) != NULL) { 3684 if (name[0] == '\0') 3685 continue; 3686 if (strtol(name, NULL, 0) == (long)id) 3687 return (1); 3688 } 3689 3690 return (0); 3691 } 3692 3693 void 3694 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets) 3695 { 3696 struct mpssas_softc *sassc; 3697 struct mpssas_lun *lun, *lun_tmp; 3698 struct mpssas_target *targ; 3699 int i; 3700 3701 sassc = sc->sassc; 3702 /* 3703 * The number of targets is based on IOC Facts, so free all of 3704 * the allocated LUNs for each target and then the target buffer 3705 * itself. 3706 */ 3707 for (i=0; i< maxtargets; i++) { 3708 targ = &sassc->targets[i]; 3709 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) { 3710 free(lun, M_MPT2); 3711 } 3712 } 3713 free(sassc->targets, M_MPT2); 3714 3715 sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets, 3716 M_MPT2, M_WAITOK|M_ZERO); 3717 if (!sassc->targets) { 3718 panic("%s failed to alloc targets with error %d\n", 3719 __func__, ENOMEM); 3720 } 3721 } 3722