1 /*- 2 * Copyright (c) 2009 Yahoo! Inc. 3 * Copyright (c) 2011-2015 LSI Corp. 4 * Copyright (c) 2013-2015 Avago Technologies 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
 *
 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* Communications core for Avago Technologies (LSI) MPT2 */

/* TODO Move headers to mpsvar */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/selinfo.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/queue.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/sbuf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <machine/stdarg.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#if __FreeBSD_version >= 900026
#include <cam/scsi/smp_all.h>
#endif

#include <dev/mps/mpi/mpi2_type.h>
#include <dev/mps/mpi/mpi2.h>
#include <dev/mps/mpi/mpi2_ioc.h>
#include <dev/mps/mpi/mpi2_sas.h>
#include <dev/mps/mpi/mpi2_cnfg.h>
#include <dev/mps/mpi/mpi2_init.h>
#include <dev/mps/mpi/mpi2_tool.h>
#include <dev/mps/mps_ioctl.h>
#include <dev/mps/mpsvar.h>
#include <dev/mps/mps_table.h>
#include <dev/mps/mps_sas.h>

#define MPSSAS_DISCOVERY_TIMEOUT	20
#define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */

/*
 * static array to check SCSI OpCode for EEDP protection bits
 *
 * Indexed by SCSI CDB opcode; a non-zero entry gives the EEDP flag
 * to OR into the SCSI IO request for that opcode.  The non-zero rows
 * correspond to the READ/WRITE/VERIFY/WRITE-AND-VERIFY 10/12/16
 * opcode groups (0x28/0x2a/0x2e/0x2f, 0x88/0x8a/0x8e/0x8f,
 * 0xa8/0xaa/0xae/0xaf, and 0x41/0x93 write-same region).
 */
#define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");

/* Forward declarations for the driver's internal entry points. */
static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
static void mpssas_poll(struct cam_sim *sim);
static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
    struct mps_command *cm);
static void mpssas_scsiio_timeout(void *data);
static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
    struct mps_command *cm, union ccb *ccb);
static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
#if __FreeBSD_version >= 900026
static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union
ccb *ccb, 135 uint64_t sasaddr); 136 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb); 137 #endif //FreeBSD_version >= 900026 138 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *); 139 static void mpssas_async(void *callback_arg, uint32_t code, 140 struct cam_path *path, void *arg); 141 #if (__FreeBSD_version < 901503) || \ 142 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) 143 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path, 144 struct ccb_getdev *cgd); 145 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb); 146 #endif 147 static int mpssas_send_portenable(struct mps_softc *sc); 148 static void mpssas_portenable_complete(struct mps_softc *sc, 149 struct mps_command *cm); 150 151 struct mpssas_target * 152 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle) 153 { 154 struct mpssas_target *target; 155 int i; 156 157 for (i = start; i < sassc->maxtargets; i++) { 158 target = &sassc->targets[i]; 159 if (target->handle == handle) 160 return (target); 161 } 162 163 return (NULL); 164 } 165 166 /* we need to freeze the simq during attach and diag reset, to avoid failing 167 * commands before device handles have been found by discovery. Since 168 * discovery involves reading config pages and possibly sending commands, 169 * discovery actions may continue even after we receive the end of discovery 170 * event, so refcount discovery actions instead of assuming we can unfreeze 171 * the simq when we get the event. 
 */
/*
 * Take one discovery reference; the first reference freezes the simq
 * (and, where supported, holds off boot) until discovery completes.
 * No-op unless we are in the startup phase.
 */
void
mpssas_startup_increment(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s freezing simq\n", __func__);
#if __FreeBSD_version >= 1000039
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}

/*
 * Release a queue freeze taken for error recovery, if one is pending.
 * Used on reinit paths where the normal unfreeze bookkeeping was bypassed.
 */
void
mpssas_release_simq_reinit(struct mpssas_softc *sassc)
{
	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		xpt_release_simq(sassc->sim, 1);
		mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
	}
}

/*
 * Drop one discovery reference; when the last reference goes away,
 * leave startup mode, release the simq, and either release the boot
 * hold (newer CAM) or force a wildcard rescan (older CAM).
 */
void
mpssas_startup_decrement(struct mpssas_softc *sassc)
{
	MPS_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mps_dprint(sassc->sc, MPS_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPSSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if __FreeBSD_version >= 1000039
			xpt_release_boot();
#else
			mpssas_rescan_target(sassc->sc, NULL);
#endif
		}
		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}

/* The firmware requires us to stop sending commands when we're doing task
 * management, so refcount the TMs and keep the simq frozen when any are in
 * use.
 */
/*
 * Allocate a high-priority command frame for a task management request.
 * Returns NULL when the high-priority pool is exhausted.
 */
struct mps_command *
mpssas_alloc_tm(struct mps_softc *sc)
{
	struct mps_command *tm;

	tm = mps_alloc_high_priority_command(sc);
	return tm;
}

/*
 * Return a TM command to the high-priority pool, undoing the per-device
 * devq freeze and freeing the path/CCB that were set up when the TM was
 * prepared.  Safe to call with tm == NULL.
 */
void
mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
{
	/* 0xFFFFFFFF stored in an int prints as -1: "no target known". */
	int target_id = 0xFFFFFFFF;

	if (tm == NULL)
		return;

	/*
	 * For TM's the devq is frozen for the device.  Unfreeze it here and
	 * free the resources used for freezing the devq.  Must clear the
	 * INRESET flag as well or scsi I/O will not work.
	 */
	if (tm->cm_targ != NULL) {
		tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
		target_id = tm->cm_targ->tid;
	}
	if (tm->cm_ccb) {
		mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
		    target_id);
		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
		xpt_free_path(tm->cm_ccb->ccb_h.path);
		xpt_free_ccb(tm->cm_ccb);
	}

	mps_free_high_priority_command(sc, tm);
}

/*
 * Schedule a CAM rescan of one target, or of the whole bus when
 * targ == NULL.  The target id is derived from the slot's position in
 * the targets[] array.
 */
void
mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
{
	struct mpssas_softc *sassc = sc->sassc;
	path_id_t pathid;
	target_id_t targetid;
	union ccb *ccb;

	MPS_FUNCTRACE(sc);
	pathid = cam_sim_path(sassc->sim);
	if (targ == NULL)
		targetid = CAM_TARGET_WILDCARD;
	else
		targetid = targ - sassc->targets;

	/*
	 * Allocate a CCB and schedule a rescan.
 */
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
		return;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
		xpt_free_ccb(ccb);
		return;
	}

	/* Wildcard target means the whole bus; otherwise scan one target. */
	if (targetid == CAM_TARGET_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else
		ccb->ccb_h.func_code = XPT_SCAN_TGT;

	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
	xpt_rescan(ccb);
}

/*
 * Debug-only formatter: prefixes a caller-supplied printf-style message
 * with the command's CAM path (or a "noperiph" tuple when no CCB is
 * attached) and its SMID, then emits it at the given debug level.
 * Returns early when 'level' is masked off in mps_debug.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	/* Fixed-size sbuf over str[]; output beyond 192 bytes is truncated. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ?
cm->cm_targ->tid : 0xFFFFFFFF, 344 cm->cm_lun); 345 } 346 347 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID); 348 sbuf_vprintf(&sb, fmt, ap); 349 sbuf_finish(&sb); 350 mps_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb)); 351 352 va_end(ap); 353 } 354 355 356 static void 357 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm) 358 { 359 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 360 struct mpssas_target *targ; 361 uint16_t handle; 362 363 MPS_FUNCTRACE(sc); 364 365 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 366 handle = (uint16_t)(uintptr_t)tm->cm_complete_data; 367 targ = tm->cm_targ; 368 369 if (reply == NULL) { 370 /* XXX retry the remove after the diag reset completes? */ 371 mps_dprint(sc, MPS_FAULT, 372 "%s NULL reply resetting device 0x%04x\n", __func__, 373 handle); 374 mpssas_free_tm(sc, tm); 375 return; 376 } 377 378 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) != 379 MPI2_IOCSTATUS_SUCCESS) { 380 mps_dprint(sc, MPS_ERROR, 381 "IOCStatus = 0x%x while resetting device 0x%x\n", 382 le16toh(reply->IOCStatus), handle); 383 } 384 385 mps_dprint(sc, MPS_XINFO, 386 "Reset aborted %u commands\n", reply->TerminationCount); 387 mps_free_reply(sc, tm->cm_reply_data); 388 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */ 389 390 mps_dprint(sc, MPS_XINFO, 391 "clearing target %u handle 0x%04x\n", targ->tid, handle); 392 393 /* 394 * Don't clear target if remove fails because things will get confusing. 395 * Leave the devname and sasaddr intact so that we know to avoid reusing 396 * this target id if possible, and so we can assign the same target id 397 * to this device if it comes back in the future. 
 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		/* targ was already loaded above; this reassignment is benign */
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
	}

	mpssas_free_tm(sc, tm);
}


/*
 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
 * Otherwise Volume Delete is same as Bare Drive Removal.
 */
/*
 * Begin tearing down a RAID volume identified by firmware handle:
 * mark the target for removal, schedule a rescan, and fire a
 * target-reset TM whose completion (mpssas_remove_volume) clears the
 * target slot.
 */
void
mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ = NULL;

	MPS_FUNCTRACE(sassc->sc);
	sc = sassc->sc;

#ifdef WD_SUPPORT
	/*
	 * If this is a WD controller, determine if the disk should be exposed
	 * to the OS or not.  If disk should be exposed, return from this
	 * function without doing anything.
	 */
	if (sc->WD_available && (sc->WD_hide_expose ==
	    MPS_WD_EXPOSE_ALWAYS)) {
		return;
	}
#endif //WD_SUPPORT

	targ = mpssas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device?
*/ 447 mps_dprint(sc, MPS_ERROR, 448 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle); 449 return; 450 } 451 452 targ->flags |= MPSSAS_TARGET_INREMOVAL; 453 454 cm = mpssas_alloc_tm(sc); 455 if (cm == NULL) { 456 mps_dprint(sc, MPS_ERROR, 457 "%s: command alloc failure\n", __func__); 458 return; 459 } 460 461 mpssas_rescan_target(sc, targ); 462 463 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req; 464 req->DevHandle = targ->handle; 465 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 466 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 467 468 /* SAS Hard Link Reset / SATA Link Reset */ 469 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 470 471 cm->cm_targ = targ; 472 cm->cm_data = NULL; 473 cm->cm_desc.HighPriority.RequestFlags = 474 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 475 cm->cm_complete = mpssas_remove_volume; 476 cm->cm_complete_data = (void *)(uintptr_t)handle; 477 478 mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n", 479 __func__, targ->tid); 480 mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD); 481 482 mps_map_command(sc, cm); 483 } 484 485 /* 486 * The MPT2 firmware performs debounce on the link to avoid transient link 487 * errors and false removals. When it does decide that link has been lost 488 * and a device need to go away, it expects that the host will perform a 489 * target reset and then an op remove. The reset has the side-effect of 490 * aborting any outstanding requests for the device, which is required for 491 * the op-remove to succeed. It's not clear if the host should check for 492 * the device coming back alive after the reset. 
 */
/*
 * Begin tearing down a bare drive identified by firmware handle: mark
 * the target for removal, schedule a rescan, and send a target-reset TM.
 * The completion (mpssas_remove_device) then issues the
 * SAS_OP_REMOVE_DEVICE that the firmware requires after the reset.
 */
void
mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ = NULL;

	MPS_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mpssas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mps_dprint(sc, MPS_ERROR,
		    "%s : invalid handle 0x%x \n", __func__, handle);
		return;
	}

	targ->flags |= MPSSAS_TARGET_INREMOVAL;

	cm = mpssas_alloc_tm(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: command alloc failure\n", __func__);
		return;
	}

	mpssas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	cm->cm_complete = mpssas_remove_device;
	cm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);

	mps_map_command(sc, cm);
}

/*
 * Completion handler for the target-reset TM from mpssas_prepare_remove().
 * Reuses the same command frame to issue the SAS_OP_REMOVE_DEVICE IO-unit
 * control request, then fails back any commands still queued on the target.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ =
tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply resetting device 0x%04x\n", __func__,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_ERROR,
		    "IOCStatus = 0x%x while resetting device 0x%x\n",
		    le16toh(reply->IOCStatus), handle);
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	/*
	 * The reset should have flushed the target's queue; fail back any
	 * stragglers as DEV_NOT_THERE.  Note that 'tm' is deliberately
	 * reused here as the loop iterator - the TM itself has already
	 * been re-dispatched via mps_map_command() above.
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mpssas_scsiio_complete(sc, tm);
	}
}

/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request issued by
 * mpssas_remove_device().  On success, clears the target slot and frees
 * its LUN list so the slot may be reassigned later.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
665 */ 666 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) == 667 MPI2_IOCSTATUS_SUCCESS) { 668 targ = tm->cm_targ; 669 targ->handle = 0x0; 670 targ->encl_handle = 0x0; 671 targ->encl_slot = 0x0; 672 targ->exp_dev_handle = 0x0; 673 targ->phy_num = 0x0; 674 targ->linkrate = 0x0; 675 targ->devinfo = 0x0; 676 targ->flags = 0x0; 677 678 while(!SLIST_EMPTY(&targ->luns)) { 679 lun = SLIST_FIRST(&targ->luns); 680 SLIST_REMOVE_HEAD(&targ->luns, lun_link); 681 free(lun, M_MPT2); 682 } 683 } 684 685 686 mpssas_free_tm(sc, tm); 687 } 688 689 static int 690 mpssas_register_events(struct mps_softc *sc) 691 { 692 u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; 693 694 bzero(events, 16); 695 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); 696 setbit(events, MPI2_EVENT_SAS_DISCOVERY); 697 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE); 698 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE); 699 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW); 700 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST); 701 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE); 702 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST); 703 setbit(events, MPI2_EVENT_IR_VOLUME); 704 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK); 705 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS); 706 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED); 707 708 mps_register_events(sc, events, mpssas_evt_handler, NULL, 709 &sc->sassc->mpssas_eh); 710 711 return (0); 712 } 713 714 int 715 mps_attach_sas(struct mps_softc *sc) 716 { 717 struct mpssas_softc *sassc; 718 cam_status status; 719 int unit, error = 0; 720 721 MPS_FUNCTRACE(sc); 722 723 sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO); 724 if(!sassc) { 725 device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n", 726 __func__, __LINE__); 727 return (ENOMEM); 728 } 729 730 /* 731 * XXX MaxTargets could change during a reinit. 
Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	/* NOTE(review): M_WAITOK never returns NULL, so this is dead code */
	if(!sassc->targets) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		    __func__, __LINE__);
		free(sassc, M_MPT2);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* One SIMQ slot per request frame so CAM can't overcommit the IOC. */
	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
		    error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	/* Takes the first discovery reference, freezing the simq. */
	mpssas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED;
#else
		event = AC_FOUND_DEVICE;
#endif
		status = xpt_register_async(event, mpssas_async, sc,
		    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);
	return (error);
}

/*
 * Undo mps_attach_sas(): deregister events and async handlers, tear
 * down the SIM/SIMQ and bus registration, and free the per-target LUN
 * lists and the sassc itself.  Idempotent when sc->sassc is NULL.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Balance the freeze taken during attach if discovery never finished. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}

/*
 * Discovery has finished: cancel the pending discovery-timeout callout,
 * if one was armed.
 */
void
mpssas_discovery_end(struct mpssas_softc *sassc)
{
	struct mps_softc *sc = sassc->sc;

	MPS_FUNCTRACE(sc);

	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
		callout_stop(&sassc->discovery_callout);

}

/*
 * Main CAM action dispatcher for the SIM.  Called with the driver mutex
 * held.  Cases that complete asynchronously (SCSI/SMP IO, device reset)
 * return without xpt_done(); everything else falls through to xpt_done()
 * at the bottom.
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;
943 cpi->initiator_id = sassc->maxtargets - 1; 944 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 945 strncpy(cpi->hba_vid, "Avago Tech (LSI)", HBA_IDLEN); 946 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 947 cpi->unit_number = cam_sim_unit(sim); 948 cpi->bus_id = cam_sim_bus(sim); 949 cpi->base_transfer_speed = 150000; 950 cpi->transport = XPORT_SAS; 951 cpi->transport_version = 0; 952 cpi->protocol = PROTO_SCSI; 953 cpi->protocol_version = SCSI_REV_SPC; 954 #if __FreeBSD_version >= 800001 955 /* 956 * XXX KDM where does this number come from? 957 */ 958 cpi->maxio = 256 * 1024; 959 #endif 960 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP); 961 break; 962 } 963 case XPT_GET_TRAN_SETTINGS: 964 { 965 struct ccb_trans_settings *cts; 966 struct ccb_trans_settings_sas *sas; 967 struct ccb_trans_settings_scsi *scsi; 968 struct mpssas_target *targ; 969 970 cts = &ccb->cts; 971 sas = &cts->xport_specific.sas; 972 scsi = &cts->proto_specific.scsi; 973 974 KASSERT(cts->ccb_h.target_id < sassc->maxtargets, 975 ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n", 976 cts->ccb_h.target_id)); 977 targ = &sassc->targets[cts->ccb_h.target_id]; 978 if (targ->handle == 0x0) { 979 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 980 break; 981 } 982 983 cts->protocol_version = SCSI_REV_SPC2; 984 cts->transport = XPORT_SAS; 985 cts->transport_version = 0; 986 987 sas->valid = CTS_SAS_VALID_SPEED; 988 switch (targ->linkrate) { 989 case 0x08: 990 sas->bitrate = 150000; 991 break; 992 case 0x09: 993 sas->bitrate = 300000; 994 break; 995 case 0x0a: 996 sas->bitrate = 600000; 997 break; 998 default: 999 sas->valid = 0; 1000 } 1001 1002 cts->protocol = PROTO_SCSI; 1003 scsi->valid = CTS_SCSI_VALID_TQ; 1004 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 1005 1006 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP); 1007 break; 1008 } 1009 case XPT_CALC_GEOMETRY: 1010 cam_calc_geometry(&ccb->ccg, /*extended*/1); 1011 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP); 1012 break; 1013 case XPT_RESET_DEV: 1014 
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* These are not supported by the hardware; pretend success. */
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}

/*
 * Broadcast an async event (e.g. AC_BUS_RESET, AC_SENT_BDR) to CAM on a
 * freshly-built path for the given target/lun, then free the path.
 */
static void
mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
    target_id_t target_id, lun_id_t lun_id)
{
	path_id_t path_id = cam_sim_path(sc->sassc->sim);
	struct cam_path *path;

	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
	    ac_code, target_id, (uintmax_t)lun_id);

	if (xpt_create_path(&path, NULL,
	    path_id, target_id, lun_id) != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
		    "notification\n");
		return;
	}

	xpt_async(ac_code, path, NULL);
	xpt_free_path(path);
}

/*
 * Diag-reset helper: walk every command slot and complete or wake up
 * anything still outstanding with a forced NULL reply, keeping
 * io_cmds_active in sync.
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
1087 } 1088 1089 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) { 1090 mpssas_log_command(cm, MPS_RECOVERY, 1091 "waking up cm %p state %x ccb %p for diag reset\n", 1092 cm, cm->cm_state, cm->cm_ccb); 1093 wakeup(cm); 1094 completed = 1; 1095 } 1096 1097 if (cm->cm_sc->io_cmds_active != 0) { 1098 cm->cm_sc->io_cmds_active--; 1099 } else { 1100 mps_dprint(cm->cm_sc, MPS_INFO, "Warning: " 1101 "io_cmds_active is out of sync - resynching to " 1102 "0\n"); 1103 } 1104 1105 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) { 1106 /* this should never happen, but if it does, log */ 1107 mpssas_log_command(cm, MPS_RECOVERY, 1108 "cm %p state %x flags 0x%x ccb %p during diag " 1109 "reset\n", cm, cm->cm_state, cm->cm_flags, 1110 cm->cm_ccb); 1111 } 1112 } 1113 } 1114 1115 void 1116 mpssas_handle_reinit(struct mps_softc *sc) 1117 { 1118 int i; 1119 1120 /* Go back into startup mode and freeze the simq, so that CAM 1121 * doesn't send any commands until after we've rediscovered all 1122 * targets and found the proper device handles for them. 1123 * 1124 * After the reset, portenable will trigger discovery, and after all 1125 * discovery-related activities have finished, the simq will be 1126 * released. 1127 */ 1128 mps_dprint(sc, MPS_INIT, "%s startup\n", __func__); 1129 sc->sassc->flags |= MPSSAS_IN_STARTUP; 1130 sc->sassc->flags |= MPSSAS_IN_DISCOVERY; 1131 mpssas_startup_increment(sc->sassc); 1132 1133 /* notify CAM of a bus reset */ 1134 mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD, 1135 CAM_LUN_WILDCARD); 1136 1137 /* complete and cleanup after all outstanding commands */ 1138 mpssas_complete_all_commands(sc); 1139 1140 mps_dprint(sc, MPS_INIT, 1141 "%s startup %u after command completion\n", __func__, 1142 sc->sassc->startup_refcount); 1143 1144 /* zero all the target handles, since they may change after the 1145 * reset, and we have to rediscover all the targets and use the new 1146 * handles. 
1147 */ 1148 for (i = 0; i < sc->sassc->maxtargets; i++) { 1149 if (sc->sassc->targets[i].outstanding != 0) 1150 mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n", 1151 i, sc->sassc->targets[i].outstanding); 1152 sc->sassc->targets[i].handle = 0x0; 1153 sc->sassc->targets[i].exp_dev_handle = 0x0; 1154 sc->sassc->targets[i].outstanding = 0; 1155 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET; 1156 } 1157 } 1158 1159 static void 1160 mpssas_tm_timeout(void *data) 1161 { 1162 struct mps_command *tm = data; 1163 struct mps_softc *sc = tm->cm_sc; 1164 1165 mtx_assert(&sc->mps_mtx, MA_OWNED); 1166 1167 mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY, 1168 "task mgmt %p timed out\n", tm); 1169 mps_reinit(sc); 1170 } 1171 1172 static void 1173 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm) 1174 { 1175 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 1176 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 1177 unsigned int cm_count = 0; 1178 struct mps_command *cm; 1179 struct mpssas_target *targ; 1180 1181 callout_stop(&tm->cm_callout); 1182 1183 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 1184 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 1185 targ = tm->cm_targ; 1186 1187 /* 1188 * Currently there should be no way we can hit this case. It only 1189 * happens when we have a failure to allocate chain frames, and 1190 * task management commands don't have S/G lists. 1191 * XXXSL So should it be an assertion? 1192 */ 1193 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { 1194 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! 
" 1195 "This should not happen!\n", __func__, tm->cm_flags); 1196 mpssas_free_tm(sc, tm); 1197 return; 1198 } 1199 1200 if (reply == NULL) { 1201 mpssas_log_command(tm, MPS_RECOVERY, 1202 "NULL reset reply for tm %p\n", tm); 1203 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) { 1204 /* this completion was due to a reset, just cleanup */ 1205 targ->tm = NULL; 1206 mpssas_free_tm(sc, tm); 1207 } 1208 else { 1209 /* we should have gotten a reply. */ 1210 mps_reinit(sc); 1211 } 1212 return; 1213 } 1214 1215 mpssas_log_command(tm, MPS_RECOVERY, 1216 "logical unit reset status 0x%x code 0x%x count %u\n", 1217 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), 1218 le32toh(reply->TerminationCount)); 1219 1220 /* See if there are any outstanding commands for this LUN. 1221 * This could be made more efficient by using a per-LU data 1222 * structure of some sort. 1223 */ 1224 TAILQ_FOREACH(cm, &targ->commands, cm_link) { 1225 if (cm->cm_lun == tm->cm_lun) 1226 cm_count++; 1227 } 1228 1229 if (cm_count == 0) { 1230 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO, 1231 "logical unit %u finished recovery after reset\n", 1232 tm->cm_lun, tm); 1233 1234 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, 1235 tm->cm_lun); 1236 1237 /* we've finished recovery for this logical unit. check and 1238 * see if some other logical unit has a timedout command 1239 * that needs to be processed. 1240 */ 1241 cm = TAILQ_FIRST(&targ->timedout_commands); 1242 if (cm) { 1243 mpssas_send_abort(sc, tm, cm); 1244 } 1245 else { 1246 targ->tm = NULL; 1247 mpssas_free_tm(sc, tm); 1248 } 1249 } 1250 else { 1251 /* if we still have commands for this LUN, the reset 1252 * effectively failed, regardless of the status reported. 1253 * Escalate to a target reset. 
1254 */ 1255 mpssas_log_command(tm, MPS_RECOVERY, 1256 "logical unit reset complete for tm %p, but still have %u command(s)\n", 1257 tm, cm_count); 1258 mpssas_send_reset(sc, tm, 1259 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET); 1260 } 1261 } 1262 1263 static void 1264 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm) 1265 { 1266 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 1267 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 1268 struct mpssas_target *targ; 1269 1270 callout_stop(&tm->cm_callout); 1271 1272 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 1273 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 1274 targ = tm->cm_targ; 1275 1276 /* 1277 * Currently there should be no way we can hit this case. It only 1278 * happens when we have a failure to allocate chain frames, and 1279 * task management commands don't have S/G lists. 1280 */ 1281 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { 1282 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! " 1283 "This should not happen!\n", __func__, tm->cm_flags); 1284 mpssas_free_tm(sc, tm); 1285 return; 1286 } 1287 1288 if (reply == NULL) { 1289 mpssas_log_command(tm, MPS_RECOVERY, 1290 "NULL reset reply for tm %p\n", tm); 1291 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) { 1292 /* this completion was due to a reset, just cleanup */ 1293 targ->tm = NULL; 1294 mpssas_free_tm(sc, tm); 1295 } 1296 else { 1297 /* we should have gotten a reply. */ 1298 mps_reinit(sc); 1299 } 1300 return; 1301 } 1302 1303 mpssas_log_command(tm, MPS_RECOVERY, 1304 "target reset status 0x%x code 0x%x count %u\n", 1305 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), 1306 le32toh(reply->TerminationCount)); 1307 1308 if (targ->outstanding == 0) { 1309 /* we've finished recovery for this target and all 1310 * of its logical units. 
1311 */ 1312 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO, 1313 "recovery finished after target reset\n"); 1314 1315 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, 1316 CAM_LUN_WILDCARD); 1317 1318 targ->tm = NULL; 1319 mpssas_free_tm(sc, tm); 1320 } 1321 else { 1322 /* after a target reset, if this target still has 1323 * outstanding commands, the reset effectively failed, 1324 * regardless of the status reported. escalate. 1325 */ 1326 mpssas_log_command(tm, MPS_RECOVERY, 1327 "target reset complete for tm %p, but still have %u command(s)\n", 1328 tm, targ->outstanding); 1329 mps_reinit(sc); 1330 } 1331 } 1332 1333 #define MPS_RESET_TIMEOUT 30 1334 1335 int 1336 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type) 1337 { 1338 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 1339 struct mpssas_target *target; 1340 int err; 1341 1342 target = tm->cm_targ; 1343 if (target->handle == 0) { 1344 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n", 1345 __func__, target->tid); 1346 return -1; 1347 } 1348 1349 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 1350 req->DevHandle = htole16(target->handle); 1351 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 1352 req->TaskType = type; 1353 1354 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) { 1355 /* XXX Need to handle invalid LUNs */ 1356 MPS_SET_LUN(req->LUN, tm->cm_lun); 1357 tm->cm_targ->logical_unit_resets++; 1358 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO, 1359 "sending logical unit reset\n"); 1360 tm->cm_complete = mpssas_logical_unit_reset_complete; 1361 mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun); 1362 } 1363 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) { 1364 /* 1365 * Target reset method = 1366 * SAS Hard Link Reset / SATA Link Reset 1367 */ 1368 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 1369 tm->cm_targ->target_resets++; 1370 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO, 1371 "sending target reset\n"); 1372 tm->cm_complete = 
		    mpssas_target_reset_complete;
		mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* TM requests carry no data; use the high-priority descriptor path. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
	    mpssas_tm_timeout, tm);

	err = mps_map_command(sc, tm);
	if (err)
		mpssas_log_command(tm, MPS_RECOVERY,
		    "error %d sending reset type %u\n",
		    err, type);

	return err;
}


/*
 * Completion handler for an ABORT TASK task management request.  If the
 * target's timed-out queue is empty, recovery is finished.  If the head of
 * the queue is a different command than the one we just aborted, keep
 * going by aborting it too.  If it is the same command, the abort did not
 * take effect; escalate to a logical unit reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply.
			 */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
1469 */ 1470 mpssas_log_command(tm, MPS_RECOVERY, 1471 "abort failed for TaskMID %u tm %p\n", 1472 le16toh(req->TaskMID), tm); 1473 1474 mpssas_send_reset(sc, tm, 1475 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET); 1476 } 1477 } 1478 1479 #define MPS_ABORT_TIMEOUT 5 1480 1481 static int 1482 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm) 1483 { 1484 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 1485 struct mpssas_target *targ; 1486 int err; 1487 1488 targ = cm->cm_targ; 1489 if (targ->handle == 0) { 1490 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n", 1491 __func__, cm->cm_ccb->ccb_h.target_id); 1492 return -1; 1493 } 1494 1495 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO, 1496 "Aborting command %p\n", cm); 1497 1498 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 1499 req->DevHandle = htole16(targ->handle); 1500 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 1501 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK; 1502 1503 /* XXX Need to handle invalid LUNs */ 1504 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun); 1505 1506 req->TaskMID = htole16(cm->cm_desc.Default.SMID); 1507 1508 tm->cm_data = NULL; 1509 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 1510 tm->cm_complete = mpssas_abort_complete; 1511 tm->cm_complete_data = (void *)tm; 1512 tm->cm_targ = cm->cm_targ; 1513 tm->cm_lun = cm->cm_lun; 1514 1515 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz, 1516 mpssas_tm_timeout, tm); 1517 1518 targ->aborts++; 1519 1520 mps_dprint(sc, MPS_INFO, "Sending reset from %s for target ID %d\n", 1521 __func__, targ->tid); 1522 mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun); 1523 1524 err = mps_map_command(sc, tm); 1525 if (err) 1526 mpssas_log_command(tm, MPS_RECOVERY, 1527 "error %d sending abort for cm %p SMID %u\n", 1528 err, cm, req->TaskMID); 1529 return err; 1530 } 1531 1532 static void 1533 mpssas_scsiio_timeout(void *data) 1534 { 1535 struct mps_softc *sc; 1536 struct 
mps_command *cm; 1537 struct mpssas_target *targ; 1538 1539 cm = (struct mps_command *)data; 1540 sc = cm->cm_sc; 1541 1542 MPS_FUNCTRACE(sc); 1543 mtx_assert(&sc->mps_mtx, MA_OWNED); 1544 1545 mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc); 1546 1547 /* 1548 * Run the interrupt handler to make sure it's not pending. This 1549 * isn't perfect because the command could have already completed 1550 * and been re-used, though this is unlikely. 1551 */ 1552 mps_intr_locked(sc); 1553 if (cm->cm_state == MPS_CM_STATE_FREE) { 1554 mpssas_log_command(cm, MPS_XINFO, 1555 "SCSI command %p almost timed out\n", cm); 1556 return; 1557 } 1558 1559 if (cm->cm_ccb == NULL) { 1560 mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n"); 1561 return; 1562 } 1563 1564 mpssas_log_command(cm, MPS_INFO, "command timeout cm %p ccb %p\n", 1565 cm, cm->cm_ccb); 1566 1567 targ = cm->cm_targ; 1568 targ->timeouts++; 1569 1570 /* XXX first, check the firmware state, to see if it's still 1571 * operational. if not, do a diag reset. 1572 */ 1573 mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT); 1574 cm->cm_state = MPS_CM_STATE_TIMEDOUT; 1575 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery); 1576 1577 if (targ->tm != NULL) { 1578 /* target already in recovery, just queue up another 1579 * timedout command to be processed later. 1580 */ 1581 mps_dprint(sc, MPS_RECOVERY, 1582 "queued timedout cm %p for processing by tm %p\n", 1583 cm, targ->tm); 1584 } 1585 else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) { 1586 mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n", 1587 cm, targ->tm); 1588 1589 /* start recovery by aborting the first timedout command */ 1590 mpssas_send_abort(sc, targ->tm, cm); 1591 } 1592 else { 1593 /* XXX queue this target up for recovery once a TM becomes 1594 * available. The firmware only has a limited number of 1595 * HighPriority credits for the high priority requests used 1596 * for task management, and we ran out. 
1597 * 1598 * Isilon: don't worry about this for now, since we have 1599 * more credits than disks in an enclosure, and limit 1600 * ourselves to one TM per target for recovery. 1601 */ 1602 mps_dprint(sc, MPS_RECOVERY, 1603 "timedout cm %p failed to allocate a tm\n", cm); 1604 } 1605 1606 } 1607 1608 static void 1609 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb) 1610 { 1611 MPI2_SCSI_IO_REQUEST *req; 1612 struct ccb_scsiio *csio; 1613 struct mps_softc *sc; 1614 struct mpssas_target *targ; 1615 struct mpssas_lun *lun; 1616 struct mps_command *cm; 1617 uint8_t i, lba_byte, *ref_tag_addr; 1618 uint16_t eedp_flags; 1619 uint32_t mpi_control; 1620 1621 sc = sassc->sc; 1622 MPS_FUNCTRACE(sc); 1623 mtx_assert(&sc->mps_mtx, MA_OWNED); 1624 1625 csio = &ccb->csio; 1626 KASSERT(csio->ccb_h.target_id < sassc->maxtargets, 1627 ("Target %d out of bounds in XPT_SCSI_IO\n", 1628 csio->ccb_h.target_id)); 1629 targ = &sassc->targets[csio->ccb_h.target_id]; 1630 mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags); 1631 if (targ->handle == 0x0) { 1632 mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n", 1633 __func__, csio->ccb_h.target_id); 1634 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 1635 xpt_done(ccb); 1636 return; 1637 } 1638 if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) { 1639 mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO " 1640 "supported %u\n", __func__, csio->ccb_h.target_id); 1641 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 1642 xpt_done(ccb); 1643 return; 1644 } 1645 /* 1646 * Sometimes, it is possible to get a command that is not "In 1647 * Progress" and was actually aborted by the upper layer. Check for 1648 * this here and complete the command without error. 
1649 */ 1650 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) { 1651 mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for " 1652 "target %u\n", __func__, csio->ccb_h.target_id); 1653 xpt_done(ccb); 1654 return; 1655 } 1656 /* 1657 * If devinfo is 0 this will be a volume. In that case don't tell CAM 1658 * that the volume has timed out. We want volumes to be enumerated 1659 * until they are deleted/removed, not just failed. 1660 */ 1661 if (targ->flags & MPSSAS_TARGET_INREMOVAL) { 1662 if (targ->devinfo == 0) 1663 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP); 1664 else 1665 mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT); 1666 xpt_done(ccb); 1667 return; 1668 } 1669 1670 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) { 1671 mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__); 1672 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 1673 xpt_done(ccb); 1674 return; 1675 } 1676 1677 /* 1678 * If target has a reset in progress, freeze the devq and return. The 1679 * devq will be released when the TM reset is finished. 
1680 */ 1681 if (targ->flags & MPSSAS_TARGET_INRESET) { 1682 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; 1683 mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n", 1684 __func__, targ->tid); 1685 xpt_freeze_devq(ccb->ccb_h.path, 1); 1686 xpt_done(ccb); 1687 return; 1688 } 1689 1690 cm = mps_alloc_command(sc); 1691 if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) { 1692 if (cm != NULL) { 1693 mps_free_command(sc, cm); 1694 } 1695 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) { 1696 xpt_freeze_simq(sassc->sim, 1); 1697 sassc->flags |= MPSSAS_QUEUE_FROZEN; 1698 } 1699 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1700 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 1701 xpt_done(ccb); 1702 return; 1703 } 1704 1705 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req; 1706 bzero(req, sizeof(*req)); 1707 req->DevHandle = htole16(targ->handle); 1708 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 1709 req->MsgFlags = 0; 1710 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr); 1711 req->SenseBufferLength = MPS_SENSE_LEN; 1712 req->SGLFlags = 0; 1713 req->ChainOffset = 0; 1714 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */ 1715 req->SGLOffset1= 0; 1716 req->SGLOffset2= 0; 1717 req->SGLOffset3= 0; 1718 req->SkipCount = 0; 1719 req->DataLength = htole32(csio->dxfer_len); 1720 req->BidirectionalDataLength = 0; 1721 req->IoFlags = htole16(csio->cdb_len); 1722 req->EEDPFlags = 0; 1723 1724 /* Note: BiDirectional transfers are not supported */ 1725 switch (csio->ccb_h.flags & CAM_DIR_MASK) { 1726 case CAM_DIR_IN: 1727 mpi_control = MPI2_SCSIIO_CONTROL_READ; 1728 cm->cm_flags |= MPS_CM_FLAGS_DATAIN; 1729 break; 1730 case CAM_DIR_OUT: 1731 mpi_control = MPI2_SCSIIO_CONTROL_WRITE; 1732 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT; 1733 break; 1734 case CAM_DIR_NONE: 1735 default: 1736 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; 1737 break; 1738 } 1739 1740 if (csio->cdb_len == 32) 1741 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; 1742 /* 1743 * It looks like the 
hardware doesn't require an explicit tag 1744 * number for each transaction. SAM Task Management not supported 1745 * at the moment. 1746 */ 1747 switch (csio->tag_action) { 1748 case MSG_HEAD_OF_Q_TAG: 1749 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ; 1750 break; 1751 case MSG_ORDERED_Q_TAG: 1752 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ; 1753 break; 1754 case MSG_ACA_TASK: 1755 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ; 1756 break; 1757 case CAM_TAG_ACTION_NONE: 1758 case MSG_SIMPLE_Q_TAG: 1759 default: 1760 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; 1761 break; 1762 } 1763 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits; 1764 req->Control = htole32(mpi_control); 1765 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) { 1766 mps_free_command(sc, cm); 1767 mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID); 1768 xpt_done(ccb); 1769 return; 1770 } 1771 1772 if (csio->ccb_h.flags & CAM_CDB_POINTER) 1773 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len); 1774 else 1775 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len); 1776 req->IoFlags = htole16(csio->cdb_len); 1777 1778 /* 1779 * Check if EEDP is supported and enabled. If it is then check if the 1780 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and 1781 * is formatted for EEDP support. If all of this is true, set CDB up 1782 * for EEDP transfer. 
1783 */ 1784 eedp_flags = op_code_prot[req->CDB.CDB32[0]]; 1785 if (sc->eedp_enabled && eedp_flags) { 1786 SLIST_FOREACH(lun, &targ->luns, lun_link) { 1787 if (lun->lun_id == csio->ccb_h.target_lun) { 1788 break; 1789 } 1790 } 1791 1792 if ((lun != NULL) && (lun->eedp_formatted)) { 1793 req->EEDPBlockSize = htole16(lun->eedp_block_size); 1794 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 1795 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | 1796 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD); 1797 req->EEDPFlags = htole16(eedp_flags); 1798 1799 /* 1800 * If CDB less than 32, fill in Primary Ref Tag with 1801 * low 4 bytes of LBA. If CDB is 32, tag stuff is 1802 * already there. Also, set protection bit. FreeBSD 1803 * currently does not support CDBs bigger than 16, but 1804 * the code doesn't hurt, and will be here for the 1805 * future. 1806 */ 1807 if (csio->cdb_len != 32) { 1808 lba_byte = (csio->cdb_len == 16) ? 6 : 2; 1809 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32. 1810 PrimaryReferenceTag; 1811 for (i = 0; i < 4; i++) { 1812 *ref_tag_addr = 1813 req->CDB.CDB32[lba_byte + i]; 1814 ref_tag_addr++; 1815 } 1816 req->CDB.EEDP32.PrimaryReferenceTag = 1817 htole32(req->CDB.EEDP32.PrimaryReferenceTag); 1818 req->CDB.EEDP32.PrimaryApplicationTagMask = 1819 0xFFFF; 1820 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) | 1821 0x20; 1822 } else { 1823 eedp_flags |= 1824 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG; 1825 req->EEDPFlags = htole16(eedp_flags); 1826 req->CDB.CDB32[10] = (req->CDB.CDB32[10] & 1827 0x1F) | 0x20; 1828 } 1829 } 1830 } 1831 1832 cm->cm_length = csio->dxfer_len; 1833 if (cm->cm_length != 0) { 1834 cm->cm_data = ccb; 1835 cm->cm_flags |= MPS_CM_FLAGS_USE_CCB; 1836 } else { 1837 cm->cm_data = NULL; 1838 } 1839 cm->cm_sge = &req->SGL; 1840 cm->cm_sglsize = (32 - 24) * 4; 1841 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; 1842 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle); 1843 cm->cm_complete = mpssas_scsiio_complete; 1844 cm->cm_complete_data = 
	    ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
		}
	}

	/* Per-command timeout, driven by the CCB's timeout in milliseconds. */
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mpssas_scsiio_timeout, cm, 0);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}

/*
 * Log a human-readable description of a task management response code
 * (MPI2_SCSITASKMGMT_RSP_*) at MPS_XINFO level.  Diagnostic output only;
 * no state is changed.
 */
static void
mps_response_code(struct mps_softc *sc, u8 response_code)
{
	char *desc;

	switch (response_code) {
	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
		desc = "task management request completed";
		break;
	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
		desc = "invalid frame";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
		desc = "task management request not supported";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
		desc = "task management request failed";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
		desc = "task management request succeeded";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
		desc = "invalid lun";
		break;
	case 0xA:
		/* 0xA: overlapped-tag code with no MPI2 symbolic name here. */
		desc = "overlapped tag attempted";
		break;
	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
		desc = "task queued, however not sent to target";
		break;
	default:
		desc = "unknown";
		break;
	}
	mps_dprint(sc, MPS_XINFO,
	    "response_code(0x%01x): %s\n",
		response_code, desc);
}
/**
 * mps_sc_failed_io_info - translate a non-successful SCSI_IO reply into
 * human-readable diagnostics (IOC status, SCSI status, SCSI state and,
 * when valid, the sense data and response code).  Logging only.
 */
static void
mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
    Mpi2SCSIIOReply_t *mpi_reply)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/* scratch buffer in the softc; serialized by the controller lock */
	char *desc_scsi_state = sc->tmp_string;
	u32 log_info = le32toh(mpi_reply->IOCLogInfo);

	/* 0x31170000: suppressed log-info code — intentionally not logged.
	 * NOTE(review): meaning taken from the magic constant only; confirm
	 * against the firmware log-info documentation. */
	if (log_info == 0x31170000)
		return;

	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/*
	 * Build the state description by appending one token per set bit.
	 * The " " literal is only installed when scsi_state == 0, in which
	 * case none of the strcat branches below can fire.
	 */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
2038 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) 2039 strcat(desc_scsi_state, "no status "); 2040 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED) 2041 strcat(desc_scsi_state, "autosense failed "); 2042 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) 2043 strcat(desc_scsi_state, "autosense valid "); 2044 2045 mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n", 2046 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status); 2047 /* We can add more detail about underflow data here 2048 * TO-DO 2049 * */ 2050 mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), " 2051 "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status, 2052 desc_scsi_state, scsi_state); 2053 2054 if (sc->mps_debug & MPS_XINFO && 2055 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { 2056 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n"); 2057 scsi_sense_print(csio); 2058 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n"); 2059 } 2060 2061 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { 2062 response_info = le32toh(mpi_reply->ResponseInfo); 2063 response_bytes = (u8 *)&response_info; 2064 mps_response_code(sc,response_bytes[0]); 2065 } 2066 } 2067 2068 static void 2069 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm) 2070 { 2071 MPI2_SCSI_IO_REPLY *rep; 2072 union ccb *ccb; 2073 struct ccb_scsiio *csio; 2074 struct mpssas_softc *sassc; 2075 struct scsi_vpd_supported_page_list *vpd_list = NULL; 2076 u8 *TLR_bits, TLR_on; 2077 int dir = 0, i; 2078 u16 alloc_len; 2079 struct mpssas_target *target; 2080 target_id_t target_id; 2081 2082 MPS_FUNCTRACE(sc); 2083 mps_dprint(sc, MPS_TRACE, 2084 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm, 2085 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply, 2086 cm->cm_targ->outstanding); 2087 2088 callout_stop(&cm->cm_callout); 2089 mtx_assert(&sc->mps_mtx, MA_OWNED); 2090 2091 sassc = sc->sassc; 2092 ccb = cm->cm_complete_data; 2093 csio = &ccb->csio; 2094 target_id = 
	    csio->ccb_h.target_id;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;

	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Per-target accounting; the command leaves the active list here. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

	/*
	 * Extra logging for completions that arrive while error recovery is
	 * in progress: the command previously timed out, a task management
	 * request is outstanding for this target, or a diag reset occurred.
	 */
	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
		mpssas_log_command(cm, MPS_RECOVERY,
		    "reset completed cm %p ccb %p\n",
		    cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
			mps_dprint(sc, MPS_XINFO, "Error sending command, "
			    "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
				mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
				mps_dprint(sc, MPS_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
		 */
		if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mps_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mpssas_log_command(cm, MPS_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/*
	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
	 * Volume if an error occurred (normal I/O retry).  Use the original
	 * CCB, but set a flag that this will be a retry so that it's sent to
	 * the original volume.  Free the command but reuse the CCB.
	 */
	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
		mps_free_command(sc, cm);
		ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
		mpssas_action_scsiio(sassc, ccb);
		return;
	} else
		ccb->ccb_h.sim_priv.entries[0].field = 0;

	/* Map the IOC completion status onto a CAM CCB status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mpssas_log_command(cm, MPS_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) &
		    MPI2_SCSI_RI_MASK_REASONCODE) ==
		    MPS_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			/* Copy the firmware-supplied sense data into the CCB,
			 * clamped to both the reply's SenseCount and the
			 * space the CCB provides. */
			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
				    returned_sense_len;
			else
				ccb->csio.sense_resid = 0;

			sense_len = min(returned_sense_len,
			    ccb->csio.sense_len - ccb->csio.sense_resid);
			bzero(&ccb->csio.sense_data,
			    sizeof(ccb->csio.sense_data));
			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
2328 */ 2329 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) && 2330 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) && 2331 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) && 2332 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) && 2333 (csio->data_ptr != NULL) && 2334 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) && 2335 (sc->control_TLR) && 2336 (sc->mapping_table[target_id].device_info & 2337 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) { 2338 vpd_list = (struct scsi_vpd_supported_page_list *) 2339 csio->data_ptr; 2340 TLR_bits = &sc->mapping_table[target_id].TLR_bits; 2341 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR; 2342 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON; 2343 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) + 2344 csio->cdb_io.cdb_bytes[4]; 2345 alloc_len -= csio->resid; 2346 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) { 2347 if (vpd_list->list[i] == 0x90) { 2348 *TLR_bits = TLR_on; 2349 break; 2350 } 2351 } 2352 } 2353 2354 /* 2355 * If this is a SATA direct-access end device, mark it so that 2356 * a SCSI StartStopUnit command will be sent to it when the 2357 * driver is being shutdown. 2358 */ 2359 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) && 2360 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) && 2361 (sc->mapping_table[target_id].device_info & 2362 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) && 2363 ((sc->mapping_table[target_id].device_info & 2364 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) == 2365 MPI2_SAS_DEVICE_INFO_END_DEVICE)) { 2366 target = &sassc->targets[target_id]; 2367 target->supports_SSU = TRUE; 2368 mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n", 2369 target_id); 2370 } 2371 break; 2372 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE: 2373 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 2374 /* 2375 * If devinfo is 0 this will be a volume. In that case don't 2376 * tell CAM that the volume is not there. We want volumes to 2377 * be enumerated until they are deleted/removed, not just 2378 * failed. 
		 */
		if (cm->cm_targ->devinfo == 0)
			mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mps_print_scsiio_cmd(sc, cm);
		mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
			mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * These can sometimes be transient transport-related
		 * errors, and sometimes persistent drive-related errors.
		 * We used to retry these without decrementing the retry
		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
		 * we hit a persistent drive problem that returns one of
		 * these error codes, we would retry indefinitely.  So,
		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
		 * count and avoid infinite retries.  We're taking the
		 * potential risk of flagging false failures in the event
		 * of a topology-related error (e.g. a SAS expander problem
		 * causes a command addressed to a drive to fail), but
		 * avoiding getting into an infinite retry loop.
		 */
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		mpssas_log_command(cm, MPS_INFO,
		    "terminated ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		/* Catch-all: fail the command and let CAM's retry logic
		 * decide what to do with it. */
		mpssas_log_command(cm, MPS_XINFO,
		    "completed ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

	mps_sc_failed_io_info(sc,csio,rep);

	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
		mps_dprint(sc, MPS_XINFO, "Command completed, "
		    "unfreezing SIM queue\n");
	}

	if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mps_free_command(sc, cm);
	xpt_done(ccb);
}

/* All Request reached here are Endian safe */
/*
 * Rewrite an I/O addressed to an IR (RAID) volume so that, when it lies
 * entirely within one stripe of one member disk, it is sent directly to
 * that PhysDisk's DevHandle with the translated physical LBA.  Otherwise
 * the request is left untouched and goes to the volume itself.
 */
static void
mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
    union ccb *ccb) {
	pMpi2SCSIIORequest_t pIO_req;
	struct mps_softc *sc = sassc->sc;
	uint64_t virtLBA;
	uint32_t physLBA, stripe_offset, stripe_unit;
	uint32_t io_size, column;
	uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;

	/*
	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
	 *
Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
	 * bit different than the 10/16 CDBs, handle them separately.
	 */
	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
	CDB = pIO_req->CDB.CDB32;

	/*
	 * Handle 6 byte CDBs.
	 */
	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
	    (CDB[0] == WRITE_6))) {
		/*
		 * Get the transfer size in blocks.
		 */
		io_size = (cm->cm_length >> sc->DD_block_exponent);

		/*
		 * Get virtual LBA given in the CDB.  For 6-byte CDBs the LBA
		 * is 21 bits: the low 5 bits of byte 1, then bytes 2 and 3.
		 */
		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];

		/*
		 * Check that LBA range for I/O does not exceed volume's
		 * MaxLBA.
		 */
		if ((virtLBA + (uint64_t)io_size - 1) <=
		    sc->DD_max_lba) {
			/*
			 * Check if the I/O crosses a stripe boundary.  If not,
			 * translate the virtual LBA to a physical LBA and set
			 * the DevHandle for the PhysDisk to be used.  If it
			 * does cross a boundary, do normal I/O.  To get the
			 * right DevHandle to use, get the map number for the
			 * column, then use that map number to look up the
			 * DevHandle of the PhysDisk.
			 */
			stripe_offset = (uint32_t)virtLBA &
			    (sc->DD_stripe_size - 1);
			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
				physLBA = (uint32_t)virtLBA >>
				    sc->DD_stripe_exponent;
				stripe_unit = physLBA / sc->DD_num_phys_disks;
				column = physLBA % sc->DD_num_phys_disks;
				pIO_req->DevHandle =
				    htole16(sc->DD_column_map[column].dev_handle);
				/* NOTE(review): DevHandle was just converted
				 * with htole16() above, so this copy carries
				 * the little-endian value into the request
				 * descriptor — presumably that is what the
				 * hardware expects; confirm against the MPI2
				 * descriptor spec. */
				cm->cm_desc.SCSIIO.DevHandle =
				    pIO_req->DevHandle;

				/* Rewrite the CDB's 21-bit LBA in place with
				 * the translated physical LBA (big-endian). */
				physLBA = (stripe_unit <<
				    sc->DD_stripe_exponent) + stripe_offset;
				ptrLBA = &pIO_req->CDB.CDB32[1];
				physLBA_byte = (uint8_t)(physLBA >> 16);
				*ptrLBA = physLBA_byte;
				ptrLBA = &pIO_req->CDB.CDB32[2];
				physLBA_byte = (uint8_t)(physLBA >> 8);
				*ptrLBA = physLBA_byte;
				ptrLBA = &pIO_req->CDB.CDB32[3];
				physLBA_byte = (uint8_t)physLBA;
				*ptrLBA = physLBA_byte;

				/*
				 * Set flag that Direct Drive I/O is
				 * being done.
				 */
				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
			}
		}
		return;
	}

	/*
	 * Handle 10, 12 or 16 byte CDBs.
	 */
	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
	    (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
	    (CDB[0] == WRITE_12))) {
		/*
		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
		 * are 0.  If not, this is accessing beyond 2TB so handle it in
		 * the else section.  10-byte and 12-byte CDB's are OK.
		 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
		 * ready to accept 12byte CDB for Direct IOs.
		 */
		if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
		    (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
			/*
			 * Get the transfer size in blocks.
			 */
			io_size = (cm->cm_length >> sc->DD_block_exponent);

			/*
			 * Get virtual LBA.  Point to correct lower 4 bytes of
			 * LBA in the CDB depending on command.  The LBA
			 * starts at byte 2 for 10/12-byte CDBs and at byte 6
			 * (the low 32 bits) for 16-byte CDBs.
			 */
			lba_idx = ((CDB[0] == READ_12) ||
			    (CDB[0] == WRITE_12) ||
			    (CDB[0] == READ_10) ||
			    (CDB[0] == WRITE_10))? 2 : 6;
			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
			    ((uint64_t)CDB[lba_idx + 1] << 16) |
			    ((uint64_t)CDB[lba_idx + 2] << 8) |
			    (uint64_t)CDB[lba_idx + 3];

			/*
			 * Check that LBA range for I/O does not exceed volume's
			 * MaxLBA.
			 */
			if ((virtLBA + (uint64_t)io_size - 1) <=
			    sc->DD_max_lba) {
				/*
				 * Check if the I/O crosses a stripe boundary.
				 * If not, translate the virtual LBA to a
				 * physical LBA and set the DevHandle for the
				 * PhysDisk to be used.  If it does cross a
				 * boundary, do normal I/O.  To get the right
				 * DevHandle to use, get the map number for the
				 * column, then use that map number to look up
				 * the DevHandle of the PhysDisk.
				 */
				stripe_offset = (uint32_t)virtLBA &
				    (sc->DD_stripe_size - 1);
				if ((stripe_offset + io_size) <=
				    sc->DD_stripe_size) {
					physLBA = (uint32_t)virtLBA >>
					    sc->DD_stripe_exponent;
					stripe_unit = physLBA /
					    sc->DD_num_phys_disks;
					column = physLBA %
					    sc->DD_num_phys_disks;
					pIO_req->DevHandle =
					    htole16(sc->DD_column_map[column].
					    dev_handle);
					cm->cm_desc.SCSIIO.DevHandle =
					    pIO_req->DevHandle;

					/* Rewrite the 32-bit LBA field of the
					 * CDB in place (big-endian). */
					physLBA = (stripe_unit <<
					    sc->DD_stripe_exponent) +
					    stripe_offset;
					ptrLBA =
					    &pIO_req->CDB.CDB32[lba_idx];
					physLBA_byte = (uint8_t)(physLBA >> 24);
					*ptrLBA = physLBA_byte;
					ptrLBA =
					    &pIO_req->CDB.CDB32[lba_idx + 1];
					physLBA_byte = (uint8_t)(physLBA >> 16);
					*ptrLBA = physLBA_byte;
					ptrLBA =
					    &pIO_req->CDB.CDB32[lba_idx + 2];
					physLBA_byte = (uint8_t)(physLBA >> 8);
					*ptrLBA = physLBA_byte;
					ptrLBA =
					    &pIO_req->CDB.CDB32[lba_idx + 3];
					physLBA_byte = (uint8_t)physLBA;
					*ptrLBA = physLBA_byte;

					/*
					 * Set flag that Direct Drive I/O is
					 * being done.
2645 */ 2646 cm->cm_flags |= MPS_CM_FLAGS_DD_IO; 2647 } 2648 } 2649 } else { 2650 /* 2651 * 16-byte CDB and the upper 4 bytes of the CDB are not 2652 * 0. Get the transfer size in blocks. 2653 */ 2654 io_size = (cm->cm_length >> sc->DD_block_exponent); 2655 2656 /* 2657 * Get virtual LBA. 2658 */ 2659 virtLBA = ((uint64_t)CDB[2] << 54) | 2660 ((uint64_t)CDB[3] << 48) | 2661 ((uint64_t)CDB[4] << 40) | 2662 ((uint64_t)CDB[5] << 32) | 2663 ((uint64_t)CDB[6] << 24) | 2664 ((uint64_t)CDB[7] << 16) | 2665 ((uint64_t)CDB[8] << 8) | 2666 (uint64_t)CDB[9]; 2667 2668 /* 2669 * Check that LBA range for I/O does not exceed volume's 2670 * MaxLBA. 2671 */ 2672 if ((virtLBA + (uint64_t)io_size - 1) <= 2673 sc->DD_max_lba) { 2674 /* 2675 * Check if the I/O crosses a stripe boundary. 2676 * If not, translate the virtual LBA to a 2677 * physical LBA and set the DevHandle for the 2678 * PhysDisk to be used. If it does cross a 2679 * boundary, do normal I/O. To get the right 2680 * DevHandle to use, get the map number for the 2681 * column, then use that map number to look up 2682 * the DevHandle of the PhysDisk. 2683 */ 2684 stripe_offset = (uint32_t)virtLBA & 2685 (sc->DD_stripe_size - 1); 2686 if ((stripe_offset + io_size) <= 2687 sc->DD_stripe_size) { 2688 physLBA = (uint32_t)(virtLBA >> 2689 sc->DD_stripe_exponent); 2690 stripe_unit = physLBA / 2691 sc->DD_num_phys_disks; 2692 column = physLBA % 2693 sc->DD_num_phys_disks; 2694 pIO_req->DevHandle = 2695 htole16(sc->DD_column_map[column]. 2696 dev_handle); 2697 cm->cm_desc.SCSIIO.DevHandle = 2698 pIO_req->DevHandle; 2699 2700 physLBA = (stripe_unit << 2701 sc->DD_stripe_exponent) + 2702 stripe_offset; 2703 2704 /* 2705 * Set upper 4 bytes of LBA to 0. We 2706 * assume that the phys disks are less 2707 * than 2 TB's in size. Then, set the 2708 * lower 4 bytes. 
					 */
					pIO_req->CDB.CDB32[2] = 0;
					pIO_req->CDB.CDB32[3] = 0;
					pIO_req->CDB.CDB32[4] = 0;
					pIO_req->CDB.CDB32[5] = 0;
					/* Store the translated 32-bit physical
					 * LBA big-endian into CDB bytes 6-9. */
					ptrLBA = &pIO_req->CDB.CDB32[6];
					physLBA_byte = (uint8_t)(physLBA >> 24);
					*ptrLBA = physLBA_byte;
					ptrLBA = &pIO_req->CDB.CDB32[7];
					physLBA_byte = (uint8_t)(physLBA >> 16);
					*ptrLBA = physLBA_byte;
					ptrLBA = &pIO_req->CDB.CDB32[8];
					physLBA_byte = (uint8_t)(physLBA >> 8);
					*ptrLBA = physLBA_byte;
					ptrLBA = &pIO_req->CDB.CDB32[9];
					physLBA_byte = (uint8_t)physLBA;
					*ptrLBA = physLBA_byte;

					/*
					 * Set flag that Direct Drive I/O is
					 * being done.
					 */
					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
				}
			}
		}
	}
}

#if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough requests issued by
 * mpssas_send_smpcmd().  Translates the MPI2 reply into a CCB status and
 * finishes the CCB.
 */
static void
mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
		    __func__, cm->cm_flags);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/* Reassemble the 64-bit SAS address from the little-endian request
	 * fields, for logging. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
	    "%#jx completed successfully\n", __func__,
	    (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame is the function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mps_free_command(sc, cm);
	xpt_done(ccb);
}

/*
 * Build and submit an MPI2 SMP passthrough request for the given CCB,
 * addressed to the supplied SAS address.  The request and response buffers
 * are mapped as a two-element uio; completion is handled by
 * mpssas_smpio_complete().
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
		    "%s: physical addresses not supported\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: multiple request or response "
			    "buffer segments not supported for SMP\n",
			    __func__);
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 is the outgoing SMP request, iovec 1 receives the reply. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
	    cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
	    MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: error %d returned from mps_map_command()\n",
		    __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;

}

/*
 * Handle an XPT_SMP_IO CCB: figure out which SAS address the SMP request
 * should be routed to (the target itself if it contains an SMP target,
 * otherwise its parent expander) and hand it to mpssas_send_smpcmd().
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: target %d does not exist!\n", __func__,
		    ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: handle %d does not have a valid "
			    "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		parent_target = mpssas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: handle %d does not have a valid "
			    "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: handle %d parent %d does not "
			    "have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		if ((targ->parent_devinfo &
		    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: handle %d parent %d does not "
			    "have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb,
CAM_DEV_NOT_THERE); 3077 goto bailout; 3078 3079 } 3080 if (targ->parent_sasaddr == 0x0) { 3081 mps_dprint(sc, MPS_ERROR, 3082 "%s: handle %d parent handle %d does " 3083 "not have a valid SAS address!\n", 3084 __func__, targ->handle, targ->parent_handle); 3085 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 3086 goto bailout; 3087 } 3088 3089 sasaddr = targ->parent_sasaddr; 3090 #endif /* OLD_MPS_PROBE */ 3091 3092 } 3093 3094 if (sasaddr == 0) { 3095 mps_dprint(sc, MPS_INFO, 3096 "%s: unable to find SAS address for handle %d\n", 3097 __func__, targ->handle); 3098 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 3099 goto bailout; 3100 } 3101 mpssas_send_smpcmd(sassc, ccb, sasaddr); 3102 3103 return; 3104 3105 bailout: 3106 xpt_done(ccb); 3107 3108 } 3109 #endif //__FreeBSD_version >= 900026 3110 3111 static void 3112 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb) 3113 { 3114 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 3115 struct mps_softc *sc; 3116 struct mps_command *tm; 3117 struct mpssas_target *targ; 3118 3119 MPS_FUNCTRACE(sassc->sc); 3120 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED); 3121 3122 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, 3123 ("Target %d out of bounds in XPT_RESET_DEV\n", 3124 ccb->ccb_h.target_id)); 3125 sc = sassc->sc; 3126 tm = mps_alloc_command(sc); 3127 if (tm == NULL) { 3128 mps_dprint(sc, MPS_ERROR, 3129 "command alloc failure in mpssas_action_resetdev\n"); 3130 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL); 3131 xpt_done(ccb); 3132 return; 3133 } 3134 3135 targ = &sassc->targets[ccb->ccb_h.target_id]; 3136 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 3137 req->DevHandle = htole16(targ->handle); 3138 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 3139 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 3140 3141 /* SAS Hard Link Reset / SATA Link Reset */ 3142 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 3143 3144 tm->cm_data = NULL; 3145 tm->cm_desc.HighPriority.RequestFlags = 
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mpssas_resetdev_complete;
	tm->cm_complete_data = ccb;
	tm->cm_targ = targ;
	/* Block further I/O to this target until the reset completes. */
	targ->flags |= MPSSAS_TARGET_INRESET;

	mps_map_command(sc, tm);
}

/*
 * Completion handler for the Target Reset issued by
 * mpssas_action_resetdev().  Translates the task-management reply into a
 * CCB status, announces the reset to CAM on success, then frees the TM
 * command and completes the CCB.
 */
static void
mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for reset of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    req->DevHandle);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/*
	 * NOTE(review): ResponseCode is a single byte in the MPI2
	 * task-management reply, so le32toh() on it looks wrong for
	 * big-endian hosts (it only happens to work for the zero-valued
	 * TM_COMPLETE code) -- verify against MPI2_SCSI_TASK_MANAGE_REPLY.
	 */
	mps_dprint(sc, MPS_XINFO,
	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		/* Tell CAM a bus-device reset was delivered to every LUN. */
		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

bailout:

	mpssas_free_tm(sc, tm);
	xpt_done(ccb);
}

/*
 * CAM SIM poll entry point: run the interrupt handler by hand.  Used when
 * interrupts are unavailable (e.g. while dumping after a panic).
 */
static void
mpssas_poll(struct cam_sim *sim)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	if (sassc->sc->mps_debug & MPS_TRACE) {
		/* frequent debug messages during a panic just slow
		 * everything down too much.
		 */
		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
		sassc->sc->mps_debug &= ~MPS_TRACE;
	}

	mps_intr_locked(sassc->sc);
}

/*
 * CAM asynchronous event callback.  On newer FreeBSD it watches
 * AC_ADVINFO_CHANGED for long read-capacity data and refreshes the
 * per-LUN EEDP (protection information) state from the data cached by
 * the transport layer; on older FreeBSD it reacts to AC_FOUND_DEVICE by
 * probing the device via mpssas_check_eedp().
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find (or create below) the LUN record for this path. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the long read-capacity data CAM has cached for this
		 * device via an XPT_DEV_ADVINFO CCB issued on the stack.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
			    0, 0, 0, FALSE);

		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			/* Device reports protection enabled: record EEDP. */
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#else
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		mpssas_check_eedp(sc, path, cgd);
		break;
	}
#endif
	default:
		break;
	}
}

#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && \
    (__FreeBSD_version < 1000006))
/*
 * Older-FreeBSD EEDP probe path (used when AC_ADVINFO_CHANGED is not
 * available).  Called on AC_FOUND_DEVICE: if the inquiry data says the
 * device supports protection information, issue a READ CAPACITY(16) so
 * that mpssas_read_cap_done() can record whether the LUN is actually
 * formatted for EEDP and with what block size.
 */
static void
mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
    struct ccb_getdev *cgd)
{
	struct mpssas_softc *sassc = sc->sassc;
	struct ccb_scsiio *csio;
	struct scsi_read_capacity_16 *scsi_cmd;
	struct scsi_read_capacity_eedp *rcap_buf;
	path_id_t pathid;
	target_id_t targetid;
	lun_id_t lunid;
	union ccb *ccb;
	struct cam_path *local_path;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	uint8_t found_lun;
	char path_str[64];

	sassc = sc->sassc;
	pathid = cam_sim_path(sassc->sim);
	targetid = xpt_path_target_id(path);
	lunid = xpt_path_lun_id(path);

	KASSERT(targetid < sassc->maxtargets,
	    ("Target %d out of bounds in mpssas_check_eedp\n",
	    targetid));
	target = &sassc->targets[targetid];
	if (target->handle == 0x0)
		return;

	/*
	 * Determine if the device is EEDP capable.
	 *
	 * If this flag is set in the inquiry data,
	 * the device supports protection information,
	 * and must support the 16 byte read
	 * capacity command, otherwise continue without
	 * sending read cap 16
	 */
	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
		return;

	/*
	 * Issue a READ CAPACITY 16 command.  This info
	 * is used to determine if the LUN is formatted
	 * for EEDP support.
	 */
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
		    "for EEDP support.\n");
		return;
	}

	if (xpt_create_path(&local_path, xpt_periph,
	    pathid, targetid, lunid) != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_ERROR, "Unable to create "
		    "path for EEDP support\n");
		xpt_free_ccb(ccb);
		return;
	}

	/*
	 * If LUN is already in list, don't create a new
	 * one.
	 */
	found_lun = FALSE;
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id == lunid) {
			found_lun = TRUE;
			break;
		}
	}
	if (!found_lun) {
		lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
		    M_NOWAIT | M_ZERO);
		if (lun == NULL) {
			mps_dprint(sc, MPS_ERROR,
			    "Unable to alloc LUN for EEDP support.\n");
			xpt_free_path(local_path);
			xpt_free_ccb(ccb);
			return;
		}
		lun->lun_id = lunid;
		SLIST_INSERT_HEAD(&target->luns, lun,
		    lun_link);
	}

	xpt_path_string(local_path, path_str, sizeof(path_str));

	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
	    path_str, target->handle);

	/*
	 * Issue a READ CAPACITY 16 command for the LUN.
	 * The mpssas_read_cap_done function will load
	 * the read cap info into the LUN struct.
	 */
	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
	    M_MPT2, M_NOWAIT | M_ZERO);
	if (rcap_buf == NULL) {
		mps_dprint(sc, MPS_FAULT,
		    "Unable to alloc read capacity buffer for EEDP support.\n");
		/*
		 * NOTE(review): xpt_setup_ccb() has not run yet on this
		 * error path, so ccb->ccb_h.path may not point at
		 * local_path -- confirm; freeing local_path directly
		 * would be clearer.
		 */
		xpt_free_path(ccb->ccb_h.path);
		xpt_free_ccb(ccb);
		return;
	}
	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
	csio = &ccb->csio;
	csio->ccb_h.func_code = XPT_SCSI_IO;
	csio->ccb_h.flags = CAM_DIR_IN;
	csio->ccb_h.retry_count = 4;
	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
	csio->ccb_h.timeout = 60000;
	csio->data_ptr = (uint8_t *)rcap_buf;
	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
	csio->sense_len = MPS_SENSE_LEN;
	csio->cdb_len = sizeof(*scsi_cmd);
	csio->tag_action = MSG_SIMPLE_Q_TAG;

	/* Build the READ CAPACITY(16) CDB by hand (opcode 0x9E). */
	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = 0x9E;
	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
	/* Allocation length lives in CDB byte 13. */
	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);

	/* Stash the softc so the completion handler can find it. */
	ccb->ccb_h.ppriv_ptr1 = sassc;
	xpt_action(ccb);
}

/*
 * Completion handler for the READ CAPACITY(16) sent by
 * mpssas_check_eedp().  Releases the devq (the command was generated
 * internally, not by a peripheral), then records the LUN's EEDP state
 * from the returned data and frees the buffer, path and CCB.
 */
static void
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/* Driver need to release devq, it Scsi command is
	 * generated by driver internally.
	 * Currently there is a single place where driver
	 * calls scsi command internally.  In future if driver
	 * calls more scsi command internally, it needs to release
	 * devq internally, since those command will not go back to
	 * cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
		    /*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mpssas_read_cap_done\n",
	    done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in
		 * with EEDP info.  If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Low bit of the PROT byte => protection enabled on media. */
		if (rcap_buf->protect & 0x01) {
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
			    "target ID %d is formatted for EEDP "
			    "support.\n", done_ccb->ccb_h.target_lun,
			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	/* Finished with this CCB and path. */
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
#endif /* (__FreeBSD_version < 901503) || \
          ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */

/*
 * Prepare a task-management command targeted at 'target'/'lun_id'.
 * Allocates a CCB whose path is later used to release the devq when the
 * TM completes, attaches it to the command, and marks the target INRESET
 * so no new I/O is started against it in the meantime.  Allocation
 * failures are tolerated: the TM simply proceeds without a CCB.
 */
void
mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
    struct mpssas_target *target, lun_id_t lun_id)
{
	union ccb *ccb;
	path_id_t path_id;

	/*
	 * Set the INRESET flag for this target so that no I/O will be sent to
	 * the target until the reset has completed.  If an I/O request does
	 * happen, the devq will be frozen.  The CCB holds the path which is
	 * used to release the devq.  The devq is released and the CCB is freed
	 * when the TM completes.
	 */
	ccb = xpt_alloc_ccb_nowait();
	if (ccb) {
		path_id = cam_sim_path(sc->sassc->sim);
		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
		    target->tid, lun_id) != CAM_REQ_CMP) {
			xpt_free_ccb(ccb);
		} else {
			tm->cm_ccb = ccb;
			tm->cm_targ = target;
			target->flags |= MPSSAS_TARGET_INRESET;
		}
	}
}

/*
 * Kick off topology discovery by sending the firmware Port Enable
 * request.  Always returns 0.
 */
int
mpssas_startup(struct mps_softc *sc)
{

	/*
	 * Send the port enable message and set the wait_for_port_enable flag.
	 * This flag helps to keep the simq frozen until all discovery events
	 * are processed.
3566 */ 3567 sc->wait_for_port_enable = 1; 3568 mpssas_send_portenable(sc); 3569 return (0); 3570 } 3571 3572 static int 3573 mpssas_send_portenable(struct mps_softc *sc) 3574 { 3575 MPI2_PORT_ENABLE_REQUEST *request; 3576 struct mps_command *cm; 3577 3578 MPS_FUNCTRACE(sc); 3579 3580 if ((cm = mps_alloc_command(sc)) == NULL) 3581 return (EBUSY); 3582 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req; 3583 request->Function = MPI2_FUNCTION_PORT_ENABLE; 3584 request->MsgFlags = 0; 3585 request->VP_ID = 0; 3586 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 3587 cm->cm_complete = mpssas_portenable_complete; 3588 cm->cm_data = NULL; 3589 cm->cm_sge = NULL; 3590 3591 mps_map_command(sc, cm); 3592 mps_dprint(sc, MPS_XINFO, 3593 "mps_send_portenable finished cm %p req %p complete %p\n", 3594 cm, cm->cm_req, cm->cm_complete); 3595 return (0); 3596 } 3597 3598 static void 3599 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm) 3600 { 3601 MPI2_PORT_ENABLE_REPLY *reply; 3602 struct mpssas_softc *sassc; 3603 3604 MPS_FUNCTRACE(sc); 3605 sassc = sc->sassc; 3606 3607 /* 3608 * Currently there should be no way we can hit this case. It only 3609 * happens when we have a failure to allocate chain frames, and 3610 * port enable commands don't have S/G lists. 3611 */ 3612 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { 3613 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! 
" 3614 "This should not happen!\n", __func__, cm->cm_flags); 3615 } 3616 3617 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply; 3618 if (reply == NULL) 3619 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n"); 3620 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) != 3621 MPI2_IOCSTATUS_SUCCESS) 3622 mps_dprint(sc, MPS_FAULT, "Portenable failed\n"); 3623 3624 mps_free_command(sc, cm); 3625 if (sc->mps_ich.ich_arg != NULL) { 3626 mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n"); 3627 config_intrhook_disestablish(&sc->mps_ich); 3628 sc->mps_ich.ich_arg = NULL; 3629 } 3630 3631 /* 3632 * Get WarpDrive info after discovery is complete but before the scan 3633 * starts. At this point, all devices are ready to be exposed to the 3634 * OS. If devices should be hidden instead, take them out of the 3635 * 'targets' array before the scan. The devinfo for a disk will have 3636 * some info and a volume's will be 0. Use that to remove disks. 3637 */ 3638 mps_wd_config_pages(sc); 3639 3640 /* 3641 * Done waiting for port enable to complete. Decrement the refcount. 3642 * If refcount is 0, discovery is complete and a rescan of the bus can 3643 * take place. Since the simq was explicitly frozen before port 3644 * enable, it must be explicitly released here to keep the 3645 * freeze/release count in sync. 
3646 */ 3647 sc->wait_for_port_enable = 0; 3648 sc->port_enable_complete = 1; 3649 wakeup(&sc->port_enable_complete); 3650 mpssas_startup_decrement(sassc); 3651 } 3652 3653 int 3654 mpssas_check_id(struct mpssas_softc *sassc, int id) 3655 { 3656 struct mps_softc *sc = sassc->sc; 3657 char *ids; 3658 char *name; 3659 3660 ids = &sc->exclude_ids[0]; 3661 while((name = strsep(&ids, ",")) != NULL) { 3662 if (name[0] == '\0') 3663 continue; 3664 if (strtol(name, NULL, 0) == (long)id) 3665 return (1); 3666 } 3667 3668 return (0); 3669 } 3670 3671 void 3672 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets) 3673 { 3674 struct mpssas_softc *sassc; 3675 struct mpssas_lun *lun, *lun_tmp; 3676 struct mpssas_target *targ; 3677 int i; 3678 3679 sassc = sc->sassc; 3680 /* 3681 * The number of targets is based on IOC Facts, so free all of 3682 * the allocated LUNs for each target and then the target buffer 3683 * itself. 3684 */ 3685 for (i=0; i< maxtargets; i++) { 3686 targ = &sassc->targets[i]; 3687 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) { 3688 free(lun, M_MPT2); 3689 } 3690 } 3691 free(sassc->targets, M_MPT2); 3692 3693 sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets, 3694 M_MPT2, M_WAITOK|M_ZERO); 3695 if (!sassc->targets) { 3696 panic("%s failed to alloc targets with error %d\n", 3697 __func__, ENOMEM); 3698 } 3699 } 3700