1 /*- 2 * Copyright (c) 2009 Yahoo! Inc. 3 * Copyright (c) 2011-2015 LSI Corp. 4 * Copyright (c) 2013-2016 Avago Technologies 5 * Copyright 2000-2020 Broadcom Inc. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * Broadcom Inc. 
(LSI) MPT-Fusion Host Adapter FreeBSD 30 * 31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 /* Communications core for Avago Technologies (LSI) MPT3 */ 37 38 /* TODO Move headers to mprvar */ 39 #include <sys/types.h> 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/kernel.h> 43 #include <sys/selinfo.h> 44 #include <sys/module.h> 45 #include <sys/bus.h> 46 #include <sys/conf.h> 47 #include <sys/bio.h> 48 #include <sys/malloc.h> 49 #include <sys/uio.h> 50 #include <sys/sysctl.h> 51 #include <sys/endian.h> 52 #include <sys/queue.h> 53 #include <sys/kthread.h> 54 #include <sys/taskqueue.h> 55 #include <sys/sbuf.h> 56 57 #include <machine/bus.h> 58 #include <machine/resource.h> 59 #include <sys/rman.h> 60 61 #include <machine/stdarg.h> 62 63 #include <cam/cam.h> 64 #include <cam/cam_ccb.h> 65 #include <cam/cam_debug.h> 66 #include <cam/cam_sim.h> 67 #include <cam/cam_xpt_sim.h> 68 #include <cam/cam_xpt_periph.h> 69 #include <cam/cam_periph.h> 70 #include <cam/scsi/scsi_all.h> 71 #include <cam/scsi/scsi_message.h> 72 #if __FreeBSD_version >= 900026 73 #include <cam/scsi/smp_all.h> 74 #endif 75 76 #include <dev/nvme/nvme.h> 77 78 #include <dev/mpr/mpi/mpi2_type.h> 79 #include <dev/mpr/mpi/mpi2.h> 80 #include <dev/mpr/mpi/mpi2_ioc.h> 81 #include <dev/mpr/mpi/mpi2_sas.h> 82 #include <dev/mpr/mpi/mpi2_pci.h> 83 #include <dev/mpr/mpi/mpi2_cnfg.h> 84 #include <dev/mpr/mpi/mpi2_init.h> 85 #include <dev/mpr/mpi/mpi2_tool.h> 86 #include <dev/mpr/mpr_ioctl.h> 87 #include <dev/mpr/mprvar.h> 88 #include <dev/mpr/mpr_table.h> 89 #include <dev/mpr/mpr_sas.h> 90 91 #define MPRSAS_DISCOVERY_TIMEOUT 20 92 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */ 93 94 /* 95 * static array to check SCSI OpCode for EEDP protection bits 96 */ 97 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP 98 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP 99 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP 100 static uint8_t op_code_prot[256] = { 101 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 103 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, 104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 105 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 107 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 108 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 109 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, 110 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 111 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, 112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 114 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 115 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 116 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 117 }; 118 119 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory"); 120 121 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *); 122 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *); 123 static void mprsas_action(struct cam_sim *sim, union ccb *ccb); 124 static void mprsas_poll(struct cam_sim *sim); 125 static void mprsas_scsiio_timeout(void *data); 126 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm); 127 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *); 128 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *); 129 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *); 130 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *); 131 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm, 132 struct mpr_command *cm); 133 static void mprsas_async(void *callback_arg, uint32_t code, 134 struct cam_path *path, void *arg); 135 #if (__FreeBSD_version < 901503) || \ 136 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) 137 
static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path, 138 struct ccb_getdev *cgd); 139 static void mprsas_read_cap_done(struct cam_periph *periph, 140 union ccb *done_ccb); 141 #endif 142 static int mprsas_send_portenable(struct mpr_softc *sc); 143 static void mprsas_portenable_complete(struct mpr_softc *sc, 144 struct mpr_command *cm); 145 146 #if __FreeBSD_version >= 900026 147 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm); 148 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, 149 uint64_t sasaddr); 150 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb); 151 #endif //FreeBSD_version >= 900026 152 153 struct mprsas_target * 154 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start, 155 uint16_t handle) 156 { 157 struct mprsas_target *target; 158 int i; 159 160 for (i = start; i < sassc->maxtargets; i++) { 161 target = &sassc->targets[i]; 162 if (target->handle == handle) 163 return (target); 164 } 165 166 return (NULL); 167 } 168 169 /* we need to freeze the simq during attach and diag reset, to avoid failing 170 * commands before device handles have been found by discovery. Since 171 * discovery involves reading config pages and possibly sending commands, 172 * discovery actions may continue even after we receive the end of discovery 173 * event, so refcount discovery actions instead of assuming we can unfreeze 174 * the simq when we get the event. 
 */
/*
 * Take a reference on the in-startup state.  The first reference freezes
 * the SIM queue (and, on newer CAM, holds boot) so that no commands are
 * issued before discovery has populated the device handles.  No-op once
 * MPRSAS_IN_STARTUP has been cleared.
 */
void
mprsas_startup_increment(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s freezing simq\n", __func__);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}

/*
 * Release the MPRSAS_QUEUE_FROZEN freeze (taken elsewhere, e.g. around a
 * diag reset) and unfreeze the SIM queue if it was held.
 */
void
mprsas_release_simq_reinit(struct mprsas_softc *sassc)
{
	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
		xpt_release_simq(sassc->sim, 1);
		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
	}
}

/*
 * Drop a startup reference.  When the last reference goes away, discovery
 * is considered finished: clear MPRSAS_IN_STARTUP, release the SIM queue,
 * and either release the boot hold (newer CAM) or trigger a full rescan.
 */
void
mprsas_startup_decrement(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPRSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			xpt_release_boot();
#else
			mprsas_rescan_target(sassc->sc, NULL);
#endif
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}

/*
 * The firmware requires us to stop sending commands when we're doing task
 * management.
 * XXX The logic for serializing the device has been made lazy and moved to
 * mprsas_prepare_for_tm().
239 */ 240 struct mpr_command * 241 mprsas_alloc_tm(struct mpr_softc *sc) 242 { 243 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 244 struct mpr_command *tm; 245 246 MPR_FUNCTRACE(sc); 247 tm = mpr_alloc_high_priority_command(sc); 248 if (tm == NULL) 249 return (NULL); 250 251 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 252 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 253 return tm; 254 } 255 256 void 257 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm) 258 { 259 int target_id = 0xFFFFFFFF; 260 261 MPR_FUNCTRACE(sc); 262 if (tm == NULL) 263 return; 264 265 /* 266 * For TM's the devq is frozen for the device. Unfreeze it here and 267 * free the resources used for freezing the devq. Must clear the 268 * INRESET flag as well or scsi I/O will not work. 269 */ 270 if (tm->cm_targ != NULL) { 271 tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET; 272 target_id = tm->cm_targ->tid; 273 } 274 if (tm->cm_ccb) { 275 mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n", 276 target_id); 277 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE); 278 xpt_free_path(tm->cm_ccb->ccb_h.path); 279 xpt_free_ccb(tm->cm_ccb); 280 } 281 282 mpr_free_high_priority_command(sc, tm); 283 } 284 285 void 286 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ) 287 { 288 struct mprsas_softc *sassc = sc->sassc; 289 path_id_t pathid; 290 target_id_t targetid; 291 union ccb *ccb; 292 293 MPR_FUNCTRACE(sc); 294 pathid = cam_sim_path(sassc->sim); 295 if (targ == NULL) 296 targetid = CAM_TARGET_WILDCARD; 297 else 298 targetid = targ - sassc->targets; 299 300 /* 301 * Allocate a CCB and schedule a rescan. 
 */
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
		return;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
		xpt_free_ccb(ccb);
		return;
	}

	/* Wildcard target means the whole bus needs scanning. */
	if (targetid == CAM_TARGET_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else
		ccb->ccb_h.func_code = XPT_SCAN_TGT;

	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
	xpt_rescan(ccb);
}

/*
 * printf-style debug logging for a command.  Prefixes the message with the
 * command's CAM path (or a "noperiph" sim:bus:target:lun tuple when no CCB
 * is attached) and its SMID.  Filtered by the driver debug level, so this
 * is a cheap no-op when the requested level is not enabled.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}

/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove().  On success, clears the target's
 * identifying state so the slot can be reused if the volume returns.
 */
static void
mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	struct mprsas_target *targ;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
410 */ 411 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) == 412 MPI2_IOCSTATUS_SUCCESS) { 413 targ = tm->cm_targ; 414 targ->handle = 0x0; 415 targ->encl_handle = 0x0; 416 targ->encl_level_valid = 0x0; 417 targ->encl_level = 0x0; 418 targ->connector_name[0] = ' '; 419 targ->connector_name[1] = ' '; 420 targ->connector_name[2] = ' '; 421 targ->connector_name[3] = ' '; 422 targ->encl_slot = 0x0; 423 targ->exp_dev_handle = 0x0; 424 targ->phy_num = 0x0; 425 targ->linkrate = 0x0; 426 targ->devinfo = 0x0; 427 targ->flags = 0x0; 428 targ->scsi_req_desc_type = 0; 429 } 430 431 mprsas_free_tm(sc, tm); 432 } 433 434 435 /* 436 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal. 437 * Otherwise Volume Delete is same as Bare Drive Removal. 438 */ 439 void 440 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle) 441 { 442 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 443 struct mpr_softc *sc; 444 struct mpr_command *cm; 445 struct mprsas_target *targ = NULL; 446 447 MPR_FUNCTRACE(sassc->sc); 448 sc = sassc->sc; 449 450 targ = mprsas_find_target_by_handle(sassc, 0, handle); 451 if (targ == NULL) { 452 /* FIXME: what is the action? */ 453 /* We don't know about this device? 
*/ 454 mpr_dprint(sc, MPR_ERROR, 455 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle); 456 return; 457 } 458 459 targ->flags |= MPRSAS_TARGET_INREMOVAL; 460 461 cm = mprsas_alloc_tm(sc); 462 if (cm == NULL) { 463 mpr_dprint(sc, MPR_ERROR, 464 "%s: command alloc failure\n", __func__); 465 return; 466 } 467 468 mprsas_rescan_target(sc, targ); 469 470 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req; 471 req->DevHandle = targ->handle; 472 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 473 474 if (!targ->is_nvme || sc->custom_nvme_tm_handling) { 475 /* SAS Hard Link Reset / SATA Link Reset */ 476 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 477 } else { 478 /* PCIe Protocol Level Reset*/ 479 req->MsgFlags = 480 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; 481 } 482 483 cm->cm_targ = targ; 484 cm->cm_data = NULL; 485 cm->cm_complete = mprsas_remove_volume; 486 cm->cm_complete_data = (void *)(uintptr_t)handle; 487 488 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n", 489 __func__, targ->tid); 490 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD); 491 492 mpr_map_command(sc, cm); 493 } 494 495 /* 496 * The firmware performs debounce on the link to avoid transient link errors 497 * and false removals. When it does decide that link has been lost and a 498 * device needs to go away, it expects that the host will perform a target reset 499 * and then an op remove. The reset has the side-effect of aborting any 500 * outstanding requests for the device, which is required for the op-remove to 501 * succeed. It's not clear if the host should check for the device coming back 502 * alive after the reset. 
503 */ 504 void 505 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle) 506 { 507 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 508 struct mpr_softc *sc; 509 struct mpr_command *tm; 510 struct mprsas_target *targ = NULL; 511 512 MPR_FUNCTRACE(sassc->sc); 513 514 sc = sassc->sc; 515 516 targ = mprsas_find_target_by_handle(sassc, 0, handle); 517 if (targ == NULL) { 518 /* FIXME: what is the action? */ 519 /* We don't know about this device? */ 520 mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n", 521 __func__, handle); 522 return; 523 } 524 525 targ->flags |= MPRSAS_TARGET_INREMOVAL; 526 527 tm = mprsas_alloc_tm(sc); 528 if (tm == NULL) { 529 mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n", 530 __func__); 531 return; 532 } 533 534 mprsas_rescan_target(sc, targ); 535 536 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 537 memset(req, 0, sizeof(*req)); 538 req->DevHandle = htole16(targ->handle); 539 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 540 541 /* SAS Hard Link Reset / SATA Link Reset */ 542 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 543 544 tm->cm_targ = targ; 545 tm->cm_data = NULL; 546 tm->cm_complete = mprsas_remove_device; 547 tm->cm_complete_data = (void *)(uintptr_t)handle; 548 549 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n", 550 __func__, targ->tid); 551 mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD); 552 553 mpr_map_command(sc, tm); 554 } 555 556 static void 557 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm) 558 { 559 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 560 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req; 561 struct mprsas_target *targ; 562 struct mpr_command *next_cm; 563 uint16_t handle; 564 565 MPR_FUNCTRACE(sc); 566 567 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 568 handle = (uint16_t)(uintptr_t)tm->cm_complete_data; 569 targ = tm->cm_targ; 570 571 /* 572 * Currently there should be no way we can hit this case. 
It only 573 * happens when we have a failure to allocate chain frames, and 574 * task management commands don't have S/G lists. 575 */ 576 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 577 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of " 578 "handle %#04x! This should not happen!\n", __func__, 579 tm->cm_flags, handle); 580 } 581 582 if (reply == NULL) { 583 /* XXX retry the remove after the diag reset completes? */ 584 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device " 585 "0x%04x\n", __func__, handle); 586 mprsas_free_tm(sc, tm); 587 return; 588 } 589 590 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) != 591 MPI2_IOCSTATUS_SUCCESS) { 592 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting " 593 "device 0x%x\n", le16toh(reply->IOCStatus), handle); 594 } 595 596 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n", 597 le32toh(reply->TerminationCount)); 598 mpr_free_reply(sc, tm->cm_reply_data); 599 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */ 600 601 /* Reuse the existing command */ 602 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req; 603 memset(req, 0, sizeof(*req)); 604 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; 605 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE; 606 req->DevHandle = htole16(handle); 607 tm->cm_data = NULL; 608 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 609 tm->cm_complete = mprsas_remove_complete; 610 tm->cm_complete_data = (void *)(uintptr_t)handle; 611 612 mpr_map_command(sc, tm); 613 614 mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n", 615 targ->tid, handle); 616 if (targ->encl_level_valid) { 617 mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, " 618 "connector name (%4s)\n", targ->encl_level, targ->encl_slot, 619 targ->connector_name); 620 } 621 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) { 622 union ccb *ccb; 623 624 mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm); 625 ccb = 
tm->cm_complete_data; 626 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 627 mprsas_scsiio_complete(sc, tm); 628 } 629 } 630 631 static void 632 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm) 633 { 634 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply; 635 uint16_t handle; 636 struct mprsas_target *targ; 637 struct mprsas_lun *lun; 638 639 MPR_FUNCTRACE(sc); 640 641 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply; 642 handle = (uint16_t)(uintptr_t)tm->cm_complete_data; 643 644 /* 645 * Currently there should be no way we can hit this case. It only 646 * happens when we have a failure to allocate chain frames, and 647 * task management commands don't have S/G lists. 648 */ 649 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 650 mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of " 651 "handle %#04x! This should not happen!\n", __func__, 652 tm->cm_flags, handle); 653 mprsas_free_tm(sc, tm); 654 return; 655 } 656 657 if (reply == NULL) { 658 /* most likely a chip reset */ 659 mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device " 660 "0x%04x\n", __func__, handle); 661 mprsas_free_tm(sc, tm); 662 return; 663 } 664 665 mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n", 666 __func__, handle, le16toh(reply->IOCStatus)); 667 668 /* 669 * Don't clear target if remove fails because things will get confusing. 670 * Leave the devname and sasaddr intact so that we know to avoid reusing 671 * this target id if possible, and so we can assign the same target id 672 * to this device if it comes back in the future. 
 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		/* Discard the LUN structures tracked for the removed device. */
		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}

/*
 * Build the bitmask of firmware events this driver wants notifications for
 * and register the mprsas event handler.  Active-cable and PCIe events are
 * requested only when the IOC reports a new enough MPI version / Gen3.5
 * capability.  Always returns 0.
 */
static int
mprsas_register_events(struct mpr_softc *sc)
{
	uint8_t events[16];

	bzero(events, 16);
	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	setbit(events, MPI2_EVENT_IR_VOLUME);
	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
	setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
		}
	}

	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
	    &sc->sassc->mprsas_eh);

	return (0);
}

/*
 * Attach the SAS subsystem: allocate the mprsas softc and target array,
 * create the CAM SIM/devq, register the bus, freeze the SIM queue until
 * discovery completes, and register for async/firmware events.
 * Returns 0 on success or an errno; on failure, mpr_detach_sas() unwinds
 * whatever was set up.
 */
int
mpr_attach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	cam_status status;
	int unit, error = 0, reqs;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	/* NOTE(review): M_WAITOK allocations do not fail, so these NULL
	 * checks look defensive-only — confirm against malloc(9). */
	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
	if (!sassc) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Cannot allocate SAS subsystem memory\n");
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mprsas_target) *
	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
	if (!sassc->targets) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Cannot allocate SAS target memory\n");
		free(sassc, M_MPR);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	/* Leave room for the high-priority commands and one spare. */
	reqs = sc->num_reqs - sc->num_prireqs - 1;
	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpr_dev);
	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
	    unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mpr_dev));

	mpr_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %d registering SCSI bus\n", error);
		mpr_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mprsas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
#else
		event = AC_FOUND_DEVICE;
#endif

		/*
		 * Prior to the CAM locking improvements, we can't call
		 * xpt_register_async() with a particular path specified.
		 *
		 * If a path isn't specified, xpt_register_async() will
		 * generate a wildcard path and acquire the XPT lock while
		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
		 * It will then drop the XPT lock once that is done.
		 *
		 * If a path is specified for xpt_register_async(), it will
		 * not acquire and drop the XPT lock around the call to
		 * xpt_action().  xpt_action() asserts that the caller
		 * holds the SIM lock, so the SIM lock has to be held when
		 * calling xpt_register_async() when the path is specified.
		 *
		 * But xpt_register_async calls xpt_for_all_devices(),
		 * which calls xptbustraverse(), which will acquire each
		 * SIM lock.  When it traverses our particular bus, it will
		 * necessarily acquire the SIM lock, which will lead to a
		 * recursive lock acquisition.
		 *
		 * The CAM locking changes fix this problem by acquiring
		 * the XPT topology lock around bus traversal in
		 * xptbustraverse(), so the caller can hold the SIM lock
		 * and it does not cause a recursive lock acquisition.
		 *
		 * These __FreeBSD_version values are approximate, especially
		 * for stable/10, which is two months later than the actual
		 * change.
		 */

#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		mpr_unlock(sc);
		status = xpt_register_async(event, mprsas_async, sc,
		    NULL);
		mpr_lock(sc);
#else
		status = xpt_register_async(event, mprsas_async, sc,
		    sassc->path);
#endif

		if (status != CAM_REQ_CMP) {
			mpr_dprint(sc, MPR_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpr_printf(sc, "EEDP capabilities disabled.\n");
	}

	mpr_unlock(sc);

	mprsas_register_events(sc);
out:
	if (error)
		mpr_detach_sas(sc);

	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
	return (error);
}

/*
 * Tear down the SAS subsystem in roughly the reverse order of
 * mpr_attach_sas().  Safe to call on a partially-attached softc (used as
 * the error-unwind path of attach).  Always returns 0.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	while (sassc->startup_refcount != 0)
		mprsas_startup_decrement(sassc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free the per-target LUN lists before releasing the target array. */
	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}

/*
 * Called when the end-of-discovery event arrives: cancel any pending
 * discovery timeout and, on first initialization, schedule the
 * missing-device check for the mapping table.
 */
void
mprsas_discovery_end(struct mprsas_softc *sassc)
{
	struct mpr_softc *sc = sassc->sc;

	MPR_FUNCTRACE(sc);

	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
		callout_stop(&sassc->discovery_callout);

	/*
	 * After discovery has completed, check the mapping table for any
	 * missing devices and update their missing counts.  Only do this once
	 * whenever the driver is initialized so that missing counts aren't
	 * updated unnecessarily.  Note that just because discovery has
	 * completed doesn't mean that events have been processed yet.  The
	 * check_devices function is a callout timer that checks if ALL devices
	 * are missing.  If so, it will wait a little longer for events to
	 * complete and keep resetting itself until some device in the mapping
	 * table is not missing, meaning that event processing has started.
	 */
	if (sc->track_mapping_events) {
		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
		    "completed. Check for missing devices in the mapping "
		    "table.\n");
		callout_reset(&sc->device_check_callout,
		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
		    sc);
	}
}

/*
 * CAM action entry point for the mpr SIM.  Dispatches CCBs by function
 * code; cases that complete asynchronously (SCSI I/O, SMP I/O, device
 * reset) return directly, everything else falls through to xpt_done().
 * Called with the SIM lock held (asserted below).
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;
		struct ccb_trans_settings_sas *sas;
		struct ccb_trans_settings_scsi *scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Map the firmware-reported link rate code to a bitrate. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb,
CAM_REQ_CMP); 1107 break; 1108 } 1109 case XPT_CALC_GEOMETRY: 1110 cam_calc_geometry(&ccb->ccg, /*extended*/1); 1111 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); 1112 break; 1113 case XPT_RESET_DEV: 1114 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action " 1115 "XPT_RESET_DEV\n"); 1116 mprsas_action_resetdev(sassc, ccb); 1117 return; 1118 case XPT_RESET_BUS: 1119 case XPT_ABORT: 1120 case XPT_TERM_IO: 1121 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success " 1122 "for abort or reset\n"); 1123 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); 1124 break; 1125 case XPT_SCSI_IO: 1126 mprsas_action_scsiio(sassc, ccb); 1127 return; 1128 #if __FreeBSD_version >= 900026 1129 case XPT_SMP_IO: 1130 mprsas_action_smpio(sassc, ccb); 1131 return; 1132 #endif 1133 default: 1134 mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL); 1135 break; 1136 } 1137 xpt_done(ccb); 1138 1139 } 1140 1141 static void 1142 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code, 1143 target_id_t target_id, lun_id_t lun_id) 1144 { 1145 path_id_t path_id = cam_sim_path(sc->sassc->sim); 1146 struct cam_path *path; 1147 1148 mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__, 1149 ac_code, target_id, (uintmax_t)lun_id); 1150 1151 if (xpt_create_path(&path, NULL, 1152 path_id, target_id, lun_id) != CAM_REQ_CMP) { 1153 mpr_dprint(sc, MPR_ERROR, "unable to create path for reset " 1154 "notification\n"); 1155 return; 1156 } 1157 1158 xpt_async(ac_code, path, NULL); 1159 xpt_free_path(path); 1160 } 1161 1162 static void 1163 mprsas_complete_all_commands(struct mpr_softc *sc) 1164 { 1165 struct mpr_command *cm; 1166 int i; 1167 int completed; 1168 1169 MPR_FUNCTRACE(sc); 1170 mtx_assert(&sc->mpr_mtx, MA_OWNED); 1171 1172 /* complete all commands with a NULL reply */ 1173 for (i = 1; i < sc->num_reqs; i++) { 1174 cm = &sc->commands[i]; 1175 if (cm->cm_state == MPR_CM_STATE_FREE) 1176 continue; 1177 1178 cm->cm_state = MPR_CM_STATE_BUSY; 1179 cm->cm_reply = NULL; 1180 completed = 0; 1181 1182 if 
(cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) { 1183 MPASS(cm->cm_data); 1184 free(cm->cm_data, M_MPR); 1185 cm->cm_data = NULL; 1186 } 1187 1188 if (cm->cm_flags & MPR_CM_FLAGS_POLLED) 1189 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE; 1190 1191 if (cm->cm_complete != NULL) { 1192 mprsas_log_command(cm, MPR_RECOVERY, 1193 "completing cm %p state %x ccb %p for diag reset\n", 1194 cm, cm->cm_state, cm->cm_ccb); 1195 cm->cm_complete(sc, cm); 1196 completed = 1; 1197 } else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) { 1198 mprsas_log_command(cm, MPR_RECOVERY, 1199 "waking up cm %p state %x ccb %p for diag reset\n", 1200 cm, cm->cm_state, cm->cm_ccb); 1201 wakeup(cm); 1202 completed = 1; 1203 } 1204 1205 if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) { 1206 /* this should never happen, but if it does, log */ 1207 mprsas_log_command(cm, MPR_RECOVERY, 1208 "cm %p state %x flags 0x%x ccb %p during diag " 1209 "reset\n", cm, cm->cm_state, cm->cm_flags, 1210 cm->cm_ccb); 1211 } 1212 } 1213 1214 sc->io_cmds_active = 0; 1215 } 1216 1217 void 1218 mprsas_handle_reinit(struct mpr_softc *sc) 1219 { 1220 int i; 1221 1222 /* Go back into startup mode and freeze the simq, so that CAM 1223 * doesn't send any commands until after we've rediscovered all 1224 * targets and found the proper device handles for them. 1225 * 1226 * After the reset, portenable will trigger discovery, and after all 1227 * discovery-related activities have finished, the simq will be 1228 * released. 
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
	    __func__, sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}
/*
 * Callout handler armed when a task management request is issued.  If
 * the TM request itself times out, the only recovery left is a full
 * controller reinit.
 */
static void
mprsas_tm_timeout(void *data)
{
	struct mpr_command *tm = data;
	struct mpr_softc *sc = tm->cm_sc;

	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
	    "out\n", tm);

	KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
	    ("command not inqueue\n"));

	tm->cm_state = MPR_CM_STATE_BUSY;
	mpr_reinit(sc);
}

/*
 * Completion handler for a LUN-reset task management request.  If no
 * commands remain outstanding on the LUN, recovery for this logical
 * unit is finished (and any other LU with timed-out commands is
 * processed next); otherwise the reset effectively failed and we
 * escalate to a target reset (or a controller reinit for NVMe devices
 * without custom TM handling).
 */
static void
mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	unsigned int cm_count = 0;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case. It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		    "This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
		    tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit. check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
			    "More commands to abort for target %u\n", targ->tid);
			mprsas_send_abort(sc, tm, cm);
		} else {
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
	} else {
		/* if we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		if (!targ->is_nvme || sc->custom_nvme_tm_handling)
			mprsas_send_reset(sc, tm,
			    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
		else
			mpr_reinit(sc);
	}
}

/*
 * Completion handler for a target-reset task management request.  If
 * the target still has outstanding commands after the reset, escalate
 * to a controller reinit.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case. It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL target reset reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL target reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "target reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/*
		 * We've finished recovery for this target and all
		 * of its logical units.
		 */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished reset recovery for target %u\n", targ->tid);

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else {
		/*
		 * After a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported. escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Target reset complete for target %u, but still have %u "
		    "command(s), resetting controller\n", targ->tid,
		    targ->outstanding);
		mpr_reinit(sc);
	}
}

#define MPR_RESET_TIMEOUT 30

/*
 * Issue a task-management reset (logical-unit or target reset) for the
 * target attached to tm.  Returns -1 on a null device handle or an
 * unexpected reset type, otherwise the result of mpr_map_command().
 * A timeout callout is armed; mprsas_tm_timeout() reinits the
 * controller if no reply arrives in time.
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err, timeout;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->TaskType = type;

	if (!target->is_nvme || sc->custom_nvme_tm_handling) {
		timeout = MPR_RESET_TIMEOUT;
		/*
		 * Target reset method =
		 * SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	} else {
		/* NVMe path: honor a per-target override if configured. */
		timeout = (target->controller_reset_timeout) ? (
		    target->controller_reset_timeout) : (MPR_RESET_TIMEOUT);
		/* PCIe Protocol Level Reset*/
		req->MsgFlags =
		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	}

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		tm->cm_targ->target_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mprsas_target_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    target->encl_level, target->encl_slot,
		    target->connector_name);
	}

	tm->cm_data = NULL;
	tm->cm_complete_data = (void *)tm;

	callout_reset(&tm->cm_callout, timeout * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}


/*
 * Completion handler for an ABORT_TASK request: either finish recovery
 * for the target, continue with the next timed-out command, or -- if
 * the command named in the abort is still at the head of the timed-out
 * queue (i.e. it never completed) -- escalate to a logical unit reset.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case. It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);
		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);
		mprsas_send_abort(sc, tm, cm);
	} else {
		/*
		 * we didn't get a command completion, so the abort
		 * failed as far as we're concerned. escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}

#define MPR_ABORT_TIMEOUT 5

/*
 * Issue an ABORT_TASK task management request for the timed-out command
 * cm, reusing the task-management command tm.  Returns -1 on a null
 * device handle, otherwise the result of mpr_map_command().
 */
static int
mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
    struct mpr_command *cm)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;
	int err, timeout;

	targ = cm->cm_targ;
	if (targ->handle == 0) {
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "%s null devhandle for target_id %d\n",
		    __func__, cm->cm_ccb->ccb_h.target_id);
		return -1;
	}

	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
	    "Aborting command %p\n", cm);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;

	/* XXX Need to handle invalid LUNs */
	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);

	/* TaskMID identifies the request to abort by its SMID. */
	req->TaskMID = htole16(cm->cm_desc.Default.SMID);

	tm->cm_data = NULL;
	tm->cm_complete = mprsas_abort_complete;
	tm->cm_complete_data = (void *)tm;
	tm->cm_targ = cm->cm_targ;
	tm->cm_lun = cm->cm_lun;

	if (!targ->is_nvme || sc->custom_nvme_tm_handling)
		timeout = MPR_ABORT_TIMEOUT;
	else
		timeout = sc->nvme_abort_timeout;

	callout_reset(&tm->cm_callout, timeout * hz,
	    mprsas_tm_timeout, tm);

	targ->aborts++;

	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending abort for cm %p SMID %u\n",
		    err, cm, req->TaskMID);
	return err;
}

/*
 * Callout handler for SCSI I/O timeouts.  Marks the command as timed
 * out, queues it on the target's timed-out list, and starts recovery by
 * sending an abort -- unless a task management request is already in
 * flight for the target, in which case the command just waits its turn.
 */
static void
mprsas_scsiio_timeout(void *data)
{
	sbintime_t elapsed, now;
	union ccb *ccb;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
	now = sbinuptime();

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending. This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_state != MPR_CM_STATE_INQUEUE) {
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	elapsed = now - ccb->ccb_h.qos.sim_data;
	mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
	    "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    targ->encl_level, targ->encl_slot, targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational. if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_state = MPR_CM_STATE_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {

		/* start recovery by aborting the first timedout command */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available. The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}

/**
 * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
 * to SCSI Unmap.
 * Return 0 - for success,
 * 1 - to immediately return back the command with success status to CAM
 * negative value - to fallback to firmware path i.e. issue scsi unmap
 * to FW without any translation.
 */
static int
mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
    union ccb *ccb, struct mprsas_target *targ)
{
	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
	struct ccb_scsiio *csio;
	struct unmap_parm_list *plist;
	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
	struct nvme_command *c;
	int i, res;
	uint16_t ndesc, list_len, data_length;
	struct mpr_prp_page *prp_page_info;
	uint64_t nvme_dsm_ranges_dma_handle;

	csio = &ccb->csio;
	/* Parameter list length lives in CDB bytes 7-8 of the UNMAP CDB. */
#if __FreeBSD_version >= 1100103
	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
#else
	if (csio->ccb_h.flags & CAM_CDB_POINTER) {
		list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
		    ccb->csio.cdb_io.cdb_ptr[8]);
	} else {
		list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
		    ccb->csio.cdb_io.cdb_bytes[8]);
	}
#endif
	if (!list_len) {
		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
		return -EINVAL;
	}

	plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
	if (!plist) {
		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
		    "save UNMAP data\n");
		return -ENOMEM;
	}

	/* Copy SCSI unmap data to a local buffer */
	bcopy(csio->data_ptr, plist, csio->dxfer_len);

	/* return back the unmap command to CAM with success status,
	 * if number of descripts is zero.
	 */
	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
	if (!ndesc) {
		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
		    "UNMAP cmd is Zero\n");
		res = 1;
		goto out;
	}

	data_length = ndesc * sizeof(struct nvme_dsm_range);
	if (data_length > targ->MDTS) {
		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
		    "Device's MDTS: %d\n", data_length, targ->MDTS);
		res = -EINVAL;
		goto out;
	}

	prp_page_info = mpr_alloc_prp_page(sc);
	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
	    "UNMAP command.\n", __func__));

	/*
	 * Insert the allocated PRP page into the command's PRP page list. This
	 * will be freed when the command is freed.
	 */
	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);

	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;

	bzero(nvme_dsm_ranges, data_length);

	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
	 * for each descriptors contained in SCSI UNMAP data.
	 */
	for (i = 0; i < ndesc; i++) {
		/* UNMAP descriptors are big-endian; NVMe fields little-endian */
		nvme_dsm_ranges[i].length =
		    htole32(be32toh(plist->desc[i].nlb));
		nvme_dsm_ranges[i].starting_lba =
		    htole64(be64toh(plist->desc[i].slba));
		nvme_dsm_ranges[i].attributes = 0;
	}

	/* Build MPI2.6's NVMe Encapsulated Request Message */
	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
	req->Flags = MPI26_NVME_FLAGS_WRITE;
	/* On error the device writes an NVMe completion into the sense buf. */
	req->ErrorResponseBaseAddress.High =
	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
	req->ErrorResponseBaseAddress.Low =
	    htole32(cm->cm_sense_busaddr);
	req->ErrorResponseAllocationLength =
	    htole16(sizeof(struct nvme_completion));
	req->EncapsulatedCommandLength =
	    htole16(sizeof(struct nvme_command));
	req->DataLength = htole32(data_length);

	/* Build NVMe DSM command */
	c = (struct nvme_command *) req->NVMe_Command;
	c->opc = NVME_OPC_DATASET_MANAGEMENT;
	/* Map LUN to namespace ID; NSIDs are 1-based per the NVMe spec. */
	c->nsid = htole32(csio->ccb_h.target_lun + 1);
	/* cdw10 carries the 0-based count of ranges in the DSM list. */
	c->cdw10 = htole32(ndesc - 1);
	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);

	cm->cm_length = data_length;
	/* Data is carried via the PRP page, not a normal S/G buffer. */
	cm->cm_data = NULL;

	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	cm->cm_desc.Default.RequestFlags =
	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;

	/* Record submission time and arm the per-command timeout callout. */
	csio->ccb_h.qos.sim_data = sbinuptime();
#if __FreeBSD_version >= 1000029
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);
#else //__FreeBSD_version < 1000029
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	    mprsas_scsiio_timeout, cm);
#endif //__FreeBSD_version >= 1000029

	targ->issued++;
	targ->outstanding++;
TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link); 1900 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1901 1902 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n", 1903 __func__, cm, ccb, targ->outstanding); 1904 1905 mpr_build_nvme_prp(sc, cm, req, 1906 (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length); 1907 mpr_map_command(sc, cm); 1908 1909 out: 1910 free(plist, M_MPR); 1911 return 0; 1912 } 1913 1914 static void 1915 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb) 1916 { 1917 MPI2_SCSI_IO_REQUEST *req; 1918 struct ccb_scsiio *csio; 1919 struct mpr_softc *sc; 1920 struct mprsas_target *targ; 1921 struct mprsas_lun *lun; 1922 struct mpr_command *cm; 1923 uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode; 1924 uint16_t eedp_flags; 1925 uint32_t mpi_control; 1926 int rc; 1927 1928 sc = sassc->sc; 1929 MPR_FUNCTRACE(sc); 1930 mtx_assert(&sc->mpr_mtx, MA_OWNED); 1931 1932 csio = &ccb->csio; 1933 KASSERT(csio->ccb_h.target_id < sassc->maxtargets, 1934 ("Target %d out of bounds in XPT_SCSI_IO\n", 1935 csio->ccb_h.target_id)); 1936 targ = &sassc->targets[csio->ccb_h.target_id]; 1937 mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags); 1938 if (targ->handle == 0x0) { 1939 mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n", 1940 __func__, csio->ccb_h.target_id); 1941 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 1942 xpt_done(ccb); 1943 return; 1944 } 1945 if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) { 1946 mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO " 1947 "supported %u\n", __func__, csio->ccb_h.target_id); 1948 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 1949 xpt_done(ccb); 1950 return; 1951 } 1952 /* 1953 * Sometimes, it is possible to get a command that is not "In 1954 * Progress" and was actually aborted by the upper layer. Check for 1955 * this here and complete the command without error. 
1956 */ 1957 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) { 1958 mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for " 1959 "target %u\n", __func__, csio->ccb_h.target_id); 1960 xpt_done(ccb); 1961 return; 1962 } 1963 /* 1964 * If devinfo is 0 this will be a volume. In that case don't tell CAM 1965 * that the volume has timed out. We want volumes to be enumerated 1966 * until they are deleted/removed, not just failed. 1967 */ 1968 if (targ->flags & MPRSAS_TARGET_INREMOVAL) { 1969 if (targ->devinfo == 0) 1970 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); 1971 else 1972 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT); 1973 xpt_done(ccb); 1974 return; 1975 } 1976 1977 if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) { 1978 mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__); 1979 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 1980 xpt_done(ccb); 1981 return; 1982 } 1983 1984 /* 1985 * If target has a reset in progress, freeze the devq and return. The 1986 * devq will be released when the TM reset is finished. 1987 */ 1988 if (targ->flags & MPRSAS_TARGET_INRESET) { 1989 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; 1990 mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n", 1991 __func__, targ->tid); 1992 xpt_freeze_devq(ccb->ccb_h.path, 1); 1993 xpt_done(ccb); 1994 return; 1995 } 1996 1997 cm = mpr_alloc_command(sc); 1998 if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) { 1999 if (cm != NULL) { 2000 mpr_free_command(sc, cm); 2001 } 2002 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) { 2003 xpt_freeze_simq(sassc->sim, 1); 2004 sassc->flags |= MPRSAS_QUEUE_FROZEN; 2005 } 2006 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2007 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 2008 xpt_done(ccb); 2009 return; 2010 } 2011 2012 /* For NVME device's issue UNMAP command directly to NVME drives by 2013 * constructing equivalent native NVMe DataSetManagement command. 
2014 */ 2015 #if __FreeBSD_version >= 1100103 2016 scsi_opcode = scsiio_cdb_ptr(csio)[0]; 2017 #else 2018 if (csio->ccb_h.flags & CAM_CDB_POINTER) 2019 scsi_opcode = csio->cdb_io.cdb_ptr[0]; 2020 else 2021 scsi_opcode = csio->cdb_io.cdb_bytes[0]; 2022 #endif 2023 if (scsi_opcode == UNMAP && 2024 targ->is_nvme && 2025 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) { 2026 rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ); 2027 if (rc == 1) { /* return command to CAM with success status */ 2028 mpr_free_command(sc, cm); 2029 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); 2030 xpt_done(ccb); 2031 return; 2032 } else if (!rc) /* Issued NVMe Encapsulated Request Message */ 2033 return; 2034 } 2035 2036 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req; 2037 bzero(req, sizeof(*req)); 2038 req->DevHandle = htole16(targ->handle); 2039 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 2040 req->MsgFlags = 0; 2041 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr); 2042 req->SenseBufferLength = MPR_SENSE_LEN; 2043 req->SGLFlags = 0; 2044 req->ChainOffset = 0; 2045 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */ 2046 req->SGLOffset1= 0; 2047 req->SGLOffset2= 0; 2048 req->SGLOffset3= 0; 2049 req->SkipCount = 0; 2050 req->DataLength = htole32(csio->dxfer_len); 2051 req->BidirectionalDataLength = 0; 2052 req->IoFlags = htole16(csio->cdb_len); 2053 req->EEDPFlags = 0; 2054 2055 /* Note: BiDirectional transfers are not supported */ 2056 switch (csio->ccb_h.flags & CAM_DIR_MASK) { 2057 case CAM_DIR_IN: 2058 mpi_control = MPI2_SCSIIO_CONTROL_READ; 2059 cm->cm_flags |= MPR_CM_FLAGS_DATAIN; 2060 break; 2061 case CAM_DIR_OUT: 2062 mpi_control = MPI2_SCSIIO_CONTROL_WRITE; 2063 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT; 2064 break; 2065 case CAM_DIR_NONE: 2066 default: 2067 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; 2068 break; 2069 } 2070 2071 if (csio->cdb_len == 32) 2072 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; 2073 /* 2074 * It looks like the hardware doesn't 
require an explicit tag 2075 * number for each transaction. SAM Task Management not supported 2076 * at the moment. 2077 */ 2078 switch (csio->tag_action) { 2079 case MSG_HEAD_OF_Q_TAG: 2080 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ; 2081 break; 2082 case MSG_ORDERED_Q_TAG: 2083 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ; 2084 break; 2085 case MSG_ACA_TASK: 2086 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ; 2087 break; 2088 case CAM_TAG_ACTION_NONE: 2089 case MSG_SIMPLE_Q_TAG: 2090 default: 2091 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; 2092 break; 2093 } 2094 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits; 2095 req->Control = htole32(mpi_control); 2096 2097 if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) { 2098 mpr_free_command(sc, cm); 2099 mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID); 2100 xpt_done(ccb); 2101 return; 2102 } 2103 2104 if (csio->ccb_h.flags & CAM_CDB_POINTER) 2105 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len); 2106 else { 2107 KASSERT(csio->cdb_len <= IOCDBLEN, 2108 ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER " 2109 "is not set", csio->cdb_len)); 2110 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len); 2111 } 2112 req->IoFlags = htole16(csio->cdb_len); 2113 2114 /* 2115 * Check if EEDP is supported and enabled. If it is then check if the 2116 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and 2117 * is formatted for EEDP support. If all of this is true, set CDB up 2118 * for EEDP transfer. 
2119 */ 2120 eedp_flags = op_code_prot[req->CDB.CDB32[0]]; 2121 if (sc->eedp_enabled && eedp_flags) { 2122 SLIST_FOREACH(lun, &targ->luns, lun_link) { 2123 if (lun->lun_id == csio->ccb_h.target_lun) { 2124 break; 2125 } 2126 } 2127 2128 if ((lun != NULL) && (lun->eedp_formatted)) { 2129 req->EEDPBlockSize = htole16(lun->eedp_block_size); 2130 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 2131 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | 2132 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD); 2133 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) { 2134 eedp_flags |= 2135 MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE; 2136 } 2137 req->EEDPFlags = htole16(eedp_flags); 2138 2139 /* 2140 * If CDB less than 32, fill in Primary Ref Tag with 2141 * low 4 bytes of LBA. If CDB is 32, tag stuff is 2142 * already there. Also, set protection bit. FreeBSD 2143 * currently does not support CDBs bigger than 16, but 2144 * the code doesn't hurt, and will be here for the 2145 * future. 2146 */ 2147 if (csio->cdb_len != 32) { 2148 lba_byte = (csio->cdb_len == 16) ? 6 : 2; 2149 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32. 
2150 PrimaryReferenceTag; 2151 for (i = 0; i < 4; i++) { 2152 *ref_tag_addr = 2153 req->CDB.CDB32[lba_byte + i]; 2154 ref_tag_addr++; 2155 } 2156 req->CDB.EEDP32.PrimaryReferenceTag = 2157 htole32(req-> 2158 CDB.EEDP32.PrimaryReferenceTag); 2159 req->CDB.EEDP32.PrimaryApplicationTagMask = 2160 0xFFFF; 2161 req->CDB.CDB32[1] = 2162 (req->CDB.CDB32[1] & 0x1F) | 0x20; 2163 } else { 2164 eedp_flags |= 2165 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG; 2166 req->EEDPFlags = htole16(eedp_flags); 2167 req->CDB.CDB32[10] = (req->CDB.CDB32[10] & 2168 0x1F) | 0x20; 2169 } 2170 } 2171 } 2172 2173 cm->cm_length = csio->dxfer_len; 2174 if (cm->cm_length != 0) { 2175 cm->cm_data = ccb; 2176 cm->cm_flags |= MPR_CM_FLAGS_USE_CCB; 2177 } else { 2178 cm->cm_data = NULL; 2179 } 2180 cm->cm_sge = &req->SGL; 2181 cm->cm_sglsize = (32 - 24) * 4; 2182 cm->cm_complete = mprsas_scsiio_complete; 2183 cm->cm_complete_data = ccb; 2184 cm->cm_targ = targ; 2185 cm->cm_lun = csio->ccb_h.target_lun; 2186 cm->cm_ccb = ccb; 2187 /* 2188 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0) 2189 * and set descriptor type. 
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		if (!sc->atomic_desc_capable) {
			cm->cm_desc.FastPathSCSIIO.DevHandle =
			    htole16(targ->handle);
		}
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		if (!sc->atomic_desc_capable)
			cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

	/* Timestamp the submission for CAM I/O scheduling statistics. */
	csio->ccb_h.qos.sim_data = sbinuptime();
#if __FreeBSD_version >= 1000029
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);
#else //__FreeBSD_version < 1000029
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	    mprsas_scsiio_timeout, cm);
#endif //__FreeBSD_version >= 1000029

	/* Account for the command and hand it to the DMA/mapping layer. */
	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_map_command(sc, cm);
	return;
}

/**
 * mpr_sc_failed_io_info - translate a non-successful SCSI_IO request
 *
 * Decode and log, at MPR_XINFO debug level, the IOC status, SCSI
 * status/state bits, autosense data and SAS response info of a failed
 * SCSI I/O reply.  Purely diagnostic: no recovery is performed and no
 * state is modified.
 */
static void
mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
    Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	u32 log_info = le32toh(mpi_reply->IOCLogInfo);

	/* Suppress this specific loginfo code entirely. */
	if (log_info == 0x31170000)
		return;

	/* Map the raw status codes to human-readable strings. */
	desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
	    ioc_status);
	desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
	    scsi_status);

	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
	/* Include physical location info when the target reported it. */
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}

	/*
	 * We can add more detail about underflow data here
	 * TO-DO
	 */
	/* "%b" decodes scsi_state against the bit-name string that follows. */
	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
	    "scsi_state %b\n", desc_scsi_status, scsi_status,
	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");

	/* Dump the autosense buffer only when XINFO debugging is enabled. */
	if (sc->mpr_debug & MPR_XINFO &&
	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
		scsi_sense_print(csio);
		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
	}

	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32toh(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
		    response_bytes[0],
		    mpr_describe_table(mpr_scsi_taskmgmt_string,
		    response_bytes[0]));
	}
}

/** mprsas_nvme_trans_status_code
 *
 * Convert Native NVMe command error status to
 * equivalent SCSI error status.
 *
 * Returns appropriate scsi_status
 */
static u8
mprsas_nvme_trans_status_code(uint16_t nvme_status,
    struct mpr_command *cm)
{
	u8 status = MPI2_SCSI_STATUS_GOOD;
	int skey, asc, ascq;
	union ccb *ccb = cm->cm_complete_data;
	int returned_sense_len;
	uint8_t sct, sc;

	/* Split the NVMe status into Status Code Type and Status Code. */
	sct = NVME_STATUS_GET_SCT(nvme_status);
	sc = NVME_STATUS_GET_SC(nvme_status);

	/*
	 * Default translation for any status code not explicitly handled
	 * below: CHECK CONDITION / ILLEGAL REQUEST with no specific cause.
	 */
	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
	skey = SSD_KEY_ILLEGAL_REQUEST;
	asc = SCSI_ASC_NO_SENSE;
	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;

	switch (sct) {
	case NVME_SCT_GENERIC:
		switch (sc) {
		case NVME_SC_SUCCESS:
			status = MPI2_SCSI_STATUS_GOOD;
			skey = SSD_KEY_NO_SENSE;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_OPCODE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_COMMAND;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_FIELD:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_DATA_TRANSFER_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ABORTED_POWER_LOSS:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_WARNING;
			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
			break;
		case NVME_SC_INTERNAL_DEVICE_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_HARDWARE_ERROR;
			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		/* All abort variants map to the same aborted-command sense. */
		case NVME_SC_ABORTED_BY_REQUEST:
		case NVME_SC_ABORTED_SQ_DELETION:
		case NVME_SC_ABORTED_FAILED_FUSED:
		case NVME_SC_ABORTED_MISSING_FUSED:
			status = MPI2_SCSI_STATUS_TASK_ABORTED;
			skey = SSD_KEY_ABORTED_COMMAND;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		case NVME_SC_LBA_OUT_OF_RANGE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ILLEGAL_BLOCK;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_CAPACITY_EXCEEDED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_NO_SENSE;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_NAMESPACE_NOT_READY:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_NOT_READY;
			asc = SCSI_ASC_LUN_NOT_READY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_COMMAND_SPECIFIC:
		switch (sc) {
		case NVME_SC_INVALID_FORMAT:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
			break;
		case NVME_SC_CONFLICTING_ATTRIBUTES:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_INVALID_CDB;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		}
		break;
	case NVME_SCT_MEDIA_ERROR:
		switch (sc) {
		case NVME_SC_WRITE_FAULTS:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_UNRECOVERED_READ_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_GUARD_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
			break;
		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
			break;
		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MEDIUM_ERROR;
			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
			break;
		case NVME_SC_COMPARE_FAILURE:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_MISCOMPARE;
			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
			break;
		case NVME_SC_ACCESS_DENIED:
			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
			skey = SSD_KEY_ILLEGAL_REQUEST;
			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
			ascq = SCSI_ASCQ_INVALID_LUN_ID;
			break;
		}
		break;
	}

	/* Report the sense residual against the caller's sense buffer. */
	returned_sense_len = sizeof(struct scsi_sense_data);
	if (returned_sense_len < ccb->csio.sense_len)
		ccb->csio.sense_resid = ccb->csio.sense_len -
		    returned_sense_len;
	else
		ccb->csio.sense_resid = 0;

	/* Build fixed-format sense data from the translated key/asc/ascq. */
	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
	    1, skey, asc, ascq, SSD_ELEM_NONE);
	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;

	return status;
}

/** mprsas_complete_nvme_unmap
 *
 * Complete native NVMe command issued using NVMe Encapsulated
 * Request Message.
 */
static u8
mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
{
	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
	struct nvme_completion *nvme_completion = NULL;
	u8 scsi_status = MPI2_SCSI_STATUS_GOOD;

	/*
	 * If the firmware returned an NVMe error response, translate it to
	 * an equivalent SCSI status (with autosense); otherwise report GOOD.
	 */
	mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
	if (le16toh(mpi_reply->ErrorResponseCount)){
		nvme_completion = (struct nvme_completion *)cm->cm_sense;
		scsi_status = mprsas_nvme_trans_status_code(
		    nvme_completion->status, cm);
	}
	return scsi_status;
}

/*
 * Completion handler for SCSI I/O commands: unmaps DMA, updates target
 * accounting, translates the firmware reply (IOC status, SCSI status,
 * autosense) into CAM ccb status, and finishes the ccb with xpt_done().
 * Called with the softc mutex held.
 */
static void
mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mprsas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on, *scsi_cdb;
	int dir = 0, i;
	u16 alloc_len;
	struct mprsas_target *target;
	target_id_t target_id;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* The command completed; cancel its timeout. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here? It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Update per-target accounting and clear the queued/status bits. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

	/* Log completions that race with timeout/TM/diag-reset recovery. */
	if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		cm->cm_state = MPR_CM_STATE_BUSY;
		if (cm->cm_reply != NULL)
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus,
			    rep->SCSIState, le32toh(rep->TransferCount));
		else
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
		mprsas_log_command(cm, MPR_RECOVERY,
		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware. So we set the status here, and it will
		 * be retained below. We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames. We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use. If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back. That's
		 * probably unnecessary. 
		 */
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
			mpr_dprint(sc, MPR_XINFO, "Error sending command, "
			    "freezing SIM queue\n");
		}
	}

	/*
	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
	 * flag, and use it in a few places in the rest of this function for
	 * convenience. Use the macro if available.
	 */
#if __FreeBSD_version >= 1100103
	scsi_cdb = scsiio_cdb_ptr(csio);
#else
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		scsi_cdb = csio->cdb_io.cdb_ptr;
	else
		scsi_cdb = csio->cdb_io.cdb_bytes;
#endif

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent. All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				/* No reply means the firmware reported
				 * unqualified success. */
				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
				csio->scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
				mpr_dprint(sc, MPR_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP. The first is if MPR_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
		 */
		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mpr_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	/*
	 * An NVMe-native UNMAP was sent as an encapsulated request; derive
	 * the SCSI status from the NVMe completion before decoding below.
	 */
	target = &sassc->targets[target_id];
	if (scsi_cdb[0] == UNMAP &&
	    target->is_nvme &&
	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
		csio->scsi_status = rep->SCSIStatus;
	}

	mprsas_log_command(cm, MPR_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/* Translate the IOC status from the reply into a CAM ccb status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done. If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
		    == MPR_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases. These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		/* Copy autosense data into the ccb, clamped to both the
		 * firmware's count and the caller's buffer. */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < csio->sense_len)
				csio->sense_resid = csio->sense_len -
				    returned_sense_len;
			else
				csio->sense_resid = 0;

			sense_len = min(returned_sense_len,
			    csio->sense_len - csio->sense_resid);
			bzero(&csio->sense_data, sizeof(csio->sense_data));
			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command. If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((scsi_cdb[0] == INQUIRY) &&
		    (scsi_cdb[1] & SI_EVPD) &&
		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}

		/*
		 * If this is a SATA direct-access end device, mark it so that
		 * a SCSI StartStopUnit command will be sent to it when the
		 * driver is being shutdown.
		 */
		if ((scsi_cdb[0] == INQUIRY) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
		    ((sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
			target = &sassc->targets[target_id];
			target->supports_SSU = TRUE;
			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
			    target_id);
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume. In that case don't
		 * tell CAM that the volume is not there. We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mpr_print_scsiio_cmd(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted. If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * These can sometimes be transient transport-related
		 * errors, and sometimes persistent drive-related errors.
		 * We used to retry these without decrementing the retry
		 * count by returning CAM_REQUEUE_REQ. Unfortunately, if
		 * we hit a persistent drive problem that returns one of
		 * these error codes, we would retry indefinitely. So,
		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
		 * count and avoid infinite retries. We're taking the
		 * potential risk of flagging false failures in the event
		 * of a topology-related error (e.g. a SAS expander problem
		 * causes a command addressed to a drive to fail), but
		 * avoiding getting into an infinite retry loop.
		 */
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		mpr_dprint(sc, MPR_INFO,
		    "Controller reported %s tgt %u SMID %u loginfo %x\n",
		    mpr_describe_table(mpr_iocstatus_string,
		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
		    target_id, cm->cm_desc.Default.SMID,
		    le32toh(rep->IOCLogInfo));
		mpr_dprint(sc, MPR_XINFO,
		    "SCSIStatus %x SCSIState %x xfercount %u\n",
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mprsas_log_command(cm, MPR_XINFO,
		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
		    rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;

		/* NVMe UNMAPs were already translated above; treat them as
		 * complete rather than failing on the generic IOC status. */
		if (scsi_cdb[0] == UNMAP &&
		    target->is_nvme &&
		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

		break;
	}

	/* Diagnostic-only decode of the reply (prints at MPR_XINFO level). */
	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);

	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
		    "queue\n");
	}

	/* Freeze the device queue on any non-success so recovery keeps
	 * command ordering. */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mpr_free_command(sc, cm);
	xpt_done(ccb);
}

#if __FreeBSD_version
>= 900026
/*
 * Completion handler for SMP passthrough commands: validate the reply,
 * map the IOC/SAS status and SMP function result to a CAM ccb status,
 * then sync/unload the DMA map and finish the ccb.
 */
static void
mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case. It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only. That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
		    "request!\n", __func__, cm->cm_flags);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/* Reassemble the 64-bit SAS address from the request (for logging). */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
	    "completed successfully\n", __func__, (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame is the function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mpr_free_command(sc, cm);
	xpt_done(ccb);
}

static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

#if (__FreeBSD_version >= 1000028) || \
    ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two. So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
3017 */ 3018 if (ccb->smpio.smp_request_sglist_cnt != 0) { 3019 bus_dma_segment_t *req_sg; 3020 3021 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request; 3022 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr; 3023 } else 3024 request = ccb->smpio.smp_request; 3025 3026 if (ccb->smpio.smp_response_sglist_cnt != 0) { 3027 bus_dma_segment_t *rsp_sg; 3028 3029 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response; 3030 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr; 3031 } else 3032 response = ccb->smpio.smp_response; 3033 break; 3034 case CAM_DATA_VADDR: 3035 request = ccb->smpio.smp_request; 3036 response = ccb->smpio.smp_response; 3037 break; 3038 default: 3039 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID); 3040 xpt_done(ccb); 3041 return; 3042 } 3043 #else /* __FreeBSD_version < 1000028 */ 3044 /* 3045 * XXX We don't yet support physical addresses here. 3046 */ 3047 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) { 3048 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not " 3049 "supported\n", __func__); 3050 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID); 3051 xpt_done(ccb); 3052 return; 3053 } 3054 3055 /* 3056 * If the user wants to send an S/G list, check to make sure they 3057 * have single buffers. 3058 */ 3059 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) { 3060 /* 3061 * The chip does not support more than one buffer for the 3062 * request or response. 3063 */ 3064 if ((ccb->smpio.smp_request_sglist_cnt > 1) 3065 || (ccb->smpio.smp_response_sglist_cnt > 1)) { 3066 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or " 3067 "response buffer segments not supported for SMP\n", 3068 __func__); 3069 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID); 3070 xpt_done(ccb); 3071 return; 3072 } 3073 3074 /* 3075 * The CAM_SCATTER_VALID flag was originally implemented 3076 * for the XPT_SCSI_IO CCB, which only has one data pointer. 3077 * We have two. 
So, just take that flag to mean that we 3078 * might have S/G lists, and look at the S/G segment count 3079 * to figure out whether that is the case for each individual 3080 * buffer. 3081 */ 3082 if (ccb->smpio.smp_request_sglist_cnt != 0) { 3083 bus_dma_segment_t *req_sg; 3084 3085 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request; 3086 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr; 3087 } else 3088 request = ccb->smpio.smp_request; 3089 3090 if (ccb->smpio.smp_response_sglist_cnt != 0) { 3091 bus_dma_segment_t *rsp_sg; 3092 3093 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response; 3094 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr; 3095 } else 3096 response = ccb->smpio.smp_response; 3097 } else { 3098 request = ccb->smpio.smp_request; 3099 response = ccb->smpio.smp_response; 3100 } 3101 #endif /* __FreeBSD_version < 1000028 */ 3102 3103 cm = mpr_alloc_command(sc); 3104 if (cm == NULL) { 3105 mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n", 3106 __func__); 3107 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL); 3108 xpt_done(ccb); 3109 return; 3110 } 3111 3112 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req; 3113 bzero(req, sizeof(*req)); 3114 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; 3115 3116 /* Allow the chip to use any route to this SAS address. */ 3117 req->PhysicalPort = 0xff; 3118 3119 req->RequestDataLength = htole16(ccb->smpio.smp_request_len); 3120 req->SGLFlags = 3121 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI; 3122 3123 mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address " 3124 "%#jx\n", __func__, (uintmax_t)sasaddr); 3125 3126 mpr_init_sge(cm, req, &req->SGL); 3127 3128 /* 3129 * Set up a uio to pass into mpr_map_command(). This allows us to 3130 * do one map command, and one busdma call in there. 
3131 */ 3132 cm->cm_uio.uio_iov = cm->cm_iovec; 3133 cm->cm_uio.uio_iovcnt = 2; 3134 cm->cm_uio.uio_segflg = UIO_SYSSPACE; 3135 3136 /* 3137 * The read/write flag isn't used by busdma, but set it just in 3138 * case. This isn't exactly accurate, either, since we're going in 3139 * both directions. 3140 */ 3141 cm->cm_uio.uio_rw = UIO_WRITE; 3142 3143 cm->cm_iovec[0].iov_base = request; 3144 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength); 3145 cm->cm_iovec[1].iov_base = response; 3146 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len; 3147 3148 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len + 3149 cm->cm_iovec[1].iov_len; 3150 3151 /* 3152 * Trigger a warning message in mpr_data_cb() for the user if we 3153 * wind up exceeding two S/G segments. The chip expects one 3154 * segment for the request and another for the response. 3155 */ 3156 cm->cm_max_segs = 2; 3157 3158 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 3159 cm->cm_complete = mprsas_smpio_complete; 3160 cm->cm_complete_data = ccb; 3161 3162 /* 3163 * Tell the mapping code that we're using a uio, and that this is 3164 * an SMP passthrough request. There is a little special-case 3165 * logic there (in mpr_data_cb()) to handle the bidirectional 3166 * transfer. 3167 */ 3168 cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS | 3169 MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT; 3170 3171 /* The chip data format is little endian. */ 3172 req->SASAddress.High = htole32(sasaddr >> 32); 3173 req->SASAddress.Low = htole32(sasaddr); 3174 3175 /* 3176 * XXX Note that we don't have a timeout/abort mechanism here. 3177 * From the manual, it looks like task management requests only 3178 * work for SCSI IO and SATA passthrough requests. We may need to 3179 * have a mechanism to retry requests in the event of a chip reset 3180 * at least. Hopefully the chip will insure that any errors short 3181 * of that are relayed back to the driver. 
3182 */ 3183 error = mpr_map_command(sc, cm); 3184 if ((error != 0) && (error != EINPROGRESS)) { 3185 mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from " 3186 "mpr_map_command()\n", __func__, error); 3187 goto bailout_error; 3188 } 3189 3190 return; 3191 3192 bailout_error: 3193 mpr_free_command(sc, cm); 3194 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL); 3195 xpt_done(ccb); 3196 return; 3197 } 3198 3199 static void 3200 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb) 3201 { 3202 struct mpr_softc *sc; 3203 struct mprsas_target *targ; 3204 uint64_t sasaddr = 0; 3205 3206 sc = sassc->sc; 3207 3208 /* 3209 * Make sure the target exists. 3210 */ 3211 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, 3212 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id)); 3213 targ = &sassc->targets[ccb->ccb_h.target_id]; 3214 if (targ->handle == 0x0) { 3215 mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n", 3216 __func__, ccb->ccb_h.target_id); 3217 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT); 3218 xpt_done(ccb); 3219 return; 3220 } 3221 3222 /* 3223 * If this device has an embedded SMP target, we'll talk to it 3224 * directly. 3225 * figure out what the expander's address is. 3226 */ 3227 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0) 3228 sasaddr = targ->sasaddr; 3229 3230 /* 3231 * If we don't have a SAS address for the expander yet, try 3232 * grabbing it from the page 0x83 information cached in the 3233 * transport layer for this target. LSI expanders report the 3234 * expander SAS address as the port-associated SAS address in 3235 * Inquiry VPD page 0x83. Maxim expanders don't report it in page 3236 * 0x83. 3237 * 3238 * XXX KDM disable this for now, but leave it commented out so that 3239 * it is obvious that this is another possible way to get the SAS 3240 * address. 
3241 * 3242 * The parent handle method below is a little more reliable, and 3243 * the other benefit is that it works for devices other than SES 3244 * devices. So you can send a SMP request to a da(4) device and it 3245 * will get routed to the expander that device is attached to. 3246 * (Assuming the da(4) device doesn't contain an SMP target...) 3247 */ 3248 #if 0 3249 if (sasaddr == 0) 3250 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path); 3251 #endif 3252 3253 /* 3254 * If we still don't have a SAS address for the expander, look for 3255 * the parent device of this device, which is probably the expander. 3256 */ 3257 if (sasaddr == 0) { 3258 #ifdef OLD_MPR_PROBE 3259 struct mprsas_target *parent_target; 3260 #endif 3261 3262 if (targ->parent_handle == 0x0) { 3263 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have " 3264 "a valid parent handle!\n", __func__, targ->handle); 3265 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 3266 goto bailout; 3267 } 3268 #ifdef OLD_MPR_PROBE 3269 parent_target = mprsas_find_target_by_handle(sassc, 0, 3270 targ->parent_handle); 3271 3272 if (parent_target == NULL) { 3273 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have " 3274 "a valid parent target!\n", __func__, targ->handle); 3275 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 3276 goto bailout; 3277 } 3278 3279 if ((parent_target->devinfo & 3280 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) { 3281 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d " 3282 "does not have an SMP target!\n", __func__, 3283 targ->handle, parent_target->handle); 3284 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 3285 goto bailout; 3286 } 3287 3288 sasaddr = parent_target->sasaddr; 3289 #else /* OLD_MPR_PROBE */ 3290 if ((targ->parent_devinfo & 3291 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) { 3292 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d " 3293 "does not have an SMP target!\n", __func__, 3294 targ->handle, targ->parent_handle); 3295 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 3296 goto 
bailout; 3297 3298 } 3299 if (targ->parent_sasaddr == 0x0) { 3300 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle " 3301 "%d does not have a valid SAS address!\n", __func__, 3302 targ->handle, targ->parent_handle); 3303 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 3304 goto bailout; 3305 } 3306 3307 sasaddr = targ->parent_sasaddr; 3308 #endif /* OLD_MPR_PROBE */ 3309 3310 } 3311 3312 if (sasaddr == 0) { 3313 mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for " 3314 "handle %d\n", __func__, targ->handle); 3315 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 3316 goto bailout; 3317 } 3318 mprsas_send_smpcmd(sassc, ccb, sasaddr); 3319 3320 return; 3321 3322 bailout: 3323 xpt_done(ccb); 3324 3325 } 3326 #endif //__FreeBSD_version >= 900026 3327 3328 static void 3329 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb) 3330 { 3331 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 3332 struct mpr_softc *sc; 3333 struct mpr_command *tm; 3334 struct mprsas_target *targ; 3335 3336 MPR_FUNCTRACE(sassc->sc); 3337 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED); 3338 3339 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of " 3340 "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id)); 3341 sc = sassc->sc; 3342 tm = mprsas_alloc_tm(sc); 3343 if (tm == NULL) { 3344 mpr_dprint(sc, MPR_ERROR, "command alloc failure in " 3345 "mprsas_action_resetdev\n"); 3346 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL); 3347 xpt_done(ccb); 3348 return; 3349 } 3350 3351 targ = &sassc->targets[ccb->ccb_h.target_id]; 3352 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 3353 req->DevHandle = htole16(targ->handle); 3354 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 3355 3356 if (!targ->is_nvme || sc->custom_nvme_tm_handling) { 3357 /* SAS Hard Link Reset / SATA Link Reset */ 3358 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 3359 } else { 3360 /* PCIe Protocol Level Reset*/ 3361 req->MsgFlags = 3362 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; 3363 } 
3364 3365 tm->cm_data = NULL; 3366 tm->cm_complete = mprsas_resetdev_complete; 3367 tm->cm_complete_data = ccb; 3368 3369 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n", 3370 __func__, targ->tid); 3371 tm->cm_targ = targ; 3372 3373 mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD); 3374 mpr_map_command(sc, tm); 3375 } 3376 3377 static void 3378 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm) 3379 { 3380 MPI2_SCSI_TASK_MANAGE_REPLY *resp; 3381 union ccb *ccb; 3382 3383 MPR_FUNCTRACE(sc); 3384 mtx_assert(&sc->mpr_mtx, MA_OWNED); 3385 3386 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 3387 ccb = tm->cm_complete_data; 3388 3389 /* 3390 * Currently there should be no way we can hit this case. It only 3391 * happens when we have a failure to allocate chain frames, and 3392 * task management commands don't have S/G lists. 3393 */ 3394 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 3395 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 3396 3397 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 3398 3399 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of " 3400 "handle %#04x! 
This should not happen!\n", __func__, 3401 tm->cm_flags, req->DevHandle); 3402 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); 3403 goto bailout; 3404 } 3405 3406 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", 3407 __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode)); 3408 3409 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) { 3410 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); 3411 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, 3412 CAM_LUN_WILDCARD); 3413 } 3414 else 3415 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); 3416 3417 bailout: 3418 3419 mprsas_free_tm(sc, tm); 3420 xpt_done(ccb); 3421 } 3422 3423 static void 3424 mprsas_poll(struct cam_sim *sim) 3425 { 3426 struct mprsas_softc *sassc; 3427 3428 sassc = cam_sim_softc(sim); 3429 3430 if (sassc->sc->mpr_debug & MPR_TRACE) { 3431 /* frequent debug messages during a panic just slow 3432 * everything down too much. 3433 */ 3434 mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n", 3435 __func__); 3436 sassc->sc->mpr_debug &= ~MPR_TRACE; 3437 } 3438 3439 mpr_intr_locked(sassc->sc); 3440 } 3441 3442 static void 3443 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path, 3444 void *arg) 3445 { 3446 struct mpr_softc *sc; 3447 3448 sc = (struct mpr_softc *)callback_arg; 3449 3450 switch (code) { 3451 #if (__FreeBSD_version >= 1000006) || \ 3452 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000)) 3453 case AC_ADVINFO_CHANGED: { 3454 struct mprsas_target *target; 3455 struct mprsas_softc *sassc; 3456 struct scsi_read_capacity_data_long rcap_buf; 3457 struct ccb_dev_advinfo cdai; 3458 struct mprsas_lun *lun; 3459 lun_id_t lunid; 3460 int found_lun; 3461 uintptr_t buftype; 3462 3463 buftype = (uintptr_t)arg; 3464 3465 found_lun = 0; 3466 sassc = sc->sassc; 3467 3468 /* 3469 * We're only interested in read capacity data changes. 
3470 */ 3471 if (buftype != CDAI_TYPE_RCAPLONG) 3472 break; 3473 3474 /* 3475 * See the comment in mpr_attach_sas() for a detailed 3476 * explanation. In these versions of FreeBSD we register 3477 * for all events and filter out the events that don't 3478 * apply to us. 3479 */ 3480 #if (__FreeBSD_version < 1000703) || \ 3481 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002)) 3482 if (xpt_path_path_id(path) != sassc->sim->path_id) 3483 break; 3484 #endif 3485 3486 /* 3487 * We should have a handle for this, but check to make sure. 3488 */ 3489 KASSERT(xpt_path_target_id(path) < sassc->maxtargets, 3490 ("Target %d out of bounds in mprsas_async\n", 3491 xpt_path_target_id(path))); 3492 target = &sassc->targets[xpt_path_target_id(path)]; 3493 if (target->handle == 0) 3494 break; 3495 3496 lunid = xpt_path_lun_id(path); 3497 3498 SLIST_FOREACH(lun, &target->luns, lun_link) { 3499 if (lun->lun_id == lunid) { 3500 found_lun = 1; 3501 break; 3502 } 3503 } 3504 3505 if (found_lun == 0) { 3506 lun = malloc(sizeof(struct mprsas_lun), M_MPR, 3507 M_NOWAIT | M_ZERO); 3508 if (lun == NULL) { 3509 mpr_dprint(sc, MPR_ERROR, "Unable to alloc " 3510 "LUN for EEDP support.\n"); 3511 break; 3512 } 3513 lun->lun_id = lunid; 3514 SLIST_INSERT_HEAD(&target->luns, lun, lun_link); 3515 } 3516 3517 bzero(&rcap_buf, sizeof(rcap_buf)); 3518 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); 3519 cdai.ccb_h.func_code = XPT_DEV_ADVINFO; 3520 cdai.ccb_h.flags = CAM_DIR_IN; 3521 cdai.buftype = CDAI_TYPE_RCAPLONG; 3522 #if (__FreeBSD_version >= 1100061) || \ 3523 ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000)) 3524 cdai.flags = CDAI_FLAG_NONE; 3525 #else 3526 cdai.flags = 0; 3527 #endif 3528 cdai.bufsiz = sizeof(rcap_buf); 3529 cdai.buf = (uint8_t *)&rcap_buf; 3530 xpt_action((union ccb *)&cdai); 3531 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) 3532 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); 3533 3534 if ((mprsas_get_ccbstatus((union ccb *)&cdai) == 
CAM_REQ_CMP) 3535 && (rcap_buf.prot & SRC16_PROT_EN)) { 3536 switch (rcap_buf.prot & SRC16_P_TYPE) { 3537 case SRC16_PTYPE_1: 3538 case SRC16_PTYPE_3: 3539 lun->eedp_formatted = TRUE; 3540 lun->eedp_block_size = 3541 scsi_4btoul(rcap_buf.length); 3542 break; 3543 case SRC16_PTYPE_2: 3544 default: 3545 lun->eedp_formatted = FALSE; 3546 lun->eedp_block_size = 0; 3547 break; 3548 } 3549 } else { 3550 lun->eedp_formatted = FALSE; 3551 lun->eedp_block_size = 0; 3552 } 3553 break; 3554 } 3555 #endif 3556 case AC_FOUND_DEVICE: { 3557 struct ccb_getdev *cgd; 3558 3559 /* 3560 * See the comment in mpr_attach_sas() for a detailed 3561 * explanation. In these versions of FreeBSD we register 3562 * for all events and filter out the events that don't 3563 * apply to us. 3564 */ 3565 #if (__FreeBSD_version < 1000703) || \ 3566 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002)) 3567 if (xpt_path_path_id(path) != sc->sassc->sim->path_id) 3568 break; 3569 #endif 3570 3571 cgd = arg; 3572 #if (__FreeBSD_version < 901503) || \ 3573 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) 3574 mprsas_check_eedp(sc, path, cgd); 3575 #endif 3576 break; 3577 } 3578 default: 3579 break; 3580 } 3581 } 3582 3583 #if (__FreeBSD_version < 901503) || \ 3584 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) 3585 static void 3586 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path, 3587 struct ccb_getdev *cgd) 3588 { 3589 struct mprsas_softc *sassc = sc->sassc; 3590 struct ccb_scsiio *csio; 3591 struct scsi_read_capacity_16 *scsi_cmd; 3592 struct scsi_read_capacity_eedp *rcap_buf; 3593 path_id_t pathid; 3594 target_id_t targetid; 3595 lun_id_t lunid; 3596 union ccb *ccb; 3597 struct cam_path *local_path; 3598 struct mprsas_target *target; 3599 struct mprsas_lun *lun; 3600 uint8_t found_lun; 3601 char path_str[64]; 3602 3603 pathid = cam_sim_path(sassc->sim); 3604 targetid = xpt_path_target_id(path); 3605 lunid = xpt_path_lun_id(path); 3606 3607 
KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in " 3608 "mprsas_check_eedp\n", targetid)); 3609 target = &sassc->targets[targetid]; 3610 if (target->handle == 0x0) 3611 return; 3612 3613 /* 3614 * Determine if the device is EEDP capable. 3615 * 3616 * If this flag is set in the inquiry data, the device supports 3617 * protection information, and must support the 16 byte read capacity 3618 * command, otherwise continue without sending read cap 16. 3619 */ 3620 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0) 3621 return; 3622 3623 /* 3624 * Issue a READ CAPACITY 16 command. This info is used to determine if 3625 * the LUN is formatted for EEDP support. 3626 */ 3627 ccb = xpt_alloc_ccb_nowait(); 3628 if (ccb == NULL) { 3629 mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP " 3630 "support.\n"); 3631 return; 3632 } 3633 3634 if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) != 3635 CAM_REQ_CMP) { 3636 mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP " 3637 "support.\n"); 3638 xpt_free_ccb(ccb); 3639 return; 3640 } 3641 3642 /* 3643 * If LUN is already in list, don't create a new one. 3644 */ 3645 found_lun = FALSE; 3646 SLIST_FOREACH(lun, &target->luns, lun_link) { 3647 if (lun->lun_id == lunid) { 3648 found_lun = TRUE; 3649 break; 3650 } 3651 } 3652 if (!found_lun) { 3653 lun = malloc(sizeof(struct mprsas_lun), M_MPR, 3654 M_NOWAIT | M_ZERO); 3655 if (lun == NULL) { 3656 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for " 3657 "EEDP support.\n"); 3658 xpt_free_path(local_path); 3659 xpt_free_ccb(ccb); 3660 return; 3661 } 3662 lun->lun_id = lunid; 3663 SLIST_INSERT_HEAD(&target->luns, lun, lun_link); 3664 } 3665 3666 xpt_path_string(local_path, path_str, sizeof(path_str)); 3667 mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n", 3668 path_str, target->handle); 3669 3670 /* 3671 * Issue a READ CAPACITY 16 command for the LUN. 
The 3672 * mprsas_read_cap_done function will load the read cap info into the 3673 * LUN struct. 3674 */ 3675 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR, 3676 M_NOWAIT | M_ZERO); 3677 if (rcap_buf == NULL) { 3678 mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity " 3679 "buffer for EEDP support.\n"); 3680 xpt_free_path(ccb->ccb_h.path); 3681 xpt_free_ccb(ccb); 3682 return; 3683 } 3684 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT); 3685 csio = &ccb->csio; 3686 csio->ccb_h.func_code = XPT_SCSI_IO; 3687 csio->ccb_h.flags = CAM_DIR_IN; 3688 csio->ccb_h.retry_count = 4; 3689 csio->ccb_h.cbfcnp = mprsas_read_cap_done; 3690 csio->ccb_h.timeout = 60000; 3691 csio->data_ptr = (uint8_t *)rcap_buf; 3692 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp); 3693 csio->sense_len = MPR_SENSE_LEN; 3694 csio->cdb_len = sizeof(*scsi_cmd); 3695 csio->tag_action = MSG_SIMPLE_Q_TAG; 3696 3697 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes; 3698 bzero(scsi_cmd, sizeof(*scsi_cmd)); 3699 scsi_cmd->opcode = 0x9E; 3700 scsi_cmd->service_action = SRC16_SERVICE_ACTION; 3701 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp); 3702 3703 ccb->ccb_h.ppriv_ptr1 = sassc; 3704 xpt_action(ccb); 3705 } 3706 3707 static void 3708 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb) 3709 { 3710 struct mprsas_softc *sassc; 3711 struct mprsas_target *target; 3712 struct mprsas_lun *lun; 3713 struct scsi_read_capacity_eedp *rcap_buf; 3714 3715 if (done_ccb == NULL) 3716 return; 3717 3718 /* Driver need to release devq, it Scsi command is 3719 * generated by driver internally. 3720 * Currently there is a single place where driver 3721 * calls scsi command internally. In future if driver 3722 * calls more scsi command internally, it needs to release 3723 * devq internally, since those command will not go back to 3724 * cam_periph. 
3725 */ 3726 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) { 3727 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 3728 xpt_release_devq(done_ccb->ccb_h.path, 3729 /*count*/ 1, /*run_queue*/TRUE); 3730 } 3731 3732 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr; 3733 3734 /* 3735 * Get the LUN ID for the path and look it up in the LUN list for the 3736 * target. 3737 */ 3738 sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1; 3739 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out " 3740 "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id)); 3741 target = &sassc->targets[done_ccb->ccb_h.target_id]; 3742 SLIST_FOREACH(lun, &target->luns, lun_link) { 3743 if (lun->lun_id != done_ccb->ccb_h.target_lun) 3744 continue; 3745 3746 /* 3747 * Got the LUN in the target's LUN list. Fill it in with EEDP 3748 * info. If the READ CAP 16 command had some SCSI error (common 3749 * if command is not supported), mark the lun as not supporting 3750 * EEDP and set the block size to 0. 3751 */ 3752 if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) || 3753 (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) { 3754 lun->eedp_formatted = FALSE; 3755 lun->eedp_block_size = 0; 3756 break; 3757 } 3758 3759 if (rcap_buf->protect & 0x01) { 3760 mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID " 3761 "%d is formatted for EEDP support.\n", 3762 done_ccb->ccb_h.target_lun, 3763 done_ccb->ccb_h.target_id); 3764 lun->eedp_formatted = TRUE; 3765 lun->eedp_block_size = scsi_4btoul(rcap_buf->length); 3766 } 3767 break; 3768 } 3769 3770 // Finished with this CCB and path. 3771 free(rcap_buf, M_MPR); 3772 xpt_free_path(done_ccb->ccb_h.path); 3773 xpt_free_ccb(done_ccb); 3774 } 3775 #endif /* (__FreeBSD_version < 901503) || \ 3776 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */ 3777 3778 /* 3779 * Set the INRESET flag for this target so that no I/O will be sent to 3780 * the target until the reset has completed. 
If an I/O request does 3781 * happen, the devq will be frozen. The CCB holds the path which is 3782 * used to release the devq. The devq is released and the CCB is freed 3783 * when the TM completes. 3784 */ 3785 void 3786 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm, 3787 struct mprsas_target *target, lun_id_t lun_id) 3788 { 3789 union ccb *ccb; 3790 path_id_t path_id; 3791 3792 ccb = xpt_alloc_ccb_nowait(); 3793 if (ccb) { 3794 path_id = cam_sim_path(sc->sassc->sim); 3795 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id, 3796 target->tid, lun_id) != CAM_REQ_CMP) { 3797 xpt_free_ccb(ccb); 3798 } else { 3799 tm->cm_ccb = ccb; 3800 tm->cm_targ = target; 3801 target->flags |= MPRSAS_TARGET_INRESET; 3802 } 3803 } 3804 } 3805 3806 int 3807 mprsas_startup(struct mpr_softc *sc) 3808 { 3809 /* 3810 * Send the port enable message and set the wait_for_port_enable flag. 3811 * This flag helps to keep the simq frozen until all discovery events 3812 * are processed. 3813 */ 3814 sc->wait_for_port_enable = 1; 3815 mprsas_send_portenable(sc); 3816 return (0); 3817 } 3818 3819 static int 3820 mprsas_send_portenable(struct mpr_softc *sc) 3821 { 3822 MPI2_PORT_ENABLE_REQUEST *request; 3823 struct mpr_command *cm; 3824 3825 MPR_FUNCTRACE(sc); 3826 3827 if ((cm = mpr_alloc_command(sc)) == NULL) 3828 return (EBUSY); 3829 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req; 3830 request->Function = MPI2_FUNCTION_PORT_ENABLE; 3831 request->MsgFlags = 0; 3832 request->VP_ID = 0; 3833 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 3834 cm->cm_complete = mprsas_portenable_complete; 3835 cm->cm_data = NULL; 3836 cm->cm_sge = NULL; 3837 3838 mpr_map_command(sc, cm); 3839 mpr_dprint(sc, MPR_XINFO, 3840 "mpr_send_portenable finished cm %p req %p complete %p\n", 3841 cm, cm->cm_req, cm->cm_complete); 3842 return (0); 3843 } 3844 3845 static void 3846 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm) 3847 { 3848 
MPI2_PORT_ENABLE_REPLY *reply; 3849 struct mprsas_softc *sassc; 3850 3851 MPR_FUNCTRACE(sc); 3852 sassc = sc->sassc; 3853 3854 /* 3855 * Currently there should be no way we can hit this case. It only 3856 * happens when we have a failure to allocate chain frames, and 3857 * port enable commands don't have S/G lists. 3858 */ 3859 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 3860 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! " 3861 "This should not happen!\n", __func__, cm->cm_flags); 3862 } 3863 3864 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply; 3865 if (reply == NULL) 3866 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n"); 3867 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) != 3868 MPI2_IOCSTATUS_SUCCESS) 3869 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n"); 3870 3871 mpr_free_command(sc, cm); 3872 /* 3873 * Done waiting for port enable to complete. Decrement the refcount. 3874 * If refcount is 0, discovery is complete and a rescan of the bus can 3875 * take place. 3876 */ 3877 sc->wait_for_port_enable = 0; 3878 sc->port_enable_complete = 1; 3879 wakeup(&sc->port_enable_complete); 3880 mprsas_startup_decrement(sassc); 3881 } 3882 3883 int 3884 mprsas_check_id(struct mprsas_softc *sassc, int id) 3885 { 3886 struct mpr_softc *sc = sassc->sc; 3887 char *ids; 3888 char *name; 3889 3890 ids = &sc->exclude_ids[0]; 3891 while((name = strsep(&ids, ",")) != NULL) { 3892 if (name[0] == '\0') 3893 continue; 3894 if (strtol(name, NULL, 0) == (long)id) 3895 return (1); 3896 } 3897 3898 return (0); 3899 } 3900 3901 void 3902 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets) 3903 { 3904 struct mprsas_softc *sassc; 3905 struct mprsas_lun *lun, *lun_tmp; 3906 struct mprsas_target *targ; 3907 int i; 3908 3909 sassc = sc->sassc; 3910 /* 3911 * The number of targets is based on IOC Facts, so free all of 3912 * the allocated LUNs for each target and then the target buffer 3913 * itself. 
3914 */ 3915 for (i=0; i< maxtargets; i++) { 3916 targ = &sassc->targets[i]; 3917 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) { 3918 free(lun, M_MPR); 3919 } 3920 } 3921 free(sassc->targets, M_MPR); 3922 3923 sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets, 3924 M_MPR, M_WAITOK|M_ZERO); 3925 if (!sassc->targets) { 3926 panic("%s failed to alloc targets with error %d\n", 3927 __func__, ENOMEM); 3928 } 3929 } 3930