1 /*- 2 * Copyright (c) 2009 Yahoo! Inc. 3 * Copyright (c) 2011-2015 LSI Corp. 4 * Copyright (c) 2013-2015 Avago Technologies 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
 *
 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* Communications core for Avago Technologies (LSI) MPT3 */

/* TODO Move headers to mprvar */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/selinfo.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/queue.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/sbuf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <machine/stdarg.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#if __FreeBSD_version >= 900026
#include <cam/scsi/smp_all.h>
#endif

#include <dev/mpr/mpi/mpi2_type.h>
#include <dev/mpr/mpi/mpi2.h>
#include <dev/mpr/mpi/mpi2_ioc.h>
#include <dev/mpr/mpi/mpi2_sas.h>
#include <dev/mpr/mpi/mpi2_cnfg.h>
#include <dev/mpr/mpi/mpi2_init.h>
#include <dev/mpr/mpi/mpi2_tool.h>
#include <dev/mpr/mpr_ioctl.h>
#include <dev/mpr/mprvar.h>
#include <dev/mpr/mpr_table.h>
#include <dev/mpr/mpr_sas.h>

#define MPRSAS_DISCOVERY_TIMEOUT	20
#define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */

/*
 * Static array to check SCSI OpCode for EEDP protection bits.
 * Indexed by the CDB opcode byte; non-zero entries name the EEDP
 * flags to set in the SCSI IO request for READ/WRITE/VERIFY-class
 * commands (10-, 12- and 16-byte variants plus WRITE SAME).
 */
#define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");

static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
static void mprsas_poll(struct cam_sim *sim);
static void mprsas_scsiio_timeout(void *data);
static void mprsas_abort_complete(struct mpr_softc *sc,
    struct mpr_command *cm);
static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
static void mprsas_resetdev_complete(struct mpr_softc *,
    struct mpr_command *);
static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
    struct mpr_command *cm);
static void mprsas_async(void *callback_arg, uint32_t code,
    struct cam_path *path, void *arg);
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
    struct ccb_getdev *cgd);
static void mprsas_read_cap_done(struct cam_periph *periph,
    union ccb *done_ccb);
#endif
static int mprsas_send_portenable(struct mpr_softc *sc);
static void mprsas_portenable_complete(struct mpr_softc *sc,
    struct mpr_command *cm);

#if __FreeBSD_version >= 900026
static void mprsas_smpio_complete(struct mpr_softc *sc,
    struct mpr_command *cm);
static void mprsas_send_smpcmd(struct mprsas_softc *sassc,
    union ccb *ccb, uint64_t sasaddr);
static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
#endif //FreeBSD_version >= 900026

/*
 * Linear scan of the targets[] array for the entry whose firmware device
 * handle matches 'handle'.  'start' lets callers resume a scan part-way
 * through the array.  Returns NULL when no target currently owns the handle.
 */
struct mprsas_target *
mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
    uint16_t handle)
{
	struct mprsas_target *target;
	int i;

	for (i = start; i < sassc->maxtargets; i++) {
		target = &sassc->targets[i];
		if (target->handle == handle)
			return (target);
	}

	return (NULL);
}

/* we need to freeze the simq during attach and diag reset, to avoid failing
 * commands before device handles have been found by discovery.  Since
 * discovery involves reading config pages and possibly sending commands,
 * discovery actions may continue even after we receive the end of discovery
 * event, so refcount discovery actions instead of assuming we can unfreeze
 * the simq when we get the event.
174 */ 175 void 176 mprsas_startup_increment(struct mprsas_softc *sassc) 177 { 178 MPR_FUNCTRACE(sassc->sc); 179 180 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) { 181 if (sassc->startup_refcount++ == 0) { 182 /* just starting, freeze the simq */ 183 mpr_dprint(sassc->sc, MPR_INIT, 184 "%s freezing simq\n", __func__); 185 #if (__FreeBSD_version >= 1000039) || \ 186 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502)) 187 xpt_hold_boot(); 188 #endif 189 xpt_freeze_simq(sassc->sim, 1); 190 } 191 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__, 192 sassc->startup_refcount); 193 } 194 } 195 196 void 197 mprsas_release_simq_reinit(struct mprsas_softc *sassc) 198 { 199 if (sassc->flags & MPRSAS_QUEUE_FROZEN) { 200 sassc->flags &= ~MPRSAS_QUEUE_FROZEN; 201 xpt_release_simq(sassc->sim, 1); 202 mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n"); 203 } 204 } 205 206 void 207 mprsas_startup_decrement(struct mprsas_softc *sassc) 208 { 209 MPR_FUNCTRACE(sassc->sc); 210 211 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) { 212 if (--sassc->startup_refcount == 0) { 213 /* finished all discovery-related actions, release 214 * the simq and rescan for the latest topology. 215 */ 216 mpr_dprint(sassc->sc, MPR_INIT, 217 "%s releasing simq\n", __func__); 218 sassc->flags &= ~MPRSAS_IN_STARTUP; 219 xpt_release_simq(sassc->sim, 1); 220 #if (__FreeBSD_version >= 1000039) || \ 221 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502)) 222 xpt_release_boot(); 223 #else 224 mprsas_rescan_target(sassc->sc, NULL); 225 #endif 226 } 227 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__, 228 sassc->startup_refcount); 229 } 230 } 231 232 /* The firmware requires us to stop sending commands when we're doing task 233 * management, so refcount the TMs and keep the simq frozen when any are in 234 * use. 
235 */ 236 struct mpr_command * 237 mprsas_alloc_tm(struct mpr_softc *sc) 238 { 239 struct mpr_command *tm; 240 241 MPR_FUNCTRACE(sc); 242 tm = mpr_alloc_high_priority_command(sc); 243 return tm; 244 } 245 246 void 247 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm) 248 { 249 MPR_FUNCTRACE(sc); 250 if (tm == NULL) 251 return; 252 253 /* 254 * For TM's the devq is frozen for the device. Unfreeze it here and 255 * free the resources used for freezing the devq. Must clear the 256 * INRESET flag as well or scsi I/O will not work. 257 */ 258 if (tm->cm_targ != NULL) { 259 tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET; 260 } 261 if (tm->cm_ccb) { 262 mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n", 263 tm->cm_targ->tid); 264 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE); 265 xpt_free_path(tm->cm_ccb->ccb_h.path); 266 xpt_free_ccb(tm->cm_ccb); 267 } 268 269 mpr_free_high_priority_command(sc, tm); 270 } 271 272 void 273 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ) 274 { 275 struct mprsas_softc *sassc = sc->sassc; 276 path_id_t pathid; 277 target_id_t targetid; 278 union ccb *ccb; 279 280 MPR_FUNCTRACE(sc); 281 pathid = cam_sim_path(sassc->sim); 282 if (targ == NULL) 283 targetid = CAM_TARGET_WILDCARD; 284 else 285 targetid = targ - sassc->targets; 286 287 /* 288 * Allocate a CCB and schedule a rescan. 
289 */ 290 ccb = xpt_alloc_ccb_nowait(); 291 if (ccb == NULL) { 292 mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n"); 293 return; 294 } 295 296 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid, 297 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 298 mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n"); 299 xpt_free_ccb(ccb); 300 return; 301 } 302 303 if (targetid == CAM_TARGET_WILDCARD) 304 ccb->ccb_h.func_code = XPT_SCAN_BUS; 305 else 306 ccb->ccb_h.func_code = XPT_SCAN_TGT; 307 308 mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid); 309 xpt_rescan(ccb); 310 } 311 312 static void 313 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...) 314 { 315 struct sbuf sb; 316 va_list ap; 317 char str[192]; 318 char path_str[64]; 319 320 if (cm == NULL) 321 return; 322 323 /* No need to be in here if debugging isn't enabled */ 324 if ((cm->cm_sc->mpr_debug & level) == 0) 325 return; 326 327 sbuf_new(&sb, str, sizeof(str), 0); 328 329 va_start(ap, fmt); 330 331 if (cm->cm_ccb != NULL) { 332 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str, 333 sizeof(path_str)); 334 sbuf_cat(&sb, path_str); 335 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) { 336 scsi_command_string(&cm->cm_ccb->csio, &sb); 337 sbuf_printf(&sb, "length %d ", 338 cm->cm_ccb->csio.dxfer_len); 339 } 340 } else { 341 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ", 342 cam_sim_name(cm->cm_sc->sassc->sim), 343 cam_sim_unit(cm->cm_sc->sassc->sim), 344 cam_sim_bus(cm->cm_sc->sassc->sim), 345 cm->cm_targ ? 
cm->cm_targ->tid : 0xFFFFFFFF, 346 cm->cm_lun); 347 } 348 349 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID); 350 sbuf_vprintf(&sb, fmt, ap); 351 sbuf_finish(&sb); 352 mpr_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb)); 353 354 va_end(ap); 355 } 356 357 static void 358 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm) 359 { 360 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 361 struct mprsas_target *targ; 362 uint16_t handle; 363 364 MPR_FUNCTRACE(sc); 365 366 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 367 handle = (uint16_t)(uintptr_t)tm->cm_complete_data; 368 targ = tm->cm_targ; 369 370 if (reply == NULL) { 371 /* XXX retry the remove after the diag reset completes? */ 372 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device " 373 "0x%04x\n", __func__, handle); 374 mprsas_free_tm(sc, tm); 375 return; 376 } 377 378 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) { 379 mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting " 380 "device 0x%x\n", reply->IOCStatus, handle); 381 mprsas_free_tm(sc, tm); 382 return; 383 } 384 385 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n", 386 reply->TerminationCount); 387 mpr_free_reply(sc, tm->cm_reply_data); 388 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */ 389 390 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n", 391 targ->tid, handle); 392 393 /* 394 * Don't clear target if remove fails because things will get confusing. 395 * Leave the devname and sasaddr intact so that we know to avoid reusing 396 * this target id if possible, and so we can assign the same target id 397 * to this device if it comes back in the future. 
398 */ 399 if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) { 400 targ = tm->cm_targ; 401 targ->handle = 0x0; 402 targ->encl_handle = 0x0; 403 targ->encl_level_valid = 0x0; 404 targ->encl_level = 0x0; 405 targ->connector_name[0] = ' '; 406 targ->connector_name[1] = ' '; 407 targ->connector_name[2] = ' '; 408 targ->connector_name[3] = ' '; 409 targ->encl_slot = 0x0; 410 targ->exp_dev_handle = 0x0; 411 targ->phy_num = 0x0; 412 targ->linkrate = 0x0; 413 targ->devinfo = 0x0; 414 targ->flags = 0x0; 415 targ->scsi_req_desc_type = 0; 416 } 417 418 mprsas_free_tm(sc, tm); 419 } 420 421 422 /* 423 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal. 424 * Otherwise Volume Delete is same as Bare Drive Removal. 425 */ 426 void 427 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle) 428 { 429 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 430 struct mpr_softc *sc; 431 struct mpr_command *cm; 432 struct mprsas_target *targ = NULL; 433 434 MPR_FUNCTRACE(sassc->sc); 435 sc = sassc->sc; 436 437 targ = mprsas_find_target_by_handle(sassc, 0, handle); 438 if (targ == NULL) { 439 /* FIXME: what is the action? */ 440 /* We don't know about this device? 
*/ 441 mpr_dprint(sc, MPR_ERROR, 442 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle); 443 return; 444 } 445 446 targ->flags |= MPRSAS_TARGET_INREMOVAL; 447 448 cm = mprsas_alloc_tm(sc); 449 if (cm == NULL) { 450 mpr_dprint(sc, MPR_ERROR, 451 "%s: command alloc failure\n", __func__); 452 return; 453 } 454 455 mprsas_rescan_target(sc, targ); 456 457 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req; 458 req->DevHandle = targ->handle; 459 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 460 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 461 462 /* SAS Hard Link Reset / SATA Link Reset */ 463 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 464 465 cm->cm_targ = targ; 466 cm->cm_data = NULL; 467 cm->cm_desc.HighPriority.RequestFlags = 468 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 469 cm->cm_complete = mprsas_remove_volume; 470 cm->cm_complete_data = (void *)(uintptr_t)handle; 471 472 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n", 473 __func__, targ->tid); 474 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD); 475 476 mpr_map_command(sc, cm); 477 } 478 479 /* 480 * The MPT3 firmware performs debounce on the link to avoid transient link 481 * errors and false removals. When it does decide that link has been lost 482 * and a device needs to go away, it expects that the host will perform a 483 * target reset and then an op remove. The reset has the side-effect of 484 * aborting any outstanding requests for the device, which is required for 485 * the op-remove to succeed. It's not clear if the host should check for 486 * the device coming back alive after the reset. 
 */
/*
 * Kick off removal of a bare (non-volume) device: issue a target reset TM;
 * its completion (mprsas_remove_device) then sends the op-remove.
 */
void
mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ = NULL;

	MPR_FUNCTRACE(sassc->sc);

	sc = sassc->sc;

	targ = mprsas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
		    __func__, handle);
		return;
	}

	targ->flags |= MPRSAS_TARGET_INREMOVAL;

	cm = mprsas_alloc_tm(sc);
	if (cm == NULL) {
		/* NOTE(review): on TM alloc failure the removal is silently
		 * dropped; presumably a later topology event retries — verify.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
		    __func__);
		return;
	}

	mprsas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	cm->cm_complete = mprsas_remove_device;
	cm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);

	mpr_map_command(sc, cm);
}

/*
 * Completion of the target-reset TM from mprsas_prepare_remove().  On
 * success, reuses the same command to send the SAS IO Unit Control
 * op-remove, and fails any commands still queued on the target.
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	struct mpr_command *next_cm;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_map_command(sc, tm);

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
	/* The reset aborted everything in flight; complete the stragglers
	 * still queued on the target as DEV_NOT_THERE.  'tm' is reused as
	 * the loop cursor here — it no longer refers to the TM command.
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mprsas_scsiio_complete(sc, tm);
	}
}

/*
 * Completion of the op-remove.  On success, clears the target's state and
 * frees its per-LUN records so the slot can be reused.
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}

/*
 * Subscribe to the firmware events this module handles; all are routed to
 * mprsas_evt_handler().
 */
static int
mprsas_register_events(struct mpr_softc *sc)
{
	uint8_t events[16];

	bzero(events, 16);
	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	setbit(events, MPI2_EVENT_IR_VOLUME);
	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);

	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
	    &sc->sassc->mprsas_eh);

	return (0);
}

/*
 * Attach the SAS/CAM layer: allocate the softc and target array, create
 * the SIM and event taskqueue, register the bus with CAM, and set up the
 * async handler used for EEDP detection.  Returns 0 or an errno; on error
 * mpr_detach_sas() unwinds whatever was set up.
 */
int
mpr_attach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	cam_status status;
	int unit, error = 0;

	MPR_FUNCTRACE(sc);

	/* M_WAITOK allocations cannot fail; the NULL checks below are
	 * belt-and-suspenders.
	 */
	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
	if (!sassc) {
		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
		    __func__, __LINE__);
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets;
	sassc->targets = malloc(sizeof(struct mprsas_target) *
	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
	if (!sassc->targets) {
		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
		    __func__, __LINE__);
		free(sassc, M_MPR);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpr_dev);
	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
	    unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
	/* NOTE(review): taskqueue_create(M_NOWAIT) can return NULL; the
	 * result is not checked before taskqueue_start_threads() — verify.
	 */
	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mpr_dev));

	mpr_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
		mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n",
		    error);
		mpr_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mprsas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mpr_printf(sc, "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
#else
		event = AC_FOUND_DEVICE;
#endif

		/*
		 * Prior to the CAM locking improvements, we can't call
		 * xpt_register_async() with a particular path specified.
		 *
		 * If a path isn't specified, xpt_register_async() will
		 * generate a wildcard path and acquire the XPT lock while
		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
		 * It will then drop the XPT lock once that is done.
		 *
		 * If a path is specified for xpt_register_async(), it will
		 * not acquire and drop the XPT lock around the call to
		 * xpt_action().  xpt_action() asserts that the caller
		 * holds the SIM lock, so the SIM lock has to be held when
		 * calling xpt_register_async() when the path is specified.
		 *
		 * But xpt_register_async calls xpt_for_all_devices(),
		 * which calls xptbustraverse(), which will acquire each
		 * SIM lock.  When it traverses our particular bus, it will
		 * necessarily acquire the SIM lock, which will lead to a
		 * recursive lock acquisition.
		 *
		 * The CAM locking changes fix this problem by acquiring
		 * the XPT topology lock around bus traversal in
		 * xptbustraverse(), so the caller can hold the SIM lock
		 * and it does not cause a recursive lock acquisition.
		 *
		 * These __FreeBSD_version values are approximate, especially
		 * for stable/10, which is two months later than the actual
		 * change.
		 */

#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		mpr_unlock(sc);
		status = xpt_register_async(event, mprsas_async, sc,
		    NULL);
		mpr_lock(sc);
#else
		status = xpt_register_async(event, mprsas_async, sc,
		    sassc->path);
#endif

		if (status != CAM_REQ_CMP) {
			mpr_dprint(sc, MPR_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpr_printf(sc, "EEDP capabilities disabled.\n");
	}

	mpr_unlock(sc);

	mprsas_register_events(sc);
out:
	if (error)
		mpr_detach_sas(sc);
	return (error);
}

/*
 * Tear down the SAS/CAM layer.  Safe to call on a partially-attached
 * instance (every resource is checked before release).  Always returns 0.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Release the simq frozen by mprsas_startup_increment() if attach
	 * never reached discovery completion.
	 */
	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	sassc->flags |= MPRSAS_SHUTDOWN;
	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}

/* Cancel the pending discovery-timeout callout, if any. */
void
mprsas_discovery_end(struct mprsas_softc *sassc)
{
	struct mpr_softc *sc = sassc->sc;

	MPR_FUNCTRACE(sc);

	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
		callout_stop(&sassc->discovery_callout);

}

/*
 * CAM action entry point for the SIM.  Called with the SIM (mpr) lock
 * held.  CCBs that are handed off to async paths (resetdev, scsiio,
 * smpio) return without xpt_done(); all others complete here.
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;
		/* Initiator is reported as the last target id. */
		cpi->initiator_id = sassc->maxtargets - 1;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Avago Tech (LSI)", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXXSLM-probably need to base this number on max SGL's and
		 * page size.
		 */
		cpi->maxio = 256 * 1024;
#endif
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;
		struct ccb_trans_settings_sas *sas;
		struct ccb_trans_settings_scsi *scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Map the firmware link-rate code to a CAM bitrate. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mpr_dprint(sassc->sc, MPR_XINFO,
		    "mprsas_action XPT_RESET_DEV\n");
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mpr_dprint(sassc->sc, MPR_XINFO,
		    "mprsas_action faking success for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mprsas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mprsas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}

/*
 * Broadcast a CAM async event (e.g. AC_SENT_BDR) for the given
 * target/lun on our bus.
 */
static void
mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
    target_id_t target_id, lun_id_t lun_id)
{
	path_id_t path_id = cam_sim_path(sc->sassc->sim);
	struct cam_path *path;

	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
	    ac_code, target_id, (uintmax_t)lun_id);

	if (xpt_create_path(&path, NULL,
		path_id, target_id, lun_id) != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
		    "notification\n");
		return;
	}

	xpt_async(ac_code, path, NULL);
	xpt_free_path(path);
}

/*
 * Diag-reset helper: force completion of every outstanding command with a
 * NULL reply, waking any sleepers, and resync the active-I/O count.
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag "
			    "reset\n", cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		}

		if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if (cm->cm_sc->io_cmds_active != 0) {
			cm->cm_sc->io_cmds_active--;
		} else {
			mpr_dprint(cm->cm_sc, MPR_INFO, "Warning: "
			    "io_cmds_active is out of sync - resynching to "
			    "0\n");
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}

void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
1181 */ 1182 mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__); 1183 sc->sassc->flags |= MPRSAS_IN_STARTUP; 1184 sc->sassc->flags |= MPRSAS_IN_DISCOVERY; 1185 mprsas_startup_increment(sc->sassc); 1186 1187 /* notify CAM of a bus reset */ 1188 mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD, 1189 CAM_LUN_WILDCARD); 1190 1191 /* complete and cleanup after all outstanding commands */ 1192 mprsas_complete_all_commands(sc); 1193 1194 mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n", 1195 __func__, sc->sassc->startup_refcount); 1196 1197 /* zero all the target handles, since they may change after the 1198 * reset, and we have to rediscover all the targets and use the new 1199 * handles. 1200 */ 1201 for (i = 0; i < sc->sassc->maxtargets; i++) { 1202 if (sc->sassc->targets[i].outstanding != 0) 1203 mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n", 1204 i, sc->sassc->targets[i].outstanding); 1205 sc->sassc->targets[i].handle = 0x0; 1206 sc->sassc->targets[i].exp_dev_handle = 0x0; 1207 sc->sassc->targets[i].outstanding = 0; 1208 sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET; 1209 } 1210 } 1211 static void 1212 mprsas_tm_timeout(void *data) 1213 { 1214 struct mpr_command *tm = data; 1215 struct mpr_softc *sc = tm->cm_sc; 1216 1217 mtx_assert(&sc->mpr_mtx, MA_OWNED); 1218 1219 mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, 1220 "task mgmt %p timed out\n", tm); 1221 mpr_reinit(sc); 1222 } 1223 1224 static void 1225 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, 1226 struct mpr_command *tm) 1227 { 1228 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 1229 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 1230 unsigned int cm_count = 0; 1231 struct mpr_command *cm; 1232 struct mprsas_target *targ; 1233 1234 callout_stop(&tm->cm_callout); 1235 1236 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 1237 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 1238 targ = tm->cm_targ; 1239 1240 /* 1241 * Currently there should be no way we can hit this 
case. It only 1242 * happens when we have a failure to allocate chain frames, and 1243 * task management commands don't have S/G lists. 1244 */ 1245 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 1246 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! " 1247 "This should not happen!\n", __func__, tm->cm_flags); 1248 mprsas_free_tm(sc, tm); 1249 return; 1250 } 1251 1252 if (reply == NULL) { 1253 mprsas_log_command(tm, MPR_RECOVERY, 1254 "NULL reset reply for tm %p\n", tm); 1255 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) { 1256 /* this completion was due to a reset, just cleanup */ 1257 targ->tm = NULL; 1258 mprsas_free_tm(sc, tm); 1259 } 1260 else { 1261 /* we should have gotten a reply. */ 1262 mpr_reinit(sc); 1263 } 1264 return; 1265 } 1266 1267 mprsas_log_command(tm, MPR_RECOVERY, 1268 "logical unit reset status 0x%x code 0x%x count %u\n", 1269 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), 1270 le32toh(reply->TerminationCount)); 1271 1272 /* See if there are any outstanding commands for this LUN. 1273 * This could be made more efficient by using a per-LU data 1274 * structure of some sort. 1275 */ 1276 TAILQ_FOREACH(cm, &targ->commands, cm_link) { 1277 if (cm->cm_lun == tm->cm_lun) 1278 cm_count++; 1279 } 1280 1281 if (cm_count == 0) { 1282 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO, 1283 "logical unit %u finished recovery after reset\n", 1284 tm->cm_lun, tm); 1285 1286 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, 1287 tm->cm_lun); 1288 1289 /* we've finished recovery for this logical unit. check and 1290 * see if some other logical unit has a timedout command 1291 * that needs to be processed. 1292 */ 1293 cm = TAILQ_FIRST(&targ->timedout_commands); 1294 if (cm) { 1295 mprsas_send_abort(sc, tm, cm); 1296 } 1297 else { 1298 targ->tm = NULL; 1299 mprsas_free_tm(sc, tm); 1300 } 1301 } 1302 else { 1303 /* if we still have commands for this LUN, the reset 1304 * effectively failed, regardless of the status reported. 
1305 * Escalate to a target reset. 1306 */ 1307 mprsas_log_command(tm, MPR_RECOVERY, 1308 "logical unit reset complete for tm %p, but still have %u " 1309 "command(s)\n", tm, cm_count); 1310 mprsas_send_reset(sc, tm, 1311 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET); 1312 } 1313 } 1314 1315 static void 1316 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm) 1317 { 1318 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 1319 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 1320 struct mprsas_target *targ; 1321 1322 callout_stop(&tm->cm_callout); 1323 1324 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 1325 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 1326 targ = tm->cm_targ; 1327 1328 /* 1329 * Currently there should be no way we can hit this case. It only 1330 * happens when we have a failure to allocate chain frames, and 1331 * task management commands don't have S/G lists. 1332 */ 1333 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 1334 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target " 1335 "reset! This should not happen!\n", __func__, tm->cm_flags); 1336 mprsas_free_tm(sc, tm); 1337 return; 1338 } 1339 1340 if (reply == NULL) { 1341 mprsas_log_command(tm, MPR_RECOVERY, 1342 "NULL reset reply for tm %p\n", tm); 1343 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) { 1344 /* this completion was due to a reset, just cleanup */ 1345 targ->tm = NULL; 1346 mprsas_free_tm(sc, tm); 1347 } 1348 else { 1349 /* we should have gotten a reply. */ 1350 mpr_reinit(sc); 1351 } 1352 return; 1353 } 1354 1355 mprsas_log_command(tm, MPR_RECOVERY, 1356 "target reset status 0x%x code 0x%x count %u\n", 1357 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), 1358 le32toh(reply->TerminationCount)); 1359 1360 if (targ->outstanding == 0) { 1361 /* we've finished recovery for this target and all 1362 * of its logical units. 
1363 */ 1364 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO, 1365 "recovery finished after target reset\n"); 1366 1367 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, 1368 CAM_LUN_WILDCARD); 1369 1370 targ->tm = NULL; 1371 mprsas_free_tm(sc, tm); 1372 } 1373 else { 1374 /* after a target reset, if this target still has 1375 * outstanding commands, the reset effectively failed, 1376 * regardless of the status reported. escalate. 1377 */ 1378 mprsas_log_command(tm, MPR_RECOVERY, 1379 "target reset complete for tm %p, but still have %u " 1380 "command(s)\n", tm, targ->outstanding); 1381 mpr_reinit(sc); 1382 } 1383 } 1384 1385 #define MPR_RESET_TIMEOUT 30 1386 1387 int 1388 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type) 1389 { 1390 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 1391 struct mprsas_target *target; 1392 int err; 1393 1394 target = tm->cm_targ; 1395 if (target->handle == 0) { 1396 mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id " 1397 "%d\n", __func__, target->tid); 1398 return -1; 1399 } 1400 1401 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 1402 req->DevHandle = htole16(target->handle); 1403 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 1404 req->TaskType = type; 1405 1406 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) { 1407 /* XXX Need to handle invalid LUNs */ 1408 MPR_SET_LUN(req->LUN, tm->cm_lun); 1409 tm->cm_targ->logical_unit_resets++; 1410 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO, 1411 "sending logical unit reset\n"); 1412 tm->cm_complete = mprsas_logical_unit_reset_complete; 1413 mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun); 1414 } 1415 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) { 1416 /* 1417 * Target reset method = 1418 * SAS Hard Link Reset / SATA Link Reset 1419 */ 1420 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 1421 tm->cm_targ->target_resets++; 1422 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO, 1423 "sending target reset\n"); 1424 tm->cm_complete = 
mprsas_target_reset_complete; 1425 mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD); 1426 } 1427 else { 1428 mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type); 1429 return -1; 1430 } 1431 1432 mpr_dprint(sc, MPR_INFO, "to target %u handle 0x%04x\n", target->tid, 1433 target->handle); 1434 if (target->encl_level_valid) { 1435 mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, " 1436 "connector name (%4s)\n", target->encl_level, 1437 target->encl_slot, target->connector_name); 1438 } 1439 1440 tm->cm_data = NULL; 1441 tm->cm_desc.HighPriority.RequestFlags = 1442 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 1443 tm->cm_complete_data = (void *)tm; 1444 1445 callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz, 1446 mprsas_tm_timeout, tm); 1447 1448 err = mpr_map_command(sc, tm); 1449 if (err) 1450 mprsas_log_command(tm, MPR_RECOVERY, 1451 "error %d sending reset type %u\n", err, type); 1452 1453 return err; 1454 } 1455 1456 1457 static void 1458 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm) 1459 { 1460 struct mpr_command *cm; 1461 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 1462 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 1463 struct mprsas_target *targ; 1464 1465 callout_stop(&tm->cm_callout); 1466 1467 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 1468 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 1469 targ = tm->cm_targ; 1470 1471 /* 1472 * Currently there should be no way we can hit this case. It only 1473 * happens when we have a failure to allocate chain frames, and 1474 * task management commands don't have S/G lists. 
1475 */ 1476 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 1477 mprsas_log_command(tm, MPR_RECOVERY, 1478 "cm_flags = %#x for abort %p TaskMID %u!\n", 1479 tm->cm_flags, tm, le16toh(req->TaskMID)); 1480 mprsas_free_tm(sc, tm); 1481 return; 1482 } 1483 1484 if (reply == NULL) { 1485 mprsas_log_command(tm, MPR_RECOVERY, 1486 "NULL abort reply for tm %p TaskMID %u\n", 1487 tm, le16toh(req->TaskMID)); 1488 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) { 1489 /* this completion was due to a reset, just cleanup */ 1490 targ->tm = NULL; 1491 mprsas_free_tm(sc, tm); 1492 } 1493 else { 1494 /* we should have gotten a reply. */ 1495 mpr_reinit(sc); 1496 } 1497 return; 1498 } 1499 1500 mprsas_log_command(tm, MPR_RECOVERY, 1501 "abort TaskMID %u status 0x%x code 0x%x count %u\n", 1502 le16toh(req->TaskMID), 1503 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), 1504 le32toh(reply->TerminationCount)); 1505 1506 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands); 1507 if (cm == NULL) { 1508 /* if there are no more timedout commands, we're done with 1509 * error recovery for this target. 1510 */ 1511 mprsas_log_command(tm, MPR_RECOVERY, 1512 "finished recovery after aborting TaskMID %u\n", 1513 le16toh(req->TaskMID)); 1514 1515 targ->tm = NULL; 1516 mprsas_free_tm(sc, tm); 1517 } 1518 else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) { 1519 /* abort success, but we have more timedout commands to abort */ 1520 mprsas_log_command(tm, MPR_RECOVERY, 1521 "continuing recovery after aborting TaskMID %u\n", 1522 le16toh(req->TaskMID)); 1523 1524 mprsas_send_abort(sc, tm, cm); 1525 } 1526 else { 1527 /* we didn't get a command completion, so the abort 1528 * failed as far as we're concerned. escalate. 
1529 */ 1530 mprsas_log_command(tm, MPR_RECOVERY, 1531 "abort failed for TaskMID %u tm %p\n", 1532 le16toh(req->TaskMID), tm); 1533 1534 mprsas_send_reset(sc, tm, 1535 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET); 1536 } 1537 } 1538 1539 #define MPR_ABORT_TIMEOUT 5 1540 1541 static int 1542 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm, 1543 struct mpr_command *cm) 1544 { 1545 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 1546 struct mprsas_target *targ; 1547 int err; 1548 1549 targ = cm->cm_targ; 1550 if (targ->handle == 0) { 1551 mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n", 1552 __func__, cm->cm_ccb->ccb_h.target_id); 1553 return -1; 1554 } 1555 1556 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO, 1557 "Aborting command %p\n", cm); 1558 1559 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 1560 req->DevHandle = htole16(targ->handle); 1561 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 1562 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK; 1563 1564 /* XXX Need to handle invalid LUNs */ 1565 MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun); 1566 1567 req->TaskMID = htole16(cm->cm_desc.Default.SMID); 1568 1569 tm->cm_data = NULL; 1570 tm->cm_desc.HighPriority.RequestFlags = 1571 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 1572 tm->cm_complete = mprsas_abort_complete; 1573 tm->cm_complete_data = (void *)tm; 1574 tm->cm_targ = cm->cm_targ; 1575 tm->cm_lun = cm->cm_lun; 1576 1577 callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz, 1578 mprsas_tm_timeout, tm); 1579 1580 targ->aborts++; 1581 1582 mpr_dprint(sc, MPR_INFO, "Sending reset from %s for target ID %d\n", 1583 __func__, targ->tid); 1584 mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun); 1585 1586 err = mpr_map_command(sc, tm); 1587 if (err) 1588 mprsas_log_command(tm, MPR_RECOVERY, 1589 "error %d sending abort for cm %p SMID %u\n", 1590 err, cm, req->TaskMID); 1591 return err; 1592 } 1593 1594 static void 1595 mprsas_scsiio_timeout(void *data) 1596 { 1597 struct mpr_softc *sc; 
1598 struct mpr_command *cm; 1599 struct mprsas_target *targ; 1600 1601 cm = (struct mpr_command *)data; 1602 sc = cm->cm_sc; 1603 1604 MPR_FUNCTRACE(sc); 1605 mtx_assert(&sc->mpr_mtx, MA_OWNED); 1606 1607 mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm); 1608 1609 /* 1610 * Run the interrupt handler to make sure it's not pending. This 1611 * isn't perfect because the command could have already completed 1612 * and been re-used, though this is unlikely. 1613 */ 1614 mpr_intr_locked(sc); 1615 if (cm->cm_state == MPR_CM_STATE_FREE) { 1616 mprsas_log_command(cm, MPR_XINFO, 1617 "SCSI command %p almost timed out\n", cm); 1618 return; 1619 } 1620 1621 if (cm->cm_ccb == NULL) { 1622 mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n"); 1623 return; 1624 } 1625 1626 targ = cm->cm_targ; 1627 targ->timeouts++; 1628 1629 mprsas_log_command(cm, MPR_ERROR, "command timeout cm %p ccb %p " 1630 "target %u, handle(0x%04x)\n", cm, cm->cm_ccb, targ->tid, 1631 targ->handle); 1632 if (targ->encl_level_valid) { 1633 mpr_dprint(sc, MPR_ERROR, "At enclosure level %d, slot %d, " 1634 "connector name (%4s)\n", targ->encl_level, targ->encl_slot, 1635 targ->connector_name); 1636 } 1637 1638 /* XXX first, check the firmware state, to see if it's still 1639 * operational. if not, do a diag reset. 1640 */ 1641 mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT); 1642 cm->cm_state = MPR_CM_STATE_TIMEDOUT; 1643 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery); 1644 1645 if (targ->tm != NULL) { 1646 /* target already in recovery, just queue up another 1647 * timedout command to be processed later. 
1648 */ 1649 mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for " 1650 "processing by tm %p\n", cm, targ->tm); 1651 } 1652 else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) { 1653 mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n", 1654 cm, targ->tm); 1655 1656 /* start recovery by aborting the first timedout command */ 1657 mprsas_send_abort(sc, targ->tm, cm); 1658 } 1659 else { 1660 /* XXX queue this target up for recovery once a TM becomes 1661 * available. The firmware only has a limited number of 1662 * HighPriority credits for the high priority requests used 1663 * for task management, and we ran out. 1664 * 1665 * Isilon: don't worry about this for now, since we have 1666 * more credits than disks in an enclosure, and limit 1667 * ourselves to one TM per target for recovery. 1668 */ 1669 mpr_dprint(sc, MPR_RECOVERY, 1670 "timedout cm %p failed to allocate a tm\n", cm); 1671 } 1672 } 1673 1674 static void 1675 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb) 1676 { 1677 MPI2_SCSI_IO_REQUEST *req; 1678 struct ccb_scsiio *csio; 1679 struct mpr_softc *sc; 1680 struct mprsas_target *targ; 1681 struct mprsas_lun *lun; 1682 struct mpr_command *cm; 1683 uint8_t i, lba_byte, *ref_tag_addr; 1684 uint16_t eedp_flags; 1685 uint32_t mpi_control; 1686 1687 sc = sassc->sc; 1688 MPR_FUNCTRACE(sc); 1689 mtx_assert(&sc->mpr_mtx, MA_OWNED); 1690 1691 csio = &ccb->csio; 1692 KASSERT(csio->ccb_h.target_id < sassc->maxtargets, 1693 ("Target %d out of bounds in XPT_SCSI_IO\n", 1694 csio->ccb_h.target_id)); 1695 targ = &sassc->targets[csio->ccb_h.target_id]; 1696 mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags); 1697 if (targ->handle == 0x0) { 1698 mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n", 1699 __func__, csio->ccb_h.target_id); 1700 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 1701 xpt_done(ccb); 1702 return; 1703 } 1704 if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) { 1705 mpr_dprint(sc, MPR_ERROR, "%s 
Raid component no SCSI IO " 1706 "supported %u\n", __func__, csio->ccb_h.target_id); 1707 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 1708 xpt_done(ccb); 1709 return; 1710 } 1711 /* 1712 * Sometimes, it is possible to get a command that is not "In 1713 * Progress" and was actually aborted by the upper layer. Check for 1714 * this here and complete the command without error. 1715 */ 1716 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) { 1717 mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for " 1718 "target %u\n", __func__, csio->ccb_h.target_id); 1719 xpt_done(ccb); 1720 return; 1721 } 1722 /* 1723 * If devinfo is 0 this will be a volume. In that case don't tell CAM 1724 * that the volume has timed out. We want volumes to be enumerated 1725 * until they are deleted/removed, not just failed. 1726 */ 1727 if (targ->flags & MPRSAS_TARGET_INREMOVAL) { 1728 if (targ->devinfo == 0) 1729 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); 1730 else 1731 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT); 1732 xpt_done(ccb); 1733 return; 1734 } 1735 1736 if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) { 1737 mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__); 1738 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE); 1739 xpt_done(ccb); 1740 return; 1741 } 1742 1743 /* 1744 * If target has a reset in progress, freeze the devq and return. The 1745 * devq will be released when the TM reset is finished. 
1746 */ 1747 if (targ->flags & MPRSAS_TARGET_INRESET) { 1748 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; 1749 mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n", 1750 __func__, targ->tid); 1751 xpt_freeze_devq(ccb->ccb_h.path, 1); 1752 xpt_done(ccb); 1753 return; 1754 } 1755 1756 cm = mpr_alloc_command(sc); 1757 if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) { 1758 if (cm != NULL) { 1759 mpr_free_command(sc, cm); 1760 } 1761 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) { 1762 xpt_freeze_simq(sassc->sim, 1); 1763 sassc->flags |= MPRSAS_QUEUE_FROZEN; 1764 } 1765 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1766 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 1767 xpt_done(ccb); 1768 return; 1769 } 1770 1771 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req; 1772 bzero(req, sizeof(*req)); 1773 req->DevHandle = htole16(targ->handle); 1774 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 1775 req->MsgFlags = 0; 1776 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr); 1777 req->SenseBufferLength = MPR_SENSE_LEN; 1778 req->SGLFlags = 0; 1779 req->ChainOffset = 0; 1780 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */ 1781 req->SGLOffset1= 0; 1782 req->SGLOffset2= 0; 1783 req->SGLOffset3= 0; 1784 req->SkipCount = 0; 1785 req->DataLength = htole32(csio->dxfer_len); 1786 req->BidirectionalDataLength = 0; 1787 req->IoFlags = htole16(csio->cdb_len); 1788 req->EEDPFlags = 0; 1789 1790 /* Note: BiDirectional transfers are not supported */ 1791 switch (csio->ccb_h.flags & CAM_DIR_MASK) { 1792 case CAM_DIR_IN: 1793 mpi_control = MPI2_SCSIIO_CONTROL_READ; 1794 cm->cm_flags |= MPR_CM_FLAGS_DATAIN; 1795 break; 1796 case CAM_DIR_OUT: 1797 mpi_control = MPI2_SCSIIO_CONTROL_WRITE; 1798 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT; 1799 break; 1800 case CAM_DIR_NONE: 1801 default: 1802 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; 1803 break; 1804 } 1805 1806 if (csio->cdb_len == 32) 1807 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; 1808 /* 1809 * It looks like the 
hardware doesn't require an explicit tag 1810 * number for each transaction. SAM Task Management not supported 1811 * at the moment. 1812 */ 1813 switch (csio->tag_action) { 1814 case MSG_HEAD_OF_Q_TAG: 1815 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ; 1816 break; 1817 case MSG_ORDERED_Q_TAG: 1818 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ; 1819 break; 1820 case MSG_ACA_TASK: 1821 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ; 1822 break; 1823 case CAM_TAG_ACTION_NONE: 1824 case MSG_SIMPLE_Q_TAG: 1825 default: 1826 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; 1827 break; 1828 } 1829 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits; 1830 req->Control = htole32(mpi_control); 1831 1832 if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) { 1833 mpr_free_command(sc, cm); 1834 mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID); 1835 xpt_done(ccb); 1836 return; 1837 } 1838 1839 if (csio->ccb_h.flags & CAM_CDB_POINTER) 1840 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len); 1841 else 1842 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len); 1843 req->IoFlags = htole16(csio->cdb_len); 1844 1845 /* 1846 * Check if EEDP is supported and enabled. If it is then check if the 1847 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and 1848 * is formatted for EEDP support. If all of this is true, set CDB up 1849 * for EEDP transfer. 
1850 */ 1851 eedp_flags = op_code_prot[req->CDB.CDB32[0]]; 1852 if (sc->eedp_enabled && eedp_flags) { 1853 SLIST_FOREACH(lun, &targ->luns, lun_link) { 1854 if (lun->lun_id == csio->ccb_h.target_lun) { 1855 break; 1856 } 1857 } 1858 1859 if ((lun != NULL) && (lun->eedp_formatted)) { 1860 req->EEDPBlockSize = htole16(lun->eedp_block_size); 1861 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 1862 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | 1863 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD); 1864 req->EEDPFlags = htole16(eedp_flags); 1865 1866 /* 1867 * If CDB less than 32, fill in Primary Ref Tag with 1868 * low 4 bytes of LBA. If CDB is 32, tag stuff is 1869 * already there. Also, set protection bit. FreeBSD 1870 * currently does not support CDBs bigger than 16, but 1871 * the code doesn't hurt, and will be here for the 1872 * future. 1873 */ 1874 if (csio->cdb_len != 32) { 1875 lba_byte = (csio->cdb_len == 16) ? 6 : 2; 1876 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32. 1877 PrimaryReferenceTag; 1878 for (i = 0; i < 4; i++) { 1879 *ref_tag_addr = 1880 req->CDB.CDB32[lba_byte + i]; 1881 ref_tag_addr++; 1882 } 1883 req->CDB.EEDP32.PrimaryReferenceTag = 1884 htole32(req-> 1885 CDB.EEDP32.PrimaryReferenceTag); 1886 req->CDB.EEDP32.PrimaryApplicationTagMask = 1887 0xFFFF; 1888 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) | 1889 0x20; 1890 } else { 1891 eedp_flags |= 1892 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG; 1893 req->EEDPFlags = htole16(eedp_flags); 1894 req->CDB.CDB32[10] = (req->CDB.CDB32[10] & 1895 0x1F) | 0x20; 1896 } 1897 } 1898 } 1899 1900 cm->cm_length = csio->dxfer_len; 1901 if (cm->cm_length != 0) { 1902 cm->cm_data = ccb; 1903 cm->cm_flags |= MPR_CM_FLAGS_USE_CCB; 1904 } else { 1905 cm->cm_data = NULL; 1906 } 1907 cm->cm_sge = &req->SGL; 1908 cm->cm_sglsize = (32 - 24) * 4; 1909 cm->cm_complete = mprsas_scsiio_complete; 1910 cm->cm_complete_data = ccb; 1911 cm->cm_targ = targ; 1912 cm->cm_lun = csio->ccb_h.target_lun; 1913 cm->cm_ccb = ccb; 1914 /* 1915 * If using FP desc 
type, need to set a bit in IoFlags (SCSI IO is 0) 1916 * and set descriptor type. 1917 */ 1918 if (targ->scsi_req_desc_type == 1919 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) { 1920 req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH; 1921 cm->cm_desc.FastPathSCSIIO.RequestFlags = 1922 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; 1923 cm->cm_desc.FastPathSCSIIO.DevHandle = htole16(targ->handle); 1924 } else { 1925 cm->cm_desc.SCSIIO.RequestFlags = 1926 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; 1927 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle); 1928 } 1929 1930 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0, 1931 mprsas_scsiio_timeout, cm, 0); 1932 1933 targ->issued++; 1934 targ->outstanding++; 1935 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link); 1936 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1937 1938 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n", 1939 __func__, cm, ccb, targ->outstanding); 1940 1941 mpr_map_command(sc, cm); 1942 return; 1943 } 1944 1945 static void 1946 mpr_response_code(struct mpr_softc *sc, u8 response_code) 1947 { 1948 char *desc; 1949 1950 switch (response_code) { 1951 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE: 1952 desc = "task management request completed"; 1953 break; 1954 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME: 1955 desc = "invalid frame"; 1956 break; 1957 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: 1958 desc = "task management request not supported"; 1959 break; 1960 case MPI2_SCSITASKMGMT_RSP_TM_FAILED: 1961 desc = "task management request failed"; 1962 break; 1963 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED: 1964 desc = "task management request succeeded"; 1965 break; 1966 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN: 1967 desc = "invalid lun"; 1968 break; 1969 case 0xA: 1970 desc = "overlapped tag attempted"; 1971 break; 1972 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: 1973 desc = "task queued, however not sent to target"; 1974 break; 1975 default: 1976 desc = "unknown"; 1977 break; 1978 } 1979 mpr_dprint(sc, 
MPR_XINFO, "response_code(0x%01x): %s\n", response_code, desc);
}

/**
 * mpr_sc_failed_io_info - translate a non-successful SCSI_IO request
 * into human-readable debug output.
 *
 * Decodes the IOCStatus, SCSIStatus and SCSIState fields of the MPI
 * reply into text and prints them (plus enclosure/slot details and,
 * when valid, the sense data and SAS response info) at MPR_XINFO debug
 * level.  Purely diagnostic; does not change command disposition.
 */
static void
mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
    Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/* Scratch buffer in the softc used to build the state string. */
	char *desc_scsi_state = sc->tmp_string;
	u32 log_info = le32toh(mpi_reply->IOCLogInfo);

	/* This particular log-info value is suppressed to avoid log spam. */
	if (log_info == 0x31170000)
		return;

	/* Translate the masked IOCStatus into text. */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	/* Translate the SCSI status byte into text. */
	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/*
	 * Build the SCSIState description by concatenating a word for each
	 * state bit that is set.  NOTE(review): strcat() into sc->tmp_string
	 * assumes the buffer is large enough for all state strings combined
	 * -- confirm tmp_string's size in the softc declaration.
	 */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
	/* Print enclosure/slot/connector details when the target has them. */
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
	/* We can add more detail about underflow data here
	 * TO-DO
	 * */
	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
	    desc_scsi_state, scsi_state);

	/* Dump the sense buffer if autosense data came back. */
	if (sc->mpr_debug & MPR_XINFO &&
	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
		scsi_sense_print(csio);
		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
	}

	/* Decode the first byte of the SAS response info, when valid. */
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32toh(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		mpr_response_code(sc,response_bytes[0]);
	}
}

/*
 * Completion handler for XPT_SCSI_IO CCBs.  Translates the MPI reply (if
 * any) into CAM status, copies autosense data, manages TLR negotiation
 * state, and handles SIM-queue freeze/unfreeze.  Called with the mpr
 * mutex held.
 */
static void
mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mprsas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;
	struct mprsas_target *target;
	target_id_t target_id;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID,
cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	/* Stop the timeout callout before examining the reply. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	target_id = csio->ccb_h.target_id;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

	/* Log completions that raced with timeout/TM recovery or a reset. */
	if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		if (cm->cm_reply != NULL)
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus,
			    rep->SCSIState, le32toh(rep->TransferCount));
		else
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
		mprsas_log_command(cm, MPR_RECOVERY,
		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);

		/*
		 * Currently the only error included in the mask is
		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
			mpr_dprint(sc, MPR_INFO, "Error sending command, "
			    "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
			else {
				/* No reply and no error: the I/O succeeded. */
				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
				csio->scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
				mpr_dprint(sc, MPR_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
		 */
		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mpr_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mprsas_log_command(cm, MPR_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/* Map the masked MPI IOCStatus onto a CAM status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
		    == MPR_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);

		/* Copy autosense data into the CCB, tracking any residual. */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < csio->sense_len)
				csio->sense_resid = csio->sense_len -
				    returned_sense_len;
			else
				csio->sense_resid = 0;

			sense_len = min(returned_sense_len,
			    csio->sense_len - csio->sense_resid);
			bzero(&csio->sense_data, sizeof(csio->sense_data));
			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* CDB bytes 3-4 hold the INQUIRY allocation length. */
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}

		/*
		 * If this is a SATA direct-access end device, mark it so that
		 * a SCSI StartStopUnit command will be sent to it when the
		 * driver is being shutdown.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
		    (sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
		    ((sc->mapping_table[target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
			target = &sassc->targets[target_id];
			target->supports_SSU = TRUE;
			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
			    target_id);
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mpr_print_scsiio_cmd(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
		else
			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * These can sometimes be transient transport-related
		 * errors, and sometimes persistent drive-related errors.
		 * We used to retry these without decrementing the retry
		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
		 * we hit a persistent drive problem that returns one of
		 * these error codes, we would retry indefinitely.  So,
		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
		 * count and avoid infinite retries.  We're taking the
		 * potential risk of flagging false failures in the event
		 * of a topology-related error (e.g. a SAS expander problem
		 * causes a command addressed to a drive to fail), but
		 * avoiding getting into an infinite retry loop.
*/
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		mprsas_log_command(cm, MPR_INFO,
		    "terminated ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mprsas_log_command(cm, MPR_XINFO,
		    "completed ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		break;
	}

	/*
	 * NOTE(review): called unconditionally here, even for successful
	 * completions; the function itself only emits debug output.
	 */
	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);

	/* A reply arrived while out of chain frames; we can unfreeze now. */
	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
		    "queue\n");
	}

	/* Freeze the device queue on error so recovery keeps ordering. */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mpr_free_command(sc, cm);
	xpt_done(ccb);
}

#if __FreeBSD_version >= 900026
/*
 * Completion handler for XPT_SMP_IO CCBs: validates the SMP passthrough
 * reply and sets CAM status accordingly, then (on all paths) syncs and
 * unloads the DMA map, frees the command and completes the CCB.
 */
static void
mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
		    "request!\n", __func__, cm->cm_flags);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	/* Recover the destination SAS address from the original request. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
		goto bailout;
	}

	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
	    "completed successfully\n", __func__, (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame carries the function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
	else
		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mpr_free_command(sc, cm);
	xpt_done(ccb);
}

/*
 * Build and send an SMP passthrough request for an XPT_SMP_IO CCB to the
 * device at 'sasaddr'.  Resolves the request/response buffer addresses
 * (virtual addresses only; physical S/G is not supported), then maps the
 * command using a two-element uio (request + response).
 */
static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
    uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

#if (__FreeBSD_version >= 1000028) || \
    ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR,
			    "%s: multiple request or response buffer segments "
			    "not supported for SMP\n", __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
*/
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}
#else /* __FreeBSD_version < 1000028 */
	/*
	 * XXX We don't yet support physical addresses here.
	 */
	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
		xpt_done(ccb);
		return;
	}

	/*
	 * If the user wants to send an S/G list, check to make sure they
	 * have single buffers.
	 */
	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
	} else {
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
	}
#endif /* __FreeBSD_version < 1000028 */

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
	    "%#jx\n", __func__, (uintmax_t)sasaddr);

	mpr_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mpr_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec[0] carries the request frame, iovec[1] the response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
	    cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mpr_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mprsas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mpr_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
	    MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
		    "mpr_map_command()\n", __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mpr_free_command(sc, cm);
	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
	xpt_done(ccb);
	return;
}

/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address the SMP request
 * should be routed to (the target itself when it contains an SMP target,
 * otherwise its parent expander) and dispatch via mprsas_send_smpcmd().
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
*
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		if ((targ->parent_devinfo &
		    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */

	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
#endif //__FreeBSD_version >= 900026

/*
 * Handle an XPT_RESET_DEV CCB by sending a Target Reset task management
 * request (SAS hard link reset / SATA link reset) to the target.  The
 * CCB is completed from mprsas_resetdev_complete().  Called with the mpr
 * mutex held.
 */
static void
mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *tm;
	struct mprsas_target *targ;

	MPR_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_RESET_DEV\n",
	    ccb->ccb_h.target_id));
	sc = sassc->sc;
	tm = mpr_alloc_command(sc);
	if (tm == NULL) {
		mpr_dprint(sc, MPR_ERROR,
		    "command alloc failure in mprsas_action_resetdev\n");
		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mprsas_resetdev_complete;
	tm->cm_complete_data = ccb;

	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	tm->cm_targ = targ;
	targ->flags |= MPRSAS_TARGET_INRESET;

	mpr_map_command(sc, tm);
}

/*
 * Completion handler for the Target Reset task management request sent
 * by mprsas_action_resetdev().  Called with the mpr mutex held.
 */
static void
mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
		    "handle %#04x! 
This should not happen!\n", __func__, 3021 tm->cm_flags, req->DevHandle); 3022 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); 3023 goto bailout; 3024 } 3025 3026 mpr_dprint(sc, MPR_XINFO, 3027 "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__, 3028 le16toh(resp->IOCStatus), le32toh(resp->ResponseCode)); 3029 3030 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) { 3031 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP); 3032 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, 3033 CAM_LUN_WILDCARD); 3034 } 3035 else 3036 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR); 3037 3038 bailout: 3039 3040 mprsas_free_tm(sc, tm); 3041 xpt_done(ccb); 3042 } 3043 3044 static void 3045 mprsas_poll(struct cam_sim *sim) 3046 { 3047 struct mprsas_softc *sassc; 3048 3049 sassc = cam_sim_softc(sim); 3050 3051 if (sassc->sc->mpr_debug & MPR_TRACE) { 3052 /* frequent debug messages during a panic just slow 3053 * everything down too much. 3054 */ 3055 mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n", 3056 __func__); 3057 sassc->sc->mpr_debug &= ~MPR_TRACE; 3058 } 3059 3060 mpr_intr_locked(sassc->sc); 3061 } 3062 3063 static void 3064 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path, 3065 void *arg) 3066 { 3067 struct mpr_softc *sc; 3068 3069 sc = (struct mpr_softc *)callback_arg; 3070 3071 switch (code) { 3072 #if (__FreeBSD_version >= 1000006) || \ 3073 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000)) 3074 case AC_ADVINFO_CHANGED: { 3075 struct mprsas_target *target; 3076 struct mprsas_softc *sassc; 3077 struct scsi_read_capacity_data_long rcap_buf; 3078 struct ccb_dev_advinfo cdai; 3079 struct mprsas_lun *lun; 3080 lun_id_t lunid; 3081 int found_lun; 3082 uintptr_t buftype; 3083 3084 buftype = (uintptr_t)arg; 3085 3086 found_lun = 0; 3087 sassc = sc->sassc; 3088 3089 /* 3090 * We're only interested in read capacity data changes. 
/*
 * CAM async event callback.  Two events are of interest:
 *  - AC_ADVINFO_CHANGED (newer FreeBSD): a device's advanced info changed;
 *    we re-read READ CAPACITY(16) data via XPT_DEV_ADVINFO to refresh the
 *    per-LUN EEDP (protection information) state.
 *  - AC_FOUND_DEVICE (older FreeBSD): probe the new device for EEDP
 *    support via mprsas_check_eedp().
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation. In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;
#endif

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the LUN record for this path, if we already have one. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/* Not seen before: create and link a new LUN record. */
		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Ask CAM for the cached READ CAPACITY(16) data so we can
		 * inspect the protection-enable bit.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/* Record whether the LUN is formatted with protection info. */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#endif
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation. In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
			break;
#endif

		cgd = arg;
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
		mprsas_check_eedp(sc, path, cgd);
#endif
		break;
	}
	default:
		break;
	}
}
is EEDP capable. 3225 * 3226 * If this flag is set in the inquiry data, the device supports 3227 * protection information, and must support the 16 byte read capacity 3228 * command, otherwise continue without sending read cap 16 3229 */ 3230 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0) 3231 return; 3232 3233 /* 3234 * Issue a READ CAPACITY 16 command. This info is used to determine if 3235 * the LUN is formatted for EEDP support. 3236 */ 3237 ccb = xpt_alloc_ccb_nowait(); 3238 if (ccb == NULL) { 3239 mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP " 3240 "support.\n"); 3241 return; 3242 } 3243 3244 if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) 3245 != CAM_REQ_CMP) { 3246 mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP " 3247 "support\n"); 3248 xpt_free_ccb(ccb); 3249 return; 3250 } 3251 3252 /* 3253 * If LUN is already in list, don't create a new one. 3254 */ 3255 found_lun = FALSE; 3256 SLIST_FOREACH(lun, &target->luns, lun_link) { 3257 if (lun->lun_id == lunid) { 3258 found_lun = TRUE; 3259 break; 3260 } 3261 } 3262 if (!found_lun) { 3263 lun = malloc(sizeof(struct mprsas_lun), M_MPR, 3264 M_NOWAIT | M_ZERO); 3265 if (lun == NULL) { 3266 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for " 3267 "EEDP support.\n"); 3268 xpt_free_path(local_path); 3269 xpt_free_ccb(ccb); 3270 return; 3271 } 3272 lun->lun_id = lunid; 3273 SLIST_INSERT_HEAD(&target->luns, lun, lun_link); 3274 } 3275 3276 xpt_path_string(local_path, path_str, sizeof(path_str)); 3277 mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n", 3278 path_str, target->handle); 3279 3280 /* 3281 * Issue a READ CAPACITY 16 command for the LUN. The 3282 * mprsas_read_cap_done function will load the read cap info into the 3283 * LUN struct. 
3284 */ 3285 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR, 3286 M_NOWAIT | M_ZERO); 3287 if (rcap_buf == NULL) { 3288 mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity " 3289 "buffer for EEDP support.\n"); 3290 xpt_free_path(ccb->ccb_h.path); 3291 xpt_free_ccb(ccb); 3292 return; 3293 } 3294 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT); 3295 csio = &ccb->csio; 3296 csio->ccb_h.func_code = XPT_SCSI_IO; 3297 csio->ccb_h.flags = CAM_DIR_IN; 3298 csio->ccb_h.retry_count = 4; 3299 csio->ccb_h.cbfcnp = mprsas_read_cap_done; 3300 csio->ccb_h.timeout = 60000; 3301 csio->data_ptr = (uint8_t *)rcap_buf; 3302 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp); 3303 csio->sense_len = MPR_SENSE_LEN; 3304 csio->cdb_len = sizeof(*scsi_cmd); 3305 csio->tag_action = MSG_SIMPLE_Q_TAG; 3306 3307 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes; 3308 bzero(scsi_cmd, sizeof(*scsi_cmd)); 3309 scsi_cmd->opcode = 0x9E; 3310 scsi_cmd->service_action = SRC16_SERVICE_ACTION; 3311 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp); 3312 3313 ccb->ccb_h.ppriv_ptr1 = sassc; 3314 xpt_action(ccb); 3315 } 3316 3317 static void 3318 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb) 3319 { 3320 struct mprsas_softc *sassc; 3321 struct mprsas_target *target; 3322 struct mprsas_lun *lun; 3323 struct scsi_read_capacity_eedp *rcap_buf; 3324 3325 if (done_ccb == NULL) 3326 return; 3327 3328 /* Driver need to release devq, it Scsi command is 3329 * generated by driver internally. 3330 * Currently there is a single place where driver 3331 * calls scsi command internally. In future if driver 3332 * calls more scsi command internally, it needs to release 3333 * devq internally, since those command will not go back to 3334 * cam_periph. 
3335 */ 3336 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) { 3337 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 3338 xpt_release_devq(done_ccb->ccb_h.path, 3339 /*count*/ 1, /*run_queue*/TRUE); 3340 } 3341 3342 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr; 3343 3344 /* 3345 * Get the LUN ID for the path and look it up in the LUN list for the 3346 * target. 3347 */ 3348 sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1; 3349 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, 3350 ("Target %d out of bounds in mprsas_read_cap_done\n", 3351 done_ccb->ccb_h.target_id)); 3352 target = &sassc->targets[done_ccb->ccb_h.target_id]; 3353 SLIST_FOREACH(lun, &target->luns, lun_link) { 3354 if (lun->lun_id != done_ccb->ccb_h.target_lun) 3355 continue; 3356 3357 /* 3358 * Got the LUN in the target's LUN list. Fill it in with EEDP 3359 * info. If the READ CAP 16 command had some SCSI error (common 3360 * if command is not supported), mark the lun as not supporting 3361 * EEDP and set the block size to 0. 3362 */ 3363 if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) || 3364 (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) { 3365 lun->eedp_formatted = FALSE; 3366 lun->eedp_block_size = 0; 3367 break; 3368 } 3369 3370 if (rcap_buf->protect & 0x01) { 3371 mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID " 3372 "%d is formatted for EEDP support.\n", 3373 done_ccb->ccb_h.target_lun, 3374 done_ccb->ccb_h.target_id); 3375 lun->eedp_formatted = TRUE; 3376 lun->eedp_block_size = scsi_4btoul(rcap_buf->length); 3377 } 3378 break; 3379 } 3380 3381 // Finished with this CCB and path. 
3382 free(rcap_buf, M_MPR); 3383 xpt_free_path(done_ccb->ccb_h.path); 3384 xpt_free_ccb(done_ccb); 3385 } 3386 #endif /* (__FreeBSD_version < 901503) || \ 3387 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */ 3388 3389 void 3390 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm, 3391 struct mprsas_target *target, lun_id_t lun_id) 3392 { 3393 union ccb *ccb; 3394 path_id_t path_id; 3395 3396 /* 3397 * Set the INRESET flag for this target so that no I/O will be sent to 3398 * the target until the reset has completed. If an I/O request does 3399 * happen, the devq will be frozen. The CCB holds the path which is 3400 * used to release the devq. The devq is released and the CCB is freed 3401 * when the TM completes. 3402 */ 3403 ccb = xpt_alloc_ccb_nowait(); 3404 if (ccb) { 3405 path_id = cam_sim_path(sc->sassc->sim); 3406 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id, 3407 target->tid, lun_id) != CAM_REQ_CMP) { 3408 xpt_free_ccb(ccb); 3409 } else { 3410 tm->cm_ccb = ccb; 3411 tm->cm_targ = target; 3412 target->flags |= MPRSAS_TARGET_INRESET; 3413 } 3414 } 3415 } 3416 3417 int 3418 mprsas_startup(struct mpr_softc *sc) 3419 { 3420 /* 3421 * Send the port enable message and set the wait_for_port_enable flag. 3422 * This flag helps to keep the simq frozen until all discovery events 3423 * are processed. 
3424 */ 3425 sc->wait_for_port_enable = 1; 3426 mprsas_send_portenable(sc); 3427 return (0); 3428 } 3429 3430 static int 3431 mprsas_send_portenable(struct mpr_softc *sc) 3432 { 3433 MPI2_PORT_ENABLE_REQUEST *request; 3434 struct mpr_command *cm; 3435 3436 MPR_FUNCTRACE(sc); 3437 3438 if ((cm = mpr_alloc_command(sc)) == NULL) 3439 return (EBUSY); 3440 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req; 3441 request->Function = MPI2_FUNCTION_PORT_ENABLE; 3442 request->MsgFlags = 0; 3443 request->VP_ID = 0; 3444 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 3445 cm->cm_complete = mprsas_portenable_complete; 3446 cm->cm_data = NULL; 3447 cm->cm_sge = NULL; 3448 3449 mpr_map_command(sc, cm); 3450 mpr_dprint(sc, MPR_XINFO, 3451 "mpr_send_portenable finished cm %p req %p complete %p\n", 3452 cm, cm->cm_req, cm->cm_complete); 3453 return (0); 3454 } 3455 3456 static void 3457 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm) 3458 { 3459 MPI2_PORT_ENABLE_REPLY *reply; 3460 struct mprsas_softc *sassc; 3461 3462 MPR_FUNCTRACE(sc); 3463 sassc = sc->sassc; 3464 3465 /* 3466 * Currently there should be no way we can hit this case. It only 3467 * happens when we have a failure to allocate chain frames, and 3468 * port enable commands don't have S/G lists. 3469 */ 3470 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 3471 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! 
" 3472 "This should not happen!\n", __func__, cm->cm_flags); 3473 } 3474 3475 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply; 3476 if (reply == NULL) 3477 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n"); 3478 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) != 3479 MPI2_IOCSTATUS_SUCCESS) 3480 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n"); 3481 3482 mpr_free_command(sc, cm); 3483 if (sc->mpr_ich.ich_arg != NULL) { 3484 mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n"); 3485 config_intrhook_disestablish(&sc->mpr_ich); 3486 sc->mpr_ich.ich_arg = NULL; 3487 } 3488 3489 /* 3490 * Done waiting for port enable to complete. Decrement the refcount. 3491 * If refcount is 0, discovery is complete and a rescan of the bus can 3492 * take place. 3493 */ 3494 sc->wait_for_port_enable = 0; 3495 sc->port_enable_complete = 1; 3496 wakeup(&sc->port_enable_complete); 3497 mprsas_startup_decrement(sassc); 3498 } 3499 3500 int 3501 mprsas_check_id(struct mprsas_softc *sassc, int id) 3502 { 3503 struct mpr_softc *sc = sassc->sc; 3504 char *ids; 3505 char *name; 3506 3507 ids = &sc->exclude_ids[0]; 3508 while((name = strsep(&ids, ",")) != NULL) { 3509 if (name[0] == '\0') 3510 continue; 3511 if (strtol(name, NULL, 0) == (long)id) 3512 return (1); 3513 } 3514 3515 return (0); 3516 } 3517 3518 void 3519 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets) 3520 { 3521 struct mprsas_softc *sassc; 3522 struct mprsas_lun *lun, *lun_tmp; 3523 struct mprsas_target *targ; 3524 int i; 3525 3526 sassc = sc->sassc; 3527 /* 3528 * The number of targets is based on IOC Facts, so free all of 3529 * the allocated LUNs for each target and then the target buffer 3530 * itself. 
3531 */ 3532 for (i=0; i< maxtargets; i++) { 3533 targ = &sassc->targets[i]; 3534 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) { 3535 free(lun, M_MPR); 3536 } 3537 } 3538 free(sassc->targets, M_MPR); 3539 3540 sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets, 3541 M_MPR, M_WAITOK|M_ZERO); 3542 if (!sassc->targets) { 3543 panic("%s failed to alloc targets with error %d\n", 3544 __func__, ENOMEM); 3545 } 3546 } 3547