1 /*- 2 * Copyright (c) 2009 Yahoo! Inc. 3 * Copyright (c) 2011-2014 LSI Corp. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* Communications core for LSI MPT2 */

/* TODO Move headers to mprvar */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/selinfo.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/queue.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/sbuf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <machine/stdarg.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#if __FreeBSD_version >= 900026
#include <cam/scsi/smp_all.h>
#endif

#include <dev/mpr/mpi/mpi2_type.h>
#include <dev/mpr/mpi/mpi2.h>
#include <dev/mpr/mpi/mpi2_ioc.h>
#include <dev/mpr/mpi/mpi2_sas.h>
#include <dev/mpr/mpi/mpi2_cnfg.h>
#include <dev/mpr/mpi/mpi2_init.h>
#include <dev/mpr/mpi/mpi2_tool.h>
#include <dev/mpr/mpr_ioctl.h>
#include <dev/mpr/mprvar.h>
#include <dev/mpr/mpr_table.h>
#include <dev/mpr/mpr_sas.h>

#define MPRSAS_DISCOVERY_TIMEOUT	20
#define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */

/*
 * static array to check SCSI OpCode for EEDP protection bits
 *
 * Indexed by SCSI CDB opcode; non-zero entries name the EEDP flag to apply
 * to a SCSI IO request for that opcode (READ-class ops check/remove
 * protection info, WRITE/VERIFY-class ops insert it).
 */
#define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x28 READ(10), 0x2A WRITE(10), 0x2E WRITE&VERIFY(10), 0x2F VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x88 READ(16), 0x8A WRITE(16), 0x8E WRITE&VERIFY(16), 0x8F VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA8 READ(12), 0xAA WRITE(12), 0xAE WRITE&VERIFY(12), 0xAF VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");

/* Forward declarations for this file's static helpers. */
static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
static void mprsas_poll(struct cam_sim *sim);
static void mprsas_scsiio_timeout(void *data);
static void mprsas_abort_complete(struct mpr_softc *sc,
    struct mpr_command *cm);
static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
static void mprsas_resetdev_complete(struct mpr_softc *,
    struct mpr_command *);
static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
    struct mpr_command *cm);
static int mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm,
    uint8_t type);
static void mprsas_async(void *callback_arg, uint32_t code,
    struct cam_path *path, void *arg);
static void mprsas_prepare_ssu(struct mpr_softc *sc, struct cam_path *path,
    struct ccb_getdev *cgd);
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
    struct ccb_getdev *cgd);
static void mprsas_read_cap_done(struct cam_periph *periph,
    union ccb *done_ccb);
#endif
static int mprsas_send_portenable(struct mpr_softc *sc);
static void mprsas_portenable_complete(struct mpr_softc *sc,
    struct mpr_command *cm);

#if __FreeBSD_version >= 900026
static void
mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
static void mprsas_send_smpcmd(struct mprsas_softc *sassc,
    union ccb *ccb, uint64_t sasaddr);
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
#endif

/*
 * Linear scan of the target array for a target whose firmware device handle
 * matches 'handle', starting at index 'start' (allows resuming a search).
 * Returns the matching target, or NULL if no target currently owns that
 * handle.
 */
struct mprsas_target *
mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
    uint16_t handle)
{
	struct mprsas_target *target;
	int i;

	for (i = start; i < sassc->maxtargets; i++) {
		target = &sassc->targets[i];
		if (target->handle == handle)
			return (target);
	}

	return (NULL);
}

/* we need to freeze the simq during attach and diag reset, to avoid failing
 * commands before device handles have been found by discovery.  Since
 * discovery involves reading config pages and possibly sending commands,
 * discovery actions may continue even after we receive the end of discovery
 * event, so refcount discovery actions instead of assuming we can unfreeze
 * the simq when we get the event.
175 */ 176 void 177 mprsas_startup_increment(struct mprsas_softc *sassc) 178 { 179 MPR_FUNCTRACE(sassc->sc); 180 181 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) { 182 if (sassc->startup_refcount++ == 0) { 183 /* just starting, freeze the simq */ 184 mpr_dprint(sassc->sc, MPR_INIT, 185 "%s freezing simq\n", __func__); 186 #if __FreeBSD_version >= 1000039 187 xpt_hold_boot(); 188 #endif 189 xpt_freeze_simq(sassc->sim, 1); 190 } 191 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__, 192 sassc->startup_refcount); 193 } 194 } 195 196 void 197 mprsas_release_simq_reinit(struct mprsas_softc *sassc) 198 { 199 if (sassc->flags & MPRSAS_QUEUE_FROZEN) { 200 sassc->flags &= ~MPRSAS_QUEUE_FROZEN; 201 xpt_release_simq(sassc->sim, 1); 202 mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n"); 203 } 204 } 205 206 void 207 mprsas_startup_decrement(struct mprsas_softc *sassc) 208 { 209 MPR_FUNCTRACE(sassc->sc); 210 211 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) { 212 if (--sassc->startup_refcount == 0) { 213 /* finished all discovery-related actions, release 214 * the simq and rescan for the latest topology. 215 */ 216 mpr_dprint(sassc->sc, MPR_INIT, 217 "%s releasing simq\n", __func__); 218 sassc->flags &= ~MPRSAS_IN_STARTUP; 219 xpt_release_simq(sassc->sim, 1); 220 #if __FreeBSD_version >= 1000039 221 xpt_release_boot(); 222 #else 223 mprsas_rescan_target(sassc->sc, NULL); 224 #endif 225 } 226 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__, 227 sassc->startup_refcount); 228 } 229 } 230 231 /* LSI's firmware requires us to stop sending commands when we're doing task 232 * management, so refcount the TMs and keep the simq frozen when any are in 233 * use. 
 */
struct mpr_command *
mprsas_alloc_tm(struct mpr_softc *sc)
{
	struct mpr_command *tm;

	MPR_FUNCTRACE(sc);
	tm = mpr_alloc_high_priority_command(sc);
	if (tm != NULL) {
		/* First outstanding TM freezes the simq (see comment above). */
		if (sc->sassc->tm_count++ == 0) {
			mpr_dprint(sc, MPR_RECOVERY,
			    "%s freezing simq\n", __func__);
			xpt_freeze_simq(sc->sassc->sim, 1);
		}
		mpr_dprint(sc, MPR_RECOVERY, "%s tm_count %u\n", __func__,
		    sc->sassc->tm_count);
	}
	/* NULL if no high-priority command frame is available. */
	return tm;
}

/*
 * Return a TM command to the high-priority pool; tolerates tm == NULL so
 * error paths can call it unconditionally.
 */
void
mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
{
	mpr_dprint(sc, MPR_TRACE, "%s", __func__);
	if (tm == NULL)
		return;

	/* if there are no TMs in use, we can release the simq.  We use our
	 * own refcount so that it's easier for a diag reset to cleanup and
	 * release the simq.
	 */
	if (--sc->sassc->tm_count == 0) {
		mpr_dprint(sc, MPR_RECOVERY, "%s releasing simq\n", __func__);
		xpt_release_simq(sc->sassc->sim, 1);
	}
	mpr_dprint(sc, MPR_RECOVERY, "%s tm_count %u\n", __func__,
	    sc->sassc->tm_count);

	mpr_free_high_priority_command(sc, tm);
}

/*
 * Schedule an async CAM rescan of one target, or of the whole bus when
 * targ == NULL.  The target id is the index of 'targ' within the targets
 * array.  Failure to allocate the CCB or path is logged and the rescan is
 * silently dropped.
 */
void
mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
{
	struct mprsas_softc *sassc = sc->sassc;
	path_id_t pathid;
	target_id_t targetid;
	union ccb *ccb;

	MPR_FUNCTRACE(sc);
	pathid = cam_sim_path(sassc->sim);
	if (targ == NULL)
		targetid = CAM_TARGET_WILDCARD;
	else
		targetid = targ - sassc->targets;

	/*
	 * Allocate a CCB and schedule a rescan.
	 */
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
		return;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
		xpt_free_ccb(ccb);
		return;
	}

	if (targetid == CAM_TARGET_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else
		ccb->ccb_h.func_code = XPT_SCAN_TGT;

	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
	/* xpt_rescan() takes ownership of the CCB. */
	xpt_rescan(ccb);
}

/*
 * printf-style debug logging for a command, prefixed with its CAM path (or
 * a synthesized "(noperiph:...)" prefix when no CCB is attached) and SMID.
 * Emits nothing unless 'level' is enabled in sc->mpr_debug.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ?
cm->cm_targ->tid : 0xFFFFFFFF, 349 cm->cm_lun); 350 } 351 352 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID); 353 sbuf_vprintf(&sb, fmt, ap); 354 sbuf_finish(&sb); 355 mpr_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb)); 356 357 va_end(ap); 358 } 359 360 static void 361 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm) 362 { 363 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 364 struct mprsas_target *targ; 365 uint16_t handle; 366 367 MPR_FUNCTRACE(sc); 368 369 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 370 handle = (uint16_t)(uintptr_t)tm->cm_complete_data; 371 targ = tm->cm_targ; 372 373 if (reply == NULL) { 374 /* XXX retry the remove after the diag reset completes? */ 375 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device " 376 "0x%04x\n", __func__, handle); 377 mprsas_free_tm(sc, tm); 378 return; 379 } 380 381 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) { 382 mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting " 383 "device 0x%x\n", reply->IOCStatus, handle); 384 mprsas_free_tm(sc, tm); 385 return; 386 } 387 388 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n", 389 reply->TerminationCount); 390 mpr_free_reply(sc, tm->cm_reply_data); 391 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */ 392 393 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n", 394 targ->tid, handle); 395 396 /* 397 * Don't clear target if remove fails because things will get confusing. 398 * Leave the devname and sasaddr intact so that we know to avoid reusing 399 * this target id if possible, and so we can assign the same target id 400 * to this device if it comes back in the future. 
401 */ 402 if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) { 403 targ = tm->cm_targ; 404 targ->handle = 0x0; 405 targ->encl_handle = 0x0; 406 targ->encl_level_valid = 0x0; 407 targ->encl_level = 0x0; 408 targ->connector_name[0] = ' '; 409 targ->connector_name[1] = ' '; 410 targ->connector_name[2] = ' '; 411 targ->connector_name[3] = ' '; 412 targ->encl_slot = 0x0; 413 targ->exp_dev_handle = 0x0; 414 targ->phy_num = 0x0; 415 targ->linkrate = 0x0; 416 targ->devinfo = 0x0; 417 targ->flags = 0x0; 418 targ->scsi_req_desc_type = 0; 419 } 420 421 mprsas_free_tm(sc, tm); 422 } 423 424 425 /* 426 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal. 427 * Otherwise Volume Delete is same as Bare Drive Removal. 428 */ 429 void 430 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle) 431 { 432 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 433 struct mpr_softc *sc; 434 struct mpr_command *cm; 435 struct mprsas_target *targ = NULL; 436 437 MPR_FUNCTRACE(sassc->sc); 438 sc = sassc->sc; 439 440 targ = mprsas_find_target_by_handle(sassc, 0, handle); 441 if (targ == NULL) { 442 /* FIXME: what is the action? */ 443 /* We don't know about this device? 
*/ 444 mpr_dprint(sc, MPR_ERROR, 445 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle); 446 return; 447 } 448 449 targ->flags |= MPRSAS_TARGET_INREMOVAL; 450 451 cm = mprsas_alloc_tm(sc); 452 if (cm == NULL) { 453 mpr_dprint(sc, MPR_ERROR, 454 "%s: command alloc failure\n", __func__); 455 return; 456 } 457 458 mprsas_rescan_target(sc, targ); 459 460 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req; 461 req->DevHandle = targ->handle; 462 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 463 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 464 465 /* SAS Hard Link Reset / SATA Link Reset */ 466 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 467 468 cm->cm_targ = targ; 469 cm->cm_data = NULL; 470 cm->cm_desc.HighPriority.RequestFlags = 471 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 472 cm->cm_complete = mprsas_remove_volume; 473 cm->cm_complete_data = (void *)(uintptr_t)handle; 474 mpr_map_command(sc, cm); 475 } 476 477 /* 478 * The MPT2 firmware performs debounce on the link to avoid transient link 479 * errors and false removals. When it does decide that link has been lost 480 * and a device needs to go away, it expects that the host will perform a 481 * target reset and then an op remove. The reset has the side-effect of 482 * aborting any outstanding requests for the device, which is required for 483 * the op-remove to succeed. It's not clear if the host should check for 484 * the device coming back alive after the reset. 485 */ 486 void 487 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle) 488 { 489 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 490 struct mpr_softc *sc; 491 struct mpr_command *cm; 492 struct mprsas_target *targ = NULL; 493 494 MPR_FUNCTRACE(sassc->sc); 495 496 sc = sassc->sc; 497 498 targ = mprsas_find_target_by_handle(sassc, 0, handle); 499 if (targ == NULL) { 500 /* FIXME: what is the action? */ 501 /* We don't know about this device? 
*/ 502 mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n", 503 __func__, handle); 504 return; 505 } 506 507 targ->flags |= MPRSAS_TARGET_INREMOVAL; 508 509 cm = mprsas_alloc_tm(sc); 510 if (cm == NULL) { 511 mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n", 512 __func__); 513 return; 514 } 515 516 mprsas_rescan_target(sc, targ); 517 518 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req; 519 memset(req, 0, sizeof(*req)); 520 req->DevHandle = htole16(targ->handle); 521 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 522 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 523 524 /* SAS Hard Link Reset / SATA Link Reset */ 525 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 526 527 cm->cm_targ = targ; 528 cm->cm_data = NULL; 529 cm->cm_desc.HighPriority.RequestFlags = 530 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 531 cm->cm_complete = mprsas_remove_device; 532 cm->cm_complete_data = (void *)(uintptr_t)handle; 533 mpr_map_command(sc, cm); 534 } 535 536 static void 537 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm) 538 { 539 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 540 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req; 541 struct mprsas_target *targ; 542 struct mpr_command *next_cm; 543 uint16_t handle; 544 545 MPR_FUNCTRACE(sc); 546 547 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 548 handle = (uint16_t)(uintptr_t)tm->cm_complete_data; 549 targ = tm->cm_targ; 550 551 /* 552 * Currently there should be no way we can hit this case. It only 553 * happens when we have a failure to allocate chain frames, and 554 * task management commands don't have S/G lists. 555 */ 556 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 557 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of " 558 "handle %#04x! This should not happen!\n", __func__, 559 tm->cm_flags, handle); 560 mprsas_free_tm(sc, tm); 561 return; 562 } 563 564 if (reply == NULL) { 565 /* XXX retry the remove after the diag reset completes? 
		 */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_map_command(sc, tm);

	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
	/*
	 * Complete any commands still queued on the target with
	 * CAM_DEV_NOT_THERE; the reset should have terminated them in
	 * firmware but their completions may have been missed.  Note 'tm'
	 * is reused as the iteration variable here (the TM itself was
	 * already remapped above).
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		mprsas_scsiio_complete(sc, tm);
	}
}

/*
 * Completion of the SAS_OP_REMOVE_DEVICE request (final stage of removal).
 * On success, clears the target's firmware state and frees its LUN list;
 * on failure the target is left intact (see comment below).  Always frees
 * the TM command.
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		/* Free the per-LUN state now that the device is gone. */
		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}

/*
 * Subscribe to the firmware events this driver cares about (device/topology
 * changes, discovery, IR status, temperature).  Replies are delivered to
 * mprsas_evt_handler().  Always returns 0.
 */
static int
mprsas_register_events(struct mpr_softc *sc)
{
	uint8_t events[16];

	bzero(events, 16);
	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	setbit(events, MPI2_EVENT_IR_VOLUME);
	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);

	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
	    &sc->sassc->mprsas_eh);

	return (0);
}

/*
 * Attach the CAM SAS layer: allocate the per-SAS softc and target array,
 * create the SIM/devq, register the bus, freeze the simq until discovery
 * completes, and register for async CAM events (EEDP detection).  Returns
 * 0 on success or an errno; on failure mpr_detach_sas() tears down any
 * partial state.
 */
int
mpr_attach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	cam_status status;
	int unit, error = 0;

	MPR_FUNCTRACE(sc);

	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
	/* NOTE(review): M_WAITOK malloc cannot return NULL; this check and
	 * the one below look like dead code — confirm before removing. */
	if (!sassc) {
		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
		    __func__, __LINE__);
		return (ENOMEM);
	}

	/*
	 * XXX MaxTargets could change during a reinit.  since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets;
	sassc->targets = malloc(sizeof(struct mprsas_target) *
	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
	if (!sassc->targets) {
		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
		    __func__, __LINE__);
		free(sassc, M_MPR);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mpr_dev);
	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
	    unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);

	/* Run the task queue with lowest priority */
	taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
	    device_get_nameunit(sc->mpr_dev));

	mpr_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
		mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n",
		    error);
		mpr_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.  Freezing
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mprsas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mprafe*/);

	sassc->tm_count = 0;

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mpr_printf(sc, "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
#else
		event = AC_FOUND_DEVICE;
#endif
		status = xpt_register_async(event, mprsas_async, sc,
		    sassc->path);
		if (status != CAM_REQ_CMP) {
			mpr_dprint(sc, MPR_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mpr_printf(sc, "EEDP capabilities disabled.\n");
	}

	mpr_unlock(sc);

	mprsas_register_events(sc);
out:
	if (error)
		mpr_detach_sas(sc);
	return (error);
}

/*
 * Tear down the CAM SAS layer: deregister events, drain the event
 * taskqueue, detach from CAM (async handler, simq freeze, bus, SIM, devq),
 * and free per-target LUN state.  Safe to call on a partially-attached
 * softc (used as the error path of mpr_attach_sas()).  Returns 0.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Drop the startup freeze if discovery never finished. */
	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	sassc->flags |= MPRSAS_SHUTDOWN;
	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}

/*
 * Discovery has finished: cancel the pending discovery-timeout callout,
 * if one was armed.
 */
void
mprsas_discovery_end(struct mprsas_softc *sassc)
{
	struct mpr_softc *sc = sassc->sc;

	MPR_FUNCTRACE(sc);

	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
		callout_stop(&sassc->discovery_callout);

}

static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
/*
 * CAM action entry point for the SIM: dispatch on the CCB function code.
 * Called with the softc mutex held (asserted below).  CCBs handled
 * synchronously fall through to xpt_done() at the bottom; asynchronous
 * paths (SCSI I/O, SMP, device reset) return directly and complete the
 * CCB later.
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "%s func 0x%x\n", __func__,
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report HBA capabilities and path limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;
		/* Initiator claims the last target id on the bus. */
		cpi->initiator_id = sassc->maxtargets - 1;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXXSLM-probably need to base this number on max SGL's and
		 * page size.
		 */
		cpi->maxio = 256 * 1024;
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report per-target transport settings (link rate, TQ). */
		struct ccb_trans_settings *cts;
		struct ccb_trans_settings_sas *sas;
		struct ccb_trans_settings_scsi *scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* handle 0 means no device currently mapped at this id. */
		if (targ->handle == 0x0) {
			cts->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Link rate codes per the MPI spec: 1.5/3/6 Gbps. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		cts->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_RESET_DEV:
		mpr_dprint(sassc->sc, MPR_XINFO,
		    "mprsas_action XPT_RESET_DEV\n");
		mprsas_action_resetdev(sassc, ccb);
		/* resetdev completes the CCB itself. */
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* Not implemented; pretend success so CAM doesn't stall. */
		mpr_dprint(sassc->sc, MPR_XINFO,
		    "mprsas_action faking success for abort or reset\n");
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_SCSI_IO:
		mprsas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mprsas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	}
	xpt_done(ccb);

}
/*
 * Send an async notification (e.g. AC_BUS_RESET, AC_SENT_BDR) to CAM for
 * the given target/LUN.  Builds a temporary path, fires the async event,
 * and frees the path.  Failures to build the path are logged and ignored.
 */
static void
mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
    target_id_t target_id, lun_id_t lun_id)
{
	path_id_t path_id = cam_sim_path(sc->sassc->sim);
	struct cam_path *path;

	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
	    ac_code, target_id, (uintmax_t)lun_id);

	if (xpt_create_path(&path, NULL,
	    path_id, target_id, lun_id) != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
		    "notification\n");
		return;
	}

	xpt_async(ac_code, path, NULL);
	xpt_free_path(path);
}

/*
 * Force completion of every in-flight command with a NULL reply.  Used
 * during diag reset recovery: each command is completed via its completion
 * callback and/or woken if a sleeper is waiting on it.  Commands that are
 * neither completable nor free are logged as anomalies.
 * Called with the softc mutex held (asserted).
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	/* NOTE(review): slot 0 is skipped — presumably reserved; confirm. */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Polled commands need the COMPLETE flag so pollers exit. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		}

		if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
/*
 * Post-diag-reset reinitialization: put the driver back into startup mode,
 * notify CAM of a bus reset, force-complete all outstanding commands, and
 * invalidate every target's device handles so they are rediscovered.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u tm %u after command "
	    "completion\n", __func__, sc->sassc->startup_refcount,
	    sc->sassc->tm_count);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}

/*
 * Callout handler armed by mprsas_send_reset()/mprsas_send_abort(): a task
 * management request itself timed out, so escalate to a full controller
 * diag reset.  Runs with the softc mutex held (callout was initialized
 * with the mutex; asserted below).
 */
static void
mprsas_tm_timeout(void *data)
{
	struct mpr_command *tm = data;
	struct mpr_softc *sc = tm->cm_sc;

	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY,
	    "task mgmt %p timed out\n", tm);
	mpr_reinit(sc);
}
" 1192 "This should not happen!\n", __func__, tm->cm_flags); 1193 mprsas_free_tm(sc, tm); 1194 return; 1195 } 1196 1197 if (reply == NULL) { 1198 mprsas_log_command(tm, MPR_RECOVERY, 1199 "NULL reset reply for tm %p\n", tm); 1200 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) { 1201 /* this completion was due to a reset, just cleanup */ 1202 targ->flags &= ~MPRSAS_TARGET_INRESET; 1203 targ->tm = NULL; 1204 mprsas_free_tm(sc, tm); 1205 } 1206 else { 1207 /* we should have gotten a reply. */ 1208 mpr_reinit(sc); 1209 } 1210 return; 1211 } 1212 1213 mprsas_log_command(tm, MPR_RECOVERY, 1214 "logical unit reset status 0x%x code 0x%x count %u\n", 1215 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), 1216 le32toh(reply->TerminationCount)); 1217 1218 /* See if there are any outstanding commands for this LUN. 1219 * This could be made more efficient by using a per-LU data 1220 * structure of some sort. 1221 */ 1222 TAILQ_FOREACH(cm, &targ->commands, cm_link) { 1223 if (cm->cm_lun == tm->cm_lun) 1224 cm_count++; 1225 } 1226 1227 if (cm_count == 0) { 1228 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO, 1229 "logical unit %u finished recovery after reset\n", 1230 tm->cm_lun, tm); 1231 1232 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, 1233 tm->cm_lun); 1234 1235 /* we've finished recovery for this logical unit. check and 1236 * see if some other logical unit has a timedout command 1237 * that needs to be processed. 1238 */ 1239 cm = TAILQ_FIRST(&targ->timedout_commands); 1240 if (cm) { 1241 mprsas_send_abort(sc, tm, cm); 1242 } 1243 else { 1244 targ->tm = NULL; 1245 mprsas_free_tm(sc, tm); 1246 } 1247 } 1248 else { 1249 /* if we still have commands for this LUN, the reset 1250 * effectively failed, regardless of the status reported. 1251 * Escalate to a target reset. 
/*
 * Completion handler for a TARGET_RESET task management command.
 *
 * If the target now has zero outstanding commands, recovery for the whole
 * target (all LUNs) is complete: announce AC_SENT_BDR with a wildcard LUN
 * and release the TM.  If commands remain outstanding, the reset failed
 * and we escalate to a full controller reinit.  A NULL reply during a
 * diag reset is cleaned up quietly; a NULL reply otherwise forces reinit.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* TM completed; disarm the escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR,"%s: cm_flags = %#x for target reset! "
		    "This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "NULL reset reply for tm %p\n", tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->flags &= ~MPRSAS_TARGET_INRESET;
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_reinit(sc);
		}
		return;
	}

	mprsas_log_command(tm, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	targ->flags &= ~MPRSAS_TARGET_INRESET;

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "recovery finished after target reset\n");

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "target reset complete for tm %p, but still have %u "
		    "command(s)\n", tm, targ->outstanding);
		mpr_reinit(sc);
	}
}
/* Seconds before a pending reset TM is deemed timed out (see callout). */
#define MPR_RESET_TIMEOUT 30

/*
 * Build and send a LOGICAL_UNIT_RESET or TARGET_RESET task management
 * request on the pre-allocated TM command 'tm'.
 *
 * 'type' selects the reset kind and wires up the matching completion
 * handler.  Arms a MPR_RESET_TIMEOUT callout that escalates to a diag
 * reset if no completion arrives.  Returns the mpr_map_command() result,
 * or -1 for a missing device handle / unexpected type.
 */
static int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
		    __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "sending logical unit reset\n");
		tm->cm_complete = mprsas_logical_unit_reset_complete;
	}
	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		tm->cm_targ->flags |= MPRSAS_TARGET_INRESET;
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "sending target reset\n");
		tm->cm_complete = mprsas_target_reset_complete;
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	mpr_dprint(sc, MPR_XINFO, "to target %u handle 0x%04x\n", target->tid,
	    target->handle);
	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", target->encl_level,
		    target->encl_slot, target->connector_name);
	}

	tm->cm_data = NULL;
	/* TM requests go on the high-priority request queue. */
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mprsas_log_command(tm, MPR_RECOVERY,
		    "error %d sending reset type %u\n",
		    err, type);

	return err;
}
/*
 * Completion handler for an ABORT_TASK task management command.
 *
 * Walks the target's timed-out command list: if it is empty, recovery is
 * finished and the TM is released; if the head of the list is a different
 * command than the one just aborted, the abort worked and the next abort
 * is issued; if the head is still the same command, the abort failed and
 * we escalate to a logical unit reset.  NULL-reply handling mirrors the
 * reset completion handlers above.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* TM completed; disarm the escalation timeout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_reinit(sc);
		}
		return;
	}

	mprsas_log_command(tm, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mprsas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1478 */ 1479 mprsas_log_command(tm, MPR_RECOVERY, 1480 "abort failed for TaskMID %u tm %p\n", 1481 le16toh(req->TaskMID), tm); 1482 1483 mprsas_send_reset(sc, tm, 1484 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET); 1485 } 1486 } 1487 1488 #define MPR_ABORT_TIMEOUT 5 1489 1490 static int 1491 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm, 1492 struct mpr_command *cm) 1493 { 1494 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 1495 struct mprsas_target *targ; 1496 int err; 1497 1498 targ = cm->cm_targ; 1499 if (targ->handle == 0) { 1500 mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n", 1501 __func__, cm->cm_ccb->ccb_h.target_id); 1502 return -1; 1503 } 1504 1505 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO, 1506 "Aborting command %p\n", cm); 1507 1508 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 1509 req->DevHandle = htole16(targ->handle); 1510 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 1511 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK; 1512 1513 /* XXX Need to handle invalid LUNs */ 1514 MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun); 1515 1516 req->TaskMID = htole16(cm->cm_desc.Default.SMID); 1517 1518 tm->cm_data = NULL; 1519 tm->cm_desc.HighPriority.RequestFlags = 1520 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 1521 tm->cm_complete = mprsas_abort_complete; 1522 tm->cm_complete_data = (void *)tm; 1523 tm->cm_targ = cm->cm_targ; 1524 tm->cm_lun = cm->cm_lun; 1525 1526 callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz, 1527 mprsas_tm_timeout, tm); 1528 1529 targ->aborts++; 1530 1531 err = mpr_map_command(sc, tm); 1532 if (err) 1533 mprsas_log_command(tm, MPR_RECOVERY, 1534 "error %d sending abort for cm %p SMID %u\n", 1535 err, cm, req->TaskMID); 1536 return err; 1537 } 1538 1539 1540 static void 1541 mprsas_scsiio_timeout(void *data) 1542 { 1543 struct mpr_softc *sc; 1544 struct mpr_command *cm; 1545 struct mprsas_target *targ; 1546 1547 cm = (struct mpr_command *)data; 1548 sc = cm->cm_sc; 1549 1550 MPR_FUNCTRACE(sc); 
/*
 * Callout handler for a SCSI I/O command that exceeded its CCB timeout.
 *
 * First polls the interrupt handler in case the completion is merely
 * pending; if the command really is stuck, it is marked TIMEDOUT, queued
 * on the target's timedout_commands list, and recovery is started by
 * allocating a TM and sending an abort (unless recovery is already in
 * progress for the target).  Runs with the softc mutex held (asserted).
 */
static void
mprsas_scsiio_timeout(void *data)
{
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_state == MPR_CM_STATE_FREE) {
		/* Completion raced the timeout; nothing to recover. */
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	mprsas_log_command(cm, MPR_XINFO, "command timeout cm %p ccb %p "
	    "target %u, handle(0x%04x)\n", cm, cm->cm_ccb, targ->tid,
	    targ->handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */

	cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
	cm->cm_state = MPR_CM_STATE_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}
/*
 * Handle an XPT_SCSI_IO CCB: validate the target, allocate a driver
 * command, translate the CCB into an MPI2 SCSI IO request (direction,
 * tagging, LUN, CDB, optional EEDP protection fields), arm the per-command
 * timeout, and hand the request to the hardware via mpr_map_command().
 *
 * Completes the CCB immediately (xpt_done) on any validation/allocation
 * failure; otherwise completion happens later in mprsas_scsiio_complete().
 * Called with the softc mutex held (asserted).
 */
static void
mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;
	struct mpr_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	csio = &ccb->csio;
	targ = &sassc->targets[csio->ccb_h.target_id];
	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* handle 0 means no device currently mapped at this target id. */
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n", 
		    __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}
	/* RAID component members are not directly addressable for SCSI IO. */
	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
		mpr_dprint(sc, MPR_TRACE, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			csio->ccb_h.status = CAM_REQ_CMP;
		else
			csio->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
		mpr_dprint(sc, MPR_TRACE, "%s shutting down\n", __func__);
		csio->ccb_h.status = CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

	/*
	 * Out of commands (or mid-diag-reset): freeze the simq and ask CAM
	 * to requeue this CCB later.
	 */
	cm = mpr_alloc_command(sc);
	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpr_free_command(sc, cm);
		}
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Fill in the MPI2 SCSI IO request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPR_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	/*
	 * NOTE(review): IoFlags is assigned csio->cdb_len again after the
	 * CDB copy below — this first assignment appears redundant; confirm
	 * before removing.
	 */
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* 32-byte CDBs carry 4 extra dwords beyond the base 16. */
	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);

	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mpr_free_command(sc, cm);
		ccb->ccb_h.status = CAM_LUN_INVALID;
		xpt_done(ccb);
		return;
	}

	/* CDB may be passed by pointer or inline in the CCB. */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA offset depends on 16- vs 10/12-byte CDB. */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->
				    CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				/* Set RDPROTECT/WRPROTECT = 001b in byte 1. */
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	/* Embedded SGL space: frame bytes after the 24-dword header. */
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;
	/*
	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
	 * and set descriptor type.
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		cm->cm_desc.FastPathSCSIIO.DevHandle = htole16(targ->handle);
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

	/* CCB timeout is in milliseconds; convert to ticks. */
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	    mprsas_scsiio_timeout, cm);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_map_command(sc, cm);
	return;
}
type, need to set a bit in IoFlags (SCSI IO is 0) 1847 * and set descriptor type. 1848 */ 1849 if (targ->scsi_req_desc_type == 1850 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) { 1851 req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH; 1852 cm->cm_desc.FastPathSCSIIO.RequestFlags = 1853 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; 1854 cm->cm_desc.FastPathSCSIIO.DevHandle = htole16(targ->handle); 1855 } else { 1856 cm->cm_desc.SCSIIO.RequestFlags = 1857 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; 1858 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle); 1859 } 1860 1861 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000, 1862 mprsas_scsiio_timeout, cm); 1863 1864 targ->issued++; 1865 targ->outstanding++; 1866 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link); 1867 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1868 1869 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n", 1870 __func__, cm, ccb, targ->outstanding); 1871 1872 mpr_map_command(sc, cm); 1873 return; 1874 } 1875 1876 static void 1877 mpr_response_code(struct mpr_softc *sc, u8 response_code) 1878 { 1879 char *desc; 1880 1881 switch (response_code) { 1882 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE: 1883 desc = "task management request completed"; 1884 break; 1885 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME: 1886 desc = "invalid frame"; 1887 break; 1888 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: 1889 desc = "task management request not supported"; 1890 break; 1891 case MPI2_SCSITASKMGMT_RSP_TM_FAILED: 1892 desc = "task management request failed"; 1893 break; 1894 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED: 1895 desc = "task management request succeeded"; 1896 break; 1897 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN: 1898 desc = "invalid lun"; 1899 break; 1900 case 0xA: 1901 desc = "overlapped tag attempted"; 1902 break; 1903 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: 1904 desc = "task queued, however not sent to target"; 1905 break; 1906 default: 1907 desc = "unknown"; 1908 break; 1909 } 1910 mpr_dprint(sc, 
/**
 * mpr_sc_failed_io_info - log details of a non-successful SCSI_IO request
 *
 * Decodes the reply's IOCStatus, SCSIStatus, and SCSIState fields into
 * human-readable strings and emits them at MPR_XINFO level, along with
 * enclosure info, sense data (when valid), and the response code (when
 * valid).  Purely informational; no state change.
 */
static void
mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
    Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/* scsi_state flags are strcat'd into the softc scratch buffer. */
	char *desc_scsi_state = sc->tmp_string;
	u32 log_info = le32toh(mpi_reply->IOCLogInfo);

	/*
	 * NOTE(review): 0x31170000 is silently suppressed — presumably a
	 * known-noisy firmware log-info code; confirm its meaning.
	 */
	if (log_info == 0x31170000)
		return;

	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/* Build the state description; empty state prints as a space. */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
	/* We can add more detail about underflow data here
	 * TO-DO
	 * */
	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
	    desc_scsi_state, scsi_state);

	if (sc->mpr_debug & MPR_XINFO &&
	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
		scsi_sense_print(csio);
		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
	}

	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32toh(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		mpr_response_code(sc,response_bytes[0]);
	}
}
	/* The command completed, so its timeout callout is no longer needed. */
	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	/* cm_reply may be NULL (fast-path / error completion, handled below). */
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Undo the accounting done at submit time. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

	if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
		/* This command was being tracked by timeout recovery. */
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		if (cm->cm_reply != NULL)
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus,
			    rep->SCSIState, le32toh(rep->TransferCount));
		else
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		/* The target has an outstanding task management request. */
		if (cm->cm_reply != NULL)
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
		mprsas_log_command(cm, MPR_RECOVERY,
		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		ccb->ccb_h.status = CAM_REQUEUE_REQ;

		/*
		 * Currently the only error included in the mask is
		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
			mpr_dprint(sc, MPR_INFO, "Error sending command, "
			    "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
			else {
				/* No reply frame means implicit success. */
				ccb->ccb_h.status = CAM_REQ_CMP;
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
				mpr_dprint(sc, MPR_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
		 */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order with after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mpr_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mprsas_log_command(cm, MPR_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/* Translate the MPI2 IOC status into a CAM status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
		    == MPR_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			ccb->ccb_h.status = CAM_REQ_ABORTED;
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			ccb->ccb_h.status = CAM_REQ_CMP;
		else
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;

		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			/* Clamp the copied sense data to both the reply's
			 * SenseCount and the CCB's sense buffer size. */
			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < csio->sense_len)
				csio->sense_resid = csio->sense_len -
				    returned_sense_len;
			else
				csio->sense_resid = 0;

			sense_len = min(returned_sense_len,
			    csio->sense_len - csio->sense_resid);
			bzero(&csio->sense_data, sizeof(csio->sense_data));
			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
		    T_SEQUENTIAL) && (sc->control_TLR) &&
		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
			    TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* CDB bytes 3-4 hold the INQUIRY allocation length. */
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			ccb->ccb_h.status = CAM_REQ_CMP;
		else
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mpr_print_scsiio_cmd(sc, cm);
		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
		else
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * Since these are generally external (i.e. hopefully
		 * transient transport-related) errors, retry these without
		 * decrementing the retry count.
		 */
		ccb->ccb_h.status = CAM_REQUEUE_REQ;
		mprsas_log_command(cm, MPR_INFO,
		    "terminated ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mprsas_log_command(cm, MPR_XINFO,
		    "completed ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		break;
	}

	/* Emit the detailed (debug-level) failure translation. */
	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);

	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
		    "queue\n");
	}

	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mpr_free_command(sc, cm);
	xpt_done(ccb);
}

#if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough requests: translate the MPI2
 * SMP reply into CAM status and finish the CCB.
 */
static void
mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.
	 * It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR,"%s: cm_flags = %#x on SMP request!\n",
		    __func__, cm->cm_flags);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	/* Reassemble the 64-bit SAS address from the little-endian halves. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address "
	    "%#jx completed successfully\n", __func__, (uintmax_t)sasaddr);

	/* Byte 2 of an SMP response frame is the function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		ccb->ccb_h.status = CAM_REQ_CMP;
	else
		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mpr_free_command(sc, cm);
	xpt_done(ccb);
}

/*
 * Build and submit an SMP passthrough request for the given CCB to the
 * target at 'sasaddr'.  Completion is handled by mprsas_smpio_complete().
 */
static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
    uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

#if (__FreeBSD_version >= 1000028) || \
    ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR,
			    "%s: multiple request or response buffer segments "
			    "not supported for SMP\n", __func__);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}
#else /* __FreeBSD_version < 1000028 */
	/*
	 * XXX We don't yet support physical addresses here.
	 */
	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
		mpr_printf(sc, "%s: physical addresses not supported\n",
		    __func__);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}

	/*
	 * If the user wants to send an S/G list, check to make sure they
	 * have single buffers.
	 */
	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
	} else {
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
	}
#endif /* __FreeBSD_version < 1000028 */

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR,
		    "%s: cannot allocate command\n", __func__);
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
	    "%#jx\n", __func__, (uintmax_t)sasaddr);

	mpr_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mpr_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the request frame, iovec 1 the response buffer. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
	    cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mpr_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mprsas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mpr_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
	    MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
		    "mpr_map_command()\n", __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mpr_free_command(sc, cm);
	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	xpt_done(ccb);
	return;
}

/*
 * Handle an XPT_SMP_IO CCB: locate the SAS address to talk to (either
 * the target itself or its parent expander) and send the SMP command.
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			goto bailout;
		}

		if ((parent_target->devinfo &
		    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		if ((targ->parent_devinfo &
		    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */

	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
#endif //__FreeBSD_version >= 900026

/*
 * Handle an XPT_RESET_DEV CCB: issue a target-reset task management
 * request for the addressed device.  Completion is handled by
 * mprsas_resetdev_complete().
 */
static void
mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *tm;
	struct mprsas_target *targ;

	MPR_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_RESET_DEV\n",
	    ccb->ccb_h.target_id));
	sc = sassc->sc;
	tm = mpr_alloc_command(sc);
	if (tm == NULL) {
		mpr_dprint(sc, MPR_ERROR,
		    "command alloc failure in mprsas_action_resetdev\n");
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mprsas_resetdev_complete;
	tm->cm_complete_data = ccb;
	tm->cm_targ = targ;
	mpr_map_command(sc, tm);
}

/*
 * Completion handler for the target-reset task management request sent
 * by mprsas_action_resetdev().
 */
static void
mprsas_resetdev_complete(struct mpr_softc *sc, struct
    mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, req->DevHandle);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	mpr_dprint(sc, MPR_XINFO,
	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		ccb->ccb_h.status = CAM_REQ_CMP;
		/* Tell consumers a bus device reset was delivered. */
		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;

bailout:

	mprsas_free_tm(sc, tm);
	xpt_done(ccb);
}

/*
 * CAM SIM poll routine: service the controller interrupt handler with
 * the SIM lock held (used when interrupts are unavailable, e.g. panic).
 */
static void
mprsas_poll(struct cam_sim *sim)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	if (sassc->sc->mpr_debug & MPR_TRACE) {
		/* frequent debug messages during a panic just slow
		 * everything down too much.
		 */
		mpr_printf(sassc->sc, "%s clearing MPR_TRACE\n", __func__);
		sassc->sc->mpr_debug &= ~MPR_TRACE;
	}

	mpr_intr_locked(sassc->sc);
}

/*
 * CAM async event callback: track read-capacity changes for EEDP
 * bookkeeping and kick off Start/Stop Unit preparation on new devices.
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find (or create below) the per-LUN EEDP record. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/* Fetch the cached long read-capacity data via XPT. */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = 0;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/* Record whether the LUN is EEDP (protection) formatted. */
		if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#endif
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		cgd = arg;
		mprsas_prepare_ssu(sc, path, cgd);
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
		mprsas_check_eedp(sc, path, cgd);
#endif
		break;
	}
	default:
		break;
	}
}

static void
mprsas_prepare_ssu(struct mpr_softc *sc, struct cam_path *path,
    struct ccb_getdev *cgd)
3063 { 3064 struct mprsas_softc *sassc = sc->sassc; 3065 path_id_t pathid; 3066 target_id_t targetid; 3067 lun_id_t lunid; 3068 struct mprsas_target *target; 3069 struct mprsas_lun *lun; 3070 uint8_t found_lun; 3071 3072 sassc = sc->sassc; 3073 pathid = cam_sim_path(sassc->sim); 3074 targetid = xpt_path_target_id(path); 3075 lunid = xpt_path_lun_id(path); 3076 3077 KASSERT(targetid < sassc->maxtargets, 3078 ("Target %d out of bounds in mprsas_prepare_ssu\n", targetid)); 3079 target = &sassc->targets[targetid]; 3080 if (target->handle == 0x0) 3081 return; 3082 3083 /* 3084 * If LUN is already in list, don't create a new one. 3085 */ 3086 found_lun = FALSE; 3087 SLIST_FOREACH(lun, &target->luns, lun_link) { 3088 if (lun->lun_id == lunid) { 3089 found_lun = TRUE; 3090 break; 3091 } 3092 } 3093 if (!found_lun) { 3094 lun = malloc(sizeof(struct mprsas_lun), M_MPR, 3095 M_NOWAIT | M_ZERO); 3096 if (lun == NULL) { 3097 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for " 3098 "preparing SSU.\n"); 3099 return; 3100 } 3101 lun->lun_id = lunid; 3102 SLIST_INSERT_HEAD(&target->luns, lun, lun_link); 3103 } 3104 3105 /* 3106 * If this is a SATA direct-access end device, mark it so that a SCSI 3107 * StartStopUnit command will be sent to it when the driver is being 3108 * shutdown. 
3109 */ 3110 if (((cgd->inq_data.device & 0x1F) == T_DIRECT) && 3111 (target->devinfo & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) && 3112 ((target->devinfo & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) == 3113 MPI2_SAS_DEVICE_INFO_END_DEVICE)) { 3114 lun->stop_at_shutdown = TRUE; 3115 } 3116 } 3117 3118 #if (__FreeBSD_version < 901503) || \ 3119 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) 3120 static void 3121 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path, 3122 struct ccb_getdev *cgd) 3123 { 3124 struct mprsas_softc *sassc = sc->sassc; 3125 struct ccb_scsiio *csio; 3126 struct scsi_read_capacity_16 *scsi_cmd; 3127 struct scsi_read_capacity_eedp *rcap_buf; 3128 path_id_t pathid; 3129 target_id_t targetid; 3130 lun_id_t lunid; 3131 union ccb *ccb; 3132 struct cam_path *local_path; 3133 struct mprsas_target *target; 3134 struct mprsas_lun *lun; 3135 uint8_t found_lun; 3136 char path_str[64]; 3137 3138 sassc = sc->sassc; 3139 pathid = cam_sim_path(sassc->sim); 3140 targetid = xpt_path_target_id(path); 3141 lunid = xpt_path_lun_id(path); 3142 3143 KASSERT(targetid < sassc->maxtargets, 3144 ("Target %d out of bounds in mprsas_check_eedp\n", targetid)); 3145 target = &sassc->targets[targetid]; 3146 if (target->handle == 0x0) 3147 return; 3148 3149 /* 3150 * Determine if the device is EEDP capable. 3151 * 3152 * If this flag is set in the inquiry data, the device supports 3153 * protection information, and must support the 16 byte read capacity 3154 * command, otherwise continue without sending read cap 16 3155 */ 3156 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0) 3157 return; 3158 3159 /* 3160 * Issue a READ CAPACITY 16 command. This info is used to determine if 3161 * the LUN is formatted for EEDP support. 
3162 */ 3163 ccb = xpt_alloc_ccb_nowait(); 3164 if (ccb == NULL) { 3165 mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP " 3166 "support.\n"); 3167 return; 3168 } 3169 3170 if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) 3171 != CAM_REQ_CMP) { 3172 mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP " 3173 "support\n"); 3174 xpt_free_ccb(ccb); 3175 return; 3176 } 3177 3178 /* 3179 * If LUN is already in list, don't create a new one. 3180 */ 3181 found_lun = FALSE; 3182 SLIST_FOREACH(lun, &target->luns, lun_link) { 3183 if (lun->lun_id == lunid) { 3184 found_lun = TRUE; 3185 break; 3186 } 3187 } 3188 if (!found_lun) { 3189 lun = malloc(sizeof(struct mprsas_lun), M_MPR, 3190 M_NOWAIT | M_ZERO); 3191 if (lun == NULL) { 3192 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for " 3193 "EEDP support.\n"); 3194 xpt_free_path(local_path); 3195 xpt_free_ccb(ccb); 3196 return; 3197 } 3198 lun->lun_id = lunid; 3199 SLIST_INSERT_HEAD(&target->luns, lun, lun_link); 3200 } 3201 3202 xpt_path_string(local_path, path_str, sizeof(path_str)); 3203 mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n", 3204 path_str, target->handle); 3205 3206 /* 3207 * Issue a READ CAPACITY 16 command for the LUN. The 3208 * mprsas_read_cap_done function will load the read cap info into the 3209 * LUN struct. 
3210 */ 3211 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR, 3212 M_NOWAIT | M_ZERO); 3213 if (rcap_buf == NULL) { 3214 mpr_dprint(sc, MPR_FAULT, "Unable to alloc read capacity " 3215 "buffer for EEDP support.\n"); 3216 xpt_free_path(ccb->ccb_h.path); 3217 xpt_free_ccb(ccb); 3218 return; 3219 } 3220 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT); 3221 csio = &ccb->csio; 3222 csio->ccb_h.func_code = XPT_SCSI_IO; 3223 csio->ccb_h.flags = CAM_DIR_IN; 3224 csio->ccb_h.retry_count = 4; 3225 csio->ccb_h.cbfcnp = mprsas_read_cap_done; 3226 csio->ccb_h.timeout = 60000; 3227 csio->data_ptr = (uint8_t *)rcap_buf; 3228 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp); 3229 csio->sense_len = MPR_SENSE_LEN; 3230 csio->cdb_len = sizeof(*scsi_cmd); 3231 csio->tag_action = MSG_SIMPLE_Q_TAG; 3232 3233 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes; 3234 bzero(scsi_cmd, sizeof(*scsi_cmd)); 3235 scsi_cmd->opcode = 0x9E; 3236 scsi_cmd->service_action = SRC16_SERVICE_ACTION; 3237 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp); 3238 3239 ccb->ccb_h.ppriv_ptr1 = sassc; 3240 xpt_action(ccb); 3241 } 3242 3243 static void 3244 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb) 3245 { 3246 struct mprsas_softc *sassc; 3247 struct mprsas_target *target; 3248 struct mprsas_lun *lun; 3249 struct scsi_read_capacity_eedp *rcap_buf; 3250 3251 if (done_ccb == NULL) 3252 return; 3253 3254 /* Driver need to release devq, it Scsi command is 3255 * generated by driver internally. 3256 * Currently there is a single place where driver 3257 * calls scsi command internally. In future if driver 3258 * calls more scsi command internally, it needs to release 3259 * devq internally, since those command will not go back to 3260 * cam_periph. 
3261 */ 3262 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) { 3263 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 3264 xpt_release_devq(done_ccb->ccb_h.path, 3265 /*count*/ 1, /*run_queue*/TRUE); 3266 } 3267 3268 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr; 3269 3270 /* 3271 * Get the LUN ID for the path and look it up in the LUN list for the 3272 * target. 3273 */ 3274 sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1; 3275 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, 3276 ("Target %d out of bounds in mprsas_read_cap_done\n", 3277 done_ccb->ccb_h.target_id)); 3278 target = &sassc->targets[done_ccb->ccb_h.target_id]; 3279 SLIST_FOREACH(lun, &target->luns, lun_link) { 3280 if (lun->lun_id != done_ccb->ccb_h.target_lun) 3281 continue; 3282 3283 /* 3284 * Got the LUN in the target's LUN list. Fill it in with EEDP 3285 * info. If the READ CAP 16 command had some SCSI error (common 3286 * if command is not supported), mark the lun as not supporting 3287 * EEDP and set the block size to 0. 3288 */ 3289 if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) 3290 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) { 3291 lun->eedp_formatted = FALSE; 3292 lun->eedp_block_size = 0; 3293 break; 3294 } 3295 3296 if (rcap_buf->protect & 0x01) { 3297 mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for " 3298 "target ID %d is formatted for EEDP " 3299 "support.\n", done_ccb->ccb_h.target_lun, 3300 done_ccb->ccb_h.target_id); 3301 lun->eedp_formatted = TRUE; 3302 lun->eedp_block_size = scsi_4btoul(rcap_buf->length); 3303 } 3304 break; 3305 } 3306 3307 // Finished with this CCB and path. 
3308 free(rcap_buf, M_MPR); 3309 xpt_free_path(done_ccb->ccb_h.path); 3310 xpt_free_ccb(done_ccb); 3311 } 3312 #endif /* (__FreeBSD_version < 901503) || \ 3313 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */ 3314 3315 int 3316 mprsas_startup(struct mpr_softc *sc) 3317 { 3318 /* 3319 * Send the port enable message and set the wait_for_port_enable flag. 3320 * This flag helps to keep the simq frozen until all discovery events 3321 * are processed. 3322 */ 3323 sc->wait_for_port_enable = 1; 3324 mprsas_send_portenable(sc); 3325 return (0); 3326 } 3327 3328 static int 3329 mprsas_send_portenable(struct mpr_softc *sc) 3330 { 3331 MPI2_PORT_ENABLE_REQUEST *request; 3332 struct mpr_command *cm; 3333 3334 MPR_FUNCTRACE(sc); 3335 3336 if ((cm = mpr_alloc_command(sc)) == NULL) 3337 return (EBUSY); 3338 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req; 3339 request->Function = MPI2_FUNCTION_PORT_ENABLE; 3340 request->MsgFlags = 0; 3341 request->VP_ID = 0; 3342 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 3343 cm->cm_complete = mprsas_portenable_complete; 3344 cm->cm_data = NULL; 3345 cm->cm_sge = NULL; 3346 3347 mpr_map_command(sc, cm); 3348 mpr_dprint(sc, MPR_XINFO, 3349 "mpr_send_portenable finished cm %p req %p complete %p\n", 3350 cm, cm->cm_req, cm->cm_complete); 3351 return (0); 3352 } 3353 3354 static void 3355 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm) 3356 { 3357 MPI2_PORT_ENABLE_REPLY *reply; 3358 struct mprsas_softc *sassc; 3359 3360 MPR_FUNCTRACE(sc); 3361 sassc = sc->sassc; 3362 3363 /* 3364 * Currently there should be no way we can hit this case. It only 3365 * happens when we have a failure to allocate chain frames, and 3366 * port enable commands don't have S/G lists. 3367 */ 3368 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 3369 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! 
" 3370 "This should not happen!\n", __func__, cm->cm_flags); 3371 } 3372 3373 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply; 3374 if (reply == NULL) 3375 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n"); 3376 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) != 3377 MPI2_IOCSTATUS_SUCCESS) 3378 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n"); 3379 3380 mpr_free_command(sc, cm); 3381 if (sc->mpr_ich.ich_arg != NULL) { 3382 mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n"); 3383 config_intrhook_disestablish(&sc->mpr_ich); 3384 sc->mpr_ich.ich_arg = NULL; 3385 } 3386 3387 /* 3388 * Done waiting for port enable to complete. Decrement the refcount. 3389 * If refcount is 0, discovery is complete and a rescan of the bus can 3390 * take place. 3391 */ 3392 sc->wait_for_port_enable = 0; 3393 sc->port_enable_complete = 1; 3394 wakeup(&sc->port_enable_complete); 3395 mprsas_startup_decrement(sassc); 3396 } 3397 3398 int 3399 mprsas_check_id(struct mprsas_softc *sassc, int id) 3400 { 3401 struct mpr_softc *sc = sassc->sc; 3402 char *ids; 3403 char *name; 3404 3405 ids = &sc->exclude_ids[0]; 3406 while((name = strsep(&ids, ",")) != NULL) { 3407 if (name[0] == '\0') 3408 continue; 3409 if (strtol(name, NULL, 0) == (long)id) 3410 return (1); 3411 } 3412 3413 return (0); 3414 } 3415