1 /*- 2 * Copyright (c) 2009 Yahoo! Inc. 3 * Copyright (c) 2011-2014 LSI Corp. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 /* Communications core for LSI MPT2 */ 32 33 /* TODO Move headers to mprvar */ 34 #include <sys/types.h> 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/kernel.h> 38 #include <sys/selinfo.h> 39 #include <sys/module.h> 40 #include <sys/bus.h> 41 #include <sys/conf.h> 42 #include <sys/bio.h> 43 #include <sys/malloc.h> 44 #include <sys/uio.h> 45 #include <sys/sysctl.h> 46 #include <sys/endian.h> 47 #include <sys/queue.h> 48 #include <sys/kthread.h> 49 #include <sys/taskqueue.h> 50 #include <sys/sbuf.h> 51 52 #include <machine/bus.h> 53 #include <machine/resource.h> 54 #include <sys/rman.h> 55 56 #include <machine/stdarg.h> 57 58 #include <cam/cam.h> 59 #include <cam/cam_ccb.h> 60 #include <cam/cam_debug.h> 61 #include <cam/cam_sim.h> 62 #include <cam/cam_xpt_sim.h> 63 #include <cam/cam_xpt_periph.h> 64 #include <cam/cam_periph.h> 65 #include <cam/scsi/scsi_all.h> 66 #include <cam/scsi/scsi_message.h> 67 #if __FreeBSD_version >= 900026 68 #include <cam/scsi/smp_all.h> 69 #endif 70 71 #include <dev/mpr/mpi/mpi2_type.h> 72 #include <dev/mpr/mpi/mpi2.h> 73 #include <dev/mpr/mpi/mpi2_ioc.h> 74 #include <dev/mpr/mpi/mpi2_sas.h> 75 #include <dev/mpr/mpi/mpi2_cnfg.h> 76 #include <dev/mpr/mpi/mpi2_init.h> 77 #include <dev/mpr/mpi/mpi2_tool.h> 78 #include <dev/mpr/mpr_ioctl.h> 79 #include <dev/mpr/mprvar.h> 80 #include <dev/mpr/mpr_table.h> 81 #include <dev/mpr/mpr_sas.h> 82 83 #define MPRSAS_DISCOVERY_TIMEOUT 20 84 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */ 85 86 /* 87 * static array to check SCSI OpCode for EEDP protection bits 88 */ 89 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP 90 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP 91 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP 92 static uint8_t op_code_prot[256] = { 93 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 94 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 95 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 
0, PRO_W, PRO_V, 96 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 97 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 98 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 101 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, 102 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 103 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V, 104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 107 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 108 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 109 }; 110 111 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory"); 112 113 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *); 114 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *); 115 static void mprsas_action(struct cam_sim *sim, union ccb *ccb); 116 static void mprsas_poll(struct cam_sim *sim); 117 static void mprsas_scsiio_timeout(void *data); 118 static void mprsas_abort_complete(struct mpr_softc *sc, 119 struct mpr_command *cm); 120 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *); 121 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *); 122 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *); 123 static void mprsas_resetdev_complete(struct mpr_softc *, 124 struct mpr_command *); 125 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm, 126 struct mpr_command *cm); 127 static int mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, 128 uint8_t type); 129 static void mprsas_async(void *callback_arg, uint32_t code, 130 struct cam_path *path, void *arg); 131 static void mprsas_prepare_ssu(struct mpr_softc *sc, struct cam_path *path, 132 struct ccb_getdev *cgd); 133 #if (__FreeBSD_version < 901503) || \ 
134 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) 135 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path, 136 struct ccb_getdev *cgd); 137 static void mprsas_read_cap_done(struct cam_periph *periph, 138 union ccb *done_ccb); 139 #endif 140 static int mprsas_send_portenable(struct mpr_softc *sc); 141 static void mprsas_portenable_complete(struct mpr_softc *sc, 142 struct mpr_command *cm); 143 144 #if __FreeBSD_version >= 900026 145 static void 146 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm); 147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, 148 union ccb *ccb, uint64_t sasaddr); 149 static void 150 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb); 151 #endif 152 153 struct mprsas_target * 154 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start, 155 uint16_t handle) 156 { 157 struct mprsas_target *target; 158 int i; 159 160 for (i = start; i < sassc->maxtargets; i++) { 161 target = &sassc->targets[i]; 162 if (target->handle == handle) 163 return (target); 164 } 165 166 return (NULL); 167 } 168 169 /* we need to freeze the simq during attach and diag reset, to avoid failing 170 * commands before device handles have been found by discovery. Since 171 * discovery involves reading config pages and possibly sending commands, 172 * discovery actions may continue even after we receive the end of discovery 173 * event, so refcount discovery actions instead of assuming we can unfreeze 174 * the simq when we get the event. 
175 */ 176 void 177 mprsas_startup_increment(struct mprsas_softc *sassc) 178 { 179 MPR_FUNCTRACE(sassc->sc); 180 181 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) { 182 if (sassc->startup_refcount++ == 0) { 183 /* just starting, freeze the simq */ 184 mpr_dprint(sassc->sc, MPR_INIT, 185 "%s freezing simq\n", __func__); 186 #if (__FreeBSD_version >= 1000039) || \ 187 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502)) 188 xpt_hold_boot(); 189 #endif 190 xpt_freeze_simq(sassc->sim, 1); 191 } 192 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__, 193 sassc->startup_refcount); 194 } 195 } 196 197 void 198 mprsas_release_simq_reinit(struct mprsas_softc *sassc) 199 { 200 if (sassc->flags & MPRSAS_QUEUE_FROZEN) { 201 sassc->flags &= ~MPRSAS_QUEUE_FROZEN; 202 xpt_release_simq(sassc->sim, 1); 203 mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n"); 204 } 205 } 206 207 void 208 mprsas_startup_decrement(struct mprsas_softc *sassc) 209 { 210 MPR_FUNCTRACE(sassc->sc); 211 212 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) { 213 if (--sassc->startup_refcount == 0) { 214 /* finished all discovery-related actions, release 215 * the simq and rescan for the latest topology. 216 */ 217 mpr_dprint(sassc->sc, MPR_INIT, 218 "%s releasing simq\n", __func__); 219 sassc->flags &= ~MPRSAS_IN_STARTUP; 220 xpt_release_simq(sassc->sim, 1); 221 #if (__FreeBSD_version >= 1000039) || \ 222 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502)) 223 xpt_release_boot(); 224 #else 225 mprsas_rescan_target(sassc->sc, NULL); 226 #endif 227 } 228 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__, 229 sassc->startup_refcount); 230 } 231 } 232 233 /* LSI's firmware requires us to stop sending commands when we're doing task 234 * management, so refcount the TMs and keep the simq frozen when any are in 235 * use. 
236 */ 237 struct mpr_command * 238 mprsas_alloc_tm(struct mpr_softc *sc) 239 { 240 struct mpr_command *tm; 241 242 MPR_FUNCTRACE(sc); 243 tm = mpr_alloc_high_priority_command(sc); 244 if (tm != NULL) { 245 if (sc->sassc->tm_count++ == 0) { 246 mpr_dprint(sc, MPR_RECOVERY, 247 "%s freezing simq\n", __func__); 248 xpt_freeze_simq(sc->sassc->sim, 1); 249 } 250 mpr_dprint(sc, MPR_RECOVERY, "%s tm_count %u\n", __func__, 251 sc->sassc->tm_count); 252 } 253 return tm; 254 } 255 256 void 257 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm) 258 { 259 mpr_dprint(sc, MPR_TRACE, "%s", __func__); 260 if (tm == NULL) 261 return; 262 263 /* if there are no TMs in use, we can release the simq. We use our 264 * own refcount so that it's easier for a diag reset to cleanup and 265 * release the simq. 266 */ 267 if (--sc->sassc->tm_count == 0) { 268 mpr_dprint(sc, MPR_RECOVERY, "%s releasing simq\n", __func__); 269 xpt_release_simq(sc->sassc->sim, 1); 270 } 271 mpr_dprint(sc, MPR_RECOVERY, "%s tm_count %u\n", __func__, 272 sc->sassc->tm_count); 273 274 mpr_free_high_priority_command(sc, tm); 275 } 276 277 void 278 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ) 279 { 280 struct mprsas_softc *sassc = sc->sassc; 281 path_id_t pathid; 282 target_id_t targetid; 283 union ccb *ccb; 284 285 MPR_FUNCTRACE(sc); 286 pathid = cam_sim_path(sassc->sim); 287 if (targ == NULL) 288 targetid = CAM_TARGET_WILDCARD; 289 else 290 targetid = targ - sassc->targets; 291 292 /* 293 * Allocate a CCB and schedule a rescan. 
294 */ 295 ccb = xpt_alloc_ccb_nowait(); 296 if (ccb == NULL) { 297 mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n"); 298 return; 299 } 300 301 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, 302 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 303 mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n"); 304 xpt_free_ccb(ccb); 305 return; 306 } 307 308 if (targetid == CAM_TARGET_WILDCARD) 309 ccb->ccb_h.func_code = XPT_SCAN_BUS; 310 else 311 ccb->ccb_h.func_code = XPT_SCAN_TGT; 312 313 mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid); 314 xpt_rescan(ccb); 315 } 316 317 static void 318 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...) 319 { 320 struct sbuf sb; 321 va_list ap; 322 char str[192]; 323 char path_str[64]; 324 325 if (cm == NULL) 326 return; 327 328 /* No need to be in here if debugging isn't enabled */ 329 if ((cm->cm_sc->mpr_debug & level) == 0) 330 return; 331 332 sbuf_new(&sb, str, sizeof(str), 0); 333 334 va_start(ap, fmt); 335 336 if (cm->cm_ccb != NULL) { 337 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str, 338 sizeof(path_str)); 339 sbuf_cat(&sb, path_str); 340 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) { 341 scsi_command_string(&cm->cm_ccb->csio, &sb); 342 sbuf_printf(&sb, "length %d ", 343 cm->cm_ccb->csio.dxfer_len); 344 } 345 } else { 346 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ", 347 cam_sim_name(cm->cm_sc->sassc->sim), 348 cam_sim_unit(cm->cm_sc->sassc->sim), 349 cam_sim_bus(cm->cm_sc->sassc->sim), 350 cm->cm_targ ? 
cm->cm_targ->tid : 0xFFFFFFFF, 351 cm->cm_lun); 352 } 353 354 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID); 355 sbuf_vprintf(&sb, fmt, ap); 356 sbuf_finish(&sb); 357 mpr_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb)); 358 359 va_end(ap); 360 } 361 362 static void 363 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm) 364 { 365 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 366 struct mprsas_target *targ; 367 uint16_t handle; 368 369 MPR_FUNCTRACE(sc); 370 371 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 372 handle = (uint16_t)(uintptr_t)tm->cm_complete_data; 373 targ = tm->cm_targ; 374 375 if (reply == NULL) { 376 /* XXX retry the remove after the diag reset completes? */ 377 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device " 378 "0x%04x\n", __func__, handle); 379 mprsas_free_tm(sc, tm); 380 return; 381 } 382 383 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) { 384 mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting " 385 "device 0x%x\n", reply->IOCStatus, handle); 386 mprsas_free_tm(sc, tm); 387 return; 388 } 389 390 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n", 391 reply->TerminationCount); 392 mpr_free_reply(sc, tm->cm_reply_data); 393 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */ 394 395 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n", 396 targ->tid, handle); 397 398 /* 399 * Don't clear target if remove fails because things will get confusing. 400 * Leave the devname and sasaddr intact so that we know to avoid reusing 401 * this target id if possible, and so we can assign the same target id 402 * to this device if it comes back in the future. 
403 */ 404 if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) { 405 targ = tm->cm_targ; 406 targ->handle = 0x0; 407 targ->encl_handle = 0x0; 408 targ->encl_level_valid = 0x0; 409 targ->encl_level = 0x0; 410 targ->connector_name[0] = ' '; 411 targ->connector_name[1] = ' '; 412 targ->connector_name[2] = ' '; 413 targ->connector_name[3] = ' '; 414 targ->encl_slot = 0x0; 415 targ->exp_dev_handle = 0x0; 416 targ->phy_num = 0x0; 417 targ->linkrate = 0x0; 418 targ->devinfo = 0x0; 419 targ->flags = 0x0; 420 targ->scsi_req_desc_type = 0; 421 } 422 423 mprsas_free_tm(sc, tm); 424 } 425 426 427 /* 428 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal. 429 * Otherwise Volume Delete is same as Bare Drive Removal. 430 */ 431 void 432 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle) 433 { 434 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 435 struct mpr_softc *sc; 436 struct mpr_command *cm; 437 struct mprsas_target *targ = NULL; 438 439 MPR_FUNCTRACE(sassc->sc); 440 sc = sassc->sc; 441 442 targ = mprsas_find_target_by_handle(sassc, 0, handle); 443 if (targ == NULL) { 444 /* FIXME: what is the action? */ 445 /* We don't know about this device? 
*/ 446 mpr_dprint(sc, MPR_ERROR, 447 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle); 448 return; 449 } 450 451 targ->flags |= MPRSAS_TARGET_INREMOVAL; 452 453 cm = mprsas_alloc_tm(sc); 454 if (cm == NULL) { 455 mpr_dprint(sc, MPR_ERROR, 456 "%s: command alloc failure\n", __func__); 457 return; 458 } 459 460 mprsas_rescan_target(sc, targ); 461 462 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req; 463 req->DevHandle = targ->handle; 464 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 465 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 466 467 /* SAS Hard Link Reset / SATA Link Reset */ 468 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 469 470 cm->cm_targ = targ; 471 cm->cm_data = NULL; 472 cm->cm_desc.HighPriority.RequestFlags = 473 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 474 cm->cm_complete = mprsas_remove_volume; 475 cm->cm_complete_data = (void *)(uintptr_t)handle; 476 mpr_map_command(sc, cm); 477 } 478 479 /* 480 * The MPT2 firmware performs debounce on the link to avoid transient link 481 * errors and false removals. When it does decide that link has been lost 482 * and a device needs to go away, it expects that the host will perform a 483 * target reset and then an op remove. The reset has the side-effect of 484 * aborting any outstanding requests for the device, which is required for 485 * the op-remove to succeed. It's not clear if the host should check for 486 * the device coming back alive after the reset. 487 */ 488 void 489 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle) 490 { 491 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 492 struct mpr_softc *sc; 493 struct mpr_command *cm; 494 struct mprsas_target *targ = NULL; 495 496 MPR_FUNCTRACE(sassc->sc); 497 498 sc = sassc->sc; 499 500 targ = mprsas_find_target_by_handle(sassc, 0, handle); 501 if (targ == NULL) { 502 /* FIXME: what is the action? */ 503 /* We don't know about this device? 
*/ 504 mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n", 505 __func__, handle); 506 return; 507 } 508 509 targ->flags |= MPRSAS_TARGET_INREMOVAL; 510 511 cm = mprsas_alloc_tm(sc); 512 if (cm == NULL) { 513 mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n", 514 __func__); 515 return; 516 } 517 518 mprsas_rescan_target(sc, targ); 519 520 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req; 521 memset(req, 0, sizeof(*req)); 522 req->DevHandle = htole16(targ->handle); 523 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 524 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 525 526 /* SAS Hard Link Reset / SATA Link Reset */ 527 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 528 529 cm->cm_targ = targ; 530 cm->cm_data = NULL; 531 cm->cm_desc.HighPriority.RequestFlags = 532 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 533 cm->cm_complete = mprsas_remove_device; 534 cm->cm_complete_data = (void *)(uintptr_t)handle; 535 mpr_map_command(sc, cm); 536 } 537 538 static void 539 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm) 540 { 541 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 542 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req; 543 struct mprsas_target *targ; 544 struct mpr_command *next_cm; 545 uint16_t handle; 546 547 MPR_FUNCTRACE(sc); 548 549 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 550 handle = (uint16_t)(uintptr_t)tm->cm_complete_data; 551 targ = tm->cm_targ; 552 553 /* 554 * Currently there should be no way we can hit this case. It only 555 * happens when we have a failure to allocate chain frames, and 556 * task management commands don't have S/G lists. 557 */ 558 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 559 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of " 560 "handle %#04x! This should not happen!\n", __func__, 561 tm->cm_flags, handle); 562 mprsas_free_tm(sc, tm); 563 return; 564 } 565 566 if (reply == NULL) { 567 /* XXX retry the remove after the diag reset completes? 
*/ 568 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device " 569 "0x%04x\n", __func__, handle); 570 mprsas_free_tm(sc, tm); 571 return; 572 } 573 574 if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) { 575 mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting " 576 "device 0x%x\n", le16toh(reply->IOCStatus), handle); 577 mprsas_free_tm(sc, tm); 578 return; 579 } 580 581 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n", 582 le32toh(reply->TerminationCount)); 583 mpr_free_reply(sc, tm->cm_reply_data); 584 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */ 585 586 /* Reuse the existing command */ 587 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req; 588 memset(req, 0, sizeof(*req)); 589 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; 590 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE; 591 req->DevHandle = htole16(handle); 592 tm->cm_data = NULL; 593 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 594 tm->cm_complete = mprsas_remove_complete; 595 tm->cm_complete_data = (void *)(uintptr_t)handle; 596 597 mpr_map_command(sc, tm); 598 599 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n", 600 targ->tid, handle); 601 if (targ->encl_level_valid) { 602 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, " 603 "connector name (%4s)\n", targ->encl_level, targ->encl_slot, 604 targ->connector_name); 605 } 606 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) { 607 union ccb *ccb; 608 609 mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm); 610 ccb = tm->cm_complete_data; 611 ccb->ccb_h.status = CAM_DEV_NOT_THERE; 612 mprsas_scsiio_complete(sc, tm); 613 } 614 } 615 616 static void 617 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm) 618 { 619 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply; 620 uint16_t handle; 621 struct mprsas_target *targ; 622 struct mprsas_lun *lun; 623 624 MPR_FUNCTRACE(sc); 625 626 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply; 627 
handle = (uint16_t)(uintptr_t)tm->cm_complete_data; 628 629 /* 630 * Currently there should be no way we can hit this case. It only 631 * happens when we have a failure to allocate chain frames, and 632 * task management commands don't have S/G lists. 633 */ 634 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 635 mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of " 636 "handle %#04x! This should not happen!\n", __func__, 637 tm->cm_flags, handle); 638 mprsas_free_tm(sc, tm); 639 return; 640 } 641 642 if (reply == NULL) { 643 /* most likely a chip reset */ 644 mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device " 645 "0x%04x\n", __func__, handle); 646 mprsas_free_tm(sc, tm); 647 return; 648 } 649 650 mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n", 651 __func__, handle, le16toh(reply->IOCStatus)); 652 653 /* 654 * Don't clear target if remove fails because things will get confusing. 655 * Leave the devname and sasaddr intact so that we know to avoid reusing 656 * this target id if possible, and so we can assign the same target id 657 * to this device if it comes back in the future. 
658 */ 659 if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) { 660 targ = tm->cm_targ; 661 targ->handle = 0x0; 662 targ->encl_handle = 0x0; 663 targ->encl_level_valid = 0x0; 664 targ->encl_level = 0x0; 665 targ->connector_name[0] = ' '; 666 targ->connector_name[1] = ' '; 667 targ->connector_name[2] = ' '; 668 targ->connector_name[3] = ' '; 669 targ->encl_slot = 0x0; 670 targ->exp_dev_handle = 0x0; 671 targ->phy_num = 0x0; 672 targ->linkrate = 0x0; 673 targ->devinfo = 0x0; 674 targ->flags = 0x0; 675 targ->scsi_req_desc_type = 0; 676 677 while (!SLIST_EMPTY(&targ->luns)) { 678 lun = SLIST_FIRST(&targ->luns); 679 SLIST_REMOVE_HEAD(&targ->luns, lun_link); 680 free(lun, M_MPR); 681 } 682 } 683 684 mprsas_free_tm(sc, tm); 685 } 686 687 static int 688 mprsas_register_events(struct mpr_softc *sc) 689 { 690 uint8_t events[16]; 691 692 bzero(events, 16); 693 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); 694 setbit(events, MPI2_EVENT_SAS_DISCOVERY); 695 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE); 696 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE); 697 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW); 698 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST); 699 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE); 700 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST); 701 setbit(events, MPI2_EVENT_IR_VOLUME); 702 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK); 703 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS); 704 setbit(events, MPI2_EVENT_TEMP_THRESHOLD); 705 706 mpr_register_events(sc, events, mprsas_evt_handler, NULL, 707 &sc->sassc->mprsas_eh); 708 709 return (0); 710 } 711 712 int 713 mpr_attach_sas(struct mpr_softc *sc) 714 { 715 struct mprsas_softc *sassc; 716 cam_status status; 717 int unit, error = 0; 718 719 MPR_FUNCTRACE(sc); 720 721 sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO); 722 if (!sassc) { 723 device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n", 724 __func__, __LINE__); 725 return 
(ENOMEM); 726 } 727 728 /* 729 * XXX MaxTargets could change during a reinit. since we don't 730 * resize the targets[] array during such an event, cache the value 731 * of MaxTargets here so that we don't get into trouble later. This 732 * should move into the reinit logic. 733 */ 734 sassc->maxtargets = sc->facts->MaxTargets; 735 sassc->targets = malloc(sizeof(struct mprsas_target) * 736 sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO); 737 if (!sassc->targets) { 738 device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n", 739 __func__, __LINE__); 740 free(sassc, M_MPR); 741 return (ENOMEM); 742 } 743 sc->sassc = sassc; 744 sassc->sc = sc; 745 746 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) { 747 mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n"); 748 error = ENOMEM; 749 goto out; 750 } 751 752 unit = device_get_unit(sc->mpr_dev); 753 sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc, 754 unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq); 755 if (sassc->sim == NULL) { 756 mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n"); 757 error = EINVAL; 758 goto out; 759 } 760 761 TAILQ_INIT(&sassc->ev_queue); 762 763 /* Initialize taskqueue for Event Handling */ 764 TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc); 765 sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO, 766 taskqueue_thread_enqueue, &sassc->ev_tq); 767 taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq", 768 device_get_nameunit(sc->mpr_dev)); 769 770 mpr_lock(sc); 771 772 /* 773 * XXX There should be a bus for every port on the adapter, but since 774 * we're just going to fake the topology for now, we'll pretend that 775 * everything is just a target on a single bus. 776 */ 777 if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) { 778 mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n", 779 error); 780 mpr_unlock(sc); 781 goto out; 782 } 783 784 /* 785 * Assume that discovery events will start right away. 
Freezing 786 * 787 * Hold off boot until discovery is complete. 788 */ 789 sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY; 790 sc->sassc->startup_refcount = 0; 791 mprsas_startup_increment(sassc); 792 793 callout_init(&sassc->discovery_callout, 1 /*mprafe*/); 794 795 sassc->tm_count = 0; 796 797 /* 798 * Register for async events so we can determine the EEDP 799 * capabilities of devices. 800 */ 801 status = xpt_create_path(&sassc->path, /*periph*/NULL, 802 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD, 803 CAM_LUN_WILDCARD); 804 if (status != CAM_REQ_CMP) { 805 mpr_printf(sc, "Error %#x creating sim path\n", status); 806 sassc->path = NULL; 807 } else { 808 int event; 809 810 #if (__FreeBSD_version >= 1000006) || \ 811 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000)) 812 event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE; 813 #else 814 event = AC_FOUND_DEVICE; 815 #endif 816 817 /* 818 * Prior to the CAM locking improvements, we can't call 819 * xpt_register_async() with a particular path specified. 820 * 821 * If a path isn't specified, xpt_register_async() will 822 * generate a wildcard path and acquire the XPT lock while 823 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB. 824 * It will then drop the XPT lock once that is done. 825 * 826 * If a path is specified for xpt_register_async(), it will 827 * not acquire and drop the XPT lock around the call to 828 * xpt_action(). xpt_action() asserts that the caller 829 * holds the SIM lock, so the SIM lock has to be held when 830 * calling xpt_register_async() when the path is specified. 831 * 832 * But xpt_register_async calls xpt_for_all_devices(), 833 * which calls xptbustraverse(), which will acquire each 834 * SIM lock. When it traverses our particular bus, it will 835 * necessarily acquire the SIM lock, which will lead to a 836 * recursive lock acquisition. 
837 * 838 * The CAM locking changes fix this problem by acquiring 839 * the XPT topology lock around bus traversal in 840 * xptbustraverse(), so the caller can hold the SIM lock 841 * and it does not cause a recursive lock acquisition. 842 * 843 * These __FreeBSD_version values are approximate, especially 844 * for stable/10, which is two months later than the actual 845 * change. 846 */ 847 848 #if (__FreeBSD_version < 1000703) || \ 849 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002)) 850 mpr_unlock(sc); 851 status = xpt_register_async(event, mprsas_async, sc, 852 NULL); 853 mpr_lock(sc); 854 #else 855 status = xpt_register_async(event, mprsas_async, sc, 856 sassc->path); 857 #endif 858 859 if (status != CAM_REQ_CMP) { 860 mpr_dprint(sc, MPR_ERROR, 861 "Error %#x registering async handler for " 862 "AC_ADVINFO_CHANGED events\n", status); 863 xpt_free_path(sassc->path); 864 sassc->path = NULL; 865 } 866 } 867 if (status != CAM_REQ_CMP) { 868 /* 869 * EEDP use is the exception, not the rule. 870 * Warn the user, but do not fail to attach. 871 */ 872 mpr_printf(sc, "EEDP capabilities disabled.\n"); 873 } 874 875 mpr_unlock(sc); 876 877 mprsas_register_events(sc); 878 out: 879 if (error) 880 mpr_detach_sas(sc); 881 return (error); 882 } 883 884 int 885 mpr_detach_sas(struct mpr_softc *sc) 886 { 887 struct mprsas_softc *sassc; 888 struct mprsas_lun *lun, *lun_tmp; 889 struct mprsas_target *targ; 890 int i; 891 892 MPR_FUNCTRACE(sc); 893 894 if (sc->sassc == NULL) 895 return (0); 896 897 sassc = sc->sassc; 898 mpr_deregister_events(sc, sassc->mprsas_eh); 899 900 /* 901 * Drain and free the event handling taskqueue with the lock 902 * unheld so that any parallel processing tasks drain properly 903 * without deadlocking. 904 */ 905 if (sassc->ev_tq != NULL) 906 taskqueue_free(sassc->ev_tq); 907 908 /* Make sure CAM doesn't wedge if we had to bail out early. 
*/ 909 mpr_lock(sc); 910 911 /* Deregister our async handler */ 912 if (sassc->path != NULL) { 913 xpt_register_async(0, mprsas_async, sc, sassc->path); 914 xpt_free_path(sassc->path); 915 sassc->path = NULL; 916 } 917 918 if (sassc->flags & MPRSAS_IN_STARTUP) 919 xpt_release_simq(sassc->sim, 1); 920 921 if (sassc->sim != NULL) { 922 xpt_bus_deregister(cam_sim_path(sassc->sim)); 923 cam_sim_free(sassc->sim, FALSE); 924 } 925 926 sassc->flags |= MPRSAS_SHUTDOWN; 927 mpr_unlock(sc); 928 929 if (sassc->devq != NULL) 930 cam_simq_free(sassc->devq); 931 932 for (i = 0; i < sassc->maxtargets; i++) { 933 targ = &sassc->targets[i]; 934 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) { 935 free(lun, M_MPR); 936 } 937 } 938 free(sassc->targets, M_MPR); 939 free(sassc, M_MPR); 940 sc->sassc = NULL; 941 942 return (0); 943 } 944 945 void 946 mprsas_discovery_end(struct mprsas_softc *sassc) 947 { 948 struct mpr_softc *sc = sassc->sc; 949 950 MPR_FUNCTRACE(sc); 951 952 if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING) 953 callout_stop(&sassc->discovery_callout); 954 955 } 956 957 static void 958 mprsas_action(struct cam_sim *sim, union ccb *ccb) 959 { 960 struct mprsas_softc *sassc; 961 962 sassc = cam_sim_softc(sim); 963 964 MPR_FUNCTRACE(sassc->sc); 965 mpr_dprint(sassc->sc, MPR_TRACE, "%s func 0x%x\n", __func__, 966 ccb->ccb_h.func_code); 967 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED); 968 969 switch (ccb->ccb_h.func_code) { 970 case XPT_PATH_INQ: 971 { 972 struct ccb_pathinq *cpi = &ccb->cpi; 973 974 cpi->version_num = 1; 975 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 976 cpi->target_sprt = 0; 977 #if (__FreeBSD_version >= 1000039) || \ 978 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502)) 979 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN; 980 #else 981 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; 982 #endif 983 cpi->hba_eng_cnt = 0; 984 cpi->max_target = sassc->maxtargets - 1; 985 cpi->max_lun = 255; 986 cpi->initiator_id 
= sassc->maxtargets - 1; 987 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 988 strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN); 989 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 990 cpi->unit_number = cam_sim_unit(sim); 991 cpi->bus_id = cam_sim_bus(sim); 992 /* 993 * XXXSLM-I think this needs to change based on config page or 994 * something instead of hardcoded to 150000. 995 */ 996 cpi->base_transfer_speed = 150000; 997 cpi->transport = XPORT_SAS; 998 cpi->transport_version = 0; 999 cpi->protocol = PROTO_SCSI; 1000 cpi->protocol_version = SCSI_REV_SPC; 1001 #if __FreeBSD_version >= 800001 1002 /* 1003 * XXXSLM-probably need to base this number on max SGL's and 1004 * page size. 1005 */ 1006 cpi->maxio = 256 * 1024; 1007 #endif 1008 cpi->ccb_h.status = CAM_REQ_CMP; 1009 break; 1010 } 1011 case XPT_GET_TRAN_SETTINGS: 1012 { 1013 struct ccb_trans_settings *cts; 1014 struct ccb_trans_settings_sas *sas; 1015 struct ccb_trans_settings_scsi *scsi; 1016 struct mprsas_target *targ; 1017 1018 cts = &ccb->cts; 1019 sas = &cts->xport_specific.sas; 1020 scsi = &cts->proto_specific.scsi; 1021 1022 KASSERT(cts->ccb_h.target_id < sassc->maxtargets, 1023 ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n", 1024 cts->ccb_h.target_id)); 1025 targ = &sassc->targets[cts->ccb_h.target_id]; 1026 if (targ->handle == 0x0) { 1027 cts->ccb_h.status = CAM_DEV_NOT_THERE; 1028 break; 1029 } 1030 1031 cts->protocol_version = SCSI_REV_SPC2; 1032 cts->transport = XPORT_SAS; 1033 cts->transport_version = 0; 1034 1035 sas->valid = CTS_SAS_VALID_SPEED; 1036 switch (targ->linkrate) { 1037 case 0x08: 1038 sas->bitrate = 150000; 1039 break; 1040 case 0x09: 1041 sas->bitrate = 300000; 1042 break; 1043 case 0x0a: 1044 sas->bitrate = 600000; 1045 break; 1046 default: 1047 sas->valid = 0; 1048 } 1049 1050 cts->protocol = PROTO_SCSI; 1051 scsi->valid = CTS_SCSI_VALID_TQ; 1052 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 1053 1054 cts->ccb_h.status = CAM_REQ_CMP; 1055 break; 1056 } 1057 case 
XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_RESET_DEV:
		mpr_dprint(sassc->sc, MPR_XINFO,
		    "mprsas_action XPT_RESET_DEV\n");
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* These are not implemented; report success so CAM proceeds. */
		mpr_dprint(sassc->sc, MPR_XINFO,
		    "mprsas_action faking success for abort or reset\n");
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_SCSI_IO:
		mprsas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mprsas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	}
	xpt_done(ccb);

}

/*
 * Post an async event (e.g. AC_BUS_RESET, AC_SENT_BDR) to CAM for the
 * given target/lun on this SIM's bus.  Best-effort: if the path cannot be
 * created the notification is simply dropped with an error message.
 */
static void
mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
    target_id_t target_id, lun_id_t lun_id)
{
	path_id_t path_id = cam_sim_path(sc->sassc->sim);
	struct cam_path *path;

	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
	    ac_code, target_id, (uintmax_t)lun_id);

	if (xpt_create_path(&path, NULL,
		path_id, target_id, lun_id) != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
		    "notification\n");
		return;
	}

	xpt_async(ac_code, path, NULL);
	xpt_free_path(path);
}

/*
 * Force-complete every active command with a NULL reply.  Used during diag
 * reset recovery, when the firmware will never answer anything that was in
 * flight.  Called with the driver mutex held.
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Polled commands spin on the COMPLETE flag; set it so the
		 * poller unblocks. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		}

		if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}

/*
 * Post-diag-reset recovery: put the SAS layer back into startup/discovery
 * mode, tell CAM the bus was reset, flush all in-flight commands, and
 * invalidate every cached device handle.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u tm %u after command "
	    "completion\n", __func__, sc->sassc->startup_refcount,
	    sc->sassc->tm_count);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
1187 */ 1188 for (i = 0; i < sc->sassc->maxtargets; i++) { 1189 if (sc->sassc->targets[i].outstanding != 0) 1190 mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n", 1191 i, sc->sassc->targets[i].outstanding); 1192 sc->sassc->targets[i].handle = 0x0; 1193 sc->sassc->targets[i].exp_dev_handle = 0x0; 1194 sc->sassc->targets[i].outstanding = 0; 1195 sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET; 1196 } 1197 } 1198 static void 1199 mprsas_tm_timeout(void *data) 1200 { 1201 struct mpr_command *tm = data; 1202 struct mpr_softc *sc = tm->cm_sc; 1203 1204 mtx_assert(&sc->mpr_mtx, MA_OWNED); 1205 1206 mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, 1207 "task mgmt %p timed out\n", tm); 1208 mpr_reinit(sc); 1209 } 1210 1211 static void 1212 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, 1213 struct mpr_command *tm) 1214 { 1215 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 1216 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 1217 unsigned int cm_count = 0; 1218 struct mpr_command *cm; 1219 struct mprsas_target *targ; 1220 1221 callout_stop(&tm->cm_callout); 1222 1223 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 1224 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 1225 targ = tm->cm_targ; 1226 1227 /* 1228 * Currently there should be no way we can hit this case. It only 1229 * happens when we have a failure to allocate chain frames, and 1230 * task management commands don't have S/G lists. 1231 */ 1232 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 1233 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! 
" 1234 "This should not happen!\n", __func__, tm->cm_flags); 1235 mprsas_free_tm(sc, tm); 1236 return; 1237 } 1238 1239 if (reply == NULL) { 1240 mprsas_log_command(tm, MPR_RECOVERY, 1241 "NULL reset reply for tm %p\n", tm); 1242 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) { 1243 /* this completion was due to a reset, just cleanup */ 1244 targ->flags &= ~MPRSAS_TARGET_INRESET; 1245 targ->tm = NULL; 1246 mprsas_free_tm(sc, tm); 1247 } 1248 else { 1249 /* we should have gotten a reply. */ 1250 mpr_reinit(sc); 1251 } 1252 return; 1253 } 1254 1255 mprsas_log_command(tm, MPR_RECOVERY, 1256 "logical unit reset status 0x%x code 0x%x count %u\n", 1257 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), 1258 le32toh(reply->TerminationCount)); 1259 1260 /* See if there are any outstanding commands for this LUN. 1261 * This could be made more efficient by using a per-LU data 1262 * structure of some sort. 1263 */ 1264 TAILQ_FOREACH(cm, &targ->commands, cm_link) { 1265 if (cm->cm_lun == tm->cm_lun) 1266 cm_count++; 1267 } 1268 1269 if (cm_count == 0) { 1270 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO, 1271 "logical unit %u finished recovery after reset\n", 1272 tm->cm_lun, tm); 1273 1274 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, 1275 tm->cm_lun); 1276 1277 /* we've finished recovery for this logical unit. check and 1278 * see if some other logical unit has a timedout command 1279 * that needs to be processed. 1280 */ 1281 cm = TAILQ_FIRST(&targ->timedout_commands); 1282 if (cm) { 1283 mprsas_send_abort(sc, tm, cm); 1284 } 1285 else { 1286 targ->tm = NULL; 1287 mprsas_free_tm(sc, tm); 1288 } 1289 } 1290 else { 1291 /* if we still have commands for this LUN, the reset 1292 * effectively failed, regardless of the status reported. 1293 * Escalate to a target reset. 
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "logical unit reset complete for tm %p, but still have %u "
		    "command(s)\n", tm, cm_count);
		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
	}
}

/*
 * Completion handler for a TARGET_RESET task management command.  If the
 * target still has outstanding commands after the reset, the reset
 * effectively failed and the only remaining escalation is a full
 * controller reinit.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR,"%s: cm_flags = %#x for target reset! "
		    "This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "NULL reset reply for tm %p\n", tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->flags &= ~MPRSAS_TARGET_INRESET;
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_reinit(sc);
		}
		return;
	}

	mprsas_log_command(tm, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	targ->flags &= ~MPRSAS_TARGET_INRESET;

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "recovery finished after target reset\n");

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "target reset complete for tm %p, but still have %u "
		    "command(s)\n", tm, targ->outstanding);
		mpr_reinit(sc);
	}
}

#define MPR_RESET_TIMEOUT 30

/*
 * Build and send a LOGICAL_UNIT_RESET or TARGET_RESET task management
 * request for the target attached to tm.  Returns 0 on success, -1 for a
 * missing device handle or unknown reset type, or the mpr_map_command()
 * error.  A timeout callout is armed; mprsas_tm_timeout() forces a diag
 * reset if the TM never completes.
 */
static int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
		    __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "sending logical unit reset\n");
		tm->cm_complete = mprsas_logical_unit_reset_complete;
	}
	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		tm->cm_targ->flags |= MPRSAS_TARGET_INRESET;
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "sending target reset\n");
		tm->cm_complete =
		    mprsas_target_reset_complete;
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	mpr_dprint(sc, MPR_XINFO, "to target %u handle 0x%04x\n", target->tid,
	    target->handle);
	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", target->encl_level,
		    target->encl_slot, target->connector_name);
	}

	/* TM requests carry no data and go on the high-priority queue. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mprsas_log_command(tm, MPR_RECOVERY,
		    "error %d sending reset type %u\n",
		    err, type);

	return err;
}


/*
 * Completion handler for an ABORT_TASK task management command.  Depending
 * on what is left on the target's timed-out list this either finishes
 * recovery, continues with the next abort, or escalates to a logical unit
 * reset.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_reinit(sc);
		}
		return;
	}

	mprsas_log_command(tm, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mprsas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
1520 */ 1521 mprsas_log_command(tm, MPR_RECOVERY, 1522 "abort failed for TaskMID %u tm %p\n", 1523 le16toh(req->TaskMID), tm); 1524 1525 mprsas_send_reset(sc, tm, 1526 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET); 1527 } 1528 } 1529 1530 #define MPR_ABORT_TIMEOUT 5 1531 1532 static int 1533 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm, 1534 struct mpr_command *cm) 1535 { 1536 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 1537 struct mprsas_target *targ; 1538 int err; 1539 1540 targ = cm->cm_targ; 1541 if (targ->handle == 0) { 1542 mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n", 1543 __func__, cm->cm_ccb->ccb_h.target_id); 1544 return -1; 1545 } 1546 1547 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO, 1548 "Aborting command %p\n", cm); 1549 1550 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 1551 req->DevHandle = htole16(targ->handle); 1552 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 1553 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK; 1554 1555 /* XXX Need to handle invalid LUNs */ 1556 MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun); 1557 1558 req->TaskMID = htole16(cm->cm_desc.Default.SMID); 1559 1560 tm->cm_data = NULL; 1561 tm->cm_desc.HighPriority.RequestFlags = 1562 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 1563 tm->cm_complete = mprsas_abort_complete; 1564 tm->cm_complete_data = (void *)tm; 1565 tm->cm_targ = cm->cm_targ; 1566 tm->cm_lun = cm->cm_lun; 1567 1568 callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz, 1569 mprsas_tm_timeout, tm); 1570 1571 targ->aborts++; 1572 1573 err = mpr_map_command(sc, tm); 1574 if (err) 1575 mprsas_log_command(tm, MPR_RECOVERY, 1576 "error %d sending abort for cm %p SMID %u\n", 1577 err, cm, req->TaskMID); 1578 return err; 1579 } 1580 1581 1582 static void 1583 mprsas_scsiio_timeout(void *data) 1584 { 1585 struct mpr_softc *sc; 1586 struct mpr_command *cm; 1587 struct mprsas_target *targ; 1588 1589 cm = (struct mpr_command *)data; 1590 sc = cm->cm_sc; 1591 1592 MPR_FUNCTRACE(sc); 
1593 mtx_assert(&sc->mpr_mtx, MA_OWNED); 1594 1595 mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm); 1596 1597 /* 1598 * Run the interrupt handler to make sure it's not pending. This 1599 * isn't perfect because the command could have already completed 1600 * and been re-used, though this is unlikely. 1601 */ 1602 mpr_intr_locked(sc); 1603 if (cm->cm_state == MPR_CM_STATE_FREE) { 1604 mprsas_log_command(cm, MPR_XINFO, 1605 "SCSI command %p almost timed out\n", cm); 1606 return; 1607 } 1608 1609 if (cm->cm_ccb == NULL) { 1610 mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n"); 1611 return; 1612 } 1613 1614 targ = cm->cm_targ; 1615 targ->timeouts++; 1616 1617 mprsas_log_command(cm, MPR_XINFO, "command timeout cm %p ccb %p " 1618 "target %u, handle(0x%04x)\n", cm, cm->cm_ccb, targ->tid, 1619 targ->handle); 1620 if (targ->encl_level_valid) { 1621 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, " 1622 "connector name (%4s)\n", targ->encl_level, targ->encl_slot, 1623 targ->connector_name); 1624 } 1625 1626 /* XXX first, check the firmware state, to see if it's still 1627 * operational. if not, do a diag reset. 1628 */ 1629 1630 cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT; 1631 cm->cm_state = MPR_CM_STATE_TIMEDOUT; 1632 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery); 1633 1634 if (targ->tm != NULL) { 1635 /* target already in recovery, just queue up another 1636 * timedout command to be processed later. 1637 */ 1638 mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for " 1639 "processing by tm %p\n", cm, targ->tm); 1640 } 1641 else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) { 1642 mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n", 1643 cm, targ->tm); 1644 1645 /* start recovery by aborting the first timedout command */ 1646 mprsas_send_abort(sc, targ->tm, cm); 1647 } 1648 else { 1649 /* XXX queue this target up for recovery once a TM becomes 1650 * available. 
		 * The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}

/*
 * Translate an XPT_SCSI_IO CCB into an MPI2 SCSI IO request and submit it
 * to the controller.  Runs with the driver mutex held.  On resource
 * shortage the simq is frozen and the CCB requeued; various not-there /
 * shutdown conditions complete the CCB immediately.
 */
static void
mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;
	struct mpr_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;
	uint32_t mpi_control;

	sc = sassc->sc;
	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	csio = &ccb->csio;
	targ = &sassc->targets[csio->ccb_h.target_id];
	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}
	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
		mpr_dprint(sc, MPR_TRACE, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			csio->ccb_h.status = CAM_REQ_CMP;
		else
			csio->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
		mpr_dprint(sc, MPR_TRACE, "%s shutting down\n", __func__);
		csio->ccb_h.status = CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

	cm = mpr_alloc_command(sc);
	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpr_free_command(sc, cm);
		}
		/* Out of command frames (or mid-reset): freeze the simq and
		 * ask CAM to requeue this CCB later. */
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPR_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional
	 * transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* 32-byte CDBs need the additional-CDB-length field set. */
	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);

	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mpr_free_command(sc, cm);
		ccb->ccb_h.status = CAM_LUN_INVALID;
		xpt_done(ccb);
		return;
	}

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	/* NOTE(review): IoFlags was already set to cdb_len above; this
	 * second assignment is redundant but harmless. */
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* lun is NULL here if the LUN id is not found in the list. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 of a 16-byte CDB,
				 * byte 2 otherwise. */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->
				    CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		cm->cm_data = ccb;
		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	/* SGL occupies the request frame from 32-bit word 24 to word 32. */
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;
	/*
	 * If using FP desc
	 * type, need to set a bit in IoFlags (SCSI IO is 0)
	 * and set descriptor type.
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		cm->cm_desc.FastPathSCSIIO.DevHandle = htole16(targ->handle);
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

	/* The CCB timeout is in milliseconds; convert to ticks. */
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	    mprsas_scsiio_timeout, cm);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_map_command(sc, cm);
	return;
}

/*
 * Log a human-readable description of an MPI2 task management response
 * code (debug output only).
 */
static void
mpr_response_code(struct mpr_softc *sc, u8 response_code)
{
	char *desc;

	switch (response_code) {
	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
		desc = "task management request completed";
		break;
	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
		desc = "invalid frame";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
		desc = "task management request not supported";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
		desc = "task management request failed";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
		desc = "task management request succeeded";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
		desc = "invalid lun";
		break;
	case 0xA:
		/* 0xA has no symbolic name in the MPI2 headers in use. */
		desc = "overlapped tag attempted";
		break;
	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
		desc = "task queued, however not sent to target";
		break;
	default:
		desc = "unknown";
		break;
	}
	mpr_dprint(sc, MPR_XINFO, "response_code(0x%01x): %s\n", response_code,
	    desc);
}

/**
 * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
 */
static void
mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
    Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/* scratch buffer in the softc used to build the state string */
	char *desc_scsi_state = sc->tmp_string;
	u32 log_info = le32toh(mpi_reply->IOCLogInfo);

	/* NOTE(review): log_info 0x31170000 is deliberately suppressed;
	 * presumably a benign/expected firmware log code — confirm. */
	if (log_info == 0x31170000)
		return;

	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task
mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	desc_scsi_state[0] = '\0';
	/* No state bits set: print a single space instead of the buffer. */
	if (!scsi_state)
		desc_scsi_state = " ";
	/* NOTE(review): the strcat chain below assumes sc->tmp_string is
	 * large enough for all state descriptions combined — confirm. */
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
	/* We can add more detail about underflow data here
	 * TO-DO
	 * */
	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
	    desc_scsi_state, scsi_state);

	if (sc->mpr_debug & MPR_XINFO &&
	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
		scsi_sense_print(csio);
		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
	}

	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		/* Byte 0 of the response info holds the TM response code. */
		response_info = le32toh(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		mpr_response_code(sc,response_bytes[0]);
	}
}

static void
mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SCSI_IO_REPLY *rep;
	union ccb *ccb;
	struct ccb_scsiio *csio;
	struct mprsas_softc *sassc;
	struct scsi_vpd_supported_page_list *vpd_list = NULL;
	u8 *TLR_bits, TLR_on;
	int dir = 0, i;
	u16 alloc_len;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_TRACE,
	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
	    cm->cm_targ->outstanding);

	callout_stop(&cm->cm_callout);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	sassc = sc->sassc;
	ccb = cm->cm_complete_data;
	csio = &ccb->csio;
	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
	/*
	 * XXX KDM if the chain allocation fails, does it matter if we do
	 * the sync and unload here?  It is simpler to do it in every case,
	 * assuming it doesn't cause problems.
	 */
	if (cm->cm_data != NULL) {
		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
			dir = BUS_DMASYNC_POSTREAD;
		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
			dir = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/* Per-target accounting; the command leaves the active list here. */
	cm->cm_targ->completed++;
	cm->cm_targ->outstanding--;
	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);

	/* Log completions that race with timeout/TM recovery or a reset. */
	if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
		if (cm->cm_reply != NULL)
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
			    le16toh(rep->IOCStatus), rep->SCSIStatus,
			    rep->SCSIState, le32toh(rep->TransferCount));
		else
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed timedout cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if (cm->cm_targ->tm != NULL) {
		if (cm->cm_reply != NULL)
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed cm %p ccb %p during recovery "
			    "ioc %x scsi %x state %x xfer %u\n",
			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
			    rep->SCSIStatus, rep->SCSIState,
			    le32toh(rep->TransferCount));
		else
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completed cm %p ccb %p during recovery\n",
			    cm, cm->cm_ccb);
	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
		mprsas_log_command(cm, MPR_RECOVERY,
		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
	}

	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		/*
		 * We ran into an error after we tried to map the command,
		 * so we're getting a callback without queueing the command
		 * to the hardware.  So we set the status here, and it will
		 * be retained below.  We'll go through the "fast path",
		 * because there can be no reply when we haven't actually
		 * gone out to the hardware.
		 */
		ccb->ccb_h.status = CAM_REQUEUE_REQ;

		/*
		 * Currently the only error included in the mask is
		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
		 * chain frames.  We need to freeze the queue until we get
		 * a command that completed without this error, which will
		 * hopefully have some chain frames attached that we can
		 * use.  If we wanted to get smarter about it, we would
		 * only unfreeze the queue in this condition when we're
		 * sure that we're getting some chain frames back.  That's
		 * probably unnecessary.
		 */
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
			mpr_dprint(sc, MPR_INFO, "Error sending command, "
			    "freezing SIM queue\n");
		}
	}

	/*
	 * If this is a Start Stop Unit command and it was issued by the driver
	 * during shutdown, decrement the refcount to account for all of the
	 * commands that were sent.  All SSU commands should be completed before
	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
	 * is TRUE.
	 */
	if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
		sc->SSU_refcount--;
	}

	/* Take the fast path to completion */
	if (cm->cm_reply == NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
			else {
				/* No reply and no recorded error: success. */
				ccb->ccb_h.status = CAM_REQ_CMP;
				ccb->csio.scsi_status = SCSI_STATUS_OK;
			}
			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
				mpr_dprint(sc, MPR_XINFO,
				    "Unfreezing SIM queue\n");
			}
		}

		/*
		 * There are two scenarios where the status won't be
		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
		 */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			/*
			 * Freeze the dev queue so that commands are
			 * executed in the correct order with after error
			 * recovery.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
		}
		mpr_free_command(sc, cm);
		xpt_done(ccb);
		return;
	}

	mprsas_log_command(cm, MPR_XINFO,
	    "ioc %x scsi %x state %x xfer %u\n",
	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
	    le32toh(rep->TransferCount));

	/* Map the firmware IOCStatus onto a CAM CCB status. */
	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
		/* FALLTHROUGH */
	case MPI2_IOCSTATUS_SUCCESS:
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:

		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");

		/* Completion failed at the transport level. */
		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
		    MPI2_SCSI_STATE_TERMINATED)) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			break;
		}

		/* In a modern packetized environment, an autosense failure
		 * implies that there's not much else that can be done to
		 * recover the command.
		 */
		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		}

		/*
		 * CAM doesn't care about SAS Response Info data, but if this is
		 * the state check if TLR should be done.  If not, clear the
		 * TLR_bits for the target.
		 */
		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
		    == MPR_SCSI_RI_INVALID_FRAME)) {
			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
		}

		/*
		 * Intentionally override the normal SCSI status reporting
		 * for these two cases.  These are likely to happen in a
		 * multi-initiator environment, and we want to make sure that
		 * CAM retries these commands rather than fail them.
		 */
		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
			ccb->ccb_h.status = CAM_REQ_ABORTED;
			break;
		}

		/* Handle normal status and sense */
		csio->scsi_status = rep->SCSIStatus;
		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
			ccb->ccb_h.status = CAM_REQ_CMP;
		else
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;

		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			int sense_len, returned_sense_len;

			/* Copy sense data, clamped to the CCB's sense size. */
			returned_sense_len = min(le32toh(rep->SenseCount),
			    sizeof(struct scsi_sense_data));
			if (returned_sense_len < csio->sense_len)
				csio->sense_resid = csio->sense_len -
				    returned_sense_len;
			else
				csio->sense_resid = 0;

			sense_len = min(returned_sense_len,
			    csio->sense_len - csio->sense_resid);
			bzero(&csio->sense_data, sizeof(csio->sense_data));
			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		}

		/*
		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
		 * and it's page code 0 (Supported Page List), and there is
		 * inquiry data, and this is for a sequential access device, and
		 * the device is an SSP target, and TLR is supported by the
		 * controller, turn the TLR_bits value ON if page 0x90 is
		 * supported.
		 */
		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
		    (csio->data_ptr != NULL) &&
		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
		    (sc->control_TLR) &&
		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
			vpd_list = (struct scsi_vpd_supported_page_list *)
			    csio->data_ptr;
			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
			    TLR_bits;
			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
			/* CDB bytes 3-4: INQUIRY allocation length (BE16). */
			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
			    csio->cdb_io.cdb_bytes[4];
			alloc_len -= csio->resid;
			/* Scan the supported-page list for page 0x90 (TLR). */
			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
				if (vpd_list->list[i] == 0x90) {
					*TLR_bits = TLR_on;
					break;
				}
			}
		}
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * If devinfo is 0 this will be a volume.  In that case don't
		 * tell CAM that the volume is not there.  We want volumes to
		 * be enumerated until they are deleted/removed, not just
		 * failed.
		 */
		if (cm->cm_targ->devinfo == 0)
			ccb->ccb_h.status = CAM_REQ_CMP;
		else
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		mpr_print_scsiio_cmd(sc, cm);
		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		/*
		 * This is one of the responses that comes back when an I/O
		 * has been aborted.  If it is because of a timeout that we
		 * initiated, just set the status to CAM_CMD_TIMEOUT.
		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
		 * command is the same (it gets retried, subject to the
		 * retry counter), the only difference is what gets printed
		 * on the console.
		 */
		if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
		else
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		/* resid is ignored for this condition */
		csio->resid = 0;
		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		/*
		 * Since these are generally external (i.e. hopefully
		 * transient transport-related) errors, retry these without
		 * decrementing the retry count.
		 */
		ccb->ccb_h.status = CAM_REQUEUE_REQ;
		mprsas_log_command(cm, MPR_INFO,
		    "terminated ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_VPID:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		mprsas_log_command(cm, MPR_XINFO,
		    "completed ioc %x scsi %x state %x xfer %u\n",
		    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
		    le32toh(rep->TransferCount));
		csio->resid = cm->cm_length;
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		break;
	}

	/*
	 * NOTE(review): called unconditionally, including on success; the
	 * callee only emits output at MPR_XINFO debug level.
	 */
	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);

	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
		    "queue\n");
	}

	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
	}

	mpr_free_command(sc, cm);
	xpt_done(ccb);
}

#if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough commands sent by
 * mprsas_send_smpcmd().  Translates the firmware reply (or its absence)
 * into a CAM CCB status, then syncs/unloads the bidirectional DMA map,
 * frees the command, and completes the CCB.
 */
static void
mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.
	 * It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR,"%s: cm_flags = %#x on SMP request!\n",
		    __func__, cm->cm_flags);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	/* Recover the 64-bit SAS address from the original request. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address "
	    "%#jx completed successfully\n", __func__, (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame is the SMP function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		ccb->ccb_h.status = CAM_REQ_CMP;
	else
		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mpr_free_command(sc, cm);
	xpt_done(ccb);
}

/*
 * Build and send an SMP passthrough request to the given SAS address.
 * Resolves the request/response buffer pointers from the CCB (virtual
 * addresses or single-segment S/G lists only; physical addresses are
 * rejected), then sets up a two-element uio (request out, response in)
 * and hands the command to mpr_map_command().  Completion is handled by
 * mprsas_smpio_complete().  On any setup error, the CCB is completed
 * here with an error status.
 */
static void
mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
    uint64_t sasaddr)
{
	struct mpr_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mpr_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

#if (__FreeBSD_version >= 1000028) || \
    ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		/*
		 * XXX We don't yet support physical addresses here.
		 */
		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
		    "supported\n", __func__);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR,
			    "%s: multiple request or response buffer segments "
			    "not supported for SMP\n", __func__);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}
#else /* __FreeBSD_version < 1000028 */
	/*
	 * XXX We don't yet support physical addresses here.
	 */
	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
		mpr_printf(sc, "%s: physical addresses not supported\n",
		    __func__);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}

	/*
	 * If the user wants to send an S/G list, check to make sure they
	 * have single buffers.
	 */
	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
			    "response buffer segments not supported for SMP\n",
			    __func__);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
	} else {
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
	}
#endif /* __FreeBSD_version < 1000028 */

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		mpr_dprint(sc, MPR_ERROR,
		    "%s: cannot allocate command\n", __func__);
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
	    "%#jx\n", __func__, (uintmax_t)sasaddr);

	mpr_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mpr_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the request out; iovec 1 receives the response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
	    cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mpr_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mprsas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mpr_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
	    MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
		    "mpr_map_command()\n", __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mpr_free_command(sc, cm);
	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	xpt_done(ccb);
	return;
}

/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address to send the SMP
 * request to (the target itself if it is an SMP target, otherwise its
 * parent expander) and dispatch via mprsas_send_smpcmd().
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.
	 * So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		/* Old probe path: look the parent up by device handle. */
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			goto bailout;
		}

		if ((parent_target->devinfo &
		    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/* New probe path: parent info is cached on the target. */
		if ((targ->parent_devinfo &
		    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */

	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
#endif //__FreeBSD_version >= 900026

/*
 * Handle an XPT_RESET_DEV CCB: allocate a task management command and
 * issue a target reset (with SAS hard link reset / SATA link reset) to
 * the addressed target.  Completion is handled asynchronously by
 * mprsas_resetdev_complete().  Called with the mpr mutex held.
 */
static void
mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpr_softc *sc;
	struct mpr_command *tm;
	struct mprsas_target *targ;

	MPR_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_RESET_DEV\n",
	    ccb->ccb_h.target_id));
	sc = sassc->sc;
	tm = mpr_alloc_command(sc);
	if (tm == NULL) {
		mpr_dprint(sc, MPR_ERROR,
		    "command alloc failure in mprsas_action_resetdev\n");
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mprsas_resetdev_complete;
	tm->cm_complete_data = ccb;
	tm->cm_targ = targ;
	mpr_map_command(sc, tm);
}

/*
 * Completion handler for the target reset issued by
 * mprsas_action_resetdev().  Maps the task management response code to
 * a CAM status, announces the reset (AC_SENT_BDR) on success, frees the
 * TM command, and completes the originating CCB.
 */
static void
mprsas_resetdev_complete(struct mpr_softc *sc, struct
    mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, req->DevHandle);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	mpr_dprint(sc, MPR_XINFO,
	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	/* Announce the BDR to CAM only when the TM actually completed. */
	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		ccb->ccb_h.status = CAM_REQ_CMP;
		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;

bailout:

	mprsas_free_tm(sc, tm);
	xpt_done(ccb);
}

/*
 * CAM SIM poll routine.  Drops the (very chatty) MPR_TRACE debug flag
 * if set, then runs the interrupt handler to drain completions.
 */
static void
mprsas_poll(struct cam_sim *sim)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	if (sassc->sc->mpr_debug & MPR_TRACE) {
		/* frequent debug messages during a panic just slow
		 * everything down too much.
		 */
		mpr_printf(sassc->sc, "%s clearing MPR_TRACE\n", __func__);
		sassc->sc->mpr_debug &= ~MPR_TRACE;
	}

	mpr_intr_locked(sassc->sc);
}

/*
 * CAM async event callback.  Handles AC_ADVINFO_CHANGED (re-reads the
 * long read-capacity data to track EEDP formatting per LUN, on FreeBSD
 * versions that deliver it) and AC_FOUND_DEVICE (prepares Start/Stop
 * Unit handling and, on older versions, EEDP checks).
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;
#endif

		/*
		 * We should have a handle for this, but check to make sure.
3048 */ 3049 KASSERT(xpt_path_target_id(path) < sassc->maxtargets, 3050 ("Target %d out of bounds in mprsas_async\n", 3051 xpt_path_target_id(path))); 3052 target = &sassc->targets[xpt_path_target_id(path)]; 3053 if (target->handle == 0) 3054 break; 3055 3056 lunid = xpt_path_lun_id(path); 3057 3058 SLIST_FOREACH(lun, &target->luns, lun_link) { 3059 if (lun->lun_id == lunid) { 3060 found_lun = 1; 3061 break; 3062 } 3063 } 3064 3065 if (found_lun == 0) { 3066 lun = malloc(sizeof(struct mprsas_lun), M_MPR, 3067 M_NOWAIT | M_ZERO); 3068 if (lun == NULL) { 3069 mpr_dprint(sc, MPR_ERROR, "Unable to alloc " 3070 "LUN for EEDP support.\n"); 3071 break; 3072 } 3073 lun->lun_id = lunid; 3074 SLIST_INSERT_HEAD(&target->luns, lun, lun_link); 3075 } 3076 3077 bzero(&rcap_buf, sizeof(rcap_buf)); 3078 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); 3079 cdai.ccb_h.func_code = XPT_DEV_ADVINFO; 3080 cdai.ccb_h.flags = CAM_DIR_IN; 3081 cdai.buftype = CDAI_TYPE_RCAPLONG; 3082 cdai.flags = 0; 3083 cdai.bufsiz = sizeof(rcap_buf); 3084 cdai.buf = (uint8_t *)&rcap_buf; 3085 xpt_action((union ccb *)&cdai); 3086 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) 3087 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); 3088 3089 if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) 3090 && (rcap_buf.prot & SRC16_PROT_EN)) { 3091 lun->eedp_formatted = TRUE; 3092 lun->eedp_block_size = scsi_4btoul(rcap_buf.length); 3093 } else { 3094 lun->eedp_formatted = FALSE; 3095 lun->eedp_block_size = 0; 3096 } 3097 break; 3098 } 3099 #endif 3100 case AC_FOUND_DEVICE: { 3101 struct ccb_getdev *cgd; 3102 3103 /* 3104 * See the comment in mpr_attach_sas() for a detailed 3105 * explanation. In these versions of FreeBSD we register 3106 * for all events and filter out the events that don't 3107 * apply to us. 
3108 */ 3109 #if (__FreeBSD_version < 1000703) || \ 3110 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002)) 3111 if (xpt_path_path_id(path) != sc->sassc->sim->path_id) 3112 break; 3113 #endif 3114 3115 cgd = arg; 3116 mprsas_prepare_ssu(sc, path, cgd); 3117 3118 #if (__FreeBSD_version < 901503) || \ 3119 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) 3120 mprsas_check_eedp(sc, path, cgd); 3121 #endif 3122 break; 3123 } 3124 default: 3125 break; 3126 } 3127 } 3128 3129 static void 3130 mprsas_prepare_ssu(struct mpr_softc *sc, struct cam_path *path, 3131 struct ccb_getdev *cgd) 3132 { 3133 struct mprsas_softc *sassc = sc->sassc; 3134 path_id_t pathid; 3135 target_id_t targetid; 3136 lun_id_t lunid; 3137 struct mprsas_target *target; 3138 struct mprsas_lun *lun; 3139 uint8_t found_lun; 3140 3141 sassc = sc->sassc; 3142 pathid = cam_sim_path(sassc->sim); 3143 targetid = xpt_path_target_id(path); 3144 lunid = xpt_path_lun_id(path); 3145 3146 KASSERT(targetid < sassc->maxtargets, 3147 ("Target %d out of bounds in mprsas_prepare_ssu\n", targetid)); 3148 target = &sassc->targets[targetid]; 3149 if (target->handle == 0x0) 3150 return; 3151 3152 /* 3153 * If LUN is already in list, don't create a new one. 3154 */ 3155 found_lun = FALSE; 3156 SLIST_FOREACH(lun, &target->luns, lun_link) { 3157 if (lun->lun_id == lunid) { 3158 found_lun = TRUE; 3159 break; 3160 } 3161 } 3162 if (!found_lun) { 3163 lun = malloc(sizeof(struct mprsas_lun), M_MPR, 3164 M_NOWAIT | M_ZERO); 3165 if (lun == NULL) { 3166 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for " 3167 "preparing SSU.\n"); 3168 return; 3169 } 3170 lun->lun_id = lunid; 3171 SLIST_INSERT_HEAD(&target->luns, lun, lun_link); 3172 } 3173 3174 /* 3175 * If this is a SATA direct-access end device, mark it so that a SCSI 3176 * StartStopUnit command will be sent to it when the driver is being 3177 * shutdown. 
3178 */ 3179 if (((cgd->inq_data.device & 0x1F) == T_DIRECT) && 3180 (target->devinfo & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) && 3181 ((target->devinfo & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) == 3182 MPI2_SAS_DEVICE_INFO_END_DEVICE)) { 3183 lun->stop_at_shutdown = TRUE; 3184 } 3185 } 3186 3187 #if (__FreeBSD_version < 901503) || \ 3188 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) 3189 static void 3190 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path, 3191 struct ccb_getdev *cgd) 3192 { 3193 struct mprsas_softc *sassc = sc->sassc; 3194 struct ccb_scsiio *csio; 3195 struct scsi_read_capacity_16 *scsi_cmd; 3196 struct scsi_read_capacity_eedp *rcap_buf; 3197 path_id_t pathid; 3198 target_id_t targetid; 3199 lun_id_t lunid; 3200 union ccb *ccb; 3201 struct cam_path *local_path; 3202 struct mprsas_target *target; 3203 struct mprsas_lun *lun; 3204 uint8_t found_lun; 3205 char path_str[64]; 3206 3207 sassc = sc->sassc; 3208 pathid = cam_sim_path(sassc->sim); 3209 targetid = xpt_path_target_id(path); 3210 lunid = xpt_path_lun_id(path); 3211 3212 KASSERT(targetid < sassc->maxtargets, 3213 ("Target %d out of bounds in mprsas_check_eedp\n", targetid)); 3214 target = &sassc->targets[targetid]; 3215 if (target->handle == 0x0) 3216 return; 3217 3218 /* 3219 * Determine if the device is EEDP capable. 3220 * 3221 * If this flag is set in the inquiry data, the device supports 3222 * protection information, and must support the 16 byte read capacity 3223 * command, otherwise continue without sending read cap 16 3224 */ 3225 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0) 3226 return; 3227 3228 /* 3229 * Issue a READ CAPACITY 16 command. This info is used to determine if 3230 * the LUN is formatted for EEDP support. 
3231 */ 3232 ccb = xpt_alloc_ccb_nowait(); 3233 if (ccb == NULL) { 3234 mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP " 3235 "support.\n"); 3236 return; 3237 } 3238 3239 if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) 3240 != CAM_REQ_CMP) { 3241 mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP " 3242 "support\n"); 3243 xpt_free_ccb(ccb); 3244 return; 3245 } 3246 3247 /* 3248 * If LUN is already in list, don't create a new one. 3249 */ 3250 found_lun = FALSE; 3251 SLIST_FOREACH(lun, &target->luns, lun_link) { 3252 if (lun->lun_id == lunid) { 3253 found_lun = TRUE; 3254 break; 3255 } 3256 } 3257 if (!found_lun) { 3258 lun = malloc(sizeof(struct mprsas_lun), M_MPR, 3259 M_NOWAIT | M_ZERO); 3260 if (lun == NULL) { 3261 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for " 3262 "EEDP support.\n"); 3263 xpt_free_path(local_path); 3264 xpt_free_ccb(ccb); 3265 return; 3266 } 3267 lun->lun_id = lunid; 3268 SLIST_INSERT_HEAD(&target->luns, lun, lun_link); 3269 } 3270 3271 xpt_path_string(local_path, path_str, sizeof(path_str)); 3272 mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n", 3273 path_str, target->handle); 3274 3275 /* 3276 * Issue a READ CAPACITY 16 command for the LUN. The 3277 * mprsas_read_cap_done function will load the read cap info into the 3278 * LUN struct. 
3279 */ 3280 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR, 3281 M_NOWAIT | M_ZERO); 3282 if (rcap_buf == NULL) { 3283 mpr_dprint(sc, MPR_FAULT, "Unable to alloc read capacity " 3284 "buffer for EEDP support.\n"); 3285 xpt_free_path(ccb->ccb_h.path); 3286 xpt_free_ccb(ccb); 3287 return; 3288 } 3289 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT); 3290 csio = &ccb->csio; 3291 csio->ccb_h.func_code = XPT_SCSI_IO; 3292 csio->ccb_h.flags = CAM_DIR_IN; 3293 csio->ccb_h.retry_count = 4; 3294 csio->ccb_h.cbfcnp = mprsas_read_cap_done; 3295 csio->ccb_h.timeout = 60000; 3296 csio->data_ptr = (uint8_t *)rcap_buf; 3297 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp); 3298 csio->sense_len = MPR_SENSE_LEN; 3299 csio->cdb_len = sizeof(*scsi_cmd); 3300 csio->tag_action = MSG_SIMPLE_Q_TAG; 3301 3302 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes; 3303 bzero(scsi_cmd, sizeof(*scsi_cmd)); 3304 scsi_cmd->opcode = 0x9E; 3305 scsi_cmd->service_action = SRC16_SERVICE_ACTION; 3306 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp); 3307 3308 ccb->ccb_h.ppriv_ptr1 = sassc; 3309 xpt_action(ccb); 3310 } 3311 3312 static void 3313 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb) 3314 { 3315 struct mprsas_softc *sassc; 3316 struct mprsas_target *target; 3317 struct mprsas_lun *lun; 3318 struct scsi_read_capacity_eedp *rcap_buf; 3319 3320 if (done_ccb == NULL) 3321 return; 3322 3323 /* Driver need to release devq, it Scsi command is 3324 * generated by driver internally. 3325 * Currently there is a single place where driver 3326 * calls scsi command internally. In future if driver 3327 * calls more scsi command internally, it needs to release 3328 * devq internally, since those command will not go back to 3329 * cam_periph. 
3330 */ 3331 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) { 3332 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 3333 xpt_release_devq(done_ccb->ccb_h.path, 3334 /*count*/ 1, /*run_queue*/TRUE); 3335 } 3336 3337 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr; 3338 3339 /* 3340 * Get the LUN ID for the path and look it up in the LUN list for the 3341 * target. 3342 */ 3343 sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1; 3344 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, 3345 ("Target %d out of bounds in mprsas_read_cap_done\n", 3346 done_ccb->ccb_h.target_id)); 3347 target = &sassc->targets[done_ccb->ccb_h.target_id]; 3348 SLIST_FOREACH(lun, &target->luns, lun_link) { 3349 if (lun->lun_id != done_ccb->ccb_h.target_lun) 3350 continue; 3351 3352 /* 3353 * Got the LUN in the target's LUN list. Fill it in with EEDP 3354 * info. If the READ CAP 16 command had some SCSI error (common 3355 * if command is not supported), mark the lun as not supporting 3356 * EEDP and set the block size to 0. 3357 */ 3358 if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) 3359 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) { 3360 lun->eedp_formatted = FALSE; 3361 lun->eedp_block_size = 0; 3362 break; 3363 } 3364 3365 if (rcap_buf->protect & 0x01) { 3366 mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for " 3367 "target ID %d is formatted for EEDP " 3368 "support.\n", done_ccb->ccb_h.target_lun, 3369 done_ccb->ccb_h.target_id); 3370 lun->eedp_formatted = TRUE; 3371 lun->eedp_block_size = scsi_4btoul(rcap_buf->length); 3372 } 3373 break; 3374 } 3375 3376 // Finished with this CCB and path. 
3377 free(rcap_buf, M_MPR); 3378 xpt_free_path(done_ccb->ccb_h.path); 3379 xpt_free_ccb(done_ccb); 3380 } 3381 #endif /* (__FreeBSD_version < 901503) || \ 3382 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */ 3383 3384 int 3385 mprsas_startup(struct mpr_softc *sc) 3386 { 3387 /* 3388 * Send the port enable message and set the wait_for_port_enable flag. 3389 * This flag helps to keep the simq frozen until all discovery events 3390 * are processed. 3391 */ 3392 sc->wait_for_port_enable = 1; 3393 mprsas_send_portenable(sc); 3394 return (0); 3395 } 3396 3397 static int 3398 mprsas_send_portenable(struct mpr_softc *sc) 3399 { 3400 MPI2_PORT_ENABLE_REQUEST *request; 3401 struct mpr_command *cm; 3402 3403 MPR_FUNCTRACE(sc); 3404 3405 if ((cm = mpr_alloc_command(sc)) == NULL) 3406 return (EBUSY); 3407 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req; 3408 request->Function = MPI2_FUNCTION_PORT_ENABLE; 3409 request->MsgFlags = 0; 3410 request->VP_ID = 0; 3411 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 3412 cm->cm_complete = mprsas_portenable_complete; 3413 cm->cm_data = NULL; 3414 cm->cm_sge = NULL; 3415 3416 mpr_map_command(sc, cm); 3417 mpr_dprint(sc, MPR_XINFO, 3418 "mpr_send_portenable finished cm %p req %p complete %p\n", 3419 cm, cm->cm_req, cm->cm_complete); 3420 return (0); 3421 } 3422 3423 static void 3424 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm) 3425 { 3426 MPI2_PORT_ENABLE_REPLY *reply; 3427 struct mprsas_softc *sassc; 3428 3429 MPR_FUNCTRACE(sc); 3430 sassc = sc->sassc; 3431 3432 /* 3433 * Currently there should be no way we can hit this case. It only 3434 * happens when we have a failure to allocate chain frames, and 3435 * port enable commands don't have S/G lists. 3436 */ 3437 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 3438 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! 
" 3439 "This should not happen!\n", __func__, cm->cm_flags); 3440 } 3441 3442 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply; 3443 if (reply == NULL) 3444 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n"); 3445 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) != 3446 MPI2_IOCSTATUS_SUCCESS) 3447 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n"); 3448 3449 mpr_free_command(sc, cm); 3450 if (sc->mpr_ich.ich_arg != NULL) { 3451 mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n"); 3452 config_intrhook_disestablish(&sc->mpr_ich); 3453 sc->mpr_ich.ich_arg = NULL; 3454 } 3455 3456 /* 3457 * Done waiting for port enable to complete. Decrement the refcount. 3458 * If refcount is 0, discovery is complete and a rescan of the bus can 3459 * take place. 3460 */ 3461 sc->wait_for_port_enable = 0; 3462 sc->port_enable_complete = 1; 3463 wakeup(&sc->port_enable_complete); 3464 mprsas_startup_decrement(sassc); 3465 } 3466 3467 int 3468 mprsas_check_id(struct mprsas_softc *sassc, int id) 3469 { 3470 struct mpr_softc *sc = sassc->sc; 3471 char *ids; 3472 char *name; 3473 3474 ids = &sc->exclude_ids[0]; 3475 while((name = strsep(&ids, ",")) != NULL) { 3476 if (name[0] == '\0') 3477 continue; 3478 if (strtol(name, NULL, 0) == (long)id) 3479 return (1); 3480 } 3481 3482 return (0); 3483 } 3484