/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * SCSI (SCSA) midlayer interface for PMC driver.
 */

#include <sys/scsi/adapters/pmcs/pmcs.h>

extern scsi_lun_t scsi_lun64_to_lun(scsi_lun64_t lun64);

/* SCSA (tran_*) entry points */
static int pmcs_scsa_tran_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static void pmcs_scsa_tran_tgt_free(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static int pmcs_scsa_start(struct scsi_address *, struct scsi_pkt *);
static int pmcs_scsa_abort(struct scsi_address *, struct scsi_pkt *);
static int pmcs_scsa_reset(struct scsi_address *, int);
static int pmcs_scsi_reset_notify(struct scsi_address *, int,
    void (*)(caddr_t), caddr_t);
static int pmcs_scsa_getcap(struct scsi_address *, char *, int);
static int pmcs_scsa_setcap(struct scsi_address *, char *, int, int);
static int pmcs_scsa_setup_pkt(struct scsi_pkt *, int (*)(caddr_t), caddr_t);
static void pmcs_scsa_teardown_pkt(struct scsi_pkt *);

/* SMP (smp_tran_*) entry points */
static int pmcs_smp_init(dev_info_t *, dev_info_t *, smp_hba_tran_t *,
    smp_device_t *);
static void pmcs_smp_free(dev_info_t *, dev_info_t *, smp_hba_tran_t *,
    smp_device_t *);
static int pmcs_smp_start(struct smp_pkt *);

static int pmcs_scsi_quiesce(dev_info_t *);
static int pmcs_scsi_unquiesce(dev_info_t *);

/* Internal helpers */
static int pmcs_cap(struct scsi_address *, char *, int, int, int);
static pmcs_xscsi_t *
    pmcs_addr2xp(struct scsi_address *, uint64_t *, pmcs_cmd_t *);
static int pmcs_SAS_run(pmcs_cmd_t *, pmcwork_t *);
static void pmcs_SAS_done(pmcs_hw_t *, pmcwork_t *, uint32_t *);

static int pmcs_SATA_run(pmcs_cmd_t *, pmcwork_t *);
static void pmcs_SATA_done(pmcs_hw_t *, pmcwork_t *, uint32_t *);
static uint8_t pmcs_SATA_rwparm(uint8_t *, uint32_t *, uint64_t *, uint64_t);

static void pmcs_ioerror(pmcs_hw_t *, pmcs_dtype_t pmcs_dtype,
    pmcwork_t *, uint32_t *, uint32_t);


/*
 * pmcs_scsa_init
 *
 * Allocate and register both the SCSA transport (scsi_hba_tran_t) and the
 * SMP transport (smp_hba_tran_t) for this HBA instance.  The DMA attributes
 * passed in are copied and adjusted (SGL length, relaxed ordering, FLAGERR)
 * before being handed to scsi_hba_attach_setup().
 *
 * Returns DDI_SUCCESS or DDI_FAILURE; on failure all transports allocated
 * here are freed again.
 */
int
pmcs_scsa_init(pmcs_hw_t *pwp, const ddi_dma_attr_t *ap)
{
	scsi_hba_tran_t *tran;
	ddi_dma_attr_t pmcs_scsa_dattr;
	int flags;

	(void) memcpy(&pmcs_scsa_dattr, ap, sizeof (ddi_dma_attr_t));
	pmcs_scsa_dattr.dma_attr_sgllen =
	    ((PMCS_SGL_NCHUNKS - 1) * (PMCS_MAX_CHUNKS - 1)) + PMCS_SGL_NCHUNKS;
	pmcs_scsa_dattr.dma_attr_flags = DDI_DMA_RELAXED_ORDERING;
	pmcs_scsa_dattr.dma_attr_flags |= DDI_DMA_FLAGERR;

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(pwp->dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "scsi_hba_tran_alloc failed");
		return (DDI_FAILURE);
	}

	tran->tran_hba_private = pwp;
	tran->tran_tgt_init = pmcs_scsa_tran_tgt_init;
	tran->tran_tgt_free = pmcs_scsa_tran_tgt_free;
	tran->tran_start = pmcs_scsa_start;
	tran->tran_abort = pmcs_scsa_abort;
	tran->tran_reset = pmcs_scsa_reset;
	tran->tran_reset_notify = pmcs_scsi_reset_notify;
	tran->tran_getcap = pmcs_scsa_getcap;
	tran->tran_setcap = pmcs_scsa_setcap;
	tran->tran_setup_pkt = pmcs_scsa_setup_pkt;
	tran->tran_teardown_pkt = pmcs_scsa_teardown_pkt;
	tran->tran_quiesce = pmcs_scsi_quiesce;
	tran->tran_unquiesce = pmcs_scsi_unquiesce;
	tran->tran_interconnect_type = INTERCONNECT_SAS;
	tran->tran_hba_len = sizeof (pmcs_cmd_t);

	/*
	 * Attach this instance of the hba
	 */

	flags = SCSI_HBA_TRAN_SCB | SCSI_HBA_TRAN_CDB | SCSI_HBA_ADDR_COMPLEX |
	    SCSI_HBA_TRAN_PHCI | SCSI_HBA_HBA;

	if (scsi_hba_attach_setup(pwp->dip, &pmcs_scsa_dattr, tran, flags)) {
		scsi_hba_tran_free(tran);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "scsi_hba_attach failed");
		return (DDI_FAILURE);
	}
	pwp->tran = tran;

	/*
	 * Attach the SMP part of this hba
	 */
	pwp->smp_tran = smp_hba_tran_alloc(pwp->dip);
	ASSERT(pwp->smp_tran != NULL);
	pwp->smp_tran->smp_tran_hba_private = pwp;
	pwp->smp_tran->smp_tran_init = pmcs_smp_init;
	pwp->smp_tran->smp_tran_free = pmcs_smp_free;
	pwp->smp_tran->smp_tran_start = pmcs_smp_start;

	if (smp_hba_attach_setup(pwp->dip, pwp->smp_tran) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "smp_hba_attach failed");
		smp_hba_tran_free(pwp->smp_tran);
		pwp->smp_tran = NULL;
		scsi_hba_tran_free(tran);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * SCSA entry points
 */

/*
 * tran_tgt_init(9E): bind a scsi_device to its pmcs target/LUN soft state.
 * Only called for devices enumerated under an iport node; acquires scratch,
 * then pwp->lock, the target's PHY lock (via pmcs_get_target) and statlock.
 * On any failure, jumps to tgt_init_fail which unwinds in reverse order.
 */
static int
pmcs_scsa_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	pmcs_hw_t *pwp = NULL;
	int rval;
	char *variant_prop = "sata";
	char *tgt_port = NULL, *ua = NULL;
	pmcs_xscsi_t *tgt = NULL;
	pmcs_iport_t *iport;
	pmcs_lun_t *lun = NULL;
	pmcs_phy_t *phyp = NULL;
	uint64_t lun_num;
	boolean_t got_scratch = B_FALSE;

	/*
	 * First, make sure we're an iport and get the pointer to the HBA
	 * node's softstate
	 */
	if (scsi_hba_iport_unit_address(hba_dip) == NULL) {
		pmcs_prt(TRAN2PMC(tran), PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: We don't enumerate devices on the HBA node", __func__);
		goto tgt_init_fail;
	}

	pwp = ITRAN2PMC(tran);
	iport = ITRAN2IPORT(tran);

	/*
	 * Get the unit-address
	 */
	ua = scsi_device_unit_address(sd);
	if (ua == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: Couldn't get UA", __func__);
		/* pwp cleared so the fail path skips mutex_exit(&pwp->lock) */
		pwp = NULL;
		goto tgt_init_fail;
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL,
	    "got ua '%s'", ua);

	/*
	 * Get the target address
	 */
	rval = scsi_device_prop_lookup_string(sd, SCSI_DEVICE_PROP_PATH,
	    SCSI_ADDR_PROP_TARGET_PORT, &tgt_port);
	if (rval != DDI_PROP_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "Couldn't get target UA");
		pwp = NULL;
		goto tgt_init_fail;
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL,
	    "got tgt_port '%s'", tgt_port);

	/*
	 * Validate that this tran_tgt_init is for an active iport.
	 */
	if (iport->ua_state == UA_INACTIVE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Got tran_tgt_init on inactive iport for '%s'",
		    __func__, tgt_port);
		pwp = NULL;
		goto tgt_init_fail;
	}

	/*
	 * Since we're going to wait for scratch, be sure to acquire it while
	 * we're not holding any other locks
	 */
	(void) pmcs_acquire_scratch(pwp, B_TRUE);
	got_scratch = B_TRUE;

	mutex_enter(&pwp->lock);

	/*
	 * See if there's already a target softstate. If not, allocate one.
	 */
	tgt = pmcs_get_target(iport, tgt_port, B_TRUE);

	if (tgt == NULL) {
		goto tgt_init_fail;
	}

	/* pmcs_get_target returns with the PHY locked */
	phyp = tgt->phy;
	if (!IS_ROOT_PHY(phyp)) {
		pmcs_inc_phy_ref_count(phyp);
	}
	ASSERT(mutex_owned(&phyp->phy_lock));

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, phyp, tgt, "@%s tgt = 0x%p, dip = 0x%p",
	    ua, (void *)tgt, (void *)tgt_dip);

	/* Now get the lun */
	lun_num = scsi_device_prop_get_int64(sd, SCSI_DEVICE_PROP_PATH,
	    SCSI_ADDR_PROP_LUN64, SCSI_LUN64_ILLEGAL);
	if (lun_num == SCSI_LUN64_ILLEGAL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "No LUN for tgt %p", (void *)tgt);
		goto tgt_init_fail;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt, "%s: @%s tgt 0x%p phy "
	    "0x%p (%s)", __func__, ua, (void *)tgt, (void *)phyp, phyp->path);

	mutex_enter(&tgt->statlock);
	tgt->dtype = phyp->dtype;
	if (tgt->dtype != SAS && tgt->dtype != SATA) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "PHY 0x%p went away?", (void *)phyp);
		goto tgt_init_fail;
	}

	/* We don't support SATA devices at LUN > 0. */
	if ((tgt->dtype == SATA) && (lun_num > 0)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: No support for SATA devices at LUN > 0 "
		    "(target = 0x%p)", __func__, (void *)tgt);
		goto tgt_init_fail;
	}

	/*
	 * Allocate LU soft state. We use ddi_soft_state_bystr_zalloc instead
	 * of kmem_alloc because ddi_soft_state_bystr_zalloc allows us to
	 * verify that the framework never tries to initialize two scsi_device
	 * structures with the same unit-address at the same time.
	 */
	if (ddi_soft_state_bystr_zalloc(tgt->lun_sstate, ua) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, phyp, tgt,
		    "Couldn't allocate LU soft state");
		goto tgt_init_fail;
	}

	lun = ddi_soft_state_bystr_get(tgt->lun_sstate, ua);
	if (lun == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, phyp, tgt,
		    "Couldn't get LU soft state");
		goto tgt_init_fail;
	}
	scsi_device_hba_private_set(sd, lun);
	lun->lun_num = lun_num;

	/* convert the scsi_lun64_t value to SCSI standard form */
	lun->scsi_lun = scsi_lun64_to_lun(lun_num);

	/*
	 * NOTE(review): the bcopy length from strnlen() does not include the
	 * terminating NUL; unit_address presumably remains NUL-terminated
	 * only because the LU soft state was zalloc'd above -- verify
	 * against PMCS_MAX_UA_SIZE.
	 */
	ASSERT(strlen(ua) < (PMCS_MAX_UA_SIZE - 1));
	bcopy(ua, lun->unit_address, strnlen(ua, PMCS_MAX_UA_SIZE - 1));

	lun->target = tgt;

	/*
	 * If this is the first tran_tgt_init, add this target to our list
	 */
	if (tgt->target_num == PMCS_INVALID_TARGET_NUM) {
		int target;
		/* Find the first free slot in the target array */
		for (target = 0; target < pwp->max_dev; target++) {
			if (pwp->targets[target] != NULL) {
				continue;
			}

			pwp->targets[target] = tgt;
			tgt->target_num = (uint16_t)target;
			break;
		}

		if (target == pwp->max_dev) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
			    "Target list full.");
			goto tgt_init_fail;
		}
	}

	tgt->dip = sd->sd_dev;
	lun->sd = sd;
	list_insert_tail(&tgt->lun_list, lun);

	if (!pmcs_assign_device(pwp, tgt)) {
		/* Undo the target-number assignment made above */
		pmcs_release_scratch(pwp);
		pwp->targets[tgt->target_num] = NULL;
		tgt->target_num = PMCS_INVALID_TARGET_NUM;
		tgt->phy = NULL;
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: pmcs_assign_device failed for target 0x%p",
		    __func__, (void *)tgt);
		goto tgt_init_fail;
	}

	pmcs_release_scratch(pwp);
	tgt->ref_count++;

	(void) scsi_device_prop_update_int(sd, SCSI_DEVICE_PROP_PATH,
	    SCSI_ADDR_PROP_TARGET, (uint32_t)(tgt->target_num));

	/* SM-HBA */
	if (tgt->dtype == SATA) {
		/* TCR in PSARC/1997/281 opinion */
		(void) scsi_device_prop_update_string(sd,
		    SCSI_DEVICE_PROP_PATH, "variant", variant_prop);
	}

	tgt->phy_addressable = PMCS_PHY_ADDRESSABLE(phyp);

	if (tgt->phy_addressable) {
		(void) scsi_device_prop_update_int(sd, SCSI_DEVICE_PROP_PATH,
		    SCSI_ADDR_PROP_SATA_PHY, phyp->phynum);
	}

	/* SM-HBA */
	(void) pmcs_smhba_set_scsi_device_props(pwp, phyp, sd);
	/*
	 * Make sure attached port and target port pm props are updated
	 * By passing in 0s, we're not actually updating any values, but
	 * the properties should now get updated on the node.
	 */

	/* Success: drop locks in reverse of acquisition order */
	mutex_exit(&tgt->statlock);
	pmcs_update_phy_pm_props(phyp, 0, 0, B_TRUE);
	pmcs_unlock_phy(phyp);
	mutex_exit(&pwp->lock);
	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, tgt_port);
	return (DDI_SUCCESS);

tgt_init_fail:
	/*
	 * Failure unwind: each step is guarded so this label is reachable
	 * from any point above, however much state had been set up.
	 */
	scsi_device_hba_private_set(sd, NULL);
	if (got_scratch) {
		pmcs_release_scratch(pwp);
	}
	if (lun) {
		list_remove(&tgt->lun_list, lun);
		ddi_soft_state_bystr_free(tgt->lun_sstate, ua);
	}
	if (phyp) {
		mutex_exit(&tgt->statlock);
		pmcs_unlock_phy(phyp);
		/*
		 * phyp's ref count was incremented in pmcs_new_tport.
		 * We're failing configuration, we now need to decrement it.
		 */
		if (!IS_ROOT_PHY(phyp)) {
			pmcs_dec_phy_ref_count(phyp);
		}
		phyp->target = NULL;
	}
	if (tgt && tgt->ref_count == 0) {
		ddi_soft_state_bystr_free(iport->tgt_sstate, tgt_port);
	}
	if (pwp) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: failed for @%s tgt 0x%p phy 0x%p", __func__, ua,
		    (void *)tgt, (void *)phyp);
	}
	if (tgt_port) {
		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, tgt_port);
	}
	return (DDI_FAILURE);
}

/*
 * tran_tgt_free(9E): release the per-LUN soft state created by
 * tran_tgt_init and drop a reference on the target; the target itself is
 * destroyed once its ref_count reaches zero.
 */
static void
pmcs_scsa_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	_NOTE(ARGUNUSED(hba_dip, tgt_dip));
	pmcs_hw_t *pwp;
	pmcs_lun_t *lun;
	pmcs_xscsi_t *target;
	char *unit_address;
	pmcs_phy_t *phyp;

	if (scsi_hba_iport_unit_address(hba_dip) == NULL) {
		pwp = TRAN2PMC(tran);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: We don't enumerate devices on the HBA node", __func__);
		return;
	}

	lun = (pmcs_lun_t *)scsi_device_hba_private_get(sd);

	ASSERT((lun != NULL) && (lun->target != NULL));
	ASSERT(lun->target->ref_count > 0);

	target = lun->target;
	unit_address = lun->unit_address;
	list_remove(&target->lun_list, lun);

	pwp = ITRAN2PMC(tran);
	mutex_enter(&pwp->lock);
	phyp = target->phy;
	if (phyp) {
		mutex_enter(&phyp->phy_lock);
	}
	mutex_enter(&target->statlock);

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, target,
	    "%s: for @%s tgt 0x%p phy 0x%p", __func__, unit_address,
	    (void *)target, (void *)phyp);
	ddi_soft_state_bystr_free(lun->target->lun_sstate, unit_address);

	if (target->recover_wait) {
		mutex_exit(&target->statlock);
		if (phyp) {
			mutex_exit(&phyp->phy_lock);
		}
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, target, "%s: "
		    "Target 0x%p in device state recovery, fail tran_tgt_free",
		    __func__, (void *)target);
		return;
	}

	/*
	 * If this target still has a PHY pointer and that PHY's target pointer
	 * has been cleared, then that PHY has been reaped. In that case, there
	 * would be no need to decrement the reference count
	 */
	if (phyp && !IS_ROOT_PHY(phyp) && phyp->target) {
		pmcs_dec_phy_ref_count(phyp);
	}

	if (--target->ref_count == 0) {
		/*
		 * Remove this target from our list. The target soft
		 * state will remain, and the device will remain registered
		 * with the hardware unless/until we're told the device
		 * physically went away.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, target,
		    "%s: Free target 0x%p (vtgt %d)", __func__, (void *)target,
		    target->target_num);
		pwp->targets[target->target_num] = NULL;
		target->target_num = PMCS_INVALID_TARGET_NUM;
		/*
		 * If the target still has a PHY pointer, break the linkage
		 */
		if (phyp) {
			phyp->target = NULL;
		}
		target->phy = NULL;
		/* NOTE: pmcs_destroy_target presumably drops statlock */
		pmcs_destroy_target(target);
	} else {
		mutex_exit(&target->statlock);
	}

	if (phyp) {
		mutex_exit(&phyp->phy_lock);
	}
	mutex_exit(&pwp->lock);
}

/*
 * tran_start(9E): accept a SCSI packet for this target.  Polled (FLAG_NOINTR)
 * packets are rejected.  Packets are queued on the target's wait queue and
 * the queue is then run; dead/missing targets get CMD_DEV_GONE completion
 * via the HBA completion queue.
 */
static int
pmcs_scsa_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	pmcs_cmd_t *sp = PKT2CMD(pkt);
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	pmcs_xscsi_t *xp;
	boolean_t blocked;
	uint32_t hba_state;

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
	    "%s: pkt %p sd %p cdb0=0x%02x dl=%lu", __func__, (void *)pkt,
	    (void *)scsi_address_device(&pkt->pkt_address),
	    pkt->pkt_cdbp[0] & 0xff, pkt->pkt_dma_len);

	if (pkt->pkt_flags & FLAG_NOINTR) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL,
		    "%s: nointr pkt", __func__);
		return (TRAN_BADPKT);
	}

	sp->cmd_tag = 0;
	pkt->pkt_state = pkt->pkt_statistics = 0;
	pkt->pkt_reason = CMD_INCOMPLETE;

	/* Snapshot HBA state under the lock */
	mutex_enter(&pwp->lock);
	hba_state = pwp->state;
	blocked = pwp->blocked;
	mutex_exit(&pwp->lock);

	if (hba_state != STATE_RUNNING) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: hba dead", __func__);
		return (TRAN_FATAL_ERROR);
	}

	xp = pmcs_addr2xp(ap, NULL, sp);
	if (xp == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
		    "%s: dropping due to null target", __func__);
		goto dead_target;
	}
	/* pmcs_addr2xp returns with statlock held on success */
	ASSERT(mutex_owned(&xp->statlock));

	/*
	 * First, check to see if the device is gone.
	 */
	if (xp->dev_gone) {
		xp->actv_pkts++;
		mutex_exit(&xp->statlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, xp,
		    "%s: dropping due to dead target 0x%p",
		    __func__, (void *)xp);
		goto dead_target;
	}

	/*
	 * If we're blocked (quiesced) just return.
	 */
	if (blocked) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: hba blocked", __func__);
		xp->actv_pkts++;
		mutex_exit(&xp->statlock);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		return (TRAN_ACCEPT);
	}

	/*
	 * If we're draining or resetting, queue and return.
	 */
	if (xp->draining || xp->resetting || xp->recover_wait) {
		xp->actv_pkts++;
		mutex_exit(&xp->statlock);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, xp,
		    "%s: draining/resetting/recovering (cnt %u)",
		    __func__, xp->actv_cnt);
		/*
		 * By the time we get here, draining or
		 * resetting may have come and gone, not
		 * yet noticing that we had put something
		 * on the wait queue, so schedule a worker
		 * to look at this later.
		 */
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		return (TRAN_ACCEPT);
	}

	xp->actv_pkts++;
	mutex_exit(&xp->statlock);

	/*
	 * Queue this command to the tail of the wait queue.
	 * This keeps us from getting commands out of order.
	 */
	mutex_enter(&xp->wqlock);
	STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next);
	mutex_exit(&xp->wqlock);

	/*
	 * Now run the queue for this device.
	 */
	(void) pmcs_scsa_wq_run_one(pwp, xp);

	return (TRAN_ACCEPT);

dead_target:
	/* Complete the packet with CMD_DEV_GONE via the completion queue */
	pkt->pkt_state = STATE_GOT_BUS;
	pkt->pkt_reason = CMD_DEV_GONE;
	mutex_enter(&pwp->cq_lock);
	STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
	PMCS_CQ_RUN_LOCKED(pwp);
	mutex_exit(&pwp->cq_lock);
	return (TRAN_ACCEPT);
}

/*
 * tran_abort(9E) entry point.  With a NULL pkt, aborts everything on the
 * addressed device's PHY; with a specific pkt, aborts it on the chip if it
 * is active, otherwise pulls it off the target's wait queue.
 *
 * Return code 1 = Success
 */
static int
pmcs_scsa_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	pmcs_cmd_t *sp = NULL;
	pmcs_xscsi_t *xp = NULL;
	pmcs_phy_t *pptr = NULL;
	pmcs_lun_t *pmcs_lun = (pmcs_lun_t *)
	    scsi_device_hba_private_get(scsi_address_device(ap));
	uint32_t tag;
	uint64_t lun;
	pmcwork_t *pwrk;

	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: hba dead", __func__);
		return (0);
	}
	mutex_exit(&pwp->lock);

	if (pkt == NULL) {
		/* ABORT_ALL case: abort everything on this device's PHY */
		if (pmcs_lun == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: "
			    "No pmcs_lun_t struct to do ABORT_ALL", __func__);
			return (0);
		}
		xp = pmcs_lun->target;
		if (xp != NULL) {
			pptr = xp->phy;
		}
		if (pptr == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp, "%s: pkt is "
			    "NULL. No tgt/phy to do ABORT_ALL", __func__);
			return (0);
		}
		pmcs_lock_phy(pptr);
		if (pmcs_abort(pwp, pptr, 0, 1, 0)) {
			pptr->abort_pending = 1;
			SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
		}
		pmcs_unlock_phy(pptr);
		return (1);
	}

	sp = PKT2CMD(pkt);
	xp = sp->cmd_target;

	if (sp->cmd_lun) {
		lun = sp->cmd_lun->lun_num;
	} else {
		lun = 0;
	}
	if (xp == NULL) {
		return (0);
	}

	/*
	 * See if we have a real work structure associated with this cmd.
	 */
	pwrk = pmcs_tag2wp(pwp, sp->cmd_tag, B_FALSE);
	if (pwrk && pwrk->arg == sp) {
		/* Command is active on the chip: issue an abort/TMF */
		tag = pwrk->htag;
		pptr = pwrk->phy;
		pwrk->timer = 0;	/* we don't time this here */
		ASSERT(pwrk->state == PMCS_WORK_STATE_ONCHIP);
		mutex_exit(&pwrk->lock);
		pmcs_lock_phy(pptr);
		if (pptr->dtype == SAS) {
			if (pmcs_ssp_tmf(pwp, pptr, SAS_ABORT_TASK, tag, lun,
			    NULL)) {
				pptr->abort_pending = 1;
				pmcs_unlock_phy(pptr);
				SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
				return (0);
			}
		} else {
			/*
			 * XXX: Was the command that was active an
			 * NCQ I/O command?
			 */
			pptr->need_rl_ext = 1;
			if (pmcs_sata_abort_ncq(pwp, pptr)) {
				pptr->abort_pending = 1;
				pmcs_unlock_phy(pptr);
				SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
				return (0);
			}
		}
		pptr->abort_pending = 1;
		pmcs_unlock_phy(pptr);
		SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
		return (1);
	}
	if (pwrk) {
		mutex_exit(&pwrk->lock);
	}
	/*
	 * Okay, those weren't the droids we were looking for.
	 * See if the command is on any of the wait queues.
	 */
	mutex_enter(&xp->wqlock);
	sp = NULL;
	STAILQ_FOREACH(sp, &xp->wq, cmd_next) {
		if (sp == PKT2CMD(pkt)) {
			STAILQ_REMOVE(&xp->wq, sp, pmcs_cmd, cmd_next);
			break;
		}
	}
	mutex_exit(&xp->wqlock);
	if (sp) {
		/* Found it queued but not yet on the chip: complete aborted */
		pkt->pkt_reason = CMD_ABORTED;
		pkt->pkt_statistics |= STAT_ABORTED;
		mutex_enter(&pwp->cq_lock);
		STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
		PMCS_CQ_RUN_LOCKED(pwp);
		mutex_exit(&pwp->cq_lock);
		return (1);
	}
	return (0);
}

/*
 * SCSA reset functions
 */

/*
 * tran_reset(9E): reset a LUN or target (RESET_ALL is not supported and
 * returns 0).  Returns 1 on success, 0 on failure.
 */
static int
pmcs_scsa_reset(struct scsi_address *ap, int level)
{
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	pmcs_phy_t *pptr;
	pmcs_xscsi_t *xp;
	uint64_t lun = (uint64_t)-1, *lp = NULL;
	int rval;

	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: hba dead", __func__);
		return (0);
	}
	mutex_exit(&pwp->lock);

	switch (level) {
	case RESET_ALL:
		rval = 0;
		break;
	case RESET_LUN:
		/*
		 * Point lp at lun so that pmcs_addr2xp
		 * will fill out the 64 bit lun number.
		 */
		lp = &lun;
		/* FALLTHROUGH */
	case RESET_TARGET:
		xp = pmcs_addr2xp(ap, lp, NULL);
		if (xp == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: no xp found for this scsi address", __func__);
			return (0);
		}

		if (xp->dev_gone) {
			mutex_exit(&xp->statlock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
			    "%s: Target 0x%p has gone away", __func__,
			    (void *)xp);
			return (0);
		}

		/*
		 * If we're already performing this action, or if device
		 * state recovery is already running, just return failure.
		 */
		if (xp->resetting || xp->recover_wait) {
			mutex_exit(&xp->statlock);
			return (0);
		}
		xp->reset_wait = 0;
		xp->reset_success = 0;
		xp->resetting = 1;
		pptr = xp->phy;
		mutex_exit(&xp->statlock);

		/* pmcs_reset_dev returns non-zero on failure */
		if (pmcs_reset_dev(pwp, pptr, lun)) {
			rval = 0;
		} else {
			rval = 1;
		}

		mutex_enter(&xp->statlock);
		if (rval == 1) {
			xp->reset_success = 1;
		}
		if (xp->reset_wait) {
			/* Wake anyone waiting on the reset to finish */
			xp->reset_wait = 0;
			cv_signal(&xp->reset_cv);
		}
		xp->resetting = 0;
		mutex_exit(&xp->statlock);
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		break;
	default:
		rval = 0;
		break;
	}

	return (rval);
}

/*
 * tran_reset_notify(9E): register/unregister a reset notification callback
 * via the common scsi_hba_reset_notify_setup framework helper.
 */
static int
pmcs_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
	    &pwp->lock, &pwp->reset_notify_listf));
}


/*
 * Common capability handler for tran_getcap/tran_setcap.  'set' selects
 * set (1) vs. get (0); returns the capability value, or -1 for unknown
 * capabilities or an unresolvable address.
 */
static int
pmcs_cap(struct scsi_address *ap, char *cap, int val, int tonly, int set)
{
	_NOTE(ARGUNUSED(val, tonly));
	int cidx, rval = 0;
	pmcs_xscsi_t *xp;

	cidx = scsi_hba_lookup_capstr(cap);
	if (cidx == -1) {
		return (-1);
	}

	/* Returns with statlock held on success */
	xp = pmcs_addr2xp(ap, NULL, NULL);
	if (xp == NULL) {
		return (-1);
	}

	switch (cidx) {
	case SCSI_CAP_DMA_MAX:
	case SCSI_CAP_INITIATOR_ID:
		if (set == 0) {
			rval = INT_MAX;	/* argh */
		}
		break;
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_ARQ:
	case SCSI_CAP_UNTAGGED_QING:
		if (set == 0) {
			rval = 1;
		}
		break;

	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;

	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_RESET_NOTIFICATION:
	case SCSI_CAP_QFULL_RETRIES:
	case SCSI_CAP_QFULL_RETRY_INTERVAL:
		break;
	case SCSI_CAP_SCSI_VERSION:
		if (set == 0) {
			rval = SCSI_VERSION_3;
		}
		break;
	case SCSI_CAP_INTERCONNECT_TYPE:
		if (set) {
			break;
		}
		/* phy-addressable targets are SATA behind a direct PHY */
		if (xp->phy_addressable) {
			rval = INTERCONNECT_SATA;
		} else {
			rval = INTERCONNECT_SAS;
		}
		break;
	case SCSI_CAP_CDB_LEN:
		if (set == 0) {
			rval = 16;
		}
		break;
	case SCSI_CAP_LUN_RESET:
		if (set) {
			break;
		}
		/* LUN reset is not supported for SATA devices */
		if (xp->dtype == SATA) {
			rval = 0;
		} else {
			rval = 1;
		}
		break;
	default:
		rval = -1;
		break;
	}
	mutex_exit(&xp->statlock);
	pmcs_prt(ADDR2PMC(ap), PMCS_PRT_DEBUG3, NULL, NULL,
	    "%s: cap %s val %d set %d rval %d",
	    __func__, cap, val, set, rval);
	return (rval);
}

/*
 * Returns with statlock held if the xp is found.
 * Fills in pmcs_cmd_t with values if pmcs_cmd_t pointer non-NULL.
 */
static pmcs_xscsi_t *
pmcs_addr2xp(struct scsi_address *ap, uint64_t *lp, pmcs_cmd_t *sp)
{
	pmcs_xscsi_t *xp;
	pmcs_lun_t *lun = (pmcs_lun_t *)
	    scsi_device_hba_private_get(scsi_address_device(ap));

	if ((lun == NULL) || (lun->target == NULL)) {
		return (NULL);
	}
	xp = lun->target;
	mutex_enter(&xp->statlock);

	if (xp->dev_gone || (xp->phy == NULL)) {
		/*
		 * This may be a retried packet, so it's possible cmd_target
		 * and cmd_lun may still be populated.  Clear them.
		 */
		if (sp != NULL) {
			sp->cmd_target = NULL;
			sp->cmd_lun = NULL;
		}
		mutex_exit(&xp->statlock);
		return (NULL);
	}

	if (sp != NULL) {
		sp->cmd_target = xp;
		sp->cmd_lun = lun;
	}
	if (lp) {
		*lp = lun->lun_num;
	}
	return (xp);
}

/*
 * tran_getcap(9E): get a capability (thin wrapper around pmcs_cap).
 */
static int
pmcs_scsa_getcap(struct scsi_address *ap, char *cap, int whom)
{
	int r;
	if (cap == NULL) {
		return (-1);
	}
	r = pmcs_cap(ap, cap, 0, whom, 0);
	return (r);
}

/*
 * tran_setcap(9E): set a capability (thin wrapper around pmcs_cap).
 */
static int
pmcs_scsa_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	int r;
	if (cap == NULL) {
		return (-1);
	}
	r = pmcs_cap(ap, cap, value, whom, 1);
	return (r);
}

/*
 * tran_setup_pkt(9E): initialize the HBA-private pmcs_cmd_t for a packet.
 */
static int
pmcs_scsa_setup_pkt(struct scsi_pkt *pkt, int (*callback)(caddr_t),
    caddr_t cbarg)
{
	_NOTE(ARGUNUSED(callback, cbarg));
	pmcs_cmd_t *sp = pkt->pkt_ha_private;

	bzero(sp, sizeof (pmcs_cmd_t));
	sp->cmd_pkt = pkt;
	return (0);
}

/*
 * tran_teardown_pkt(9E): clear target/LUN links from the command.
 */
static void
pmcs_scsa_teardown_pkt(struct scsi_pkt *pkt)
{
	pmcs_cmd_t *sp = pkt->pkt_ha_private;
	sp->cmd_target = NULL;
	sp->cmd_lun = NULL;
}

/*
 * smp_tran_start: execute one SMP request against an expander.  The request
 * and response are staged through the HBA scratch area (request at offset 0,
 * response at SAS_SMP_MAX_PAYLOAD), and the command is issued via the
 * "other" inbound queue and waited for synchronously.
 */
static int
pmcs_smp_start(struct smp_pkt *smp_pkt)
{
	struct pmcwork *pwrk;
	const uint_t rdoff = SAS_SMP_MAX_PAYLOAD;
	uint32_t msg[PMCS_MSG_SIZE], *ptr, htag, status;
	uint64_t wwn;
	pmcs_hw_t *pwp;
	pmcs_phy_t *pptr;
	pmcs_xscsi_t *xp;
	uint_t reqsz, rspsz, will_retry;
	int result;

	pwp = smp_pkt->smp_pkt_address->smp_a_hba_tran->smp_tran_hba_private;
	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);

	pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, NULL,
	    "%s: starting for wwn 0x%" PRIx64, __func__, wwn);

	will_retry = smp_pkt->smp_pkt_will_retry;

	(void) pmcs_acquire_scratch(pwp, B_TRUE);
	reqsz = smp_pkt->smp_pkt_reqsize;
	if (reqsz > SAS_SMP_MAX_PAYLOAD) {
		reqsz = SAS_SMP_MAX_PAYLOAD;
	}
	(void) memcpy(pwp->scratch, smp_pkt->smp_pkt_req, reqsz);

	rspsz = smp_pkt->smp_pkt_rspsize;
	if (rspsz > SAS_SMP_MAX_PAYLOAD) {
		rspsz = SAS_SMP_MAX_PAYLOAD;
	}

	/*
	 * The request size from the SMP driver always includes 4 bytes
	 * for the CRC. The PMCS chip, however, doesn't want to see those
	 * counts as part of the transfer size.
	 */
	reqsz -= 4;

	pptr = pmcs_find_phy_by_wwn(pwp, wwn);
	/* PHY is now locked */
	if (pptr == NULL || pptr->dtype != EXPANDER) {
		if (pptr) {
			pmcs_unlock_phy(pptr);
		}
		pmcs_release_scratch(pwp);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: could not find phy", __func__);
		smp_pkt->smp_pkt_reason = ENXIO;
		return (DDI_FAILURE);
	}

	if ((pptr->iport == NULL) || !pptr->valid_device_id) {
		pmcs_unlock_phy(pptr);
		pmcs_release_scratch(pwp);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, pptr->target,
		    "%s: Can't reach PHY %s", __func__, pptr->path);
		smp_pkt->smp_pkt_reason = ENXIO;
		return (DDI_FAILURE);
	}

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		pmcs_unlock_phy(pptr);
		pmcs_release_scratch(pwp);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: could not get work structure", __func__);
		smp_pkt->smp_pkt_reason = will_retry ? EAGAIN : EBUSY;
		return (DDI_FAILURE);
	}

	pwrk->arg = msg;
	pwrk->dtype = EXPANDER;
	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		pmcs_pwork(pwp, pwrk);
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_unlock_phy(pptr);
		pmcs_release_scratch(pwp);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: could not get IQ entry", __func__);
		smp_pkt->smp_pkt_reason = will_retry ? EAGAIN : EBUSY;
		return (DDI_FAILURE);
	}
	/* Build the PMCIN_SMP_REQUEST message (indirect req and rsp) */
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	msg[3] = LE_32(SMP_INDIRECT_RESPONSE | SMP_INDIRECT_REQUEST);
	msg[8] = LE_32(DWORD0(pwp->scratch_dma));
	msg[9] = LE_32(DWORD1(pwp->scratch_dma));
	msg[10] = LE_32(reqsz);
	msg[11] = 0;
	msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff));
	msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff));
	msg[14] = LE_32(rspsz);
	msg[15] = 0;

	COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE);
	/* SMP serialization */
	pmcs_smp_acquire(pptr->iport);

	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	htag = pwrk->htag;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, smp_pkt->smp_pkt_timeout * 1000, result);
	pmcs_pwork(pwp, pwrk);
	/* Release SMP lock before reacquiring PHY lock */
	pmcs_smp_release(pptr->iport);
	pmcs_lock_phy(pptr);

	if (result) {
		/* Timed out waiting for completion; try to abort on-chip */
		pmcs_timed_out(pwp, htag, __func__);
		if (pmcs_abort(pwp, pptr, htag, 0, 0)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
			    "%s: Unable to issue SMP ABORT for htag 0x%08x",
			    __func__, htag);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
			    "%s: Issuing SMP ABORT for htag 0x%08x",
			    __func__, htag);
		}
		pmcs_unlock_phy(pptr);
		pmcs_release_scratch(pwp);
		smp_pkt->smp_pkt_reason = ETIMEDOUT;
		return (DDI_FAILURE);
	}
	status = LE_32(msg[2]);
	if (status == PMCOUT_STATUS_OVERFLOW) {
		/* Treat overflow as OK for data copy, but flag the caller */
		status = PMCOUT_STATUS_OK;
		smp_pkt->smp_pkt_reason = EOVERFLOW;
	}
	if (status != PMCOUT_STATUS_OK) {
		const char *emsg = pmcs_status_str(status);
		if (emsg == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
			    "SMP operation failed (0x%x)", status);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
			    "SMP operation failed (%s)", emsg);
		}

		if ((status == PMCOUT_STATUS_ERROR_HW_TIMEOUT) ||
		    (status == PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT)) {
			smp_pkt->smp_pkt_reason =
			    will_retry ? EAGAIN : ETIMEDOUT;
			result = DDI_FAILURE;
		} else if (status ==
		    PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS) {
			xp = pptr->target;
			if (xp == NULL) {
				smp_pkt->smp_pkt_reason = EIO;
				result = DDI_FAILURE;
				goto out;
			}
			if (xp->dev_state !=
			    PMCS_DEVICE_STATE_NON_OPERATIONAL) {
				xp->dev_state =
				    PMCS_DEVICE_STATE_NON_OPERATIONAL;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, xp->phy,
				    xp, "%s: Got _IT_NEXUS_LOSS SMP status. "
				    "Tgt(0x%p) dev_state set to "
				    "_NON_OPERATIONAL", __func__,
				    (void *)xp);
			}
			/* ABORT any pending commands related to this device */
			if (pmcs_abort(pwp, pptr, pptr->device_id, 1, 1) != 0) {
				pptr->abort_pending = 1;
				smp_pkt->smp_pkt_reason = EIO;
				result = DDI_FAILURE;
			}
		} else {
			smp_pkt->smp_pkt_reason = will_retry ? EAGAIN : EIO;
			result = DDI_FAILURE;
		}
	} else {
		(void) memcpy(smp_pkt->smp_pkt_rsp,
		    &((uint8_t *)pwp->scratch)[rdoff], rspsz);
		if (smp_pkt->smp_pkt_reason == EOVERFLOW) {
			result = DDI_FAILURE;
		} else {
			result = DDI_SUCCESS;
		}
	}
out:
	pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, pptr->target,
	    "%s: done for wwn 0x%" PRIx64, __func__, wwn);

	pmcs_unlock_phy(pptr);
	pmcs_release_scratch(pwp);
	return (result);
}

static int
pmcs_smp_init(dev_info_t *self, dev_info_t *child,
    smp_hba_tran_t *tran, smp_device_t *smp_sd)
{
	_NOTE(ARGUNUSED(tran, smp_sd));
	pmcs_iport_t *iport;
	pmcs_hw_t *pwp;
	pmcs_xscsi_t *tgt;
	pmcs_phy_t *phy, *pphy;
	uint64_t wwn;
	char *addr, *tgt_port;
	int ua_form = 1;

	iport = ddi_get_soft_state(pmcs_iport_softstate,
	    ddi_get_instance(self));
	ASSERT(iport);
	if (iport == NULL)
		return (DDI_FAILURE);
	pwp = iport->pwp;
	ASSERT(pwp);
	if (pwp == NULL)
		return (DDI_FAILURE);

	/* Get "target-port" prop from devinfo node */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    SCSI_ADDR_PROP_TARGET_PORT, &tgt_port) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed to "
		    "lookup prop ("SCSI_ADDR_PROP_TARGET_PORT")", __func__);
		/* Dont fail _smp_init() because we couldnt get/set a prop */
		return (DDI_SUCCESS);
	}

	/*
	 * Validate that this tran_tgt_init is for an active iport.
1238 */ 1239 if (iport->ua_state == UA_INACTIVE) { 1240 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1241 "%s: Init on inactive iport for '%s'", __func__, tgt_port); 1242 ddi_prop_free(tgt_port); 1243 return (DDI_FAILURE); 1244 } 1245 1246 mutex_enter(&pwp->lock); 1247 1248 /* Retrieve softstate using unit-address */ 1249 tgt = pmcs_get_target(iport, tgt_port, B_TRUE); 1250 if (tgt == NULL) { 1251 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1252 "%s: tgt softstate not found", __func__); 1253 ddi_prop_free(tgt_port); 1254 mutex_exit(&pwp->lock); 1255 return (DDI_FAILURE); 1256 } 1257 1258 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt, "%s: %s (%s)", 1259 __func__, ddi_get_name(child), tgt_port); 1260 1261 mutex_enter(&tgt->statlock); 1262 phy = tgt->phy; 1263 ASSERT(mutex_owned(&phy->phy_lock)); 1264 1265 if (IS_ROOT_PHY(phy)) { 1266 /* Expander attached to HBA - don't ref_count it */ 1267 wwn = pwp->sas_wwns[0]; 1268 } else { 1269 pmcs_inc_phy_ref_count(phy); 1270 1271 /* 1272 * Parent (in topology) is also an expander 1273 * Now that we've increased the ref count on phy, it's OK 1274 * to drop the lock so we can acquire the parent's lock. 1275 */ 1276 pphy = phy->parent; 1277 mutex_exit(&tgt->statlock); 1278 pmcs_unlock_phy(phy); 1279 pmcs_lock_phy(pphy); 1280 wwn = pmcs_barray2wwn(pphy->sas_address); 1281 pmcs_unlock_phy(pphy); 1282 pmcs_lock_phy(phy); 1283 mutex_enter(&tgt->statlock); 1284 } 1285 1286 /* 1287 * If this is the 1st smp_init, add this to our list. 
1288 */ 1289 if (tgt->target_num == PMCS_INVALID_TARGET_NUM) { 1290 int target; 1291 for (target = 0; target < pwp->max_dev; target++) { 1292 if (pwp->targets[target] != NULL) { 1293 continue; 1294 } 1295 1296 pwp->targets[target] = tgt; 1297 tgt->target_num = (uint16_t)target; 1298 tgt->assigned = 1; 1299 tgt->dev_state = PMCS_DEVICE_STATE_OPERATIONAL; 1300 break; 1301 } 1302 1303 if (target == pwp->max_dev) { 1304 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 1305 "Target list full."); 1306 goto smp_init_fail; 1307 } 1308 } 1309 1310 if (!pmcs_assign_device(pwp, tgt)) { 1311 pwp->targets[tgt->target_num] = NULL; 1312 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt, 1313 "%s: pmcs_assign_device failed for target 0x%p", 1314 __func__, (void *)tgt); 1315 goto smp_init_fail; 1316 } 1317 1318 /* 1319 * Update the attached port and target port pm properties 1320 */ 1321 tgt->smpd = smp_sd; 1322 1323 pmcs_unlock_phy(phy); 1324 mutex_exit(&pwp->lock); 1325 1326 tgt->ref_count++; 1327 tgt->dtype = phy->dtype; 1328 mutex_exit(&tgt->statlock); 1329 1330 pmcs_update_phy_pm_props(phy, 0, 0, B_TRUE); 1331 1332 addr = scsi_wwn_to_wwnstr(wwn, ua_form, NULL); 1333 if (smp_device_prop_update_string(smp_sd, SCSI_ADDR_PROP_ATTACHED_PORT, 1334 addr) != DDI_SUCCESS) { 1335 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed to set " 1336 "prop ("SCSI_ADDR_PROP_ATTACHED_PORT")", __func__); 1337 } 1338 (void) scsi_free_wwnstr(addr); 1339 ddi_prop_free(tgt_port); 1340 return (DDI_SUCCESS); 1341 1342 smp_init_fail: 1343 tgt->phy = NULL; 1344 tgt->target_num = PMCS_INVALID_TARGET_NUM; 1345 phy->target = NULL; 1346 if (!IS_ROOT_PHY(phy)) { 1347 pmcs_dec_phy_ref_count(phy); 1348 } 1349 mutex_exit(&tgt->statlock); 1350 pmcs_unlock_phy(phy); 1351 mutex_exit(&pwp->lock); 1352 ddi_soft_state_bystr_free(iport->tgt_sstate, tgt->unit_address); 1353 ddi_prop_free(tgt_port); 1354 return (DDI_FAILURE); 1355 } 1356 1357 static void 1358 pmcs_smp_free(dev_info_t *self, dev_info_t *child, 1359 
smp_hba_tran_t *tran, smp_device_t *smp) 1360 { 1361 _NOTE(ARGUNUSED(tran, smp)); 1362 pmcs_iport_t *iport; 1363 pmcs_hw_t *pwp; 1364 pmcs_xscsi_t *tgt; 1365 pmcs_phy_t *phyp; 1366 char *tgt_port; 1367 1368 iport = ddi_get_soft_state(pmcs_iport_softstate, 1369 ddi_get_instance(self)); 1370 ASSERT(iport); 1371 if (iport == NULL) 1372 return; 1373 1374 pwp = iport->pwp; 1375 if (pwp == NULL) 1376 return; 1377 ASSERT(pwp); 1378 1379 /* Get "target-port" prop from devinfo node */ 1380 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child, 1381 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 1382 SCSI_ADDR_PROP_TARGET_PORT, &tgt_port) != DDI_SUCCESS) { 1383 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed to " 1384 "lookup prop ("SCSI_ADDR_PROP_TARGET_PORT")", __func__); 1385 return; 1386 } 1387 1388 /* Retrieve softstate using unit-address */ 1389 mutex_enter(&pwp->lock); 1390 tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, tgt_port); 1391 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt, "%s: %s (%s)", __func__, 1392 ddi_get_name(child), tgt_port); 1393 ddi_prop_free(tgt_port); 1394 1395 if (tgt == NULL) { 1396 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 1397 "%s: tgt softstate not found", __func__); 1398 mutex_exit(&pwp->lock); 1399 return; 1400 } 1401 1402 phyp = tgt->phy; 1403 if (phyp) { 1404 mutex_enter(&phyp->phy_lock); 1405 if (!IS_ROOT_PHY(phyp)) { 1406 pmcs_dec_phy_ref_count(phyp); 1407 } 1408 } 1409 mutex_enter(&tgt->statlock); 1410 1411 if (--tgt->ref_count == 0) { 1412 /* 1413 * Remove this target from our list. The softstate 1414 * will remain, and the device will remain registered 1415 * with the hardware unless/until we're told that the 1416 * device physically went away. 
1417 */ 1418 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt, 1419 "Removing target 0x%p (vtgt %d) from target list", 1420 (void *)tgt, tgt->target_num); 1421 pwp->targets[tgt->target_num] = NULL; 1422 tgt->target_num = PMCS_INVALID_TARGET_NUM; 1423 if (phyp) { 1424 phyp->target = NULL; 1425 } 1426 tgt->phy = NULL; 1427 pmcs_destroy_target(tgt); 1428 } else { 1429 mutex_exit(&tgt->statlock); 1430 } 1431 1432 if (phyp) { 1433 mutex_exit(&phyp->phy_lock); 1434 } 1435 mutex_exit(&pwp->lock); 1436 } 1437 1438 static int 1439 pmcs_scsi_quiesce(dev_info_t *dip) 1440 { 1441 pmcs_hw_t *pwp; 1442 int totactive = -1; 1443 pmcs_xscsi_t *xp; 1444 uint16_t target; 1445 1446 if (ddi_get_soft_state(pmcs_iport_softstate, ddi_get_instance(dip))) 1447 return (0); /* iport */ 1448 1449 pwp = ddi_get_soft_state(pmcs_softc_state, ddi_get_instance(dip)); 1450 if (pwp == NULL) { 1451 return (-1); 1452 } 1453 mutex_enter(&pwp->lock); 1454 if (pwp->state != STATE_RUNNING) { 1455 mutex_exit(&pwp->lock); 1456 return (-1); 1457 } 1458 1459 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s called", __func__); 1460 pwp->quiesced = pwp->blocked = 1; 1461 while (totactive) { 1462 totactive = 0; 1463 for (target = 0; target < pwp->max_dev; target++) { 1464 xp = pwp->targets[target]; 1465 if (xp == NULL) { 1466 continue; 1467 } 1468 mutex_enter(&xp->statlock); 1469 if (xp->actv_cnt) { 1470 totactive += xp->actv_cnt; 1471 xp->draining = 1; 1472 } 1473 mutex_exit(&xp->statlock); 1474 } 1475 if (totactive) { 1476 cv_wait(&pwp->drain_cv, &pwp->lock); 1477 } 1478 /* 1479 * The pwp->blocked may have been reset. 
		 * e.g. a SCSI bus reset
		 */
		pwp->blocked = 1;
	}

	/* Drain finished: clear the per-target draining flags. */
	for (target = 0; target < pwp->max_dev; target++) {
		xp = pwp->targets[target];
		if (xp == NULL) {
			continue;
		}
		mutex_enter(&xp->statlock);
		xp->draining = 0;
		mutex_exit(&xp->statlock);
	}

	mutex_exit(&pwp->lock);
	/* NOTE(review): xp here is the last target visited above (may be NULL) */
	if (totactive == 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
		    "%s drain complete", __func__);
	}
	return (0);
}

/*
 * scsi_hba unquiesce entry point: clear the blocked/quiesced flags and
 * restart both the per-target wait queues and the completion queue.
 * iport nodes are a no-op; returns 0 on success, -1 if the HBA is absent
 * or not running.
 */
static int
pmcs_scsi_unquiesce(dev_info_t *dip)
{
	pmcs_hw_t *pwp;

	if (ddi_get_soft_state(pmcs_iport_softstate, ddi_get_instance(dip)))
		return (0);		/* iport */

	pwp = ddi_get_soft_state(pmcs_softc_state, ddi_get_instance(dip));
	if (pwp == NULL) {
		return (-1);
	}
	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		return (-1);
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s called", __func__);
	pwp->blocked = pwp->quiesced = 0;
	mutex_exit(&pwp->lock);

	/*
	 * Run all pending commands.
	 */
	pmcs_scsa_wq_run(pwp);

	/*
	 * Complete all completed commands.
	 * This also unlocks us.
	 */
	PMCS_CQ_RUN(pwp);
	return (0);
}

/*
 * Start commands for a particular device
 * If the actual start of a command fails, return B_FALSE. Any other result
 * is a B_TRUE return.
 */
boolean_t
pmcs_scsa_wq_run_one(pmcs_hw_t *pwp, pmcs_xscsi_t *xp)
{
	pmcs_cmd_t *sp;
	pmcs_phy_t *phyp;
	pmcwork_t *pwrk;
	boolean_t run_one, blocked;
	int rval;

	/*
	 * First, check to see if we're blocked or resource limited
	 */
	mutex_enter(&pwp->lock);
	blocked = pwp->blocked;
	/*
	 * If resource_limited is set, we're resource constrained and
	 * we will run only one work request for this target.
	 */
	run_one = pwp->resource_limited;
	mutex_exit(&pwp->lock);

	if (blocked) {
		/* Queues will get restarted when we get unblocked */
		return (B_TRUE);
	}

	/*
	 * Might as well verify the queue is not empty before moving on
	 */
	mutex_enter(&xp->wqlock);
	if (STAILQ_EMPTY(&xp->wq)) {
		mutex_exit(&xp->wqlock);
		return (B_TRUE);
	}
	mutex_exit(&xp->wqlock);

	/*
	 * If we're draining or resetting, just reschedule work queue and bail.
	 */
	mutex_enter(&xp->statlock);
	if (xp->draining || xp->resetting || xp->special_running ||
	    xp->special_needed) {
		mutex_exit(&xp->statlock);
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		return (B_TRUE);
	}

	/*
	 * Next, check to see if the target is gone.
	 */
	if (xp->dev_gone) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
		    "%s: Flushing wait queue for dead tgt 0x%p", __func__,
		    (void *)xp);
		pmcs_flush_target_queues(pwp, xp, PMCS_TGT_WAIT_QUEUE);
		mutex_exit(&xp->statlock);
		return (B_TRUE);
	}

	/*
	 * Increment the PHY's ref_count now so we know it won't go away
	 * after we drop the target lock.  Drop it before returning.  If the
	 * PHY dies, the commands we attempt to send will fail, but at least
	 * we know we have a real PHY pointer.
	 */
	phyp = xp->phy;
	pmcs_inc_phy_ref_count(phyp);
	mutex_exit(&xp->statlock);

	mutex_enter(&xp->wqlock);
	while ((sp = STAILQ_FIRST(&xp->wq)) != NULL) {
		pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_CBACK, phyp);
		if (pwrk == NULL) {
			/*
			 * Out of work structures: flag the HBA as resource
			 * limited and let the rescheduled worker retry.
			 */
			mutex_exit(&xp->wqlock);
			mutex_enter(&pwp->lock);
			if (pwp->resource_limited == 0) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
				    "%s: out of work structures", __func__);
			}
			pwp->resource_limited = 1;
			SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
			mutex_exit(&pwp->lock);
			return (B_FALSE);
		}
		STAILQ_REMOVE_HEAD(&xp->wq, cmd_next);
		mutex_exit(&xp->wqlock);

		pwrk->xp = xp;
		pwrk->arg = sp;
		sp->cmd_tag = pwrk->htag;
		/* Default to a 1 second timer if the packet didn't set one */
		pwrk->timer = US2WT(CMD2PKT(sp)->pkt_time * 1000000);
		if (pwrk->timer == 0) {
			pwrk->timer = US2WT(1000000);
		}

		pwrk->dtype = xp->dtype;

		if (xp->dtype == SAS) {
			pwrk->ptr = (void *) pmcs_SAS_done;
			if ((rval = pmcs_SAS_run(sp, pwrk)) != 0) {
				sp->cmd_tag = NULL;
				pmcs_dec_phy_ref_count(phyp);
				pmcs_pwork(pwp, pwrk);
				SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
				if (rval == PMCS_WQ_RUN_FAIL_RES) {
					return (B_FALSE);
				} else {
					return (B_TRUE);
				}
			}
		} else {
			ASSERT(xp->dtype == SATA);
			pwrk->ptr = (void *) pmcs_SATA_done;
			if ((rval = pmcs_SATA_run(sp, pwrk)) != 0) {
				sp->cmd_tag = NULL;
				pmcs_dec_phy_ref_count(phyp);
				pmcs_pwork(pwp, pwrk);
				SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
				if (rval == PMCS_WQ_RUN_FAIL_RES) {
					return (B_FALSE);
				} else {
					return (B_TRUE);
				}
			}
		}

		if (run_one) {
			goto wq_out;
		}
		mutex_enter(&xp->wqlock);
	}

	mutex_exit(&xp->wqlock);

wq_out:
	pmcs_dec_phy_ref_count(phyp);
	return (B_TRUE);
}

/*
 * Start commands for all devices.
 */
void
pmcs_scsa_wq_run(pmcs_hw_t *pwp)
{
	pmcs_xscsi_t *xp;
	uint16_t target_start, target;
	boolean_t rval = B_TRUE;

	/*
	 * Round-robin over the target array starting at last_wq_dev so no
	 * single target can starve the others; stop early if a target's
	 * start fails (B_FALSE) due to resource exhaustion.
	 */
	mutex_enter(&pwp->lock);
	target_start = pwp->last_wq_dev;
	target = target_start;

	do {
		xp = pwp->targets[target];
		if ((xp == NULL) || (STAILQ_EMPTY(&xp->wq))) {
			if (++target == pwp->max_dev) {
				target = 0;
			}
			continue;
		}

		mutex_exit(&pwp->lock);
		rval = pmcs_scsa_wq_run_one(pwp, xp);
		mutex_enter(&pwp->lock);

		if (rval == B_FALSE) {
			break;
		}

		if (++target == pwp->max_dev) {
			target = 0;
		}
	} while (target != target_start);

	if (rval) {
		/*
		 * If we were resource limited, but apparently are not now,
		 * reschedule the work queues anyway.
		 */
		if (pwp->resource_limited) {
			SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		}
		pwp->resource_limited = 0; /* Not resource-constrained */
	} else {
		/*
		 * Give everybody a chance, and reschedule to run the queues
		 * again as long as we're limited.
		 */
		pwp->resource_limited = 1;
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
	}

	pwp->last_wq_dev = target;
	mutex_exit(&pwp->lock);
}

/*
 * Pull the completion queue, drop the lock and complete all elements.
 *
 * This is the body of a dedicated completion thread (one per
 * pmcs_cq_thr_info_t); it loops until cq_info.cq_stop is set, sleeping
 * on cq_cv when both the I/O completion callback list and the
 * completion queue are empty.
 */

void
pmcs_scsa_cq_run(void *arg)
{
	pmcs_cq_thr_info_t *cqti = (pmcs_cq_thr_info_t *)arg;
	pmcs_hw_t *pwp = cqti->cq_pwp;
	pmcs_cmd_t *sp, *nxt;
	struct scsi_pkt *pkt;
	pmcs_xscsi_t *tgt;
	pmcs_iocomp_cb_t *ioccb, *ioccb_next;
	pmcs_cb_t callback;

	DTRACE_PROBE1(pmcs__scsa__cq__run__start, pmcs_cq_thr_info_t *, cqti);

	mutex_enter(&pwp->cq_lock);

	while (!pwp->cq_info.cq_stop) {
		/*
		 * First, check the I/O completion callback queue.
		 */
		ioccb = pwp->iocomp_cb_head;
		pwp->iocomp_cb_head = NULL;
		pwp->iocomp_cb_tail = NULL;
		mutex_exit(&pwp->cq_lock);

		while (ioccb) {
			/*
			 * Grab the lock on the work structure. The callback
			 * routine is responsible for clearing it.
			 */
			mutex_enter(&ioccb->pwrk->lock);
			ioccb_next = ioccb->next;
			callback = (pmcs_cb_t)ioccb->pwrk->ptr;
			(*callback)(pwp, ioccb->pwrk,
			    (uint32_t *)((void *)ioccb->iomb));
			kmem_cache_free(pwp->iocomp_cb_cache, ioccb);
			ioccb = ioccb_next;
		}

		/*
		 * Next, run the completion queue
		 */
		mutex_enter(&pwp->cq_lock);
		sp = STAILQ_FIRST(&pwp->cq);
		STAILQ_INIT(&pwp->cq);
		mutex_exit(&pwp->cq_lock);

		DTRACE_PROBE1(pmcs__scsa__cq__run__start__loop,
		    pmcs_cq_thr_info_t *, cqti);

		if (sp && pmcs_check_acc_dma_handle(pwp)) {
			ddi_fm_service_impact(pwp->dip, DDI_SERVICE_UNAFFECTED);
		}

		while (sp) {
			nxt = STAILQ_NEXT(sp, cmd_next);
			pkt = CMD2PKT(sp);
			tgt = sp->cmd_target;
			pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, tgt,
			    "%s: calling completion on %p for tgt %p", __func__,
			    (void *)sp, (void *)tgt);
			if (tgt) {
				mutex_enter(&tgt->statlock);
				ASSERT(tgt->actv_pkts != 0);
				tgt->actv_pkts--;
				mutex_exit(&tgt->statlock);
			}
			scsi_hba_pkt_comp(pkt);
			sp = nxt;
		}

		DTRACE_PROBE1(pmcs__scsa__cq__run__end__loop,
		    pmcs_cq_thr_info_t *, cqti);

		/*
		 * Check if there are more completions to do. If so, and we've
		 * not been told to stop, skip the wait and cycle through again.
		 */
		mutex_enter(&pwp->cq_lock);
		if ((pwp->iocomp_cb_head == NULL) && STAILQ_EMPTY(&pwp->cq) &&
		    !pwp->cq_info.cq_stop) {
			mutex_exit(&pwp->cq_lock);
			mutex_enter(&cqti->cq_thr_lock);
			cv_wait(&cqti->cq_cv, &cqti->cq_thr_lock);
			mutex_exit(&cqti->cq_thr_lock);
			mutex_enter(&pwp->cq_lock);
		}
	}

	mutex_exit(&pwp->cq_lock);
	DTRACE_PROBE1(pmcs__scsa__cq__run__stop, pmcs_cq_thr_info_t *, cqti);
	thread_exit();
}

/*
 * Run a SAS command. Called with pwrk->lock held, returns unlocked.
 *
 * Builds an SSP_INI_IO_START inbound message (with the SSP command IU)
 * and hands it to the chip.  Failure returns re-queue the command at the
 * head of the target's wait queue (or complete it QFULL) and report
 * either PMCS_WQ_RUN_FAIL_RES (resource shortage) or
 * PMCS_WQ_RUN_FAIL_OTHER.
 */
static int
pmcs_SAS_run(pmcs_cmd_t *sp, pmcwork_t *pwrk)
{
	pmcs_hw_t *pwp = CMD2PMC(sp);
	struct scsi_pkt *pkt = CMD2PKT(sp);
	pmcs_xscsi_t *xp = pwrk->xp;
	uint32_t iq, *ptr;
	sas_ssp_cmd_iu_t sc;

	mutex_enter(&xp->statlock);
	if (!xp->assigned) {
		mutex_exit(&xp->statlock);
		return (PMCS_WQ_RUN_FAIL_OTHER);
	}
	if ((xp->actv_cnt >= xp->qdepth) || xp->recover_wait) {
		/* Queue depth exceeded or recovery pending: push back */
		mutex_exit(&xp->statlock);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		return (PMCS_WQ_RUN_FAIL_OTHER);
	}
	GET_IO_IQ_ENTRY(pwp, ptr, pwrk->phy->device_id, iq);
	if (ptr == NULL) {
		mutex_exit(&xp->statlock);
		/*
		 * This is a temporary failure not likely to be unblocked by
		 * commands completing as the test for scheduling the
		 * restart of work is a per-device test.
		 */
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
		    "%s: Failed to get IO IQ entry for tgt %d",
		    __func__, xp->target_num);
		return (PMCS_WQ_RUN_FAIL_RES);

	}

	ptr[0] =
	    LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE, PMCIN_SSP_INI_IO_START));
	ptr[1] = LE_32(pwrk->htag);
	ptr[2] = LE_32(pwrk->phy->device_id);
	ptr[3] = LE_32(pkt->pkt_dma_len);
	if (ptr[3]) {
		ASSERT(pkt->pkt_numcookies);
		if (pkt->pkt_dma_flags & DDI_DMA_READ) {
			ptr[4] = LE_32(PMCIN_DATADIR_2_INI);
		} else {
			ptr[4] = LE_32(PMCIN_DATADIR_2_DEV);
		}
		if (pmcs_dma_load(pwp, sp, ptr)) {
			/*
			 * NOTE(review): iqp_lock[iq] appears to be held from
			 * GET_IO_IQ_ENTRY above -- released here because the
			 * entry will not be submitted.
			 */
			mutex_exit(&pwp->iqp_lock[iq]);
			mutex_exit(&xp->statlock);
			mutex_enter(&xp->wqlock);
			if (STAILQ_EMPTY(&xp->wq)) {
				STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
				mutex_exit(&xp->wqlock);
			} else {
				/* Complete the command QFULL */
				mutex_exit(&xp->wqlock);
				CMD2PKT(sp)->pkt_scbp[0] = STATUS_QFULL;
				CMD2PKT(sp)->pkt_reason = CMD_CMPLT;
				CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS |
				    STATE_GOT_TARGET | STATE_SENT_CMD |
				    STATE_GOT_STATUS;
				mutex_enter(&pwp->cq_lock);
				STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
				PMCS_CQ_RUN_LOCKED(pwp);
				mutex_exit(&pwp->cq_lock);
				pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
				    "%s: Failed to dma_load for tgt %d (QF)",
				    __func__, xp->target_num);
			}
			return (PMCS_WQ_RUN_FAIL_RES);
		}
	} else {
		ptr[4] = LE_32(PMCIN_DATADIR_NONE);
		CLEAN_MESSAGE(ptr, 12);
	}
	xp->actv_cnt++;
	if (xp->actv_cnt > xp->maxdepth) {
		xp->maxdepth = xp->actv_cnt;
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, pwrk->phy, xp, "%s: max depth "
		    "now %u", pwrk->phy->path, xp->maxdepth);
	}
	mutex_exit(&xp->statlock);


#ifdef	DEBUG
	/*
	 * Generate a PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED
	 * event when this goes out on the wire.
	 */
	ptr[4] |= PMCIN_MESSAGE_REPORT;
#endif
	/*
	 * Fill in the SSP IU
	 */

	bzero(&sc, sizeof (sas_ssp_cmd_iu_t));
	bcopy((uint8_t *)&sp->cmd_lun->scsi_lun, sc.lun, sizeof (scsi_lun_t));

	switch (pkt->pkt_flags & FLAG_TAGMASK) {
	case FLAG_HTAG:
		sc.task_attribute = SAS_CMD_TASK_ATTR_HEAD;
		break;
	case FLAG_OTAG:
		sc.task_attribute = SAS_CMD_TASK_ATTR_ORDERED;
		break;
	case FLAG_STAG:
	default:
		sc.task_attribute = SAS_CMD_TASK_ATTR_SIMPLE;
		break;
	}
	(void) memcpy(sc.cdb, pkt->pkt_cdbp,
	    min(SCSA_CDBLEN(sp), sizeof (sc.cdb)));
	(void) memcpy(&ptr[5], &sc, sizeof (sas_ssp_cmd_iu_t));
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	mutex_exit(&pwrk->lock);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
	    "%s: giving pkt %p (tag %x) to the hardware", __func__,
	    (void *)pkt, pwrk->htag);
#ifdef DEBUG
	pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "SAS INI Message", ptr);
#endif
	mutex_enter(&xp->aqlock);
	STAILQ_INSERT_TAIL(&xp->aq, sp, cmd_next);
	mutex_exit(&xp->aqlock);
	INC_IQ_ENTRY(pwp, iq);

	/*
	 * If we just submitted the last command queued from device state
	 * recovery, clear the wq_recovery_tail pointer.
	 */
	mutex_enter(&xp->wqlock);
	if (xp->wq_recovery_tail == sp) {
		xp->wq_recovery_tail = NULL;
	}
	mutex_exit(&xp->wqlock);

	return (PMCS_WQ_RUN_SUCCESS);
}

/*
 * Complete a SAS command
 *
 * Called with pwrk lock held.
 * The free of pwrk releases the lock.
 */

static void
pmcs_SAS_done(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *msg)
{
	pmcs_cmd_t *sp = pwrk->arg;
	pmcs_phy_t *pptr = pwrk->phy;
	pmcs_xscsi_t *xp = pwrk->xp;
	struct scsi_pkt *pkt = CMD2PKT(sp);
	int dead;
	uint32_t sts;
	boolean_t aborted = B_FALSE;
	boolean_t do_ds_recovery = B_FALSE;

	ASSERT(xp != NULL);
	ASSERT(sp != NULL);
	ASSERT(pptr != NULL);

	DTRACE_PROBE4(pmcs__io__done, uint64_t, pkt->pkt_dma_len, int,
	    (pkt->pkt_dma_flags & DDI_DMA_READ) != 0, hrtime_t, pwrk->start,
	    hrtime_t, gethrtime());

	dead = pwrk->dead;

	/* msg may be NULL; sts is the chip's completion status from word 2 */
	if (msg) {
		sts = LE_32(msg[2]);
	} else {
		sts = 0;
	}

	if (dead != 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, "%s: dead cmd tag "
		    "0x%x for %s", __func__, pwrk->htag, pptr->path);
		goto out;
	}

	if (sts == PMCOUT_STATUS_ABORTED) {
		aborted = B_TRUE;
	}

	if (pwrk->state == PMCS_WORK_STATE_TIMED_OUT) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: cmd 0x%p (tag 0x%x) timed out for %s",
		    __func__, (void *)sp, pwrk->htag, pptr->path);
		CMD2PKT(sp)->pkt_scbp[0] = STATUS_GOOD;
		CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD;
		CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT;
		goto out;
	}

	/*
	 * If the status isn't okay but not underflow,
	 * step to the side and parse the (possible) error.
	 */
#ifdef DEBUG
	if (msg) {
		pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "Outbound Message", msg);
	}
#endif
	if (!msg) {
		goto out;
	}

	switch (sts) {
	case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
	case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL:
	case PMCOUT_STATUS_IO_DS_IN_RECOVERY:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: PHY %s requires DS recovery (status=%d)",
		    __func__, pptr->path, sts);
		do_ds_recovery = B_TRUE;
		break;
	case PMCOUT_STATUS_UNDERFLOW:
		/* Underflow is benign: record the residual and treat as OK */
		(void) pmcs_set_resid(pkt, pkt->pkt_dma_len, LE_32(msg[3]));
		pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW, NULL, NULL,
		    "%s: underflow %u for cdb 0x%x",
		    __func__, LE_32(msg[3]), pkt->pkt_cdbp[0] & 0xff);
		sts = PMCOUT_STATUS_OK;
		msg[3] = 0;
		break;
	case PMCOUT_STATUS_OK:
		pkt->pkt_resid = 0;
		break;
	}

	if (sts != PMCOUT_STATUS_OK) {
		pmcs_ioerror(pwp, SAS, pwrk, msg, sts);
	} else {
		if (msg[3]) {
			uint8_t local[PMCS_QENTRY_SIZE << 1], *xd;
			sas_ssp_rsp_iu_t *rptr = (void *)local;
			const int lim =
			    (PMCS_QENTRY_SIZE << 1) - SAS_RSP_HDR_SIZE;
			static const uint8_t ssp_rsp_evec[] = {
				0x58, 0x61, 0x56, 0x72, 0x00
			};

			/*
			 * Transform the first part of the response
			 * to host canonical form. This gives us enough
			 * information to figure out what to do with the
			 * rest (which remains unchanged in the incoming
			 * message which can be up to two queue entries
			 * in length).
			 */
			pmcs_endian_transform(pwp, local, &msg[5],
			    ssp_rsp_evec);
			xd = (uint8_t *)(&msg[5]);
			xd += SAS_RSP_HDR_SIZE;

			if (rptr->datapres == SAS_RSP_DATAPRES_RESPONSE_DATA) {
				if (rptr->response_data_length != 4) {
					pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
					    "Bad SAS RESPONSE DATA LENGTH",
					    msg);
					pkt->pkt_reason = CMD_TRAN_ERR;
					goto out;
				}
				/* Reuse sts for the (big-endian) IU response code */
				(void) memcpy(&sts, xd, sizeof (uint32_t));
				sts = BE_32(sts);
				/*
				 * The only response code we should legally get
				 * here is an INVALID FRAME response code.
				 */
				if (sts == SAS_RSP_INVALID_FRAME) {
					pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
					    "%s: pkt %p tgt %u path %s "
					    "completed: INVALID FRAME response",
					    __func__, (void *)pkt,
					    xp->target_num, pptr->path);
				} else {
					pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
					    "%s: pkt %p tgt %u path %s "
					    "completed: illegal response 0x%x",
					    __func__, (void *)pkt,
					    xp->target_num, pptr->path, sts);
				}
				pkt->pkt_reason = CMD_TRAN_ERR;
				goto out;
			}
			if (rptr->datapres == SAS_RSP_DATAPRES_SENSE_DATA) {
				uint32_t slen;
				slen = rptr->sense_data_length;
				if (slen > lim) {
					slen = lim;
				}
				pmcs_latch_status(pwp, sp, rptr->status, xd,
				    slen, pptr->path);
			} else if (rptr->datapres == SAS_RSP_DATAPRES_NO_DATA) {
				pmcout_ssp_comp_t *sspcp;
				sspcp = (pmcout_ssp_comp_t *)msg;
				uint32_t *residp;
				/*
				 * This is the case for a plain SCSI status.
				 * Note: If RESC_V is set and we're here, there
				 * is a residual.  We need to find it and update
				 * the packet accordingly.
				 */
				pmcs_latch_status(pwp, sp, rptr->status, NULL,
				    0, pptr->path);

				if (sspcp->resc_v) {
					/*
					 * Point residual to the SSP_RESP_IU
					 */
					residp = (uint32_t *)(sspcp + 1);
					/*
					 * param contains the number of bytes
					 * between where the SSP_RESP_IU may
					 * or may not be and the residual.
					 * Increment residp by the appropriate
					 * number of words: (param+resc_pad)/4).
					 */
					residp += (LE_32(sspcp->param) +
					    sspcp->resc_pad) /
					    sizeof (uint32_t);
					pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW,
					    pptr, xp, "%s: tgt 0x%p "
					    "residual %d for pkt 0x%p",
					    __func__, (void *) xp, *residp,
					    (void *) pkt);
					ASSERT(LE_32(*residp) <=
					    pkt->pkt_dma_len);
					(void) pmcs_set_resid(pkt,
					    pkt->pkt_dma_len, LE_32(*residp));
				}
			} else {
				pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
				    "illegal SAS response", msg);
				pkt->pkt_reason = CMD_TRAN_ERR;
				goto out;
			}
		} else {
			pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0,
			    pptr->path);
		}
		if (pkt->pkt_dma_len) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp,
	    "%s: pkt %p tgt %u done reason=%x state=%x resid=%ld status=%x",
	    __func__, (void *)pkt, xp->target_num, pkt->pkt_reason,
	    pkt->pkt_state, pkt->pkt_resid, pkt->pkt_scbp[0]);

	if (pwrk->state == PMCS_WORK_STATE_ABORTED) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: scsi_pkt 0x%p aborted for PHY %s; work = 0x%p",
		    __func__, (void *)pkt, pptr->path, (void *)pwrk);
		aborted = B_TRUE;
	}

out:
	pmcs_dma_unload(pwp, sp);
	mutex_enter(&xp->statlock);

	/*
	 * If the device no longer has a PHY pointer, clear the PHY pointer
	 * from the work structure before we free it.  Otherwise, pmcs_pwork
	 * may decrement the ref_count on a PHY that's been freed.
	 */
	if (xp->phy == NULL) {
		pwrk->phy = NULL;
	}

	pmcs_pwork(pwp, pwrk);

	/*
	 * If the device is gone, we only put this command on the completion
	 * queue if the work structure is not marked dead.  If it's marked
	 * dead, it will already have been put there.
	 */
	if (xp->dev_gone) {
		mutex_exit(&xp->statlock);
		if (!dead) {
			mutex_enter(&xp->aqlock);
			STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next);
			mutex_exit(&xp->aqlock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, xp,
			    "%s: Removing cmd 0x%p (htag 0x%x) from aq",
			    __func__, (void *)sp, sp->cmd_tag);
			mutex_enter(&pwp->cq_lock);
			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
			PMCS_CQ_RUN_LOCKED(pwp);
			mutex_exit(&pwp->cq_lock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp,
			    "%s: Completing command for dead target 0x%p",
			    __func__, (void *)xp);
		}
		return;
	}

	ASSERT(xp->actv_cnt > 0);
	if (--(xp->actv_cnt) == 0) {
		/* Last active command: wake anyone waiting in quiesce drain */
		if (xp->draining) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, xp,
			    "%s: waking up drain waiters", __func__);
			cv_signal(&pwp->drain_cv);
		}
	}
	mutex_exit(&xp->statlock);

	/*
	 * If the status is other than OK, determine if it's something that
	 * is worth re-attempting enumeration.  If so, mark the PHY.
	 */
	if (sts != PMCOUT_STATUS_OK) {
		pmcs_status_disposition(pptr, sts);
	}

	if (dead == 0) {
#ifdef	DEBUG
		/* DEBUG: verify sp really is on the active queue first */
		pmcs_cmd_t *wp;
		mutex_enter(&xp->aqlock);
		STAILQ_FOREACH(wp, &xp->aq, cmd_next) {
			if (wp == sp) {
				break;
			}
		}
		ASSERT(wp != NULL);
#else
		mutex_enter(&xp->aqlock);
#endif
		pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, xp,
		    "%s: Removing cmd 0x%p (htag 0x%x) from aq", __func__,
		    (void *)sp, sp->cmd_tag);
		STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next);
		if (aborted) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
			    "%s: Aborted cmd for tgt 0x%p, signaling waiters",
			    __func__, (void *)xp);
			cv_signal(&xp->abort_cv);
		}
		mutex_exit(&xp->aqlock);
	}

	/*
	 * If do_ds_recovery is set, we need to initiate device state
	 * recovery.  In this case, we put this I/O back on the head of
	 * the wait queue to run again after recovery is complete
	 */
	if (do_ds_recovery) {
		mutex_enter(&xp->statlock);
		pmcs_start_dev_state_recovery(xp, pptr);
		mutex_exit(&xp->statlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, xp, "%s: Putting cmd 0x%p "
		    "back on wq during recovery for tgt 0x%p", __func__,
		    (void *)sp, (void *)xp);
		mutex_enter(&xp->wqlock);
		if (xp->wq_recovery_tail == NULL) {
			STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		} else {
			/*
			 * If there are other I/Os waiting at the head due to
			 * device state recovery, add this one in the right spot
			 * to maintain proper order.
			 */
			STAILQ_INSERT_AFTER(&xp->wq, xp->wq_recovery_tail, sp,
			    cmd_next);
		}
		xp->wq_recovery_tail = sp;
		mutex_exit(&xp->wqlock);
	} else {
		/*
		 * If we're not initiating device state recovery and this
		 * command was not "dead", put it on the completion queue
		 */
		if (!dead) {
			mutex_enter(&pwp->cq_lock);
			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
			PMCS_CQ_RUN_LOCKED(pwp);
			mutex_exit(&pwp->cq_lock);
		}
	}
}

/*
 * Run a SATA command (normal reads and writes),
 * or block and schedule a SATL interpretation
 * of the command.
 *
 * Called with pwrk lock held, returns unlocked.
 */

static int
pmcs_SATA_run(pmcs_cmd_t *sp, pmcwork_t *pwrk)
{
	pmcs_hw_t *pwp = CMD2PMC(sp);
	struct scsi_pkt *pkt = CMD2PKT(sp);
	pmcs_xscsi_t *xp;
	uint8_t cdb_base, asc, tag;
	uint32_t *ptr, iq, nblk, i, mtype;
	fis_t fis;
	size_t amt;
	uint64_t lba;

	xp = pwrk->xp;

	/*
	 * First, see if this is just a plain read/write command.
	 * If not, we have to queue it up for processing, block
	 * any additional commands from coming in, and wake up
	 * the thread that will process this command.
	 */
	cdb_base = pkt->pkt_cdbp[0] & 0x1f;
	if (cdb_base != SCMD_READ && cdb_base != SCMD_WRITE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, NULL,
		    "%s: special SATA cmd %p", __func__, (void *)sp);

		ASSERT(xp->phy != NULL);
		pmcs_pwork(pwp, pwrk);
		pmcs_lock_phy(xp->phy);
		mutex_enter(&xp->statlock);
		xp->special_needed = 1; /* Set the special_needed flag */
		STAILQ_INSERT_TAIL(&xp->sq, sp, cmd_next);
		if (pmcs_run_sata_special(pwp, xp)) {
			SCHEDULE_WORK(pwp, PMCS_WORK_SATA_RUN);
		}
		mutex_exit(&xp->statlock);
		pmcs_unlock_phy(xp->phy);

		return (PMCS_WQ_RUN_SUCCESS);
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "%s: regular cmd", __func__);

	mutex_enter(&xp->statlock);
	if (!xp->assigned) {
		mutex_exit(&xp->statlock);
		return (PMCS_WQ_RUN_FAIL_OTHER);
	}
	if (xp->special_running || xp->special_needed || xp->recover_wait) {
		mutex_exit(&xp->statlock);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		/*
		 * By the time we get here the special
		 * commands running or waiting to be run
		 * may have come and gone, so kick our
		 * worker to run the waiting queues
		 * just in case.
		 */
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		return (PMCS_WQ_RUN_FAIL_OTHER);
	}
	lba = xp->capacity;
	mutex_exit(&xp->statlock);

	/*
	 * Extract data length and lba parameters out of the command. The
	 * function pmcs_SATA_rwparm returns a non-zero ASC value if the CDB
	 * values are considered illegal.
2400 */ 2401 asc = pmcs_SATA_rwparm(pkt->pkt_cdbp, &nblk, &lba, lba); 2402 if (asc) { 2403 uint8_t sns[18]; 2404 bzero(sns, sizeof (sns)); 2405 sns[0] = 0xf0; 2406 sns[2] = 0x5; 2407 sns[12] = asc; 2408 pmcs_latch_status(pwp, sp, STATUS_CHECK, sns, sizeof (sns), 2409 pwrk->phy->path); 2410 pmcs_pwork(pwp, pwrk); 2411 mutex_enter(&pwp->cq_lock); 2412 STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next); 2413 PMCS_CQ_RUN_LOCKED(pwp); 2414 mutex_exit(&pwp->cq_lock); 2415 return (PMCS_WQ_RUN_SUCCESS); 2416 } 2417 2418 /* 2419 * If the command decodes as not moving any data, complete it here. 2420 */ 2421 amt = nblk; 2422 amt <<= 9; 2423 amt = pmcs_set_resid(pkt, amt, nblk << 9); 2424 if (amt == 0) { 2425 pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, 2426 pwrk->phy->path); 2427 pmcs_pwork(pwp, pwrk); 2428 mutex_enter(&pwp->cq_lock); 2429 STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next); 2430 PMCS_CQ_RUN_LOCKED(pwp); 2431 mutex_exit(&pwp->cq_lock); 2432 return (PMCS_WQ_RUN_SUCCESS); 2433 } 2434 2435 /* 2436 * Get an inbound queue entry for this I/O 2437 */ 2438 GET_IO_IQ_ENTRY(pwp, ptr, xp->phy->device_id, iq); 2439 if (ptr == NULL) { 2440 /* 2441 * This is a temporary failure not likely to unblocked by 2442 * commands completing as the test for scheduling the 2443 * restart of work is a per-device test. 2444 */ 2445 mutex_enter(&xp->wqlock); 2446 STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next); 2447 mutex_exit(&xp->wqlock); 2448 pmcs_dma_unload(pwp, sp); 2449 SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES); 2450 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp, 2451 "%s: Failed to get IO IQ entry for tgt %d", 2452 __func__, xp->target_num); 2453 return (PMCS_WQ_RUN_FAIL_RES); 2454 } 2455 2456 /* 2457 * Get a tag. At this point, hold statlock until the tagmap is 2458 * updated (just prior to sending the cmd to the hardware). 
2459 */ 2460 mutex_enter(&xp->statlock); 2461 for (tag = 0; tag < xp->qdepth; tag++) { 2462 if ((xp->tagmap & (1 << tag)) == 0) { 2463 break; 2464 } 2465 } 2466 2467 if (tag == xp->qdepth) { 2468 mutex_exit(&xp->statlock); 2469 mutex_exit(&pwp->iqp_lock[iq]); 2470 mutex_enter(&xp->wqlock); 2471 STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next); 2472 mutex_exit(&xp->wqlock); 2473 return (PMCS_WQ_RUN_FAIL_OTHER); 2474 } 2475 2476 sp->cmd_satltag = (uint8_t)tag; 2477 2478 /* 2479 * Set up the command 2480 */ 2481 bzero(fis, sizeof (fis)); 2482 ptr[0] = 2483 LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE, PMCIN_SATA_HOST_IO_START)); 2484 ptr[1] = LE_32(pwrk->htag); 2485 ptr[2] = LE_32(pwrk->phy->device_id); 2486 ptr[3] = LE_32(amt); 2487 2488 if (xp->ncq) { 2489 mtype = SATA_PROTOCOL_FPDMA | (tag << 16); 2490 fis[0] = ((nblk & 0xff) << 24) | (C_BIT << 8) | FIS_REG_H2DEV; 2491 if (cdb_base == SCMD_READ) { 2492 fis[0] |= (READ_FPDMA_QUEUED << 16); 2493 } else { 2494 fis[0] |= (WRITE_FPDMA_QUEUED << 16); 2495 } 2496 fis[1] = (FEATURE_LBA << 24) | (lba & 0xffffff); 2497 fis[2] = ((nblk & 0xff00) << 16) | ((lba >> 24) & 0xffffff); 2498 fis[3] = tag << 3; 2499 } else { 2500 int op; 2501 fis[0] = (C_BIT << 8) | FIS_REG_H2DEV; 2502 if (xp->pio) { 2503 mtype = SATA_PROTOCOL_PIO; 2504 if (cdb_base == SCMD_READ) { 2505 op = READ_SECTORS_EXT; 2506 } else { 2507 op = WRITE_SECTORS_EXT; 2508 } 2509 } else { 2510 mtype = SATA_PROTOCOL_DMA; 2511 if (cdb_base == SCMD_READ) { 2512 op = READ_DMA_EXT; 2513 } else { 2514 op = WRITE_DMA_EXT; 2515 } 2516 } 2517 fis[0] |= (op << 16); 2518 fis[1] = (FEATURE_LBA << 24) | (lba & 0xffffff); 2519 fis[2] = (lba >> 24) & 0xffffff; 2520 fis[3] = nblk; 2521 } 2522 2523 if (cdb_base == SCMD_READ) { 2524 ptr[4] = LE_32(mtype | PMCIN_DATADIR_2_INI); 2525 } else { 2526 ptr[4] = LE_32(mtype | PMCIN_DATADIR_2_DEV); 2527 } 2528 #ifdef DEBUG 2529 /* 2530 * Generate a PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED 2531 * event when this goes out on the wire. 
2532 */ 2533 ptr[4] |= PMCIN_MESSAGE_REPORT; 2534 #endif 2535 for (i = 0; i < (sizeof (fis_t))/(sizeof (uint32_t)); i++) { 2536 ptr[i+5] = LE_32(fis[i]); 2537 } 2538 if (pmcs_dma_load(pwp, sp, ptr)) { 2539 mutex_exit(&xp->statlock); 2540 mutex_exit(&pwp->iqp_lock[iq]); 2541 mutex_enter(&xp->wqlock); 2542 STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next); 2543 mutex_exit(&xp->wqlock); 2544 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp, 2545 "%s: Failed to dma_load for tgt %d", 2546 __func__, xp->target_num); 2547 return (PMCS_WQ_RUN_FAIL_RES); 2548 2549 } 2550 2551 pwrk->state = PMCS_WORK_STATE_ONCHIP; 2552 mutex_exit(&pwrk->lock); 2553 xp->tagmap |= (1 << tag); 2554 xp->actv_cnt++; 2555 if (xp->actv_cnt > xp->maxdepth) { 2556 xp->maxdepth = xp->actv_cnt; 2557 pmcs_prt(pwp, PMCS_PRT_DEBUG2, pwrk->phy, xp, 2558 "%s: max depth now %u", pwrk->phy->path, xp->maxdepth); 2559 } 2560 mutex_exit(&xp->statlock); 2561 mutex_enter(&xp->aqlock); 2562 STAILQ_INSERT_TAIL(&xp->aq, sp, cmd_next); 2563 mutex_exit(&xp->aqlock); 2564 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, 2565 "%s: giving pkt %p to hardware", __func__, (void *)pkt); 2566 #ifdef DEBUG 2567 pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "SATA INI Message", ptr); 2568 #endif 2569 INC_IQ_ENTRY(pwp, iq); 2570 2571 return (PMCS_WQ_RUN_SUCCESS); 2572 } 2573 2574 /* 2575 * Complete a SATA command. Called with pwrk lock held. 
 */
void
pmcs_SATA_done(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *msg)
{
	pmcs_cmd_t *sp = pwrk->arg;
	struct scsi_pkt *pkt = CMD2PKT(sp);
	pmcs_phy_t *pptr = pwrk->phy;
	int dead;
	uint32_t sts;
	pmcs_xscsi_t *xp;
	boolean_t aborted = B_FALSE;

	xp = pwrk->xp;
	ASSERT(xp != NULL);

	DTRACE_PROBE4(pmcs__io__done, uint64_t, pkt->pkt_dma_len, int,
	    (pkt->pkt_dma_flags & DDI_DMA_READ) != 0, hrtime_t, pwrk->start,
	    hrtime_t, gethrtime());

	dead = pwrk->dead;

	/* msg may be NULL (e.g. local completion); treat status as 0 then. */
	if (msg) {
		sts = LE_32(msg[2]);
	} else {
		sts = 0;
	}

	if (dead != 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, "%s: dead cmd tag "
		    "0x%x for %s", __func__, pwrk->htag, pptr->path);
		goto out;
	}
	if ((pwrk->state == PMCS_WORK_STATE_TIMED_OUT) &&
	    (sts != PMCOUT_STATUS_ABORTED)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: cmd 0x%p (tag 0x%x) timed out for %s",
		    __func__, (void *)sp, pwrk->htag, pptr->path);
		CMD2PKT(sp)->pkt_scbp[0] = STATUS_GOOD;
		/* pkt_reason already set to CMD_TIMEOUT */
		ASSERT(CMD2PKT(sp)->pkt_reason == CMD_TIMEOUT);
		CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD;
		CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT;
		goto out;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp, "%s: pkt %p tgt %u done",
	    __func__, (void *)pkt, xp->target_num);

	/*
	 * If the status isn't okay but not underflow,
	 * step to the side and parse the (possible) error.
	 */
#ifdef	DEBUG
	if (msg) {
		pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "Outbound Message", msg);
	}
#endif
	if (!msg) {
		goto out;
	}

	/*
	 * If the status isn't okay or we got a FIS response of some kind,
	 * step to the side and parse the (possible) error.
	 */
	if ((sts != PMCOUT_STATUS_OK) || (LE_32(msg[3]) != 0)) {
		if (sts == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) {
			/*
			 * Device state non-operational: attempt a link reset
			 * as recovery.  The pwrk lock must be dropped before
			 * taking the PHY lock (lock ordering), and re-taken
			 * afterwards.
			 */
			mutex_exit(&pwrk->lock);
			pmcs_lock_phy(pptr);
			mutex_enter(&xp->statlock);
			if ((xp->resetting == 0) && (xp->reset_success != 0) &&
			    (xp->reset_wait == 0)) {
				mutex_exit(&xp->statlock);
				if (pmcs_reset_phy(pwp, pptr,
				    PMCS_PHYOP_LINK_RESET) != 0) {
					pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
					    "%s: PHY (%s) Local Control/Link "
					    "Reset FAILED as part of error "
					    "recovery", __func__, pptr->path);
				}
				mutex_enter(&xp->statlock);
			}
			mutex_exit(&xp->statlock);
			pmcs_unlock_phy(pptr);
			mutex_enter(&pwrk->lock);
		}
		pmcs_ioerror(pwp, SATA, pwrk, msg, sts);
	} else {
		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0,
		    pwrk->phy->path);
		pkt->pkt_state |= STATE_XFERRED_DATA;
		pkt->pkt_resid = 0;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp,
	    "%s: pkt %p tgt %u done reason=%x state=%x resid=%ld status=%x",
	    __func__, (void *)pkt, xp->target_num, pkt->pkt_reason,
	    pkt->pkt_state, pkt->pkt_resid, pkt->pkt_scbp[0]);

	if (pwrk->state == PMCS_WORK_STATE_ABORTED) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: scsi_pkt 0x%p aborted for PHY %s; work = 0x%p",
		    __func__, (void *)pkt, pptr->path, (void *)pwrk);
		aborted = B_TRUE;
	}

out:
	pmcs_dma_unload(pwp, sp);
	mutex_enter(&xp->statlock);
	/* Release this command's SATL tag bit. */
	xp->tagmap &= ~(1 << sp->cmd_satltag);

	/*
	 * If the device no longer has a PHY pointer, clear the PHY pointer
	 * from the work structure before we free it. Otherwise, pmcs_pwork
	 * may decrement the ref_count on a PHY that's been freed.
	 */
	if (xp->phy == NULL) {
		pwrk->phy = NULL;
	}

	pmcs_pwork(pwp, pwrk);

	/*
	 * If the device is gone, only complete the command here if the work
	 * structure was not marked dead (dead commands were already queued).
	 */
	if (xp->dev_gone) {
		mutex_exit(&xp->statlock);
		if (!dead) {
			mutex_enter(&xp->aqlock);
			STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next);
			mutex_exit(&xp->aqlock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, xp,
			    "%s: Removing cmd 0x%p (htag 0x%x) from aq",
			    __func__, (void *)sp, sp->cmd_tag);
			mutex_enter(&pwp->cq_lock);
			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
			PMCS_CQ_RUN_LOCKED(pwp);
			mutex_exit(&pwp->cq_lock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp,
			    "%s: Completing command for dead target 0x%p",
			    __func__, (void *)xp);
		}
		return;
	}

	/*
	 * Drop the active count; on the last one out, wake drain waiters
	 * or kick the SATA special-command worker if one is pending.
	 */
	ASSERT(xp->actv_cnt > 0);
	if (--(xp->actv_cnt) == 0) {
		if (xp->draining) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, xp,
			    "%s: waking up drain waiters", __func__);
			cv_signal(&pwp->drain_cv);
		} else if (xp->special_needed) {
			SCHEDULE_WORK(pwp, PMCS_WORK_SATA_RUN);
		}
	}
	mutex_exit(&xp->statlock);

	/*
	 * If the status is other than OK, determine if it's something that
	 * is worth re-attempting enumeration. If so, mark the PHY.
	 */
	if (sts != PMCOUT_STATUS_OK) {
		pmcs_status_disposition(pptr, sts);
	}

	if (dead == 0) {
#ifdef	DEBUG
		/*
		 * In DEBUG builds, verify the command really is still on the
		 * active queue before removing it.
		 */
		pmcs_cmd_t *wp;
		mutex_enter(&xp->aqlock);
		STAILQ_FOREACH(wp, &xp->aq, cmd_next) {
			if (wp == sp) {
				break;
			}
		}
		ASSERT(wp != NULL);
#else
		mutex_enter(&xp->aqlock);
#endif
		STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next);
		if (aborted) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
			    "%s: Aborted cmd for tgt 0x%p, signaling waiters",
			    __func__, (void *)xp);
			cv_signal(&xp->abort_cv);
		}
		mutex_exit(&xp->aqlock);
		mutex_enter(&pwp->cq_lock);
		STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
		PMCS_CQ_RUN_LOCKED(pwp);
		mutex_exit(&pwp->cq_lock);
	}
}

/*
 * Decode the transfer length (*xfr, in blocks) and starting LBA (*lba)
 * from a READ/WRITE CDB.  Returns 0 on success, or an ASC code on error:
 * 0x24 (invalid field in CDB) or 0x21 (logical block out of range,
 * checked against lbamax).
 *
 * NOTE(review): the G4 (12-byte CDB) case decodes only bytes 6-8 of the
 * transfer-length field, which is 4 bytes (6-9) in SBC — confirm intent.
 * NOTE(review): there is no default case; a read/write-group opcode not
 * listed here would leave *xfr/*lba unset with asc == 0 — confirm callers
 * can never pass such an opcode.
 */
static uint8_t
pmcs_SATA_rwparm(uint8_t *cdb, uint32_t *xfr, uint64_t *lba, uint64_t lbamax)
{
	uint8_t asc = 0;
	switch (cdb[0]) {
	case SCMD_READ_G5:
	case SCMD_WRITE_G5:
		/* 16-byte CDB: 8-byte LBA (bytes 2-9), 4-byte count. */
		*xfr =
		    (((uint32_t)cdb[10]) << 24) |
		    (((uint32_t)cdb[11]) << 16) |
		    (((uint32_t)cdb[12]) << 8) |
		    ((uint32_t)cdb[13]);
		*lba =
		    (((uint64_t)cdb[2]) << 56) |
		    (((uint64_t)cdb[3]) << 48) |
		    (((uint64_t)cdb[4]) << 40) |
		    (((uint64_t)cdb[5]) << 32) |
		    (((uint64_t)cdb[6]) << 24) |
		    (((uint64_t)cdb[7]) << 16) |
		    (((uint64_t)cdb[8]) << 8) |
		    ((uint64_t)cdb[9]);
		/* Check for illegal bits */
		if (cdb[15]) {
			asc = 0x24;	/* invalid field in cdb */
		}
		break;
	case SCMD_READ_G4:
	case SCMD_WRITE_G4:
		/* 12-byte CDB: 4-byte LBA (bytes 2-5). */
		*xfr =
		    (((uint32_t)cdb[6]) << 16) |
		    (((uint32_t)cdb[7]) << 8) |
		    ((uint32_t)cdb[8]);
		*lba =
		    (((uint32_t)cdb[2]) << 24) |
		    (((uint32_t)cdb[3]) << 16) |
		    (((uint32_t)cdb[4]) << 8) |
		    ((uint32_t)cdb[5]);
		/* Check for illegal bits */
		if (cdb[11]) {
			asc = 0x24;	/* invalid field in cdb */
		}
		break;
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
		/* 10-byte CDB: 4-byte LBA (bytes 2-5), 2-byte count. */
		*xfr = (((uint32_t)cdb[7]) << 8) | ((uint32_t)cdb[8]);
		*lba =
		    (((uint32_t)cdb[2]) << 24) |
		    (((uint32_t)cdb[3]) << 16) |
		    (((uint32_t)cdb[4]) << 8) |
		    ((uint32_t)cdb[5]);
		/* Check for illegal bits */
		if (cdb[9]) {
			asc = 0x24;	/* invalid field in cdb */
		}
		break;
	case SCMD_READ:
	case SCMD_WRITE:
		/* 6-byte CDB: 21-bit LBA, 1-byte count (0 means 256). */
		*xfr = cdb[4];
		if (*xfr == 0) {
			*xfr = 256;
		}
		*lba =
		    (((uint32_t)cdb[1] & 0x1f) << 16) |
		    (((uint32_t)cdb[2]) << 8) |
		    ((uint32_t)cdb[3]);
		/* Check for illegal bits */
		if (cdb[5]) {
			asc = 0x24;	/* invalid field in cdb */
		}
		break;
	}

	if (asc == 0) {
		if ((*lba + *xfr) > lbamax) {
			asc = 0x21;	/* logical block out of range */
		}
	}
	return (asc);
}

/*
 * Translate a PMC completion status into scsi_pkt reason/state/status.
 * Called with pwrk lock held.
 */
static void
pmcs_ioerror(pmcs_hw_t *pwp, pmcs_dtype_t t, pmcwork_t *pwrk, uint32_t *w,
    uint32_t status)
{
	/* Canned fixed-format sense: POWER ON/RESET (ASC 0x28). */
	static uint8_t por[] = {
		0xf0, 0x0, 0x6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x28
	};
	/* Canned fixed-format sense: SCSI PARITY ERROR (ASC/ASCQ 0x47/05). */
	static uint8_t parity[] = {
		0xf0, 0x0, 0xb, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x47, 5
	};
	const char *msg;
	char buf[20];
	pmcs_cmd_t *sp = pwrk->arg;
	pmcs_phy_t *phyp = pwrk->phy;
	struct scsi_pkt *pkt = CMD2PKT(sp);
	uint32_t resid;

	ASSERT(w != NULL);
	resid = LE_32(w[3]);

	msg = pmcs_status_str(status);
	if (msg == NULL) {
		(void) snprintf(buf, sizeof (buf), "Error 0x%x", status);
		msg = buf;
	}

	if (status != PMCOUT_STATUS_OK) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG1, phyp, NULL,
		    "%s: device %s tag 0x%x status %s @ %llu", __func__,
		    phyp->path, pwrk->htag, msg,
		    (unsigned long long)gethrtime());
	}

	pkt->pkt_reason = CMD_CMPLT;	/* default reason */

	switch (status) {
	case PMCOUT_STATUS_OK:
		if (t == SATA) {
			/*
			 * "OK" with a D2H FIS payload still represents a
			 * device error for SATA; decode and dump the FIS.
			 */
			int i;
			fis_t fis;
			for (i = 0; i < sizeof (fis) / sizeof (fis[0]); i++) {
				fis[i] = LE_32(w[4+i]);
			}
			if ((fis[0] & 0xff) != FIS_REG_D2H) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL,
				    "unexpected fis code 0x%x", fis[0] & 0xff);
			} else {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL,
				    "FIS ERROR");
				pmcs_fis_dump(pwp, fis);
			}
			pkt->pkt_reason = CMD_TRAN_ERR;
			break;
		}
		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, phyp->path);
		break;

	case PMCOUT_STATUS_ABORTED:
		/*
		 * Command successfully aborted.
		 */
		if (phyp->dead) {
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_state = STATE_GOT_BUS;
		} else if (pwrk->ssp_event != 0) {
			pkt->pkt_reason = CMD_TRAN_ERR;
			pkt->pkt_state = STATE_GOT_BUS;
		} else if (pwrk->state == PMCS_WORK_STATE_TIMED_OUT) {
			pkt->pkt_reason = CMD_TIMEOUT;
			pkt->pkt_statistics |= STAT_TIMEOUT;
			pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
			    STATE_SENT_CMD;
		} else {
			pkt->pkt_reason = CMD_ABORTED;
			pkt->pkt_statistics |= STAT_ABORTED;
			pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
			    STATE_SENT_CMD;
		}

		/*
		 * PMCS_WORK_STATE_TIMED_OUT doesn't need to be preserved past
		 * this point, so go ahead and mark it as aborted.
		 */
		pwrk->state = PMCS_WORK_STATE_ABORTED;
		break;

	case PMCOUT_STATUS_UNDERFLOW:
		/*
		 * This will only get called for SATA
		 */
		pkt->pkt_resid = resid;
		if (pkt->pkt_dma_len < pkt->pkt_resid) {
			(void) pmcs_set_resid(pkt, pkt->pkt_dma_len, resid);
		}
		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, phyp->path);
		break;

	case PMCOUT_STATUS_NO_DEVICE:
	case PMCOUT_STATUS_XFER_ERROR_SATA_LINK_TIMEOUT:
		pkt->pkt_reason = CMD_DEV_GONE;
		break;

	case PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION:
		/*
		 * Need to do rediscovery. We probably have
		 * the wrong device (disk swap), so kill
		 * this one.
		 */
		/* FALLTHROUGH */
	case PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_ERROR:
		/*
		 * Need to do rediscovery.
		 */
		if (!phyp->dead) {
			/* Drop pwrk lock before PHY lock (lock ordering). */
			mutex_exit(&pwrk->lock);
			pmcs_lock_phy(pwrk->phy);
			pmcs_kill_changed(pwp, pwrk->phy, 0);
			pmcs_unlock_phy(pwrk->phy);
			mutex_enter(&pwrk->lock);
			pkt->pkt_reason = CMD_INCOMPLETE;
			pkt->pkt_state = STATE_GOT_BUS;
		} else {
			pkt->pkt_reason = CMD_DEV_GONE;
		}
		break;

	case PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
	case PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION:
	case PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED:
		/* cmd is pending on the target */
	case PMCOUT_STATUS_XFER_ERROR_OFFSET_MISMATCH:
	case PMCOUT_STATUS_XFER_ERROR_REJECTED_NCQ_MODE:
		/* transitory - commands sent while in NCQ failure mode */
	case PMCOUT_STATUS_XFER_ERROR_ABORTED_NCQ_MODE:
		/* NCQ failure */
	case PMCOUT_STATUS_IO_PORT_IN_RESET:
	case PMCOUT_STATUS_XFER_ERR_BREAK:
	case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state = STATE_GOT_BUS;
		break;

	case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, phyp->target,
		    "STATUS_BUSY for htag 0x%08x", sp->cmd_tag);
		pmcs_latch_status(pwp, sp, STATUS_BUSY, NULL, 0, phyp->path);
		break;

	case PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
		/* synthesize a RESERVATION CONFLICT */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, phyp->target,
		    "%s: Potential affiliation active on 0x%" PRIx64, __func__,
		    pmcs_barray2wwn(phyp->sas_address));
		pmcs_latch_status(pwp, sp, STATUS_RESERVATION_CONFLICT, NULL,
		    0, phyp->path);
		break;

	case PMCOUT_STATUS_XFER_ERROR_ABORTED_DUE_TO_SRST:
		/* synthesize a power-on/reset */
		pmcs_latch_status(pwp, sp, STATUS_CHECK, por, sizeof (por),
		    phyp->path);
		break;

	case PMCOUT_STATUS_XFER_ERROR_UNEXPECTED_PHASE:
	case PMCOUT_STATUS_XFER_ERROR_RDY_OVERRUN:
	case PMCOUT_STATUS_XFER_ERROR_RDY_NOT_EXPECTED:
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK:
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK:
		/* synthesize a PARITY ERROR */
		pmcs_latch_status(pwp, sp, STATUS_CHECK, parity,
		    sizeof (parity), phyp->path);
		break;

	case PMCOUT_STATUS_IO_XFER_ERROR_DMA:
	case PMCOUT_STATUS_IO_NOT_VALID:
	case PMCOUT_STATUS_PROG_ERROR:
	case PMCOUT_STATUS_XFER_ERROR_PEER_ABORTED:
	case PMCOUT_STATUS_XFER_ERROR_SATA:	/* non-NCQ failure */
	default:
		pkt->pkt_reason = CMD_TRAN_ERR;
		break;
	}
}

/*
 * Latch up SCSI status.  Sets pkt_scbp[0]/pkt_state/pkt_reason on the
 * packet; for CHECK CONDITION with sense data and room for an ARQ
 * structure, also synthesizes the auto-request-sense result in place.
 */

void
pmcs_latch_status(pmcs_hw_t *pwp, pmcs_cmd_t *sp, uint8_t status,
    uint8_t *snsp, size_t snslen, char *path)
{
	static const char c1[] =
	    "%s: Status Byte 0x%02x for CDB0=0x%02x (%02x %02x %02x) "
	    "HTAG 0x%x @ %llu";
	static const char c2[] =
	    "%s: Status Byte 0x%02x for CDB0=0x%02x HTAG 0x%x @ %llu";

	CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
	    STATE_SENT_CMD | STATE_GOT_STATUS;
	CMD2PKT(sp)->pkt_scbp[0] = status;

	if (status == STATUS_CHECK && snsp &&
	    (size_t)SCSA_STSLEN(sp) >= sizeof (struct scsi_arq_status)) {
		struct scsi_arq_status *aqp =
		    (void *) CMD2PKT(sp)->pkt_scbp;
		size_t amt = sizeof (struct scsi_extended_sense);
		uint8_t key = scsi_sense_key(snsp);
		uint8_t asc = scsi_sense_asc(snsp);
		uint8_t ascq = scsi_sense_ascq(snsp);
		/* Copy no more sense data than the caller supplied. */
		if (amt > snslen) {
			amt = snslen;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG_SCSI_STATUS, NULL, NULL, c1, path,
		    status, CMD2PKT(sp)->pkt_cdbp[0] & 0xff, key, asc, ascq,
		    sp->cmd_tag, (unsigned long long)gethrtime());
		CMD2PKT(sp)->pkt_state |= STATE_ARQ_DONE;
		(*(uint8_t *)&aqp->sts_rqpkt_status) = STATUS_GOOD;
		aqp->sts_rqpkt_statistics = 0;
		aqp->sts_rqpkt_reason = CMD_CMPLT;
		aqp->sts_rqpkt_state = STATE_GOT_BUS |
		    STATE_GOT_TARGET | STATE_SENT_CMD |
		    STATE_XFERRED_DATA | STATE_GOT_STATUS;
		(void) memcpy(&aqp->sts_sensedata, snsp, amt);
		if (aqp->sts_sensedata.es_class != CLASS_EXTENDED_SENSE) {
			/* Not extended sense: report the ARQ as failed. */
			aqp->sts_rqpkt_reason = CMD_TRAN_ERR;
			aqp->sts_rqpkt_state = 0;
			aqp->sts_rqpkt_resid =
			    sizeof (struct scsi_extended_sense);
		} else {
			aqp->sts_rqpkt_resid =
			    sizeof (struct scsi_extended_sense) - amt;
		}
	} else if (status) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_SCSI_STATUS, NULL, NULL, c2,
		    path, status, CMD2PKT(sp)->pkt_cdbp[0] & 0xff,
		    sp->cmd_tag, (unsigned long long)gethrtime());
	}

	CMD2PKT(sp)->pkt_reason = CMD_CMPLT;
}

/*
 * Calculate and set packet residual and return the amount
 * left over after applying various filters.
 */
size_t
pmcs_set_resid(struct scsi_pkt *pkt, size_t amt, uint32_t cdbamt)
{
	pkt->pkt_resid = cdbamt;
	/* Clamp to the CDB-derived amount, then to the DMA mapping length. */
	if (amt > pkt->pkt_resid) {
		amt = pkt->pkt_resid;
	}
	if (amt > pkt->pkt_dma_len) {
		amt = pkt->pkt_dma_len;
	}
	return (amt);
}

/*
 * Return the existing target softstate (unlocked) if there is one. If so,
 * the PHY is locked and that lock must be freed by the caller after the
 * target/PHY linkage is established. If there isn't one, and alloc_tgt is
 * TRUE, then allocate one.
 */
pmcs_xscsi_t *
pmcs_get_target(pmcs_iport_t *iport, char *tgt_port, boolean_t alloc_tgt)
{
	pmcs_hw_t *pwp = iport->pwp;
	pmcs_phy_t *phyp;
	pmcs_xscsi_t *tgt;
	uint64_t wwn;
	char unit_address[PMCS_MAX_UA_SIZE];
	int ua_form = 1;

	/*
	 * Find the PHY for this target
	 */
	phyp = pmcs_find_phy_by_sas_address(pwp, iport, NULL, tgt_port);
	if (phyp == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL,
		    "%s: No PHY for target @ %s", __func__, tgt_port);
		return (NULL);
	}

	tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, tgt_port);

	if (tgt) {
		mutex_enter(&tgt->statlock);
		/*
		 * There's already a target. Check its PHY pointer to see
		 * if we need to clear the old linkages
		 */
		if (tgt->phy && (tgt->phy != phyp)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
			    "%s: Target PHY updated from %p to %p", __func__,
			    (void *)tgt->phy, (void *)phyp);
			/* Move the ref count from the old PHY to the new. */
			if (!IS_ROOT_PHY(tgt->phy)) {
				pmcs_dec_phy_ref_count(tgt->phy);
				pmcs_inc_phy_ref_count(phyp);
			}
			tgt->phy->target = NULL;
		}

		/*
		 * If this target has no PHY pointer and alloc_tgt is FALSE,
		 * that implies we expect the target to already exist. This
		 * implies that there has already been a tran_tgt_init on at
		 * least one LU.
		 */
		if ((tgt->phy == NULL) && !alloc_tgt) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, tgt,
			    "%s: Establish linkage from new PHY to old target @"
			    "%s", __func__, tgt->unit_address);
			/* One PHY reference per existing LU reference. */
			for (int idx = 0; idx < tgt->ref_count; idx++) {
				pmcs_inc_phy_ref_count(phyp);
			}
		}

		tgt->phy = phyp;
		phyp->target = tgt;

		mutex_exit(&tgt->statlock);
		return (tgt);
	}

	/*
	 * Make sure the PHY we found is on the correct iport
	 */
	if (phyp->iport != iport) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL,
		    "%s: No target at %s on this iport", __func__, tgt_port);
		pmcs_unlock_phy(phyp);
		return (NULL);
	}

	/*
	 * If this was just a lookup (i.e. alloc_tgt is false), return now.
	 */
	if (alloc_tgt == B_FALSE) {
		pmcs_unlock_phy(phyp);
		return (NULL);
	}

	/*
	 * Allocate the new softstate
	 */
	wwn = pmcs_barray2wwn(phyp->sas_address);
	(void) scsi_wwn_to_wwnstr(wwn, ua_form, unit_address);

	if (ddi_soft_state_bystr_zalloc(iport->tgt_sstate, unit_address) !=
	    DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: Couldn't alloc softstate for device at %s",
		    __func__, unit_address);
		pmcs_unlock_phy(phyp);
		return (NULL);
	}

	tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, unit_address);
	ASSERT(tgt != NULL);
	STAILQ_INIT(&tgt->wq);
	STAILQ_INIT(&tgt->aq);
	STAILQ_INIT(&tgt->sq);
	mutex_init(&tgt->statlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));
	mutex_init(&tgt->wqlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));
	mutex_init(&tgt->aqlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));
	cv_init(&tgt->reset_cv, NULL, CV_DRIVER, NULL);
	cv_init(&tgt->abort_cv, NULL, CV_DRIVER, NULL);
	list_create(&tgt->lun_list, sizeof (pmcs_lun_t),
	    offsetof(pmcs_lun_t, lun_list_next));
	tgt->qdepth = 1;
	tgt->target_num = PMCS_INVALID_TARGET_NUM;
	bcopy(unit_address, tgt->unit_address, PMCS_MAX_UA_SIZE);
	tgt->pwp = pwp;
	tgt->ua = strdup(iport->ua);
	tgt->phy = phyp;
	ASSERT((phyp->target == NULL) || (phyp->target == tgt));
	if (phyp->target == NULL) {
		phyp->target = tgt;
	}

	/*
	 * Don't allocate LUN softstate for SMP targets
	 */
	if (phyp->dtype == EXPANDER) {
		return (tgt);
	}

	if (ddi_soft_state_bystr_init(&tgt->lun_sstate,
	    sizeof (pmcs_lun_t), PMCS_LUN_SSTATE_SZ) != 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: LUN soft_state_bystr_init failed", __func__);
		/*
		 * NOTE(review): the softstate was allocated by
		 * unit_address above but is freed here by tgt_port, and
		 * the mutexes/cvs/list initialized above are not
		 * destroyed on this path — confirm the keys always match
		 * and whether this leaks synchronization objects.
		 */
		ddi_soft_state_bystr_free(iport->tgt_sstate, tgt_port);
		pmcs_unlock_phy(phyp);
		return (NULL);
	}

	return (tgt);
}