/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 *
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * SCSI (SCSA) midlayer interface for PMC driver.
27 */ 28 29 #include <sys/scsi/adapters/pmcs/pmcs.h> 30 31 extern scsi_lun_t scsi_lun64_to_lun(scsi_lun64_t lun64); 32 33 static int pmcs_scsa_tran_tgt_init(dev_info_t *, dev_info_t *, 34 scsi_hba_tran_t *, struct scsi_device *); 35 static void pmcs_scsa_tran_tgt_free(dev_info_t *, dev_info_t *, 36 scsi_hba_tran_t *, struct scsi_device *); 37 static int pmcs_scsa_start(struct scsi_address *, struct scsi_pkt *); 38 static int pmcs_scsa_abort(struct scsi_address *, struct scsi_pkt *); 39 static int pmcs_scsa_reset(struct scsi_address *, int); 40 static int pmcs_scsi_reset_notify(struct scsi_address *, int, 41 void (*)(caddr_t), caddr_t); 42 static int pmcs_scsa_getcap(struct scsi_address *, char *, int); 43 static int pmcs_scsa_setcap(struct scsi_address *, char *, int, int); 44 static int pmcs_scsa_setup_pkt(struct scsi_pkt *, int (*)(caddr_t), caddr_t); 45 static void pmcs_scsa_teardown_pkt(struct scsi_pkt *); 46 47 static int pmcs_smp_init(dev_info_t *, dev_info_t *, smp_hba_tran_t *, 48 smp_device_t *); 49 static void pmcs_smp_free(dev_info_t *, dev_info_t *, smp_hba_tran_t *, 50 smp_device_t *); 51 static int pmcs_smp_start(struct smp_pkt *); 52 53 static int pmcs_scsi_quiesce(dev_info_t *); 54 static int pmcs_scsi_unquiesce(dev_info_t *); 55 56 static int pmcs_cap(struct scsi_address *, char *, int, int, int); 57 static pmcs_xscsi_t * 58 pmcs_addr2xp(struct scsi_address *, uint64_t *, pmcs_cmd_t *); 59 static int pmcs_SAS_run(pmcs_cmd_t *, pmcwork_t *); 60 static void pmcs_SAS_done(pmcs_hw_t *, pmcwork_t *, uint32_t *); 61 62 static int pmcs_SATA_run(pmcs_cmd_t *, pmcwork_t *); 63 static void pmcs_SATA_done(pmcs_hw_t *, pmcwork_t *, uint32_t *); 64 static uint8_t pmcs_SATA_rwparm(uint8_t *, uint32_t *, uint64_t *, uint64_t); 65 66 static void pmcs_ioerror(pmcs_hw_t *, pmcs_dtype_t pmcs_dtype, 67 pmcwork_t *, uint32_t *); 68 69 70 int 71 pmcs_scsa_init(pmcs_hw_t *pwp, const ddi_dma_attr_t *ap) 72 { 73 scsi_hba_tran_t *tran; 74 ddi_dma_attr_t pmcs_scsa_dattr; 75 
int flags; 76 77 (void) memcpy(&pmcs_scsa_dattr, ap, sizeof (ddi_dma_attr_t)); 78 pmcs_scsa_dattr.dma_attr_sgllen = 79 ((PMCS_SGL_NCHUNKS - 1) * (PMCS_MAX_CHUNKS - 1)) + PMCS_SGL_NCHUNKS; 80 pmcs_scsa_dattr.dma_attr_flags = DDI_DMA_RELAXED_ORDERING; 81 pmcs_scsa_dattr.dma_attr_flags |= DDI_DMA_FLAGERR; 82 83 /* 84 * Allocate a transport structure 85 */ 86 tran = scsi_hba_tran_alloc(pwp->dip, SCSI_HBA_CANSLEEP); 87 if (tran == NULL) { 88 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 89 "scsi_hba_tran_alloc failed"); 90 return (DDI_FAILURE); 91 } 92 93 tran->tran_hba_private = pwp; 94 tran->tran_tgt_init = pmcs_scsa_tran_tgt_init; 95 tran->tran_tgt_free = pmcs_scsa_tran_tgt_free; 96 tran->tran_start = pmcs_scsa_start; 97 tran->tran_abort = pmcs_scsa_abort; 98 tran->tran_reset = pmcs_scsa_reset; 99 tran->tran_reset_notify = pmcs_scsi_reset_notify; 100 tran->tran_getcap = pmcs_scsa_getcap; 101 tran->tran_setcap = pmcs_scsa_setcap; 102 tran->tran_setup_pkt = pmcs_scsa_setup_pkt; 103 tran->tran_teardown_pkt = pmcs_scsa_teardown_pkt; 104 tran->tran_quiesce = pmcs_scsi_quiesce; 105 tran->tran_unquiesce = pmcs_scsi_unquiesce; 106 tran->tran_interconnect_type = INTERCONNECT_SAS; 107 tran->tran_hba_len = sizeof (pmcs_cmd_t); 108 109 /* 110 * Attach this instance of the hba 111 */ 112 113 flags = SCSI_HBA_TRAN_SCB | SCSI_HBA_TRAN_CDB | SCSI_HBA_ADDR_COMPLEX | 114 SCSI_HBA_TRAN_PHCI | SCSI_HBA_HBA; 115 116 if (scsi_hba_attach_setup(pwp->dip, &pmcs_scsa_dattr, tran, flags)) { 117 scsi_hba_tran_free(tran); 118 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 119 "scsi_hba_attach failed"); 120 return (DDI_FAILURE); 121 } 122 pwp->tran = tran; 123 124 /* 125 * Attach the SMP part of this hba 126 */ 127 pwp->smp_tran = smp_hba_tran_alloc(pwp->dip); 128 ASSERT(pwp->smp_tran != NULL); 129 pwp->smp_tran->smp_tran_hba_private = pwp; 130 pwp->smp_tran->smp_tran_init = pmcs_smp_init; 131 pwp->smp_tran->smp_tran_free = pmcs_smp_free; 132 pwp->smp_tran->smp_tran_start = pmcs_smp_start; 133 134 if 
(smp_hba_attach_setup(pwp->dip, pwp->smp_tran) != DDI_SUCCESS) { 135 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 136 "smp_hba_attach failed"); 137 smp_hba_tran_free(pwp->smp_tran); 138 pwp->smp_tran = NULL; 139 scsi_hba_tran_free(tran); 140 return (DDI_FAILURE); 141 } 142 143 return (DDI_SUCCESS); 144 } 145 146 /* 147 * SCSA entry points 148 */ 149 150 static int 151 pmcs_scsa_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 152 scsi_hba_tran_t *tran, struct scsi_device *sd) 153 { 154 pmcs_hw_t *pwp = NULL; 155 int rval; 156 char *variant_prop = "sata"; 157 char *tgt_port = NULL, *ua = NULL; 158 pmcs_xscsi_t *tgt = NULL; 159 pmcs_iport_t *iport; 160 pmcs_lun_t *lun = NULL; 161 pmcs_phy_t *phyp = NULL; 162 uint64_t lun_num; 163 boolean_t got_scratch = B_FALSE; 164 165 /* 166 * First, make sure we're an iport and get the pointer to the HBA 167 * node's softstate 168 */ 169 if (scsi_hba_iport_unit_address(hba_dip) == NULL) { 170 pmcs_prt(TRAN2PMC(tran), PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 171 "%s: We don't enumerate devices on the HBA node", __func__); 172 goto tgt_init_fail; 173 } 174 175 pwp = ITRAN2PMC(tran); 176 iport = ITRAN2IPORT(tran); 177 178 /* 179 * Get the unit-address 180 */ 181 ua = scsi_device_unit_address(sd); 182 if (ua == NULL) { 183 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 184 "%s: Couldn't get UA", __func__); 185 pwp = NULL; 186 goto tgt_init_fail; 187 } 188 pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL, 189 "got ua '%s'", ua); 190 191 /* 192 * Get the target address 193 */ 194 rval = scsi_device_prop_lookup_string(sd, SCSI_DEVICE_PROP_PATH, 195 SCSI_ADDR_PROP_TARGET_PORT, &tgt_port); 196 if (rval != DDI_PROP_SUCCESS) { 197 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 198 "Couldn't get target UA"); 199 pwp = NULL; 200 goto tgt_init_fail; 201 } 202 pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL, 203 "got tgt_port '%s'", tgt_port); 204 205 /* 206 * Validate that this tran_tgt_init is for an active iport. 
207 */ 208 if (iport->ua_state == UA_INACTIVE) { 209 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 210 "%s: Got tran_tgt_init on inactive iport for '%s'", 211 __func__, tgt_port); 212 pwp = NULL; 213 goto tgt_init_fail; 214 } 215 216 /* 217 * Since we're going to wait for scratch, be sure to acquire it while 218 * we're not holding any other locks 219 */ 220 (void) pmcs_acquire_scratch(pwp, B_TRUE); 221 got_scratch = B_TRUE; 222 223 mutex_enter(&pwp->lock); 224 225 /* 226 * See if there's already a target softstate. If not, allocate one. 227 */ 228 tgt = pmcs_get_target(iport, tgt_port, B_TRUE); 229 230 if (tgt == NULL) { 231 goto tgt_init_fail; 232 } 233 234 phyp = tgt->phy; 235 if (!IS_ROOT_PHY(phyp)) { 236 pmcs_inc_phy_ref_count(phyp); 237 } 238 ASSERT(mutex_owned(&phyp->phy_lock)); 239 240 pmcs_prt(pwp, PMCS_PRT_DEBUG2, phyp, tgt, "@%s tgt = 0x%p, dip = 0x%p", 241 ua, (void *)tgt, (void *)tgt_dip); 242 243 /* Now get the lun */ 244 lun_num = scsi_device_prop_get_int64(sd, SCSI_DEVICE_PROP_PATH, 245 SCSI_ADDR_PROP_LUN64, SCSI_LUN64_ILLEGAL); 246 if (lun_num == SCSI_LUN64_ILLEGAL) { 247 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt, 248 "No LUN for tgt %p", (void *)tgt); 249 goto tgt_init_fail; 250 } 251 252 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt, "%s: @%s tgt 0x%p phy " 253 "0x%p (%s)", __func__, ua, (void *)tgt, (void *)phyp, phyp->path); 254 255 mutex_enter(&tgt->statlock); 256 tgt->dtype = phyp->dtype; 257 if (tgt->dtype != SAS && tgt->dtype != SATA) { 258 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt, 259 "PHY 0x%p went away?", (void *)phyp); 260 goto tgt_init_fail; 261 } 262 263 /* We don't support SATA devices at LUN > 0. */ 264 if ((tgt->dtype == SATA) && (lun_num > 0)) { 265 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt, 266 "%s: No support for SATA devices at LUN > 0 " 267 "(target = 0x%p)", __func__, (void *)tgt); 268 goto tgt_init_fail; 269 } 270 271 /* 272 * Allocate LU soft state. 
We use ddi_soft_state_bystr_zalloc instead 273 * of kmem_alloc because ddi_soft_state_bystr_zalloc allows us to 274 * verify that the framework never tries to initialize two scsi_device 275 * structures with the same unit-address at the same time. 276 */ 277 if (ddi_soft_state_bystr_zalloc(tgt->lun_sstate, ua) != DDI_SUCCESS) { 278 pmcs_prt(pwp, PMCS_PRT_DEBUG2, phyp, tgt, 279 "Couldn't allocate LU soft state"); 280 goto tgt_init_fail; 281 } 282 283 lun = ddi_soft_state_bystr_get(tgt->lun_sstate, ua); 284 if (lun == NULL) { 285 pmcs_prt(pwp, PMCS_PRT_DEBUG2, phyp, tgt, 286 "Couldn't get LU soft state"); 287 goto tgt_init_fail; 288 } 289 scsi_device_hba_private_set(sd, lun); 290 lun->lun_num = lun_num; 291 292 /* convert the scsi_lun64_t value to SCSI standard form */ 293 lun->scsi_lun = scsi_lun64_to_lun(lun_num); 294 295 ASSERT(strlen(ua) < (PMCS_MAX_UA_SIZE - 1)); 296 bcopy(ua, lun->unit_address, strnlen(ua, PMCS_MAX_UA_SIZE - 1)); 297 298 lun->target = tgt; 299 300 /* 301 * If this is the first tran_tgt_init, add this target to our list 302 */ 303 if (tgt->target_num == PMCS_INVALID_TARGET_NUM) { 304 int target; 305 for (target = 0; target < pwp->max_dev; target++) { 306 if (pwp->targets[target] != NULL) { 307 continue; 308 } 309 310 pwp->targets[target] = tgt; 311 tgt->target_num = (uint16_t)target; 312 break; 313 } 314 315 if (target == pwp->max_dev) { 316 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt, 317 "Target list full."); 318 goto tgt_init_fail; 319 } 320 } 321 322 tgt->dip = sd->sd_dev; 323 lun->sd = sd; 324 list_insert_tail(&tgt->lun_list, lun); 325 326 if (!pmcs_assign_device(pwp, tgt)) { 327 pmcs_release_scratch(pwp); 328 pwp->targets[tgt->target_num] = NULL; 329 tgt->target_num = PMCS_INVALID_TARGET_NUM; 330 tgt->phy = NULL; 331 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt, 332 "%s: pmcs_assign_device failed for target 0x%p", 333 __func__, (void *)tgt); 334 goto tgt_init_fail; 335 } 336 337 pmcs_release_scratch(pwp); 338 tgt->ref_count++; 339 340 
(void) scsi_device_prop_update_int(sd, SCSI_DEVICE_PROP_PATH, 341 SCSI_ADDR_PROP_TARGET, (uint32_t)(tgt->target_num)); 342 343 /* SM-HBA */ 344 if (tgt->dtype == SATA) { 345 /* TCR in PSARC/1997/281 opinion */ 346 (void) scsi_device_prop_update_string(sd, 347 SCSI_DEVICE_PROP_PATH, "variant", variant_prop); 348 } 349 350 tgt->phy_addressable = PMCS_PHY_ADDRESSABLE(phyp); 351 352 if (tgt->phy_addressable) { 353 (void) scsi_device_prop_update_int(sd, SCSI_DEVICE_PROP_PATH, 354 SCSI_ADDR_PROP_SATA_PHY, phyp->phynum); 355 } 356 357 /* SM-HBA */ 358 (void) pmcs_smhba_set_scsi_device_props(pwp, phyp, sd); 359 /* 360 * Make sure attached port and target port pm props are updated 361 * By passing in 0s, we're not actually updating any values, but 362 * the properties should now get updated on the node. 363 */ 364 365 mutex_exit(&tgt->statlock); 366 pmcs_update_phy_pm_props(phyp, 0, 0, B_TRUE); 367 pmcs_unlock_phy(phyp); 368 mutex_exit(&pwp->lock); 369 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, tgt_port); 370 return (DDI_SUCCESS); 371 372 tgt_init_fail: 373 scsi_device_hba_private_set(sd, NULL); 374 if (got_scratch) { 375 pmcs_release_scratch(pwp); 376 } 377 if (lun) { 378 list_remove(&tgt->lun_list, lun); 379 ddi_soft_state_bystr_free(tgt->lun_sstate, ua); 380 } 381 if (phyp) { 382 mutex_exit(&tgt->statlock); 383 pmcs_unlock_phy(phyp); 384 /* 385 * phyp's ref count was incremented in pmcs_new_tport. 386 * We're failing configuration, we now need to decrement it. 
387 */ 388 if (!IS_ROOT_PHY(phyp)) { 389 pmcs_dec_phy_ref_count(phyp); 390 } 391 phyp->target = NULL; 392 } 393 if (tgt && tgt->ref_count == 0) { 394 ddi_soft_state_bystr_free(iport->tgt_sstate, tgt_port); 395 } 396 if (pwp) { 397 mutex_exit(&pwp->lock); 398 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt, 399 "%s: failed for @%s tgt 0x%p phy 0x%p", __func__, ua, 400 (void *)tgt, (void *)phyp); 401 } 402 if (tgt_port) { 403 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, tgt_port); 404 } 405 return (DDI_FAILURE); 406 } 407 408 static void 409 pmcs_scsa_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip, 410 scsi_hba_tran_t *tran, struct scsi_device *sd) 411 { 412 _NOTE(ARGUNUSED(hba_dip, tgt_dip)); 413 pmcs_hw_t *pwp; 414 pmcs_lun_t *lun; 415 pmcs_xscsi_t *target; 416 char *unit_address; 417 pmcs_phy_t *phyp; 418 419 if (scsi_hba_iport_unit_address(hba_dip) == NULL) { 420 pwp = TRAN2PMC(tran); 421 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL, 422 "%s: We don't enumerate devices on the HBA node", __func__); 423 return; 424 } 425 426 lun = (pmcs_lun_t *)scsi_device_hba_private_get(sd); 427 428 ASSERT((lun != NULL) && (lun->target != NULL)); 429 ASSERT(lun->target->ref_count > 0); 430 431 target = lun->target; 432 unit_address = lun->unit_address; 433 list_remove(&target->lun_list, lun); 434 435 pwp = ITRAN2PMC(tran); 436 mutex_enter(&pwp->lock); 437 mutex_enter(&target->statlock); 438 phyp = target->phy; 439 440 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, target, 441 "%s: for @%s tgt 0x%p phy 0x%p", __func__, unit_address, 442 (void *)target, (void *)phyp); 443 ddi_soft_state_bystr_free(lun->target->lun_sstate, unit_address); 444 445 if (target->recover_wait) { 446 mutex_exit(&target->statlock); 447 mutex_exit(&pwp->lock); 448 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, target, "%s: " 449 "Target 0x%p in device state recovery, fail tran_tgt_free", 450 __func__, (void *)target); 451 return; 452 } 453 454 /* 455 * If this target still has a PHY pointer and that 
PHY's target pointer 456 * has been cleared, then that PHY has been reaped. In that case, there 457 * would be no need to decrement the reference count 458 */ 459 if (phyp && !IS_ROOT_PHY(phyp) && phyp->target) { 460 pmcs_dec_phy_ref_count(phyp); 461 } 462 463 if (--target->ref_count == 0) { 464 /* 465 * Remove this target from our list. The target soft 466 * state will remain, and the device will remain registered 467 * with the hardware unless/until we're told the device 468 * physically went away. 469 */ 470 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, target, 471 "%s: Free target 0x%p (vtgt %d)", __func__, (void *)target, 472 target->target_num); 473 pwp->targets[target->target_num] = NULL; 474 target->target_num = PMCS_INVALID_TARGET_NUM; 475 /* 476 * If the target still has a PHY pointer, break the linkage 477 */ 478 if (phyp) { 479 phyp->target = NULL; 480 } 481 target->phy = NULL; 482 pmcs_destroy_target(target); 483 } else { 484 mutex_exit(&target->statlock); 485 } 486 487 mutex_exit(&pwp->lock); 488 } 489 490 static int 491 pmcs_scsa_start(struct scsi_address *ap, struct scsi_pkt *pkt) 492 { 493 pmcs_cmd_t *sp = PKT2CMD(pkt); 494 pmcs_hw_t *pwp = ADDR2PMC(ap); 495 pmcs_xscsi_t *xp; 496 boolean_t blocked; 497 uint32_t hba_state; 498 499 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, 500 "%s: pkt %p sd %p cdb0=0x%02x dl=%lu", __func__, (void *)pkt, 501 (void *)scsi_address_device(&pkt->pkt_address), 502 pkt->pkt_cdbp[0] & 0xff, pkt->pkt_dma_len); 503 504 if (pkt->pkt_flags & FLAG_NOINTR) { 505 pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL, 506 "%s: nointr pkt", __func__); 507 return (TRAN_BADPKT); 508 } 509 510 sp->cmd_tag = 0; 511 pkt->pkt_state = pkt->pkt_statistics = 0; 512 pkt->pkt_reason = CMD_INCOMPLETE; 513 514 mutex_enter(&pwp->lock); 515 hba_state = pwp->state; 516 blocked = pwp->blocked; 517 mutex_exit(&pwp->lock); 518 519 if (hba_state != STATE_RUNNING) { 520 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 521 "%s: hba dead", __func__); 522 return 
(TRAN_FATAL_ERROR); 523 } 524 525 xp = pmcs_addr2xp(ap, NULL, sp); 526 if (xp == NULL) { 527 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, 528 "%s: dropping due to null target", __func__); 529 goto dead_target; 530 } 531 ASSERT(mutex_owned(&xp->statlock)); 532 533 /* 534 * First, check to see if the device is gone. 535 */ 536 if (xp->dev_gone) { 537 xp->actv_pkts++; 538 mutex_exit(&xp->statlock); 539 pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, xp, 540 "%s: dropping due to dead target 0x%p", 541 __func__, (void *)xp); 542 goto dead_target; 543 } 544 545 /* 546 * If we're blocked (quiesced) just return. 547 */ 548 if (blocked) { 549 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 550 "%s: hba blocked", __func__); 551 xp->actv_pkts++; 552 mutex_exit(&xp->statlock); 553 mutex_enter(&xp->wqlock); 554 STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next); 555 mutex_exit(&xp->wqlock); 556 return (TRAN_ACCEPT); 557 } 558 559 /* 560 * If we're draining or resetting, queue and return. 561 */ 562 if (xp->draining || xp->resetting || xp->recover_wait) { 563 xp->actv_pkts++; 564 mutex_exit(&xp->statlock); 565 mutex_enter(&xp->wqlock); 566 STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next); 567 mutex_exit(&xp->wqlock); 568 pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, xp, 569 "%s: draining/resetting/recovering (cnt %u)", 570 __func__, xp->actv_cnt); 571 /* 572 * By the time we get here, draining or 573 * resetting may have come and gone, not 574 * yet noticing that we had put something 575 * on the wait queue, so schedule a worker 576 * to look at this later. 577 */ 578 SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES); 579 return (TRAN_ACCEPT); 580 } 581 582 xp->actv_pkts++; 583 mutex_exit(&xp->statlock); 584 585 /* 586 * Queue this command to the tail of the wait queue. 587 * This keeps us getting commands out of order. 588 */ 589 mutex_enter(&xp->wqlock); 590 STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next); 591 mutex_exit(&xp->wqlock); 592 593 /* 594 * Now run the queue for this device. 
595 */ 596 (void) pmcs_scsa_wq_run_one(pwp, xp); 597 598 return (TRAN_ACCEPT); 599 600 dead_target: 601 pkt->pkt_state = STATE_GOT_BUS; 602 pkt->pkt_reason = CMD_DEV_GONE; 603 mutex_enter(&pwp->cq_lock); 604 STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next); 605 PMCS_CQ_RUN_LOCKED(pwp); 606 mutex_exit(&pwp->cq_lock); 607 return (TRAN_ACCEPT); 608 } 609 610 /* Return code 1 = Success */ 611 static int 612 pmcs_scsa_abort(struct scsi_address *ap, struct scsi_pkt *pkt) 613 { 614 pmcs_hw_t *pwp = ADDR2PMC(ap); 615 pmcs_cmd_t *sp = NULL; 616 pmcs_xscsi_t *xp = NULL; 617 pmcs_phy_t *pptr = NULL; 618 pmcs_lun_t *pmcs_lun = (pmcs_lun_t *) 619 scsi_device_hba_private_get(scsi_address_device(ap)); 620 uint32_t tag; 621 uint64_t lun; 622 pmcwork_t *pwrk; 623 624 mutex_enter(&pwp->lock); 625 if (pwp->state != STATE_RUNNING) { 626 mutex_exit(&pwp->lock); 627 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 628 "%s: hba dead", __func__); 629 return (0); 630 } 631 mutex_exit(&pwp->lock); 632 633 if (pkt == NULL) { 634 if (pmcs_lun == NULL) { 635 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: " 636 "No pmcs_lun_t struct to do ABORT_ALL", __func__); 637 return (0); 638 } 639 xp = pmcs_lun->target; 640 if (xp != NULL) { 641 pptr = xp->phy; 642 } 643 if (pptr == NULL) { 644 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp, "%s: pkt is " 645 "NULL. No tgt/phy to do ABORT_ALL", __func__); 646 return (0); 647 } 648 pmcs_lock_phy(pptr); 649 if (pmcs_abort(pwp, pptr, 0, 1, 0)) { 650 pptr->abort_pending = 1; 651 SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE); 652 } 653 pmcs_unlock_phy(pptr); 654 return (1); 655 } 656 657 sp = PKT2CMD(pkt); 658 xp = sp->cmd_target; 659 660 if (sp->cmd_lun) { 661 lun = sp->cmd_lun->lun_num; 662 } else { 663 lun = 0; 664 } 665 if (xp == NULL) { 666 return (0); 667 } 668 669 /* 670 * See if we have a real work structure associated with this cmd. 
671 */ 672 pwrk = pmcs_tag2wp(pwp, sp->cmd_tag); 673 if (pwrk && pwrk->arg == sp) { 674 tag = pwrk->htag; 675 pptr = pwrk->phy; 676 pwrk->timer = 0; /* we don't time this here */ 677 ASSERT(pwrk->state == PMCS_WORK_STATE_ONCHIP); 678 mutex_exit(&pwrk->lock); 679 pmcs_lock_phy(pptr); 680 if (pptr->dtype == SAS) { 681 if (pmcs_ssp_tmf(pwp, pptr, SAS_ABORT_TASK, tag, lun, 682 NULL)) { 683 pptr->abort_pending = 1; 684 pmcs_unlock_phy(pptr); 685 SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE); 686 return (0); 687 } 688 } else { 689 /* 690 * XXX: Was the command that was active an 691 * NCQ I/O command? 692 */ 693 pptr->need_rl_ext = 1; 694 if (pmcs_sata_abort_ncq(pwp, pptr)) { 695 pptr->abort_pending = 1; 696 pmcs_unlock_phy(pptr); 697 SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE); 698 return (0); 699 } 700 } 701 pptr->abort_pending = 1; 702 pmcs_unlock_phy(pptr); 703 SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE); 704 return (1); 705 } 706 if (pwrk) { 707 mutex_exit(&pwrk->lock); 708 } 709 /* 710 * Okay, those weren't the droids we were looking for. 711 * See if the command is on any of the wait queues. 
712 */ 713 mutex_enter(&xp->wqlock); 714 sp = NULL; 715 STAILQ_FOREACH(sp, &xp->wq, cmd_next) { 716 if (sp == PKT2CMD(pkt)) { 717 STAILQ_REMOVE(&xp->wq, sp, pmcs_cmd, cmd_next); 718 break; 719 } 720 } 721 mutex_exit(&xp->wqlock); 722 if (sp) { 723 pkt->pkt_reason = CMD_ABORTED; 724 pkt->pkt_statistics |= STAT_ABORTED; 725 mutex_enter(&pwp->cq_lock); 726 STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next); 727 PMCS_CQ_RUN_LOCKED(pwp); 728 mutex_exit(&pwp->cq_lock); 729 return (1); 730 } 731 return (0); 732 } 733 734 /* 735 * SCSA reset functions 736 */ 737 static int 738 pmcs_scsa_reset(struct scsi_address *ap, int level) 739 { 740 pmcs_hw_t *pwp = ADDR2PMC(ap); 741 pmcs_phy_t *pptr; 742 pmcs_xscsi_t *xp; 743 uint64_t lun = (uint64_t)-1, *lp = NULL; 744 int rval; 745 746 mutex_enter(&pwp->lock); 747 if (pwp->state != STATE_RUNNING) { 748 mutex_exit(&pwp->lock); 749 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 750 "%s: hba dead", __func__); 751 return (0); 752 } 753 mutex_exit(&pwp->lock); 754 755 switch (level) { 756 case RESET_ALL: 757 rval = 0; 758 break; 759 case RESET_LUN: 760 /* 761 * Point lp at lun so that pmcs_addr2xp 762 * will fill out the 64 bit lun number. 763 */ 764 lp = &lun; 765 /* FALLTHROUGH */ 766 case RESET_TARGET: 767 xp = pmcs_addr2xp(ap, lp, NULL); 768 if (xp == NULL) { 769 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, 770 "%s: no xp found for this scsi address", __func__); 771 return (0); 772 } 773 774 if (xp->dev_gone) { 775 mutex_exit(&xp->statlock); 776 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp, 777 "%s: Target 0x%p has gone away", __func__, 778 (void *)xp); 779 return (0); 780 } 781 782 /* 783 * If we're already performing this action, or if device 784 * state recovery is already running, just return failure. 
785 */ 786 if (xp->resetting || xp->recover_wait) { 787 mutex_exit(&xp->statlock); 788 return (0); 789 } 790 xp->reset_wait = 0; 791 xp->reset_success = 0; 792 xp->resetting = 1; 793 pptr = xp->phy; 794 mutex_exit(&xp->statlock); 795 796 if (pmcs_reset_dev(pwp, pptr, lun)) { 797 rval = 0; 798 } else { 799 rval = 1; 800 } 801 802 mutex_enter(&xp->statlock); 803 if (rval == 1) { 804 xp->reset_success = 1; 805 } 806 if (xp->reset_wait) { 807 xp->reset_wait = 0; 808 cv_signal(&xp->reset_cv); 809 } 810 xp->resetting = 0; 811 mutex_exit(&xp->statlock); 812 SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES); 813 break; 814 default: 815 rval = 0; 816 break; 817 } 818 819 return (rval); 820 } 821 822 static int 823 pmcs_scsi_reset_notify(struct scsi_address *ap, int flag, 824 void (*callback)(caddr_t), caddr_t arg) 825 { 826 pmcs_hw_t *pwp = ADDR2PMC(ap); 827 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg, 828 &pwp->lock, &pwp->reset_notify_listf)); 829 } 830 831 832 static int 833 pmcs_cap(struct scsi_address *ap, char *cap, int val, int tonly, int set) 834 { 835 _NOTE(ARGUNUSED(val, tonly)); 836 int cidx, rval = 0; 837 pmcs_xscsi_t *xp; 838 839 cidx = scsi_hba_lookup_capstr(cap); 840 if (cidx == -1) { 841 return (-1); 842 } 843 844 xp = pmcs_addr2xp(ap, NULL, NULL); 845 if (xp == NULL) { 846 return (-1); 847 } 848 849 switch (cidx) { 850 case SCSI_CAP_DMA_MAX: 851 case SCSI_CAP_INITIATOR_ID: 852 if (set == 0) { 853 rval = INT_MAX; /* argh */ 854 } 855 break; 856 case SCSI_CAP_DISCONNECT: 857 case SCSI_CAP_SYNCHRONOUS: 858 case SCSI_CAP_WIDE_XFER: 859 case SCSI_CAP_PARITY: 860 case SCSI_CAP_ARQ: 861 case SCSI_CAP_UNTAGGED_QING: 862 if (set == 0) { 863 rval = 1; 864 } 865 break; 866 867 case SCSI_CAP_TAGGED_QING: 868 rval = 1; 869 break; 870 871 case SCSI_CAP_MSG_OUT: 872 case SCSI_CAP_RESET_NOTIFICATION: 873 case SCSI_CAP_QFULL_RETRIES: 874 case SCSI_CAP_QFULL_RETRY_INTERVAL: 875 break; 876 case SCSI_CAP_SCSI_VERSION: 877 if (set == 0) { 878 rval = SCSI_VERSION_3; 879 } 
880 break; 881 case SCSI_CAP_INTERCONNECT_TYPE: 882 if (set) { 883 break; 884 } 885 if (xp->phy_addressable) { 886 rval = INTERCONNECT_SATA; 887 } else { 888 rval = INTERCONNECT_SAS; 889 } 890 break; 891 case SCSI_CAP_CDB_LEN: 892 if (set == 0) { 893 rval = 16; 894 } 895 break; 896 case SCSI_CAP_LUN_RESET: 897 if (set) { 898 break; 899 } 900 if (xp->dtype == SATA) { 901 rval = 0; 902 } else { 903 rval = 1; 904 } 905 break; 906 default: 907 rval = -1; 908 break; 909 } 910 mutex_exit(&xp->statlock); 911 pmcs_prt(ADDR2PMC(ap), PMCS_PRT_DEBUG3, NULL, NULL, 912 "%s: cap %s val %d set %d rval %d", 913 __func__, cap, val, set, rval); 914 return (rval); 915 } 916 917 /* 918 * Returns with statlock held if the xp is found. 919 * Fills in pmcs_cmd_t with values if pmcs_cmd_t pointer non-NULL. 920 */ 921 static pmcs_xscsi_t * 922 pmcs_addr2xp(struct scsi_address *ap, uint64_t *lp, pmcs_cmd_t *sp) 923 { 924 pmcs_xscsi_t *xp; 925 pmcs_lun_t *lun = (pmcs_lun_t *) 926 scsi_device_hba_private_get(scsi_address_device(ap)); 927 928 if ((lun == NULL) || (lun->target == NULL)) { 929 return (NULL); 930 } 931 xp = lun->target; 932 mutex_enter(&xp->statlock); 933 934 if (xp->dev_gone || (xp->phy == NULL)) { 935 /* 936 * This may be a retried packet, so it's possible cmd_target 937 * and cmd_lun may still be populated. Clear them. 
938 */ 939 if (sp != NULL) { 940 sp->cmd_target = NULL; 941 sp->cmd_lun = NULL; 942 } 943 mutex_exit(&xp->statlock); 944 return (NULL); 945 } 946 947 if (sp != NULL) { 948 sp->cmd_target = xp; 949 sp->cmd_lun = lun; 950 } 951 if (lp) { 952 *lp = lun->lun_num; 953 } 954 return (xp); 955 } 956 957 static int 958 pmcs_scsa_getcap(struct scsi_address *ap, char *cap, int whom) 959 { 960 int r; 961 if (cap == NULL) { 962 return (-1); 963 } 964 r = pmcs_cap(ap, cap, 0, whom, 0); 965 return (r); 966 } 967 968 static int 969 pmcs_scsa_setcap(struct scsi_address *ap, char *cap, int value, int whom) 970 { 971 int r; 972 if (cap == NULL) { 973 return (-1); 974 } 975 r = pmcs_cap(ap, cap, value, whom, 1); 976 return (r); 977 } 978 979 static int 980 pmcs_scsa_setup_pkt(struct scsi_pkt *pkt, int (*callback)(caddr_t), 981 caddr_t cbarg) 982 { 983 _NOTE(ARGUNUSED(callback, cbarg)); 984 pmcs_cmd_t *sp = pkt->pkt_ha_private; 985 986 bzero(sp, sizeof (pmcs_cmd_t)); 987 sp->cmd_pkt = pkt; 988 return (0); 989 } 990 991 static void 992 pmcs_scsa_teardown_pkt(struct scsi_pkt *pkt) 993 { 994 pmcs_cmd_t *sp = pkt->pkt_ha_private; 995 sp->cmd_target = NULL; 996 sp->cmd_lun = NULL; 997 } 998 999 static int 1000 pmcs_smp_start(struct smp_pkt *smp_pkt) 1001 { 1002 struct pmcwork *pwrk; 1003 const uint_t rdoff = SAS_SMP_MAX_PAYLOAD; 1004 uint32_t msg[PMCS_MSG_SIZE], *ptr, htag, status; 1005 uint64_t wwn; 1006 pmcs_hw_t *pwp; 1007 pmcs_phy_t *pptr; 1008 pmcs_xscsi_t *xp; 1009 uint_t reqsz, rspsz, will_retry; 1010 int result; 1011 1012 pwp = smp_pkt->smp_pkt_address->smp_a_hba_tran->smp_tran_hba_private; 1013 bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE); 1014 1015 pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, NULL, 1016 "%s: starting for wwn 0x%" PRIx64, __func__, wwn); 1017 1018 will_retry = smp_pkt->smp_pkt_will_retry; 1019 1020 (void) pmcs_acquire_scratch(pwp, B_TRUE); 1021 reqsz = smp_pkt->smp_pkt_reqsize; 1022 if (reqsz > SAS_SMP_MAX_PAYLOAD) { 1023 reqsz = SAS_SMP_MAX_PAYLOAD; 
    }
    (void) memcpy(pwp->scratch, smp_pkt->smp_pkt_req, reqsz);

    rspsz = smp_pkt->smp_pkt_rspsize;
    if (rspsz > SAS_SMP_MAX_PAYLOAD) {
        rspsz = SAS_SMP_MAX_PAYLOAD;
    }

    /*
     * The request size from the SMP driver always includes 4 bytes
     * for the CRC. The PMCS chip, however, doesn't want to see those
     * counts as part of the transfer size.
     */
    reqsz -= 4;

    pptr = pmcs_find_phy_by_wwn(pwp, wwn);
    /* PHY is now locked */
    if (pptr == NULL || pptr->dtype != EXPANDER) {
        if (pptr) {
            pmcs_unlock_phy(pptr);
        }
        pmcs_release_scratch(pwp);
        pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
            "%s: could not find phy", __func__);
        smp_pkt->smp_pkt_reason = ENXIO;
        return (DDI_FAILURE);
    }

    if ((pptr->iport == NULL) || !pptr->valid_device_id) {
        pmcs_unlock_phy(pptr);
        pmcs_release_scratch(pwp);
        pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, pptr->target,
            "%s: Can't reach PHY %s", __func__, pptr->path);
        smp_pkt->smp_pkt_reason = ENXIO;
        return (DDI_FAILURE);
    }

    pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
    if (pwrk == NULL) {
        pmcs_unlock_phy(pptr);
        pmcs_release_scratch(pwp);
        pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
            "%s: could not get work structure", __func__);
        smp_pkt->smp_pkt_reason = will_retry ? EAGAIN : EBUSY;
        return (DDI_FAILURE);
    }

    pwrk->arg = msg;
    pwrk->dtype = EXPANDER;
    mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
    ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
    if (ptr == NULL) {
        pmcs_pwork(pwp, pwrk);
        mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
        pmcs_unlock_phy(pptr);
        pmcs_release_scratch(pwp);
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
            "%s: could not get IQ entry", __func__);
        smp_pkt->smp_pkt_reason = will_retry ? EAGAIN : EBUSY;
        return (DDI_FAILURE);
    }

    /*
     * Build the SMP_REQUEST IOMB: indirect request and indirect
     * response, both carried in the scratch DMA area (the response
     * buffer lives at offset rdoff within scratch).
     */
    msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST));
    msg[1] = LE_32(pwrk->htag);
    msg[2] = LE_32(pptr->device_id);
    msg[3] = LE_32(SMP_INDIRECT_RESPONSE | SMP_INDIRECT_REQUEST);
    msg[8] = LE_32(DWORD0(pwp->scratch_dma));
    msg[9] = LE_32(DWORD1(pwp->scratch_dma));
    msg[10] = LE_32(reqsz);
    msg[11] = 0;
    msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff));
    msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff));
    msg[14] = LE_32(rspsz);
    msg[15] = 0;

    COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE);
    /* SMP serialization */
    pmcs_smp_acquire(pptr->iport);

    pwrk->state = PMCS_WORK_STATE_ONCHIP;
    htag = pwrk->htag;
    INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

    /*
     * Drop the PHY lock across the wait; the completion path only
     * needs the work structure.  NOTE(review): timeout is scaled by
     * 1000 — presumably seconds-to-milliseconds; confirm against
     * WAIT_FOR's expected units.
     */
    pmcs_unlock_phy(pptr);
    WAIT_FOR(pwrk, smp_pkt->smp_pkt_timeout * 1000, result);
    pmcs_pwork(pwp, pwrk);
    /* Release SMP lock before reacquiring PHY lock */
    pmcs_smp_release(pptr->iport);
    pmcs_lock_phy(pptr);

    if (result) {
        pmcs_timed_out(pwp, htag, __func__);
        if (pmcs_abort(pwp, pptr, htag, 0, 0)) {
            pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
                "%s: Unable to issue SMP ABORT for htag 0x%08x",
                __func__, htag);
        } else {
            pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
                "%s: Issuing SMP ABORT for htag 0x%08x",
                __func__, htag);
        }
        pmcs_unlock_phy(pptr);
        pmcs_release_scratch(pwp);
        smp_pkt->smp_pkt_reason = ETIMEDOUT;
        return (DDI_FAILURE);
    }
    status = LE_32(msg[2]);
    if (status == PMCOUT_STATUS_OVERFLOW) {
        status = PMCOUT_STATUS_OK;
        smp_pkt->smp_pkt_reason = EOVERFLOW;
    }
    if (status != PMCOUT_STATUS_OK) {
        const char *emsg = pmcs_status_str(status);
        if (emsg == NULL) {
            pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
                "SMP operation failed (0x%x)", status);
        } else {
            pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
                "SMP operation failed (%s)", emsg);
        }

        if ((status == PMCOUT_STATUS_ERROR_HW_TIMEOUT) ||
            (status == PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT)) {
            smp_pkt->smp_pkt_reason =
                will_retry ? EAGAIN : ETIMEDOUT;
            result = DDI_FAILURE;
        } else if (status ==
            PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS) {
            xp = pptr->target;
            if (xp == NULL) {
                smp_pkt->smp_pkt_reason = EIO;
                result = DDI_FAILURE;
                goto out;
            }
            if (xp->dev_state !=
                PMCS_DEVICE_STATE_NON_OPERATIONAL) {
                xp->dev_state =
                    PMCS_DEVICE_STATE_NON_OPERATIONAL;
                pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, xp->phy,
                    xp, "%s: Got _IT_NEXUS_LOSS SMP status. "
                    "Tgt(0x%p) dev_state set to "
                    "_NON_OPERATIONAL", __func__,
                    (void *)xp);
            }
            /* ABORT any pending commands related to this device */
            if (pmcs_abort(pwp, pptr, pptr->device_id, 1, 1) != 0) {
                pptr->abort_pending = 1;
                smp_pkt->smp_pkt_reason = EIO;
                result = DDI_FAILURE;
            }
        } else {
            smp_pkt->smp_pkt_reason = will_retry ? EAGAIN : EIO;
            result = DDI_FAILURE;
        }
    } else {
        (void) memcpy(smp_pkt->smp_pkt_rsp,
            &((uint8_t *)pwp->scratch)[rdoff], rspsz);
        if (smp_pkt->smp_pkt_reason == EOVERFLOW) {
            result = DDI_FAILURE;
        } else {
            result = DDI_SUCCESS;
        }
    }
out:
    pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, pptr->target,
        "%s: done for wwn 0x%" PRIx64, __func__, wwn);

    pmcs_unlock_phy(pptr);
    pmcs_release_scratch(pwp);
    return (result);
}

/*
 * smp_hba_tran_t child-init entry point.
 *
 * Looks up the target softstate keyed by the child's "target-port"
 * property, assigns it a slot in pwp->targets on first init, bumps the
 * target (and non-root PHY) reference counts, and publishes the
 * "attached-port" property on the SMP device node.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.  Note that a failure to look up
 * the "target-port" property deliberately returns DDI_SUCCESS.
 */
static int
pmcs_smp_init(dev_info_t *self, dev_info_t *child,
    smp_hba_tran_t *tran, smp_device_t *smp_sd)
{
    _NOTE(ARGUNUSED(tran, smp_sd));
    pmcs_iport_t *iport;
    pmcs_hw_t *pwp;
    pmcs_xscsi_t *tgt;
    pmcs_phy_t *phy, *pphy;
    uint64_t wwn;
    char *addr, *tgt_port;
    int ua_form = 1;

    /* ASSERT fires on DEBUG builds; the explicit check covers non-DEBUG */
    iport = ddi_get_soft_state(pmcs_iport_softstate,
        ddi_get_instance(self));
    ASSERT(iport);
    if (iport == NULL)
        return (DDI_FAILURE);
    pwp = iport->pwp;
    ASSERT(pwp);
    if (pwp == NULL)
        return (DDI_FAILURE);

    /* Get "target-port" prop from devinfo node */
    if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
        DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
        SCSI_ADDR_PROP_TARGET_PORT, &tgt_port) != DDI_SUCCESS) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed to "
            "lookup prop ("SCSI_ADDR_PROP_TARGET_PORT")", __func__);
        /* Don't fail _smp_init() because we couldn't get/set a prop */
        return (DDI_SUCCESS);
    }

    /*
     * Validate that this tran_tgt_init is for an active iport.
     */
    if (iport->ua_state == UA_INACTIVE) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
            "%s: Init on inactive iport for '%s'", __func__, tgt_port);
        ddi_prop_free(tgt_port);
        return (DDI_FAILURE);
    }

    mutex_enter(&pwp->lock);

    /* Retrieve softstate using unit-address */
    tgt = pmcs_get_target(iport, tgt_port, B_TRUE);
    if (tgt == NULL) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
            "%s: tgt softstate not found", __func__);
        ddi_prop_free(tgt_port);
        mutex_exit(&pwp->lock);
        return (DDI_FAILURE);
    }

    pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt, "%s: %s (%s)",
        __func__, ddi_get_name(child), tgt_port);

    mutex_enter(&tgt->statlock);
    phy = tgt->phy;
    ASSERT(mutex_owned(&phy->phy_lock));

    if (IS_ROOT_PHY(phy)) {
        /* Expander attached to HBA - don't ref_count it */
        wwn = pwp->sas_wwns[0];
    } else {
        pmcs_inc_phy_ref_count(phy);

        /*
         * Parent (in topology) is also an expander
         * Now that we've increased the ref count on phy, it's OK
         * to drop the lock so we can acquire the parent's lock.
         */
        pphy = phy->parent;
        mutex_exit(&tgt->statlock);
        pmcs_unlock_phy(phy);
        pmcs_lock_phy(pphy);
        wwn = pmcs_barray2wwn(pphy->sas_address);
        pmcs_unlock_phy(pphy);
        pmcs_lock_phy(phy);
        mutex_enter(&tgt->statlock);
    }

    /*
     * If this is the 1st smp_init, add this to our list.
     */
    if (tgt->target_num == PMCS_INVALID_TARGET_NUM) {
        int target;
        /* Claim the first free slot in the target array */
        for (target = 0; target < pwp->max_dev; target++) {
            if (pwp->targets[target] != NULL) {
                continue;
            }

            pwp->targets[target] = tgt;
            tgt->target_num = (uint16_t)target;
            tgt->assigned = 1;
            tgt->dev_state = PMCS_DEVICE_STATE_OPERATIONAL;
            break;
        }

        if (target == pwp->max_dev) {
            pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
                "Target list full.");
            goto smp_init_fail;
        }
    }

    if (!pmcs_assign_device(pwp, tgt)) {
        pwp->targets[tgt->target_num] = NULL;
        pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt,
            "%s: pmcs_assign_device failed for target 0x%p",
            __func__, (void *)tgt);
        goto smp_init_fail;
    }

    /*
     * Update the attached port and target port pm properties
     */
    tgt->smpd = smp_sd;

    pmcs_unlock_phy(phy);
    mutex_exit(&pwp->lock);

    tgt->ref_count++;
    tgt->dtype = phy->dtype;
    mutex_exit(&tgt->statlock);

    pmcs_update_phy_pm_props(phy, 0, 0, B_TRUE);

    addr = scsi_wwn_to_wwnstr(wwn, ua_form, NULL);
    if (smp_device_prop_update_string(smp_sd, SCSI_ADDR_PROP_ATTACHED_PORT,
        addr) != DDI_SUCCESS) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed to set "
            "prop ("SCSI_ADDR_PROP_ATTACHED_PORT")", __func__);
    }
    (void) scsi_free_wwnstr(addr);
    ddi_prop_free(tgt_port);
    return (DDI_SUCCESS);

smp_init_fail:
    /* Undo the slot assignment and PHY ref taken above */
    tgt->phy = NULL;
    tgt->target_num = PMCS_INVALID_TARGET_NUM;
    phy->target = NULL;
    if (!IS_ROOT_PHY(phy)) {
        pmcs_dec_phy_ref_count(phy);
    }
    mutex_exit(&tgt->statlock);
    pmcs_unlock_phy(phy);
    mutex_exit(&pwp->lock);
    ddi_soft_state_bystr_free(iport->tgt_sstate, tgt->unit_address);
    ddi_prop_free(tgt_port);
    return (DDI_FAILURE);
}

static void
pmcs_smp_free(dev_info_t *self, dev_info_t *child,
    smp_hba_tran_t *tran, smp_device_t *smp)
{
    _NOTE(ARGUNUSED(tran, smp));
    pmcs_iport_t *iport;
    pmcs_hw_t *pwp;
    pmcs_xscsi_t *tgt;
    char *tgt_port;

    iport = ddi_get_soft_state(pmcs_iport_softstate,
        ddi_get_instance(self));
    ASSERT(iport);
    if (iport == NULL)
        return;

    pwp = iport->pwp;
    if (pwp == NULL)
        return;
    /*
     * NOTE(review): this ASSERT can never fire — the NULL check above
     * already returned.  pmcs_smp_init() orders these the other way.
     */
    ASSERT(pwp);

    /* Get "target-port" prop from devinfo node */
    if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
        DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
        SCSI_ADDR_PROP_TARGET_PORT, &tgt_port) != DDI_SUCCESS) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed to "
            "lookup prop ("SCSI_ADDR_PROP_TARGET_PORT")", __func__);
        return;
    }

    /* Retrieve softstate using unit-address */
    mutex_enter(&pwp->lock);
    tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, tgt_port);
    pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt, "%s: %s (%s)", __func__,
        ddi_get_name(child), tgt_port);
    ddi_prop_free(tgt_port);

    if (tgt == NULL) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
            "%s: tgt softstate not found", __func__);
        mutex_exit(&pwp->lock);
        return;
    }

    /* Drop the PHY reference taken in pmcs_smp_init() */
    mutex_enter(&tgt->statlock);
    if (tgt->phy) {
        if (!IS_ROOT_PHY(tgt->phy)) {
            pmcs_dec_phy_ref_count(tgt->phy);
        }
    }

    if (--tgt->ref_count == 0) {
        /*
         * Remove this target from our list. The softstate
         * will remain, and the device will remain registered
         * with the hardware unless/until we're told that the
         * device physically went away.
         */
        pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt,
            "Removing target 0x%p (vtgt %d) from target list",
            (void *)tgt, tgt->target_num);
        pwp->targets[tgt->target_num] = NULL;
        tgt->target_num = PMCS_INVALID_TARGET_NUM;
        if (tgt->phy) {
            tgt->phy->target = NULL;
            tgt->phy = NULL;
        }
        /* pmcs_destroy_target() presumably consumes statlock — confirm */
        pmcs_destroy_target(tgt);
    } else {
        mutex_exit(&tgt->statlock);
    }

    mutex_exit(&pwp->lock);
}

/*
 * scsi_hba quiesce entry point: block new command submission and wait
 * until every target's active count drains to zero.
 *
 * Returns 0 on success (or when called on an iport node, which is a
 * no-op), -1 if the HBA softstate is missing or not STATE_RUNNING.
 */
static int
pmcs_scsi_quiesce(dev_info_t *dip)
{
    pmcs_hw_t *pwp;
    int totactive = -1;
    pmcs_xscsi_t *xp;
    uint16_t target;

    if (ddi_get_soft_state(pmcs_iport_softstate, ddi_get_instance(dip)))
        return (0);        /* iport */

    pwp = ddi_get_soft_state(pmcs_softc_state, ddi_get_instance(dip));
    if (pwp == NULL) {
        return (-1);
    }
    mutex_enter(&pwp->lock);
    if (pwp->state != STATE_RUNNING) {
        mutex_exit(&pwp->lock);
        return (-1);
    }

    pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s called", __func__);
    pwp->quiesced = pwp->blocked = 1;
    while (totactive) {
        totactive = 0;
        for (target = 0; target < pwp->max_dev; target++) {
            xp = pwp->targets[target];
            if (xp == NULL) {
                continue;
            }
            mutex_enter(&xp->statlock);
            if (xp->actv_cnt) {
                totactive += xp->actv_cnt;
                xp->draining = 1;
            }
            mutex_exit(&xp->statlock);
        }
        if (totactive) {
            /* Completion path signals drain_cv as counts hit zero */
            cv_wait(&pwp->drain_cv, &pwp->lock);
        }
        /*
         * The pwp->blocked may have been reset, e.g. by a SCSI bus
         * reset, while we waited — reassert it.
         */
        pwp->blocked = 1;
    }

    for (target = 0; target < pwp->max_dev; target++) {
        xp = pwp->targets[target];
        if (xp == NULL) {
            continue;
        }
        mutex_enter(&xp->statlock);
        xp->draining = 0;
        mutex_exit(&xp->statlock);
    }

    mutex_exit(&pwp->lock);
    if (totactive == 0) {
        /*
         * NOTE(review): xp here is whatever the last loop iteration
         * left behind and may be NULL — verify pmcs_prt tolerates it.
         */
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
            "%s drain complete", __func__);
    }
    return (0);
}

/*
 * scsi_hba unquiesce entry point: clear the blocked/quiesced flags and
 * restart the per-target wait queues and the completion queue.
 * Returns 0 on success, -1 if the HBA is missing or not running.
 */
static int
pmcs_scsi_unquiesce(dev_info_t *dip)
{
    pmcs_hw_t *pwp;

    if (ddi_get_soft_state(pmcs_iport_softstate, ddi_get_instance(dip)))
        return (0);        /* iport */

    pwp = ddi_get_soft_state(pmcs_softc_state, ddi_get_instance(dip));
    if (pwp == NULL) {
        return (-1);
    }
    mutex_enter(&pwp->lock);
    if (pwp->state != STATE_RUNNING) {
        mutex_exit(&pwp->lock);
        return (-1);
    }
    pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s called", __func__);
    pwp->blocked = pwp->quiesced = 0;
    mutex_exit(&pwp->lock);

    /*
     * Run all pending commands.
     */
    pmcs_scsa_wq_run(pwp);

    /*
     * Complete all completed commands.
     * This also unlocks us.
     */
    PMCS_CQ_RUN(pwp);
    return (0);
}

/*
 * Start commands for a particular device
 * If the actual start of a command fails, return B_FALSE. Any other result
 * is a B_TRUE return.
 */
boolean_t
pmcs_scsa_wq_run_one(pmcs_hw_t *pwp, pmcs_xscsi_t *xp)
{
    pmcs_cmd_t *sp;
    pmcs_phy_t *phyp;
    pmcwork_t *pwrk;
    boolean_t run_one, blocked;
    int rval;

    /*
     * First, check to see if we're blocked or resource limited
     */
    mutex_enter(&pwp->lock);
    blocked = pwp->blocked;
    /*
     * If resource_limited is set, we're resource constrained and
     * we will run only one work request for this target.
     */
    run_one = pwp->resource_limited;
    mutex_exit(&pwp->lock);

    if (blocked) {
        /* Queues will get restarted when we get unblocked */
        return (B_TRUE);
    }

    /*
     * Might as well verify the queue is not empty before moving on
     */
    mutex_enter(&xp->wqlock);
    if (STAILQ_EMPTY(&xp->wq)) {
        mutex_exit(&xp->wqlock);
        return (B_TRUE);
    }
    mutex_exit(&xp->wqlock);

    /*
     * If we're draining or resetting, just reschedule work queue and bail.
     */
    mutex_enter(&xp->statlock);
    if (xp->draining || xp->resetting || xp->special_running ||
        xp->special_needed) {
        mutex_exit(&xp->statlock);
        SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
        return (B_TRUE);
    }

    /*
     * Next, check to see if the target is gone.
     */
    if (xp->dev_gone) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
            "%s: Flushing wait queue for dead tgt 0x%p", __func__,
            (void *)xp);
        pmcs_flush_target_queues(pwp, xp, PMCS_TGT_WAIT_QUEUE);
        mutex_exit(&xp->statlock);
        return (B_TRUE);
    }

    /*
     * Increment the PHY's ref_count now so we know it won't go away
     * after we drop the target lock. Drop it before returning. If the
     * PHY dies, the commands we attempt to send will fail, but at least
     * we know we have a real PHY pointer.
     */
    phyp = xp->phy;
    pmcs_inc_phy_ref_count(phyp);
    mutex_exit(&xp->statlock);

    mutex_enter(&xp->wqlock);
    while ((sp = STAILQ_FIRST(&xp->wq)) != NULL) {
        pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_CBACK, phyp);
        if (pwrk == NULL) {
            pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
                "%s: out of work structures", __func__);
            SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
            break;
        }
        STAILQ_REMOVE_HEAD(&xp->wq, cmd_next);
        mutex_exit(&xp->wqlock);

        pwrk->xp = xp;
        pwrk->arg = sp;
        sp->cmd_tag = pwrk->htag;
        /* pkt_time is seconds; default to 1 second if unset */
        pwrk->timer = US2WT(CMD2PKT(sp)->pkt_time * 1000000);
        if (pwrk->timer == 0) {
            pwrk->timer = US2WT(1000000);
        }

        pwrk->dtype = xp->dtype;

        if (xp->dtype == SAS) {
            pwrk->ptr = (void *) pmcs_SAS_done;
            if ((rval = pmcs_SAS_run(sp, pwrk)) != 0) {
                /* Run failed: roll back refs and reschedule */
                sp->cmd_tag = NULL;
                pmcs_dec_phy_ref_count(phyp);
                pmcs_pwork(pwp, pwrk);
                SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
                if (rval == PMCS_WQ_RUN_FAIL_RES) {
                    return (B_FALSE);
                } else {
                    return (B_TRUE);
                }
            }
        } else {
            ASSERT(xp->dtype == SATA);
            pwrk->ptr = (void *) pmcs_SATA_done;
            if ((rval = pmcs_SATA_run(sp, pwrk)) != 0) {
                sp->cmd_tag = NULL;
                pmcs_dec_phy_ref_count(phyp);
                pmcs_pwork(pwp, pwrk);
                SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
                if (rval == PMCS_WQ_RUN_FAIL_RES) {
                    return (B_FALSE);
                } else {
                    return (B_TRUE);
                }
            }
        }

        if (run_one) {
            goto wq_out;
        }
        mutex_enter(&xp->wqlock);
    }

    mutex_exit(&xp->wqlock);

wq_out:
    pmcs_dec_phy_ref_count(phyp);
    return (B_TRUE);
}

/*
 * Start commands for all devices.
 */
void
pmcs_scsa_wq_run(pmcs_hw_t *pwp)
{
    pmcs_xscsi_t *xp;
    uint16_t target_start, target;
    boolean_t rval = B_TRUE;

    mutex_enter(&pwp->lock);
    /* Round-robin: resume from where the last pass stopped */
    target_start = pwp->last_wq_dev;
    target = target_start;

    do {
        xp = pwp->targets[target];
        if ((xp == NULL) || (STAILQ_EMPTY(&xp->wq))) {
            if (++target == pwp->max_dev) {
                target = 0;
            }
            continue;
        }

        mutex_exit(&pwp->lock);
        rval = pmcs_scsa_wq_run_one(pwp, xp);
        mutex_enter(&pwp->lock);

        if (rval == B_FALSE) {
            break;
        }

        if (++target == pwp->max_dev) {
            target = 0;
        }
    } while (target != target_start);

    if (rval) {
        pwp->resource_limited = 0; /* Not resource-constrained */
    } else {
        pwp->resource_limited = 1; /* Give others a chance */
    }

    pwp->last_wq_dev = target;
    mutex_exit(&pwp->lock);
}

/*
 * Pull the completion queue, drop the lock and complete all elements.
 */

void
pmcs_scsa_cq_run(void *arg)
{
    pmcs_cq_thr_info_t *cqti = (pmcs_cq_thr_info_t *)arg;
    pmcs_hw_t *pwp = cqti->cq_pwp;
    pmcs_cmd_t *sp, *nxt;
    struct scsi_pkt *pkt;
    pmcs_xscsi_t *tgt;
    pmcs_iocomp_cb_t *ioccb, *ioccb_next;
    pmcs_cb_t callback;
    uint32_t niodone;

    DTRACE_PROBE1(pmcs__scsa__cq__run__start, pmcs_cq_thr_info_t *, cqti);

    mutex_enter(&pwp->cq_lock);

    while (!pwp->cq_info.cq_stop) {
        /*
         * First, check the I/O completion callback queue.
         */

        ioccb = pwp->iocomp_cb_head;
        pwp->iocomp_cb_head = NULL;
        pwp->iocomp_cb_tail = NULL;
        mutex_exit(&pwp->cq_lock);

        /* NOTE(review): niodone is counted but never consumed here */
        niodone = 0;

        while (ioccb) {
            niodone++;
            /*
             * Grab the lock on the work structure. The callback
             * routine is responsible for clearing it.
             */
            mutex_enter(&ioccb->pwrk->lock);
            ioccb_next = ioccb->next;
            callback = (pmcs_cb_t)ioccb->pwrk->ptr;
            (*callback)(pwp, ioccb->pwrk,
                (uint32_t *)((void *)ioccb->iomb));
            kmem_cache_free(pwp->iocomp_cb_cache, ioccb);
            ioccb = ioccb_next;
        }

        /*
         * Next, run the completion queue
         */

        mutex_enter(&pwp->cq_lock);
        sp = STAILQ_FIRST(&pwp->cq);
        STAILQ_INIT(&pwp->cq);
        mutex_exit(&pwp->cq_lock);

        DTRACE_PROBE1(pmcs__scsa__cq__run__start__loop,
            pmcs_cq_thr_info_t *, cqti);

        if (sp && pmcs_check_acc_dma_handle(pwp)) {
            ddi_fm_service_impact(pwp->dip, DDI_SERVICE_UNAFFECTED);
        }

        while (sp) {
            nxt = STAILQ_NEXT(sp, cmd_next);
            pkt = CMD2PKT(sp);
            tgt = sp->cmd_target;
            pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, tgt,
                "%s: calling completion on %p for tgt %p", __func__,
                (void *)sp, (void *)tgt);
            if (tgt) {
                mutex_enter(&tgt->statlock);
                ASSERT(tgt->actv_pkts != 0);
                tgt->actv_pkts--;
                mutex_exit(&tgt->statlock);
            }
            scsi_hba_pkt_comp(pkt);
            sp = nxt;
        }

        DTRACE_PROBE1(pmcs__scsa__cq__run__end__loop,
            pmcs_cq_thr_info_t *, cqti);

        /* Sleep until more completions are queued for this thread */
        mutex_enter(&cqti->cq_thr_lock);
        cv_wait(&cqti->cq_cv, &cqti->cq_thr_lock);
        mutex_exit(&cqti->cq_thr_lock);

        mutex_enter(&pwp->cq_lock);
    }

    mutex_exit(&pwp->cq_lock);
    DTRACE_PROBE1(pmcs__scsa__cq__run__stop, pmcs_cq_thr_info_t *, cqti);
    thread_exit();
}

/*
 * Run a SAS command. Called with pwrk->lock held, returns unlocked.
 */
static int
pmcs_SAS_run(pmcs_cmd_t *sp, pmcwork_t *pwrk)
{
    pmcs_hw_t *pwp = CMD2PMC(sp);
    struct scsi_pkt *pkt = CMD2PKT(sp);
    pmcs_xscsi_t *xp = pwrk->xp;
    uint32_t iq, *ptr;
    sas_ssp_cmd_iu_t sc;

    mutex_enter(&xp->statlock);
    if (!xp->assigned) {
        mutex_exit(&xp->statlock);
        return (PMCS_WQ_RUN_FAIL_OTHER);
    }
    /* Queue-depth saturated or in recovery: requeue at the head */
    if ((xp->actv_cnt >= xp->qdepth) || xp->recover_wait) {
        mutex_exit(&xp->statlock);
        mutex_enter(&xp->wqlock);
        STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
        mutex_exit(&xp->wqlock);
        return (PMCS_WQ_RUN_FAIL_OTHER);
    }
    /*
     * On success GET_IO_IQ_ENTRY leaves the inbound queue lock
     * (pwp->iqp_lock[iq]) held — note the explicit mutex_exit on the
     * dma_load failure path below; INC_IQ_ENTRY presumably releases
     * it on the success path.
     */
    GET_IO_IQ_ENTRY(pwp, ptr, pwrk->phy->device_id, iq);
    if (ptr == NULL) {
        mutex_exit(&xp->statlock);
        /*
         * This is a temporary failure not likely to be unblocked by
         * commands completing as the test for scheduling the
         * restart of work is a per-device test.
         */
        mutex_enter(&xp->wqlock);
        STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
        mutex_exit(&xp->wqlock);
        pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
            "%s: Failed to get IO IQ entry for tgt %d",
            __func__, xp->target_num);
        return (PMCS_WQ_RUN_FAIL_RES);
    }

    /* Build the SSP_INI_IO_START IOMB */
    ptr[0] =
        LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE, PMCIN_SSP_INI_IO_START));
    ptr[1] = LE_32(pwrk->htag);
    ptr[2] = LE_32(pwrk->phy->device_id);
    ptr[3] = LE_32(pkt->pkt_dma_len);
    if (ptr[3]) {
        ASSERT(pkt->pkt_numcookies);
        if (pkt->pkt_dma_flags & DDI_DMA_READ) {
            ptr[4] = LE_32(PMCIN_DATADIR_2_INI);
        } else {
            ptr[4] = LE_32(PMCIN_DATADIR_2_DEV);
        }
        if (pmcs_dma_load(pwp, sp, ptr)) {
            mutex_exit(&pwp->iqp_lock[iq]);
            mutex_exit(&xp->statlock);
            mutex_enter(&xp->wqlock);
            if (STAILQ_EMPTY(&xp->wq)) {
                /* Queue empty: retry this command later */
                STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
                mutex_exit(&xp->wqlock);
            } else {
                /* Otherwise fail it back with QFULL status */
                mutex_exit(&xp->wqlock);
                CMD2PKT(sp)->pkt_scbp[0] = STATUS_QFULL;
                CMD2PKT(sp)->pkt_reason = CMD_CMPLT;
                CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS |
                    STATE_GOT_TARGET | STATE_SENT_CMD |
                    STATE_GOT_STATUS;
                mutex_enter(&pwp->cq_lock);
                STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
                mutex_exit(&pwp->cq_lock);
                pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
                    "%s: Failed to dma_load for tgt %d (QF)",
                    __func__, xp->target_num);
            }
            return (PMCS_WQ_RUN_FAIL_RES);
        }
    } else {
        ptr[4] = LE_32(PMCIN_DATADIR_NONE);
        CLEAN_MESSAGE(ptr, 12);
    }
    xp->actv_cnt++;
    if (xp->actv_cnt > xp->maxdepth) {
        xp->maxdepth = xp->actv_cnt;
        pmcs_prt(pwp, PMCS_PRT_DEBUG2, pwrk->phy, xp, "%s: max depth "
            "now %u", pwrk->phy->path, xp->maxdepth);
    }
    mutex_exit(&xp->statlock);

#ifdef DEBUG
    /*
     * Generate a PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED
     * event when this goes out on the wire.
     */
    ptr[4] |= PMCIN_MESSAGE_REPORT;
#endif
    /*
     * Fill in the SSP IU
     */

    bzero(&sc, sizeof (sas_ssp_cmd_iu_t));
    bcopy((uint8_t *)&sp->cmd_lun->scsi_lun, sc.lun, sizeof (scsi_lun_t));

    /* Map SCSA tag flags to the SSP task attribute */
    switch (pkt->pkt_flags & FLAG_TAGMASK) {
    case FLAG_HTAG:
        sc.task_attribute = SAS_CMD_TASK_ATTR_HEAD;
        break;
    case FLAG_OTAG:
        sc.task_attribute = SAS_CMD_TASK_ATTR_ORDERED;
        break;
    case FLAG_STAG:
    default:
        sc.task_attribute = SAS_CMD_TASK_ATTR_SIMPLE;
        break;
    }
    (void) memcpy(sc.cdb, pkt->pkt_cdbp,
        min(SCSA_CDBLEN(sp), sizeof (sc.cdb)));
    (void) memcpy(&ptr[5], &sc, sizeof (sas_ssp_cmd_iu_t));
    pwrk->state = PMCS_WORK_STATE_ONCHIP;
    mutex_exit(&pwrk->lock);
    pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
        "%s: giving pkt %p (tag %x) to the hardware", __func__,
        (void *)pkt, pwrk->htag);
#ifdef DEBUG
    pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "SAS INI Message", ptr);
#endif
    /* Move the command to the active queue before ringing the doorbell */
    mutex_enter(&xp->aqlock);
    STAILQ_INSERT_TAIL(&xp->aq, sp, cmd_next);
    mutex_exit(&xp->aqlock);
    INC_IQ_ENTRY(pwp, iq);

    /*
     * If we just submitted the last command queued from device state
     * recovery, clear the wq_recovery_tail pointer.
     */
    mutex_enter(&xp->wqlock);
    if (xp->wq_recovery_tail == sp) {
        xp->wq_recovery_tail = NULL;
    }
    mutex_exit(&xp->wqlock);

    return (PMCS_WQ_RUN_SUCCESS);
}

/*
 * Complete a SAS command
 *
 * Called with pwrk lock held.
 * The free of pwrk releases the lock.
 */

static void
pmcs_SAS_done(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *msg)
{
    pmcs_cmd_t *sp = pwrk->arg;
    pmcs_phy_t *pptr = pwrk->phy;
    pmcs_xscsi_t *xp = pwrk->xp;
    struct scsi_pkt *pkt = CMD2PKT(sp);
    int dead;
    uint32_t sts;
    boolean_t aborted = B_FALSE;
    boolean_t do_ds_recovery = B_FALSE;

    ASSERT(xp != NULL);
    ASSERT(sp != NULL);
    ASSERT(pptr != NULL);

    DTRACE_PROBE4(pmcs__io__done, uint64_t, pkt->pkt_dma_len, int,
        (pkt->pkt_dma_flags & DDI_DMA_READ) != 0, hrtime_t, pwrk->start,
        hrtime_t, gethrtime());

    dead = pwrk->dead;

    if (msg) {
        sts = LE_32(msg[2]);
    } else {
        sts = 0;
    }

    if (dead != 0) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, "%s: dead cmd tag "
            "0x%x for %s", __func__, pwrk->htag, pptr->path);
        goto out;
    }

    if (sts == PMCOUT_STATUS_ABORTED) {
        aborted = B_TRUE;
    }

    if (pwrk->state == PMCS_WORK_STATE_TIMED_OUT) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
            "%s: cmd 0x%p (tag 0x%x) timed out for %s",
            __func__, (void *)sp, pwrk->htag, pptr->path);
        CMD2PKT(sp)->pkt_scbp[0] = STATUS_GOOD;
        CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
            STATE_SENT_CMD;
        CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT;
        goto out;
    }

    /*
     * If the status isn't okay but not underflow,
     * step to the side and parse the (possible) error.
     */
#ifdef DEBUG
    if (msg) {
        pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "Outbound Message", msg);
    }
#endif
    if (!msg) {
        goto out;
    }

    switch (sts) {
    case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
    case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL:
    case PMCOUT_STATUS_IO_DS_IN_RECOVERY:
        pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
            "%s: PHY %s requires DS recovery (status=%d)",
            __func__, pptr->path, sts);
        do_ds_recovery = B_TRUE;
        break;
    case PMCOUT_STATUS_UNDERFLOW:
        /* Underflow is treated as OK once the residual is recorded */
        (void) pmcs_set_resid(pkt, pkt->pkt_dma_len, LE_32(msg[3]));
        pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW, NULL, NULL,
            "%s: underflow %u for cdb 0x%x",
            __func__, LE_32(msg[3]), pkt->pkt_cdbp[0] & 0xff);
        sts = PMCOUT_STATUS_OK;
        msg[3] = 0;
        break;
    case PMCOUT_STATUS_OK:
        pkt->pkt_resid = 0;
        break;
    }

    if (sts != PMCOUT_STATUS_OK) {
        pmcs_ioerror(pwp, SAS, pwrk, msg);
    } else {
        if (msg[3]) {
            /* msg[3] != 0: an SSP response frame is present */
            uint8_t local[PMCS_QENTRY_SIZE << 1], *xd;
            sas_ssp_rsp_iu_t *rptr = (void *)local;
            const int lim =
                (PMCS_QENTRY_SIZE << 1) - SAS_RSP_HDR_SIZE;
            static const uint8_t ssp_rsp_evec[] = {
                0x58, 0x61, 0x56, 0x72, 0x00
            };

            /*
             * Transform the first part of the response
             * to host canonical form. This gives us enough
             * information to figure out what to do with the
             * rest (which remains unchanged in the incoming
             * message which can be up to two queue entries
             * in length).
             */
            pmcs_endian_transform(pwp, local, &msg[5],
                ssp_rsp_evec);
            xd = (uint8_t *)(&msg[5]);
            xd += SAS_RSP_HDR_SIZE;

            if (rptr->datapres == SAS_RSP_DATAPRES_RESPONSE_DATA) {
                if (rptr->response_data_length != 4) {
                    pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
                        "Bad SAS RESPONSE DATA LENGTH",
                        msg);
                    pkt->pkt_reason = CMD_TRAN_ERR;
                    goto out;
                }
                (void) memcpy(&sts, xd, sizeof (uint32_t));
                sts = BE_32(sts);
                /*
                 * The only response code we should legally get
                 * here is an INVALID FRAME response code.
                 */
                if (sts == SAS_RSP_INVALID_FRAME) {
                    pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
                        "%s: pkt %p tgt %u path %s "
                        "completed: INVALID FRAME response",
                        __func__, (void *)pkt,
                        xp->target_num, pptr->path);
                } else {
                    pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
                        "%s: pkt %p tgt %u path %s "
                        "completed: illegal response 0x%x",
                        __func__, (void *)pkt,
                        xp->target_num, pptr->path, sts);
                }
                pkt->pkt_reason = CMD_TRAN_ERR;
                goto out;
            }
            if (rptr->datapres == SAS_RSP_DATAPRES_SENSE_DATA) {
                /* Latch sense data, clamped to our local buffer */
                uint32_t slen;
                slen = rptr->sense_data_length;
                if (slen > lim) {
                    slen = lim;
                }
                pmcs_latch_status(pwp, sp, rptr->status, xd,
                    slen, pptr->path);
            } else if (rptr->datapres == SAS_RSP_DATAPRES_NO_DATA) {
                pmcout_ssp_comp_t *sspcp;
                sspcp = (pmcout_ssp_comp_t *)msg;
                uint32_t *residp;
                /*
                 * This is the case for a plain SCSI status.
                 * Note: If RESC_V is set and we're here, there
                 * is a residual. We need to find it and update
                 * the packet accordingly.
                 */
                pmcs_latch_status(pwp, sp, rptr->status, NULL,
                    0, pptr->path);

                if (sspcp->resc_v) {
                    /*
                     * Point residual to the SSP_RESP_IU
                     */
                    residp = (uint32_t *)(sspcp + 1);
                    /*
                     * param contains the number of bytes
                     * between where the SSP_RESP_IU may
                     * or may not be and the residual.
                     * Increment residp by the appropriate
                     * number of words: (param+resc_pad)/4.
                     */
                    residp += (LE_32(sspcp->param) +
                        sspcp->resc_pad) /
                        sizeof (uint32_t);
                    pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW,
                        pptr, xp, "%s: tgt 0x%p "
                        "residual %d for pkt 0x%p",
                        __func__, (void *) xp, *residp,
                        (void *) pkt);
                    ASSERT(LE_32(*residp) <=
                        pkt->pkt_dma_len);
                    (void) pmcs_set_resid(pkt,
                        pkt->pkt_dma_len, LE_32(*residp));
                }
            } else {
                pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
                    "illegal SAS response", msg);
                pkt->pkt_reason = CMD_TRAN_ERR;
                goto out;
            }
        } else {
            pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0,
                pptr->path);
        }
        if (pkt->pkt_dma_len) {
            pkt->pkt_state |= STATE_XFERRED_DATA;
        }
    }
    pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp,
        "%s: pkt %p tgt %u done reason=%x state=%x resid=%ld status=%x",
        __func__, (void *)pkt, xp->target_num, pkt->pkt_reason,
        pkt->pkt_state, pkt->pkt_resid, pkt->pkt_scbp[0]);

    if (pwrk->state == PMCS_WORK_STATE_ABORTED) {
        pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
            "%s: scsi_pkt 0x%p aborted for PHY %s; work = 0x%p",
            __func__, (void *)pkt, pptr->path, (void *)pwrk);
        aborted = B_TRUE;
    }

out:
    pmcs_dma_unload(pwp, sp);

    /*
     * If the status is other than OK, determine if it's something that
     * is worth re-attempting enumeration. If so, mark the PHY.
     */
    if (sts != PMCOUT_STATUS_OK) {
        pmcs_status_disposition(pptr, sts);
    }

    mutex_enter(&xp->statlock);

    /*
     * If the device no longer has a PHY pointer, clear the PHY pointer
     * from the work structure before we free it. Otherwise, pmcs_pwork
     * may decrement the ref_count on a PHY that's been freed.
     */
    if (xp->phy == NULL) {
        pwrk->phy = NULL;
    }

    pmcs_pwork(pwp, pwrk);

    /*
     * If the device is gone, we only put this command on the completion
     * queue if the work structure is not marked dead. If it's marked
     * dead, it will already have been put there.
     */
    if (xp->dev_gone) {
        mutex_exit(&xp->statlock);
        if (!dead) {
            mutex_enter(&xp->aqlock);
            STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next);
            mutex_exit(&xp->aqlock);
            pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, xp,
                "%s: Removing cmd 0x%p (htag 0x%x) from aq",
                __func__, (void *)sp, sp->cmd_tag);
            mutex_enter(&pwp->cq_lock);
            STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
            mutex_exit(&pwp->cq_lock);
            pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp,
                "%s: Completing command for dead target 0x%p",
                __func__, (void *)xp);
        }
        return;
    }

    ASSERT(xp->actv_cnt > 0);
    if (--(xp->actv_cnt) == 0) {
        if (xp->draining) {
            pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, xp,
                "%s: waking up drain waiters", __func__);
            cv_signal(&pwp->drain_cv);
        }
    }
    mutex_exit(&xp->statlock);
    if (dead == 0) {
#ifdef DEBUG
        /* DEBUG: verify sp really is on the active queue before removal */
        pmcs_cmd_t *wp;
        mutex_enter(&xp->aqlock);
        STAILQ_FOREACH(wp, &xp->aq, cmd_next) {
            if (wp == sp) {
                break;
            }
        }
        ASSERT(wp != NULL);
#else
        mutex_enter(&xp->aqlock);
#endif
        pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, xp,
            "%s: Removing cmd 0x%p (htag 0x%x) from aq", __func__,
            (void *)sp, sp->cmd_tag);
        STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next);
        if (aborted) {
            pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
                "%s: Aborted cmd for tgt 0x%p, signaling waiters",
                __func__, (void *)xp);
            cv_signal(&xp->abort_cv);
        }
        mutex_exit(&xp->aqlock);
    }

    /*
     * If do_ds_recovery is set, we need to initiate device state
     * recovery. In this case, we put this I/O back on the head of
     * the wait queue to run again after recovery is complete
     */
    if (do_ds_recovery) {
        mutex_enter(&xp->statlock);
        pmcs_start_dev_state_recovery(xp, pptr);
        mutex_exit(&xp->statlock);
        pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, xp, "%s: Putting cmd 0x%p "
            "back on wq during recovery for tgt 0x%p", __func__,
            (void *)sp, (void *)xp);
        mutex_enter(&xp->wqlock);
        if (xp->wq_recovery_tail == NULL) {
            STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
        } else {
            /*
             * If there are other I/Os waiting at the head due to
             * device state recovery, add this one in the right spot
             * to maintain proper order.
             */
            STAILQ_INSERT_AFTER(&xp->wq, xp->wq_recovery_tail, sp,
                cmd_next);
        }
        xp->wq_recovery_tail = sp;
        mutex_exit(&xp->wqlock);
    } else {
        /*
         * If we're not initiating device state recovery and this
         * command was not "dead", put it on the completion queue
         */
        if (!dead) {
            mutex_enter(&pwp->cq_lock);
            STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
            mutex_exit(&pwp->cq_lock);
        }
    }
}

/*
 * Run a SATA command (normal reads and writes),
 * or block and schedule a SATL interpretation
 * of the command.
 *
 * Called with pwrk lock held, returns unlocked.
 */

static int
pmcs_SATA_run(pmcs_cmd_t *sp, pmcwork_t *pwrk)
{
    pmcs_hw_t *pwp = CMD2PMC(sp);
    struct scsi_pkt *pkt = CMD2PKT(sp);
    pmcs_xscsi_t *xp;
    uint8_t cdb_base, asc, tag;
    uint32_t *ptr, iq, nblk, i, mtype;
    fis_t fis;
    size_t amt;
    uint64_t lba;

    xp = pwrk->xp;

    /*
     * First, see if this is just a plain read/write command.
     * If not, we have to queue it up for processing, block
     * any additional commands from coming in, and wake up
     * the thread that will process this command.
2313 */ 2314 cdb_base = pkt->pkt_cdbp[0] & 0x1f; 2315 if (cdb_base != SCMD_READ && cdb_base != SCMD_WRITE) { 2316 pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, NULL, 2317 "%s: special SATA cmd %p", __func__, (void *)sp); 2318 2319 ASSERT(xp->phy != NULL); 2320 pmcs_pwork(pwp, pwrk); 2321 pmcs_lock_phy(xp->phy); 2322 mutex_enter(&xp->statlock); 2323 xp->special_needed = 1; /* Set the special_needed flag */ 2324 STAILQ_INSERT_TAIL(&xp->sq, sp, cmd_next); 2325 if (pmcs_run_sata_special(pwp, xp)) { 2326 SCHEDULE_WORK(pwp, PMCS_WORK_SATA_RUN); 2327 } 2328 mutex_exit(&xp->statlock); 2329 pmcs_unlock_phy(xp->phy); 2330 2331 return (PMCS_WQ_RUN_SUCCESS); 2332 } 2333 2334 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "%s: regular cmd", __func__); 2335 2336 mutex_enter(&xp->statlock); 2337 if (!xp->assigned) { 2338 mutex_exit(&xp->statlock); 2339 return (PMCS_WQ_RUN_FAIL_OTHER); 2340 } 2341 if (xp->special_running || xp->special_needed || xp->recover_wait) { 2342 mutex_exit(&xp->statlock); 2343 mutex_enter(&xp->wqlock); 2344 STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next); 2345 mutex_exit(&xp->wqlock); 2346 /* 2347 * By the time we get here the special 2348 * commands running or waiting to be run 2349 * may have come and gone, so kick our 2350 * worker to run the waiting queues 2351 * just in case. 2352 */ 2353 SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES); 2354 return (PMCS_WQ_RUN_FAIL_OTHER); 2355 } 2356 lba = xp->capacity; 2357 mutex_exit(&xp->statlock); 2358 2359 /* 2360 * Extract data length and lba parameters out of the command. The 2361 * function pmcs_SATA_rwparm returns a non-zero ASC value if the CDB 2362 * values are considered illegal. 
2363 */ 2364 asc = pmcs_SATA_rwparm(pkt->pkt_cdbp, &nblk, &lba, lba); 2365 if (asc) { 2366 uint8_t sns[18]; 2367 bzero(sns, sizeof (sns)); 2368 sns[0] = 0xf0; 2369 sns[2] = 0x5; 2370 sns[12] = asc; 2371 pmcs_latch_status(pwp, sp, STATUS_CHECK, sns, sizeof (sns), 2372 pwrk->phy->path); 2373 pmcs_pwork(pwp, pwrk); 2374 mutex_enter(&pwp->cq_lock); 2375 STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next); 2376 PMCS_CQ_RUN_LOCKED(pwp); 2377 mutex_exit(&pwp->cq_lock); 2378 return (PMCS_WQ_RUN_SUCCESS); 2379 } 2380 2381 /* 2382 * If the command decodes as not moving any data, complete it here. 2383 */ 2384 amt = nblk; 2385 amt <<= 9; 2386 amt = pmcs_set_resid(pkt, amt, nblk << 9); 2387 if (amt == 0) { 2388 pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, 2389 pwrk->phy->path); 2390 pmcs_pwork(pwp, pwrk); 2391 mutex_enter(&pwp->cq_lock); 2392 STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next); 2393 PMCS_CQ_RUN_LOCKED(pwp); 2394 mutex_exit(&pwp->cq_lock); 2395 return (PMCS_WQ_RUN_SUCCESS); 2396 } 2397 2398 /* 2399 * Get an inbound queue entry for this I/O 2400 */ 2401 GET_IO_IQ_ENTRY(pwp, ptr, xp->phy->device_id, iq); 2402 if (ptr == NULL) { 2403 /* 2404 * This is a temporary failure not likely to unblocked by 2405 * commands completing as the test for scheduling the 2406 * restart of work is a per-device test. 2407 */ 2408 mutex_enter(&xp->wqlock); 2409 STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next); 2410 mutex_exit(&xp->wqlock); 2411 pmcs_dma_unload(pwp, sp); 2412 SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES); 2413 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp, 2414 "%s: Failed to get IO IQ entry for tgt %d", 2415 __func__, xp->target_num); 2416 return (PMCS_WQ_RUN_FAIL_RES); 2417 } 2418 2419 /* 2420 * Get a tag. At this point, hold statlock until the tagmap is 2421 * updated (just prior to sending the cmd to the hardware). 
2422 */ 2423 mutex_enter(&xp->statlock); 2424 for (tag = 0; tag < xp->qdepth; tag++) { 2425 if ((xp->tagmap & (1 << tag)) == 0) { 2426 break; 2427 } 2428 } 2429 2430 if (tag == xp->qdepth) { 2431 mutex_exit(&xp->statlock); 2432 mutex_exit(&pwp->iqp_lock[iq]); 2433 mutex_enter(&xp->wqlock); 2434 STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next); 2435 mutex_exit(&xp->wqlock); 2436 return (PMCS_WQ_RUN_FAIL_OTHER); 2437 } 2438 2439 sp->cmd_satltag = (uint8_t)tag; 2440 2441 /* 2442 * Set up the command 2443 */ 2444 bzero(fis, sizeof (fis)); 2445 ptr[0] = 2446 LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE, PMCIN_SATA_HOST_IO_START)); 2447 ptr[1] = LE_32(pwrk->htag); 2448 ptr[2] = LE_32(pwrk->phy->device_id); 2449 ptr[3] = LE_32(amt); 2450 2451 if (xp->ncq) { 2452 mtype = SATA_PROTOCOL_FPDMA | (tag << 16); 2453 fis[0] = ((nblk & 0xff) << 24) | (C_BIT << 8) | FIS_REG_H2DEV; 2454 if (cdb_base == SCMD_READ) { 2455 fis[0] |= (READ_FPDMA_QUEUED << 16); 2456 } else { 2457 fis[0] |= (WRITE_FPDMA_QUEUED << 16); 2458 } 2459 fis[1] = (FEATURE_LBA << 24) | (lba & 0xffffff); 2460 fis[2] = ((nblk & 0xff00) << 16) | ((lba >> 24) & 0xffffff); 2461 fis[3] = tag << 3; 2462 } else { 2463 int op; 2464 fis[0] = (C_BIT << 8) | FIS_REG_H2DEV; 2465 if (xp->pio) { 2466 mtype = SATA_PROTOCOL_PIO; 2467 if (cdb_base == SCMD_READ) { 2468 op = READ_SECTORS_EXT; 2469 } else { 2470 op = WRITE_SECTORS_EXT; 2471 } 2472 } else { 2473 mtype = SATA_PROTOCOL_DMA; 2474 if (cdb_base == SCMD_READ) { 2475 op = READ_DMA_EXT; 2476 } else { 2477 op = WRITE_DMA_EXT; 2478 } 2479 } 2480 fis[0] |= (op << 16); 2481 fis[1] = (FEATURE_LBA << 24) | (lba & 0xffffff); 2482 fis[2] = (lba >> 24) & 0xffffff; 2483 fis[3] = nblk; 2484 } 2485 2486 if (cdb_base == SCMD_READ) { 2487 ptr[4] = LE_32(mtype | PMCIN_DATADIR_2_INI); 2488 } else { 2489 ptr[4] = LE_32(mtype | PMCIN_DATADIR_2_DEV); 2490 } 2491 #ifdef DEBUG 2492 /* 2493 * Generate a PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED 2494 * event when this goes out on the wire. 
2495 */ 2496 ptr[4] |= PMCIN_MESSAGE_REPORT; 2497 #endif 2498 for (i = 0; i < (sizeof (fis_t))/(sizeof (uint32_t)); i++) { 2499 ptr[i+5] = LE_32(fis[i]); 2500 } 2501 if (pmcs_dma_load(pwp, sp, ptr)) { 2502 mutex_exit(&xp->statlock); 2503 mutex_exit(&pwp->iqp_lock[iq]); 2504 mutex_enter(&xp->wqlock); 2505 STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next); 2506 mutex_exit(&xp->wqlock); 2507 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp, 2508 "%s: Failed to dma_load for tgt %d", 2509 __func__, xp->target_num); 2510 return (PMCS_WQ_RUN_FAIL_RES); 2511 2512 } 2513 2514 pwrk->state = PMCS_WORK_STATE_ONCHIP; 2515 mutex_exit(&pwrk->lock); 2516 xp->tagmap |= (1 << tag); 2517 xp->actv_cnt++; 2518 if (xp->actv_cnt > xp->maxdepth) { 2519 xp->maxdepth = xp->actv_cnt; 2520 pmcs_prt(pwp, PMCS_PRT_DEBUG2, pwrk->phy, xp, 2521 "%s: max depth now %u", pwrk->phy->path, xp->maxdepth); 2522 } 2523 mutex_exit(&xp->statlock); 2524 mutex_enter(&xp->aqlock); 2525 STAILQ_INSERT_TAIL(&xp->aq, sp, cmd_next); 2526 mutex_exit(&xp->aqlock); 2527 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, 2528 "%s: giving pkt %p to hardware", __func__, (void *)pkt); 2529 #ifdef DEBUG 2530 pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "SATA INI Message", ptr); 2531 #endif 2532 INC_IQ_ENTRY(pwp, iq); 2533 2534 return (PMCS_WQ_RUN_SUCCESS); 2535 } 2536 2537 /* 2538 * Complete a SATA command. Called with pwrk lock held. 
2539 */ 2540 void 2541 pmcs_SATA_done(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *msg) 2542 { 2543 pmcs_cmd_t *sp = pwrk->arg; 2544 struct scsi_pkt *pkt = CMD2PKT(sp); 2545 pmcs_phy_t *pptr = pwrk->phy; 2546 int dead; 2547 uint32_t sts; 2548 pmcs_xscsi_t *xp; 2549 boolean_t aborted = B_FALSE; 2550 2551 xp = pwrk->xp; 2552 ASSERT(xp != NULL); 2553 2554 DTRACE_PROBE4(pmcs__io__done, uint64_t, pkt->pkt_dma_len, int, 2555 (pkt->pkt_dma_flags & DDI_DMA_READ) != 0, hrtime_t, pwrk->start, 2556 hrtime_t, gethrtime()); 2557 2558 dead = pwrk->dead; 2559 2560 if (msg) { 2561 sts = LE_32(msg[2]); 2562 } else { 2563 sts = 0; 2564 } 2565 2566 if (dead != 0) { 2567 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, "%s: dead cmd tag " 2568 "0x%x for %s", __func__, pwrk->htag, pptr->path); 2569 goto out; 2570 } 2571 if ((pwrk->state == PMCS_WORK_STATE_TIMED_OUT) && 2572 (sts != PMCOUT_STATUS_ABORTED)) { 2573 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 2574 "%s: cmd 0x%p (tag 0x%x) timed out for %s", 2575 __func__, (void *)sp, pwrk->htag, pptr->path); 2576 CMD2PKT(sp)->pkt_scbp[0] = STATUS_GOOD; 2577 /* pkt_reason already set to CMD_TIMEOUT */ 2578 ASSERT(CMD2PKT(sp)->pkt_reason == CMD_TIMEOUT); 2579 CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | 2580 STATE_SENT_CMD; 2581 CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT; 2582 goto out; 2583 } 2584 2585 pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp, "%s: pkt %p tgt %u done", 2586 __func__, (void *)pkt, xp->target_num); 2587 2588 /* 2589 * If the status isn't okay but not underflow, 2590 * step to the side and parse the (possible) error. 2591 */ 2592 #ifdef DEBUG 2593 if (msg) { 2594 pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "Outbound Message", msg); 2595 } 2596 #endif 2597 if (!msg) { 2598 goto out; 2599 } 2600 2601 /* 2602 * If the status isn't okay or we got a FIS response of some kind, 2603 * step to the side and parse the (possible) error. 
2604 */ 2605 if ((sts != PMCOUT_STATUS_OK) || (LE_32(msg[3]) != 0)) { 2606 if (sts == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) { 2607 mutex_exit(&pwrk->lock); 2608 pmcs_lock_phy(pptr); 2609 mutex_enter(&xp->statlock); 2610 if ((xp->resetting == 0) && (xp->reset_success != 0) && 2611 (xp->reset_wait == 0)) { 2612 mutex_exit(&xp->statlock); 2613 if (pmcs_reset_phy(pwp, pptr, 2614 PMCS_PHYOP_LINK_RESET) != 0) { 2615 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 2616 "%s: PHY (%s) Local Control/Link " 2617 "Reset FAILED as part of error " 2618 "recovery", __func__, pptr->path); 2619 } 2620 mutex_enter(&xp->statlock); 2621 } 2622 mutex_exit(&xp->statlock); 2623 pmcs_unlock_phy(pptr); 2624 mutex_enter(&pwrk->lock); 2625 } 2626 pmcs_ioerror(pwp, SATA, pwrk, msg); 2627 } else { 2628 pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, 2629 pwrk->phy->path); 2630 pkt->pkt_state |= STATE_XFERRED_DATA; 2631 pkt->pkt_resid = 0; 2632 } 2633 2634 pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp, 2635 "%s: pkt %p tgt %u done reason=%x state=%x resid=%ld status=%x", 2636 __func__, (void *)pkt, xp->target_num, pkt->pkt_reason, 2637 pkt->pkt_state, pkt->pkt_resid, pkt->pkt_scbp[0]); 2638 2639 if (pwrk->state == PMCS_WORK_STATE_ABORTED) { 2640 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 2641 "%s: scsi_pkt 0x%p aborted for PHY %s; work = 0x%p", 2642 __func__, (void *)pkt, pptr->path, (void *)pwrk); 2643 aborted = B_TRUE; 2644 } 2645 2646 out: 2647 pmcs_dma_unload(pwp, sp); 2648 2649 /* 2650 * If the status is other than OK, determine if it's something that 2651 * is worth re-attempting enumeration. If so, mark the PHY. 2652 */ 2653 if (sts != PMCOUT_STATUS_OK) { 2654 pmcs_status_disposition(pptr, sts); 2655 } 2656 2657 mutex_enter(&xp->statlock); 2658 xp->tagmap &= ~(1 << sp->cmd_satltag); 2659 2660 /* 2661 * If the device no longer has a PHY pointer, clear the PHY pointer 2662 * from the work structure before we free it. Otherwise, pmcs_pwork 2663 * may decrement the ref_count on a PHY that's been freed. 
2664 */ 2665 if (xp->phy == NULL) { 2666 pwrk->phy = NULL; 2667 } 2668 2669 pmcs_pwork(pwp, pwrk); 2670 2671 if (xp->dev_gone) { 2672 mutex_exit(&xp->statlock); 2673 if (!dead) { 2674 mutex_enter(&xp->aqlock); 2675 STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next); 2676 mutex_exit(&xp->aqlock); 2677 pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, xp, 2678 "%s: Removing cmd 0x%p (htag 0x%x) from aq", 2679 __func__, (void *)sp, sp->cmd_tag); 2680 mutex_enter(&pwp->cq_lock); 2681 STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next); 2682 mutex_exit(&pwp->cq_lock); 2683 pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp, 2684 "%s: Completing command for dead target 0x%p", 2685 __func__, (void *)xp); 2686 } 2687 return; 2688 } 2689 2690 ASSERT(xp->actv_cnt > 0); 2691 if (--(xp->actv_cnt) == 0) { 2692 if (xp->draining) { 2693 pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, xp, 2694 "%s: waking up drain waiters", __func__); 2695 cv_signal(&pwp->drain_cv); 2696 } else if (xp->special_needed) { 2697 SCHEDULE_WORK(pwp, PMCS_WORK_SATA_RUN); 2698 } 2699 } 2700 mutex_exit(&xp->statlock); 2701 2702 if (dead == 0) { 2703 #ifdef DEBUG 2704 pmcs_cmd_t *wp; 2705 mutex_enter(&xp->aqlock); 2706 STAILQ_FOREACH(wp, &xp->aq, cmd_next) { 2707 if (wp == sp) { 2708 break; 2709 } 2710 } 2711 ASSERT(wp != NULL); 2712 #else 2713 mutex_enter(&xp->aqlock); 2714 #endif 2715 STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next); 2716 if (aborted) { 2717 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 2718 "%s: Aborted cmd for tgt 0x%p, signaling waiters", 2719 __func__, (void *)xp); 2720 cv_signal(&xp->abort_cv); 2721 } 2722 mutex_exit(&xp->aqlock); 2723 mutex_enter(&pwp->cq_lock); 2724 STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next); 2725 mutex_exit(&pwp->cq_lock); 2726 } 2727 } 2728 2729 static uint8_t 2730 pmcs_SATA_rwparm(uint8_t *cdb, uint32_t *xfr, uint64_t *lba, uint64_t lbamax) 2731 { 2732 uint8_t asc = 0; 2733 switch (cdb[0]) { 2734 case SCMD_READ_G5: 2735 case SCMD_WRITE_G5: 2736 *xfr = 2737 (((uint32_t)cdb[10]) << 24) | 2738 (((uint32_t)cdb[11]) << 
16) | 2739 (((uint32_t)cdb[12]) << 8) | 2740 ((uint32_t)cdb[13]); 2741 *lba = 2742 (((uint64_t)cdb[2]) << 56) | 2743 (((uint64_t)cdb[3]) << 48) | 2744 (((uint64_t)cdb[4]) << 40) | 2745 (((uint64_t)cdb[5]) << 32) | 2746 (((uint64_t)cdb[6]) << 24) | 2747 (((uint64_t)cdb[7]) << 16) | 2748 (((uint64_t)cdb[8]) << 8) | 2749 ((uint64_t)cdb[9]); 2750 /* Check for illegal bits */ 2751 if (cdb[15]) { 2752 asc = 0x24; /* invalid field in cdb */ 2753 } 2754 break; 2755 case SCMD_READ_G4: 2756 case SCMD_WRITE_G4: 2757 *xfr = 2758 (((uint32_t)cdb[6]) << 16) | 2759 (((uint32_t)cdb[7]) << 8) | 2760 ((uint32_t)cdb[8]); 2761 *lba = 2762 (((uint32_t)cdb[2]) << 24) | 2763 (((uint32_t)cdb[3]) << 16) | 2764 (((uint32_t)cdb[4]) << 8) | 2765 ((uint32_t)cdb[5]); 2766 /* Check for illegal bits */ 2767 if (cdb[11]) { 2768 asc = 0x24; /* invalid field in cdb */ 2769 } 2770 break; 2771 case SCMD_READ_G1: 2772 case SCMD_WRITE_G1: 2773 *xfr = (((uint32_t)cdb[7]) << 8) | ((uint32_t)cdb[8]); 2774 *lba = 2775 (((uint32_t)cdb[2]) << 24) | 2776 (((uint32_t)cdb[3]) << 16) | 2777 (((uint32_t)cdb[4]) << 8) | 2778 ((uint32_t)cdb[5]); 2779 /* Check for illegal bits */ 2780 if (cdb[9]) { 2781 asc = 0x24; /* invalid field in cdb */ 2782 } 2783 break; 2784 case SCMD_READ: 2785 case SCMD_WRITE: 2786 *xfr = cdb[4]; 2787 if (*xfr == 0) { 2788 *xfr = 256; 2789 } 2790 *lba = 2791 (((uint32_t)cdb[1] & 0x1f) << 16) | 2792 (((uint32_t)cdb[2]) << 8) | 2793 ((uint32_t)cdb[3]); 2794 /* Check for illegal bits */ 2795 if (cdb[5]) { 2796 asc = 0x24; /* invalid field in cdb */ 2797 } 2798 break; 2799 } 2800 2801 if (asc == 0) { 2802 if ((*lba + *xfr) > lbamax) { 2803 asc = 0x21; /* logical block out of range */ 2804 } 2805 } 2806 return (asc); 2807 } 2808 2809 /* 2810 * Called with pwrk lock held. 
 */
static void
pmcs_ioerror(pmcs_hw_t *pwp, pmcs_dtype_t t, pmcwork_t *pwrk, uint32_t *w)
{
	/* Canned fixed-format sense: power-on/reset occurred (ASC 0x28). */
	static uint8_t por[] = {
	    0xf0, 0x0, 0x6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x28
	};
	/* Canned fixed-format sense: parity error (ASC 0x47, ASCQ 5). */
	static uint8_t parity[] = {
	    0xf0, 0x0, 0xb, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x47, 5
	};
	const char *msg;
	char buf[20];
	pmcs_cmd_t *sp = pwrk->arg;
	pmcs_phy_t *phyp = pwrk->phy;
	struct scsi_pkt *pkt = CMD2PKT(sp);
	uint32_t status;
	uint32_t resid;

	ASSERT(w != NULL);
	status = LE_32(w[2]);
	resid = LE_32(w[3]);

	/* Map the firmware status to a printable name for debug output. */
	msg = pmcs_status_str(status);
	if (msg == NULL) {
		(void) snprintf(buf, sizeof (buf), "Error 0x%x", status);
		msg = buf;
	}

	if (status != PMCOUT_STATUS_OK) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG1, phyp, NULL,
		    "%s: device %s tag 0x%x status %s @ %llu", __func__,
		    phyp->path, pwrk->htag, msg,
		    (unsigned long long)gethrtime());
	}

	pkt->pkt_reason = CMD_CMPLT;	/* default reason */

	/* Translate the firmware completion status into pkt_reason et al. */
	switch (status) {
	case PMCOUT_STATUS_OK:
		if (t == SATA) {
			int i;
			fis_t fis;
			/* Pull the device's returned FIS out of the IOMB. */
			for (i = 0; i < sizeof (fis) / sizeof (fis[0]); i++) {
				fis[i] = LE_32(w[4+i]);
			}
			if ((fis[0] & 0xff) != FIS_REG_D2H) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL,
				    "unexpected fis code 0x%x", fis[0] & 0xff);
			} else {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL,
				    "FIS ERROR");
				pmcs_fis_dump(pwp, fis);
			}
			pkt->pkt_reason = CMD_TRAN_ERR;
			break;
		}
		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, phyp->path);
		break;

	case PMCOUT_STATUS_ABORTED:
		/*
		 * Command successfully aborted.
		 */
		if (phyp->dead) {
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_state = STATE_GOT_BUS;
		} else if (pwrk->ssp_event != 0) {
			pkt->pkt_reason = CMD_TRAN_ERR;
			pkt->pkt_state = STATE_GOT_BUS;
		} else if (pwrk->state == PMCS_WORK_STATE_TIMED_OUT) {
			pkt->pkt_reason = CMD_TIMEOUT;
			pkt->pkt_statistics |= STAT_TIMEOUT;
			pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
			    STATE_SENT_CMD;
		} else {
			pkt->pkt_reason = CMD_ABORTED;
			pkt->pkt_statistics |= STAT_ABORTED;
			pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
			    STATE_SENT_CMD;
		}

		/*
		 * PMCS_WORK_STATE_TIMED_OUT doesn't need to be preserved past
		 * this point, so go ahead and mark it as aborted.
		 */
		pwrk->state = PMCS_WORK_STATE_ABORTED;
		break;

	case PMCOUT_STATUS_UNDERFLOW:
		/*
		 * This will only get called for SATA
		 */
		pkt->pkt_resid = resid;
		if (pkt->pkt_dma_len < pkt->pkt_resid) {
			(void) pmcs_set_resid(pkt, pkt->pkt_dma_len, resid);
		}
		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, phyp->path);
		break;

	case PMCOUT_STATUS_NO_DEVICE:
	case PMCOUT_STATUS_XFER_ERROR_SATA_LINK_TIMEOUT:
		pkt->pkt_reason = CMD_DEV_GONE;
		break;

	case PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION:
		/*
		 * Need to do rediscovery. We probably have
		 * the wrong device (disk swap), so kill
		 * this one.
		 */
	case PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_ERROR:
		/*
		 * Need to do rediscovery.
		 */
		if (!phyp->dead) {
			/* Drop pwrk lock around PHY lock (lock ordering). */
			mutex_exit(&pwrk->lock);
			pmcs_lock_phy(pwrk->phy);
			pmcs_kill_changed(pwp, pwrk->phy, 0);
			pmcs_unlock_phy(pwrk->phy);
			mutex_enter(&pwrk->lock);
			pkt->pkt_reason = CMD_INCOMPLETE;
			pkt->pkt_state = STATE_GOT_BUS;
		} else {
			pkt->pkt_reason = CMD_DEV_GONE;
		}
		break;

	case PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
	case PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION:
	case PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED:
		/* cmd is pending on the target */
	case PMCOUT_STATUS_XFER_ERROR_OFFSET_MISMATCH:
	case PMCOUT_STATUS_XFER_ERROR_REJECTED_NCQ_MODE:
		/* transitory - commands sent while in NCQ failure mode */
	case PMCOUT_STATUS_XFER_ERROR_ABORTED_NCQ_MODE:
		/* NCQ failure */
	case PMCOUT_STATUS_IO_PORT_IN_RESET:
	case PMCOUT_STATUS_XFER_ERR_BREAK:
	case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
		/* Retryable transport conditions: report CMD_INCOMPLETE. */
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state = STATE_GOT_BUS;
		break;

	case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
		/* Open retries exhausted: surface BUSY to the initiator. */
		pmcs_latch_status(pwp, sp, STATUS_BUSY, NULL, 0, phyp->path);
		break;

	case PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
		/* synthesize a RESERVATION CONFLICT */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, phyp->target,
		    "%s: Potential affiliation active on 0x%" PRIx64, __func__,
		    pmcs_barray2wwn(phyp->sas_address));
		pmcs_latch_status(pwp, sp, STATUS_RESERVATION_CONFLICT, NULL,
		    0, phyp->path);
		break;

	case PMCOUT_STATUS_XFER_ERROR_ABORTED_DUE_TO_SRST:
		/* synthesize a power-on/reset */
		pmcs_latch_status(pwp, sp, STATUS_CHECK, por, sizeof (por),
		    phyp->path);
		break;

	case PMCOUT_STATUS_XFER_ERROR_UNEXPECTED_PHASE:
	case PMCOUT_STATUS_XFER_ERROR_RDY_OVERRUN:
	case PMCOUT_STATUS_XFER_ERROR_RDY_NOT_EXPECTED:
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK:
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK:
		/* synthesize a PARITY ERROR */
		pmcs_latch_status(pwp, sp, STATUS_CHECK, parity,
		    sizeof (parity), phyp->path);
		break;

	case PMCOUT_STATUS_IO_XFER_ERROR_DMA:
	case PMCOUT_STATUS_IO_NOT_VALID:
	case PMCOUT_STATUS_PROG_ERROR:
	case PMCOUT_STATUS_XFER_ERROR_PEER_ABORTED:
	case PMCOUT_STATUS_XFER_ERROR_SATA: /* non-NCQ failure */
	default:
		pkt->pkt_reason = CMD_TRAN_ERR;
		break;
	}
}

/*
 * Latch up SCSI status
 */

void
pmcs_latch_status(pmcs_hw_t *pwp, pmcs_cmd_t *sp, uint8_t status,
    uint8_t *snsp, size_t snslen, char *path)
{
	static const char c1[] =
	    "%s: Status Byte 0x%02x for CDB0=0x%02x (%02x %02x %02x) "
	    "HTAG 0x%x @ %llu";
	static const char c2[] =
	    "%s: Status Byte 0x%02x for CDB0=0x%02x HTAG 0x%x @ %llu";

	CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
	    STATE_SENT_CMD | STATE_GOT_STATUS;
	CMD2PKT(sp)->pkt_scbp[0] = status;

	/*
	 * CHECK CONDITION with sense data and room in the status area for
	 * auto request sense: latch the sense bytes into the ARQ area too.
	 */
	if (status == STATUS_CHECK && snsp &&
	    (size_t)SCSA_STSLEN(sp) >= sizeof (struct scsi_arq_status)) {
		struct scsi_arq_status *aqp =
		    (void *) CMD2PKT(sp)->pkt_scbp;
		size_t amt = sizeof (struct scsi_extended_sense);
		uint8_t key = scsi_sense_key(snsp);
		uint8_t asc = scsi_sense_asc(snsp);
		uint8_t ascq = scsi_sense_ascq(snsp);
		if (amt > snslen) {
			amt = snslen;
		}
		pmcs_prt(pwp, PMCS_PRT_DEBUG_SCSI_STATUS, NULL, NULL, c1, path,
		    status, CMD2PKT(sp)->pkt_cdbp[0] & 0xff, key, asc, ascq,
		    sp->cmd_tag, (unsigned long long)gethrtime());
		CMD2PKT(sp)->pkt_state |= STATE_ARQ_DONE;
		(*(uint8_t *)&aqp->sts_rqpkt_status) = STATUS_GOOD;
		aqp->sts_rqpkt_statistics = 0;
		aqp->sts_rqpkt_reason = CMD_CMPLT;
		aqp->sts_rqpkt_state = STATE_GOT_BUS |
		    STATE_GOT_TARGET | STATE_SENT_CMD |
		    STATE_XFERRED_DATA | STATE_GOT_STATUS;
		(void) memcpy(&aqp->sts_sensedata, snsp, amt);
		if (aqp->sts_sensedata.es_class != CLASS_EXTENDED_SENSE) {
			/* Not extended sense: flag the ARQ as failed. */
			aqp->sts_rqpkt_reason = CMD_TRAN_ERR;
			aqp->sts_rqpkt_state = 0;
			aqp->sts_rqpkt_resid =
			    sizeof (struct scsi_extended_sense);
		} else {
			aqp->sts_rqpkt_resid =
			    sizeof (struct scsi_extended_sense) - amt;
		}
	} else if (status) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_SCSI_STATUS, NULL, NULL, c2,
		    path, status, CMD2PKT(sp)->pkt_cdbp[0] & 0xff,
		    sp->cmd_tag, (unsigned long long)gethrtime());
	}

	CMD2PKT(sp)->pkt_reason = CMD_CMPLT;
}

/*
 * Calculate and set packet residual and return the amount
 * left over after applying various filters.
 */
size_t
pmcs_set_resid(struct scsi_pkt *pkt, size_t amt, uint32_t cdbamt)
{
	/* Residual starts as the CDB-derived amount ... */
	pkt->pkt_resid = cdbamt;
	/* ... then amt is clamped to it and to the DMA mapping length. */
	if (amt > pkt->pkt_resid) {
		amt = pkt->pkt_resid;
	}
	if (amt > pkt->pkt_dma_len) {
		amt = pkt->pkt_dma_len;
	}
	return (amt);
}

/*
 * Return the existing target softstate if there is one. If there is,
 * the PHY is locked as well and that lock must be freed by the caller
 * after the target/PHY linkage is established. If there isn't one, and
 * alloc_tgt is TRUE, then allocate one.
3079 */ 3080 pmcs_xscsi_t * 3081 pmcs_get_target(pmcs_iport_t *iport, char *tgt_port, boolean_t alloc_tgt) 3082 { 3083 pmcs_hw_t *pwp = iport->pwp; 3084 pmcs_phy_t *phyp; 3085 pmcs_xscsi_t *tgt; 3086 uint64_t wwn; 3087 char unit_address[PMCS_MAX_UA_SIZE]; 3088 int ua_form = 1; 3089 3090 /* 3091 * Find the PHY for this target 3092 */ 3093 phyp = pmcs_find_phy_by_sas_address(pwp, iport, NULL, tgt_port); 3094 if (phyp == NULL) { 3095 pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL, 3096 "%s: No PHY for target @ %s", __func__, tgt_port); 3097 return (NULL); 3098 } 3099 3100 tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, tgt_port); 3101 3102 if (tgt) { 3103 /* 3104 * There's already a target. Check its PHY pointer to see 3105 * if we need to clear the old linkages 3106 */ 3107 if (tgt->phy && (tgt->phy != phyp)) { 3108 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt, 3109 "%s: Target PHY updated from %p to %p", __func__, 3110 (void *)tgt->phy, (void *)phyp); 3111 if (!IS_ROOT_PHY(tgt->phy)) { 3112 pmcs_dec_phy_ref_count(tgt->phy); 3113 pmcs_inc_phy_ref_count(phyp); 3114 } 3115 tgt->phy->target = NULL; 3116 } 3117 3118 tgt->phy = phyp; 3119 phyp->target = tgt; 3120 return (tgt); 3121 } 3122 3123 /* 3124 * Make sure the PHY we found is on the correct iport 3125 */ 3126 if (phyp->iport != iport) { 3127 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL, 3128 "%s: No target at %s on this iport", __func__, tgt_port); 3129 pmcs_unlock_phy(phyp); 3130 return (NULL); 3131 } 3132 3133 /* 3134 * If this was just a lookup (i.e. alloc_tgt is false), return now. 
3135 */ 3136 if (alloc_tgt == B_FALSE) { 3137 pmcs_unlock_phy(phyp); 3138 return (NULL); 3139 } 3140 3141 /* 3142 * Allocate the new softstate 3143 */ 3144 wwn = pmcs_barray2wwn(phyp->sas_address); 3145 (void) scsi_wwn_to_wwnstr(wwn, ua_form, unit_address); 3146 3147 if (ddi_soft_state_bystr_zalloc(iport->tgt_sstate, unit_address) != 3148 DDI_SUCCESS) { 3149 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt, 3150 "%s: Couldn't alloc softstate for device at %s", 3151 __func__, unit_address); 3152 pmcs_unlock_phy(phyp); 3153 return (NULL); 3154 } 3155 3156 tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, unit_address); 3157 ASSERT(tgt != NULL); 3158 STAILQ_INIT(&tgt->wq); 3159 STAILQ_INIT(&tgt->aq); 3160 STAILQ_INIT(&tgt->sq); 3161 mutex_init(&tgt->statlock, NULL, MUTEX_DRIVER, 3162 DDI_INTR_PRI(pwp->intr_pri)); 3163 mutex_init(&tgt->wqlock, NULL, MUTEX_DRIVER, 3164 DDI_INTR_PRI(pwp->intr_pri)); 3165 mutex_init(&tgt->aqlock, NULL, MUTEX_DRIVER, 3166 DDI_INTR_PRI(pwp->intr_pri)); 3167 cv_init(&tgt->reset_cv, NULL, CV_DRIVER, NULL); 3168 cv_init(&tgt->abort_cv, NULL, CV_DRIVER, NULL); 3169 list_create(&tgt->lun_list, sizeof (pmcs_lun_t), 3170 offsetof(pmcs_lun_t, lun_list_next)); 3171 tgt->qdepth = 1; 3172 tgt->target_num = PMCS_INVALID_TARGET_NUM; 3173 bcopy(unit_address, tgt->unit_address, PMCS_MAX_UA_SIZE); 3174 tgt->pwp = pwp; 3175 tgt->ua = strdup(iport->ua); 3176 tgt->phy = phyp; 3177 ASSERT((phyp->target == NULL) || (phyp->target == tgt)); 3178 if (phyp->target == NULL) { 3179 phyp->target = tgt; 3180 } 3181 3182 /* 3183 * Don't allocate LUN softstate for SMP targets 3184 */ 3185 if (phyp->dtype == EXPANDER) { 3186 return (tgt); 3187 } 3188 3189 if (ddi_soft_state_bystr_init(&tgt->lun_sstate, 3190 sizeof (pmcs_lun_t), PMCS_LUN_SSTATE_SZ) != 0) { 3191 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt, 3192 "%s: LUN soft_state_bystr_init failed", __func__); 3193 ddi_soft_state_bystr_free(iport->tgt_sstate, tgt_port); 3194 pmcs_unlock_phy(phyp); 3195 return (NULL); 3196 
} 3197 3198 return (tgt); 3199 } 3200