/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * SCSI (SCSA) midlayer interface for PMC driver.
 */

#include <sys/scsi/adapters/pmcs/pmcs.h>

/* Converts a 64-bit LUN value into the SCSI standard scsi_lun_t form. */
extern scsi_lun_t scsi_lun64_to_lun(scsi_lun64_t lun64);

/* SCSA (scsi_hba_tran) entry points */
static int pmcs_scsa_tran_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static void pmcs_scsa_tran_tgt_free(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static int pmcs_scsa_start(struct scsi_address *, struct scsi_pkt *);
static int pmcs_scsa_abort(struct scsi_address *, struct scsi_pkt *);
static int pmcs_scsa_reset(struct scsi_address *, int);
static int pmcs_scsi_reset_notify(struct scsi_address *, int,
    void (*)(caddr_t), caddr_t);
static int pmcs_scsa_getcap(struct scsi_address *, char *, int);
static int pmcs_scsa_setcap(struct scsi_address *, char *, int, int);
static int pmcs_scsa_setup_pkt(struct scsi_pkt *, int (*)(caddr_t), caddr_t);
static void pmcs_scsa_teardown_pkt(struct scsi_pkt *);

/* SMP (expander) transport entry points */
static int pmcs_smp_init(dev_info_t *, dev_info_t *, smp_hba_tran_t *,
    smp_device_t *);
static void pmcs_smp_free(dev_info_t *, dev_info_t *, smp_hba_tran_t *,
    smp_device_t *);
static int pmcs_smp_start(struct smp_pkt *);

static int pmcs_scsi_quiesce(dev_info_t *);
static int pmcs_scsi_unquiesce(dev_info_t *);

/* Internal helpers */
static int pmcs_cap(struct scsi_address *, char *, int, int, int);
static pmcs_xscsi_t *
    pmcs_addr2xp(struct scsi_address *, uint64_t *, pmcs_cmd_t *);
static int pmcs_SAS_run(pmcs_cmd_t *, pmcwork_t *);
static void pmcs_SAS_done(pmcs_hw_t *, pmcwork_t *, uint32_t *);

static int pmcs_SATA_run(pmcs_cmd_t *, pmcwork_t *);
static void pmcs_SATA_done(pmcs_hw_t *, pmcwork_t *, uint32_t *);
static uint8_t pmcs_SATA_rwparm(uint8_t *, uint32_t *, uint64_t *, uint64_t);

static void pmcs_ioerror(pmcs_hw_t *, pmcs_dtype_t pmcs_dtype,
    pmcwork_t *, uint32_t *, uint32_t);


/*
 * Allocate, initialize and attach both the SCSA transport and the SMP
 * transport for this HBA instance.  The caller-supplied DMA attributes
 * are copied and adjusted (SGL length, relaxed ordering, FLAGERR) before
 * being handed to scsi_hba_attach_setup().
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE with all partial allocations undone.
 */
int
pmcs_scsa_init(pmcs_hw_t *pwp, const ddi_dma_attr_t *ap)
{
	scsi_hba_tran_t *tran;
	ddi_dma_attr_t pmcs_scsa_dattr;
	int flags;

	(void) memcpy(&pmcs_scsa_dattr, ap, sizeof (ddi_dma_attr_t));
	pmcs_scsa_dattr.dma_attr_sgllen =
	    ((PMCS_SGL_NCHUNKS - 1) * (PMCS_MAX_CHUNKS - 1)) + PMCS_SGL_NCHUNKS;
	pmcs_scsa_dattr.dma_attr_flags = DDI_DMA_RELAXED_ORDERING;
	pmcs_scsa_dattr.dma_attr_flags |= DDI_DMA_FLAGERR;

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(pwp->dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "scsi_hba_tran_alloc failed");
		return (DDI_FAILURE);
	}

	tran->tran_hba_private = pwp;
	tran->tran_tgt_init = pmcs_scsa_tran_tgt_init;
	tran->tran_tgt_free = pmcs_scsa_tran_tgt_free;
	tran->tran_start = pmcs_scsa_start;
	tran->tran_abort = pmcs_scsa_abort;
	tran->tran_reset = pmcs_scsa_reset;
	tran->tran_reset_notify = pmcs_scsi_reset_notify;
	tran->tran_getcap = pmcs_scsa_getcap;
	tran->tran_setcap = pmcs_scsa_setcap;
	tran->tran_setup_pkt = pmcs_scsa_setup_pkt;
	tran->tran_teardown_pkt = pmcs_scsa_teardown_pkt;
	tran->tran_quiesce = pmcs_scsi_quiesce;
	tran->tran_unquiesce = pmcs_scsi_unquiesce;
	tran->tran_interconnect_type = INTERCONNECT_SAS;
	tran->tran_hba_len = sizeof (pmcs_cmd_t);

	/*
	 * Attach this instance of the hba
	 */

	flags = SCSI_HBA_TRAN_SCB | SCSI_HBA_TRAN_CDB | SCSI_HBA_ADDR_COMPLEX |
	    SCSI_HBA_TRAN_PHCI | SCSI_HBA_HBA;

	if (scsi_hba_attach_setup(pwp->dip, &pmcs_scsa_dattr, tran, flags)) {
		scsi_hba_tran_free(tran);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "scsi_hba_attach failed");
		return (DDI_FAILURE);
	}
	pwp->tran = tran;

	/*
	 * Attach the SMP part of this hba
	 */
	pwp->smp_tran = smp_hba_tran_alloc(pwp->dip);
	ASSERT(pwp->smp_tran != NULL);
	pwp->smp_tran->smp_tran_hba_private = pwp;
	pwp->smp_tran->smp_tran_init = pmcs_smp_init;
	pwp->smp_tran->smp_tran_free = pmcs_smp_free;
	pwp->smp_tran->smp_tran_start = pmcs_smp_start;

	if (smp_hba_attach_setup(pwp->dip, pwp->smp_tran) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "smp_hba_attach failed");
		smp_hba_tran_free(pwp->smp_tran);
		pwp->smp_tran = NULL;
		scsi_hba_tran_free(tran);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * SCSA entry points
 */

/*
 * tran_tgt_init(9E) entry point: called when the framework initializes a
 * scsi_device (one call per LUN) on one of our iports.  Looks up (or
 * creates) the target softstate keyed by the "target-port" property,
 * allocates per-LUN soft state keyed by the unit-address, links the LUN
 * to the target and assigns the target a slot in pwp->targets[] on first
 * use.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
pmcs_scsa_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	pmcs_hw_t *pwp = NULL;
	int rval;
	char *variant_prop = "sata";
	char *tgt_port = NULL, *ua = NULL;
	pmcs_xscsi_t *tgt = NULL;
	pmcs_iport_t *iport;
	pmcs_lun_t *lun = NULL;
	pmcs_phy_t *phyp = NULL;
	uint64_t lun_num;
	boolean_t got_scratch = B_FALSE;

	/*
	 * First, make sure we're an iport and get the pointer to the HBA
	 * node's softstate
	 */
	if (scsi_hba_iport_unit_address(hba_dip) == NULL) {
		pmcs_prt(TRAN2PMC(tran), PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: We don't enumerate devices on the HBA node", __func__);
		goto tgt_init_fail;
	}

	pwp = ITRAN2PMC(tran);
	iport = ITRAN2IPORT(tran);

	/*
	 * Get the unit-address
	 */
	ua = scsi_device_unit_address(sd);
	if (ua == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: Couldn't get UA", __func__);
		/* pwp is NULLed so the fail path skips mutex_exit(&pwp->lock) */
		pwp = NULL;
		goto tgt_init_fail;
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL,
	    "got ua '%s'", ua);

	/*
	 * Get the target address
	 */
	rval = scsi_device_prop_lookup_string(sd, SCSI_DEVICE_PROP_PATH,
	    SCSI_ADDR_PROP_TARGET_PORT, &tgt_port);
	if (rval != DDI_PROP_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "Couldn't get target UA");
		pwp = NULL;
		goto tgt_init_fail;
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL,
	    "got tgt_port '%s'", tgt_port);

	/*
	 * Validate that this tran_tgt_init is for an active iport.
	 */
	if (iport->ua_state == UA_INACTIVE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Got tran_tgt_init on inactive iport for '%s'",
		    __func__, tgt_port);
		pwp = NULL;
		goto tgt_init_fail;
	}

	/*
	 * Since we're going to wait for scratch, be sure to acquire it while
	 * we're not holding any other locks
	 */
	(void) pmcs_acquire_scratch(pwp, B_TRUE);
	got_scratch = B_TRUE;

	mutex_enter(&pwp->lock);

	/*
	 * See if there's already a target softstate. If not, allocate one.
	 */
	tgt = pmcs_get_target(iport, tgt_port, B_TRUE);

	if (tgt == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "%s: "
		    "No tgt for tgt_port (%s)", __func__, tgt_port);
		goto tgt_init_fail;
	}

	/*
	 * NOTE(review): pmcs_get_target() appears to return with the
	 * target's PHY lock held (see the mutex_owned ASSERT below) —
	 * confirm against its definition.
	 */
	phyp = tgt->phy;
	if (!IS_ROOT_PHY(phyp)) {
		pmcs_inc_phy_ref_count(phyp);
	}
	ASSERT(mutex_owned(&phyp->phy_lock));

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, phyp, tgt, "@%s tgt = 0x%p, dip = 0x%p",
	    ua, (void *)tgt, (void *)tgt_dip);

	/* Now get the lun */
	lun_num = scsi_device_prop_get_int64(sd, SCSI_DEVICE_PROP_PATH,
	    SCSI_ADDR_PROP_LUN64, SCSI_LUN64_ILLEGAL);
	if (lun_num == SCSI_LUN64_ILLEGAL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "No LUN for tgt %p", (void *)tgt);
		goto tgt_init_fail;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt, "%s: @%s tgt 0x%p phy "
	    "0x%p (%s)", __func__, ua, (void *)tgt, (void *)phyp, phyp->path);

	mutex_enter(&tgt->statlock);
	tgt->dtype = phyp->dtype;
	if (tgt->dtype != SAS && tgt->dtype != SATA) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "PHY 0x%p went away?", (void *)phyp);
		goto tgt_init_fail;
	}

	/* We don't support SATA devices at LUN > 0. */
	if ((tgt->dtype == SATA) && (lun_num > 0)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: No support for SATA devices at LUN > 0 "
		    "(target = 0x%p)", __func__, (void *)tgt);
		goto tgt_init_fail;
	}

	/*
	 * Allocate LU soft state. We use ddi_soft_state_bystr_zalloc instead
	 * of kmem_alloc because ddi_soft_state_bystr_zalloc allows us to
	 * verify that the framework never tries to initialize two scsi_device
	 * structures with the same unit-address at the same time.
	 */
	if (ddi_soft_state_bystr_zalloc(tgt->lun_sstate, ua) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, phyp, tgt,
		    "Couldn't allocate LU soft state");
		goto tgt_init_fail;
	}

	lun = ddi_soft_state_bystr_get(tgt->lun_sstate, ua);
	if (lun == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, phyp, tgt,
		    "Couldn't get LU soft state");
		goto tgt_init_fail;
	}
	scsi_device_hba_private_set(sd, lun);
	lun->lun_num = lun_num;

	/* convert the scsi_lun64_t value to SCSI standard form */
	lun->scsi_lun = scsi_lun64_to_lun(lun_num);

	/* lun came from zalloc'd soft state, so unit_address stays NUL-ended */
	ASSERT(strlen(ua) < (PMCS_MAX_UA_SIZE - 1));
	bcopy(ua, lun->unit_address, strnlen(ua, PMCS_MAX_UA_SIZE - 1));

	lun->target = tgt;

	/*
	 * If this is the first tran_tgt_init, add this target to our list
	 */
	if (tgt->target_num == PMCS_INVALID_TARGET_NUM) {
		int target;
		for (target = 0; target < pwp->max_dev; target++) {
			if (pwp->targets[target] != NULL) {
				continue;
			}

			pwp->targets[target] = tgt;
			tgt->target_num = (uint16_t)target;
			break;
		}

		if (target == pwp->max_dev) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
			    "Target list full.");
			goto tgt_init_fail;
		}
	}

	tgt->dip = sd->sd_dev;
	lun->sd = sd;
	list_insert_tail(&tgt->lun_list, lun);

	if (!pmcs_assign_device(pwp, tgt)) {
		pmcs_release_scratch(pwp);
		pwp->targets[tgt->target_num] = NULL;
		tgt->target_num = PMCS_INVALID_TARGET_NUM;
		tgt->phy = NULL;
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: pmcs_assign_device failed for target 0x%p",
		    __func__, (void *)tgt);
		goto tgt_init_fail;
	}

	pmcs_release_scratch(pwp);
	tgt->ref_count++;

	(void) scsi_device_prop_update_int(sd, SCSI_DEVICE_PROP_PATH,
	    SCSI_ADDR_PROP_TARGET, (uint32_t)(tgt->target_num));

	/* SM-HBA */
	if (tgt->dtype == SATA) {
		/* TCR in PSARC/1997/281 opinion */
		(void) scsi_device_prop_update_string(sd,
		    SCSI_DEVICE_PROP_PATH, "variant", variant_prop);
	}

	tgt->phy_addressable = PMCS_PHY_ADDRESSABLE(phyp);

	if (tgt->phy_addressable) {
		(void) scsi_device_prop_update_int(sd, SCSI_DEVICE_PROP_PATH,
		    SCSI_ADDR_PROP_SATA_PHY, phyp->phynum);
	}

	/* SM-HBA */
	(void) pmcs_smhba_set_scsi_device_props(pwp, phyp, sd);
	/*
	 * Make sure attached port and target port pm props are updated
	 * By passing in 0s, we're not actually updating any values, but
	 * the properties should now get updated on the node.
	 */

	mutex_exit(&tgt->statlock);
	pmcs_update_phy_pm_props(phyp, 0, 0, B_TRUE);
	pmcs_unlock_phy(phyp);
	mutex_exit(&pwp->lock);
	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, tgt_port);
	return (DDI_SUCCESS);

tgt_init_fail:
	/* Unwind in reverse order; each step is guarded by what was set up */
	scsi_device_hba_private_set(sd, NULL);
	if (got_scratch) {
		pmcs_release_scratch(pwp);
	}
	if (lun) {
		list_remove(&tgt->lun_list, lun);
		ddi_soft_state_bystr_free(tgt->lun_sstate, ua);
	}
	if (phyp) {
		mutex_exit(&tgt->statlock);
		pmcs_unlock_phy(phyp);
		/*
		 * phyp's ref count was incremented in pmcs_new_tport.
		 * We're failing configuration, we now need to decrement it.
		 */
		if (!IS_ROOT_PHY(phyp)) {
			pmcs_dec_phy_ref_count(phyp);
		}
		phyp->target = NULL;
	}
	if (tgt && tgt->ref_count == 0) {
		ddi_soft_state_bystr_free(iport->tgt_sstate, tgt_port);
	}
	if (pwp) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: failed for @%s tgt 0x%p phy 0x%p", __func__, ua,
		    (void *)tgt, (void *)phyp);
	}
	if (tgt_port) {
		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, tgt_port);
	}
	return (DDI_FAILURE);
}

/*
 * tran_tgt_free(9E) entry point: undo tran_tgt_init for one LUN.  Frees
 * the per-LUN soft state, drops the target reference taken at init and,
 * when the last reference goes away, removes the target from
 * pwp->targets[], breaks the PHY<->target linkage and destroys the
 * target.  If the target is in device state recovery the free is
 * deliberately not completed (only a debug message is logged).
 */
static void
pmcs_scsa_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	_NOTE(ARGUNUSED(hba_dip, tgt_dip));
	pmcs_hw_t *pwp;
	pmcs_lun_t *lun;
	pmcs_xscsi_t *target;
	char *unit_address;
	pmcs_phy_t *phyp;

	if (scsi_hba_iport_unit_address(hba_dip) == NULL) {
		pwp = TRAN2PMC(tran);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: We don't enumerate devices on the HBA node", __func__);
		return;
	}

	lun = (pmcs_lun_t *)scsi_device_hba_private_get(sd);

	ASSERT((lun != NULL) && (lun->target != NULL));
	ASSERT(lun->target->ref_count > 0);

	target = lun->target;
	unit_address = lun->unit_address;
	list_remove(&target->lun_list, lun);

	pwp = ITRAN2PMC(tran);
	mutex_enter(&pwp->lock);
	phyp = target->phy;
	if (phyp) {
		mutex_enter(&phyp->phy_lock);
	}
	mutex_enter(&target->statlock);

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, target,
	    "%s: for @%s tgt 0x%p phy 0x%p", __func__, unit_address,
	    (void *)target, (void *)phyp);
	ddi_soft_state_bystr_free(lun->target->lun_sstate, unit_address);

	if (target->recover_wait) {
		mutex_exit(&target->statlock);
		if (phyp) {
			mutex_exit(&phyp->phy_lock);
		}
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, target, "%s: "
		    "Target 0x%p in device state recovery, fail tran_tgt_free",
		    __func__, (void *)target);
		return;
	}

	/*
	 * If this target still has a PHY pointer and that PHY's target pointer
	 * has been cleared, then that PHY has been reaped. In that case, there
	 * would be no need to decrement the reference count
	 */
	if (phyp && !IS_ROOT_PHY(phyp) && phyp->target) {
		pmcs_dec_phy_ref_count(phyp);
	}

	if (--target->ref_count == 0) {
		/*
		 * Remove this target from our list. The target soft
		 * state will remain, and the device will remain registered
		 * with the hardware unless/until we're told the device
		 * physically went away.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, target,
		    "%s: Free target 0x%p (vtgt %d)", __func__, (void *)target,
		    target->target_num);
		pwp->targets[target->target_num] = NULL;
		target->target_num = PMCS_INVALID_TARGET_NUM;
		/*
		 * If the target still has a PHY pointer, break the linkage
		 */
		if (phyp) {
			phyp->target = NULL;
		}
		target->phy = NULL;
		if (phyp) {
			mutex_exit(&phyp->phy_lock);
		}
		pmcs_destroy_target(target);
	} else {
		mutex_exit(&target->statlock);
		if (phyp) {
			mutex_exit(&phyp->phy_lock);
		}
	}

	mutex_exit(&pwp->lock);
}

/*
 * tran_start(9E) entry point: accept a SCSI packet for transport.
 * Polled (FLAG_NOINTR) packets are rejected with TRAN_BADPKT.  Accepted
 * packets are appended to the per-target wait queue; if the HBA is
 * blocked or the target is draining/resetting/recovering the packet is
 * left queued for a worker to pick up later, otherwise the queue is run
 * immediately.  Packets for dead/missing targets complete through the
 * completion queue with CMD_DEV_GONE.
 */
static int
pmcs_scsa_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	pmcs_cmd_t *sp = PKT2CMD(pkt);
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	pmcs_xscsi_t *xp;
	boolean_t blocked;
	uint32_t hba_state;

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
	    "%s: pkt %p sd %p cdb0=0x%02x dl=%lu", __func__, (void *)pkt,
	    (void *)scsi_address_device(&pkt->pkt_address),
	    pkt->pkt_cdbp[0] & 0xff, pkt->pkt_dma_len);

	if (pkt->pkt_flags & FLAG_NOINTR) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL,
		    "%s: nointr pkt", __func__);
		return (TRAN_BADPKT);
	}

	sp->cmd_tag = 0;
	pkt->pkt_state = pkt->pkt_statistics = 0;
	pkt->pkt_reason = CMD_INCOMPLETE;

	/* Snapshot HBA state under the lock, then act on the copies */
	mutex_enter(&pwp->lock);
	hba_state = pwp->state;
	blocked = pwp->blocked;
	mutex_exit(&pwp->lock);

	if (hba_state != STATE_RUNNING) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: hba dead", __func__);
		return (TRAN_FATAL_ERROR);
	}

	/* Returns with xp->statlock held on success */
	xp = pmcs_addr2xp(ap, NULL, sp);
	if (xp == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
		    "%s: dropping due to null target", __func__);
		goto dead_target;
	}
	ASSERT(mutex_owned(&xp->statlock));

	/*
	 * First, check to see if the device is gone.
	 */
	if (xp->dev_gone) {
		xp->actv_pkts++;
		mutex_exit(&xp->statlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, xp,
		    "%s: dropping due to dead target 0x%p",
		    __func__, (void *)xp);
		goto dead_target;
	}

	/*
	 * If we're blocked (quiesced) just return.
	 */
	if (blocked) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: hba blocked", __func__);
		xp->actv_pkts++;
		mutex_exit(&xp->statlock);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		return (TRAN_ACCEPT);
	}

	/*
	 * If we're draining or resetting, queue and return.
	 */
	if (xp->draining || xp->resetting || xp->recover_wait) {
		xp->actv_pkts++;
		mutex_exit(&xp->statlock);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, xp,
		    "%s: draining/resetting/recovering (cnt %u)",
		    __func__, xp->actv_cnt);
		/*
		 * By the time we get here, draining or
		 * resetting may have come and gone, not
		 * yet noticing that we had put something
		 * on the wait queue, so schedule a worker
		 * to look at this later.
		 */
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		return (TRAN_ACCEPT);
	}

	xp->actv_pkts++;
	mutex_exit(&xp->statlock);

	/*
	 * Queue this command to the tail of the wait queue.
	 * This keeps us from getting commands out of order.
	 */
	mutex_enter(&xp->wqlock);
	STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next);
	mutex_exit(&xp->wqlock);

	/*
	 * Now run the queue for this device.
	 */
	(void) pmcs_scsa_wq_run_one(pwp, xp);

	return (TRAN_ACCEPT);

dead_target:
	/* Complete the packet through the completion queue as DEV_GONE */
	pkt->pkt_state = STATE_GOT_BUS;
	pkt->pkt_reason = CMD_DEV_GONE;
	mutex_enter(&pwp->cq_lock);
	STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
	PMCS_CQ_RUN_LOCKED(pwp);
	mutex_exit(&pwp->cq_lock);
	return (TRAN_ACCEPT);
}

/*
 * tran_abort(9E) entry point.  Return code 1 = Success.
 *
 * With pkt == NULL, issues an ABORT_ALL against the PHY associated with
 * the addressed LUN.  Otherwise, tries in order: (a) abort the command
 * on the chip via its work structure (SSP ABORT_TASK for SAS, NCQ abort
 * for SATA), or (b) pull the command off the target's wait queue and
 * complete it as CMD_ABORTED.
 */
static int
pmcs_scsa_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	pmcs_cmd_t *sp = NULL;
	pmcs_xscsi_t *xp = NULL;
	pmcs_phy_t *pptr = NULL;
	pmcs_lun_t *pmcs_lun = (pmcs_lun_t *)
	    scsi_device_hba_private_get(scsi_address_device(ap));
	uint32_t tag;
	uint64_t lun;
	pmcwork_t *pwrk;

	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: hba dead", __func__);
		return (0);
	}
	mutex_exit(&pwp->lock);

	if (pkt == NULL) {
		/* ABORT_ALL case: no specific packet supplied */
		if (pmcs_lun == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: "
			    "No pmcs_lun_t struct to do ABORT_ALL", __func__);
			return (0);
		}
		xp = pmcs_lun->target;
		if (xp != NULL) {
			pptr = xp->phy;
		}
		if (pptr == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp, "%s: pkt is "
			    "NULL. No tgt/phy to do ABORT_ALL", __func__);
			return (0);
		}
		pmcs_lock_phy(pptr);
		if (pmcs_abort(pwp, pptr, 0, 1, 0)) {
			pptr->abort_pending = 1;
			SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
		}
		pmcs_unlock_phy(pptr);
		return (1);
	}

	sp = PKT2CMD(pkt);
	xp = sp->cmd_target;

	if (sp->cmd_lun) {
		lun = sp->cmd_lun->lun_num;
	} else {
		lun = 0;
	}
	if (xp == NULL) {
		return (0);
	}

	/*
	 * See if we have a real work structure associated with this cmd.
	 */
	pwrk = pmcs_tag2wp(pwp, sp->cmd_tag, B_FALSE);
	if (pwrk && pwrk->arg == sp) {
		tag = pwrk->htag;
		pptr = pwrk->phy;
		pwrk->timer = 0;	/* we don't time this here */
		ASSERT(pwrk->state == PMCS_WORK_STATE_ONCHIP);
		mutex_exit(&pwrk->lock);
		pmcs_lock_phy(pptr);
		if (pptr->dtype == SAS) {
			if (pmcs_ssp_tmf(pwp, pptr, SAS_ABORT_TASK, tag, lun,
			    NULL)) {
				pptr->abort_pending = 1;
				pmcs_unlock_phy(pptr);
				SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
				return (0);
			}
		} else {
			/*
			 * XXX: Was the command that was active an
			 * NCQ I/O command?
			 */
			pptr->need_rl_ext = 1;
			if (pmcs_sata_abort_ncq(pwp, pptr)) {
				pptr->abort_pending = 1;
				pmcs_unlock_phy(pptr);
				SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
				return (0);
			}
		}
		pptr->abort_pending = 1;
		pmcs_unlock_phy(pptr);
		SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
		return (1);
	}
	if (pwrk) {
		mutex_exit(&pwrk->lock);
	}
	/*
	 * Okay, those weren't the droids we were looking for.
	 * See if the command is on any of the wait queues.
	 */
	mutex_enter(&xp->wqlock);
	sp = NULL;
	STAILQ_FOREACH(sp, &xp->wq, cmd_next) {
		if (sp == PKT2CMD(pkt)) {
			STAILQ_REMOVE(&xp->wq, sp, pmcs_cmd, cmd_next);
			break;
		}
	}
	mutex_exit(&xp->wqlock);
	if (sp) {
		/* Found it queued but not yet on-chip: complete as aborted */
		pkt->pkt_reason = CMD_ABORTED;
		pkt->pkt_statistics |= STAT_ABORTED;
		mutex_enter(&pwp->cq_lock);
		STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
		PMCS_CQ_RUN_LOCKED(pwp);
		mutex_exit(&pwp->cq_lock);
		return (1);
	}
	return (0);
}

/*
 * SCSA reset functions
 */

/*
 * tran_reset(9E) entry point.  RESET_LUN and RESET_TARGET are handled by
 * pmcs_reset_dev() (RESET_LUN additionally resolves the 64-bit LUN via
 * pmcs_addr2xp); RESET_ALL is not supported.  Returns 1 on success,
 * 0 on failure or unsupported level.
 */
static int
pmcs_scsa_reset(struct scsi_address *ap, int level)
{
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	pmcs_phy_t *pptr;
	pmcs_xscsi_t *xp;
	uint64_t lun = (uint64_t)-1, *lp = NULL;
	int rval;

	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: hba dead", __func__);
		return (0);
	}
	mutex_exit(&pwp->lock);

	switch (level) {
	case RESET_ALL:
		rval = 0;
		break;
	case RESET_LUN:
		/*
		 * Point lp at lun so that pmcs_addr2xp
		 * will fill out the 64 bit lun number.
		 */
		lp = &lun;
		/* FALLTHROUGH */
	case RESET_TARGET:
		/* pmcs_addr2xp returns with xp->statlock held */
		xp = pmcs_addr2xp(ap, lp, NULL);
		if (xp == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: no xp found for this scsi address", __func__);
			return (0);
		}

		if (xp->dev_gone) {
			mutex_exit(&xp->statlock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
			    "%s: Target 0x%p has gone away", __func__,
			    (void *)xp);
			return (0);
		}

		/*
		 * If we're already performing this action, or if device
		 * state recovery is already running, just return failure.
		 */
		if (xp->resetting || xp->recover_wait) {
			mutex_exit(&xp->statlock);
			return (0);
		}
		xp->reset_wait = 0;
		xp->reset_success = 0;
		xp->resetting = 1;
		pptr = xp->phy;
		mutex_exit(&xp->statlock);

		if (pmcs_reset_dev(pwp, pptr, lun)) {
			rval = 0;
		} else {
			rval = 1;
		}

		mutex_enter(&xp->statlock);
		if (rval == 1) {
			xp->reset_success = 1;
		}
		/* Wake anyone waiting in pmcs for this reset to finish */
		if (xp->reset_wait) {
			xp->reset_wait = 0;
			cv_signal(&xp->reset_cv);
		}
		xp->resetting = 0;
		mutex_exit(&xp->statlock);
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		break;
	default:
		rval = 0;
		break;
	}

	return (rval);
}

/*
 * tran_reset_notify(9E) entry point: register/unregister a reset
 * notification callback via the common scsi_hba_reset_notify_setup().
 */
static int
pmcs_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
	    &pwp->lock, &pwp->reset_notify_listf));
}


/*
 * Common backend for tran_getcap/tran_setcap.  'set' selects set (1) vs.
 * get (0); 'val' and 'tonly' are unused.  Returns the capability value
 * on a get, 1/0 for set handling, or -1 for unknown capability or
 * unresolvable address.
 */
static int
pmcs_cap(struct scsi_address *ap, char *cap, int val, int tonly, int set)
{
	_NOTE(ARGUNUSED(val, tonly));
	int cidx, rval = 0;
	pmcs_xscsi_t *xp;

	cidx = scsi_hba_lookup_capstr(cap);
	if (cidx == -1) {
		return (-1);
	}

	/* Returns with xp->statlock held; released before returning below */
	xp = pmcs_addr2xp(ap, NULL, NULL);
	if (xp == NULL) {
		return (-1);
	}

	switch (cidx) {
	case SCSI_CAP_DMA_MAX:
	case SCSI_CAP_INITIATOR_ID:
		if (set == 0) {
			rval = INT_MAX;	/* argh */
		}
		break;
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_ARQ:
	case SCSI_CAP_UNTAGGED_QING:
		if (set == 0) {
			rval = 1;
		}
		break;

	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;

	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_RESET_NOTIFICATION:
	case SCSI_CAP_QFULL_RETRIES:
	case SCSI_CAP_QFULL_RETRY_INTERVAL:
		break;
	case SCSI_CAP_SCSI_VERSION:
		if (set == 0) {
			rval = SCSI_VERSION_3;
		}
		break;
	case SCSI_CAP_INTERCONNECT_TYPE:
		if (set) {
			break;
		}
		if (xp->phy_addressable) {
			rval = INTERCONNECT_SATA;
		} else {
			rval = INTERCONNECT_SAS;
		}
		break;
	case SCSI_CAP_CDB_LEN:
		if (set == 0) {
			rval = 16;
		}
		break;
	case SCSI_CAP_LUN_RESET:
		if (set) {
			break;
		}
		/* LUN reset is not supported for SATA devices */
		if (xp->dtype == SATA) {
			rval = 0;
		} else {
			rval = 1;
		}
		break;
	default:
		rval = -1;
		break;
	}
	mutex_exit(&xp->statlock);
	pmcs_prt(ADDR2PMC(ap), PMCS_PRT_DEBUG3, NULL, NULL,
	    "%s: cap %s val %d set %d rval %d",
	    __func__, cap, val, set, rval);
	return (rval);
}

/*
 * Resolve a scsi_address to its pmcs_xscsi_t target.
 *
 * Returns with statlock held if the xp is found.
 * Fills in pmcs_cmd_t with values if pmcs_cmd_t pointer non-NULL.
 * If 'lp' is non-NULL, the LUN's 64-bit number is stored through it.
 * Returns NULL (with no lock held) if the LUN/target cannot be resolved
 * or the device is gone.
 */
static pmcs_xscsi_t *
pmcs_addr2xp(struct scsi_address *ap, uint64_t *lp, pmcs_cmd_t *sp)
{
	pmcs_xscsi_t *xp;
	pmcs_lun_t *lun = (pmcs_lun_t *)
	    scsi_device_hba_private_get(scsi_address_device(ap));

	if ((lun == NULL) || (lun->target == NULL)) {
		return (NULL);
	}
	xp = lun->target;
	mutex_enter(&xp->statlock);

	if (xp->dev_gone || (xp->phy == NULL)) {
		/*
		 * This may be a retried packet, so it's possible cmd_target
		 * and cmd_lun may still be populated. Clear them.
		 */
		if (sp != NULL) {
			sp->cmd_target = NULL;
			sp->cmd_lun = NULL;
		}
		mutex_exit(&xp->statlock);
		return (NULL);
	}

	if (sp != NULL) {
		sp->cmd_target = xp;
		sp->cmd_lun = lun;
	}
	if (lp) {
		*lp = lun->lun_num;
	}
	return (xp);
}

/* tran_getcap(9E): get capability 'cap'; -1 for NULL/unknown cap. */
static int
pmcs_scsa_getcap(struct scsi_address *ap, char *cap, int whom)
{
	int r;
	if (cap == NULL) {
		return (-1);
	}
	r = pmcs_cap(ap, cap, 0, whom, 0);
	return (r);
}

/* tran_setcap(9E): set capability 'cap' to 'value'; -1 for NULL/unknown. */
static int
pmcs_scsa_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	int r;
	if (cap == NULL) {
		return (-1);
	}
	r = pmcs_cap(ap, cap, value, whom, 1);
	return (r);
}

/*
 * tran_setup_pkt(9E): initialize the HBA-private pmcs_cmd_t that lives
 * in the packet (tran_hba_len) and link it back to the packet.
 */
static int
pmcs_scsa_setup_pkt(struct scsi_pkt *pkt, int (*callback)(caddr_t),
    caddr_t cbarg)
{
	_NOTE(ARGUNUSED(callback, cbarg));
	pmcs_cmd_t *sp = pkt->pkt_ha_private;

	bzero(sp, sizeof (pmcs_cmd_t));
	sp->cmd_pkt = pkt;
	return (0);
}

/* tran_teardown_pkt(9E): drop the command's target/LUN references. */
static void
pmcs_scsa_teardown_pkt(struct scsi_pkt *pkt)
{
	pmcs_cmd_t *sp = pkt->pkt_ha_private;
	sp->cmd_target = NULL;
	sp->cmd_lun = NULL;
}

/*
 * smp_tran_start entry point: synchronously execute one SMP request
 * against an expander PHY identified by the packet's WWN.  The request
 * and response are staged through the HBA scratch area (request at
 * offset 0, response at offset SAS_SMP_MAX_PAYLOAD) and the PMCIN
 * SMP_REQUEST message uses indirect request/response addressing.
 * Returns DDI_SUCCESS or DDI_FAILURE with smp_pkt_reason set to an
 * errno-style value.
 */
static int
pmcs_smp_start(struct smp_pkt *smp_pkt)
{
	struct pmcwork *pwrk;
	pmcs_iport_t *iport;
	const uint_t rdoff = SAS_SMP_MAX_PAYLOAD;
	uint32_t msg[PMCS_MSG_SIZE], *ptr, htag, status;
	uint64_t wwn;
	pmcs_hw_t *pwp;
	pmcs_phy_t *pptr;
	pmcs_xscsi_t *xp;
	uint_t reqsz, rspsz, will_retry;
	int result;

	pwp = smp_pkt->smp_pkt_address->smp_a_hba_tran->smp_tran_hba_private;
	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);

	pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, NULL,
	    "%s: starting for wwn 0x%" PRIx64, __func__, wwn);

	will_retry = smp_pkt->smp_pkt_will_retry;

	(void) pmcs_acquire_scratch(pwp, B_TRUE);
	reqsz = smp_pkt->smp_pkt_reqsize;
	if (reqsz > SAS_SMP_MAX_PAYLOAD) {
		reqsz = SAS_SMP_MAX_PAYLOAD;
	}
	(void) memcpy(pwp->scratch, smp_pkt->smp_pkt_req, reqsz);

	rspsz = smp_pkt->smp_pkt_rspsize;
	if (rspsz > SAS_SMP_MAX_PAYLOAD) {
		rspsz = SAS_SMP_MAX_PAYLOAD;
	}

	/*
	 * The request size from the SMP driver always includes 4 bytes
	 * for the CRC. The PMCS chip, however, doesn't want to see those
	 * counts as part of the transfer size.
	 */
	reqsz -= 4;

	pptr = pmcs_find_phy_by_wwn(pwp, wwn);
	/* PHY is now locked */
	if (pptr == NULL || pptr->dtype != EXPANDER) {
		if (pptr) {
			pmcs_unlock_phy(pptr);
		}
		pmcs_release_scratch(pwp);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: could not find phy", __func__);
		smp_pkt->smp_pkt_reason = ENXIO;
		return (DDI_FAILURE);
	}

	if ((pptr->iport == NULL) || !pptr->valid_device_id) {
		pmcs_unlock_phy(pptr);
		pmcs_release_scratch(pwp);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, pptr->target,
		    "%s: Can't reach PHY %s", __func__, pptr->path);
		smp_pkt->smp_pkt_reason = ENXIO;
		return (DDI_FAILURE);
	}

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		pmcs_unlock_phy(pptr);
		pmcs_release_scratch(pwp);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: could not get work structure", __func__);
		smp_pkt->smp_pkt_reason = will_retry ? EAGAIN : EBUSY;
		return (DDI_FAILURE);
	}

	pwrk->arg = msg;
	pwrk->dtype = EXPANDER;
	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		pmcs_pwork(pwp, pwrk);
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_unlock_phy(pptr);
		pmcs_release_scratch(pwp);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: could not get IQ entry", __func__);
		smp_pkt->smp_pkt_reason = will_retry ? EAGAIN : EBUSY;
		return (DDI_FAILURE);
	}

	/* Build the PMCIN_SMP_REQUEST message (little-endian fields) */
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	msg[3] = LE_32(SMP_INDIRECT_RESPONSE | SMP_INDIRECT_REQUEST);
	msg[8] = LE_32(DWORD0(pwp->scratch_dma));
	msg[9] = LE_32(DWORD1(pwp->scratch_dma));
	msg[10] = LE_32(reqsz);
	msg[11] = 0;
	msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff));
	msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff));
	msg[14] = LE_32(rspsz);
	msg[15] = 0;

	COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE);

	pmcs_hold_iport(pptr->iport);
	iport = pptr->iport;
	pmcs_smp_acquire(iport);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	htag = pwrk->htag;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, smp_pkt->smp_pkt_timeout * 1000, result);
	pmcs_pwork(pwp, pwrk);
	pmcs_lock_phy(pptr);
	if (result) {
		/* Timed out waiting for completion: try to abort on-chip */
		pmcs_timed_out(pwp, htag, __func__);
		if (pmcs_abort(pwp, pptr, htag, 0, 0)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
			    "%s: Unable to issue SMP ABORT for htag 0x%08x",
			    __func__, htag);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
			    "%s: Issuing SMP ABORT for htag 0x%08x",
			    __func__, htag);
		}
		pmcs_smp_release(iport);
		pmcs_rele_iport(iport);
		pmcs_unlock_phy(pptr);
		pmcs_release_scratch(pwp);
		smp_pkt->smp_pkt_reason = ETIMEDOUT;
		return (DDI_FAILURE);
	}
	pmcs_smp_release(iport);
	pmcs_rele_iport(iport);
	status = LE_32(msg[2]);
	/* A response overflow still carries valid data; flag it via errno */
	if (status == PMCOUT_STATUS_OVERFLOW) {
		status = PMCOUT_STATUS_OK;
		smp_pkt->smp_pkt_reason = EOVERFLOW;
	}
	if (status != PMCOUT_STATUS_OK) {
		const char *emsg = pmcs_status_str(status);
		if (emsg == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
			    "SMP operation failed (0x%x)", status);
		} else {
pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target, 1156 "SMP operation failed (%s)", emsg); 1157 } 1158 1159 if ((status == PMCOUT_STATUS_ERROR_HW_TIMEOUT) || 1160 (status == PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT)) { 1161 smp_pkt->smp_pkt_reason = 1162 will_retry ? EAGAIN : ETIMEDOUT; 1163 result = DDI_FAILURE; 1164 } else if (status == 1165 PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS) { 1166 xp = pptr->target; 1167 if (xp == NULL) { 1168 smp_pkt->smp_pkt_reason = EIO; 1169 result = DDI_FAILURE; 1170 goto out; 1171 } 1172 if (xp->dev_state != 1173 PMCS_DEVICE_STATE_NON_OPERATIONAL) { 1174 xp->dev_state = 1175 PMCS_DEVICE_STATE_NON_OPERATIONAL; 1176 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, xp->phy, 1177 xp, "%s: Got _IT_NEXUS_LOSS SMP status. " 1178 "Tgt(0x%p) dev_state set to " 1179 "_NON_OPERATIONAL", __func__, 1180 (void *)xp); 1181 } 1182 /* ABORT any pending commands related to this device */ 1183 if (pmcs_abort(pwp, pptr, pptr->device_id, 1, 1) != 0) { 1184 pptr->abort_pending = 1; 1185 smp_pkt->smp_pkt_reason = EIO; 1186 result = DDI_FAILURE; 1187 } 1188 } else { 1189 smp_pkt->smp_pkt_reason = will_retry ? 
			    EAGAIN : EIO;
			result = DDI_FAILURE;
		}
	} else {
		/* Success: copy the SMP response back out of scratch. */
		(void) memcpy(smp_pkt->smp_pkt_rsp,
		    &((uint8_t *)pwp->scratch)[rdoff], rspsz);
		if (smp_pkt->smp_pkt_reason == EOVERFLOW) {
			result = DDI_FAILURE;
		} else {
			result = DDI_SUCCESS;
		}
	}
out:
	pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, pptr->target,
	    "%s: done for wwn 0x%" PRIx64, __func__, wwn);

	pmcs_unlock_phy(pptr);
	pmcs_release_scratch(pwp);
	return (result);
}

/*
 * pmcs_smp_init
 *
 * SMP (expander) child initialization entry point registered with the
 * smp_hba_tran.  Looks up the iport and target softstate for the child
 * node, binds the target to a free slot in pwp->targets[] on the first
 * init, takes a reference on the target (and on the PHY when it is not
 * a root PHY), and publishes the SCSI_ADDR_PROP_ATTACHED_PORT property
 * on the child.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.  A missing "target-port" property
 * is deliberately NOT treated as fatal (returns DDI_SUCCESS).
 */
static int
pmcs_smp_init(dev_info_t *self, dev_info_t *child,
    smp_hba_tran_t *tran, smp_device_t *smp_sd)
{
	_NOTE(ARGUNUSED(tran, smp_sd));
	pmcs_iport_t *iport;
	pmcs_hw_t *pwp;
	pmcs_xscsi_t *tgt;
	pmcs_phy_t *phy, *pphy;
	uint64_t wwn;
	char *addr, *tgt_port;
	int ua_form = 1;

	iport = ddi_get_soft_state(pmcs_iport_softstate,
	    ddi_get_instance(self));
	ASSERT(iport);
	if (iport == NULL)
		return (DDI_FAILURE);
	pwp = iport->pwp;
	ASSERT(pwp);
	if (pwp == NULL)
		return (DDI_FAILURE);

	/* Get "target-port" prop from devinfo node */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    SCSI_ADDR_PROP_TARGET_PORT, &tgt_port) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed to "
		    "lookup prop ("SCSI_ADDR_PROP_TARGET_PORT")", __func__);
		/* Dont fail _smp_init() because we couldnt get/set a prop */
		return (DDI_SUCCESS);
	}

	/*
	 * Validate that this tran_tgt_init is for an active iport.
	 */
	if (iport->ua_state == UA_INACTIVE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Init on inactive iport for '%s'", __func__, tgt_port);
		ddi_prop_free(tgt_port);
		return (DDI_FAILURE);
	}

	mutex_enter(&pwp->lock);

	/* Retrieve softstate using unit-address */
	tgt = pmcs_get_target(iport, tgt_port, B_TRUE);
	if (tgt == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: tgt softstate not found", __func__);
		ddi_prop_free(tgt_port);
		mutex_exit(&pwp->lock);
		return (DDI_FAILURE);
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt, "%s: %s (%s)",
	    __func__, ddi_get_name(child), tgt_port);

	/* pmcs_get_target() returned with the PHY locked. */
	mutex_enter(&tgt->statlock);
	phy = tgt->phy;
	ASSERT(mutex_owned(&phy->phy_lock));

	if (IS_ROOT_PHY(phy)) {
		/* Expander attached to HBA - don't ref_count it */
		wwn = pwp->sas_wwns[0];
	} else {
		pmcs_inc_phy_ref_count(phy);

		/*
		 * Parent (in topology) is also an expander
		 * Now that we've increased the ref count on phy, it's OK
		 * to drop the lock so we can acquire the parent's lock.
		 */
		pphy = phy->parent;
		mutex_exit(&tgt->statlock);
		pmcs_unlock_phy(phy);
		pmcs_lock_phy(pphy);
		wwn = pmcs_barray2wwn(pphy->sas_address);
		pmcs_unlock_phy(pphy);
		pmcs_lock_phy(phy);
		mutex_enter(&tgt->statlock);
	}

	/*
	 * If this is the 1st smp_init, add this to our list.
	 */
	if (tgt->target_num == PMCS_INVALID_TARGET_NUM) {
		int target;
		/* Linear scan for the first free slot in the target table. */
		for (target = 0; target < pwp->max_dev; target++) {
			if (pwp->targets[target] != NULL) {
				continue;
			}

			pwp->targets[target] = tgt;
			tgt->target_num = (uint16_t)target;
			tgt->assigned = 1;
			tgt->dev_state = PMCS_DEVICE_STATE_OPERATIONAL;
			break;
		}

		if (target == pwp->max_dev) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
			    "Target list full.");
			goto smp_init_fail;
		}
	}

	if (!pmcs_assign_device(pwp, tgt)) {
		pwp->targets[tgt->target_num] = NULL;
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt,
		    "%s: pmcs_assign_device failed for target 0x%p",
		    __func__, (void *)tgt);
		goto smp_init_fail;
	}

	/*
	 * Update the attached port and target port pm properties
	 */
	tgt->smpd = smp_sd;

	pmcs_unlock_phy(phy);
	mutex_exit(&pwp->lock);

	/*
	 * NOTE(review): phy->dtype is read here after the PHY lock was
	 * dropped above; presumably safe because we hold a ref on the PHY
	 * and statlock -- confirm.
	 */
	tgt->ref_count++;
	tgt->dtype = phy->dtype;
	mutex_exit(&tgt->statlock);

	pmcs_update_phy_pm_props(phy, 0, 0, B_TRUE);

	addr = scsi_wwn_to_wwnstr(wwn, ua_form, NULL);
	if (smp_device_prop_update_string(smp_sd, SCSI_ADDR_PROP_ATTACHED_PORT,
	    addr) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed to set "
		    "prop ("SCSI_ADDR_PROP_ATTACHED_PORT")", __func__);
	}
	(void) scsi_free_wwnstr(addr);
	ddi_prop_free(tgt_port);
	return (DDI_SUCCESS);

smp_init_fail:
	/* Unwind the partial binding done above. */
	tgt->phy = NULL;
	tgt->target_num = PMCS_INVALID_TARGET_NUM;
	phy->target = NULL;
	if (!IS_ROOT_PHY(phy)) {
		pmcs_dec_phy_ref_count(phy);
	}
	mutex_exit(&tgt->statlock);
	pmcs_unlock_phy(phy);
	mutex_exit(&pwp->lock);
	ddi_soft_state_bystr_free(iport->tgt_sstate, tgt->unit_address);
	ddi_prop_free(tgt_port);
	return (DDI_FAILURE);
}

/*
 * pmcs_smp_free
 *
 * Teardown counterpart of pmcs_smp_init(): drops the target/PHY
 * references taken at init time and, on the last reference, removes
 * the target from pwp->targets[] and destroys it.
 */
static void
pmcs_smp_free(dev_info_t *self, dev_info_t *child,
    smp_hba_tran_t *tran, smp_device_t *smp)
{
	_NOTE(ARGUNUSED(tran, smp));
	pmcs_iport_t *iport;
	pmcs_hw_t *pwp;
	pmcs_xscsi_t *tgt;
	pmcs_phy_t *phyp;
	char *tgt_port;

	iport = ddi_get_soft_state(pmcs_iport_softstate,
	    ddi_get_instance(self));
	ASSERT(iport);
	if (iport == NULL)
		return;

	pwp = iport->pwp;
	if (pwp == NULL)
		return;
	/*
	 * NOTE(review): this ASSERT can never fire -- the NULL check above
	 * already returned.  pmcs_smp_init() uses the opposite order
	 * (ASSERT before the check); consider making the two consistent.
	 */
	ASSERT(pwp);

	/* Get "target-port" prop from devinfo node */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    SCSI_ADDR_PROP_TARGET_PORT, &tgt_port) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed to "
		    "lookup prop ("SCSI_ADDR_PROP_TARGET_PORT")", __func__);
		return;
	}

	/* Retrieve softstate using unit-address */
	mutex_enter(&pwp->lock);
	tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, tgt_port);
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt, "%s: %s (%s)", __func__,
	    ddi_get_name(child), tgt_port);
	ddi_prop_free(tgt_port);

	if (tgt == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: tgt softstate not found", __func__);
		mutex_exit(&pwp->lock);
		return;
	}

	/* Drop the PHY reference taken by pmcs_smp_init() (non-root only). */
	phyp = tgt->phy;
	if (phyp) {
		mutex_enter(&phyp->phy_lock);
		if (!IS_ROOT_PHY(phyp)) {
			pmcs_dec_phy_ref_count(phyp);
		}
	}
	mutex_enter(&tgt->statlock);

	if (--tgt->ref_count == 0) {
		/*
		 * Remove this target from our list. The softstate
		 * will remain, and the device will remain registered
		 * with the hardware unless/until we're told that the
		 * device physically went away.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt,
		    "Removing target 0x%p (vtgt %d) from target list",
		    (void *)tgt, tgt->target_num);
		pwp->targets[tgt->target_num] = NULL;
		tgt->target_num = PMCS_INVALID_TARGET_NUM;
		if (phyp) {
			phyp->target = NULL;
		}
		tgt->phy = NULL;
		/*
		 * NOTE(review): no mutex_exit(&tgt->statlock) on this path;
		 * presumably pmcs_destroy_target() consumes/releases the
		 * statlock -- confirm against its definition.
		 */
		pmcs_destroy_target(tgt);
	} else {
		mutex_exit(&tgt->statlock);
	}

	if (phyp) {
		mutex_exit(&phyp->phy_lock);
	}
	mutex_exit(&pwp->lock);
}

/*
 * pmcs_scsi_quiesce
 *
 * tran_quiesce(9E)-style entry point for the HBA node (iport nodes
 * return 0 immediately).  Sets pwp->blocked/quiesced to stop new
 * command submission, then waits on drain_cv until every target's
 * active command count drains to zero.  Returns 0 on success, -1 if
 * the softstate is missing or the HBA is not in STATE_RUNNING.
 */
static int
pmcs_scsi_quiesce(dev_info_t *dip)
{
	pmcs_hw_t *pwp;
	int totactive = -1;
	pmcs_xscsi_t *xp;
	uint16_t target;

	if (ddi_get_soft_state(pmcs_iport_softstate, ddi_get_instance(dip)))
		return (0);		/* iport */

	pwp = ddi_get_soft_state(pmcs_softc_state, ddi_get_instance(dip));
	if (pwp == NULL) {
		return (-1);
	}
	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		return (-1);
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s called", __func__);
	pwp->quiesced = pwp->blocked = 1;
	/* totactive starts at -1 so the drain loop runs at least once. */
	while (totactive) {
		totactive = 0;
		for (target = 0; target < pwp->max_dev; target++) {
			xp = pwp->targets[target];
			if (xp == NULL) {
				continue;
			}
			mutex_enter(&xp->statlock);
			if (xp->actv_cnt) {
				totactive += xp->actv_cnt;
				xp->draining = 1;
			}
			mutex_exit(&xp->statlock);
		}
		if (totactive) {
			cv_wait(&pwp->drain_cv, &pwp->lock);
		}
		/*
		 * The pwp->blocked may have been reset.
		 * e.g. a SCSI bus reset.
		 */
		pwp->blocked = 1;
	}

	/* Drain is complete; clear the per-target draining flags. */
	for (target = 0; target < pwp->max_dev; target++) {
		xp = pwp->targets[target];
		if (xp == NULL) {
			continue;
		}
		mutex_enter(&xp->statlock);
		xp->draining = 0;
		mutex_exit(&xp->statlock);
	}

	mutex_exit(&pwp->lock);
	if (totactive == 0) {
		/*
		 * NOTE(review): xp here is whatever the last loop iteration
		 * left in it (indeterminate if max_dev is 0); it is only
		 * used as the logging target argument -- confirm pmcs_prt()
		 * tolerates that.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
		    "%s drain complete", __func__);
	}
	return (0);
}

/*
 * pmcs_scsi_unquiesce
 *
 * Counterpart of pmcs_scsi_quiesce(): clears the blocked/quiesced
 * flags and restarts the wait and completion queues.  Returns 0 on
 * success, -1 if the softstate is missing or the HBA is not running.
 */
static int
pmcs_scsi_unquiesce(dev_info_t *dip)
{
	pmcs_hw_t *pwp;

	if (ddi_get_soft_state(pmcs_iport_softstate, ddi_get_instance(dip)))
		return (0);		/* iport */

	pwp = ddi_get_soft_state(pmcs_softc_state, ddi_get_instance(dip));
	if (pwp == NULL) {
		return (-1);
	}
	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		return (-1);
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s called", __func__);
	pwp->blocked = pwp->quiesced = 0;
	mutex_exit(&pwp->lock);

	/*
	 * Run all pending commands.
	 */
	pmcs_scsa_wq_run(pwp);

	/*
	 * Complete all completed commands.
	 * This also unlocks us.
	 */
	PMCS_CQ_RUN(pwp);
	return (0);
}

/*
 * Start commands for a particular device.
 * If the actual start of a command fails, return B_FALSE. Any other result
 * is a B_TRUE return.
 *
 * Pulls commands off xp's wait queue, allocates a work structure for
 * each and hands them to pmcs_SAS_run()/pmcs_SATA_run().  Holds a PHY
 * reference for the duration so the PHY cannot disappear while the
 * target lock is dropped.
 */
boolean_t
pmcs_scsa_wq_run_one(pmcs_hw_t *pwp, pmcs_xscsi_t *xp)
{
	pmcs_cmd_t *sp;
	pmcs_phy_t *phyp;
	pmcwork_t *pwrk;
	boolean_t run_one, blocked;
	int rval;

	/*
	 * First, check to see if we're blocked or resource limited
	 */
	mutex_enter(&pwp->lock);
	blocked = pwp->blocked;
	/*
	 * If resource_limited is set, we're resource constrained and
	 * we will run only one work request for this target.
	 */
	run_one = pwp->resource_limited;
	mutex_exit(&pwp->lock);

	if (blocked) {
		/* Queues will get restarted when we get unblocked */
		return (B_TRUE);
	}

	/*
	 * Might as well verify the queue is not empty before moving on
	 */
	mutex_enter(&xp->wqlock);
	if (STAILQ_EMPTY(&xp->wq)) {
		mutex_exit(&xp->wqlock);
		return (B_TRUE);
	}
	mutex_exit(&xp->wqlock);

	/*
	 * If we're draining or resetting, just reschedule work queue and bail.
	 */
	mutex_enter(&xp->statlock);
	if (xp->draining || xp->resetting || xp->special_running ||
	    xp->special_needed) {
		mutex_exit(&xp->statlock);
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		return (B_TRUE);
	}

	/*
	 * Next, check to see if the target is gone.
	 */
	if (xp->dev_gone) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
		    "%s: Flushing wait queue for dead tgt 0x%p", __func__,
		    (void *)xp);
		pmcs_flush_target_queues(pwp, xp, PMCS_TGT_WAIT_QUEUE);
		mutex_exit(&xp->statlock);
		return (B_TRUE);
	}

	/*
	 * Increment the PHY's ref_count now so we know it won't go away
	 * after we drop the target lock. Drop it before returning. If the
	 * PHY dies, the commands we attempt to send will fail, but at least
	 * we know we have a real PHY pointer.
	 */
	phyp = xp->phy;
	pmcs_inc_phy_ref_count(phyp);
	mutex_exit(&xp->statlock);

	mutex_enter(&xp->wqlock);
	while ((sp = STAILQ_FIRST(&xp->wq)) != NULL) {
		pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_CBACK, phyp);
		if (pwrk == NULL) {
			/*
			 * Out of work structures: go resource-limited and
			 * leave the command on the wait queue for a retry.
			 */
			mutex_exit(&xp->wqlock);
			mutex_enter(&pwp->lock);
			if (pwp->resource_limited == 0) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
				    "%s: out of work structures", __func__);
			}
			pwp->resource_limited = 1;
			SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
			mutex_exit(&pwp->lock);
			return (B_FALSE);
		}
		STAILQ_REMOVE_HEAD(&xp->wq, cmd_next);
		mutex_exit(&xp->wqlock);

		pwrk->xp = xp;
		pwrk->arg = sp;
		sp->cmd_tag = pwrk->htag;
		/* Default to a one second timeout if pkt_time is zero. */
		pwrk->timer = US2WT(CMD2PKT(sp)->pkt_time * 1000000);
		if (pwrk->timer == 0) {
			pwrk->timer = US2WT(1000000);
		}

		pwrk->dtype = xp->dtype;

		if (xp->dtype == SAS) {
			pwrk->ptr = (void *) pmcs_SAS_done;
			if ((rval = pmcs_SAS_run(sp, pwrk)) != 0) {
				sp->cmd_tag = NULL;
				pmcs_dec_phy_ref_count(phyp);
				pmcs_pwork(pwp, pwrk);
				SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
				if (rval == PMCS_WQ_RUN_FAIL_RES) {
					return (B_FALSE);
				} else {
					return (B_TRUE);
				}
			}
		} else {
			ASSERT(xp->dtype == SATA);
			pwrk->ptr = (void *) pmcs_SATA_done;
			if ((rval = pmcs_SATA_run(sp, pwrk)) != 0) {
				sp->cmd_tag = NULL;
				pmcs_dec_phy_ref_count(phyp);
				pmcs_pwork(pwp, pwrk);
				SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
				if (rval == PMCS_WQ_RUN_FAIL_RES) {
					return (B_FALSE);
				} else {
					return (B_TRUE);
				}
			}
		}

		if (run_one) {
			goto wq_out;
		}
		mutex_enter(&xp->wqlock);
	}

	mutex_exit(&xp->wqlock);

wq_out:
	pmcs_dec_phy_ref_count(phyp);
	return (B_TRUE);
}

/*
 * Start commands for all devices.
 *
 * Round-robins over the target table starting at last_wq_dev and runs
 * each non-empty wait queue via pmcs_scsa_wq_run_one(), tracking the
 * resource_limited state across passes.
 */
void
pmcs_scsa_wq_run(pmcs_hw_t *pwp)
{
	pmcs_xscsi_t *xp;
	uint16_t target_start, target;
	boolean_t rval = B_TRUE;

	mutex_enter(&pwp->lock);
	target_start = pwp->last_wq_dev;
	target = target_start;

	do {
		xp = pwp->targets[target];
		if ((xp == NULL) || (STAILQ_EMPTY(&xp->wq))) {
			if (++target == pwp->max_dev) {
				target = 0;
			}
			continue;
		}

		/* Drop pwp->lock across the per-target run. */
		mutex_exit(&pwp->lock);
		rval = pmcs_scsa_wq_run_one(pwp, xp);
		mutex_enter(&pwp->lock);

		if (rval == B_FALSE) {
			break;
		}

		if (++target == pwp->max_dev) {
			target = 0;
		}
	} while (target != target_start);

	if (rval) {
		/*
		 * If we were resource limited, but apparently are not now,
		 * reschedule the work queues anyway.
		 */
		if (pwp->resource_limited) {
			SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		}
		pwp->resource_limited = 0; /* Not resource-constrained */
	} else {
		/*
		 * Give everybody a chance, and reschedule to run the queues
		 * again as long as we're limited.
		 */
		pwp->resource_limited = 1;
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
	}

	pwp->last_wq_dev = target;
	mutex_exit(&pwp->lock);
}

/*
 * Pull the completion queue, drop the lock and complete all elements.
 *
 * Completion-queue thread body: loops until cq_info.cq_stop is set,
 * first draining the I/O completion callback list, then completing
 * every queued scsi_pkt, sleeping on cq_cv when both are empty.
 */

void
pmcs_scsa_cq_run(void *arg)
{
	pmcs_cq_thr_info_t *cqti = (pmcs_cq_thr_info_t *)arg;
	pmcs_hw_t *pwp = cqti->cq_pwp;
	pmcs_cmd_t *sp, *nxt;
	struct scsi_pkt *pkt;
	pmcs_xscsi_t *tgt;
	pmcs_iocomp_cb_t *ioccb, *ioccb_next;
	pmcs_cb_t callback;

	DTRACE_PROBE1(pmcs__scsa__cq__run__start, pmcs_cq_thr_info_t *, cqti);

	mutex_enter(&pwp->cq_lock);

	while (!pwp->cq_info.cq_stop) {
		/*
		 * First, check the I/O completion callback queue.
		 */
		ioccb = pwp->iocomp_cb_head;
		pwp->iocomp_cb_head = NULL;
		pwp->iocomp_cb_tail = NULL;
		mutex_exit(&pwp->cq_lock);

		while (ioccb) {
			/*
			 * Grab the lock on the work structure. The callback
			 * routine is responsible for clearing it.
			 */
			mutex_enter(&ioccb->pwrk->lock);
			ioccb_next = ioccb->next;
			callback = (pmcs_cb_t)ioccb->pwrk->ptr;
			(*callback)(pwp, ioccb->pwrk,
			    (uint32_t *)((void *)ioccb->iomb));
			kmem_cache_free(pwp->iocomp_cb_cache, ioccb);
			ioccb = ioccb_next;
		}

		/*
		 * Next, run the completion queue
		 */
		mutex_enter(&pwp->cq_lock);
		sp = STAILQ_FIRST(&pwp->cq);
		STAILQ_INIT(&pwp->cq);
		mutex_exit(&pwp->cq_lock);

		DTRACE_PROBE1(pmcs__scsa__cq__run__start__loop,
		    pmcs_cq_thr_info_t *, cqti);

		/* FMA: report (non-fatal) access/DMA handle trouble. */
		if (sp && pmcs_check_acc_dma_handle(pwp)) {
			ddi_fm_service_impact(pwp->dip, DDI_SERVICE_UNAFFECTED);
		}

		while (sp) {
			nxt = STAILQ_NEXT(sp, cmd_next);
			pkt = CMD2PKT(sp);
			tgt = sp->cmd_target;
			pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, tgt,
			    "%s: calling completion on %p for tgt %p", __func__,
			    (void *)sp, (void *)tgt);
			if (tgt) {
				mutex_enter(&tgt->statlock);
				ASSERT(tgt->actv_pkts != 0);
				tgt->actv_pkts--;
				mutex_exit(&tgt->statlock);
			}
			scsi_hba_pkt_comp(pkt);
			sp = nxt;
		}

		DTRACE_PROBE1(pmcs__scsa__cq__run__end__loop,
		    pmcs_cq_thr_info_t *, cqti);

		/*
		 * Check if there are more completions to do. If so, and we've
		 * not been told to stop, skip the wait and cycle through again.
		 */
		mutex_enter(&pwp->cq_lock);
		if ((pwp->iocomp_cb_head == NULL) && STAILQ_EMPTY(&pwp->cq) &&
		    !pwp->cq_info.cq_stop) {
			mutex_exit(&pwp->cq_lock);
			mutex_enter(&cqti->cq_thr_lock);
			cv_wait(&cqti->cq_cv, &cqti->cq_thr_lock);
			mutex_exit(&cqti->cq_thr_lock);
			mutex_enter(&pwp->cq_lock);
		}
	}

	mutex_exit(&pwp->cq_lock);
	DTRACE_PROBE1(pmcs__scsa__cq__run__stop, pmcs_cq_thr_info_t *, cqti);
	thread_exit();
}

/*
 * Run a SAS command. Called with pwrk->lock held, returns unlocked.
 *
 * Builds an SSP_INI_IO_START inbound message for the command, loads
 * DMA resources if a transfer is involved, moves the command onto the
 * target's active queue and rings the inbound queue.  On resource
 * failures the command is pushed back on the wait queue (or completed
 * with STATUS_QFULL) and PMCS_WQ_RUN_FAIL_RES/_OTHER is returned.
 */
static int
pmcs_SAS_run(pmcs_cmd_t *sp, pmcwork_t *pwrk)
{
	pmcs_hw_t *pwp = CMD2PMC(sp);
	struct scsi_pkt *pkt = CMD2PKT(sp);
	pmcs_xscsi_t *xp = pwrk->xp;
	uint32_t iq, *ptr;
	sas_ssp_cmd_iu_t sc;

	ASSERT(xp != NULL);
	mutex_enter(&xp->statlock);
	if (!xp->assigned) {
		mutex_exit(&xp->statlock);
		return (PMCS_WQ_RUN_FAIL_OTHER);
	}
	/* Queue-depth limit reached or device-state recovery pending. */
	if ((xp->actv_cnt >= xp->qdepth) || xp->recover_wait) {
		mutex_exit(&xp->statlock);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		return (PMCS_WQ_RUN_FAIL_OTHER);
	}
	/* On success this leaves pwp->iqp_lock[iq] held (see below). */
	GET_IO_IQ_ENTRY(pwp, ptr, pwrk->phy->device_id, iq);
	if (ptr == NULL) {
		mutex_exit(&xp->statlock);
		/*
		 * This is a temporary failure not likely to unblocked by
		 * commands completing as the test for scheduling the
		 * restart of work is a per-device test.
		 */
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
		    "%s: Failed to get IO IQ entry for tgt %d",
		    __func__, xp->target_num);
		return (PMCS_WQ_RUN_FAIL_RES);

	}

	ptr[0] =
	    LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE, PMCIN_SSP_INI_IO_START));
	ptr[1] = LE_32(pwrk->htag);
	ptr[2] = LE_32(pwrk->phy->device_id);
	ptr[3] = LE_32(pkt->pkt_dma_len);
	if (ptr[3]) {
		ASSERT(pkt->pkt_numcookies);
		if (pkt->pkt_dma_flags & DDI_DMA_READ) {
			ptr[4] = LE_32(PMCIN_DATADIR_2_INI);
		} else {
			ptr[4] = LE_32(PMCIN_DATADIR_2_DEV);
		}
		if (pmcs_dma_load(pwp, sp, ptr)) {
			mutex_exit(&pwp->iqp_lock[iq]);
			mutex_exit(&xp->statlock);
			mutex_enter(&xp->wqlock);
			if (STAILQ_EMPTY(&xp->wq)) {
				STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
				mutex_exit(&xp->wqlock);
			} else {
				/* Queue busy: complete the pkt as QFULL. */
				mutex_exit(&xp->wqlock);
				CMD2PKT(sp)->pkt_scbp[0] = STATUS_QFULL;
				CMD2PKT(sp)->pkt_reason = CMD_CMPLT;
				CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS |
				    STATE_GOT_TARGET | STATE_SENT_CMD |
				    STATE_GOT_STATUS;
				mutex_enter(&pwp->cq_lock);
				STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
				PMCS_CQ_RUN_LOCKED(pwp);
				mutex_exit(&pwp->cq_lock);
				pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
				    "%s: Failed to dma_load for tgt %d (QF)",
				    __func__, xp->target_num);
			}
			return (PMCS_WQ_RUN_FAIL_RES);
		}
	} else {
		ptr[4] = LE_32(PMCIN_DATADIR_NONE);
		CLEAN_MESSAGE(ptr, 12);
	}
	xp->actv_cnt++;
	if (xp->actv_cnt > xp->maxdepth) {
		xp->maxdepth = xp->actv_cnt;
		/*
		 * NOTE(review): the leading %s here is the PHY path
		 * (pwrk->phy->path), not __func__ as in most other
		 * messages in this file -- confirm that is intentional.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, pwrk->phy, xp, "%s: max depth "
		    "now %u", pwrk->phy->path, xp->maxdepth);
	}
	mutex_exit(&xp->statlock);


#ifdef	DEBUG
	/*
	 * Generate a PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED
	 * event when this goes out on the wire.
	 */
	ptr[4] |= PMCIN_MESSAGE_REPORT;
#endif
	/*
	 * Fill in the SSP IU
	 */

	bzero(&sc, sizeof (sas_ssp_cmd_iu_t));
	bcopy((uint8_t *)&sp->cmd_lun->scsi_lun, sc.lun, sizeof (scsi_lun_t));

	/* Map SCSA tag flags to the SAS task attribute. */
	switch (pkt->pkt_flags & FLAG_TAGMASK) {
	case FLAG_HTAG:
		sc.task_attribute = SAS_CMD_TASK_ATTR_HEAD;
		break;
	case FLAG_OTAG:
		sc.task_attribute = SAS_CMD_TASK_ATTR_ORDERED;
		break;
	case FLAG_STAG:
	default:
		sc.task_attribute = SAS_CMD_TASK_ATTR_SIMPLE;
		break;
	}
	(void) memcpy(sc.cdb, pkt->pkt_cdbp,
	    min(SCSA_CDBLEN(sp), sizeof (sc.cdb)));
	(void) memcpy(&ptr[5], &sc, sizeof (sas_ssp_cmd_iu_t));
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	mutex_exit(&pwrk->lock);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
	    "%s: giving pkt %p (tag %x) to the hardware", __func__,
	    (void *)pkt, pwrk->htag);
#ifdef DEBUG
	pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "SAS INI Message", ptr);
#endif
	mutex_enter(&xp->aqlock);
	STAILQ_INSERT_TAIL(&xp->aq, sp, cmd_next);
	mutex_exit(&xp->aqlock);
	/* Ring the inbound queue; this also drops iqp_lock[iq]. */
	INC_IQ_ENTRY(pwp, iq);

	/*
	 * If we just submitted the last command queued from device state
	 * recovery, clear the wq_recovery_tail pointer.
	 */
	mutex_enter(&xp->wqlock);
	if (xp->wq_recovery_tail == sp) {
		xp->wq_recovery_tail = NULL;
	}
	mutex_exit(&xp->wqlock);

	return (PMCS_WQ_RUN_SUCCESS);
}

/*
 * Complete a SAS command
 *
 * Called with pwrk lock held.
 * The free of pwrk releases the lock.
 *
 * Parses the outbound SSP completion IOMB (msg): decodes the firmware
 * status, the SSP response IU (response data, sense data or bare SCSI
 * status plus optional residual), updates the scsi_pkt accordingly,
 * removes the command from the active queue, and either queues it for
 * completion, or re-queues it at the head of the wait queue when device
 * state recovery is required.
 */

static void
pmcs_SAS_done(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *msg)
{
	pmcs_cmd_t *sp = pwrk->arg;
	pmcs_phy_t *pptr = pwrk->phy;
	pmcs_xscsi_t *xp = pwrk->xp;
	struct scsi_pkt *pkt = CMD2PKT(sp);
	int dead;
	uint32_t sts;
	boolean_t aborted = B_FALSE;
	boolean_t do_ds_recovery = B_FALSE;

	ASSERT(xp != NULL);
	ASSERT(sp != NULL);
	ASSERT(pptr != NULL);

	DTRACE_PROBE4(pmcs__io__done, uint64_t, pkt->pkt_dma_len, int,
	    (pkt->pkt_dma_flags & DDI_DMA_READ) != 0, hrtime_t, pwrk->start,
	    hrtime_t, gethrtime());

	dead = pwrk->dead;

	if (msg) {
		sts = LE_32(msg[2]);
	} else {
		sts = 0;
	}

	if (dead != 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, "%s: dead cmd tag "
		    "0x%x for %s", __func__, pwrk->htag, pptr->path);
		goto out;
	}

	if (sts == PMCOUT_STATUS_ABORTED) {
		aborted = B_TRUE;
	}

	if (pwrk->state == PMCS_WORK_STATE_TIMED_OUT) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: cmd 0x%p (tag 0x%x) timed out for %s",
		    __func__, (void *)sp, pwrk->htag, pptr->path);
		CMD2PKT(sp)->pkt_scbp[0] = STATUS_GOOD;
		CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD;
		CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT;
		goto out;
	}

	/*
	 * If the status isn't okay but not underflow,
	 * step to the side and parse the (possible) error.
	 */
#ifdef DEBUG
	if (msg) {
		pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "Outbound Message", msg);
	}
#endif
	if (!msg) {
		goto out;
	}

	switch (sts) {
	case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
	case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL:
	case PMCOUT_STATUS_IO_DS_IN_RECOVERY:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: PHY %s requires DS recovery (status=%d)",
		    __func__, pptr->path, sts);
		do_ds_recovery = B_TRUE;
		break;
	case PMCOUT_STATUS_UNDERFLOW:
		/* Underflow is not an error: record residual, treat as OK. */
		(void) pmcs_set_resid(pkt, pkt->pkt_dma_len, LE_32(msg[3]));
		pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW, NULL, NULL,
		    "%s: underflow %u for cdb 0x%x",
		    __func__, LE_32(msg[3]), pkt->pkt_cdbp[0] & 0xff);
		sts = PMCOUT_STATUS_OK;
		msg[3] = 0;
		break;
	case PMCOUT_STATUS_OK:
		pkt->pkt_resid = 0;
		break;
	}

	if (sts != PMCOUT_STATUS_OK) {
		pmcs_ioerror(pwp, SAS, pwrk, msg, sts);
	} else {
		/* msg[3] non-zero means an SSP response IU is present. */
		if (msg[3]) {
			uint8_t local[PMCS_QENTRY_SIZE << 1], *xd;
			sas_ssp_rsp_iu_t *rptr = (void *)local;
			const int lim =
			    (PMCS_QENTRY_SIZE << 1) - SAS_RSP_HDR_SIZE;
			static const uint8_t ssp_rsp_evec[] = {
				0x58, 0x61, 0x56, 0x72, 0x00
			};

			/*
			 * Transform the first part of the response
			 * to host canonical form. This gives us enough
			 * information to figure out what to do with the
			 * rest (which remains unchanged in the incoming
			 * message which can be up to two queue entries
			 * in length).
			 */
			pmcs_endian_transform(pwp, local, &msg[5],
			    ssp_rsp_evec);
			xd = (uint8_t *)(&msg[5]);
			xd += SAS_RSP_HDR_SIZE;

			if (rptr->datapres == SAS_RSP_DATAPRES_RESPONSE_DATA) {
				if (rptr->response_data_length != 4) {
					pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
					    "Bad SAS RESPONSE DATA LENGTH",
					    msg);
					pkt->pkt_reason = CMD_TRAN_ERR;
					goto out;
				}
				(void) memcpy(&sts, xd, sizeof (uint32_t));
				sts = BE_32(sts);
				/*
				 * The only response code we should legally get
				 * here is an INVALID FRAME response code.
				 */
				if (sts == SAS_RSP_INVALID_FRAME) {
					pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
					    "%s: pkt %p tgt %u path %s "
					    "completed: INVALID FRAME response",
					    __func__, (void *)pkt,
					    xp->target_num, pptr->path);
				} else {
					pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
					    "%s: pkt %p tgt %u path %s "
					    "completed: illegal response 0x%x",
					    __func__, (void *)pkt,
					    xp->target_num, pptr->path, sts);
				}
				pkt->pkt_reason = CMD_TRAN_ERR;
				goto out;
			}
			if (rptr->datapres == SAS_RSP_DATAPRES_SENSE_DATA) {
				uint32_t slen;
				slen = rptr->sense_data_length;
				/* Clamp sense length to our local buffer. */
				if (slen > lim) {
					slen = lim;
				}
				pmcs_latch_status(pwp, sp, rptr->status, xd,
				    slen, pptr->path);
			} else if (rptr->datapres == SAS_RSP_DATAPRES_NO_DATA) {
				pmcout_ssp_comp_t *sspcp;
				sspcp = (pmcout_ssp_comp_t *)msg;
				uint32_t *residp;
				/*
				 * This is the case for a plain SCSI status.
				 * Note: If RESC_V is set and we're here, there
				 * is a residual. We need to find it and update
				 * the packet accordingly.
				 */
				pmcs_latch_status(pwp, sp, rptr->status, NULL,
				    0, pptr->path);

				if (sspcp->resc_v) {
					/*
					 * Point residual to the SSP_RESP_IU
					 */
					residp = (uint32_t *)(sspcp + 1);
					/*
					 * param contains the number of bytes
					 * between where the SSP_RESP_IU may
					 * or may not be and the residual.
					 * Increment residp by the appropriate
					 * number of words: (param+resc_pad)/4).
					 */
					residp += (LE_32(sspcp->param) +
					    sspcp->resc_pad) /
					    sizeof (uint32_t);
					pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW,
					    pptr, xp, "%s: tgt 0x%p "
					    "residual %d for pkt 0x%p",
					    __func__, (void *) xp, *residp,
					    (void *) pkt);
					ASSERT(LE_32(*residp) <=
					    pkt->pkt_dma_len);
					(void) pmcs_set_resid(pkt,
					    pkt->pkt_dma_len, LE_32(*residp));
				}
			} else {
				pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
				    "illegal SAS response", msg);
				pkt->pkt_reason = CMD_TRAN_ERR;
				goto out;
			}
		} else {
			/* No response IU at all: plain GOOD status. */
			pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0,
			    pptr->path);
		}
		if (pkt->pkt_dma_len) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp,
	    "%s: pkt %p tgt %u done reason=%x state=%x resid=%ld status=%x",
	    __func__, (void *)pkt, xp->target_num, pkt->pkt_reason,
	    pkt->pkt_state, pkt->pkt_resid, pkt->pkt_scbp[0]);

	if (pwrk->state == PMCS_WORK_STATE_ABORTED) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: scsi_pkt 0x%p aborted for PHY %s; work = 0x%p",
		    __func__, (void *)pkt, pptr->path, (void *)pwrk);
		aborted = B_TRUE;
	}

out:
	pmcs_dma_unload(pwp, sp);
	mutex_enter(&xp->statlock);

	/*
	 * If the device no longer has a PHY pointer, clear the PHY pointer
	 * from the work structure before we free it. Otherwise, pmcs_pwork
	 * may decrement the ref_count on a PHY that's been freed.
	 */
	if (xp->phy == NULL) {
		pwrk->phy = NULL;
	}

	/*
	 * We may arrive here due to a command timing out, which in turn
	 * could be addressed in a different context. So, free the work
	 * back, but only after confirming it's not already been freed
	 * elsewhere.
	 */
	if (pwrk->htag != PMCS_TAG_FREE) {
		pmcs_pwork(pwp, pwrk);
	}

	/*
	 * If the device is gone, we only put this command on the completion
	 * queue if the work structure is not marked dead. If it's marked
	 * dead, it will already have been put there.
	 */
	if (xp->dev_gone) {
		mutex_exit(&xp->statlock);
		if (!dead) {
			mutex_enter(&xp->aqlock);
			STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next);
			mutex_exit(&xp->aqlock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, xp,
			    "%s: Removing cmd 0x%p (htag 0x%x) from aq",
			    __func__, (void *)sp, sp->cmd_tag);
			mutex_enter(&pwp->cq_lock);
			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
			PMCS_CQ_RUN_LOCKED(pwp);
			mutex_exit(&pwp->cq_lock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp,
			    "%s: Completing command for dead target 0x%p",
			    __func__, (void *)xp);
		}
		return;
	}

	/* Wake quiesce/drain waiters when the last active command drains. */
	ASSERT(xp->actv_cnt > 0);
	if (--(xp->actv_cnt) == 0) {
		if (xp->draining) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, xp,
			    "%s: waking up drain waiters", __func__);
			cv_signal(&pwp->drain_cv);
		}
	}
	mutex_exit(&xp->statlock);

	/*
	 * If the status is other than OK, determine if it's something that
	 * is worth re-attempting enumeration. If so, mark the PHY.
	 */
	if (sts != PMCOUT_STATUS_OK) {
		pmcs_status_disposition(pptr, sts);
	}

	if (dead == 0) {
#ifdef	DEBUG
		/* Debug builds verify sp is actually on the active queue. */
		pmcs_cmd_t *wp;
		mutex_enter(&xp->aqlock);
		STAILQ_FOREACH(wp, &xp->aq, cmd_next) {
			if (wp == sp) {
				break;
			}
		}
		ASSERT(wp != NULL);
#else
		mutex_enter(&xp->aqlock);
#endif
		pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, xp,
		    "%s: Removing cmd 0x%p (htag 0x%x) from aq", __func__,
		    (void *)sp, sp->cmd_tag);
		STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next);
		if (aborted) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
			    "%s: Aborted cmd for tgt 0x%p, signaling waiters",
			    __func__, (void *)xp);
			cv_signal(&xp->abort_cv);
		}
		mutex_exit(&xp->aqlock);
	}

	/*
	 * If do_ds_recovery is set, we need to initiate device state
	 * recovery. In this case, we put this I/O back on the head of
	 * the wait queue to run again after recovery is complete
	 */
	if (do_ds_recovery) {
		mutex_enter(&xp->statlock);
		pmcs_start_dev_state_recovery(xp, pptr);
		mutex_exit(&xp->statlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, xp, "%s: Putting cmd 0x%p "
		    "back on wq during recovery for tgt 0x%p", __func__,
		    (void *)sp, (void *)xp);
		mutex_enter(&xp->wqlock);
		if (xp->wq_recovery_tail == NULL) {
			STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		} else {
			/*
			 * If there are other I/Os waiting at the head due to
			 * device state recovery, add this one in the right spot
			 * to maintain proper order.
			 */
			STAILQ_INSERT_AFTER(&xp->wq, xp->wq_recovery_tail, sp,
			    cmd_next);
		}
		xp->wq_recovery_tail = sp;
		mutex_exit(&xp->wqlock);
	} else {
		/*
		 * If we're not initiating device state recovery and this
		 * command was not "dead", put it on the completion queue
		 */
		if (!dead) {
			mutex_enter(&pwp->cq_lock);
			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
			PMCS_CQ_RUN_LOCKED(pwp);
			mutex_exit(&pwp->cq_lock);
		}
	}
}

/*
 * Run a SATA command (normal reads and writes),
 * or block and schedule a SATL interpretation
 * of the command.
 *
 * Called with pwrk lock held, returns unlocked.
 */

static int
pmcs_SATA_run(pmcs_cmd_t *sp, pmcwork_t *pwrk)
{
	pmcs_hw_t *pwp = CMD2PMC(sp);
	struct scsi_pkt *pkt = CMD2PKT(sp);
	pmcs_xscsi_t *xp;
	uint8_t cdb_base, asc, tag;
	uint32_t *ptr, iq, nblk, i, mtype;
	fis_t fis;
	size_t amt;
	uint64_t lba;

	xp = pwrk->xp;
	ASSERT(xp != NULL);

	/*
	 * First, see if this is just a plain read/write command.
	 * If not, we have to queue it up for processing, block
	 * any additional commands from coming in, and wake up
	 * the thread that will process this command.
2367 */ 2368 cdb_base = pkt->pkt_cdbp[0] & 0x1f; 2369 if (cdb_base != SCMD_READ && cdb_base != SCMD_WRITE) { 2370 pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, NULL, 2371 "%s: special SATA cmd %p", __func__, (void *)sp); 2372 2373 ASSERT(xp->phy != NULL); 2374 pmcs_pwork(pwp, pwrk); 2375 pmcs_lock_phy(xp->phy); 2376 mutex_enter(&xp->statlock); 2377 xp->special_needed = 1; /* Set the special_needed flag */ 2378 STAILQ_INSERT_TAIL(&xp->sq, sp, cmd_next); 2379 if (pmcs_run_sata_special(pwp, xp)) { 2380 SCHEDULE_WORK(pwp, PMCS_WORK_SATA_RUN); 2381 } 2382 mutex_exit(&xp->statlock); 2383 pmcs_unlock_phy(xp->phy); 2384 2385 return (PMCS_WQ_RUN_SUCCESS); 2386 } 2387 2388 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "%s: regular cmd", __func__); 2389 2390 mutex_enter(&xp->statlock); 2391 if (!xp->assigned) { 2392 mutex_exit(&xp->statlock); 2393 return (PMCS_WQ_RUN_FAIL_OTHER); 2394 } 2395 if (xp->special_running || xp->special_needed || xp->recover_wait) { 2396 mutex_exit(&xp->statlock); 2397 mutex_enter(&xp->wqlock); 2398 STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next); 2399 mutex_exit(&xp->wqlock); 2400 /* 2401 * By the time we get here the special 2402 * commands running or waiting to be run 2403 * may have come and gone, so kick our 2404 * worker to run the waiting queues 2405 * just in case. 2406 */ 2407 SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES); 2408 return (PMCS_WQ_RUN_FAIL_OTHER); 2409 } 2410 lba = xp->capacity; 2411 mutex_exit(&xp->statlock); 2412 2413 /* 2414 * Extract data length and lba parameters out of the command. The 2415 * function pmcs_SATA_rwparm returns a non-zero ASC value if the CDB 2416 * values are considered illegal. 
2417 */ 2418 asc = pmcs_SATA_rwparm(pkt->pkt_cdbp, &nblk, &lba, lba); 2419 if (asc) { 2420 uint8_t sns[18]; 2421 bzero(sns, sizeof (sns)); 2422 sns[0] = 0xf0; 2423 sns[2] = 0x5; 2424 sns[12] = asc; 2425 pmcs_latch_status(pwp, sp, STATUS_CHECK, sns, sizeof (sns), 2426 pwrk->phy->path); 2427 pmcs_pwork(pwp, pwrk); 2428 mutex_enter(&pwp->cq_lock); 2429 STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next); 2430 PMCS_CQ_RUN_LOCKED(pwp); 2431 mutex_exit(&pwp->cq_lock); 2432 return (PMCS_WQ_RUN_SUCCESS); 2433 } 2434 2435 /* 2436 * If the command decodes as not moving any data, complete it here. 2437 */ 2438 amt = nblk; 2439 amt <<= 9; 2440 amt = pmcs_set_resid(pkt, amt, nblk << 9); 2441 if (amt == 0) { 2442 pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, 2443 pwrk->phy->path); 2444 pmcs_pwork(pwp, pwrk); 2445 mutex_enter(&pwp->cq_lock); 2446 STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next); 2447 PMCS_CQ_RUN_LOCKED(pwp); 2448 mutex_exit(&pwp->cq_lock); 2449 return (PMCS_WQ_RUN_SUCCESS); 2450 } 2451 2452 /* 2453 * Get an inbound queue entry for this I/O 2454 */ 2455 GET_IO_IQ_ENTRY(pwp, ptr, xp->phy->device_id, iq); 2456 if (ptr == NULL) { 2457 /* 2458 * This is a temporary failure not likely to unblocked by 2459 * commands completing as the test for scheduling the 2460 * restart of work is a per-device test. 2461 */ 2462 mutex_enter(&xp->wqlock); 2463 STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next); 2464 mutex_exit(&xp->wqlock); 2465 pmcs_dma_unload(pwp, sp); 2466 SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES); 2467 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp, 2468 "%s: Failed to get IO IQ entry for tgt %d", 2469 __func__, xp->target_num); 2470 return (PMCS_WQ_RUN_FAIL_RES); 2471 } 2472 2473 /* 2474 * Get a tag. At this point, hold statlock until the tagmap is 2475 * updated (just prior to sending the cmd to the hardware). 
2476 */ 2477 mutex_enter(&xp->statlock); 2478 for (tag = 0; tag < xp->qdepth; tag++) { 2479 if ((xp->tagmap & (1 << tag)) == 0) { 2480 break; 2481 } 2482 } 2483 2484 if (tag == xp->qdepth) { 2485 mutex_exit(&xp->statlock); 2486 mutex_exit(&pwp->iqp_lock[iq]); 2487 mutex_enter(&xp->wqlock); 2488 STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next); 2489 mutex_exit(&xp->wqlock); 2490 return (PMCS_WQ_RUN_FAIL_OTHER); 2491 } 2492 2493 sp->cmd_satltag = (uint8_t)tag; 2494 2495 /* 2496 * Set up the command 2497 */ 2498 bzero(fis, sizeof (fis)); 2499 ptr[0] = 2500 LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE, PMCIN_SATA_HOST_IO_START)); 2501 ptr[1] = LE_32(pwrk->htag); 2502 ptr[2] = LE_32(pwrk->phy->device_id); 2503 ptr[3] = LE_32(amt); 2504 2505 if (xp->ncq) { 2506 mtype = SATA_PROTOCOL_FPDMA | (tag << 16); 2507 fis[0] = ((nblk & 0xff) << 24) | (C_BIT << 8) | FIS_REG_H2DEV; 2508 if (cdb_base == SCMD_READ) { 2509 fis[0] |= (READ_FPDMA_QUEUED << 16); 2510 } else { 2511 fis[0] |= (WRITE_FPDMA_QUEUED << 16); 2512 } 2513 fis[1] = (FEATURE_LBA << 24) | (lba & 0xffffff); 2514 fis[2] = ((nblk & 0xff00) << 16) | ((lba >> 24) & 0xffffff); 2515 fis[3] = tag << 3; 2516 } else { 2517 int op; 2518 fis[0] = (C_BIT << 8) | FIS_REG_H2DEV; 2519 if (xp->pio) { 2520 mtype = SATA_PROTOCOL_PIO; 2521 if (cdb_base == SCMD_READ) { 2522 op = READ_SECTORS_EXT; 2523 } else { 2524 op = WRITE_SECTORS_EXT; 2525 } 2526 } else { 2527 mtype = SATA_PROTOCOL_DMA; 2528 if (cdb_base == SCMD_READ) { 2529 op = READ_DMA_EXT; 2530 } else { 2531 op = WRITE_DMA_EXT; 2532 } 2533 } 2534 fis[0] |= (op << 16); 2535 fis[1] = (FEATURE_LBA << 24) | (lba & 0xffffff); 2536 fis[2] = (lba >> 24) & 0xffffff; 2537 fis[3] = nblk; 2538 } 2539 2540 if (cdb_base == SCMD_READ) { 2541 ptr[4] = LE_32(mtype | PMCIN_DATADIR_2_INI); 2542 } else { 2543 ptr[4] = LE_32(mtype | PMCIN_DATADIR_2_DEV); 2544 } 2545 #ifdef DEBUG 2546 /* 2547 * Generate a PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED 2548 * event when this goes out on the wire. 
2549 */ 2550 ptr[4] |= PMCIN_MESSAGE_REPORT; 2551 #endif 2552 for (i = 0; i < (sizeof (fis_t))/(sizeof (uint32_t)); i++) { 2553 ptr[i+5] = LE_32(fis[i]); 2554 } 2555 if (pmcs_dma_load(pwp, sp, ptr)) { 2556 mutex_exit(&xp->statlock); 2557 mutex_exit(&pwp->iqp_lock[iq]); 2558 mutex_enter(&xp->wqlock); 2559 STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next); 2560 mutex_exit(&xp->wqlock); 2561 pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp, 2562 "%s: Failed to dma_load for tgt %d", 2563 __func__, xp->target_num); 2564 return (PMCS_WQ_RUN_FAIL_RES); 2565 2566 } 2567 2568 pwrk->state = PMCS_WORK_STATE_ONCHIP; 2569 mutex_exit(&pwrk->lock); 2570 xp->tagmap |= (1 << tag); 2571 xp->actv_cnt++; 2572 if (xp->actv_cnt > xp->maxdepth) { 2573 xp->maxdepth = xp->actv_cnt; 2574 pmcs_prt(pwp, PMCS_PRT_DEBUG2, pwrk->phy, xp, 2575 "%s: max depth now %u", pwrk->phy->path, xp->maxdepth); 2576 } 2577 mutex_exit(&xp->statlock); 2578 mutex_enter(&xp->aqlock); 2579 STAILQ_INSERT_TAIL(&xp->aq, sp, cmd_next); 2580 mutex_exit(&xp->aqlock); 2581 pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, 2582 "%s: giving pkt %p to hardware", __func__, (void *)pkt); 2583 #ifdef DEBUG 2584 pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "SATA INI Message", ptr); 2585 #endif 2586 INC_IQ_ENTRY(pwp, iq); 2587 2588 return (PMCS_WQ_RUN_SUCCESS); 2589 } 2590 2591 /* 2592 * Complete a SATA command. Called with pwrk lock held. 
2593 */ 2594 void 2595 pmcs_SATA_done(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *msg) 2596 { 2597 pmcs_cmd_t *sp = pwrk->arg; 2598 struct scsi_pkt *pkt = CMD2PKT(sp); 2599 pmcs_phy_t *pptr = pwrk->phy; 2600 int dead; 2601 uint32_t sts; 2602 pmcs_xscsi_t *xp; 2603 boolean_t aborted = B_FALSE; 2604 2605 xp = pwrk->xp; 2606 ASSERT(xp != NULL); 2607 2608 DTRACE_PROBE4(pmcs__io__done, uint64_t, pkt->pkt_dma_len, int, 2609 (pkt->pkt_dma_flags & DDI_DMA_READ) != 0, hrtime_t, pwrk->start, 2610 hrtime_t, gethrtime()); 2611 2612 dead = pwrk->dead; 2613 2614 if (msg) { 2615 sts = LE_32(msg[2]); 2616 } else { 2617 sts = 0; 2618 } 2619 2620 if (dead != 0) { 2621 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, "%s: dead cmd tag " 2622 "0x%x for %s", __func__, pwrk->htag, pptr->path); 2623 goto out; 2624 } 2625 if ((pwrk->state == PMCS_WORK_STATE_TIMED_OUT) && 2626 (sts != PMCOUT_STATUS_ABORTED)) { 2627 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 2628 "%s: cmd 0x%p (tag 0x%x) timed out for %s", 2629 __func__, (void *)sp, pwrk->htag, pptr->path); 2630 CMD2PKT(sp)->pkt_scbp[0] = STATUS_GOOD; 2631 /* pkt_reason already set to CMD_TIMEOUT */ 2632 ASSERT(CMD2PKT(sp)->pkt_reason == CMD_TIMEOUT); 2633 CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | 2634 STATE_SENT_CMD; 2635 CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT; 2636 goto out; 2637 } 2638 2639 pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp, "%s: pkt %p tgt %u done", 2640 __func__, (void *)pkt, xp->target_num); 2641 2642 /* 2643 * If the status isn't okay but not underflow, 2644 * step to the side and parse the (possible) error. 2645 */ 2646 #ifdef DEBUG 2647 if (msg) { 2648 pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "Outbound Message", msg); 2649 } 2650 #endif 2651 if (!msg) { 2652 goto out; 2653 } 2654 2655 /* 2656 * If the status isn't okay or we got a FIS response of some kind, 2657 * step to the side and parse the (possible) error. 
2658 */ 2659 if ((sts != PMCOUT_STATUS_OK) || (LE_32(msg[3]) != 0)) { 2660 if (sts == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) { 2661 mutex_exit(&pwrk->lock); 2662 pmcs_lock_phy(pptr); 2663 mutex_enter(&xp->statlock); 2664 if ((xp->resetting == 0) && (xp->reset_success != 0) && 2665 (xp->reset_wait == 0)) { 2666 mutex_exit(&xp->statlock); 2667 if (pmcs_reset_phy(pwp, pptr, 2668 PMCS_PHYOP_LINK_RESET) != 0) { 2669 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 2670 "%s: PHY (%s) Local Control/Link " 2671 "Reset FAILED as part of error " 2672 "recovery", __func__, pptr->path); 2673 } 2674 mutex_enter(&xp->statlock); 2675 } 2676 mutex_exit(&xp->statlock); 2677 pmcs_unlock_phy(pptr); 2678 mutex_enter(&pwrk->lock); 2679 } 2680 pmcs_ioerror(pwp, SATA, pwrk, msg, sts); 2681 } else { 2682 pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, 2683 pwrk->phy->path); 2684 pkt->pkt_state |= STATE_XFERRED_DATA; 2685 pkt->pkt_resid = 0; 2686 } 2687 2688 pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp, 2689 "%s: pkt %p tgt %u done reason=%x state=%x resid=%ld status=%x", 2690 __func__, (void *)pkt, xp->target_num, pkt->pkt_reason, 2691 pkt->pkt_state, pkt->pkt_resid, pkt->pkt_scbp[0]); 2692 2693 if (pwrk->state == PMCS_WORK_STATE_ABORTED) { 2694 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 2695 "%s: scsi_pkt 0x%p aborted for PHY %s; work = 0x%p", 2696 __func__, (void *)pkt, pptr->path, (void *)pwrk); 2697 aborted = B_TRUE; 2698 } 2699 2700 out: 2701 pmcs_dma_unload(pwp, sp); 2702 mutex_enter(&xp->statlock); 2703 xp->tagmap &= ~(1 << sp->cmd_satltag); 2704 2705 /* 2706 * If the device no longer has a PHY pointer, clear the PHY pointer 2707 * from the work structure before we free it. Otherwise, pmcs_pwork 2708 * may decrement the ref_count on a PHY that's been freed. 2709 */ 2710 if (xp->phy == NULL) { 2711 pwrk->phy = NULL; 2712 } 2713 2714 /* 2715 * We may arrive here due to a command timing out, which in turn 2716 * could be addressed in a different context. 
So, free the work 2717 * back, but only after confirming it's not already been freed 2718 * elsewhere. 2719 */ 2720 if (pwrk->htag != PMCS_TAG_FREE) { 2721 pmcs_pwork(pwp, pwrk); 2722 } 2723 2724 if (xp->dev_gone) { 2725 mutex_exit(&xp->statlock); 2726 if (!dead) { 2727 mutex_enter(&xp->aqlock); 2728 STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next); 2729 mutex_exit(&xp->aqlock); 2730 pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, xp, 2731 "%s: Removing cmd 0x%p (htag 0x%x) from aq", 2732 __func__, (void *)sp, sp->cmd_tag); 2733 mutex_enter(&pwp->cq_lock); 2734 STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next); 2735 PMCS_CQ_RUN_LOCKED(pwp); 2736 mutex_exit(&pwp->cq_lock); 2737 pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp, 2738 "%s: Completing command for dead target 0x%p", 2739 __func__, (void *)xp); 2740 } 2741 return; 2742 } 2743 2744 ASSERT(xp->actv_cnt > 0); 2745 if (--(xp->actv_cnt) == 0) { 2746 if (xp->draining) { 2747 pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, xp, 2748 "%s: waking up drain waiters", __func__); 2749 cv_signal(&pwp->drain_cv); 2750 } else if (xp->special_needed) { 2751 SCHEDULE_WORK(pwp, PMCS_WORK_SATA_RUN); 2752 } 2753 } 2754 mutex_exit(&xp->statlock); 2755 2756 /* 2757 * If the status is other than OK, determine if it's something that 2758 * is worth re-attempting enumeration. If so, mark the PHY. 
2759 */ 2760 if (sts != PMCOUT_STATUS_OK) { 2761 pmcs_status_disposition(pptr, sts); 2762 } 2763 2764 if (dead == 0) { 2765 #ifdef DEBUG 2766 pmcs_cmd_t *wp; 2767 mutex_enter(&xp->aqlock); 2768 STAILQ_FOREACH(wp, &xp->aq, cmd_next) { 2769 if (wp == sp) { 2770 break; 2771 } 2772 } 2773 ASSERT(wp != NULL); 2774 #else 2775 mutex_enter(&xp->aqlock); 2776 #endif 2777 STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next); 2778 if (aborted) { 2779 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 2780 "%s: Aborted cmd for tgt 0x%p, signaling waiters", 2781 __func__, (void *)xp); 2782 cv_signal(&xp->abort_cv); 2783 } 2784 mutex_exit(&xp->aqlock); 2785 mutex_enter(&pwp->cq_lock); 2786 STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next); 2787 PMCS_CQ_RUN_LOCKED(pwp); 2788 mutex_exit(&pwp->cq_lock); 2789 } 2790 } 2791 2792 static uint8_t 2793 pmcs_SATA_rwparm(uint8_t *cdb, uint32_t *xfr, uint64_t *lba, uint64_t lbamax) 2794 { 2795 uint8_t asc = 0; 2796 switch (cdb[0]) { 2797 case SCMD_READ_G5: 2798 case SCMD_WRITE_G5: 2799 *xfr = 2800 (((uint32_t)cdb[10]) << 24) | 2801 (((uint32_t)cdb[11]) << 16) | 2802 (((uint32_t)cdb[12]) << 8) | 2803 ((uint32_t)cdb[13]); 2804 *lba = 2805 (((uint64_t)cdb[2]) << 56) | 2806 (((uint64_t)cdb[3]) << 48) | 2807 (((uint64_t)cdb[4]) << 40) | 2808 (((uint64_t)cdb[5]) << 32) | 2809 (((uint64_t)cdb[6]) << 24) | 2810 (((uint64_t)cdb[7]) << 16) | 2811 (((uint64_t)cdb[8]) << 8) | 2812 ((uint64_t)cdb[9]); 2813 /* Check for illegal bits */ 2814 if (cdb[15]) { 2815 asc = 0x24; /* invalid field in cdb */ 2816 } 2817 break; 2818 case SCMD_READ_G4: 2819 case SCMD_WRITE_G4: 2820 *xfr = 2821 (((uint32_t)cdb[6]) << 16) | 2822 (((uint32_t)cdb[7]) << 8) | 2823 ((uint32_t)cdb[8]); 2824 *lba = 2825 (((uint32_t)cdb[2]) << 24) | 2826 (((uint32_t)cdb[3]) << 16) | 2827 (((uint32_t)cdb[4]) << 8) | 2828 ((uint32_t)cdb[5]); 2829 /* Check for illegal bits */ 2830 if (cdb[11]) { 2831 asc = 0x24; /* invalid field in cdb */ 2832 } 2833 break; 2834 case SCMD_READ_G1: 2835 case SCMD_WRITE_G1: 2836 *xfr = 
(((uint32_t)cdb[7]) << 8) | ((uint32_t)cdb[8]); 2837 *lba = 2838 (((uint32_t)cdb[2]) << 24) | 2839 (((uint32_t)cdb[3]) << 16) | 2840 (((uint32_t)cdb[4]) << 8) | 2841 ((uint32_t)cdb[5]); 2842 /* Check for illegal bits */ 2843 if (cdb[9]) { 2844 asc = 0x24; /* invalid field in cdb */ 2845 } 2846 break; 2847 case SCMD_READ: 2848 case SCMD_WRITE: 2849 *xfr = cdb[4]; 2850 if (*xfr == 0) { 2851 *xfr = 256; 2852 } 2853 *lba = 2854 (((uint32_t)cdb[1] & 0x1f) << 16) | 2855 (((uint32_t)cdb[2]) << 8) | 2856 ((uint32_t)cdb[3]); 2857 /* Check for illegal bits */ 2858 if (cdb[5]) { 2859 asc = 0x24; /* invalid field in cdb */ 2860 } 2861 break; 2862 } 2863 2864 if (asc == 0) { 2865 if ((*lba + *xfr) > lbamax) { 2866 asc = 0x21; /* logical block out of range */ 2867 } 2868 } 2869 return (asc); 2870 } 2871 2872 /* 2873 * Called with pwrk lock held. 2874 */ 2875 static void 2876 pmcs_ioerror(pmcs_hw_t *pwp, pmcs_dtype_t t, pmcwork_t *pwrk, uint32_t *w, 2877 uint32_t status) 2878 { 2879 static uint8_t por[] = { 2880 0xf0, 0x0, 0x6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x28 2881 }; 2882 static uint8_t parity[] = { 2883 0xf0, 0x0, 0xb, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x47, 5 2884 }; 2885 const char *msg; 2886 char buf[20]; 2887 pmcs_cmd_t *sp = pwrk->arg; 2888 pmcs_phy_t *phyp = pwrk->phy; 2889 struct scsi_pkt *pkt = CMD2PKT(sp); 2890 uint32_t resid; 2891 2892 ASSERT(w != NULL); 2893 resid = LE_32(w[3]); 2894 2895 msg = pmcs_status_str(status); 2896 if (msg == NULL) { 2897 (void) snprintf(buf, sizeof (buf), "Error 0x%x", status); 2898 msg = buf; 2899 } 2900 2901 if (status != PMCOUT_STATUS_OK) { 2902 pmcs_prt(pwp, PMCS_PRT_DEBUG1, phyp, NULL, 2903 "%s: device %s tag 0x%x status %s @ %llu", __func__, 2904 phyp->path, pwrk->htag, msg, 2905 (unsigned long long)gethrtime()); 2906 } 2907 2908 pkt->pkt_reason = CMD_CMPLT; /* default reason */ 2909 2910 switch (status) { 2911 case PMCOUT_STATUS_OK: 2912 if (t == SATA) { 2913 int i; 2914 fis_t fis; 2915 for (i = 0; i < sizeof (fis) / sizeof (fis[0]); i++) { 
				fis[i] = LE_32(w[4+i]);
			}
			if ((fis[0] & 0xff) != FIS_REG_D2H) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL,
				    "unexpected fis code 0x%x", fis[0] & 0xff);
			} else {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL,
				    "FIS ERROR");
				pmcs_fis_dump(pwp, fis);
			}
			pkt->pkt_reason = CMD_TRAN_ERR;
			break;
		}
		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, phyp->path);
		break;

	case PMCOUT_STATUS_ABORTED:
		/*
		 * Command successfully aborted.
		 */
		if (phyp->dead) {
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_state = STATE_GOT_BUS;
		} else if (pwrk->ssp_event != 0) {
			pkt->pkt_reason = CMD_TRAN_ERR;
			pkt->pkt_state = STATE_GOT_BUS;
		} else if (pwrk->state == PMCS_WORK_STATE_TIMED_OUT) {
			pkt->pkt_reason = CMD_TIMEOUT;
			pkt->pkt_statistics |= STAT_TIMEOUT;
			pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
			    STATE_SENT_CMD;
		} else {
			pkt->pkt_reason = CMD_ABORTED;
			pkt->pkt_statistics |= STAT_ABORTED;
			pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
			    STATE_SENT_CMD;
		}

		/*
		 * PMCS_WORK_STATE_TIMED_OUT doesn't need to be preserved past
		 * this point, so go ahead and mark it as aborted.
		 */
		pwrk->state = PMCS_WORK_STATE_ABORTED;
		break;

	case PMCOUT_STATUS_UNDERFLOW:
		/*
		 * This will only get called for SATA
		 */
		pkt->pkt_resid = resid;
		if (pkt->pkt_dma_len < pkt->pkt_resid) {
			(void) pmcs_set_resid(pkt, pkt->pkt_dma_len, resid);
		}
		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, phyp->path);
		break;

	case PMCOUT_STATUS_NO_DEVICE:
	case PMCOUT_STATUS_XFER_ERROR_SATA_LINK_TIMEOUT:
		pkt->pkt_reason = CMD_DEV_GONE;
		break;

	case PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION:
		/*
		 * Need to do rediscovery.  We probably have
		 * the wrong device (disk swap), so kill
		 * this one.
		 */
	case PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_ERROR:
		/*
		 * Need to do rediscovery.
		 */
		if (!phyp->dead) {
			/* pwrk lock must be dropped around the PHY lock */
			mutex_exit(&pwrk->lock);
			pmcs_lock_phy(pwrk->phy);
			pmcs_kill_changed(pwp, pwrk->phy, 0);
			pmcs_unlock_phy(pwrk->phy);
			mutex_enter(&pwrk->lock);
			pkt->pkt_reason = CMD_INCOMPLETE;
			pkt->pkt_state = STATE_GOT_BUS;
		} else {
			pkt->pkt_reason = CMD_DEV_GONE;
		}
		break;

	case PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
	case PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION:
	case PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED:
		/* cmd is pending on the target */
	case PMCOUT_STATUS_XFER_ERROR_OFFSET_MISMATCH:
	case PMCOUT_STATUS_XFER_ERROR_REJECTED_NCQ_MODE:
		/* transitory - commands sent while in NCQ failure mode */
	case PMCOUT_STATUS_XFER_ERROR_ABORTED_NCQ_MODE:
		/* NCQ failure */
	case PMCOUT_STATUS_IO_PORT_IN_RESET:
	case PMCOUT_STATUS_XFER_ERR_BREAK:
	case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state = STATE_GOT_BUS;
		break;

	case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, phyp->target,
		    "STATUS_BUSY for htag 0x%08x", sp->cmd_tag);
		pmcs_latch_status(pwp, sp, STATUS_BUSY, NULL, 0, phyp->path);
		break;

	case PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
		/* synthesize a RESERVATION CONFLICT */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, phyp->target,
		    "%s: Potential affiliation active on 0x%" PRIx64, __func__,
		    pmcs_barray2wwn(phyp->sas_address));
		pmcs_latch_status(pwp, sp, STATUS_RESERVATION_CONFLICT, NULL,
		    0, phyp->path);
		break;

	case PMCOUT_STATUS_XFER_ERROR_ABORTED_DUE_TO_SRST:
		/* synthesize a power-on/reset */
		pmcs_latch_status(pwp, sp, STATUS_CHECK, por, sizeof (por),
		    phyp->path);
		break;

	case PMCOUT_STATUS_XFER_ERROR_UNEXPECTED_PHASE:
	case PMCOUT_STATUS_XFER_ERROR_RDY_OVERRUN:
	case PMCOUT_STATUS_XFER_ERROR_RDY_NOT_EXPECTED:
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK:
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK:
		/* synthesize a PARITY ERROR */
		pmcs_latch_status(pwp, sp, STATUS_CHECK, parity,
		    sizeof (parity), phyp->path);
		break;

	case PMCOUT_STATUS_IO_XFER_ERROR_DMA:
	case PMCOUT_STATUS_IO_NOT_VALID:
	case PMCOUT_STATUS_PROG_ERROR:
	case PMCOUT_STATUS_XFER_ERROR_PEER_ABORTED:
	case PMCOUT_STATUS_XFER_ERROR_SATA:	/* non-NCQ failure */
	default:
		pkt->pkt_reason = CMD_TRAN_ERR;
		break;
	}
}

/*
 * Latch up SCSI status
 */

void
pmcs_latch_status(pmcs_hw_t *pwp, pmcs_cmd_t *sp, uint8_t status,
    uint8_t *snsp, size_t snslen, char *path)
{
	static const char c1[] =
	    "%s: Status Byte 0x%02x for CDB0=0x%02x (%02x %02x %02x) "
	    "HTAG 0x%x @ %llu";
	static const char c2[] =
	    "%s: Status Byte 0x%02x for CDB0=0x%02x HTAG 0x%x @ %llu";

	CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
	    STATE_SENT_CMD | STATE_GOT_STATUS;
	CMD2PKT(sp)->pkt_scbp[0] = status;

	/*
	 * For CHECK CONDITION with sense data and room for an ARQ status,
	 * synthesize the auto-request-sense completion.
	 */
	if (status == STATUS_CHECK && snsp &&
	    (size_t)SCSA_STSLEN(sp) >= sizeof (struct scsi_arq_status)) {
		struct scsi_arq_status *aqp =
		    (void *) CMD2PKT(sp)->pkt_scbp;
		size_t amt = sizeof (struct scsi_extended_sense);
		uint8_t key = scsi_sense_key(snsp);
		uint8_t asc = scsi_sense_asc(snsp);
		uint8_t ascq = scsi_sense_ascq(snsp);
		if (amt > snslen) {
			amt = snslen;
		}
		pmcs_prt(pwp,
PMCS_PRT_DEBUG_SCSI_STATUS, NULL, NULL, c1, path, 3093 status, CMD2PKT(sp)->pkt_cdbp[0] & 0xff, key, asc, ascq, 3094 sp->cmd_tag, (unsigned long long)gethrtime()); 3095 CMD2PKT(sp)->pkt_state |= STATE_ARQ_DONE; 3096 (*(uint8_t *)&aqp->sts_rqpkt_status) = STATUS_GOOD; 3097 aqp->sts_rqpkt_statistics = 0; 3098 aqp->sts_rqpkt_reason = CMD_CMPLT; 3099 aqp->sts_rqpkt_state = STATE_GOT_BUS | 3100 STATE_GOT_TARGET | STATE_SENT_CMD | 3101 STATE_XFERRED_DATA | STATE_GOT_STATUS; 3102 (void) memcpy(&aqp->sts_sensedata, snsp, amt); 3103 if (aqp->sts_sensedata.es_class != CLASS_EXTENDED_SENSE) { 3104 aqp->sts_rqpkt_reason = CMD_TRAN_ERR; 3105 aqp->sts_rqpkt_state = 0; 3106 aqp->sts_rqpkt_resid = 3107 sizeof (struct scsi_extended_sense); 3108 } else { 3109 aqp->sts_rqpkt_resid = 3110 sizeof (struct scsi_extended_sense) - amt; 3111 } 3112 } else if (status) { 3113 pmcs_prt(pwp, PMCS_PRT_DEBUG_SCSI_STATUS, NULL, NULL, c2, 3114 path, status, CMD2PKT(sp)->pkt_cdbp[0] & 0xff, 3115 sp->cmd_tag, (unsigned long long)gethrtime()); 3116 } 3117 3118 CMD2PKT(sp)->pkt_reason = CMD_CMPLT; 3119 } 3120 3121 /* 3122 * Calculate and set packet residual and return the amount 3123 * left over after applying various filters. 3124 */ 3125 size_t 3126 pmcs_set_resid(struct scsi_pkt *pkt, size_t amt, uint32_t cdbamt) 3127 { 3128 pkt->pkt_resid = cdbamt; 3129 if (amt > pkt->pkt_resid) { 3130 amt = pkt->pkt_resid; 3131 } 3132 if (amt > pkt->pkt_dma_len) { 3133 amt = pkt->pkt_dma_len; 3134 } 3135 return (amt); 3136 } 3137 3138 /* 3139 * Return the existing target softstate (unlocked) if there is one. If so, 3140 * the PHY is locked and that lock must be freed by the caller after the 3141 * target/PHY linkage is established. If there isn't one, and alloc_tgt is 3142 * TRUE, then allocate one. 
3143 */ 3144 pmcs_xscsi_t * 3145 pmcs_get_target(pmcs_iport_t *iport, char *tgt_port, boolean_t alloc_tgt) 3146 { 3147 pmcs_hw_t *pwp = iport->pwp; 3148 pmcs_phy_t *phyp; 3149 pmcs_xscsi_t *tgt; 3150 uint64_t wwn; 3151 char unit_address[PMCS_MAX_UA_SIZE]; 3152 int ua_form = 1; 3153 3154 /* 3155 * Find the PHY for this target 3156 */ 3157 phyp = pmcs_find_phy_by_sas_address(pwp, iport, NULL, tgt_port); 3158 if (phyp == NULL) { 3159 pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL, 3160 "%s: No PHY for target @ %s", __func__, tgt_port); 3161 return (NULL); 3162 } 3163 3164 tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, tgt_port); 3165 3166 if (tgt) { 3167 mutex_enter(&tgt->statlock); 3168 /* 3169 * There's already a target. Check its PHY pointer to see 3170 * if we need to clear the old linkages 3171 */ 3172 if (tgt->phy && (tgt->phy != phyp)) { 3173 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt, 3174 "%s: Target PHY updated from %p to %p", __func__, 3175 (void *)tgt->phy, (void *)phyp); 3176 if (!IS_ROOT_PHY(tgt->phy)) { 3177 pmcs_dec_phy_ref_count(tgt->phy); 3178 pmcs_inc_phy_ref_count(phyp); 3179 } 3180 tgt->phy->target = NULL; 3181 } 3182 3183 /* 3184 * If this target has no PHY pointer and alloc_tgt is FALSE, 3185 * that implies we expect the target to already exist. This 3186 * implies that there has already been a tran_tgt_init on at 3187 * least one LU. 
3188 */ 3189 if ((tgt->phy == NULL) && !alloc_tgt) { 3190 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, tgt, 3191 "%s: Establish linkage from new PHY to old target @" 3192 "%s", __func__, tgt->unit_address); 3193 for (int idx = 0; idx < tgt->ref_count; idx++) { 3194 pmcs_inc_phy_ref_count(phyp); 3195 } 3196 } 3197 3198 tgt->phy = phyp; 3199 phyp->target = tgt; 3200 3201 mutex_exit(&tgt->statlock); 3202 return (tgt); 3203 } 3204 3205 /* 3206 * Make sure the PHY we found is on the correct iport 3207 */ 3208 if (phyp->iport != iport) { 3209 pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL, 3210 "%s: No target at %s on this iport", __func__, tgt_port); 3211 pmcs_unlock_phy(phyp); 3212 return (NULL); 3213 } 3214 3215 /* 3216 * If this was just a lookup (i.e. alloc_tgt is false), return now. 3217 */ 3218 if (alloc_tgt == B_FALSE) { 3219 pmcs_unlock_phy(phyp); 3220 return (NULL); 3221 } 3222 3223 /* 3224 * Allocate the new softstate 3225 */ 3226 wwn = pmcs_barray2wwn(phyp->sas_address); 3227 (void) scsi_wwn_to_wwnstr(wwn, ua_form, unit_address); 3228 3229 if (ddi_soft_state_bystr_zalloc(iport->tgt_sstate, unit_address) != 3230 DDI_SUCCESS) { 3231 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt, 3232 "%s: Couldn't alloc softstate for device at %s", 3233 __func__, unit_address); 3234 pmcs_unlock_phy(phyp); 3235 return (NULL); 3236 } 3237 3238 tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, unit_address); 3239 ASSERT(tgt != NULL); 3240 STAILQ_INIT(&tgt->wq); 3241 STAILQ_INIT(&tgt->aq); 3242 STAILQ_INIT(&tgt->sq); 3243 mutex_init(&tgt->statlock, NULL, MUTEX_DRIVER, 3244 DDI_INTR_PRI(pwp->intr_pri)); 3245 mutex_init(&tgt->wqlock, NULL, MUTEX_DRIVER, 3246 DDI_INTR_PRI(pwp->intr_pri)); 3247 mutex_init(&tgt->aqlock, NULL, MUTEX_DRIVER, 3248 DDI_INTR_PRI(pwp->intr_pri)); 3249 cv_init(&tgt->reset_cv, NULL, CV_DRIVER, NULL); 3250 cv_init(&tgt->abort_cv, NULL, CV_DRIVER, NULL); 3251 list_create(&tgt->lun_list, sizeof (pmcs_lun_t), 3252 offsetof(pmcs_lun_t, lun_list_next)); 3253 tgt->qdepth = 1; 3254 
tgt->target_num = PMCS_INVALID_TARGET_NUM; 3255 bcopy(unit_address, tgt->unit_address, PMCS_MAX_UA_SIZE); 3256 tgt->pwp = pwp; 3257 tgt->ua = strdup(iport->ua); 3258 tgt->phy = phyp; 3259 ASSERT((phyp->target == NULL) || (phyp->target == tgt)); 3260 if (phyp->target == NULL) { 3261 phyp->target = tgt; 3262 } 3263 3264 /* 3265 * Don't allocate LUN softstate for SMP targets 3266 */ 3267 if (phyp->dtype == EXPANDER) { 3268 return (tgt); 3269 } 3270 3271 if (ddi_soft_state_bystr_init(&tgt->lun_sstate, 3272 sizeof (pmcs_lun_t), PMCS_LUN_SSTATE_SZ) != 0) { 3273 pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt, 3274 "%s: LUN soft_state_bystr_init failed", __func__); 3275 ddi_soft_state_bystr_free(iport->tgt_sstate, tgt_port); 3276 pmcs_unlock_phy(phyp); 3277 return (NULL); 3278 } 3279 3280 return (tgt); 3281 } 3282