/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * SCSI (SCSA) midlayer interface for PMC driver.
 */

#include <sys/scsi/adapters/pmcs/pmcs.h>

extern scsi_lun_t scsi_lun64_to_lun(scsi_lun64_t lun64);

static int pmcs_scsa_tran_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static void pmcs_scsa_tran_tgt_free(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static int pmcs_scsa_start(struct scsi_address *, struct scsi_pkt *);
static int pmcs_scsa_abort(struct scsi_address *, struct scsi_pkt *);
static int pmcs_scsa_reset(struct scsi_address *, int);
static int pmcs_scsi_reset_notify(struct scsi_address *, int,
    void (*)(caddr_t), caddr_t);
static int pmcs_scsa_getcap(struct scsi_address *, char *, int);
static int pmcs_scsa_setcap(struct scsi_address *, char *, int, int);
static int pmcs_scsa_setup_pkt(struct scsi_pkt *, int (*)(caddr_t), caddr_t);
static void pmcs_scsa_teardown_pkt(struct scsi_pkt *);

static int pmcs_smp_init(dev_info_t *, dev_info_t *, smp_hba_tran_t *,
    smp_device_t *);
static void pmcs_smp_free(dev_info_t *, dev_info_t *, smp_hba_tran_t *,
    smp_device_t *);
static int pmcs_smp_start(struct smp_pkt *);

static int pmcs_scsi_quiesce(dev_info_t *);
static int pmcs_scsi_unquiesce(dev_info_t *);

static int pmcs_cap(struct scsi_address *, char *, int, int, int);
static pmcs_xscsi_t *
    pmcs_addr2xp(struct scsi_address *, uint64_t *, pmcs_cmd_t *);
static int pmcs_SAS_run(pmcs_cmd_t *, pmcwork_t *);
static void pmcs_SAS_done(pmcs_hw_t *, pmcwork_t *, uint32_t *);

static int pmcs_SATA_run(pmcs_cmd_t *, pmcwork_t *);
static void pmcs_SATA_done(pmcs_hw_t *, pmcwork_t *, uint32_t *);
static uint8_t pmcs_SATA_rwparm(uint8_t *, uint32_t *, uint64_t *, uint64_t);

static void pmcs_ioerror(pmcs_hw_t *, pmcs_dtype_t pmcs_dtype,
    pmcwork_t *, uint32_t *, uint32_t);


/*
 * Allocate and set up both the SCSA (scsi_hba_tran_t) and SMP
 * (smp_hba_tran_t) transports for this HBA instance and attach them.
 * The DMA attributes are copied from the caller's template with the
 * scatter/gather list length widened to cover chained SGL chunks.
 * Returns DDI_SUCCESS or DDI_FAILURE; on failure all partially
 * allocated transport structures are freed.
 */
int
pmcs_scsa_init(pmcs_hw_t *pwp, const ddi_dma_attr_t *ap)
{
	scsi_hba_tran_t *tran;
	ddi_dma_attr_t pmcs_scsa_dattr;
	int flags;

	(void) memcpy(&pmcs_scsa_dattr, ap, sizeof (ddi_dma_attr_t));
	/* SGL length across all chained chunks, not just a single chunk */
	pmcs_scsa_dattr.dma_attr_sgllen =
	    ((PMCS_SGL_NCHUNKS - 1) * (PMCS_MAX_CHUNKS - 1)) + PMCS_SGL_NCHUNKS;
	pmcs_scsa_dattr.dma_attr_flags = DDI_DMA_RELAXED_ORDERING;
	pmcs_scsa_dattr.dma_attr_flags |= DDI_DMA_FLAGERR;

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(pwp->dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "scsi_hba_tran_alloc failed");
		return (DDI_FAILURE);
	}

	tran->tran_hba_private = pwp;
	tran->tran_tgt_init = pmcs_scsa_tran_tgt_init;
	tran->tran_tgt_free = pmcs_scsa_tran_tgt_free;
	tran->tran_start = pmcs_scsa_start;
	tran->tran_abort = pmcs_scsa_abort;
	tran->tran_reset = pmcs_scsa_reset;
	tran->tran_reset_notify = pmcs_scsi_reset_notify;
	tran->tran_getcap = pmcs_scsa_getcap;
	tran->tran_setcap = pmcs_scsa_setcap;
	tran->tran_setup_pkt = pmcs_scsa_setup_pkt;
	tran->tran_teardown_pkt = pmcs_scsa_teardown_pkt;
	tran->tran_quiesce = pmcs_scsi_quiesce;
	tran->tran_unquiesce = pmcs_scsi_unquiesce;
	tran->tran_interconnect_type = INTERCONNECT_SAS;
	tran->tran_hba_len = sizeof (pmcs_cmd_t);

	/*
	 * Attach this instance of the hba
	 */

	flags = SCSI_HBA_TRAN_SCB | SCSI_HBA_TRAN_CDB | SCSI_HBA_ADDR_COMPLEX |
	    SCSI_HBA_TRAN_PHCI | SCSI_HBA_HBA;

	if (scsi_hba_attach_setup(pwp->dip, &pmcs_scsa_dattr, tran, flags)) {
		scsi_hba_tran_free(tran);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "scsi_hba_attach failed");
		return (DDI_FAILURE);
	}
	pwp->tran = tran;

	/*
	 * Attach the SMP part of this hba
	 */
	pwp->smp_tran = smp_hba_tran_alloc(pwp->dip);
	ASSERT(pwp->smp_tran != NULL);
	pwp->smp_tran->smp_tran_hba_private = pwp;
	pwp->smp_tran->smp_tran_init = pmcs_smp_init;
	pwp->smp_tran->smp_tran_free = pmcs_smp_free;
	pwp->smp_tran->smp_tran_start = pmcs_smp_start;

	if (smp_hba_attach_setup(pwp->dip, pwp->smp_tran) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "smp_hba_attach failed");
		smp_hba_tran_free(pwp->smp_tran);
		pwp->smp_tran = NULL;
		scsi_hba_tran_free(tran);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * SCSA entry points
 */

/*
 * tran_tgt_init(9E): per-LUN initialization.  Looks up (or creates) the
 * target softstate for this unit-address via pmcs_get_target(), allocates
 * per-LUN soft state keyed by unit-address, assigns the target a slot in
 * pwp->targets[] on the first init, and registers the device with the
 * hardware through pmcs_assign_device().  On any failure, the
 * tgt_init_fail path unwinds whatever had been set up.
 */
static int
pmcs_scsa_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	pmcs_hw_t *pwp = NULL;
	int rval;
	char *variant_prop = "sata";
	char *tgt_port = NULL, *ua = NULL;
	pmcs_xscsi_t *tgt = NULL;
	pmcs_iport_t *iport;
	pmcs_lun_t *lun = NULL;
	pmcs_phy_t *phyp = NULL;
	uint64_t lun_num;
	boolean_t got_scratch = B_FALSE;

	/*
	 * First, make sure we're an iport and get the pointer to the HBA
	 * node's softstate
	 */
	if (scsi_hba_iport_unit_address(hba_dip) == NULL) {
		pmcs_prt(TRAN2PMC(tran), PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: We don't enumerate devices on the HBA node", __func__);
		goto tgt_init_fail;
	}

	pwp = ITRAN2PMC(tran);
	iport = ITRAN2IPORT(tran);

	/*
	 * Get the unit-address
	 */
	ua = scsi_device_unit_address(sd);
	if (ua == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: Couldn't get UA", __func__);
		pwp = NULL;
		goto tgt_init_fail;
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL,
	    "got ua '%s'", ua);

	/*
	 * Get the target address
	 */
	rval = scsi_device_prop_lookup_string(sd, SCSI_DEVICE_PROP_PATH,
	    SCSI_ADDR_PROP_TARGET_PORT, &tgt_port);
	if (rval != DDI_PROP_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "Couldn't get target UA");
		pwp = NULL;
		goto tgt_init_fail;
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL,
	    "got tgt_port '%s'", tgt_port);

	/*
	 * Validate that this tran_tgt_init is for an active iport.
	 */
	if (iport->ua_state == UA_INACTIVE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Got tran_tgt_init on inactive iport for '%s'",
		    __func__, tgt_port);
		pwp = NULL;
		goto tgt_init_fail;
	}

	/*
	 * Since we're going to wait for scratch, be sure to acquire it while
	 * we're not holding any other locks
	 */
	(void) pmcs_acquire_scratch(pwp, B_TRUE);
	got_scratch = B_TRUE;

	mutex_enter(&pwp->lock);

	/*
	 * See if there's already a target softstate. If not, allocate one.
	 */
	tgt = pmcs_get_target(iport, tgt_port, B_TRUE);

	if (tgt == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "%s: "
		    "No tgt for tgt_port (%s)", __func__, tgt_port);
		goto tgt_init_fail;
	}

	phyp = tgt->phy;
	if (!IS_ROOT_PHY(phyp)) {
		pmcs_inc_phy_ref_count(phyp);
	}
	/* pmcs_get_target() returns with the PHY locked */
	ASSERT(mutex_owned(&phyp->phy_lock));

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, phyp, tgt, "@%s tgt = 0x%p, dip = 0x%p",
	    ua, (void *)tgt, (void *)tgt_dip);

	/* Now get the lun */
	lun_num = scsi_device_prop_get_int64(sd, SCSI_DEVICE_PROP_PATH,
	    SCSI_ADDR_PROP_LUN64, SCSI_LUN64_ILLEGAL);
	if (lun_num == SCSI_LUN64_ILLEGAL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "No LUN for tgt %p", (void *)tgt);
		goto tgt_init_fail;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt, "%s: @%s tgt 0x%p phy "
	    "0x%p (%s)", __func__, ua, (void *)tgt, (void *)phyp, phyp->path);

	mutex_enter(&tgt->statlock);
	tgt->dtype = phyp->dtype;
	if (tgt->dtype != SAS && tgt->dtype != SATA) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "PHY 0x%p went away?", (void *)phyp);
		goto tgt_init_fail;
	}

	/* We don't support SATA devices at LUN > 0. */
	if ((tgt->dtype == SATA) && (lun_num > 0)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: No support for SATA devices at LUN > 0 "
		    "(target = 0x%p)", __func__, (void *)tgt);
		goto tgt_init_fail;
	}

	/*
	 * Allocate LU soft state. We use ddi_soft_state_bystr_zalloc instead
	 * of kmem_alloc because ddi_soft_state_bystr_zalloc allows us to
	 * verify that the framework never tries to initialize two scsi_device
	 * structures with the same unit-address at the same time.
	 */
	if (ddi_soft_state_bystr_zalloc(tgt->lun_sstate, ua) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, phyp, tgt,
		    "Couldn't allocate LU soft state");
		goto tgt_init_fail;
	}

	lun = ddi_soft_state_bystr_get(tgt->lun_sstate, ua);
	if (lun == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, phyp, tgt,
		    "Couldn't get LU soft state");
		goto tgt_init_fail;
	}
	scsi_device_hba_private_set(sd, lun);
	lun->lun_num = lun_num;

	/* convert the scsi_lun64_t value to SCSI standard form */
	lun->scsi_lun = scsi_lun64_to_lun(lun_num);

	/*
	 * The soft state was zalloc'd, so a bounded copy without an explicit
	 * terminator still leaves unit_address NUL-terminated.
	 */
	ASSERT(strlen(ua) < (PMCS_MAX_UA_SIZE - 1));
	bcopy(ua, lun->unit_address, strnlen(ua, PMCS_MAX_UA_SIZE - 1));

	lun->target = tgt;

	/*
	 * If this is the first tran_tgt_init, add this target to our list
	 */
	if (tgt->target_num == PMCS_INVALID_TARGET_NUM) {
		int target;
		for (target = 0; target < pwp->max_dev; target++) {
			if (pwp->targets[target] != NULL) {
				continue;
			}

			pwp->targets[target] = tgt;
			tgt->target_num = (uint16_t)target;
			break;
		}

		if (target == pwp->max_dev) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
			    "Target list full.");
			goto tgt_init_fail;
		}
	}

	tgt->dip = sd->sd_dev;
	lun->sd = sd;
	list_insert_tail(&tgt->lun_list, lun);

	if (!pmcs_assign_device(pwp, tgt)) {
		pmcs_release_scratch(pwp);
		pwp->targets[tgt->target_num] = NULL;
		tgt->target_num = PMCS_INVALID_TARGET_NUM;
		tgt->phy = NULL;
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: pmcs_assign_device failed for target 0x%p",
		    __func__, (void *)tgt);
		goto tgt_init_fail;
	}

	pmcs_release_scratch(pwp);
	tgt->ref_count++;

	(void) scsi_device_prop_update_int(sd, SCSI_DEVICE_PROP_PATH,
	    SCSI_ADDR_PROP_TARGET, (uint32_t)(tgt->target_num));

	/* SM-HBA */
	if (tgt->dtype == SATA) {
		/* TCR in PSARC/1997/281 opinion */
		(void) scsi_device_prop_update_string(sd,
		    SCSI_DEVICE_PROP_PATH, "variant", variant_prop);
	}

	tgt->phy_addressable = PMCS_PHY_ADDRESSABLE(phyp);

	if (tgt->phy_addressable) {
		(void) scsi_device_prop_update_int(sd, SCSI_DEVICE_PROP_PATH,
		    SCSI_ADDR_PROP_SATA_PHY, phyp->phynum);
	}

	/* SM-HBA */
	(void) pmcs_smhba_set_scsi_device_props(pwp, phyp, sd);
	/*
	 * Make sure attached port and target port pm props are updated
	 * By passing in 0s, we're not actually updating any values, but
	 * the properties should now get updated on the node.
	 */

	mutex_exit(&tgt->statlock);
	pmcs_update_phy_pm_props(phyp, 0, 0, B_TRUE);
	pmcs_unlock_phy(phyp);
	mutex_exit(&pwp->lock);
	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, tgt_port);
	return (DDI_SUCCESS);

tgt_init_fail:
	/* Unwind in reverse order of setup; each step checks what exists */
	scsi_device_hba_private_set(sd, NULL);
	if (got_scratch) {
		pmcs_release_scratch(pwp);
	}
	if (lun) {
		list_remove(&tgt->lun_list, lun);
		ddi_soft_state_bystr_free(tgt->lun_sstate, ua);
	}
	if (phyp) {
		mutex_exit(&tgt->statlock);
		pmcs_unlock_phy(phyp);
		/*
		 * phyp's ref count was incremented in pmcs_new_tport.
		 * We're failing configuration, we now need to decrement it.
		 */
		if (!IS_ROOT_PHY(phyp)) {
			pmcs_dec_phy_ref_count(phyp);
		}
		phyp->target = NULL;
	}
	if (tgt && tgt->ref_count == 0) {
		ddi_soft_state_bystr_free(iport->tgt_sstate, tgt_port);
	}
	if (pwp) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: failed for @%s tgt 0x%p phy 0x%p", __func__, ua,
		    (void *)tgt, (void *)phyp);
	}
	if (tgt_port) {
		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, tgt_port);
	}
	return (DDI_FAILURE);
}

/*
 * tran_tgt_free(9E): per-LUN teardown.  Releases the LUN soft state and,
 * when the last reference goes away, removes the target from
 * pwp->targets[] and destroys the target softstate.
 */
static void
pmcs_scsa_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	_NOTE(ARGUNUSED(hba_dip, tgt_dip));
	pmcs_hw_t *pwp;
	pmcs_lun_t *lun;
	pmcs_xscsi_t *target;
	char *unit_address;
	pmcs_phy_t *phyp;

	if (scsi_hba_iport_unit_address(hba_dip) == NULL) {
		pwp = TRAN2PMC(tran);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
		    "%s: We don't enumerate devices on the HBA node", __func__);
		return;
	}

	lun = (pmcs_lun_t *)scsi_device_hba_private_get(sd);

	ASSERT((lun != NULL) && (lun->target != NULL));
	ASSERT(lun->target->ref_count > 0);

	target = lun->target;
	unit_address = lun->unit_address;
	list_remove(&target->lun_list, lun);

	pwp = ITRAN2PMC(tran);
	mutex_enter(&pwp->lock);
	phyp = target->phy;
	if (phyp) {
		mutex_enter(&phyp->phy_lock);
	}
	mutex_enter(&target->statlock);

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, target,
	    "%s: for @%s tgt 0x%p phy 0x%p", __func__, unit_address,
	    (void *)target, (void *)phyp);
	ddi_soft_state_bystr_free(lun->target->lun_sstate, unit_address);

	if (target->recover_wait) {
		mutex_exit(&target->statlock);
		if (phyp) {
			mutex_exit(&phyp->phy_lock);
		}
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, target, "%s: "
		    "Target 0x%p in device state recovery, fail tran_tgt_free",
__func__, (void *)target);
		return;
	}

	/*
	 * If this target still has a PHY pointer and that PHY's target pointer
	 * has been cleared, then that PHY has been reaped. In that case, there
	 * would be no need to decrement the reference count
	 */
	if (phyp && !IS_ROOT_PHY(phyp) && phyp->target) {
		pmcs_dec_phy_ref_count(phyp);
	}

	if (--target->ref_count == 0) {
		/*
		 * Remove this target from our list. The target soft
		 * state will remain, and the device will remain registered
		 * with the hardware unless/until we're told the device
		 * physically went away.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, target,
		    "%s: Free target 0x%p (vtgt %d)", __func__, (void *)target,
		    target->target_num);
		pwp->targets[target->target_num] = NULL;
		target->target_num = PMCS_INVALID_TARGET_NUM;
		/* If the PHY has a pointer to this target, clear it */
		if (phyp && (phyp->target == target)) {
			phyp->target = NULL;
		}
		target->phy = NULL;
		if (phyp) {
			mutex_exit(&phyp->phy_lock);
		}
		/* pmcs_destroy_target() drops target->statlock */
		pmcs_destroy_target(target);
	} else {
		mutex_exit(&target->statlock);
		if (phyp) {
			mutex_exit(&phyp->phy_lock);
		}
	}

	mutex_exit(&pwp->lock);
}

/*
 * tran_start(9E): accept a SCSI command for transport.  The command is
 * appended to the target's wait queue and the queue is run; actual
 * hardware submission happens in pmcs_scsa_wq_run_one().  Commands for
 * a blocked HBA or a draining/resetting/recovering target remain queued
 * for later processing.  Polled (FLAG_NOINTR) packets are rejected.
 */
static int
pmcs_scsa_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	pmcs_cmd_t *sp = PKT2CMD(pkt);
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	pmcs_xscsi_t *xp;
	boolean_t blocked;
	uint32_t hba_state;

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
	    "%s: pkt %p sd %p cdb0=0x%02x dl=%lu", __func__, (void *)pkt,
	    (void *)scsi_address_device(&pkt->pkt_address),
	    pkt->pkt_cdbp[0] & 0xff, pkt->pkt_dma_len);

	if (pkt->pkt_flags & FLAG_NOINTR) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL,
		    "%s: nointr pkt", __func__);
		return (TRAN_BADPKT);
	}

	sp->cmd_tag = 0;
	pkt->pkt_state = pkt->pkt_statistics = 0;
	pkt->pkt_reason = CMD_INCOMPLETE;

	mutex_enter(&pwp->lock);
	hba_state = pwp->state;
	blocked = pwp->blocked;
	mutex_exit(&pwp->lock);

	if (hba_state != STATE_RUNNING) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: hba dead", __func__);
		return (TRAN_FATAL_ERROR);
	}

	/* On success, returns with xp->statlock held */
	xp = pmcs_addr2xp(ap, NULL, sp);
	if (xp == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
		    "%s: dropping due to null target", __func__);
		goto dead_target;
	}
	ASSERT(mutex_owned(&xp->statlock));

	/*
	 * First, check to see if the device is gone.
	 */
	if (xp->dev_gone) {
		xp->actv_pkts++;
		mutex_exit(&xp->statlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, xp,
		    "%s: dropping due to dead target 0x%p",
		    __func__, (void *)xp);
		goto dead_target;
	}

	/*
	 * If we're blocked (quiesced) just return.
	 */
	if (blocked) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: hba blocked", __func__);
		xp->actv_pkts++;
		mutex_exit(&xp->statlock);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		return (TRAN_ACCEPT);
	}

	/*
	 * If we're draining or resetting, queue and return.
	 */
	if (xp->draining || xp->resetting || xp->recover_wait) {
		xp->actv_pkts++;
		mutex_exit(&xp->statlock);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, xp,
		    "%s: draining/resetting/recovering (cnt %u)",
		    __func__, xp->actv_cnt);
		/*
		 * By the time we get here, draining or
		 * resetting may have come and gone, not
		 * yet noticing that we had put something
		 * on the wait queue, so schedule a worker
		 * to look at this later.
		 */
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		return (TRAN_ACCEPT);
	}

	xp->actv_pkts++;
	mutex_exit(&xp->statlock);

	/*
	 * Queue this command to the tail of the wait queue.
	 * This keeps us from getting commands out of order.
	 */
	mutex_enter(&xp->wqlock);
	STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next);
	mutex_exit(&xp->wqlock);

	/*
	 * Now run the queue for this device.
	 */
	(void) pmcs_scsa_wq_run_one(pwp, xp);

	return (TRAN_ACCEPT);

dead_target:
	/* Complete the command immediately with CMD_DEV_GONE */
	pkt->pkt_state = STATE_GOT_BUS;
	pkt->pkt_reason = CMD_DEV_GONE;
	mutex_enter(&pwp->cq_lock);
	STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
	PMCS_CQ_RUN_LOCKED(pwp);
	mutex_exit(&pwp->cq_lock);
	return (TRAN_ACCEPT);
}

/* Return code 1 = Success */
static int
pmcs_scsa_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	pmcs_cmd_t *sp = NULL;
	pmcs_xscsi_t *xp = NULL;
	pmcs_phy_t *pptr = NULL;
	pmcs_lun_t *pmcs_lun = (pmcs_lun_t *)
	    scsi_device_hba_private_get(scsi_address_device(ap));
	uint32_t tag;
	uint64_t lun;
	pmcwork_t *pwrk;

	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: hba dead", __func__);
		return (0);
	}
	mutex_exit(&pwp->lock);

	/* A NULL pkt means abort everything outstanding for this address */
	if (pkt == NULL) {
		if (pmcs_lun == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: "
			    "No pmcs_lun_t struct to do ABORT_ALL", __func__);
			return (0);
		}
		xp = pmcs_lun->target;
		if (xp != NULL) {
			pptr = xp->phy;
		}
		if (pptr == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp, "%s: pkt is "
			    "NULL. No tgt/phy to do ABORT_ALL", __func__);
			return (0);
		}
		pmcs_lock_phy(pptr);
		if (pmcs_abort(pwp, pptr, 0, 1, 0)) {
			pptr->abort_pending = 1;
			SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
		}
		pmcs_unlock_phy(pptr);
		return (1);
	}

	sp = PKT2CMD(pkt);
	xp = sp->cmd_target;

	if (sp->cmd_lun) {
		lun = sp->cmd_lun->lun_num;
	} else {
		lun = 0;
	}
	if (xp == NULL) {
		return (0);
	}

	/*
	 * See if we have a real work structure associated with this cmd.
	 */
	pwrk = pmcs_tag2wp(pwp, sp->cmd_tag, B_FALSE);
	if (pwrk && pwrk->arg == sp) {
		tag = pwrk->htag;
		pptr = pwrk->phy;
		pwrk->timer = 0;	/* we don't time this here */
		ASSERT(pwrk->state == PMCS_WORK_STATE_ONCHIP);
		mutex_exit(&pwrk->lock);
		pmcs_lock_phy(pptr);
		if (pptr->dtype == SAS) {
			if (pmcs_ssp_tmf(pwp, pptr, SAS_ABORT_TASK, tag, lun,
			    NULL)) {
				pptr->abort_pending = 1;
				pmcs_unlock_phy(pptr);
				SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
				return (0);
			}
		} else {
			/*
			 * XXX: Was the command that was active an
			 * NCQ I/O command?
			 */
			pptr->need_rl_ext = 1;
			if (pmcs_sata_abort_ncq(pwp, pptr)) {
				pptr->abort_pending = 1;
				pmcs_unlock_phy(pptr);
				SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
				return (0);
			}
		}
		pptr->abort_pending = 1;
		pmcs_unlock_phy(pptr);
		SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
		return (1);
	}
	if (pwrk) {
		mutex_exit(&pwrk->lock);
	}
	/*
	 * Okay, those weren't the droids we were looking for.
	 * See if the command is on any of the wait queues.
 */
	mutex_enter(&xp->wqlock);
	sp = NULL;
	STAILQ_FOREACH(sp, &xp->wq, cmd_next) {
		if (sp == PKT2CMD(pkt)) {
			STAILQ_REMOVE(&xp->wq, sp, pmcs_cmd, cmd_next);
			break;
		}
	}
	mutex_exit(&xp->wqlock);
	if (sp) {
		/* Found it still waiting: complete it as aborted */
		pkt->pkt_reason = CMD_ABORTED;
		pkt->pkt_statistics |= STAT_ABORTED;
		mutex_enter(&pwp->cq_lock);
		STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
		PMCS_CQ_RUN_LOCKED(pwp);
		mutex_exit(&pwp->cq_lock);
		return (1);
	}
	return (0);
}

/*
 * SCSA reset functions
 */

/*
 * tran_reset(9E): reset a LUN or target via pmcs_reset_dev().
 * RESET_ALL is accepted but does nothing (returns failure).
 * Returns 1 on success, 0 on failure.
 */
static int
pmcs_scsa_reset(struct scsi_address *ap, int level)
{
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	pmcs_phy_t *pptr;
	pmcs_xscsi_t *xp;
	uint64_t lun = (uint64_t)-1, *lp = NULL;
	int rval;

	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: hba dead", __func__);
		return (0);
	}
	mutex_exit(&pwp->lock);

	switch (level) {
	case RESET_ALL:
		rval = 0;
		break;
	case RESET_LUN:
		/*
		 * Point lp at lun so that pmcs_addr2xp
		 * will fill out the 64 bit lun number.
		 */
		lp = &lun;
		/* FALLTHROUGH */
	case RESET_TARGET:
		/* On success, returns with xp->statlock held */
		xp = pmcs_addr2xp(ap, lp, NULL);
		if (xp == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
			    "%s: no xp found for this scsi address", __func__);
			return (0);
		}

		if (xp->dev_gone) {
			mutex_exit(&xp->statlock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
			    "%s: Target 0x%p has gone away", __func__,
			    (void *)xp);
			return (0);
		}

		/*
		 * If we're already performing this action, or if device
		 * state recovery is already running, just return failure.
		 */
		if (xp->resetting || xp->recover_wait) {
			mutex_exit(&xp->statlock);
			return (0);
		}
		xp->reset_wait = 0;
		xp->reset_success = 0;
		xp->resetting = 1;
		pptr = xp->phy;
		mutex_exit(&xp->statlock);

		if (pmcs_reset_dev(pwp, pptr, lun)) {
			rval = 0;
		} else {
			rval = 1;
		}

		mutex_enter(&xp->statlock);
		if (rval == 1) {
			xp->reset_success = 1;
		}
		/* Wake anyone waiting on this reset to finish */
		if (xp->reset_wait) {
			xp->reset_wait = 0;
			cv_signal(&xp->reset_cv);
		}
		xp->resetting = 0;
		mutex_exit(&xp->statlock);
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		break;
	default:
		rval = 0;
		break;
	}

	return (rval);
}

/*
 * tran_reset_notify(9E): register/unregister a reset notification
 * callback using the common SCSA helper.
 */
static int
pmcs_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
	    &pwp->lock, &pwp->reset_notify_listf));
}


/*
 * Common backend for tran_getcap/tran_setcap.  Returns -1 for an unknown
 * capability string or when no target exists for this address; otherwise
 * returns the capability value (get) or the set result.
 */
static int
pmcs_cap(struct scsi_address *ap, char *cap, int val, int tonly, int set)
{
	_NOTE(ARGUNUSED(val, tonly));
	int cidx, rval = 0;
	pmcs_xscsi_t *xp;

	cidx = scsi_hba_lookup_capstr(cap);
	if (cidx == -1) {
		return (-1);
	}

	/* On success, returns with xp->statlock held */
	xp = pmcs_addr2xp(ap, NULL, NULL);
	if (xp == NULL) {
		return (-1);
	}

	switch (cidx) {
	case SCSI_CAP_DMA_MAX:
	case SCSI_CAP_INITIATOR_ID:
		if (set == 0) {
			rval = INT_MAX;	/* argh */
		}
		break;
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_ARQ:
	case SCSI_CAP_UNTAGGED_QING:
		if (set == 0) {
			rval = 1;
		}
		break;

	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;

	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_RESET_NOTIFICATION:
	case SCSI_CAP_QFULL_RETRIES:
	case SCSI_CAP_QFULL_RETRY_INTERVAL:
		break;
	case SCSI_CAP_SCSI_VERSION:
		if (set == 0) {
			rval = SCSI_VERSION_3;
		}
		break;
	case SCSI_CAP_INTERCONNECT_TYPE:
		if (set) {
			break;
		}
		if (xp->phy_addressable) {
			rval = INTERCONNECT_SATA;
		} else {
			rval = INTERCONNECT_SAS;
		}
		break;
	case SCSI_CAP_CDB_LEN:
		if (set == 0) {
			rval = 16;
		}
		break;
	case SCSI_CAP_LUN_RESET:
		if (set) {
			break;
		}
		if (xp->dtype == SATA) {
			rval = 0;
		} else {
			rval = 1;
		}
		break;
	default:
		rval = -1;
		break;
	}
	mutex_exit(&xp->statlock);
	pmcs_prt(ADDR2PMC(ap), PMCS_PRT_DEBUG3, NULL, NULL,
	    "%s: cap %s val %d set %d rval %d",
	    __func__, cap, val, set, rval);
	return (rval);
}

/*
 * Returns with statlock held if the xp is found.
 * Fills in pmcs_cmd_t with values if pmcs_cmd_t pointer non-NULL.
 */
static pmcs_xscsi_t *
pmcs_addr2xp(struct scsi_address *ap, uint64_t *lp, pmcs_cmd_t *sp)
{
	pmcs_xscsi_t *xp;
	pmcs_lun_t *lun = (pmcs_lun_t *)
	    scsi_device_hba_private_get(scsi_address_device(ap));

	if ((lun == NULL) || (lun->target == NULL)) {
		return (NULL);
	}
	xp = lun->target;
	mutex_enter(&xp->statlock);

	if (xp->dev_gone || (xp->phy == NULL)) {
		/*
		 * This may be a retried packet, so it's possible cmd_target
		 * and cmd_lun may still be populated.  Clear them.
		 */
		if (sp != NULL) {
			sp->cmd_target = NULL;
			sp->cmd_lun = NULL;
		}
		mutex_exit(&xp->statlock);
		return (NULL);
	}

	if (sp != NULL) {
		sp->cmd_target = xp;
		sp->cmd_lun = lun;
	}
	if (lp) {
		*lp = lun->lun_num;
	}
	return (xp);
}

/* tran_getcap(9E): thin wrapper around pmcs_cap() in get mode. */
static int
pmcs_scsa_getcap(struct scsi_address *ap, char *cap, int whom)
{
	int r;
	if (cap == NULL) {
		return (-1);
	}
	r = pmcs_cap(ap, cap, 0, whom, 0);
	return (r);
}

/* tran_setcap(9E): thin wrapper around pmcs_cap() in set mode. */
static int
pmcs_scsa_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	int r;
	if (cap == NULL) {
		return (-1);
	}
	r = pmcs_cap(ap, cap, value, whom, 1);
	return (r);
}

/* tran_setup_pkt(9E): initialize the per-packet pmcs_cmd_t. */
static int
pmcs_scsa_setup_pkt(struct scsi_pkt *pkt, int (*callback)(caddr_t),
    caddr_t cbarg)
{
	_NOTE(ARGUNUSED(callback, cbarg));
	pmcs_cmd_t *sp = pkt->pkt_ha_private;

	bzero(sp, sizeof (pmcs_cmd_t));
	sp->cmd_pkt = pkt;
	return (0);
}

/* tran_teardown_pkt(9E): drop target/LUN references from the command. */
static void
pmcs_scsa_teardown_pkt(struct scsi_pkt *pkt)
{
	pmcs_cmd_t *sp = pkt->pkt_ha_private;
	sp->cmd_target = NULL;
	sp->cmd_lun = NULL;
}

/*
 * smp_tran_start: issue an SMP passthrough request to an expander and
 * wait for the response.  The request and response are staged through
 * the HBA scratch area (request at offset 0, response at rdoff).
 * Returns DDI_SUCCESS/DDI_FAILURE, with smp_pkt_reason set on failure.
 */
static int
pmcs_smp_start(struct smp_pkt *smp_pkt)
{
	struct pmcwork *pwrk;
	pmcs_iport_t *iport;
	const uint_t rdoff = SAS_SMP_MAX_PAYLOAD;
	uint32_t msg[PMCS_MSG_SIZE], *ptr, htag, status;
	uint64_t wwn;
	pmcs_hw_t *pwp;
	pmcs_phy_t *pptr;
	pmcs_xscsi_t *xp;
	uint_t reqsz, rspsz, will_retry;
	int result;

	pwp = smp_pkt->smp_pkt_address->smp_a_hba_tran->smp_tran_hba_private;
	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);

	pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, NULL,
	    "%s: starting for wwn 0x%" PRIx64, __func__, wwn);

	will_retry = smp_pkt->smp_pkt_will_retry;

	(void) pmcs_acquire_scratch(pwp, B_TRUE);
	reqsz = smp_pkt->smp_pkt_reqsize;
	if (reqsz >
SAS_SMP_MAX_PAYLOAD) {
		reqsz = SAS_SMP_MAX_PAYLOAD;
	}
	(void) memcpy(pwp->scratch, smp_pkt->smp_pkt_req, reqsz);

	rspsz = smp_pkt->smp_pkt_rspsize;
	if (rspsz > SAS_SMP_MAX_PAYLOAD) {
		rspsz = SAS_SMP_MAX_PAYLOAD;
	}

	/*
	 * The request size from the SMP driver always includes 4 bytes
	 * for the CRC. The PMCS chip, however, doesn't want to see those
	 * counts as part of the transfer size.
	 */
	reqsz -= 4;

	pptr = pmcs_find_phy_by_wwn(pwp, wwn);
	/* PHY is now locked */
	if (pptr == NULL || pptr->dtype != EXPANDER) {
		if (pptr) {
			pmcs_unlock_phy(pptr);
		}
		pmcs_release_scratch(pwp);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: could not find phy", __func__);
		smp_pkt->smp_pkt_reason = ENXIO;
		return (DDI_FAILURE);
	}

	if ((pptr->iport == NULL) || !pptr->valid_device_id) {
		pmcs_unlock_phy(pptr);
		pmcs_release_scratch(pwp);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, pptr->target,
		    "%s: Can't reach PHY %s", __func__, pptr->path);
		smp_pkt->smp_pkt_reason = ENXIO;
		return (DDI_FAILURE);
	}

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		pmcs_unlock_phy(pptr);
		pmcs_release_scratch(pwp);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, NULL,
		    "%s: could not get work structure", __func__);
		smp_pkt->smp_pkt_reason = will_retry ? EAGAIN : EBUSY;
		return (DDI_FAILURE);
	}

	pwrk->arg = msg;
	pwrk->dtype = EXPANDER;
	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		pmcs_pwork(pwp, pwrk);
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_unlock_phy(pptr);
		pmcs_release_scratch(pwp);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: could not get IQ entry", __func__);
		smp_pkt->smp_pkt_reason = will_retry ? EAGAIN : EBUSY;
		return (DDI_FAILURE);
	}

	/* Build the PMCIN_SMP_REQUEST with indirect request/response SGEs */
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	msg[3] = LE_32(SMP_INDIRECT_RESPONSE | SMP_INDIRECT_REQUEST);
	msg[8] = LE_32(DWORD0(pwp->scratch_dma));
	msg[9] = LE_32(DWORD1(pwp->scratch_dma));
	msg[10] = LE_32(reqsz);
	msg[11] = 0;
	msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff));
	msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff));
	msg[14] = LE_32(rspsz);
	msg[15] = 0;

	COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE);

	pmcs_hold_iport(pptr->iport);
	iport = pptr->iport;
	pmcs_smp_acquire(iport);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	htag = pwrk->htag;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, smp_pkt->smp_pkt_timeout * 1000, result);
	pmcs_pwork(pwp, pwrk);
	pmcs_lock_phy(pptr);

	/* Non-zero result means the wait timed out; try to abort the op */
	if (result) {
		pmcs_timed_out(pwp, htag, __func__);
		if (pmcs_abort(pwp, pptr, htag, 0, 0)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
			    "%s: Unable to issue SMP ABORT for htag 0x%08x",
			    __func__, htag);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
			    "%s: Issuing SMP ABORT for htag 0x%08x",
			    __func__, htag);
		}
		pmcs_smp_release(iport);
		pmcs_rele_iport(iport);
		pmcs_unlock_phy(pptr);
		pmcs_release_scratch(pwp);
		smp_pkt->smp_pkt_reason = ETIMEDOUT;
		return (DDI_FAILURE);
	}
	pmcs_smp_release(iport);
	pmcs_rele_iport(iport);

	/* pwrk->arg pointed at msg, so the completion status lands here */
	status = LE_32(msg[2]);
	if (status == PMCOUT_STATUS_OVERFLOW) {
		status = PMCOUT_STATUS_OK;
		smp_pkt->smp_pkt_reason = EOVERFLOW;
	}
	if (status != PMCOUT_STATUS_OK) {
		const char *emsg = pmcs_status_str(status);
		if (emsg == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
			    "SMP operation failed (0x%x)", status);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, pptr, pptr->target,
			    "SMP operation failed (%s)", emsg);
		}

		if ((status == PMCOUT_STATUS_ERROR_HW_TIMEOUT) ||
		    (status == PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT)) {
			smp_pkt->smp_pkt_reason =
			    will_retry ? EAGAIN : ETIMEDOUT;
			result = DDI_FAILURE;
		} else if (status ==
		    PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS) {
			xp = pptr->target;
			if (xp == NULL) {
				smp_pkt->smp_pkt_reason = EIO;
				result = DDI_FAILURE;
				goto out;
			}
			if (xp->dev_state !=
			    PMCS_DEVICE_STATE_NON_OPERATIONAL) {
				xp->dev_state =
				    PMCS_DEVICE_STATE_NON_OPERATIONAL;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, xp->phy,
				    xp, "%s: Got _IT_NEXUS_LOSS SMP status. "
				    "Tgt(0x%p) dev_state set to "
				    "_NON_OPERATIONAL", __func__,
				    (void *)xp);
			}
			/* ABORT any pending commands related to this device */
			if (pmcs_abort(pwp, pptr, pptr->device_id, 1, 1) != 0) {
				pptr->abort_pending = 1;
				smp_pkt->smp_pkt_reason = EIO;
				result = DDI_FAILURE;
			}
		} else {
			smp_pkt->smp_pkt_reason = will_retry ? EAGAIN : EIO;
			result = DDI_FAILURE;
		}
	} else {
		/* Success: copy the staged response back to the caller */
		(void) memcpy(smp_pkt->smp_pkt_rsp,
		    &((uint8_t *)pwp->scratch)[rdoff], rspsz);
		if (smp_pkt->smp_pkt_reason == EOVERFLOW) {
			result = DDI_FAILURE;
		} else {
			result = DDI_SUCCESS;
		}
	}
out:
	pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, pptr->target,
	    "%s: done for wwn 0x%" PRIx64, __func__, wwn);

	pmcs_unlock_phy(pptr);
	pmcs_release_scratch(pwp);
	return (result);
}

/*
 * smp_tran_init: per-SMP-device initialization for an iport child.
 */
static int
pmcs_smp_init(dev_info_t *self, dev_info_t *child,
    smp_hba_tran_t *tran, smp_device_t *smp_sd)
{
	_NOTE(ARGUNUSED(tran, smp_sd));
	pmcs_iport_t *iport;
	pmcs_hw_t *pwp;
	pmcs_xscsi_t *tgt;
	pmcs_phy_t *phy, *pphy;
	uint64_t wwn;
	char *addr, *tgt_port;
	int ua_form = 1;

	iport = ddi_get_soft_state(pmcs_iport_softstate,
	    ddi_get_instance(self));
	ASSERT(iport);
	if (iport == NULL)
		return (DDI_FAILURE);
	pwp = iport->pwp;
	ASSERT(pwp);
	if (pwp == NULL)
		return (DDI_FAILURE);

	/* Get "target-port" prop from devinfo node */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    SCSI_ADDR_PROP_TARGET_PORT, &tgt_port) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed to "
		    "lookup prop ("SCSI_ADDR_PROP_TARGET_PORT")", __func__);
		/* Don't fail _smp_init() because we couldn't get/set a prop */
		return (DDI_SUCCESS);
	}

	/*
	 * Validate that this tran_tgt_init is for an active iport.
*/
	if (iport->ua_state == UA_INACTIVE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: Init on inactive iport for '%s'", __func__, tgt_port);
		ddi_prop_free(tgt_port);
		return (DDI_FAILURE);
	}

	mutex_enter(&pwp->lock);

	/* Retrieve softstate using unit-address */
	tgt = pmcs_get_target(iport, tgt_port, B_TRUE);
	if (tgt == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: tgt softstate not found", __func__);
		ddi_prop_free(tgt_port);
		mutex_exit(&pwp->lock);
		return (DDI_FAILURE);
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt, "%s: %s (%s)",
	    __func__, ddi_get_name(child), tgt_port);

	mutex_enter(&tgt->statlock);
	phy = tgt->phy;
	ASSERT(mutex_owned(&phy->phy_lock));

	if (IS_ROOT_PHY(phy)) {
		/* Expander attached to HBA - don't ref_count it */
		wwn = pwp->sas_wwns[0];
	} else {
		pmcs_inc_phy_ref_count(phy);

		/*
		 * Parent (in topology) is also an expander
		 * Now that we've increased the ref count on phy, it's OK
		 * to drop the lock so we can acquire the parent's lock.
		 */
		pphy = phy->parent;
		mutex_exit(&tgt->statlock);
		pmcs_unlock_phy(phy);
		pmcs_lock_phy(pphy);
		wwn = pmcs_barray2wwn(pphy->sas_address);
		pmcs_unlock_phy(pphy);
		pmcs_lock_phy(phy);
		mutex_enter(&tgt->statlock);
	}

	/*
	 * If this is the 1st smp_init, add this to our list.
	 */
	if (tgt->target_num == PMCS_INVALID_TARGET_NUM) {
		int target;
		/* Find the first free slot in the target array. */
		for (target = 0; target < pwp->max_dev; target++) {
			if (pwp->targets[target] != NULL) {
				continue;
			}

			pwp->targets[target] = tgt;
			tgt->target_num = (uint16_t)target;
			tgt->assigned = 1;
			tgt->dev_state = PMCS_DEVICE_STATE_OPERATIONAL;
			break;
		}

		if (target == pwp->max_dev) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, NULL,
			    "Target list full.");
			goto smp_init_fail;
		}
	}

	if (!pmcs_assign_device(pwp, tgt)) {
		pwp->targets[tgt->target_num] = NULL;
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt,
		    "%s: pmcs_assign_device failed for target 0x%p",
		    __func__, (void *)tgt);
		goto smp_init_fail;
	}

	/*
	 * Update the attached port and target port pm properties
	 */
	tgt->smpd = smp_sd;

	pmcs_unlock_phy(phy);
	mutex_exit(&pwp->lock);

	tgt->ref_count++;
	tgt->dtype = phy->dtype;
	mutex_exit(&tgt->statlock);

	pmcs_update_phy_pm_props(phy, 0, 0, B_TRUE);

	addr = scsi_wwn_to_wwnstr(wwn, ua_form, NULL);
	if (smp_device_prop_update_string(smp_sd, SCSI_ADDR_PROP_ATTACHED_PORT,
	    addr) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed to set "
		    "prop ("SCSI_ADDR_PROP_ATTACHED_PORT")", __func__);
	}
	(void) scsi_free_wwnstr(addr);
	ddi_prop_free(tgt_port);
	return (DDI_SUCCESS);

smp_init_fail:
	/* Undo the partial registration done above. */
	tgt->phy = NULL;
	tgt->target_num = PMCS_INVALID_TARGET_NUM;
	phy->target = NULL;
	if (!IS_ROOT_PHY(phy)) {
		pmcs_dec_phy_ref_count(phy);
	}
	mutex_exit(&tgt->statlock);
	pmcs_unlock_phy(phy);
	mutex_exit(&pwp->lock);
	ddi_soft_state_bystr_free(iport->tgt_sstate, tgt->unit_address);
	ddi_prop_free(tgt_port);
	return (DDI_FAILURE);
}

/*
 * SMP target teardown: drops the reference taken by pmcs_smp_init and,
 * on last release, removes the target from the driver's target list.
 */
static void
pmcs_smp_free(dev_info_t *self, dev_info_t *child,
smp_hba_tran_t *tran, smp_device_t *smp)
{
	_NOTE(ARGUNUSED(tran, smp));
	pmcs_iport_t *iport;
	pmcs_hw_t *pwp;
	pmcs_xscsi_t *tgt;
	pmcs_phy_t *phyp;
	char *tgt_port;

	iport = ddi_get_soft_state(pmcs_iport_softstate,
	    ddi_get_instance(self));
	ASSERT(iport);
	if (iport == NULL)
		return;

	pwp = iport->pwp;
	if (pwp == NULL)
		return;
	ASSERT(pwp);

	/* Get "target-port" prop from devinfo node */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    SCSI_ADDR_PROP_TARGET_PORT, &tgt_port) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s: Failed to "
		    "lookup prop ("SCSI_ADDR_PROP_TARGET_PORT")", __func__);
		return;
	}

	/* Retrieve softstate using unit-address */
	mutex_enter(&pwp->lock);
	tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, tgt_port);
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt, "%s: %s (%s)", __func__,
	    ddi_get_name(child), tgt_port);
	ddi_prop_free(tgt_port);

	if (tgt == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
		    "%s: tgt softstate not found", __func__);
		mutex_exit(&pwp->lock);
		return;
	}

	phyp = tgt->phy;
	if (phyp) {
		mutex_enter(&phyp->phy_lock);
		if (!IS_ROOT_PHY(phyp)) {
			/* Balance the ref taken in pmcs_smp_init */
			pmcs_dec_phy_ref_count(phyp);
		}
	}
	mutex_enter(&tgt->statlock);

	if (--tgt->ref_count == 0) {
		/*
		 * Remove this target from our list. The softstate
		 * will remain, and the device will remain registered
		 * with the hardware unless/until we're told that the
		 * device physically went away.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, NULL, tgt,
		    "Removing target 0x%p (vtgt %d) from target list",
		    (void *)tgt, tgt->target_num);
		pwp->targets[tgt->target_num] = NULL;
		tgt->target_num = PMCS_INVALID_TARGET_NUM;
		/* If the PHY has a pointer to this target, clear it */
		if (phyp && (phyp->target == tgt)) {
			phyp->target = NULL;
		}
		tgt->phy = NULL;
		/* NOTE: pmcs_destroy_target presumably drops statlock */
		pmcs_destroy_target(tgt);
	} else {
		mutex_exit(&tgt->statlock);
	}

	if (phyp) {
		mutex_exit(&phyp->phy_lock);
	}
	mutex_exit(&pwp->lock);
}

/*
 * scsi_hba quiesce entry point: block new command submission and wait
 * (on drain_cv) until every target's active command count drains to zero.
 * Returns 0 on success, -1 if the HBA isn't running.
 */
static int
pmcs_scsi_quiesce(dev_info_t *dip)
{
	pmcs_hw_t *pwp;
	int totactive = -1;
	pmcs_xscsi_t *xp;
	uint16_t target;

	if (ddi_get_soft_state(pmcs_iport_softstate, ddi_get_instance(dip)))
		return (0);		/* iport */

	pwp = ddi_get_soft_state(pmcs_softc_state, ddi_get_instance(dip));
	if (pwp == NULL) {
		return (-1);
	}
	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		return (-1);
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s called", __func__);
	pwp->quiesced = pwp->blocked = 1;
	while (totactive) {
		totactive = 0;
		for (target = 0; target < pwp->max_dev; target++) {
			xp = pwp->targets[target];
			if (xp == NULL) {
				continue;
			}
			mutex_enter(&xp->statlock);
			if (xp->actv_cnt) {
				totactive += xp->actv_cnt;
				xp->draining = 1;
			}
			mutex_exit(&xp->statlock);
		}
		if (totactive) {
			cv_wait(&pwp->drain_cv, &pwp->lock);
		}
		/*
		 * The pwp->blocked may have been reset.
* e.g a SCSI bus reset
		 */
		pwp->blocked = 1;
	}

	/* Drain complete: clear the per-target draining flags. */
	for (target = 0; target < pwp->max_dev; target++) {
		xp = pwp->targets[target];
		if (xp == NULL) {
			continue;
		}
		mutex_enter(&xp->statlock);
		xp->draining = 0;
		mutex_exit(&xp->statlock);
	}

	mutex_exit(&pwp->lock);
	if (totactive == 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
		    "%s drain complete", __func__);
	}
	return (0);
}

/*
 * scsi_hba unquiesce entry point: clear blocked/quiesced and restart
 * the wait and completion queues.
 */
static int
pmcs_scsi_unquiesce(dev_info_t *dip)
{
	pmcs_hw_t *pwp;

	if (ddi_get_soft_state(pmcs_iport_softstate, ddi_get_instance(dip)))
		return (0);		/* iport */

	pwp = ddi_get_soft_state(pmcs_softc_state, ddi_get_instance(dip));
	if (pwp == NULL) {
		return (-1);
	}
	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		return (-1);
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL, "%s called", __func__);
	pwp->blocked = pwp->quiesced = 0;
	mutex_exit(&pwp->lock);

	/*
	 * Run all pending commands.
	 */
	pmcs_scsa_wq_run(pwp);

	/*
	 * Complete all completed commands.
	 * This also unlocks us.
	 */
	PMCS_CQ_RUN(pwp);
	return (0);
}

/*
 * Start commands for a particular device
 * If the actual start of a command fails, return B_FALSE. Any other result
 * is a B_TRUE return.
 */
boolean_t
pmcs_scsa_wq_run_one(pmcs_hw_t *pwp, pmcs_xscsi_t *xp)
{
	pmcs_cmd_t *sp;
	pmcs_phy_t *phyp;
	pmcwork_t *pwrk;
	boolean_t run_one, blocked;
	int rval;

	/*
	 * First, check to see if we're blocked or resource limited
	 */
	mutex_enter(&pwp->lock);
	blocked = pwp->blocked;
	/*
	 * If resource_limited is set, we're resource constrained and
	 * we will run only one work request for this target.
	 */
	run_one = pwp->resource_limited;
	mutex_exit(&pwp->lock);

	if (blocked) {
		/* Queues will get restarted when we get unblocked */
		return (B_TRUE);
	}

	/*
	 * Might as well verify the queue is not empty before moving on
	 */
	mutex_enter(&xp->wqlock);
	if (STAILQ_EMPTY(&xp->wq)) {
		mutex_exit(&xp->wqlock);
		return (B_TRUE);
	}
	mutex_exit(&xp->wqlock);

	/*
	 * If we're draining or resetting, just reschedule work queue and bail.
	 */
	mutex_enter(&xp->statlock);
	if (xp->draining || xp->resetting || xp->special_running ||
	    xp->special_needed) {
		mutex_exit(&xp->statlock);
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		return (B_TRUE);
	}

	/*
	 * Next, check to see if the target is gone.
	 */
	if (xp->dev_gone) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
		    "%s: Flushing wait queue for dead tgt 0x%p", __func__,
		    (void *)xp);
		pmcs_flush_target_queues(pwp, xp, PMCS_TGT_WAIT_QUEUE);
		mutex_exit(&xp->statlock);
		return (B_TRUE);
	}

	/*
	 * Increment the PHY's ref_count now so we know it won't go away
	 * after we drop the target lock. Drop it before returning. If the
	 * PHY dies, the commands we attempt to send will fail, but at least
	 * we know we have a real PHY pointer.
	 */
	phyp = xp->phy;
	pmcs_inc_phy_ref_count(phyp);
	mutex_exit(&xp->statlock);

	mutex_enter(&xp->wqlock);
	while ((sp = STAILQ_FIRST(&xp->wq)) != NULL) {
		pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_CBACK, phyp);
		if (pwrk == NULL) {
			/*
			 * Out of work structures: mark resource_limited and
			 * reschedule so this queue is retried later.
			 */
			mutex_exit(&xp->wqlock);
			mutex_enter(&pwp->lock);
			if (pwp->resource_limited == 0) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, NULL,
				    "%s: out of work structures", __func__);
			}
			pwp->resource_limited = 1;
			SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
			mutex_exit(&pwp->lock);
			return (B_FALSE);
		}
		STAILQ_REMOVE_HEAD(&xp->wq, cmd_next);
		mutex_exit(&xp->wqlock);

		pwrk->xp = xp;
		pwrk->arg = sp;
		pwrk->timer = 0;
		sp->cmd_tag = pwrk->htag;

		pwrk->dtype = xp->dtype;

		if (xp->dtype == SAS) {
			pwrk->ptr = (void *) pmcs_SAS_done;
			if ((rval = pmcs_SAS_run(sp, pwrk)) != 0) {
				/*
				 * FAIL_RES_CMP means the command was already
				 * completed back to SCSA; keep its tag intact.
				 */
				if (rval != PMCS_WQ_RUN_FAIL_RES_CMP) {
					sp->cmd_tag = NULL;
				}
				pmcs_dec_phy_ref_count(phyp);
				pmcs_pwork(pwp, pwrk);
				SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
				if (rval == PMCS_WQ_RUN_FAIL_RES) {
					return (B_FALSE);
				} else {
					return (B_TRUE);
				}
			}
		} else {
			ASSERT(xp->dtype == SATA);
			pwrk->ptr = (void *) pmcs_SATA_done;
			if ((rval = pmcs_SATA_run(sp, pwrk)) != 0) {
				sp->cmd_tag = NULL;
				pmcs_dec_phy_ref_count(phyp);
				pmcs_pwork(pwp, pwrk);
				SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
				if (rval == PMCS_WQ_RUN_FAIL_RES) {
					return (B_FALSE);
				} else {
					return (B_TRUE);
				}
			}
		}

		if (run_one) {
			goto wq_out;
		}
		mutex_enter(&xp->wqlock);
	}

	mutex_exit(&xp->wqlock);

wq_out:
	pmcs_dec_phy_ref_count(phyp);
	return (B_TRUE);
}

/*
 * Start commands for all devices.
*/
void
pmcs_scsa_wq_run(pmcs_hw_t *pwp)
{
	pmcs_xscsi_t *xp;
	uint16_t target_start, target;
	boolean_t rval = B_TRUE;

	/*
	 * Round-robin over the target array, starting where the previous
	 * pass left off (last_wq_dev) so no target is starved.
	 */
	mutex_enter(&pwp->lock);
	target_start = pwp->last_wq_dev;
	target = target_start;

	do {
		xp = pwp->targets[target];
		if ((xp == NULL) || (STAILQ_EMPTY(&xp->wq))) {
			if (++target == pwp->max_dev) {
				target = 0;
			}
			continue;
		}

		mutex_exit(&pwp->lock);
		rval = pmcs_scsa_wq_run_one(pwp, xp);
		mutex_enter(&pwp->lock);

		if (rval == B_FALSE) {
			break;
		}

		if (++target == pwp->max_dev) {
			target = 0;
		}
	} while (target != target_start);

	if (rval) {
		/*
		 * If we were resource limited, but apparently are not now,
		 * reschedule the work queues anyway.
		 */
		if (pwp->resource_limited) {
			SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		}
		pwp->resource_limited = 0; /* Not resource-constrained */
	} else {
		/*
		 * Give everybody a chance, and reschedule to run the queues
		 * again as long as we're limited.
		 */
		pwp->resource_limited = 1;
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
	}

	pwp->last_wq_dev = target;
	mutex_exit(&pwp->lock);
}

/*
 * Pull the completion queue, drop the lock and complete all elements.
 */

void
pmcs_scsa_cq_run(void *arg)
{
	pmcs_cq_thr_info_t *cqti = (pmcs_cq_thr_info_t *)arg;
	pmcs_hw_t *pwp = cqti->cq_pwp;
	pmcs_cmd_t *sp, *nxt;
	struct scsi_pkt *pkt;
	pmcs_xscsi_t *tgt;
	pmcs_iocomp_cb_t *ioccb, *ioccb_next;
	pmcs_cb_t callback;

	DTRACE_PROBE1(pmcs__scsa__cq__run__start, pmcs_cq_thr_info_t *, cqti);

	mutex_enter(&pwp->cq_lock);

	while (!pwp->cq_info.cq_stop) {
		/*
		 * First, check the I/O completion callback queue.
		 */
		ioccb = pwp->iocomp_cb_head;
		pwp->iocomp_cb_head = NULL;
		pwp->iocomp_cb_tail = NULL;
		mutex_exit(&pwp->cq_lock);

		while (ioccb) {
			/*
			 * Grab the lock on the work structure. The callback
			 * routine is responsible for clearing it.
			 */
			mutex_enter(&ioccb->pwrk->lock);
			ioccb_next = ioccb->next;
			callback = (pmcs_cb_t)ioccb->pwrk->ptr;
			(*callback)(pwp, ioccb->pwrk,
			    (uint32_t *)((void *)ioccb->iomb));
			kmem_cache_free(pwp->iocomp_cb_cache, ioccb);
			ioccb = ioccb_next;
		}

		/*
		 * Next, run the completion queue
		 */
		mutex_enter(&pwp->cq_lock);
		sp = STAILQ_FIRST(&pwp->cq);
		STAILQ_INIT(&pwp->cq);
		mutex_exit(&pwp->cq_lock);

		DTRACE_PROBE1(pmcs__scsa__cq__run__start__loop,
		    pmcs_cq_thr_info_t *, cqti);

		if (sp && pmcs_check_acc_dma_handle(pwp)) {
			ddi_fm_service_impact(pwp->dip, DDI_SERVICE_UNAFFECTED);
		}

		/* Complete each packet back to SCSA, dropping all locks. */
		while (sp) {
			nxt = STAILQ_NEXT(sp, cmd_next);
			pkt = CMD2PKT(sp);
			tgt = sp->cmd_target;
			pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, tgt,
			    "%s: calling completion on %p for tgt %p", __func__,
			    (void *)sp, (void *)tgt);
			if (tgt) {
				mutex_enter(&tgt->statlock);
				ASSERT(tgt->actv_pkts != 0);
				tgt->actv_pkts--;
				mutex_exit(&tgt->statlock);
			}
			scsi_hba_pkt_comp(pkt);
			sp = nxt;
		}

		DTRACE_PROBE1(pmcs__scsa__cq__run__end__loop,
		    pmcs_cq_thr_info_t *, cqti);

		/*
		 * Check if there are more completions to do. If so, and we've
		 * not been told to stop, skip the wait and cycle through again.
*/
		mutex_enter(&pwp->cq_lock);
		if ((pwp->iocomp_cb_head == NULL) && STAILQ_EMPTY(&pwp->cq) &&
		    !pwp->cq_info.cq_stop) {
			/* Nothing pending: sleep until signalled. */
			mutex_exit(&pwp->cq_lock);
			mutex_enter(&cqti->cq_thr_lock);
			cv_wait(&cqti->cq_cv, &cqti->cq_thr_lock);
			mutex_exit(&cqti->cq_thr_lock);
			mutex_enter(&pwp->cq_lock);
		}
	}

	mutex_exit(&pwp->cq_lock);
	DTRACE_PROBE1(pmcs__scsa__cq__run__stop, pmcs_cq_thr_info_t *, cqti);
	thread_exit();
}

/*
 * Run a SAS command. Called with pwrk->lock held, returns unlocked.
 */
static int
pmcs_SAS_run(pmcs_cmd_t *sp, pmcwork_t *pwrk)
{
	pmcs_hw_t *pwp = CMD2PMC(sp);
	struct scsi_pkt *pkt = CMD2PKT(sp);
	pmcs_xscsi_t *xp = pwrk->xp;
	uint32_t iq, lhtag, *ptr;
	sas_ssp_cmd_iu_t sc;
	int sp_pkt_time = 0;

	ASSERT(xp != NULL);
	mutex_enter(&xp->statlock);
	if (!xp->assigned) {
		mutex_exit(&xp->statlock);
		return (PMCS_WQ_RUN_FAIL_OTHER);
	}
	if ((xp->actv_cnt >= xp->qdepth) || xp->recover_wait) {
		/* Queue full or recovering: push back on the wait queue. */
		mutex_exit(&xp->statlock);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		return (PMCS_WQ_RUN_FAIL_OTHER);
	}
	/*
	 * NOTE(review): on success this appears to return with
	 * iqp_lock[iq] held (see the mutex_exit on the dma_load
	 * failure path below); INC_IQ_ENTRY later submits/releases.
	 */
	GET_IO_IQ_ENTRY(pwp, ptr, pwrk->phy->device_id, iq);
	if (ptr == NULL) {
		mutex_exit(&xp->statlock);
		/*
		 * This is a temporary failure not likely to unblocked by
		 * commands completing as the test for scheduling the
		 * restart of work is a per-device test.
		 */
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
		    "%s: Failed to get IO IQ entry for tgt %d",
		    __func__, xp->target_num);
		return (PMCS_WQ_RUN_FAIL_RES);

	}

	/* Build the SSP_INI_IO_START IOMB. */
	ptr[0] =
	    LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE, PMCIN_SSP_INI_IO_START));
	ptr[1] = LE_32(pwrk->htag);
	ptr[2] = LE_32(pwrk->phy->device_id);
	ptr[3] = LE_32(pkt->pkt_dma_len);
	if (ptr[3]) {
		ASSERT(pkt->pkt_numcookies);
		if (pkt->pkt_dma_flags & DDI_DMA_READ) {
			ptr[4] = LE_32(PMCIN_DATADIR_2_INI);
		} else {
			ptr[4] = LE_32(PMCIN_DATADIR_2_DEV);
		}
		if (pmcs_dma_load(pwp, sp, ptr)) {
			mutex_exit(&pwp->iqp_lock[iq]);
			mutex_exit(&xp->statlock);
			mutex_enter(&xp->wqlock);
			if (STAILQ_EMPTY(&xp->wq)) {
				/* Requeue and retry later. */
				STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
				mutex_exit(&xp->wqlock);
				return (PMCS_WQ_RUN_FAIL_RES);
			} else {
				/*
				 * Others are already waiting: complete this
				 * one back to SCSA with QFULL status.
				 */
				mutex_exit(&xp->wqlock);
				CMD2PKT(sp)->pkt_scbp[0] = STATUS_QFULL;
				CMD2PKT(sp)->pkt_reason = CMD_CMPLT;
				CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS |
				    STATE_GOT_TARGET | STATE_SENT_CMD |
				    STATE_GOT_STATUS;
				sp->cmd_tag = NULL;
				mutex_enter(&pwp->cq_lock);
				STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
				PMCS_CQ_RUN_LOCKED(pwp);
				mutex_exit(&pwp->cq_lock);
				pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
				    "%s: Failed to dma_load for tgt %d (QF)",
				    __func__, xp->target_num);
				return (PMCS_WQ_RUN_FAIL_RES_CMP);
			}
		}
	} else {
		ptr[4] = LE_32(PMCIN_DATADIR_NONE);
		CLEAN_MESSAGE(ptr, 12);
	}
	xp->actv_cnt++;
	if (xp->actv_cnt > xp->maxdepth) {
		xp->maxdepth = xp->actv_cnt;
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, pwrk->phy, xp, "%s: max depth "
		    "now %u", pwrk->phy->path, xp->maxdepth);
	}
	mutex_exit(&xp->statlock);


#ifdef	DEBUG
	/*
	 * Generate a PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED
	 * event when this goes out on the wire.
	 */
	ptr[4] |= PMCIN_MESSAGE_REPORT;
#endif
	/*
	 * Fill in the SSP IU
	 */

	bzero(&sc, sizeof (sas_ssp_cmd_iu_t));
	bcopy((uint8_t *)&sp->cmd_lun->scsi_lun, sc.lun, sizeof (scsi_lun_t));

	switch (pkt->pkt_flags & FLAG_TAGMASK) {
	case FLAG_HTAG:
		sc.task_attribute = SAS_CMD_TASK_ATTR_HEAD;
		break;
	case FLAG_OTAG:
		sc.task_attribute = SAS_CMD_TASK_ATTR_ORDERED;
		break;
	case FLAG_STAG:
	default:
		sc.task_attribute = SAS_CMD_TASK_ATTR_SIMPLE;
		break;
	}
	(void) memcpy(sc.cdb, pkt->pkt_cdbp,
	    min(SCSA_CDBLEN(sp), sizeof (sc.cdb)));
	(void) memcpy(&ptr[5], &sc, sizeof (sas_ssp_cmd_iu_t));
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	lhtag = pwrk->htag;
	mutex_exit(&pwrk->lock);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
	    "%s: giving pkt %p (tag %x) to the hardware", __func__,
	    (void *)pkt, pwrk->htag);
#ifdef DEBUG
	pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "SAS INI Message", ptr);
#endif
	mutex_enter(&xp->aqlock);
	STAILQ_INSERT_TAIL(&xp->aq, sp, cmd_next);
	mutex_exit(&xp->aqlock);
	sp_pkt_time = CMD2PKT(sp)->pkt_time;
	INC_IQ_ENTRY(pwp, iq);
	mutex_enter(&pwrk->lock);
	/*
	 * Only arm the timer if the work structure still belongs to this
	 * command (the htag could have been recycled while unlocked).
	 */
	if (lhtag == pwrk->htag) {
		pwrk->timer = US2WT(sp_pkt_time * 1000000);
		if (pwrk->timer == 0) {
			pwrk->timer = US2WT(1000000);
		}
	}
	mutex_exit(&pwrk->lock);

	/*
	 * If we just submitted the last command queued from device state
	 * recovery, clear the wq_recovery_tail pointer.
	 */
	mutex_enter(&xp->wqlock);
	if (xp->wq_recovery_tail == sp) {
		xp->wq_recovery_tail = NULL;
	}
	mutex_exit(&xp->wqlock);

	return (PMCS_WQ_RUN_SUCCESS);
}

/*
 * Complete a SAS command
 *
 * Called with pwrk lock held.
 * The free of pwrk releases the lock.
2005 */ 2006 2007 static void 2008 pmcs_SAS_done(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *msg) 2009 { 2010 pmcs_cmd_t *sp = pwrk->arg; 2011 pmcs_phy_t *pptr = pwrk->phy; 2012 pmcs_xscsi_t *xp = pwrk->xp; 2013 struct scsi_pkt *pkt = CMD2PKT(sp); 2014 int dead; 2015 uint32_t sts; 2016 boolean_t aborted = B_FALSE; 2017 boolean_t do_ds_recovery = B_FALSE; 2018 2019 ASSERT(xp != NULL); 2020 ASSERT(sp != NULL); 2021 ASSERT(pptr != NULL); 2022 2023 DTRACE_PROBE4(pmcs__io__done, uint64_t, pkt->pkt_dma_len, int, 2024 (pkt->pkt_dma_flags & DDI_DMA_READ) != 0, hrtime_t, pwrk->start, 2025 hrtime_t, gethrtime()); 2026 2027 dead = pwrk->dead; 2028 2029 if (msg) { 2030 sts = LE_32(msg[2]); 2031 } else { 2032 sts = 0; 2033 } 2034 2035 if (dead != 0) { 2036 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, "%s: dead cmd tag " 2037 "0x%x for %s", __func__, pwrk->htag, pptr->path); 2038 goto out; 2039 } 2040 2041 if (sts == PMCOUT_STATUS_ABORTED) { 2042 aborted = B_TRUE; 2043 } 2044 2045 if (pwrk->state == PMCS_WORK_STATE_TIMED_OUT) { 2046 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 2047 "%s: cmd 0x%p (tag 0x%x) timed out for %s", 2048 __func__, (void *)sp, pwrk->htag, pptr->path); 2049 CMD2PKT(sp)->pkt_scbp[0] = STATUS_GOOD; 2050 CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | 2051 STATE_SENT_CMD; 2052 CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT; 2053 goto out; 2054 } 2055 2056 /* 2057 * If the status isn't okay but not underflow, 2058 * step to the side and parse the (possible) error. 
2059 */ 2060 #ifdef DEBUG 2061 if (msg) { 2062 pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "Outbound Message", msg); 2063 } 2064 #endif 2065 if (!msg) { 2066 goto out; 2067 } 2068 2069 switch (sts) { 2070 case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS: 2071 case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL: 2072 case PMCOUT_STATUS_IO_DS_IN_RECOVERY: 2073 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 2074 "%s: PHY %s requires DS recovery (status=%d)", 2075 __func__, pptr->path, sts); 2076 do_ds_recovery = B_TRUE; 2077 break; 2078 case PMCOUT_STATUS_UNDERFLOW: 2079 (void) pmcs_set_resid(pkt, pkt->pkt_dma_len, LE_32(msg[3])); 2080 pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW, NULL, NULL, 2081 "%s: underflow %u for cdb 0x%x", 2082 __func__, LE_32(msg[3]), pkt->pkt_cdbp[0] & 0xff); 2083 sts = PMCOUT_STATUS_OK; 2084 msg[3] = 0; 2085 break; 2086 case PMCOUT_STATUS_OK: 2087 pkt->pkt_resid = 0; 2088 break; 2089 } 2090 2091 if (sts != PMCOUT_STATUS_OK) { 2092 pmcs_ioerror(pwp, SAS, pwrk, msg, sts); 2093 } else { 2094 if (msg[3]) { 2095 uint8_t local[PMCS_QENTRY_SIZE << 1], *xd; 2096 sas_ssp_rsp_iu_t *rptr = (void *)local; 2097 const int lim = 2098 (PMCS_QENTRY_SIZE << 1) - SAS_RSP_HDR_SIZE; 2099 static const uint8_t ssp_rsp_evec[] = { 2100 0x58, 0x61, 0x56, 0x72, 0x00 2101 }; 2102 2103 /* 2104 * Transform the the first part of the response 2105 * to host canonical form. This gives us enough 2106 * information to figure out what to do with the 2107 * rest (which remains unchanged in the incoming 2108 * message which can be up to two queue entries 2109 * in length). 
2110 */ 2111 pmcs_endian_transform(pwp, local, &msg[5], 2112 ssp_rsp_evec); 2113 xd = (uint8_t *)(&msg[5]); 2114 xd += SAS_RSP_HDR_SIZE; 2115 2116 if (rptr->datapres == SAS_RSP_DATAPRES_RESPONSE_DATA) { 2117 if (rptr->response_data_length != 4) { 2118 pmcs_print_entry(pwp, PMCS_PRT_DEBUG, 2119 "Bad SAS RESPONSE DATA LENGTH", 2120 msg); 2121 pkt->pkt_reason = CMD_TRAN_ERR; 2122 goto out; 2123 } 2124 (void) memcpy(&sts, xd, sizeof (uint32_t)); 2125 sts = BE_32(sts); 2126 /* 2127 * The only response code we should legally get 2128 * here is an INVALID FRAME response code. 2129 */ 2130 if (sts == SAS_RSP_INVALID_FRAME) { 2131 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 2132 "%s: pkt %p tgt %u path %s " 2133 "completed: INVALID FRAME response", 2134 __func__, (void *)pkt, 2135 xp->target_num, pptr->path); 2136 } else { 2137 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 2138 "%s: pkt %p tgt %u path %s " 2139 "completed: illegal response 0x%x", 2140 __func__, (void *)pkt, 2141 xp->target_num, pptr->path, sts); 2142 } 2143 pkt->pkt_reason = CMD_TRAN_ERR; 2144 goto out; 2145 } 2146 if (rptr->datapres == SAS_RSP_DATAPRES_SENSE_DATA) { 2147 uint32_t slen; 2148 slen = rptr->sense_data_length; 2149 if (slen > lim) { 2150 slen = lim; 2151 } 2152 pmcs_latch_status(pwp, sp, rptr->status, xd, 2153 slen, pptr->path); 2154 } else if (rptr->datapres == SAS_RSP_DATAPRES_NO_DATA) { 2155 pmcout_ssp_comp_t *sspcp; 2156 sspcp = (pmcout_ssp_comp_t *)msg; 2157 uint32_t *residp; 2158 /* 2159 * This is the case for a plain SCSI status. 2160 * Note: If RESC_V is set and we're here, there 2161 * is a residual. We need to find it and update 2162 * the packet accordingly. 2163 */ 2164 pmcs_latch_status(pwp, sp, rptr->status, NULL, 2165 0, pptr->path); 2166 2167 if (sspcp->resc_v) { 2168 /* 2169 * Point residual to the SSP_RESP_IU 2170 */ 2171 residp = (uint32_t *)(sspcp + 1); 2172 /* 2173 * param contains the number of bytes 2174 * between where the SSP_RESP_IU may 2175 * or may not be and the residual. 
2176 * Increment residp by the appropriate 2177 * number of words: (param+resc_pad)/4). 2178 */ 2179 residp += (LE_32(sspcp->param) + 2180 sspcp->resc_pad) / 2181 sizeof (uint32_t); 2182 pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW, 2183 pptr, xp, "%s: tgt 0x%p " 2184 "residual %d for pkt 0x%p", 2185 __func__, (void *) xp, *residp, 2186 (void *) pkt); 2187 ASSERT(LE_32(*residp) <= 2188 pkt->pkt_dma_len); 2189 (void) pmcs_set_resid(pkt, 2190 pkt->pkt_dma_len, LE_32(*residp)); 2191 } 2192 } else { 2193 pmcs_print_entry(pwp, PMCS_PRT_DEBUG, 2194 "illegal SAS response", msg); 2195 pkt->pkt_reason = CMD_TRAN_ERR; 2196 goto out; 2197 } 2198 } else { 2199 pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, 2200 pptr->path); 2201 } 2202 if (pkt->pkt_dma_len) { 2203 pkt->pkt_state |= STATE_XFERRED_DATA; 2204 } 2205 } 2206 pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp, 2207 "%s: pkt %p tgt %u done reason=%x state=%x resid=%ld status=%x", 2208 __func__, (void *)pkt, xp->target_num, pkt->pkt_reason, 2209 pkt->pkt_state, pkt->pkt_resid, pkt->pkt_scbp[0]); 2210 2211 if (pwrk->state == PMCS_WORK_STATE_ABORTED) { 2212 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 2213 "%s: scsi_pkt 0x%p aborted for PHY %s; work = 0x%p", 2214 __func__, (void *)pkt, pptr->path, (void *)pwrk); 2215 aborted = B_TRUE; 2216 } 2217 2218 out: 2219 pmcs_dma_unload(pwp, sp); 2220 mutex_enter(&xp->statlock); 2221 2222 /* 2223 * If the device no longer has a PHY pointer, clear the PHY pointer 2224 * from the work structure before we free it. Otherwise, pmcs_pwork 2225 * may decrement the ref_count on a PHY that's been freed. 2226 */ 2227 if (xp->phy == NULL) { 2228 pwrk->phy = NULL; 2229 } 2230 2231 /* 2232 * We may arrive here due to a command timing out, which in turn 2233 * could be addressed in a different context. So, free the work 2234 * back, but only after confirming it's not already been freed 2235 * elsewhere. 
2236 */ 2237 if (pwrk->htag != PMCS_TAG_FREE) { 2238 pmcs_pwork(pwp, pwrk); 2239 } 2240 2241 /* 2242 * If the device is gone, we only put this command on the completion 2243 * queue if the work structure is not marked dead. If it's marked 2244 * dead, it will already have been put there. 2245 */ 2246 if (xp->dev_gone) { 2247 mutex_exit(&xp->statlock); 2248 if (!dead) { 2249 mutex_enter(&xp->aqlock); 2250 STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next); 2251 mutex_exit(&xp->aqlock); 2252 pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, xp, 2253 "%s: Removing cmd 0x%p (htag 0x%x) from aq", 2254 __func__, (void *)sp, sp->cmd_tag); 2255 mutex_enter(&pwp->cq_lock); 2256 STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next); 2257 PMCS_CQ_RUN_LOCKED(pwp); 2258 mutex_exit(&pwp->cq_lock); 2259 pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp, 2260 "%s: Completing command for dead target 0x%p", 2261 __func__, (void *)xp); 2262 } 2263 return; 2264 } 2265 2266 ASSERT(xp->actv_cnt > 0); 2267 if (--(xp->actv_cnt) == 0) { 2268 if (xp->draining) { 2269 pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, xp, 2270 "%s: waking up drain waiters", __func__); 2271 cv_signal(&pwp->drain_cv); 2272 } 2273 } 2274 mutex_exit(&xp->statlock); 2275 2276 /* 2277 * If the status is other than OK, determine if it's something that 2278 * is worth re-attempting enumeration. If so, mark the PHY. 
2279 */ 2280 if (sts != PMCOUT_STATUS_OK) { 2281 pmcs_status_disposition(pptr, sts); 2282 } 2283 2284 if (dead == 0) { 2285 #ifdef DEBUG 2286 pmcs_cmd_t *wp; 2287 mutex_enter(&xp->aqlock); 2288 STAILQ_FOREACH(wp, &xp->aq, cmd_next) { 2289 if (wp == sp) { 2290 break; 2291 } 2292 } 2293 ASSERT(wp != NULL); 2294 #else 2295 mutex_enter(&xp->aqlock); 2296 #endif 2297 pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, xp, 2298 "%s: Removing cmd 0x%p (htag 0x%x) from aq", __func__, 2299 (void *)sp, sp->cmd_tag); 2300 STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next); 2301 if (aborted) { 2302 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 2303 "%s: Aborted cmd for tgt 0x%p, signaling waiters", 2304 __func__, (void *)xp); 2305 cv_signal(&xp->abort_cv); 2306 } 2307 mutex_exit(&xp->aqlock); 2308 } 2309 2310 /* 2311 * If do_ds_recovery is set, we need to initiate device state 2312 * recovery. In this case, we put this I/O back on the head of 2313 * the wait queue to run again after recovery is complete 2314 */ 2315 if (do_ds_recovery) { 2316 mutex_enter(&xp->statlock); 2317 pmcs_start_dev_state_recovery(xp, pptr); 2318 mutex_exit(&xp->statlock); 2319 pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, xp, "%s: Putting cmd 0x%p " 2320 "back on wq during recovery for tgt 0x%p", __func__, 2321 (void *)sp, (void *)xp); 2322 mutex_enter(&xp->wqlock); 2323 if (xp->wq_recovery_tail == NULL) { 2324 STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next); 2325 } else { 2326 /* 2327 * If there are other I/Os waiting at the head due to 2328 * device state recovery, add this one in the right spot 2329 * to maintain proper order. 
			 */
			STAILQ_INSERT_AFTER(&xp->wq, xp->wq_recovery_tail, sp,
			    cmd_next);
		}
		xp->wq_recovery_tail = sp;
		mutex_exit(&xp->wqlock);
	} else {
		/*
		 * If we're not initiating device state recovery and this
		 * command was not "dead", put it on the completion queue
		 */
		if (!dead) {
			mutex_enter(&pwp->cq_lock);
			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
			PMCS_CQ_RUN_LOCKED(pwp);
			mutex_exit(&pwp->cq_lock);
		}
	}
}

/*
 * Run a SATA command (normal reads and writes),
 * or block and schedule a SATL interpretation
 * of the command.
 *
 * Called with pwrk lock held, returns unlocked.
 */

static int
pmcs_SATA_run(pmcs_cmd_t *sp, pmcwork_t *pwrk)
{
	pmcs_hw_t *pwp = CMD2PMC(sp);
	struct scsi_pkt *pkt = CMD2PKT(sp);
	pmcs_xscsi_t *xp;
	uint8_t cdb_base, asc, tag;
	uint32_t *ptr, lhtag, iq, nblk, i, mtype;
	fis_t fis;
	size_t amt;
	uint64_t lba;
	int sp_pkt_time = 0;

	xp = pwrk->xp;
	ASSERT(xp != NULL);

	/*
	 * First, see if this is just a plain read/write command.
	 * If not, we have to queue it up for processing, block
	 * any additional commands from coming in, and wake up
	 * the thread that will process this command.
	 */
	/*
	 * Masking off the CDB group code (top 3 bits) lets the base
	 * READ/WRITE opcodes match all CDB sizes of those commands.
	 */
	cdb_base = pkt->pkt_cdbp[0] & 0x1f;
	if (cdb_base != SCMD_READ && cdb_base != SCMD_WRITE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG1, NULL, NULL,
		    "%s: special SATA cmd %p", __func__, (void *)sp);

		ASSERT(xp->phy != NULL);
		pmcs_pwork(pwp, pwrk);
		pmcs_lock_phy(xp->phy);
		mutex_enter(&xp->statlock);
		xp->special_needed = 1; /* Set the special_needed flag */
		STAILQ_INSERT_TAIL(&xp->sq, sp, cmd_next);
		if (pmcs_run_sata_special(pwp, xp)) {
			SCHEDULE_WORK(pwp, PMCS_WORK_SATA_RUN);
		}
		mutex_exit(&xp->statlock);
		pmcs_unlock_phy(xp->phy);

		return (PMCS_WQ_RUN_SUCCESS);
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL, "%s: regular cmd", __func__);

	mutex_enter(&xp->statlock);
	if (!xp->assigned) {
		mutex_exit(&xp->statlock);
		return (PMCS_WQ_RUN_FAIL_OTHER);
	}
	if (xp->special_running || xp->special_needed || xp->recover_wait) {
		mutex_exit(&xp->statlock);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		/*
		 * By the time we get here the special
		 * commands running or waiting to be run
		 * may have come and gone, so kick our
		 * worker to run the waiting queues
		 * just in case.
		 */
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		return (PMCS_WQ_RUN_FAIL_OTHER);
	}
	lba = xp->capacity;
	mutex_exit(&xp->statlock);

	/*
	 * Extract data length and lba parameters out of the command. The
	 * function pmcs_SATA_rwparm returns a non-zero ASC value if the CDB
	 * values are considered illegal.
	 */
	asc = pmcs_SATA_rwparm(pkt->pkt_cdbp, &nblk, &lba, lba);
	if (asc) {
		/*
		 * Synthesize a CHECK CONDITION: fixed-format sense data
		 * (0xf0), sense key 5 (ILLEGAL REQUEST), ASC from rwparm.
		 */
		uint8_t sns[18];
		bzero(sns, sizeof (sns));
		sns[0] = 0xf0;
		sns[2] = 0x5;
		sns[12] = asc;
		pmcs_latch_status(pwp, sp, STATUS_CHECK, sns, sizeof (sns),
		    pwrk->phy->path);
		pmcs_pwork(pwp, pwrk);
		mutex_enter(&pwp->cq_lock);
		STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
		PMCS_CQ_RUN_LOCKED(pwp);
		mutex_exit(&pwp->cq_lock);
		return (PMCS_WQ_RUN_SUCCESS);
	}

	/*
	 * If the command decodes as not moving any data, complete it here.
	 */
	amt = nblk;
	amt <<= 9;	/* block count to bytes (512-byte logical blocks) */
	amt = pmcs_set_resid(pkt, amt, nblk << 9);
	if (amt == 0) {
		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0,
		    pwrk->phy->path);
		pmcs_pwork(pwp, pwrk);
		mutex_enter(&pwp->cq_lock);
		STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
		PMCS_CQ_RUN_LOCKED(pwp);
		mutex_exit(&pwp->cq_lock);
		return (PMCS_WQ_RUN_SUCCESS);
	}

	/*
	 * Get an inbound queue entry for this I/O
	 */
	GET_IO_IQ_ENTRY(pwp, ptr, xp->phy->device_id, iq);
	if (ptr == NULL) {
		/*
		 * This is a temporary failure not likely to be unblocked by
		 * commands completing as the test for scheduling the
		 * restart of work is a per-device test.
		 */
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		pmcs_dma_unload(pwp, sp);
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
		    "%s: Failed to get IO IQ entry for tgt %d",
		    __func__, xp->target_num);
		return (PMCS_WQ_RUN_FAIL_RES);
	}

	/*
	 * Get a tag. At this point, hold statlock until the tagmap is
	 * updated (just prior to sending the cmd to the hardware).
	 */
	mutex_enter(&xp->statlock);
	/* Find the first free SATL tag bit */
	for (tag = 0; tag < xp->qdepth; tag++) {
		if ((xp->tagmap & (1 << tag)) == 0) {
			break;
		}
	}

	if (tag == xp->qdepth) {
		/* No free tag: requeue the command and release the IQ slot */
		mutex_exit(&xp->statlock);
		mutex_exit(&pwp->iqp_lock[iq]);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		return (PMCS_WQ_RUN_FAIL_OTHER);
	}

	sp->cmd_satltag = (uint8_t)tag;

	/*
	 * Set up the command
	 */
	bzero(fis, sizeof (fis));
	ptr[0] =
	    LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE, PMCIN_SATA_HOST_IO_START));
	ptr[1] = LE_32(pwrk->htag);
	ptr[2] = LE_32(pwrk->phy->device_id);
	ptr[3] = LE_32(amt);

	if (xp->ncq) {
		/* NCQ: queued FPDMA command, tag carried in the FIS */
		mtype = SATA_PROTOCOL_FPDMA | (tag << 16);
		fis[0] = ((nblk & 0xff) << 24) | (C_BIT << 8) | FIS_REG_H2DEV;
		if (cdb_base == SCMD_READ) {
			fis[0] |= (READ_FPDMA_QUEUED << 16);
		} else {
			fis[0] |= (WRITE_FPDMA_QUEUED << 16);
		}
		fis[1] = (FEATURE_LBA << 24) | (lba & 0xffffff);
		fis[2] = ((nblk & 0xff00) << 16) | ((lba >> 24) & 0xffffff);
		fis[3] = tag << 3;
	} else {
		int op;
		fis[0] = (C_BIT << 8) | FIS_REG_H2DEV;
		if (xp->pio) {
			mtype = SATA_PROTOCOL_PIO;
			if (cdb_base == SCMD_READ) {
				op = READ_SECTORS_EXT;
			} else {
				op = WRITE_SECTORS_EXT;
			}
		} else {
			mtype = SATA_PROTOCOL_DMA;
			if (cdb_base == SCMD_READ) {
				op = READ_DMA_EXT;
			} else {
				op = WRITE_DMA_EXT;
			}
		}
		fis[0] |= (op << 16);
		fis[1] = (FEATURE_LBA << 24) | (lba & 0xffffff);
		fis[2] = (lba >> 24) & 0xffffff;
		fis[3] = nblk;
	}

	if (cdb_base == SCMD_READ) {
		ptr[4] = LE_32(mtype | PMCIN_DATADIR_2_INI);
	} else {
		ptr[4] = LE_32(mtype | PMCIN_DATADIR_2_DEV);
	}
#ifdef DEBUG
	/*
	 * Generate a PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED
	 * event when this goes out on the wire.
	 */
	ptr[4] |= PMCIN_MESSAGE_REPORT;
#endif
	/* Copy the constructed FIS into the IOMB */
	for (i = 0; i < (sizeof (fis_t))/(sizeof (uint32_t)); i++) {
		ptr[i+5] = LE_32(fis[i]);
	}
	if (pmcs_dma_load(pwp, sp, ptr)) {
		mutex_exit(&xp->statlock);
		mutex_exit(&pwp->iqp_lock[iq]);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, NULL, xp,
		    "%s: Failed to dma_load for tgt %d",
		    __func__, xp->target_num);
		return (PMCS_WQ_RUN_FAIL_RES);

	}

	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	lhtag = pwrk->htag;
	mutex_exit(&pwrk->lock);
	xp->tagmap |= (1 << tag);
	xp->actv_cnt++;
	if (xp->actv_cnt > xp->maxdepth) {
		xp->maxdepth = xp->actv_cnt;
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, pwrk->phy, xp,
		    "%s: max depth now %u", pwrk->phy->path, xp->maxdepth);
	}
	mutex_exit(&xp->statlock);
	mutex_enter(&xp->aqlock);
	STAILQ_INSERT_TAIL(&xp->aq, sp, cmd_next);
	mutex_exit(&xp->aqlock);
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, NULL, NULL,
	    "%s: giving pkt %p to hardware", __func__, (void *)pkt);
#ifdef DEBUG
	pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "SATA INI Message", ptr);
#endif
	sp_pkt_time = CMD2PKT(sp)->pkt_time;
	INC_IQ_ENTRY(pwp, iq);
	mutex_enter(&pwrk->lock);
	/*
	 * Only arm the timer if the work structure still describes this
	 * command (htag unchanged); it may have been recycled while we
	 * held no lock on it.
	 */
	if (lhtag == pwrk->htag) {
		pwrk->timer = US2WT(sp_pkt_time * 1000000);
		if (pwrk->timer == 0) {
			pwrk->timer = US2WT(1000000);
		}
	}
	mutex_exit(&pwrk->lock);

	return (PMCS_WQ_RUN_SUCCESS);
}

/*
 * Complete a SATA command. Called with pwrk lock held.
 */
void
pmcs_SATA_done(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *msg)
{
	pmcs_cmd_t *sp = pwrk->arg;
	struct scsi_pkt *pkt = CMD2PKT(sp);
	pmcs_phy_t *pptr = pwrk->phy;
	int dead;
	uint32_t sts;
	pmcs_xscsi_t *xp;
	boolean_t aborted = B_FALSE;

	xp = pwrk->xp;
	ASSERT(xp != NULL);

	DTRACE_PROBE4(pmcs__io__done, uint64_t, pkt->pkt_dma_len, int,
	    (pkt->pkt_dma_flags & DDI_DMA_READ) != 0, hrtime_t, pwrk->start,
	    hrtime_t, gethrtime());

	dead = pwrk->dead;

	/* Completion status lives in word 2 of the outbound message */
	if (msg) {
		sts = LE_32(msg[2]);
	} else {
		sts = 0;
	}

	if (dead != 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, "%s: dead cmd tag "
		    "0x%x for %s", __func__, pwrk->htag, pptr->path);
		goto out;
	}
	if ((pwrk->state == PMCS_WORK_STATE_TIMED_OUT) &&
	    (sts != PMCOUT_STATUS_ABORTED)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: cmd 0x%p (tag 0x%x) timed out for %s",
		    __func__, (void *)sp, pwrk->htag, pptr->path);
		CMD2PKT(sp)->pkt_scbp[0] = STATUS_GOOD;
		/* pkt_reason already set to CMD_TIMEOUT */
		ASSERT(CMD2PKT(sp)->pkt_reason == CMD_TIMEOUT);
		CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD;
		CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT;
		goto out;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp, "%s: pkt %p tgt %u done",
	    __func__, (void *)pkt, xp->target_num);

	/*
	 * If the status isn't okay but not underflow,
	 * step to the side and parse the (possible) error.
	 */
#ifdef DEBUG
	if (msg) {
		pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "Outbound Message", msg);
	}
#endif
	if (!msg) {
		goto out;
	}

	/*
	 * If the status isn't okay or we got a FIS response of some kind,
	 * step to the side and parse the (possible) error.
	 */
	if ((sts != PMCOUT_STATUS_OK) || (LE_32(msg[3]) != 0)) {
		if (sts == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) {
			/*
			 * Attempt a link reset as error recovery; drop the
			 * pwrk lock while taking the PHY lock to preserve
			 * the driver's lock ordering.
			 */
			mutex_exit(&pwrk->lock);
			pmcs_lock_phy(pptr);
			mutex_enter(&xp->statlock);
			if ((xp->resetting == 0) && (xp->reset_success != 0) &&
			    (xp->reset_wait == 0)) {
				mutex_exit(&xp->statlock);
				if (pmcs_reset_phy(pwp, pptr,
				    PMCS_PHYOP_LINK_RESET) != 0) {
					pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
					    "%s: PHY (%s) Local Control/Link "
					    "Reset FAILED as part of error "
					    "recovery", __func__, pptr->path);
				}
				mutex_enter(&xp->statlock);
			}
			mutex_exit(&xp->statlock);
			pmcs_unlock_phy(pptr);
			mutex_enter(&pwrk->lock);
		}
		pmcs_ioerror(pwp, SATA, pwrk, msg, sts);
	} else {
		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0,
		    pwrk->phy->path);
		pkt->pkt_state |= STATE_XFERRED_DATA;
		pkt->pkt_resid = 0;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp,
	    "%s: pkt %p tgt %u done reason=%x state=%x resid=%ld status=%x",
	    __func__, (void *)pkt, xp->target_num, pkt->pkt_reason,
	    pkt->pkt_state, pkt->pkt_resid, pkt->pkt_scbp[0]);

	if (pwrk->state == PMCS_WORK_STATE_ABORTED) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp,
		    "%s: scsi_pkt 0x%p aborted for PHY %s; work = 0x%p",
		    __func__, (void *)pkt, pptr->path, (void *)pwrk);
		aborted = B_TRUE;
	}

out:
	pmcs_dma_unload(pwp, sp);
	mutex_enter(&xp->statlock);
	/* Release the SATL tag this command was holding */
	xp->tagmap &= ~(1 << sp->cmd_satltag);

	/*
	 * If the device no longer has a PHY pointer, clear the PHY pointer
	 * from the work structure before we free it. Otherwise, pmcs_pwork
	 * may decrement the ref_count on a PHY that's been freed.
	 */
	if (xp->phy == NULL) {
		pwrk->phy = NULL;
	}

	/*
	 * We may arrive here due to a command timing out, which in turn
	 * could be addressed in a different context. So, free the work
	 * back, but only after confirming it's not already been freed
	 * elsewhere.
	 */
	if (pwrk->htag != PMCS_TAG_FREE) {
		pmcs_pwork(pwp, pwrk);
	}

	if (xp->dev_gone) {
		mutex_exit(&xp->statlock);
		if (!dead) {
			mutex_enter(&xp->aqlock);
			STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next);
			mutex_exit(&xp->aqlock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG3, pptr, xp,
			    "%s: Removing cmd 0x%p (htag 0x%x) from aq",
			    __func__, (void *)sp, sp->cmd_tag);
			mutex_enter(&pwp->cq_lock);
			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
			PMCS_CQ_RUN_LOCKED(pwp);
			mutex_exit(&pwp->cq_lock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG2, pptr, xp,
			    "%s: Completing command for dead target 0x%p",
			    __func__, (void *)xp);
		}
		return;
	}

	ASSERT(xp->actv_cnt > 0);
	if (--(xp->actv_cnt) == 0) {
		if (xp->draining) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG1, pptr, xp,
			    "%s: waking up drain waiters", __func__);
			cv_signal(&pwp->drain_cv);
		} else if (xp->special_needed) {
			/* Last active cmd done; let queued SATL cmds run */
			SCHEDULE_WORK(pwp, PMCS_WORK_SATA_RUN);
		}
	}
	mutex_exit(&xp->statlock);

	/*
	 * If the status is other than OK, determine if it's something that
	 * is worth re-attempting enumeration. If so, mark the PHY.
2781 */ 2782 if (sts != PMCOUT_STATUS_OK) { 2783 pmcs_status_disposition(pptr, sts); 2784 } 2785 2786 if (dead == 0) { 2787 #ifdef DEBUG 2788 pmcs_cmd_t *wp; 2789 mutex_enter(&xp->aqlock); 2790 STAILQ_FOREACH(wp, &xp->aq, cmd_next) { 2791 if (wp == sp) { 2792 break; 2793 } 2794 } 2795 ASSERT(wp != NULL); 2796 #else 2797 mutex_enter(&xp->aqlock); 2798 #endif 2799 STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next); 2800 if (aborted) { 2801 pmcs_prt(pwp, PMCS_PRT_DEBUG, pptr, xp, 2802 "%s: Aborted cmd for tgt 0x%p, signaling waiters", 2803 __func__, (void *)xp); 2804 cv_signal(&xp->abort_cv); 2805 } 2806 mutex_exit(&xp->aqlock); 2807 mutex_enter(&pwp->cq_lock); 2808 STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next); 2809 PMCS_CQ_RUN_LOCKED(pwp); 2810 mutex_exit(&pwp->cq_lock); 2811 } 2812 } 2813 2814 static uint8_t 2815 pmcs_SATA_rwparm(uint8_t *cdb, uint32_t *xfr, uint64_t *lba, uint64_t lbamax) 2816 { 2817 uint8_t asc = 0; 2818 switch (cdb[0]) { 2819 case SCMD_READ_G5: 2820 case SCMD_WRITE_G5: 2821 *xfr = 2822 (((uint32_t)cdb[10]) << 24) | 2823 (((uint32_t)cdb[11]) << 16) | 2824 (((uint32_t)cdb[12]) << 8) | 2825 ((uint32_t)cdb[13]); 2826 *lba = 2827 (((uint64_t)cdb[2]) << 56) | 2828 (((uint64_t)cdb[3]) << 48) | 2829 (((uint64_t)cdb[4]) << 40) | 2830 (((uint64_t)cdb[5]) << 32) | 2831 (((uint64_t)cdb[6]) << 24) | 2832 (((uint64_t)cdb[7]) << 16) | 2833 (((uint64_t)cdb[8]) << 8) | 2834 ((uint64_t)cdb[9]); 2835 /* Check for illegal bits */ 2836 if (cdb[15]) { 2837 asc = 0x24; /* invalid field in cdb */ 2838 } 2839 break; 2840 case SCMD_READ_G4: 2841 case SCMD_WRITE_G4: 2842 *xfr = 2843 (((uint32_t)cdb[6]) << 16) | 2844 (((uint32_t)cdb[7]) << 8) | 2845 ((uint32_t)cdb[8]); 2846 *lba = 2847 (((uint32_t)cdb[2]) << 24) | 2848 (((uint32_t)cdb[3]) << 16) | 2849 (((uint32_t)cdb[4]) << 8) | 2850 ((uint32_t)cdb[5]); 2851 /* Check for illegal bits */ 2852 if (cdb[11]) { 2853 asc = 0x24; /* invalid field in cdb */ 2854 } 2855 break; 2856 case SCMD_READ_G1: 2857 case SCMD_WRITE_G1: 2858 *xfr = 
(((uint32_t)cdb[7]) << 8) | ((uint32_t)cdb[8]); 2859 *lba = 2860 (((uint32_t)cdb[2]) << 24) | 2861 (((uint32_t)cdb[3]) << 16) | 2862 (((uint32_t)cdb[4]) << 8) | 2863 ((uint32_t)cdb[5]); 2864 /* Check for illegal bits */ 2865 if (cdb[9]) { 2866 asc = 0x24; /* invalid field in cdb */ 2867 } 2868 break; 2869 case SCMD_READ: 2870 case SCMD_WRITE: 2871 *xfr = cdb[4]; 2872 if (*xfr == 0) { 2873 *xfr = 256; 2874 } 2875 *lba = 2876 (((uint32_t)cdb[1] & 0x1f) << 16) | 2877 (((uint32_t)cdb[2]) << 8) | 2878 ((uint32_t)cdb[3]); 2879 /* Check for illegal bits */ 2880 if (cdb[5]) { 2881 asc = 0x24; /* invalid field in cdb */ 2882 } 2883 break; 2884 } 2885 2886 if (asc == 0) { 2887 if ((*lba + *xfr) > lbamax) { 2888 asc = 0x21; /* logical block out of range */ 2889 } 2890 } 2891 return (asc); 2892 } 2893 2894 /* 2895 * Called with pwrk lock held. 2896 */ 2897 static void 2898 pmcs_ioerror(pmcs_hw_t *pwp, pmcs_dtype_t t, pmcwork_t *pwrk, uint32_t *w, 2899 uint32_t status) 2900 { 2901 static uint8_t por[] = { 2902 0xf0, 0x0, 0x6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x28 2903 }; 2904 static uint8_t parity[] = { 2905 0xf0, 0x0, 0xb, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x47, 5 2906 }; 2907 const char *msg; 2908 char buf[20]; 2909 pmcs_cmd_t *sp = pwrk->arg; 2910 pmcs_phy_t *phyp = pwrk->phy; 2911 struct scsi_pkt *pkt = CMD2PKT(sp); 2912 uint32_t resid; 2913 2914 ASSERT(w != NULL); 2915 resid = LE_32(w[3]); 2916 2917 msg = pmcs_status_str(status); 2918 if (msg == NULL) { 2919 (void) snprintf(buf, sizeof (buf), "Error 0x%x", status); 2920 msg = buf; 2921 } 2922 2923 if (status != PMCOUT_STATUS_OK) { 2924 pmcs_prt(pwp, PMCS_PRT_DEBUG1, phyp, NULL, 2925 "%s: device %s tag 0x%x status %s @ %llu", __func__, 2926 phyp->path, pwrk->htag, msg, 2927 (unsigned long long)gethrtime()); 2928 } 2929 2930 pkt->pkt_reason = CMD_CMPLT; /* default reason */ 2931 2932 switch (status) { 2933 case PMCOUT_STATUS_OK: 2934 if (t == SATA) { 2935 int i; 2936 fis_t fis; 2937 for (i = 0; i < sizeof (fis) / sizeof (fis[0]); i++) { 
				fis[i] = LE_32(w[4+i]);
			}
			if ((fis[0] & 0xff) != FIS_REG_D2H) {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL,
				    "unexpected fis code 0x%x", fis[0] & 0xff);
			} else {
				pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL,
				    "FIS ERROR");
				pmcs_fis_dump(pwp, fis);
			}
			pkt->pkt_reason = CMD_TRAN_ERR;
			break;
		}
		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, phyp->path);
		break;

	case PMCOUT_STATUS_ABORTED:
		/*
		 * Command successfully aborted.
		 */
		if (phyp->dead) {
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_state = STATE_GOT_BUS;
		} else if (pwrk->ssp_event != 0) {
			pkt->pkt_reason = CMD_TRAN_ERR;
			pkt->pkt_state = STATE_GOT_BUS;
		} else if (pwrk->state == PMCS_WORK_STATE_TIMED_OUT) {
			pkt->pkt_reason = CMD_TIMEOUT;
			pkt->pkt_statistics |= STAT_TIMEOUT;
			pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
			    STATE_SENT_CMD;
		} else {
			pkt->pkt_reason = CMD_ABORTED;
			pkt->pkt_statistics |= STAT_ABORTED;
			pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
			    STATE_SENT_CMD;
		}

		/*
		 * PMCS_WORK_STATE_TIMED_OUT doesn't need to be preserved past
		 * this point, so go ahead and mark it as aborted.
		 */
		pwrk->state = PMCS_WORK_STATE_ABORTED;
		break;

	case PMCOUT_STATUS_UNDERFLOW:
		/*
		 * This will only get called for SATA
		 */
		pkt->pkt_resid = resid;
		if (pkt->pkt_dma_len < pkt->pkt_resid) {
			(void) pmcs_set_resid(pkt, pkt->pkt_dma_len, resid);
		}
		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, phyp->path);
		break;

	case PMCOUT_STATUS_NO_DEVICE:
	case PMCOUT_STATUS_XFER_ERROR_SATA_LINK_TIMEOUT:
		pkt->pkt_reason = CMD_DEV_GONE;
		break;

	case PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION:
		/*
		 * Need to do rediscovery. We probably have
		 * the wrong device (disk swap), so kill
		 * this one.
		 */
		/* FALLTHROUGH */
	case PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_ERROR:
		/*
		 * Need to do rediscovery.
		 */
		if (!phyp->dead) {
			/* Drop pwrk lock before taking the PHY lock */
			mutex_exit(&pwrk->lock);
			pmcs_lock_phy(pwrk->phy);
			pmcs_kill_changed(pwp, pwrk->phy, 0);
			pmcs_unlock_phy(pwrk->phy);
			mutex_enter(&pwrk->lock);
			pkt->pkt_reason = CMD_INCOMPLETE;
			pkt->pkt_state = STATE_GOT_BUS;
		} else {
			pkt->pkt_reason = CMD_DEV_GONE;
		}
		break;

	case PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK:
	case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
	case PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION:
	case PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED:
		/* cmd is pending on the target */
	case PMCOUT_STATUS_XFER_ERROR_OFFSET_MISMATCH:
	case PMCOUT_STATUS_XFER_ERROR_REJECTED_NCQ_MODE:
		/* transitory - commands sent while in NCQ failure mode */
	case PMCOUT_STATUS_XFER_ERROR_ABORTED_NCQ_MODE:
		/* NCQ failure */
	case PMCOUT_STATUS_IO_PORT_IN_RESET:
	case PMCOUT_STATUS_XFER_ERR_BREAK:
	case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state = STATE_GOT_BUS;
		break;

	case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
		pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, phyp->target,
		    "STATUS_BUSY for htag 0x%08x", sp->cmd_tag);
		pmcs_latch_status(pwp, sp, STATUS_BUSY, NULL, 0, phyp->path);
		break;

	case PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
		/* synthesize a RESERVATION CONFLICT */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, phyp->target,
		    "%s: Potential affiliation active on 0x%" PRIx64, __func__,
		    pmcs_barray2wwn(phyp->sas_address));
		pmcs_latch_status(pwp, sp, STATUS_RESERVATION_CONFLICT, NULL,
		    0, phyp->path);
		break;

	case PMCOUT_STATUS_XFER_ERROR_ABORTED_DUE_TO_SRST:
		/* synthesize a power-on/reset */
		pmcs_latch_status(pwp, sp, STATUS_CHECK, por, sizeof (por),
		    phyp->path);
		break;

	case PMCOUT_STATUS_XFER_ERROR_UNEXPECTED_PHASE:
	case PMCOUT_STATUS_XFER_ERROR_RDY_OVERRUN:
	case PMCOUT_STATUS_XFER_ERROR_RDY_NOT_EXPECTED:
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK:
	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK:
		/* synthesize a PARITY ERROR */
		pmcs_latch_status(pwp, sp, STATUS_CHECK, parity,
		    sizeof (parity), phyp->path);
		break;

	case PMCOUT_STATUS_IO_XFER_ERROR_DMA:
	case PMCOUT_STATUS_IO_NOT_VALID:
	case PMCOUT_STATUS_PROG_ERROR:
	case PMCOUT_STATUS_XFER_ERROR_PEER_ABORTED:
	case PMCOUT_STATUS_XFER_ERROR_SATA: /* non-NCQ failure */
	default:
		pkt->pkt_reason = CMD_TRAN_ERR;
		break;
	}
}

/*
 * Latch up SCSI status
 */

void
pmcs_latch_status(pmcs_hw_t *pwp, pmcs_cmd_t *sp, uint8_t status,
    uint8_t *snsp, size_t snslen, char *path)
{
	static const char c1[] =
	    "%s: Status Byte 0x%02x for CDB0=0x%02x (%02x %02x %02x) "
	    "HTAG 0x%x @ %llu";
	static const char c2[] =
	    "%s: Status Byte 0x%02x for CDB0=0x%02x HTAG 0x%x @ %llu";

	CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
	    STATE_SENT_CMD | STATE_GOT_STATUS;
	CMD2PKT(sp)->pkt_scbp[0] = status;

	/*
	 * If this is a CHECK CONDITION with sense data and the packet has
	 * room for auto-request-sense status, latch the sense data too.
	 */
	if (status == STATUS_CHECK && snsp &&
	    (size_t)SCSA_STSLEN(sp) >= sizeof (struct scsi_arq_status)) {
		struct scsi_arq_status *aqp =
		    (void *) CMD2PKT(sp)->pkt_scbp;
		size_t amt = sizeof (struct scsi_extended_sense);
		uint8_t key = scsi_sense_key(snsp);
		uint8_t asc = scsi_sense_asc(snsp);
		uint8_t ascq = scsi_sense_ascq(snsp);
		if (amt > snslen) {
			amt = snslen;
		}
		pmcs_prt(pwp,
		    PMCS_PRT_DEBUG_SCSI_STATUS, NULL, NULL, c1, path,
		    status, CMD2PKT(sp)->pkt_cdbp[0] & 0xff, key, asc, ascq,
		    sp->cmd_tag, (unsigned long long)gethrtime());
		CMD2PKT(sp)->pkt_state |= STATE_ARQ_DONE;
		(*(uint8_t *)&aqp->sts_rqpkt_status) = STATUS_GOOD;
		aqp->sts_rqpkt_statistics = 0;
		aqp->sts_rqpkt_reason = CMD_CMPLT;
		aqp->sts_rqpkt_state = STATE_GOT_BUS |
		    STATE_GOT_TARGET | STATE_SENT_CMD |
		    STATE_XFERRED_DATA | STATE_GOT_STATUS;
		(void) memcpy(&aqp->sts_sensedata, snsp, amt);
		if (aqp->sts_sensedata.es_class != CLASS_EXTENDED_SENSE) {
			/* Not extended sense: flag the synthetic RQS pkt */
			aqp->sts_rqpkt_reason = CMD_TRAN_ERR;
			aqp->sts_rqpkt_state = 0;
			aqp->sts_rqpkt_resid =
			    sizeof (struct scsi_extended_sense);
		} else {
			aqp->sts_rqpkt_resid =
			    sizeof (struct scsi_extended_sense) - amt;
		}
	} else if (status) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_SCSI_STATUS, NULL, NULL, c2,
		    path, status, CMD2PKT(sp)->pkt_cdbp[0] & 0xff,
		    sp->cmd_tag, (unsigned long long)gethrtime());
	}

	CMD2PKT(sp)->pkt_reason = CMD_CMPLT;
}

/*
 * Calculate and set packet residual and return the amount
 * left over after applying various filters.
 */
size_t
pmcs_set_resid(struct scsi_pkt *pkt, size_t amt, uint32_t cdbamt)
{
	/* Residual is what the CDB asked for; clamp amt to it and to DMA */
	pkt->pkt_resid = cdbamt;
	if (amt > pkt->pkt_resid) {
		amt = pkt->pkt_resid;
	}
	if (amt > pkt->pkt_dma_len) {
		amt = pkt->pkt_dma_len;
	}
	return (amt);
}

/*
 * Return the existing target softstate (unlocked) if there is one. If so,
 * the PHY is locked and that lock must be freed by the caller after the
 * target/PHY linkage is established. If there isn't one, and alloc_tgt is
 * TRUE, then allocate one.
 */
pmcs_xscsi_t *
pmcs_get_target(pmcs_iport_t *iport, char *tgt_port, boolean_t alloc_tgt)
{
	pmcs_hw_t *pwp = iport->pwp;
	pmcs_phy_t *phyp;
	pmcs_xscsi_t *tgt;
	uint64_t wwn;
	char unit_address[PMCS_MAX_UA_SIZE];
	int ua_form = 1;

	/*
	 * Find the PHY for this target
	 */
	phyp = pmcs_find_phy_by_sas_address(pwp, iport, NULL, tgt_port);
	if (phyp == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG3, NULL, NULL,
		    "%s: No PHY for target @ %s", __func__, tgt_port);
		return (NULL);
	}

	tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, tgt_port);

	if (tgt) {
		mutex_enter(&tgt->statlock);
		/*
		 * There's already a target. Check its PHY pointer to see
		 * if we need to clear the old linkages
		 */
		if (tgt->phy && (tgt->phy != phyp)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
			    "%s: Target PHY updated from %p to %p", __func__,
			    (void *)tgt->phy, (void *)phyp);
			/* Move the ref count from the old PHY to the new */
			if (!IS_ROOT_PHY(tgt->phy)) {
				pmcs_dec_phy_ref_count(tgt->phy);
				pmcs_inc_phy_ref_count(phyp);
			}
			tgt->phy->target = NULL;
		}

		/*
		 * If this target has no PHY pointer and alloc_tgt is FALSE,
		 * that implies we expect the target to already exist. This
		 * implies that there has already been a tran_tgt_init on at
		 * least one LU.
		 */
		if ((tgt->phy == NULL) && !alloc_tgt) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, tgt,
			    "%s: Establish linkage from new PHY to old target @"
			    "%s", __func__, tgt->unit_address);
			for (int idx = 0; idx < tgt->ref_count; idx++) {
				pmcs_inc_phy_ref_count(phyp);
			}
		}

		/*
		 * Set this target pointer back up, since it's been
		 * through pmcs_clear_xp().
		 */
		tgt->dev_gone = 0;
		tgt->assigned = 1;
		tgt->dtype = phyp->dtype;
		tgt->dev_state = PMCS_DEVICE_STATE_OPERATIONAL;
		tgt->phy = phyp;
		phyp->target = tgt;

		mutex_exit(&tgt->statlock);
		return (tgt);
	}

	/*
	 * Make sure the PHY we found is on the correct iport
	 */
	if (phyp->iport != iport) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, phyp, NULL,
		    "%s: No target at %s on this iport", __func__, tgt_port);
		pmcs_unlock_phy(phyp);
		return (NULL);
	}

	/*
	 * If this was just a lookup (i.e. alloc_tgt is false), return now.
	 */
	if (alloc_tgt == B_FALSE) {
		pmcs_unlock_phy(phyp);
		return (NULL);
	}

	/*
	 * Allocate the new softstate
	 */
	wwn = pmcs_barray2wwn(phyp->sas_address);
	(void) scsi_wwn_to_wwnstr(wwn, ua_form, unit_address);

	if (ddi_soft_state_bystr_zalloc(iport->tgt_sstate, unit_address) !=
	    DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: Couldn't alloc softstate for device at %s",
		    __func__, unit_address);
		pmcs_unlock_phy(phyp);
		return (NULL);
	}

	tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, unit_address);
	ASSERT(tgt != NULL);
	/* Initialize the wait, active and special (SATL) command queues */
	STAILQ_INIT(&tgt->wq);
	STAILQ_INIT(&tgt->aq);
	STAILQ_INIT(&tgt->sq);
	mutex_init(&tgt->statlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));
	mutex_init(&tgt->wqlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));
	mutex_init(&tgt->aqlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pwp->intr_pri));
	cv_init(&tgt->reset_cv, NULL, CV_DRIVER, NULL);
	cv_init(&tgt->abort_cv, NULL, CV_DRIVER, NULL);
	list_create(&tgt->lun_list, sizeof (pmcs_lun_t),
	    offsetof(pmcs_lun_t, lun_list_next));
	tgt->qdepth = 1;
	tgt->target_num = PMCS_INVALID_TARGET_NUM;
	bcopy(unit_address, tgt->unit_address, PMCS_MAX_UA_SIZE);
	tgt->pwp = pwp;
	tgt->ua = strdup(iport->ua);
	tgt->phy = phyp;
	ASSERT((phyp->target == NULL) || (phyp->target == tgt));
	if (phyp->target == NULL) {
		phyp->target = tgt;
	}

	/*
	 * Don't allocate LUN softstate for SMP targets
	 */
	if (phyp->dtype == EXPANDER) {
		return (tgt);
	}

	if (ddi_soft_state_bystr_init(&tgt->lun_sstate,
	    sizeof (pmcs_lun_t), PMCS_LUN_SSTATE_SZ) != 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, phyp, tgt,
		    "%s: LUN soft_state_bystr_init failed", __func__);
		ddi_soft_state_bystr_free(iport->tgt_sstate, tgt_port);
		pmcs_unlock_phy(phyp);
		return (NULL);
	}

	return (tgt);
}