// SPDX-License-Identifier: GPL-2.0-only
/*
 * Serial Attached SCSI (SAS) class SCSI Host glue.
 *
 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 */

#include <linux/kthread.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/hex.h>
#include <linux/kernel.h>

#include "sas_internal.h"

#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/sas_ata.h>
#include "scsi_sas_internal.h"
#include "scsi_transport_api.h"
#include "scsi_priv.h"

#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/libata.h>

/*
 * sas_end_task - record final status and free the task
 * @sc:   the scsi_cmnd that owned @task
 * @task: the completed sas_task
 *
 * Translates the libsas completion status (task->task_status, a
 * resp/stat pair filled in by the LLDD) into a SCSI midlayer result
 * (host byte << 16 | status byte), copies sense data on CHECK
 * CONDITION, detaches the task from the command and frees it.
 * After this returns the sas_task must not be touched.
 */
static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
{
	struct task_status_struct *ts = &task->task_status;
	enum scsi_host_status hs = DID_OK;
	enum exec_status stat = SAS_SAM_STAT_GOOD;

	if (ts->resp == SAS_TASK_UNDELIVERED) {
		/* transport error */
		hs = DID_NO_CONNECT;
	} else { /* ts->resp == SAS_TASK_COMPLETE */
		/* task delivered, what happened afterwards? */
		switch (ts->stat) {
		case SAS_DEV_NO_RESPONSE:
		case SAS_INTERRUPTED:
		case SAS_PHY_DOWN:
		case SAS_NAK_R_ERR:
		case SAS_OPEN_TO:
			/* link/connection level failures all map to
			 * "could not reach the device" */
			hs = DID_NO_CONNECT;
			break;
		case SAS_DATA_UNDERRUN:
			scsi_set_resid(sc, ts->residual);
			/* an underrun is only an error when less than the
			 * command's stated underflow limit was moved */
			if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
				hs = DID_ERROR;
			break;
		case SAS_DATA_OVERRUN:
			hs = DID_ERROR;
			break;
		case SAS_QUEUE_FULL:
			hs = DID_SOFT_ERROR; /* retry */
			break;
		case SAS_DEVICE_UNKNOWN:
			hs = DID_BAD_TARGET;
			break;
		case SAS_OPEN_REJECT:
			if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
				hs = DID_SOFT_ERROR; /* retry */
			else
				hs = DID_ERROR;
			break;
		case SAS_PROTO_RESPONSE:
			/* SAS_PROTO_RESP is only valid for SATA/STP tasks;
			 * seeing it here indicates an LLDD bug */
			pr_notice("LLDD:%s sent SAS_PROTO_RESP for an SSP task; please report this\n",
				  task->dev->port->ha->sas_ha_name);
			break;
		case SAS_ABORTED_TASK:
			hs = DID_ABORT;
			break;
		case SAS_SAM_STAT_CHECK_CONDITION:
			memcpy(sc->sense_buffer, ts->buf,
			       min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
			stat = SAS_SAM_STAT_CHECK_CONDITION;
			break;
		default:
			/* anything else is passed through as the SAM status */
			stat = ts->stat;
			break;
		}
	}

	sc->result = (hs << 16) | stat;
	ASSIGN_SAS_TASK(sc, NULL);
	sas_free_task(task);
}

/*
 * sas_scsi_task_done - LLDD completion callback for SSP tasks
 * @task: the completed sas_task
 *
 * Runs in the LLDD's completion context.  Under the per-device
 * done_lock it arbitrates the race with the error handler: if
 * SAS_HA_FROZEN is set, eh owns the task and we must leave it alone
 * (signalled here by NULLing the local @task pointer); otherwise we
 * claim it by clearing the command's task pointer and complete the
 * command normally.
 */
static void sas_scsi_task_done(struct sas_task *task)
{
	struct scsi_cmnd *sc = task->uldd_task;
	struct domain_device *dev = task->dev;
	struct sas_ha_struct *ha = dev->port->ha;
	unsigned long flags;

	spin_lock_irqsave(&dev->done_lock, flags);
	if (test_bit(SAS_HA_FROZEN, &ha->state))
		task = NULL;
	else
		ASSIGN_SAS_TASK(sc, NULL);
	spin_unlock_irqrestore(&dev->done_lock, flags);

	if (unlikely(!task)) {
		/* task will be completed by the error handler */
		pr_debug("task done but aborted\n");
		return;
	}

	if (unlikely(!sc)) {
		pr_debug("task_done called with non existing SCSI cmnd!\n");
		sas_free_task(task);
		return;
	}

	sas_end_task(sc, task);
	scsi_done(sc);
}

/*
 * sas_create_task - build an SSP sas_task for a SCSI command
 * @cmd:       the SCSI command to wrap
 * @dev:       target domain device
 * @gfp_flags: allocation flags for sas_alloc_task()
 *
 * Links @cmd and the new task to each other (ASSIGN_SAS_TASK), fills
 * in the SSP LUN/attr/cdb fields and the scatterlist description, and
 * sets sas_scsi_task_done() as the completion callback.  Returns NULL
 * on allocation failure.
 */
static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
					struct domain_device *dev,
					gfp_t gfp_flags)
{
	struct sas_task *task = sas_alloc_task(gfp_flags);
	struct scsi_lun lun;

	if (!task)
		return NULL;

	task->uldd_task = cmd;
	ASSIGN_SAS_TASK(cmd, task);

	task->dev = dev;
	task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */

	int_to_scsilun(cmd->device->lun, &lun);
	memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
	task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
	task->ssp_task.cmd = cmd;

	task->scatter = scsi_sglist(cmd);
	task->num_scatter = scsi_sg_count(cmd);
	task->total_xfer_len = scsi_bufflen(cmd);
	task->data_dir = cmd->sc_data_direction;

	task->task_done = sas_scsi_task_done;

	return task;
}

/*
 * sas_queuecommand - SCSI host template queuecommand for libsas HAs
 * @host: the Scsi_Host
 * @cmd:  command to issue
 *
 * SATA commands are handed to libata under the ata port lock; SSP
 * commands are wrapped in a sas_task and passed to the LLDD's
 * lldd_execute_task().  On LLDD submission failure the command is
 * completed immediately with DID_SOFT_ERROR (queue full, retriable)
 * or DID_ERROR.  Returns 0 or SCSI_MLQUEUE_HOST_BUSY.
 */
int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct sas_internal *i = to_sas_internal(host->transportt);
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_task *task;
	int res = 0;

	/* If the device fell off, no sense in issuing commands */
	if (test_bit(SAS_DEV_GONE, &dev->state)) {
		cmd->result = DID_BAD_TARGET << 16;
		goto out_done;
	}

	if (dev_is_sata(dev)) {
		spin_lock_irq(dev->sata_dev.ap->lock);
		res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
		spin_unlock_irq(dev->sata_dev.ap->lock);
		return res;
	}

	task = sas_create_task(cmd, dev, GFP_ATOMIC);
	if (!task)
		return SCSI_MLQUEUE_HOST_BUSY;

	res = i->dft->lldd_execute_task(task, GFP_ATOMIC);
	if (res)
		goto out_free_task;
	return 0;

out_free_task:
	pr_debug("lldd_execute_task returned: %d\n", res);
	ASSIGN_SAS_TASK(cmd, NULL);
	sas_free_task(task);
	if (res == -SAS_QUEUE_FULL)
		cmd->result = DID_SOFT_ERROR << 16; /* retry */
	else
		cmd->result = DID_ERROR << 16;
out_done:
	scsi_done(cmd);
	return 0;
}
EXPORT_SYMBOL_GPL(sas_queuecommand);
/*
 * sas_eh_finish_cmd - retire a command from within the error handler
 * @cmd: aborted command (its sas_task, if any, is freed here)
 *
 * Caller must hold no completion race: we only get here after the
 * task was actually aborted, so the LLDD will not complete it.
 * SATA commands are deferred to the ha->eh_ata_q so that libata EH
 * can process their qcs; SSP commands go straight to ha->eh_done_q.
 */
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_task *task = TO_SAS_TASK(cmd);

	/* At this point, we only get called following an actual abort
	 * of the task, so we should be guaranteed not to be racing with
	 * any completions from the LLD. Task is freed after this.
	 */
	sas_end_task(cmd, task);

	if (dev_is_sata(dev)) {
		/* defer commands to libata so that libata EH can
		 * handle ata qcs correctly
		 */
		list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
		return;
	}

	/* now finish the command and move it on to the error
	 * handler done list, this also takes it off the
	 * error handler pending list.
	 */
	scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
}

/*
 * sas_scsi_clear_queue_lu - finish every queued command for one LU
 * @error_q: eh work queue to scan
 * @my_cmd:  command identifying the target+LUN that was recovered
 *
 * Used after a successful LU recovery: everything else pending on the
 * same target/LUN is now moot and can be retired.
 */
static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
{
	struct scsi_cmnd *cmd, *n;

	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
		if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
		    cmd->device->lun == my_cmd->device->lun)
			sas_eh_finish_cmd(cmd);
	}
}

/*
 * sas_scsi_clear_queue_I_T - finish every queued command for one device
 * @error_q: eh work queue to scan
 * @dev:     domain device whose I_T nexus was just reset
 */
static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
				     struct domain_device *dev)
{
	struct scsi_cmnd *cmd, *n;

	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
		struct domain_device *x = cmd_to_domain_dev(cmd);

		if (x == dev)
			sas_eh_finish_cmd(cmd);
	}
}

/*
 * sas_scsi_clear_queue_port - finish every queued command for one port
 * @error_q: eh work queue to scan
 * @port:    asd_sas_port whose nexus was just cleared
 */
static void sas_scsi_clear_queue_port(struct list_head *error_q,
				      struct asd_sas_port *port)
{
	struct scsi_cmnd *cmd, *n;

	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
		struct domain_device *dev = cmd_to_domain_dev(cmd);
		struct asd_sas_port *x = dev->port;

		if (x == port)
			sas_eh_finish_cmd(cmd);
	}
}

/* outcome of sas_scsi_find_task(): where a task ended up after we
 * tried to abort (and possibly query) it
 */
enum task_disposition {
	TASK_IS_DONE,
	TASK_IS_ABORTED,
	TASK_IS_AT_LU,
	TASK_IS_NOT_AT_LU,
	TASK_ABORT_FAILED,
};

/*
 * sas_scsi_find_task - abort a task and classify the result
 * @task: task to hunt down
 *
 * Tries lldd_abort_task() up to 5 times.  After each attempt:
 * done -> TASK_IS_DONE, abort completed -> TASK_IS_ABORTED; otherwise
 * lldd_query_task() (if provided) decides whether the task is still
 * at the LU, gone, or unabortable.  Falls back to TASK_ABORT_FAILED
 * when the retries are exhausted.
 */
static enum task_disposition sas_scsi_find_task(struct sas_task *task)
{
	unsigned long flags;
	int i, res;
	struct sas_internal *si =
		to_sas_internal(task->dev->port->ha->shost->transportt);

	for (i = 0; i < 5; i++) {
		pr_notice("%s: aborting task 0x%p\n", __func__, task);
		res = si->dft->lldd_abort_task(task);

		spin_lock_irqsave(&task->task_state_lock, flags);
		if (task->task_state_flags & SAS_TASK_STATE_DONE) {
			spin_unlock_irqrestore(&task->task_state_lock, flags);
			pr_debug("%s: task 0x%p is done\n", __func__, task);
			return TASK_IS_DONE;
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		if (res == TMF_RESP_FUNC_COMPLETE) {
			pr_notice("%s: task 0x%p is aborted\n",
				  __func__, task);
			return TASK_IS_ABORTED;
		} else if (si->dft->lldd_query_task) {
			pr_notice("%s: querying task 0x%p\n", __func__, task);
			res = si->dft->lldd_query_task(task);
			switch (res) {
			case TMF_RESP_FUNC_SUCC:
				pr_notice("%s: task 0x%p at LU\n", __func__,
					  task);
				return TASK_IS_AT_LU;
			case TMF_RESP_FUNC_COMPLETE:
				pr_notice("%s: task 0x%p not at LU\n",
					  __func__, task);
				return TASK_IS_NOT_AT_LU;
			case TMF_RESP_FUNC_FAILED:
				pr_notice("%s: task 0x%p failed to abort\n",
					  __func__, task);
				return TASK_ABORT_FAILED;
			default:
				pr_notice("%s: task 0x%p result code %d not handled\n",
					  __func__, task, res);
			}
		}
	}
	return TASK_ABORT_FAILED;
}

/*
 * sas_recover_lu - escalating LU-level recovery
 * @dev: device holding the stuck task
 * @cmd: representative command (supplies the LUN)
 *
 * Tries abort-task-set, then clear-task-set, then lu-reset, each only
 * if the previous step failed and the LLDD implements it.  Returns
 * the last TMF result (TMF_RESP_FUNC_FAILED if nothing worked).
 */
static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
{
	int res = TMF_RESP_FUNC_FAILED;
	struct scsi_lun lun;
	struct sas_internal *i =
		to_sas_internal(dev->port->ha->shost->transportt);

	int_to_scsilun(cmd->device->lun, &lun);

	pr_notice("eh: device %016llx LUN 0x%llx has the task\n",
		  SAS_ADDR(dev->sas_addr),
		  cmd->device->lun);

	if (i->dft->lldd_abort_task_set)
		res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);

	if (res == TMF_RESP_FUNC_FAILED) {
		if (i->dft->lldd_clear_task_set)
			res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun);
	}

	if (res == TMF_RESP_FUNC_FAILED) {
		if (i->dft->lldd_lu_reset)
			res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
	}

	return res;
}

/*
 * sas_recover_I_T - reset the I_T nexus to @dev
 *
 * Returns the LLDD's TMF result, or TMF_RESP_FUNC_FAILED if the LLDD
 * does not implement lldd_I_T_nexus_reset.
 */
static int sas_recover_I_T(struct domain_device *dev)
{
	int res = TMF_RESP_FUNC_FAILED;
	struct sas_internal *i =
		to_sas_internal(dev->port->ha->shost->transportt);

	pr_notice("I_T nexus reset for dev %016llx\n",
		  SAS_ADDR(dev->sas_addr));

	if (i->dft->lldd_I_T_nexus_reset)
		res = i->dft->lldd_I_T_nexus_reset(dev);

	return res;
}

/* take a reference on the last known good phy for this device */
struct sas_phy *sas_get_local_phy(struct domain_device *dev)
{
	struct sas_ha_struct *ha = dev->port->ha;
	struct sas_phy *phy;
	unsigned long flags;

	/* a published domain device always has a valid phy, it may be
	 * stale, but it is never NULL
	 */
	BUG_ON(!dev->phy);

	spin_lock_irqsave(&ha->phy_port_lock, flags);
	phy = dev->phy;
	get_device(&phy->dev);
	spin_unlock_irqrestore(&ha->phy_port_lock, flags);

	return phy;
}
EXPORT_SYMBOL_GPL(sas_get_local_phy);

/*
 * sas_queue_reset - queue a reset for execution by the eh thread
 * @dev:        device to reset
 * @reset_type: SAS_DEV_LU_RESET or SAS_DEV_RESET (bit in dev->state)
 * @lun:        LUN for an LU reset (ignored for a device reset)
 *
 * Called when a reset is requested from outside the eh thread.  SATA
 * lun resets are promoted to an ata bus reset.  Otherwise mark the
 * device, put it on ha->eh_dev_q and kick scsi_schedule_eh(); the
 * actual reset is performed later by sas_eh_handle_resets().  The
 * retry loop covers the window where a previous eh pass for this
 * device is still in flight (SAS_DEV_EH_PENDING set).
 */
static int sas_queue_reset(struct domain_device *dev, int reset_type, u64 lun)
{
	struct sas_ha_struct *ha = dev->port->ha;
	int scheduled = 0, tries = 100;

	/* ata: promote lun reset to bus reset */
	if (dev_is_sata(dev)) {
		sas_ata_schedule_reset(dev);
		return SUCCESS;
	}

	while (!scheduled && tries--) {
		spin_lock_irq(&ha->lock);
		if (!test_bit(SAS_DEV_EH_PENDING, &dev->state) &&
		    !test_bit(reset_type, &dev->state)) {
			scheduled = 1;
			ha->eh_active++;
			list_add_tail(&dev->ssp_dev.eh_list_node, &ha->eh_dev_q);
			set_bit(SAS_DEV_EH_PENDING, &dev->state);
			set_bit(reset_type, &dev->state);
			int_to_scsilun(lun, &dev->ssp_dev.reset_lun);
			scsi_schedule_eh(ha->shost);
		}
		spin_unlock_irq(&ha->lock);

		if (scheduled)
			return SUCCESS;
	}

	pr_warn("%s reset of %s failed\n",
		reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
		dev_name(&dev->rphy->dev));

	return FAILED;
}

/*
 * sas_eh_abort_handler - scsi_host_template eh_abort_handler
 *
 * Asks the LLDD to abort the command's task directly.  SATA commands
 * can only be aborted from within scheduled host eh (async aborts are
 * refused).  Returns SUCCESS only if the LLDD reports the abort TMF
 * succeeded or completed.
 */
int sas_eh_abort_handler(struct scsi_cmnd *cmd)
{
	int res = TMF_RESP_FUNC_FAILED;
	struct sas_task *task = TO_SAS_TASK(cmd);
	struct Scsi_Host *host = cmd->device->host;
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_internal *i = to_sas_internal(host->transportt);
	unsigned long flags;

	if (!i->dft->lldd_abort_task)
		return FAILED;

	spin_lock_irqsave(host->host_lock, flags);
	/* We cannot do async aborts for SATA devices */
	if (dev_is_sata(dev) && !host->host_eh_scheduled) {
		spin_unlock_irqrestore(host->host_lock, flags);
		return FAILED;
	}
	spin_unlock_irqrestore(host->host_lock, flags);

	if (task)
		res = i->dft->lldd_abort_task(task);
	else
		pr_notice("no task to abort\n");
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
		return SUCCESS;

	return FAILED;
}
EXPORT_SYMBOL_GPL(sas_eh_abort_handler);

/* Attempt to send a LUN reset message to a device */
int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	int res;
	struct scsi_lun lun;
	struct Scsi_Host *host = cmd->device->host;
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_internal *i = to_sas_internal(host->transportt);

	/* outside the eh thread we may not issue TMFs directly;
	 * queue the reset for the eh thread instead */
	if (current != host->ehandler)
		return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun);

	int_to_scsilun(cmd->device->lun, &lun);

	if (!i->dft->lldd_lu_reset)
		return FAILED;

	res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
		return SUCCESS;

	return FAILED;
}
EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);

/*
 * sas_eh_target_reset_handler - scsi_host_template eh_target_reset_handler
 *
 * Outside the eh thread the reset is queued via sas_queue_reset();
 * inside it the LLDD's I_T nexus reset is issued directly.  -ENODEV
 * (device vanished) also counts as SUCCESS.
 */
int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
{
	int res;
	struct Scsi_Host *host = cmd->device->host;
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_internal *i = to_sas_internal(host->transportt);

	if (current != host->ehandler)
		return sas_queue_reset(dev, SAS_DEV_RESET, 0);

	if (!i->dft->lldd_I_T_nexus_reset)
		return FAILED;

	res = i->dft->lldd_I_T_nexus_reset(dev);
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE ||
	    res == -ENODEV)
		return SUCCESS;

	return FAILED;
}
EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler);

/* Try to reset a device */
static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
{
	int res;
	struct Scsi_Host *shost = cmd->device->host;

	/* device reset first, falling back to target reset */
	if (!shost->hostt->eh_device_reset_handler)
		goto try_target_reset;

	res = shost->hostt->eh_device_reset_handler(cmd);
	if (res == SUCCESS)
		return res;

try_target_reset:
	if (shost->hostt->eh_target_reset_handler)
		return shost->hostt->eh_target_reset_handler(cmd);

	return FAILED;
}

/*
 * sas_eh_handle_sas_errors - deal with commands that still own a sas_task
 * @shost:  host being recovered
 * @work_q: eh command queue (consumed/refilled here)
 *
 * Runs with SAS_HA_FROZEN set, so no new completions can steal tasks.
 * First retires commands whose task already completed (won the race),
 * then for each remaining command escalates: find/abort the task ->
 * LU recovery -> I_T nexus recovery -> device/target reset -> clear
 * nexus (port) -> clear nexus (ha).  Whenever a recovery step clears a
 * whole LU/device/port, the matching commands are purged from work_q
 * and the scan restarts from the top (goto Again).  On exit the
 * already-done commands and any deferred ata commands are spliced back
 * onto work_q for the caller.
 */
static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q)
{
	struct scsi_cmnd *cmd, *n;
	enum task_disposition res = TASK_IS_DONE;
	int tmf_resp, need_reset;
	struct sas_internal *i = to_sas_internal(shost->transportt);
	unsigned long flags;
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	LIST_HEAD(done);

	/* clean out any commands that won the completion vs eh race */
	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
		struct domain_device *dev = cmd_to_domain_dev(cmd);
		struct sas_task *task;

		spin_lock_irqsave(&dev->done_lock, flags);
		/* by this point the lldd has either observed
		 * SAS_HA_FROZEN and is leaving the task alone, or has
		 * won the race with eh and decided to complete it
		 */
		task = TO_SAS_TASK(cmd);
		spin_unlock_irqrestore(&dev->done_lock, flags);

		if (!task)
			list_move_tail(&cmd->eh_entry, &done);
	}

 Again:
	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
		struct sas_task *task = TO_SAS_TASK(cmd);

		list_del_init(&cmd->eh_entry);

		spin_lock_irqsave(&task->task_state_lock, flags);
		need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		if (need_reset) {
			/* LLDD flagged the task as needing a device reset:
			 * skip straight to LU recovery */
			pr_notice("%s: task 0x%p requests reset\n",
				  __func__, task);
			goto reset;
		}

		pr_debug("trying to find task 0x%p\n", task);
		res = sas_scsi_find_task(task);

		switch (res) {
		case TASK_IS_DONE:
			pr_notice("%s: task 0x%p is done\n", __func__,
				    task);
			sas_eh_finish_cmd(cmd);
			continue;
		case TASK_IS_ABORTED:
			pr_notice("%s: task 0x%p is aborted\n",
				  __func__, task);
			sas_eh_finish_cmd(cmd);
			continue;
		case TASK_IS_AT_LU:
			pr_info("task 0x%p is at LU: lu recover\n", task);
 reset:
			tmf_resp = sas_recover_lu(task->dev, cmd);
			if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
				pr_notice("dev %016llx LU 0x%llx is recovered\n",
					  SAS_ADDR(task->dev),
					  cmd->device->lun);
				sas_eh_finish_cmd(cmd);
				sas_scsi_clear_queue_lu(work_q, cmd);
				goto Again;
			}
			fallthrough;
		case TASK_IS_NOT_AT_LU:
		case TASK_ABORT_FAILED:
			pr_notice("task 0x%p is not at LU: I_T recover\n",
				  task);
			tmf_resp = sas_recover_I_T(task->dev);
			if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
			    tmf_resp == -ENODEV) {
				struct domain_device *dev = task->dev;
				pr_notice("I_T %016llx recovered\n",
					  SAS_ADDR(task->dev->sas_addr));
				sas_eh_finish_cmd(cmd);
				sas_scsi_clear_queue_I_T(work_q, dev);
				goto Again;
			}
			/* Hammer time :-) */
			try_to_reset_cmd_device(cmd);
			if (i->dft->lldd_clear_nexus_port) {
				struct asd_sas_port *port = task->dev->port;
				pr_debug("clearing nexus for port:%d\n",
					  port->id);
				res = i->dft->lldd_clear_nexus_port(port);
				if (res == TMF_RESP_FUNC_COMPLETE) {
					pr_notice("clear nexus port:%d succeeded\n",
						  port->id);
					sas_eh_finish_cmd(cmd);
					sas_scsi_clear_queue_port(work_q,
								  port);
					goto Again;
				}
			}
			if (i->dft->lldd_clear_nexus_ha) {
				pr_debug("clear nexus ha\n");
				res = i->dft->lldd_clear_nexus_ha(ha);
				if (res == TMF_RESP_FUNC_COMPLETE) {
					pr_notice("clear nexus ha succeeded\n");
					sas_eh_finish_cmd(cmd);
					goto clear_q;
				}
			}
			/* If we are here -- this means that no amount
			 * of effort could recover from errors. Quite
			 * possibly the HA just disappeared.
			 */
			pr_err("error from device %016llx, LUN 0x%llx couldn't be recovered in any way\n",
			       SAS_ADDR(task->dev->sas_addr),
			       cmd->device->lun);

			sas_eh_finish_cmd(cmd);
			goto clear_q;
		}
	}
 out:
	list_splice_tail(&done, work_q);
	list_splice_tail_init(&ha->eh_ata_q, work_q);
	return;

 clear_q:
	pr_debug("--- Exit %s -- clear_q\n", __func__);
	list_for_each_entry_safe(cmd, n, work_q, eh_entry)
		sas_eh_finish_cmd(cmd);
	goto out;
}

/*
 * sas_eh_handle_resets - drain the directed-reset queue
 * @shost: host whose ha->eh_dev_q is processed
 *
 * Executes the LU/device resets queued by sas_queue_reset().  The
 * ha->lock is dropped around each LLDD reset call; a kref is held on
 * the device across that window.  SAS_DEV_EH_PENDING is cleared and
 * eh_active decremented only after the reset ran, which is what
 * sas_scsi_recover_host() checks to decide whether to loop again.
 */
static void sas_eh_handle_resets(struct Scsi_Host *shost)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_internal *i = to_sas_internal(shost->transportt);

	/* handle directed resets to sas devices */
	spin_lock_irq(&ha->lock);
	while (!list_empty(&ha->eh_dev_q)) {
		struct domain_device *dev;
		struct ssp_device *ssp;

		ssp = list_entry(ha->eh_dev_q.next, typeof(*ssp), eh_list_node);
		list_del_init(&ssp->eh_list_node);
		dev = container_of(ssp, typeof(*dev), ssp_dev);
		kref_get(&dev->kref);
		WARN_ONCE(dev_is_sata(dev), "ssp reset to ata device?\n");

		spin_unlock_irq(&ha->lock);

		if (test_and_clear_bit(SAS_DEV_LU_RESET, &dev->state))
			i->dft->lldd_lu_reset(dev, ssp->reset_lun.scsi_lun);

		if (test_and_clear_bit(SAS_DEV_RESET, &dev->state))
			i->dft->lldd_I_T_nexus_reset(dev);

		sas_put_device(dev);
		spin_lock_irq(&ha->lock);
		clear_bit(SAS_DEV_EH_PENDING, &dev->state);
		ha->eh_active--;
	}
	spin_unlock_irq(&ha->lock);
}


/*
 * sas_scsi_recover_host - libsas error-recovery strategy handler
 * @shost: host to recover
 *
 * Top-level eh entry point.  With SAS_HA_FROZEN set it first handles
 * all commands that still own a sas_task, then runs the generic SCSI
 * eh stages (sense, ready devs) on what remains, executes queued
 * directed resets, links into libata eh, and flushes the done queue.
 * Repeats while new eh work (ha->eh_active) was scheduled during the
 * pass.
 */
void sas_scsi_recover_host(struct Scsi_Host *shost)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	LIST_HEAD(eh_work_q);
	int tries = 0;
	bool retry;

retry:
	tries++;
	retry = true;
	spin_lock_irq(shost->host_lock);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irq(shost->host_lock);

	pr_notice("Enter %s busy: %d failed: %d\n",
		  __func__, scsi_host_busy(shost), shost->host_failed);
	/*
	 * Deal with commands that still have SAS tasks (i.e. they didn't
	 * complete via the normal sas_task completion mechanism),
	 * SAS_HA_FROZEN gives eh dominion over all sas_task completion.
	 */
	set_bit(SAS_HA_FROZEN, &ha->state);
	sas_eh_handle_sas_errors(shost, &eh_work_q);
	clear_bit(SAS_HA_FROZEN, &ha->state);
	if (list_empty(&eh_work_q))
		goto out;

	/*
	 * Now deal with SCSI commands that completed ok but have an error
	 * code (and hopefully sense data) attached.  This is roughly what
	 * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
	 * command we see here has no sas_task and is thus unknown to the HA.
	 */
	sas_ata_eh(shost, &eh_work_q);
	if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
		scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);

out:
	sas_eh_handle_resets(shost);

	/* now link into libata eh --- if we have any ata devices */
	sas_ata_strategy_handler(shost);

	scsi_eh_flush_done_q(&ha->eh_done_q);

	/* check if any new eh work was scheduled during the last run */
	spin_lock_irq(&ha->lock);
	if (ha->eh_active == 0) {
		shost->host_eh_scheduled = 0;
		retry = false;
	}
	spin_unlock_irq(&ha->lock);

	if (retry)
		goto retry;

	pr_notice("--- Exit %s: busy: %d failed: %d tries: %d\n",
		  __func__, scsi_host_busy(shost),
		  shost->host_failed, tries);
}

/* ioctls are only supported for SATA devices, via libata */
int sas_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);

	if (dev_is_sata(dev))
		return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg);

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(sas_ioctl);

/*
 * sas_find_dev_by_rphy - map a sas_rphy back to its domain_device
 *
 * Walks every port's dev_list under phy_port_lock (and the per-port
 * dev_list_lock) looking for the device whose rphy matches.  Returns
 * NULL when not found.  NOTE: no reference is taken on the returned
 * device; callers that keep it must kref_get() it themselves.
 */
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
{
	struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct domain_device *found_dev = NULL;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ha->phy_port_lock, flags);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_port *port = ha->sas_port[i];
		struct domain_device *dev;

		spin_lock(&port->dev_list_lock);
		list_for_each_entry(dev, &port->dev_list, dev_list_node) {
			if (rphy == dev->rphy) {
				found_dev = dev;
				spin_unlock(&port->dev_list_lock);
				goto found;
			}
		}
		spin_unlock(&port->dev_list_lock);
	}
 found:
	spin_unlock_irqrestore(&ha->phy_port_lock, flags);

	return found_dev;
}

/* scsi_host_template target_alloc: pin the domain device behind the
 * target for the target's lifetime (released in sas_target_destroy)
 */
int sas_target_alloc(struct scsi_target *starget)
{
	struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
	struct domain_device *found_dev = sas_find_dev_by_rphy(rphy);

	if (!found_dev)
		return -ENODEV;

	kref_get(&found_dev->kref);
	starget->hostdata = found_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(sas_target_alloc);

/* default queue depth for tagged-capable SSP devices */
#define SAS_DEF_QD 256

/*
 * sas_sdev_configure - scsi_host_template sdev_configure for libsas
 *
 * SATA devices are configured by libata; SSP end devices get their
 * port mode page read, a queue depth (SAS_DEF_QD if TCQ-capable,
 * else 1), and allow_restart set.
 */
int sas_sdev_configure(struct scsi_device *scsi_dev, struct queue_limits *lim)
{
	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);

	BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);

	if (dev_is_sata(dev)) {
		ata_sas_sdev_configure(scsi_dev, lim, dev->sata_dev.ap);
		return 0;
	}

	sas_read_port_mode_page(scsi_dev);

	if (scsi_dev->tagged_supported) {
		scsi_change_queue_depth(scsi_dev, SAS_DEF_QD);
	} else {
		pr_notice("device %016llx, LUN 0x%llx doesn't support TCQ\n",
			  SAS_ADDR(dev->sas_addr), scsi_dev->lun);
		scsi_change_queue_depth(scsi_dev, 1);
	}

	scsi_dev->allow_restart = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(sas_sdev_configure);

/* scsi_host_template change_queue_depth: SATA goes through libata;
 * non-TCQ SSP devices are clamped to depth 1
 */
int sas_change_queue_depth(struct scsi_device *sdev, int depth)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);

	if (dev_is_sata(dev))
		return ata_change_queue_depth(dev->sata_dev.ap, sdev, depth);

	if (!sdev->tagged_supported)
		depth = 1;
	return scsi_change_queue_depth(sdev, depth);
}
EXPORT_SYMBOL_GPL(sas_change_queue_depth);

/* scsi_host_template bios_param: report a fixed 255 heads / 63
 * sectors geometry, cylinders derived from capacity
 */
int sas_bios_param(struct scsi_device *scsi_dev,
		   struct gendisk *unused,
		   sector_t capacity, int *hsc)
{
	hsc[0] = 255;
	hsc[1] = 63;
	sector_div(capacity, 255*63);
	hsc[2] = capacity;

	return 0;
}
EXPORT_SYMBOL_GPL(sas_bios_param);

/* completion callback for internal (slow-path) tasks: cancel the
 * timeout timer and wake the waiter
 */
void sas_task_internal_done(struct sas_task *task)
{
	timer_delete(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}

/* timer callback for internal tasks: if the task has not completed,
 * mark it aborted and wake the waiter ourselves
 */
void sas_task_internal_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = timer_container_of(slow, t, timer);
	struct sas_task *task = slow->task;
	bool is_completed = true;
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		is_completed = false;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (!is_completed)
		complete(&task->slow_task->completion);
}

/* per-attempt timeout and retry budget for internal tasks/TMFs */
#define TASK_TIMEOUT			(20 * HZ)
#define TASK_RETRY			3

/*
 * sas_execute_internal_abort - issue an internal abort to the LLDD
 * @device: target device
 * @type:   single-command or whole-device abort
 * @tag:    tag of the command to abort (SCSI_NO_TAG for device scope)
 * @qid:    hw queue the aborted command was issued on
 * @data:   opaque cookie passed to lldd_abort_timeout
 *
 * Allocates a slow task with a TASK_TIMEOUT timer and retries up to
 * TASK_RETRY times.  On timeout (SAS_TASK_STATE_ABORTED) the LLDD's
 * lldd_abort_timeout hook decides whether to keep retrying.  Returns
 * a TMF_RESP_* code, -EIO on timeout, or a -errno from submission.
 */
static int sas_execute_internal_abort(struct domain_device *device,
				      enum sas_internal_abort type, u16 tag,
				      unsigned int qid, void *data)
{
	struct sas_ha_struct *ha = device->port->ha;
	struct sas_internal *i = to_sas_internal(ha->shost->transportt);
	struct sas_task *task = NULL;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = SAS_PROTOCOL_INTERNAL_ABORT;
		task->task_done = sas_task_internal_done;
		task->slow_task->timer.function = sas_task_internal_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
		add_timer(&task->slow_task->timer);

		task->abort_task.tag = tag;
		task->abort_task.type = type;
		task->abort_task.qid = qid;

		res = i->dft->lldd_execute_task(task, GFP_KERNEL);
		if (res) {
			timer_delete_sync(&task->slow_task->timer);
			pr_err("Executing internal abort failed %016llx (%d)\n",
			       SAS_ADDR(device->sas_addr), res);
			break;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;

		/* Even if the internal abort timed out, return direct. */
		if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
			bool quit = true;

			if (i->dft->lldd_abort_timeout)
				quit = i->dft->lldd_abort_timeout(task, data);
			else
				pr_err("Internal abort: timeout %016llx\n",
				       SAS_ADDR(device->sas_addr));
			res = -EIO;
			if (quit)
				break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		pr_err("Internal abort: task to dev %016llx response: 0x%x status 0x%x\n",
		       SAS_ADDR(device->sas_addr), task->task_status.resp,
		       task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
	BUG_ON(retry == TASK_RETRY && task != NULL);
	sas_free_task(task);
	return res;
}

/* abort one outstanding command identified by @tag on @device */
int sas_execute_internal_abort_single(struct domain_device *device, u16 tag,
				      unsigned int qid, void *data)
{
	return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_SINGLE,
					  tag, qid, data);
}
EXPORT_SYMBOL_GPL(sas_execute_internal_abort_single);

/* abort all outstanding commands on @device */
int sas_execute_internal_abort_dev(struct domain_device *device,
				   unsigned int qid, void *data)
{
	return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_DEV,
					  SCSI_NO_TAG, qid, data);
}
EXPORT_SYMBOL_GPL(sas_execute_internal_abort_dev);

/*
 * sas_execute_tmf - execute a task management function
 * @device:       target device
 * @parameter:    FIS (SATA) or sas_ssp_task (SSP) describing the TMF
 * @para_len:     size of @parameter
 * @force_phy_id: phy to force the frame out of, or negative for any
 * @tmf:          TMF descriptor attached to the task
 *
 * Same retry/timeout scheme as sas_execute_internal_abort().  Returns
 * TMF_RESP_FUNC_COMPLETE/SUCC on success, the residual byte count on
 * SAS_DATA_UNDERRUN, -EMSGSIZE on overrun, -EIO on open reject,
 * TMF_RESP_FUNC_FAILED or a submission -errno otherwise.
 */
int sas_execute_tmf(struct domain_device *device, void *parameter,
		    int para_len, int force_phy_id,
		    struct sas_tmf_task *tmf)
{
	struct sas_task *task;
	struct sas_internal *i =
		to_sas_internal(device->port->ha->shost->transportt);
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			if (force_phy_id >= 0) {
				task->ata_task.force_phy = true;
				task->ata_task.force_phy_id = force_phy_id;
			}
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}

		task->task_done = sas_task_internal_done;
		task->tmf = tmf;

		task->slow_task->timer.function = sas_task_internal_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
		add_timer(&task->slow_task->timer);

		res = i->dft->lldd_execute_task(task, GFP_KERNEL);
		if (res) {
			timer_delete_sync(&task->slow_task->timer);
			pr_err("executing TMF task failed %016llx (%d)\n",
			       SAS_ADDR(device->sas_addr), res);
			break;
		}

		wait_for_completion(&task->slow_task->completion);

		if (i->dft->lldd_tmf_exec_complete)
			i->dft->lldd_tmf_exec_complete(device);

		res = TMF_RESP_FUNC_FAILED;

		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				pr_err("TMF task timeout for %016llx and not done\n",
				       SAS_ADDR(device->sas_addr));
				if (i->dft->lldd_tmf_aborted)
					i->dft->lldd_tmf_aborted(task);
				break;
			}
			pr_warn("TMF task timeout for %016llx and done\n",
				SAS_ADDR(device->sas_addr));
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			pr_warn("TMF task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
				SAS_ADDR(device->sas_addr),
				task->task_status.resp,
				task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			pr_warn("TMF task blocked task error %016llx\n",
				SAS_ADDR(device->sas_addr));
			res = -EMSGSIZE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_OPEN_REJECT) {
			pr_warn("TMF task open reject failed %016llx\n",
				SAS_ADDR(device->sas_addr));
			res = -EIO;
		} else {
			pr_warn("TMF task to dev %016llx resp: 0x%x status 0x%x\n",
				SAS_ADDR(device->sas_addr),
				task->task_status.resp,
				task->task_status.stat);
		}
		sas_free_task(task);
		task = NULL;
	}

	if (retry == TASK_RETRY)
		pr_warn("executing TMF for %016llx failed after %d attempts!\n",
			SAS_ADDR(device->sas_addr), TASK_RETRY);
	sas_free_task(task);

	return res;
}

/* issue an SSP TMF addressed to @lun; rejects non-SSP devices */
static int sas_execute_ssp_tmf(struct domain_device *device, u8 *lun,
			       struct sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return sas_execute_tmf(device, &ssp_task, sizeof(ssp_task), -1, tmf);
}

/* ABORT TASK SET TMF to @lun of @dev */
int sas_abort_task_set(struct domain_device *dev, u8 *lun)
{
	struct sas_tmf_task tmf_task = {
		.tmf = TMF_ABORT_TASK_SET,
	};

	return sas_execute_ssp_tmf(dev, lun, &tmf_task);
}
EXPORT_SYMBOL_GPL(sas_abort_task_set);

/* CLEAR TASK SET TMF to @lun of @dev */
int sas_clear_task_set(struct domain_device *dev, u8 *lun)
{
	struct sas_tmf_task tmf_task = {
		.tmf = TMF_CLEAR_TASK_SET,
	};

	return sas_execute_ssp_tmf(dev, lun, &tmf_task);
}
EXPORT_SYMBOL_GPL(sas_clear_task_set);

/* LOGICAL UNIT RESET TMF to @lun of @dev */
int sas_lu_reset(struct domain_device *dev, u8 *lun)
{
	struct sas_tmf_task tmf_task = {
		.tmf = TMF_LU_RESET,
	};

	return sas_execute_ssp_tmf(dev, lun, &tmf_task);
}
EXPORT_SYMBOL_GPL(sas_lu_reset);

/* QUERY TASK TMF for the command behind @task, identified by @tag */
int sas_query_task(struct sas_task *task, u16 tag)
{
	struct sas_tmf_task tmf_task = {
		.tmf = TMF_QUERY_TASK,
		.tag_of_task_to_be_managed = tag,
	};
	struct scsi_cmnd *cmnd = task->uldd_task;
	struct domain_device *dev = task->dev;
	struct scsi_lun lun;

	int_to_scsilun(cmnd->device->lun, &lun);

	return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
}
EXPORT_SYMBOL_GPL(sas_query_task);

/* ABORT TASK TMF for the command behind @task, identified by @tag */
int sas_abort_task(struct sas_task *task, u16 tag)
{
	struct sas_tmf_task tmf_task = {
		.tmf = TMF_ABORT_TASK,
		.tag_of_task_to_be_managed = tag,
	};
	struct scsi_cmnd *cmnd = task->uldd_task;
	struct domain_device *dev = task->dev;
	struct scsi_lun lun;

	int_to_scsilun(cmnd->device->lun, &lun);

	return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
}
EXPORT_SYMBOL_GPL(sas_abort_task);

/*
 * Tell an upper layer that it needs to initiate an abort for a given task.
 * This should only ever be called by an LLDD.
 */
void sas_task_abort(struct sas_task *task)
{
	struct scsi_cmnd *sc = task->uldd_task;

	/* Escape for libsas internal commands */
	if (!sc) {
		struct sas_task_slow *slow = task->slow_task;

		if (!slow)
			return;
		/* fire the timeout handler ourselves iff we managed to
		 * cancel the pending timer (otherwise it is already
		 * running or has run) */
		if (!timer_delete(&slow->timer))
			return;
		slow->timer.function(&slow->timer);
		return;
	}

	if (dev_is_sata(task->dev))
		sas_ata_task_abort(task);
	else
		blk_abort_request(scsi_cmd_to_rq(sc));
}
EXPORT_SYMBOL_GPL(sas_task_abort);

/* scsi_host_template sdev_init: SATA devices only support LUN 0 */
int sas_sdev_init(struct scsi_device *sdev)
{
	if (dev_is_sata(sdev_to_domain_dev(sdev)) && sdev->lun)
		return -ENXIO;

	return 0;
}
EXPORT_SYMBOL_GPL(sas_sdev_init);

/* scsi_host_template target_destroy: drop the reference taken in
 * sas_target_alloc()
 */
void sas_target_destroy(struct scsi_target *starget)
{
	struct domain_device *found_dev = starget->hostdata;

	if (!found_dev)
		return;

	starget->hostdata = NULL;
	sas_put_device(found_dev);
}
EXPORT_SYMBOL_GPL(sas_target_destroy);

/* minimum firmware blob size: a SAS address as 16 hex characters */
#define SAS_STRING_ADDR_SIZE	16

/*
 * sas_request_addr - load a SAS address from the "sas_addr" firmware file
 * @shost: host requesting the address
 * @addr:  output buffer for the binary address
 *
 * The firmware blob holds the address as ASCII hex; it is decoded
 * with hex2bin().  Returns 0 on success, -ENODEV if the blob is too
 * short, or a request_firmware()/hex2bin() error.
 */
int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
{
	int res;
	const struct firmware *fw;

	res = request_firmware(&fw, "sas_addr", &shost->shost_gendev);
	if (res)
		return res;

	if (fw->size < SAS_STRING_ADDR_SIZE) {
		res = -ENODEV;
		goto out;
	}

	res = hex2bin(addr, fw->data, strnlen(fw->data, SAS_ADDR_SIZE * 2) / 2);
	if (res)
		goto out;

out:
	release_firmware(fw);
	return res;
}
EXPORT_SYMBOL_GPL(sas_request_addr);