1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (c) 2015 Linaro Ltd. 4 * Copyright (c) 2015 Hisilicon Limited. 5 */ 6 7 #include "hisi_sas.h" 8 #define DRV_NAME "hisi_sas" 9 10 #define DEV_IS_GONE(dev) \ 11 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED)) 12 13 static int hisi_sas_softreset_ata_disk(struct domain_device *device); 14 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 15 void *funcdata); 16 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 17 struct domain_device *device); 18 static void hisi_sas_dev_gone(struct domain_device *device); 19 20 struct hisi_sas_internal_abort_data { 21 bool rst_ha_timeout; /* reset the HA for timeout */ 22 }; 23 24 u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction) 25 { 26 switch (fis->command) { 27 case ATA_CMD_FPDMA_WRITE: 28 case ATA_CMD_FPDMA_READ: 29 case ATA_CMD_FPDMA_RECV: 30 case ATA_CMD_FPDMA_SEND: 31 case ATA_CMD_NCQ_NON_DATA: 32 return HISI_SAS_SATA_PROTOCOL_FPDMA; 33 34 case ATA_CMD_DOWNLOAD_MICRO: 35 case ATA_CMD_ID_ATA: 36 case ATA_CMD_PMP_READ: 37 case ATA_CMD_READ_LOG_EXT: 38 case ATA_CMD_PIO_READ: 39 case ATA_CMD_PIO_READ_EXT: 40 case ATA_CMD_PMP_WRITE: 41 case ATA_CMD_WRITE_LOG_EXT: 42 case ATA_CMD_PIO_WRITE: 43 case ATA_CMD_PIO_WRITE_EXT: 44 return HISI_SAS_SATA_PROTOCOL_PIO; 45 46 case ATA_CMD_DSM: 47 case ATA_CMD_DOWNLOAD_MICRO_DMA: 48 case ATA_CMD_PMP_READ_DMA: 49 case ATA_CMD_PMP_WRITE_DMA: 50 case ATA_CMD_READ: 51 case ATA_CMD_READ_EXT: 52 case ATA_CMD_READ_LOG_DMA_EXT: 53 case ATA_CMD_READ_STREAM_DMA_EXT: 54 case ATA_CMD_TRUSTED_RCV_DMA: 55 case ATA_CMD_TRUSTED_SND_DMA: 56 case ATA_CMD_WRITE: 57 case ATA_CMD_WRITE_EXT: 58 case ATA_CMD_WRITE_FUA_EXT: 59 case ATA_CMD_WRITE_QUEUED: 60 case ATA_CMD_WRITE_LOG_DMA_EXT: 61 case ATA_CMD_WRITE_STREAM_DMA_EXT: 62 case ATA_CMD_ZAC_MGMT_IN: 63 return HISI_SAS_SATA_PROTOCOL_DMA; 64 65 case ATA_CMD_CHK_POWER: 66 case ATA_CMD_DEV_RESET: 67 case ATA_CMD_EDD: 68 case ATA_CMD_FLUSH: 69 case ATA_CMD_FLUSH_EXT: 70 case ATA_CMD_VERIFY: 71 case ATA_CMD_VERIFY_EXT: 72 case ATA_CMD_SET_FEATURES: 73 case ATA_CMD_STANDBY: 74 case ATA_CMD_STANDBYNOW1: 75 case ATA_CMD_ZAC_MGMT_OUT: 76 return HISI_SAS_SATA_PROTOCOL_NONDATA; 77 78 case ATA_CMD_SET_MAX: 79 switch (fis->features) { 80 case ATA_SET_MAX_PASSWD: 81 case ATA_SET_MAX_LOCK: 82 return HISI_SAS_SATA_PROTOCOL_PIO; 83 84 case ATA_SET_MAX_PASSWD_DMA: 85 case ATA_SET_MAX_UNLOCK_DMA: 86 return HISI_SAS_SATA_PROTOCOL_DMA; 87 88 default: 89 return HISI_SAS_SATA_PROTOCOL_NONDATA; 90 } 91 92 default: 93 { 94 if (direction == DMA_NONE) 95 return HISI_SAS_SATA_PROTOCOL_NONDATA; 96 return HISI_SAS_SATA_PROTOCOL_PIO; 97 } 98 } 99 } 100 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); 101 102 void hisi_sas_sata_done(struct sas_task *task, 103 struct hisi_sas_slot *slot) 104 { 105 struct task_status_struct *ts = &task->task_status; 106 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf; 107 struct hisi_sas_status_buffer *status_buf = 108 hisi_sas_status_buf_addr_mem(slot); 109 u8 *iu = &status_buf->iu[0]; 110 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu; 111 112 resp->frame_len = sizeof(struct dev_to_host_fis); 113 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); 114 115 ts->buf_valid_size = sizeof(*resp); 116 } 117 EXPORT_SYMBOL_GPL(hisi_sas_sata_done); 118 119 /* 120 * This function assumes linkrate mask fits in 8 bits, which it 121 * does for all HW versions supported. 122 */ 123 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max) 124 { 125 u8 rate = 0; 126 int i; 127 128 max -= SAS_LINK_RATE_1_5_GBPS; 129 for (i = 0; i <= max; i++) 130 rate |= 1 << (i * 2); 131 return rate; 132 } 133 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask); 134 135 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) 136 { 137 return device->port->ha->lldd_ha; 138 } 139 140 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port) 141 { 142 return container_of(sas_port, struct hisi_sas_port, sas_port); 143 } 144 EXPORT_SYMBOL_GPL(to_hisi_sas_port); 145 146 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba) 147 { 148 int phy_no; 149 150 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) 151 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 152 } 153 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys); 154 155 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) 156 { 157 void *bitmap = hisi_hba->slot_index_tags; 158 159 __clear_bit(slot_idx, bitmap); 160 } 161 162 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx) 163 { 164 if (hisi_hba->hw->slot_index_alloc || 165 slot_idx < HISI_SAS_RESERVED_IPTT) { 166 spin_lock(&hisi_hba->lock); 167 hisi_sas_slot_index_clear(hisi_hba, slot_idx); 168 spin_unlock(&hisi_hba->lock); 169 } 170 } 171 172 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx) 173 { 174 void *bitmap = hisi_hba->slot_index_tags; 175 176 __set_bit(slot_idx, bitmap); 177 } 178 179 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, 180 struct request *rq) 181 { 182 int index; 183 void *bitmap = hisi_hba->slot_index_tags; 184 185 if (rq) 186 return rq->tag + HISI_SAS_RESERVED_IPTT; 187 188 spin_lock(&hisi_hba->lock); 189 index = find_next_zero_bit(bitmap, HISI_SAS_RESERVED_IPTT, 190 hisi_hba->last_slot_index + 1); 191 if (index >= HISI_SAS_RESERVED_IPTT) { 192 index = find_next_zero_bit(bitmap, 193 HISI_SAS_RESERVED_IPTT, 194 0); 195 if (index >= HISI_SAS_RESERVED_IPTT) { 196 spin_unlock(&hisi_hba->lock); 197 return -SAS_QUEUE_FULL; 198 } 199 } 200 hisi_sas_slot_index_set(hisi_hba, index); 201 hisi_hba->last_slot_index = index; 202 spin_unlock(&hisi_hba->lock); 203 204 return index; 205 } 206 207 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, 208 struct hisi_sas_slot *slot, bool need_lock) 209 { 210 int device_id = slot->device_id; 211 struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id]; 212 213 if (task) { 214 struct device *dev = hisi_hba->dev; 215 216 if (!task->lldd_task) 217 return; 218 219 task->lldd_task = NULL; 220 221 if (!sas_protocol_ata(task->task_proto)) { 222 if (slot->n_elem) { 223 if (task->task_proto & SAS_PROTOCOL_SSP) 224 dma_unmap_sg(dev, task->scatter, 225 task->num_scatter, 226 task->data_dir); 227 else 228 dma_unmap_sg(dev, &task->smp_task.smp_req, 229 1, DMA_TO_DEVICE); 230 } 231 if (slot->n_elem_dif) { 232 struct sas_ssp_task *ssp_task = &task->ssp_task; 233 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 234 235 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 236 scsi_prot_sg_count(scsi_cmnd), 237 task->data_dir); 238 } 239 } 240 } 241 242 if (need_lock) { 243 spin_lock(&sas_dev->lock); 244 list_del_init(&slot->entry); 245 spin_unlock(&sas_dev->lock); 246 } else { 247 list_del_init(&slot->entry); 248 } 249 250 memset(slot, 0, offsetof(struct hisi_sas_slot, buf)); 251 252 hisi_sas_slot_index_free(hisi_hba, slot->idx); 253 } 254 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); 255 256 static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba, 257 struct hisi_sas_slot *slot) 258 { 259 hisi_hba->hw->prep_smp(hisi_hba, slot); 260 } 261 262 static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba, 263 struct hisi_sas_slot *slot) 264 { 265 hisi_hba->hw->prep_ssp(hisi_hba, slot); 266 } 267 268 static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba, 269 struct hisi_sas_slot *slot) 270 { 271 hisi_hba->hw->prep_stp(hisi_hba, slot); 272 } 273 274 static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, 275 struct hisi_sas_slot *slot) 276 { 277 hisi_hba->hw->prep_abort(hisi_hba, slot); 278 } 279 280 static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba, 281 struct sas_task *task, int n_elem) 282 { 283 struct device *dev = hisi_hba->dev; 284 285 if (!sas_protocol_ata(task->task_proto) && n_elem) { 286 if (task->num_scatter) { 287 dma_unmap_sg(dev, task->scatter, task->num_scatter, 288 task->data_dir); 289 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 290 dma_unmap_sg(dev, &task->smp_task.smp_req, 291 1, DMA_TO_DEVICE); 292 } 293 } 294 } 295 296 static int hisi_sas_dma_map(struct hisi_hba *hisi_hba, 297 struct sas_task *task, int *n_elem) 298 { 299 struct device *dev = hisi_hba->dev; 300 int rc; 301 302 if (sas_protocol_ata(task->task_proto)) { 303 *n_elem = task->num_scatter; 304 } else { 305 unsigned int req_len; 306 307 if (task->num_scatter) { 308 *n_elem = dma_map_sg(dev, task->scatter, 309 task->num_scatter, task->data_dir); 310 if (!*n_elem) { 311 rc = -ENOMEM; 312 goto prep_out; 313 } 314 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 315 *n_elem = dma_map_sg(dev, &task->smp_task.smp_req, 316 1, DMA_TO_DEVICE); 317 if (!*n_elem) { 318 rc = -ENOMEM; 319 goto prep_out; 320 } 321 req_len = sg_dma_len(&task->smp_task.smp_req); 322 if (req_len & 0x3) { 323 rc = -EINVAL; 324 goto err_out_dma_unmap; 325 } 326 } 327 } 328 329 if (*n_elem > HISI_SAS_SGE_PAGE_CNT) { 330 dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n", 331 *n_elem); 332 rc = -EINVAL; 333 goto err_out_dma_unmap; 334 } 335 return 0; 336 337 err_out_dma_unmap: 338 /* It would be better to call dma_unmap_sg() here, but it's messy */ 339 hisi_sas_dma_unmap(hisi_hba, task, *n_elem); 340 prep_out: 341 return rc; 342 } 343 344 static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba, 345 struct sas_task *task, int n_elem_dif) 346 { 347 struct device *dev = hisi_hba->dev; 348 349 if (n_elem_dif) { 350 struct sas_ssp_task *ssp_task = &task->ssp_task; 351 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 352 353 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 354 scsi_prot_sg_count(scsi_cmnd), 355 task->data_dir); 356 } 357 } 358 359 static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba, 360 int *n_elem_dif, struct sas_task *task) 361 { 362 struct device *dev = hisi_hba->dev; 363 struct sas_ssp_task *ssp_task; 364 struct scsi_cmnd *scsi_cmnd; 365 int rc; 366 367 if (task->num_scatter) { 368 ssp_task = &task->ssp_task; 369 scsi_cmnd = ssp_task->cmd; 370 371 if (scsi_prot_sg_count(scsi_cmnd)) { 372 *n_elem_dif = dma_map_sg(dev, 373 scsi_prot_sglist(scsi_cmnd), 374 scsi_prot_sg_count(scsi_cmnd), 375 task->data_dir); 376 377 if (!*n_elem_dif) 378 return -ENOMEM; 379 380 if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) { 381 dev_err(dev, "task prep: n_elem_dif(%d) too large\n", 382 *n_elem_dif); 383 rc = -EINVAL; 384 goto err_out_dif_dma_unmap; 385 } 386 } 387 } 388 389 return 0; 390 391 err_out_dif_dma_unmap: 392 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 393 scsi_prot_sg_count(scsi_cmnd), task->data_dir); 394 return rc; 395 } 396 397 static 398 void hisi_sas_task_deliver(struct hisi_hba *hisi_hba, 399 struct hisi_sas_slot *slot, 400 struct hisi_sas_dq *dq, 401 struct hisi_sas_device *sas_dev) 402 { 403 struct hisi_sas_cmd_hdr *cmd_hdr_base; 404 int dlvry_queue_slot, dlvry_queue; 405 struct sas_task *task = slot->task; 406 int wr_q_index; 407 408 spin_lock(&dq->lock); 409 wr_q_index = dq->wr_point; 410 dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; 411 list_add_tail(&slot->delivery, &dq->list); 412 spin_unlock(&dq->lock); 413 spin_lock(&sas_dev->lock); 414 list_add_tail(&slot->entry, &sas_dev->list); 415 spin_unlock(&sas_dev->lock); 416 417 dlvry_queue = dq->id; 418 dlvry_queue_slot = wr_q_index; 419 420 slot->device_id = sas_dev->device_id; 421 slot->dlvry_queue = dlvry_queue; 422 slot->dlvry_queue_slot = dlvry_queue_slot; 423 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; 424 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; 425 426 task->lldd_task = slot; 427 428 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 429 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); 430 memset(hisi_sas_status_buf_addr_mem(slot), 0, 431 sizeof(struct hisi_sas_err_record)); 432 433 switch (task->task_proto) { 434 case SAS_PROTOCOL_SMP: 435 hisi_sas_task_prep_smp(hisi_hba, slot); 436 break; 437 case SAS_PROTOCOL_SSP: 438 hisi_sas_task_prep_ssp(hisi_hba, slot); 439 break; 440 case SAS_PROTOCOL_SATA: 441 case SAS_PROTOCOL_STP: 442 case SAS_PROTOCOL_STP_ALL: 443 hisi_sas_task_prep_ata(hisi_hba, slot); 444 break; 445 case SAS_PROTOCOL_INTERNAL_ABORT: 446 hisi_sas_task_prep_abort(hisi_hba, slot); 447 break; 448 default: 449 return; 450 } 451 452 /* Make slot memories observable before marking as ready */ 453 smp_wmb(); 454 WRITE_ONCE(slot->ready, 1); 455 456 spin_lock(&dq->lock); 457 hisi_hba->hw->start_delivery(dq); 458 spin_unlock(&dq->lock); 459 } 460 461 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) 462 { 463 int n_elem = 0, n_elem_dif = 0; 464 struct domain_device *device = task->dev; 465 struct asd_sas_port *sas_port = device->port; 466 struct hisi_sas_device *sas_dev = device->lldd_dev; 467 bool internal_abort = sas_is_internal_abort(task); 468 struct hisi_sas_dq *dq = NULL; 469 struct hisi_sas_port *port; 470 struct hisi_hba *hisi_hba; 471 struct hisi_sas_slot *slot; 472 struct request *rq = NULL; 473 struct device *dev; 474 int rc; 475 476 if (!sas_port) { 477 struct task_status_struct *ts = &task->task_status; 478 479 ts->resp = SAS_TASK_UNDELIVERED; 480 ts->stat = SAS_PHY_DOWN; 481 /* 482 * libsas will use dev->port, should 483 * not call task_done for sata 484 */ 485 if (device->dev_type != SAS_SATA_DEV && !internal_abort) 486 task->task_done(task); 487 return -ECOMM; 488 } 489 490 hisi_hba = dev_to_hisi_hba(device); 491 dev = hisi_hba->dev; 492 493 switch (task->task_proto) { 494 case SAS_PROTOCOL_SSP: 495 case SAS_PROTOCOL_SMP: 496 case SAS_PROTOCOL_SATA: 497 case SAS_PROTOCOL_STP: 498 case SAS_PROTOCOL_STP_ALL: 499 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) { 500 if (!gfpflags_allow_blocking(gfp_flags)) 501 return -EINVAL; 502 503 down(&hisi_hba->sem); 504 up(&hisi_hba->sem); 505 } 506 507 if (DEV_IS_GONE(sas_dev)) { 508 if (sas_dev) 509 dev_info(dev, "task prep: device %d not ready\n", 510 sas_dev->device_id); 511 else 512 dev_info(dev, "task prep: device %016llx not ready\n", 513 SAS_ADDR(device->sas_addr)); 514 515 return -ECOMM; 516 } 517 518 port = to_hisi_sas_port(sas_port); 519 if (!port->port_attached) { 520 dev_info(dev, "task prep: %s port%d not attach device\n", 521 dev_is_sata(device) ? "SATA/STP" : "SAS", 522 device->port->id); 523 524 return -ECOMM; 525 } 526 527 rq = sas_task_find_rq(task); 528 if (rq) { 529 unsigned int dq_index; 530 u32 blk_tag; 531 532 blk_tag = blk_mq_unique_tag(rq); 533 dq_index = blk_mq_unique_tag_to_hwq(blk_tag); 534 dq = &hisi_hba->dq[dq_index]; 535 } else { 536 int queue; 537 538 if (hisi_hba->iopoll_q_cnt) { 539 /* 540 * Use interrupt queue (queue 0) to deliver and complete 541 * internal IOs of libsas or libata when there is at least 542 * one iopoll queue 543 */ 544 queue = 0; 545 } else { 546 struct Scsi_Host *shost = hisi_hba->shost; 547 struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; 548 549 queue = qmap->mq_map[raw_smp_processor_id()]; 550 } 551 dq = &hisi_hba->dq[queue]; 552 } 553 break; 554 case SAS_PROTOCOL_INTERNAL_ABORT: 555 if (!hisi_hba->hw->prep_abort) 556 return TMF_RESP_FUNC_FAILED; 557 558 if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags)) 559 return -EIO; 560 561 hisi_hba = dev_to_hisi_hba(device); 562 563 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) 564 return -EINVAL; 565 566 port = to_hisi_sas_port(sas_port); 567 dq = &hisi_hba->dq[task->abort_task.qid]; 568 break; 569 default: 570 dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n", 571 task->task_proto); 572 return -EINVAL; 573 } 574 575 rc = hisi_sas_dma_map(hisi_hba, task, &n_elem); 576 if (rc < 0) 577 goto prep_out; 578 579 if (!sas_protocol_ata(task->task_proto)) { 580 rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task); 581 if (rc < 0) 582 goto err_out_dma_unmap; 583 } 584 585 if (!internal_abort && hisi_hba->hw->slot_index_alloc) 586 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device); 587 else 588 rc = hisi_sas_slot_index_alloc(hisi_hba, rq); 589 590 if (rc < 0) 591 goto err_out_dif_dma_unmap; 592 593 slot = &hisi_hba->slot_info[rc]; 594 slot->n_elem = n_elem; 595 slot->n_elem_dif = n_elem_dif; 596 slot->task = task; 597 slot->port = port; 598 599 slot->tmf = task->tmf; 600 slot->is_internal = !!task->tmf || internal_abort; 601 602 /* protect task_prep and start_delivery sequence */ 603 hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev); 604 605 return 0; 606 607 err_out_dif_dma_unmap: 608 if (!sas_protocol_ata(task->task_proto)) 609 hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif); 610 err_out_dma_unmap: 611 hisi_sas_dma_unmap(hisi_hba, task, n_elem); 612 prep_out: 613 dev_err(dev, "task exec: failed[%d]!\n", rc); 614 return rc; 615 } 616 617 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no, 618 gfp_t gfp_flags) 619 { 620 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 621 struct asd_sas_phy *sas_phy = &phy->sas_phy; 622 623 if (!phy->phy_attached) 624 return; 625 626 sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags); 627 628 if (sas_phy->phy) { 629 struct sas_phy *sphy = sas_phy->phy; 630 631 sphy->negotiated_linkrate = sas_phy->linkrate; 632 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; 633 sphy->maximum_linkrate_hw = 634 hisi_hba->hw->phy_get_max_linkrate(); 635 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) 636 sphy->minimum_linkrate = phy->minimum_linkrate; 637 638 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) 639 sphy->maximum_linkrate = phy->maximum_linkrate; 640 } 641 642 if (phy->phy_type & PORT_TYPE_SAS) { 643 struct sas_identify_frame *id; 644 645 id = (struct sas_identify_frame *)phy->frame_rcvd; 646 id->dev_type = phy->identify.device_type; 647 id->initiator_bits = SAS_PROTOCOL_ALL; 648 id->target_bits = phy->identify.target_port_protocols; 649 } else if (phy->phy_type & PORT_TYPE_SATA) { 650 /* Nothing */ 651 } 652 653 sas_phy->frame_rcvd_size = phy->frame_rcvd_size; 654 sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags); 655 } 656 657 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) 658 { 659 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 660 struct hisi_sas_device *sas_dev = NULL; 661 int last = hisi_hba->last_dev_id; 662 int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES; 663 int i; 664 665 spin_lock(&hisi_hba->lock); 666 for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) { 667 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { 668 int queue = i % hisi_hba->queue_count; 669 struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; 670 671 hisi_hba->devices[i].device_id = i; 672 sas_dev = &hisi_hba->devices[i]; 673 sas_dev->dev_status = HISI_SAS_DEV_INIT; 674 sas_dev->dev_type = device->dev_type; 675 sas_dev->hisi_hba = hisi_hba; 676 sas_dev->sas_device = device; 677 sas_dev->dq = dq; 678 spin_lock_init(&sas_dev->lock); 679 INIT_LIST_HEAD(&hisi_hba->devices[i].list); 680 break; 681 } 682 i++; 683 } 684 hisi_hba->last_dev_id = i; 685 spin_unlock(&hisi_hba->lock); 686 687 return sas_dev; 688 } 689 690 static void hisi_sas_sync_poll_cq(struct hisi_sas_cq *cq) 691 { 692 /* make sure CQ entries being processed are processed to completion */ 693 spin_lock(&cq->poll_lock); 694 spin_unlock(&cq->poll_lock); 695 } 696 697 static bool hisi_sas_queue_is_poll(struct hisi_sas_cq *cq) 698 { 699 struct hisi_hba *hisi_hba = cq->hisi_hba; 700 701 if (cq->id < hisi_hba->queue_count - hisi_hba->iopoll_q_cnt) 702 return false; 703 return true; 704 } 705 706 static void hisi_sas_sync_cq(struct hisi_sas_cq *cq) 707 { 708 if (hisi_sas_queue_is_poll(cq)) 709 hisi_sas_sync_poll_cq(cq); 710 else 711 synchronize_irq(cq->irq_no); 712 } 713 714 void hisi_sas_sync_poll_cqs(struct hisi_hba *hisi_hba) 715 { 716 int i; 717 718 for (i = 0; i < hisi_hba->queue_count; i++) { 719 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 720 721 if (hisi_sas_queue_is_poll(cq)) 722 hisi_sas_sync_poll_cq(cq); 723 } 724 } 725 EXPORT_SYMBOL_GPL(hisi_sas_sync_poll_cqs); 726 727 void hisi_sas_sync_cqs(struct hisi_hba *hisi_hba) 728 { 729 int i; 730 731 for (i = 0; i < hisi_hba->queue_count; i++) { 732 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 733 734 hisi_sas_sync_cq(cq); 735 } 736 } 737 EXPORT_SYMBOL_GPL(hisi_sas_sync_cqs); 738 739 static void hisi_sas_tmf_aborted(struct sas_task *task) 740 { 741 struct hisi_sas_slot *slot = task->lldd_task; 742 struct domain_device *device = task->dev; 743 struct hisi_sas_device *sas_dev = device->lldd_dev; 744 struct hisi_hba *hisi_hba = sas_dev->hisi_hba; 745 746 if (slot) { 747 struct hisi_sas_cq *cq = 748 &hisi_hba->cq[slot->dlvry_queue]; 749 /* 750 * sync irq or poll queue to avoid free'ing task 751 * before using task in IO completion 752 */ 753 hisi_sas_sync_cq(cq); 754 slot->task = NULL; 755 } 756 } 757 758 #define HISI_SAS_DISK_RECOVER_CNT 3 759 static int hisi_sas_init_device(struct domain_device *device) 760 { 761 int rc = TMF_RESP_FUNC_COMPLETE; 762 struct scsi_lun lun; 763 int retry = HISI_SAS_DISK_RECOVER_CNT; 764 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 765 766 switch (device->dev_type) { 767 case SAS_END_DEVICE: 768 int_to_scsilun(0, &lun); 769 770 while (retry-- > 0) { 771 rc = sas_abort_task_set(device, lun.scsi_lun); 772 if (rc == TMF_RESP_FUNC_COMPLETE) { 773 hisi_sas_release_task(hisi_hba, device); 774 break; 775 } 776 } 777 break; 778 case SAS_SATA_DEV: 779 case SAS_SATA_PM: 780 case SAS_SATA_PM_PORT: 781 case SAS_SATA_PENDING: 782 /* 783 * If an expander is swapped when a SATA disk is attached then 784 * we should issue a hard reset to clear previous affiliation 785 * of STP target port, see SPL (chapter 6.19.4). 786 * 787 * However we don't need to issue a hard reset here for these 788 * reasons: 789 * a. When probing the device, libsas/libata already issues a 790 * hard reset in sas_probe_sata() -> ata_port_probe(). 791 * Note that in hisi_sas_debug_I_T_nexus_reset() we take care 792 * to issue a hard reset by checking the dev status (== INIT). 793 * b. When resetting the controller, this is simply unnecessary. 794 */ 795 while (retry-- > 0) { 796 rc = hisi_sas_softreset_ata_disk(device); 797 if (!rc) 798 break; 799 } 800 break; 801 default: 802 break; 803 } 804 805 return rc; 806 } 807 808 int hisi_sas_slave_alloc(struct scsi_device *sdev) 809 { 810 struct domain_device *ddev = sdev_to_domain_dev(sdev); 811 struct hisi_sas_device *sas_dev = ddev->lldd_dev; 812 int rc; 813 814 rc = sas_slave_alloc(sdev); 815 if (rc) 816 return rc; 817 818 rc = hisi_sas_init_device(ddev); 819 if (rc) 820 return rc; 821 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 822 return 0; 823 } 824 EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc); 825 826 static int hisi_sas_dev_found(struct domain_device *device) 827 { 828 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 829 struct domain_device *parent_dev = device->parent; 830 struct hisi_sas_device *sas_dev; 831 struct device *dev = hisi_hba->dev; 832 int rc; 833 834 if (hisi_hba->hw->alloc_dev) 835 sas_dev = hisi_hba->hw->alloc_dev(device); 836 else 837 sas_dev = hisi_sas_alloc_dev(device); 838 if (!sas_dev) { 839 dev_err(dev, "fail alloc dev: max support %d devices\n", 840 HISI_SAS_MAX_DEVICES); 841 return -EINVAL; 842 } 843 844 device->lldd_dev = sas_dev; 845 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 846 847 if (parent_dev && dev_is_expander(parent_dev->dev_type)) { 848 int phy_no; 849 850 phy_no = sas_find_attached_phy_id(&parent_dev->ex_dev, device); 851 if (phy_no < 0) { 852 dev_info(dev, "dev found: no attached " 853 "dev:%016llx at ex:%016llx\n", 854 SAS_ADDR(device->sas_addr), 855 SAS_ADDR(parent_dev->sas_addr)); 856 rc = phy_no; 857 goto err_out; 858 } 859 } 860 861 dev_info(dev, "dev[%d:%x] found\n", 862 sas_dev->device_id, sas_dev->dev_type); 863 864 return 0; 865 866 err_out: 867 hisi_sas_dev_gone(device); 868 return rc; 869 } 870 871 int hisi_sas_device_configure(struct scsi_device *sdev, 872 struct queue_limits *lim) 873 { 874 struct domain_device *dev = sdev_to_domain_dev(sdev); 875 int ret = sas_device_configure(sdev, lim); 876 877 if (ret) 878 return ret; 879 if (!dev_is_sata(dev)) 880 sas_change_queue_depth(sdev, 64); 881 882 return 0; 883 } 884 EXPORT_SYMBOL_GPL(hisi_sas_device_configure); 885 886 void hisi_sas_scan_start(struct Scsi_Host *shost) 887 { 888 struct hisi_hba *hisi_hba = shost_priv(shost); 889 890 hisi_hba->hw->phys_init(hisi_hba); 891 } 892 EXPORT_SYMBOL_GPL(hisi_sas_scan_start); 893 894 int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) 895 { 896 struct hisi_hba *hisi_hba = shost_priv(shost); 897 struct sas_ha_struct *sha = &hisi_hba->sha; 898 899 /* Wait for PHY up interrupt to occur */ 900 if (time < HZ) 901 return 0; 902 903 sas_drain_work(sha); 904 return 1; 905 } 906 EXPORT_SYMBOL_GPL(hisi_sas_scan_finished); 907 908 static void hisi_sas_phyup_work_common(struct work_struct *work, 909 enum hisi_sas_phy_event event) 910 { 911 struct hisi_sas_phy *phy = 912 container_of(work, typeof(*phy), works[event]); 913 struct hisi_hba *hisi_hba = phy->hisi_hba; 914 struct asd_sas_phy *sas_phy = &phy->sas_phy; 915 int phy_no = sas_phy->id; 916 917 phy->wait_phyup_cnt = 0; 918 if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP) 919 hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no); 920 hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL); 921 } 922 923 static void hisi_sas_phyup_work(struct work_struct *work) 924 { 925 hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP); 926 } 927 928 static void hisi_sas_linkreset_work(struct work_struct *work) 929 { 930 struct hisi_sas_phy *phy = 931 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]); 932 struct asd_sas_phy *sas_phy = &phy->sas_phy; 933 934 hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL); 935 } 936 937 static void hisi_sas_phyup_pm_work(struct work_struct *work) 938 { 939 struct hisi_sas_phy *phy = 940 container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]); 941 struct hisi_hba *hisi_hba = phy->hisi_hba; 942 struct device *dev = hisi_hba->dev; 943 944 hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM); 945 pm_runtime_put_sync(dev); 946 } 947 948 static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = { 949 [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work, 950 [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work, 951 [HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work, 952 }; 953 954 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, 955 enum hisi_sas_phy_event event) 956 { 957 struct hisi_hba *hisi_hba = phy->hisi_hba; 958 959 if (WARN_ON(event >= HISI_PHYES_NUM)) 960 return false; 961 962 return queue_work(hisi_hba->wq, &phy->works[event]); 963 } 964 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event); 965 966 static void hisi_sas_wait_phyup_timedout(struct timer_list *t) 967 { 968 struct hisi_sas_phy *phy = from_timer(phy, t, timer); 969 struct hisi_hba *hisi_hba = phy->hisi_hba; 970 struct device *dev = hisi_hba->dev; 971 int phy_no = phy->sas_phy.id; 972 973 dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no); 974 hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); 975 } 976 977 #define HISI_SAS_WAIT_PHYUP_RETRIES 10 978 979 void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no) 980 { 981 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 982 struct device *dev = hisi_hba->dev; 983 unsigned long flags; 984 985 dev_dbg(dev, "phy%d OOB ready\n", phy_no); 986 spin_lock_irqsave(&phy->lock, flags); 987 if (phy->phy_attached) { 988 spin_unlock_irqrestore(&phy->lock, flags); 989 return; 990 } 991 992 if (!timer_pending(&phy->timer)) { 993 if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) { 994 phy->wait_phyup_cnt++; 995 phy->timer.expires = jiffies + 996 HISI_SAS_WAIT_PHYUP_TIMEOUT; 997 add_timer(&phy->timer); 998 spin_unlock_irqrestore(&phy->lock, flags); 999 return; 1000 } 1001 1002 dev_warn(dev, "phy%d failed to come up %d times, giving up\n", 1003 phy_no, phy->wait_phyup_cnt); 1004 phy->wait_phyup_cnt = 0; 1005 } 1006 spin_unlock_irqrestore(&phy->lock, flags); 1007 } 1008 1009 EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready); 1010 1011 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) 1012 { 1013 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1014 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1015 int i; 1016 1017 phy->hisi_hba = hisi_hba; 1018 phy->port = NULL; 1019 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; 1020 phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate(); 1021 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0; 1022 sas_phy->iproto = SAS_PROTOCOL_ALL; 1023 sas_phy->tproto = 0; 1024 sas_phy->role = PHY_ROLE_INITIATOR; 1025 sas_phy->oob_mode = OOB_NOT_CONNECTED; 1026 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; 1027 sas_phy->id = phy_no; 1028 sas_phy->sas_addr = &hisi_hba->sas_addr[0]; 1029 sas_phy->frame_rcvd = &phy->frame_rcvd[0]; 1030 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata; 1031 sas_phy->lldd_phy = phy; 1032 1033 for (i = 0; i < HISI_PHYES_NUM; i++) 1034 INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]); 1035 1036 spin_lock_init(&phy->lock); 1037 1038 timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0); 1039 } 1040 1041 /* Wrapper to ensure we track hisi_sas_phy.enable properly */ 1042 void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable) 1043 { 1044 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1045 struct asd_sas_phy *aphy = &phy->sas_phy; 1046 struct sas_phy *sphy = aphy->phy; 1047 unsigned long flags; 1048 1049 spin_lock_irqsave(&phy->lock, flags); 1050 1051 if (enable) { 1052 /* We may have been enabled already; if so, don't touch */ 1053 if (!phy->enable) 1054 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; 1055 hisi_hba->hw->phy_start(hisi_hba, phy_no); 1056 } else { 1057 sphy->negotiated_linkrate = SAS_PHY_DISABLED; 1058 hisi_hba->hw->phy_disable(hisi_hba, phy_no); 1059 } 1060 phy->enable = enable; 1061 spin_unlock_irqrestore(&phy->lock, flags); 1062 } 1063 EXPORT_SYMBOL_GPL(hisi_sas_phy_enable); 1064 1065 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) 1066 { 1067 struct hisi_sas_phy *phy = sas_phy->lldd_phy; 1068 struct asd_sas_port *sas_port = sas_phy->port; 1069 struct hisi_sas_port *port; 1070 1071 if (!sas_port) 1072 return; 1073 1074 port = to_hisi_sas_port(sas_port); 1075 port->port_attached = 1; 1076 port->id = phy->port_id; 1077 phy->port = port; 1078 sas_port->lldd_port = port; 1079 } 1080 1081 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task, 1082 struct hisi_sas_slot *slot, bool need_lock) 1083 { 1084 if (task) { 1085 unsigned long flags; 1086 struct task_status_struct *ts; 1087 1088 ts = &task->task_status; 1089 1090 ts->resp = SAS_TASK_COMPLETE; 1091 ts->stat = SAS_ABORTED_TASK; 1092 spin_lock_irqsave(&task->task_state_lock, flags); 1093 task->task_state_flags &= ~SAS_TASK_STATE_PENDING; 1094 if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP) 1095 task->task_state_flags |= SAS_TASK_STATE_DONE; 1096 spin_unlock_irqrestore(&task->task_state_lock, flags); 1097 } 1098 1099 hisi_sas_slot_task_free(hisi_hba, task, slot, need_lock); 1100 } 1101 1102 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 1103 struct domain_device *device) 1104 { 1105 struct hisi_sas_slot *slot, *slot2; 1106 struct hisi_sas_device *sas_dev = device->lldd_dev; 1107 1108 spin_lock(&sas_dev->lock); 1109 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) 1110 hisi_sas_do_release_task(hisi_hba, slot->task, slot, false); 1111 1112 spin_unlock(&sas_dev->lock); 1113 } 1114 1115 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba) 1116 { 1117 struct hisi_sas_device *sas_dev; 1118 struct domain_device *device; 1119 int i; 1120 1121 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1122 sas_dev = &hisi_hba->devices[i]; 1123 device = sas_dev->sas_device; 1124 1125 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || 1126 !device) 1127 continue; 1128 1129 hisi_sas_release_task(hisi_hba, device); 1130 } 1131 } 1132 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks); 1133 1134 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba, 1135 struct domain_device *device) 1136 { 1137 if (hisi_hba->hw->dereg_device) 1138 hisi_hba->hw->dereg_device(hisi_hba, device); 1139 } 1140 1141 static int 1142 hisi_sas_internal_task_abort_dev(struct hisi_sas_device *sas_dev, 1143 bool rst_ha_timeout) 1144 { 1145 struct hisi_sas_internal_abort_data data = { rst_ha_timeout }; 1146 struct domain_device *device = sas_dev->sas_device; 1147 struct hisi_hba *hisi_hba = sas_dev->hisi_hba; 1148 int i, rc; 1149 1150 for (i = 0; i < hisi_hba->cq_nvecs; i++) { 1151 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 1152 const struct cpumask *mask = cq->irq_mask; 1153 1154 if (mask && !cpumask_intersects(cpu_online_mask, mask)) 1155 continue; 1156 rc = sas_execute_internal_abort_dev(device, i, &data); 1157 if (rc) 1158 return rc; 1159 } 1160 1161 return 0; 1162 } 1163 1164 static void hisi_sas_dev_gone(struct domain_device *device) 1165 { 1166 struct hisi_sas_device *sas_dev = device->lldd_dev; 1167 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1168 struct device *dev = hisi_hba->dev; 1169 int ret = 0; 1170 1171 dev_info(dev, "dev[%d:%x] is gone\n", 1172 sas_dev->device_id, sas_dev->dev_type); 1173 1174 down(&hisi_hba->sem); 1175 if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { 1176 hisi_sas_internal_task_abort_dev(sas_dev, true); 1177 1178 hisi_sas_dereg_device(hisi_hba, device); 1179 1180 ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev); 1181 device->lldd_dev = NULL; 1182 } 1183 1184 if (hisi_hba->hw->free_device) 1185 hisi_hba->hw->free_device(sas_dev); 1186 1187 /* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */ 1188 if (!ret) 1189 sas_dev->dev_type = SAS_PHY_UNUSED; 1190 sas_dev->sas_device = NULL; 1191 up(&hisi_hba->sem); 1192 } 1193 1194 static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, 1195 struct sas_phy_linkrates *r) 1196 { 1197 struct sas_phy_linkrates _r; 1198 1199 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1200 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1201 enum sas_linkrate min, max; 1202 1203 if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS) 1204 return -EINVAL; 1205 1206 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1207 max = sas_phy->phy->maximum_linkrate; 1208 min = r->minimum_linkrate; 1209 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1210 max = r->maximum_linkrate; 1211 min = sas_phy->phy->minimum_linkrate; 1212 } else 1213 return -EINVAL; 1214 1215 _r.maximum_linkrate = max; 1216 _r.minimum_linkrate = min; 1217 1218 sas_phy->phy->maximum_linkrate = max; 1219 sas_phy->phy->minimum_linkrate = min; 1220 1221 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1222 msleep(100); 1223 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r); 1224 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1225 1226 return 0; 1227 } 1228 1229 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 1230 void *funcdata) 1231 { 1232 struct hisi_sas_phy *phy = container_of(sas_phy, 1233 struct hisi_sas_phy, sas_phy); 1234 struct sas_ha_struct *sas_ha = sas_phy->ha; 1235 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1236 struct device *dev = hisi_hba->dev; 1237 DECLARE_COMPLETION_ONSTACK(completion); 1238 int phy_no = sas_phy->id; 1239 u8 sts = phy->phy_attached; 1240 int ret = 0; 1241 1242 down(&hisi_hba->sem); 1243 phy->reset_completion = &completion; 1244 1245 switch (func) { 1246 case PHY_FUNC_HARD_RESET: 1247 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no); 1248 break; 1249 1250 case PHY_FUNC_LINK_RESET: 1251 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1252 msleep(100); 1253 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1254 break; 1255 1256 case PHY_FUNC_DISABLE: 1257 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1258 goto out; 1259 1260 case PHY_FUNC_SET_LINK_RATE: 1261 ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); 1262 break; 1263 1264 case PHY_FUNC_GET_EVENTS: 1265 if (hisi_hba->hw->get_events) { 1266 hisi_hba->hw->get_events(hisi_hba, phy_no); 1267 goto out; 1268 } 1269 fallthrough; 1270 case PHY_FUNC_RELEASE_SPINUP_HOLD: 1271 default: 1272 ret = -EOPNOTSUPP; 1273 goto out; 1274 } 1275 1276 if (sts && !wait_for_completion_timeout(&completion, 1277 HISI_SAS_WAIT_PHYUP_TIMEOUT)) { 1278 dev_warn(dev, "phy%d wait phyup timed out for func %d\n", 1279 phy_no, func); 1280 if (phy->in_reset) 1281 ret = -ETIMEDOUT; 1282 } 1283 1284 out: 1285 phy->reset_completion = NULL; 1286 1287 up(&hisi_hba->sem); 1288 return ret; 1289 } 1290 1291 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev, 1292 bool reset, int pmp, u8 *fis) 1293 { 1294 struct ata_taskfile tf; 1295 1296 ata_tf_init(dev, &tf); 1297 if (reset) 1298 tf.ctl |= ATA_SRST; 1299 else 1300 tf.ctl &= ~ATA_SRST; 1301 tf.command = ATA_CMD_DEV_RESET; 1302 ata_tf_to_fis(&tf, pmp, 0, fis); 1303 } 1304 1305 static int hisi_sas_softreset_ata_disk(struct domain_device *device) 1306 { 1307 u8 fis[20] = {0}; 1308 struct ata_port *ap = device->sata_dev.ap; 1309 struct ata_link *link; 1310 int rc = TMF_RESP_FUNC_FAILED; 1311 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1312 struct device *dev = hisi_hba->dev; 1313 1314 ata_for_each_link(link, ap, EDGE) { 1315 int pmp = sata_srst_pmp(link); 1316 1317 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 1318 rc = sas_execute_ata_cmd(device, fis, -1); 1319 if (rc != TMF_RESP_FUNC_COMPLETE) 1320 break; 1321 } 1322 1323 if (rc == TMF_RESP_FUNC_COMPLETE) { 1324 usleep_range(900, 1000); 1325 ata_for_each_link(link, ap, EDGE) { 1326 int pmp = sata_srst_pmp(link); 1327 1328 hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis); 1329 rc = sas_execute_ata_cmd(device, fis, -1); 1330 if (rc != TMF_RESP_FUNC_COMPLETE) 1331 dev_err(dev, "ata disk %016llx de-reset failed\n", 1332 SAS_ADDR(device->sas_addr)); 1333 } 1334 } else { 1335 dev_err(dev, "ata disk %016llx reset failed\n", 1336 SAS_ADDR(device->sas_addr)); 1337 } 1338 1339 if (rc == TMF_RESP_FUNC_COMPLETE) 1340 hisi_sas_release_task(hisi_hba, device); 1341 1342 return rc; 1343 } 1344 1345 static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) 1346 { 1347 u32 state = hisi_hba->hw->get_phys_state(hisi_hba); 1348 int i; 1349 1350 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1351 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1352 struct domain_device *device = sas_dev->sas_device; 1353 struct asd_sas_port *sas_port; 1354 struct hisi_sas_port *port; 1355 struct hisi_sas_phy *phy = NULL; 1356 struct asd_sas_phy *sas_phy; 1357 1358 if ((sas_dev->dev_type == SAS_PHY_UNUSED) 1359 || !device || !device->port) 1360 continue; 1361 1362 sas_port = device->port; 1363 port = to_hisi_sas_port(sas_port); 1364 1365 spin_lock(&sas_port->phy_list_lock); 1366 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) 1367 if (state & BIT(sas_phy->id)) { 1368 phy = sas_phy->lldd_phy; 1369 break; 1370 } 1371 spin_unlock(&sas_port->phy_list_lock); 1372 1373 if (phy) { 1374 port->id = phy->port_id; 1375 1376 /* Update linkrate of directly attached device. */ 1377 if (!device->parent) 1378 device->linkrate = phy->sas_phy.linkrate; 1379 1380 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 1381 } else if (!port->port_attached) 1382 port->id = 0xff; 1383 } 1384 } 1385 1386 static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) 1387 { 1388 u32 new_state = hisi_hba->hw->get_phys_state(hisi_hba); 1389 struct asd_sas_port *_sas_port = NULL; 1390 int phy_no; 1391 1392 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { 1393 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1394 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1395 struct asd_sas_port *sas_port = sas_phy->port; 1396 bool do_port_check = _sas_port != sas_port; 1397 1398 if (!sas_phy->phy->enabled) 1399 continue; 1400 1401 /* Report PHY state change to libsas */ 1402 if (new_state & BIT(phy_no)) { 1403 if (do_port_check && sas_port && sas_port->port_dev) { 1404 struct domain_device *dev = sas_port->port_dev; 1405 1406 _sas_port = sas_port; 1407 1408 if (dev_is_expander(dev->dev_type)) 1409 sas_notify_port_event(sas_phy, 1410 PORTE_BROADCAST_RCVD, 1411 GFP_KERNEL); 1412 } 1413 } else { 1414 hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL); 1415 1416 /* 1417 * The new_state is not ready but old_state is ready, 1418 * the two possible causes: 1419 * 1. The connected device is removed 1420 * 2. Device exists but phyup timed out 1421 */ 1422 if (state & BIT(phy_no)) 1423 hisi_sas_notify_phy_event(phy, 1424 HISI_PHYE_LINK_RESET); 1425 } 1426 } 1427 } 1428 1429 static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba) 1430 { 1431 struct hisi_sas_device *sas_dev; 1432 struct domain_device *device; 1433 int i; 1434 1435 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1436 sas_dev = &hisi_hba->devices[i]; 1437 device = sas_dev->sas_device; 1438 1439 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) 1440 continue; 1441 1442 hisi_sas_init_device(device); 1443 } 1444 } 1445 1446 static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba, 1447 struct asd_sas_port *sas_port, 1448 struct domain_device *device) 1449 { 1450 struct ata_port *ap = device->sata_dev.ap; 1451 struct device *dev = hisi_hba->dev; 1452 int rc = TMF_RESP_FUNC_FAILED; 1453 struct ata_link *link; 1454 u8 fis[20] = {0}; 1455 int i; 1456 1457 for (i = 0; i < hisi_hba->n_phy; i++) { 1458 if (!(sas_port->phy_mask & BIT(i))) 1459 continue; 1460 1461 ata_for_each_link(link, ap, EDGE) { 1462 int pmp = sata_srst_pmp(link); 1463 1464 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 1465 rc = sas_execute_ata_cmd(device, fis, i); 1466 if (rc != TMF_RESP_FUNC_COMPLETE) { 1467 dev_err(dev, "phy%d ata reset failed rc=%d\n", 1468 i, rc); 1469 break; 1470 } 1471 } 1472 } 1473 } 1474 1475 static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba) 1476 { 1477 struct device *dev = hisi_hba->dev; 1478 int port_no, rc, i; 1479 1480 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1481 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1482 struct domain_device *device = sas_dev->sas_device; 1483 1484 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) 1485 continue; 1486 1487 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1488 if (rc < 0) 1489 dev_err(dev, "STP reject: abort dev failed %d\n", rc); 1490 } 1491 1492 for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) { 1493 struct hisi_sas_port *port = &hisi_hba->port[port_no]; 1494 struct asd_sas_port *sas_port = &port->sas_port; 1495 struct domain_device *port_dev = sas_port->port_dev; 1496 struct domain_device *device; 1497 1498 if (!port_dev || !dev_is_expander(port_dev->dev_type)) 1499 continue; 1500 1501 /* Try to find a SATA device */ 1502 list_for_each_entry(device, &sas_port->dev_list, 1503 dev_list_node) { 1504 if (dev_is_sata(device)) { 1505 hisi_sas_send_ata_reset_each_phy(hisi_hba, 1506 sas_port, 1507 device); 1508 break; 1509 } 1510 } 1511 } 1512 } 1513 1514 void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba) 1515 { 1516 struct Scsi_Host *shost = hisi_hba->shost; 1517 1518 hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba); 1519 1520 scsi_block_requests(shost); 1521 hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000); 1522 1523 /* 1524 * hisi_hba->timer is only used for v1/v2 hw, and check hw->sht 1525 * which is also only used for v1/v2 hw to skip it for v3 hw 1526 */ 1527 if (hisi_hba->hw->sht) 1528 del_timer_sync(&hisi_hba->timer); 1529 1530 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1531 } 1532 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare); 1533 1534 static void hisi_sas_async_init_wait_phyup(void *data, async_cookie_t cookie) 1535 { 1536 struct hisi_sas_phy *phy = data; 1537 struct hisi_hba *hisi_hba = phy->hisi_hba; 1538 struct device *dev = hisi_hba->dev; 1539 DECLARE_COMPLETION_ONSTACK(completion); 1540 int phy_no = phy->sas_phy.id; 1541 1542 phy->reset_completion = &completion; 1543 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1544 if (!wait_for_completion_timeout(&completion, 1545 HISI_SAS_WAIT_PHYUP_TIMEOUT)) 1546 dev_warn(dev, "phy%d wait phyup timed out\n", phy_no); 1547 1548 phy->reset_completion = NULL; 1549 } 1550 1551 void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba) 1552 { 1553 struct Scsi_Host *shost = hisi_hba->shost; 1554 ASYNC_DOMAIN_EXCLUSIVE(async); 1555 int phy_no; 1556 1557 /* Init and wait for PHYs to come up and all libsas event finished. */ 1558 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { 1559 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1560 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1561 1562 if (!sas_phy->phy->enabled) 1563 continue; 1564 1565 if (!(hisi_hba->phy_state & BIT(phy_no))) { 1566 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1567 continue; 1568 } 1569 1570 async_schedule_domain(hisi_sas_async_init_wait_phyup, 1571 phy, &async); 1572 } 1573 1574 async_synchronize_full_domain(&async); 1575 hisi_sas_refresh_port_id(hisi_hba); 1576 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1577 1578 if (hisi_hba->reject_stp_links_msk) 1579 hisi_sas_terminate_stp_reject(hisi_hba); 1580 hisi_sas_reset_init_all_devices(hisi_hba); 1581 scsi_unblock_requests(shost); 1582 clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 1583 up(&hisi_hba->sem); 1584 1585 hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state); 1586 } 1587 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done); 1588 1589 static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba) 1590 { 1591 if (!hisi_hba->hw->soft_reset) 1592 return -ENOENT; 1593 1594 down(&hisi_hba->sem); 1595 if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { 1596 up(&hisi_hba->sem); 1597 return -EPERM; 1598 } 1599 1600 if (hisi_sas_debugfs_enable) 1601 hisi_hba->hw->debugfs_snapshot_regs(hisi_hba); 1602 1603 return 0; 1604 } 1605 1606 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) 1607 { 1608 struct device *dev = hisi_hba->dev; 1609 struct Scsi_Host *shost = hisi_hba->shost; 1610 int rc; 1611 1612 dev_info(dev, "controller resetting...\n"); 1613 hisi_sas_controller_reset_prepare(hisi_hba); 1614 1615 rc = hisi_hba->hw->soft_reset(hisi_hba); 1616 if (rc) { 1617 dev_warn(dev, "controller reset failed (%d)\n", rc); 1618 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1619 up(&hisi_hba->sem); 1620 scsi_unblock_requests(shost); 1621 clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 1622 return rc; 1623 } 1624 clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); 1625 1626 hisi_sas_controller_reset_done(hisi_hba); 1627 dev_info(dev, "controller reset complete\n"); 1628 1629 return 0; 1630 } 1631 1632 static int hisi_sas_abort_task(struct sas_task *task) 1633 { 1634 struct hisi_sas_internal_abort_data internal_abort_data = { false }; 1635 struct domain_device *device = task->dev; 1636 struct hisi_sas_device *sas_dev = device->lldd_dev; 1637 struct hisi_sas_slot *slot = task->lldd_task; 1638 struct hisi_hba *hisi_hba; 1639 struct device *dev; 1640 int rc = TMF_RESP_FUNC_FAILED; 1641 unsigned long flags; 1642 1643 if (!sas_dev) 1644 return TMF_RESP_FUNC_FAILED; 1645 1646 hisi_hba = dev_to_hisi_hba(task->dev); 1647 dev = hisi_hba->dev; 1648 1649 spin_lock_irqsave(&task->task_state_lock, flags); 1650 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1651 struct hisi_sas_cq *cq; 1652 1653 if (slot) { 1654 /* 1655 * sync irq or poll queue to avoid free'ing task 1656 * before using task in IO completion 1657 */ 1658 cq = &hisi_hba->cq[slot->dlvry_queue]; 1659 hisi_sas_sync_cq(cq); 1660 } 1661 spin_unlock_irqrestore(&task->task_state_lock, flags); 1662 rc = TMF_RESP_FUNC_COMPLETE; 1663 goto out; 1664 } 1665 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 1666 spin_unlock_irqrestore(&task->task_state_lock, flags); 1667 1668 if (!slot) 1669 goto out; 1670 1671 if (task->task_proto & SAS_PROTOCOL_SSP) { 1672 u16 tag = slot->idx; 1673 int rc2; 1674 1675 rc = sas_abort_task(task, tag); 1676 rc2 = sas_execute_internal_abort_single(device, tag, 1677 slot->dlvry_queue, &internal_abort_data); 1678 if (rc2 < 0) { 1679 dev_err(dev, "abort task: internal abort (%d)\n", rc2); 1680 return TMF_RESP_FUNC_FAILED; 1681 } 1682 1683 /* 1684 * If the TMF finds that the IO is not in the device and also 1685 * the internal abort does not succeed, then it is safe to 1686 * free the slot. 1687 * Note: if the internal abort succeeds then the slot 1688 * will have already been completed 1689 */ 1690 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) { 1691 if (task->lldd_task) 1692 hisi_sas_do_release_task(hisi_hba, task, slot, true); 1693 } 1694 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1695 task->task_proto & SAS_PROTOCOL_STP) { 1696 if (task->dev->dev_type == SAS_SATA_DEV) { 1697 struct ata_queued_cmd *qc = task->uldd_task; 1698 1699 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1700 if (rc < 0) { 1701 dev_err(dev, "abort task: internal abort failed\n"); 1702 goto out; 1703 } 1704 hisi_sas_dereg_device(hisi_hba, device); 1705 1706 /* 1707 * If an ATA internal command times out in ATA EH, it 1708 * need to execute soft reset, so check the scsicmd 1709 */ 1710 if ((sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) && 1711 qc && qc->scsicmd) { 1712 hisi_sas_do_release_task(hisi_hba, task, slot, true); 1713 rc = TMF_RESP_FUNC_COMPLETE; 1714 } else { 1715 rc = hisi_sas_softreset_ata_disk(device); 1716 } 1717 } 1718 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 1719 /* SMP */ 1720 u32 tag = slot->idx; 1721 struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; 1722 1723 rc = sas_execute_internal_abort_single(device, 1724 tag, slot->dlvry_queue, 1725 &internal_abort_data); 1726 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) && 1727 task->lldd_task) { 1728 /* 1729 * sync irq or poll queue to avoid free'ing task 1730 * before using task in IO completion 1731 */ 1732 hisi_sas_sync_cq(cq); 1733 slot->task = NULL; 1734 } 1735 } 1736 1737 out: 1738 if (rc != TMF_RESP_FUNC_COMPLETE) 1739 dev_notice(dev, "abort task: rc=%d\n", rc); 1740 return rc; 1741 } 1742 1743 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) 1744 { 1745 struct hisi_sas_device *sas_dev = device->lldd_dev; 1746 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1747 struct device *dev = hisi_hba->dev; 1748 int rc; 1749 1750 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1751 if (rc < 0) { 1752 dev_err(dev, "abort task set: internal abort rc=%d\n", rc); 1753 return TMF_RESP_FUNC_FAILED; 1754 } 1755 hisi_sas_dereg_device(hisi_hba, device); 1756 1757 rc = sas_abort_task_set(device, lun); 1758 if (rc == TMF_RESP_FUNC_COMPLETE) 1759 hisi_sas_release_task(hisi_hba, device); 1760 1761 return rc; 1762 } 1763 1764 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) 1765 { 1766 struct sas_phy *local_phy = sas_get_local_phy(device); 1767 struct hisi_sas_device *sas_dev = device->lldd_dev; 1768 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1769 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1770 int rc, reset_type; 1771 1772 if (!local_phy->enabled) { 1773 sas_put_local_phy(local_phy); 1774 return -ENODEV; 1775 } 1776 1777 if (scsi_is_sas_phy_local(local_phy)) { 1778 struct asd_sas_phy *sas_phy = 1779 sas_ha->sas_phy[local_phy->number]; 1780 struct hisi_sas_phy *phy = 1781 container_of(sas_phy, struct hisi_sas_phy, sas_phy); 1782 unsigned long flags; 1783 1784 spin_lock_irqsave(&phy->lock, flags); 1785 phy->in_reset = 1; 1786 spin_unlock_irqrestore(&phy->lock, flags); 1787 } 1788 1789 reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT || 1790 !dev_is_sata(device)) ? true : false; 1791 1792 rc = sas_phy_reset(local_phy, reset_type); 1793 sas_put_local_phy(local_phy); 1794 1795 if (scsi_is_sas_phy_local(local_phy)) { 1796 struct asd_sas_phy *sas_phy = 1797 sas_ha->sas_phy[local_phy->number]; 1798 struct hisi_sas_phy *phy = 1799 container_of(sas_phy, struct hisi_sas_phy, sas_phy); 1800 unsigned long flags; 1801 1802 spin_lock_irqsave(&phy->lock, flags); 1803 phy->in_reset = 0; 1804 spin_unlock_irqrestore(&phy->lock, flags); 1805 1806 /* report PHY down if timed out */ 1807 if (rc == -ETIMEDOUT) 1808 hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL); 1809 return rc; 1810 } 1811 1812 /* Remote phy */ 1813 if (rc) 1814 return rc; 1815 1816 if (dev_is_sata(device)) { 1817 struct ata_link *link = &device->sata_dev.ap->link; 1818 1819 rc = ata_wait_after_reset(link, jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT, 1820 smp_ata_check_ready_type); 1821 } else { 1822 msleep(2000); 1823 } 1824 1825 return rc; 1826 } 1827 1828 static int hisi_sas_I_T_nexus_reset(struct domain_device *device) 1829 { 1830 struct hisi_sas_device *sas_dev = device->lldd_dev; 1831 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1832 struct device *dev = hisi_hba->dev; 1833 int rc; 1834 1835 if (sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) 1836 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 1837 1838 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1839 if (rc < 0) { 1840 dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc); 1841 return TMF_RESP_FUNC_FAILED; 1842 } 1843 hisi_sas_dereg_device(hisi_hba, device); 1844 1845 rc = hisi_sas_debug_I_T_nexus_reset(device); 1846 if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) { 1847 struct sas_phy *local_phy; 1848 1849 rc = hisi_sas_softreset_ata_disk(device); 1850 switch (rc) { 1851 case -ECOMM: 1852 rc = -ENODEV; 1853 break; 1854 case TMF_RESP_FUNC_FAILED: 1855 case -EMSGSIZE: 1856 case -EIO: 1857 local_phy = sas_get_local_phy(device); 1858 rc = sas_phy_enable(local_phy, 0); 1859 if (!rc) { 1860 local_phy->enabled = 0; 1861 dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n", 1862 SAS_ADDR(device->sas_addr), rc); 1863 rc = -ENODEV; 1864 } 1865 sas_put_local_phy(local_phy); 1866 break; 1867 default: 1868 break; 1869 } 1870 } 1871 1872 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) 1873 hisi_sas_release_task(hisi_hba, device); 1874 1875 return rc; 1876 } 1877 1878 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) 1879 { 1880 struct hisi_sas_device *sas_dev = device->lldd_dev; 1881 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1882 struct device *dev = hisi_hba->dev; 1883 int rc = TMF_RESP_FUNC_FAILED; 1884 1885 /* Clear internal IO and then lu reset */ 1886 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1887 if (rc < 0) { 1888 dev_err(dev, "lu_reset: internal abort failed\n"); 1889 goto out; 1890 } 1891 hisi_sas_dereg_device(hisi_hba, device); 1892 1893 if (dev_is_sata(device)) { 1894 struct sas_phy *phy; 1895 1896 phy = sas_get_local_phy(device); 1897 1898 rc = sas_phy_reset(phy, true); 1899 1900 if (rc == 0) 1901 hisi_sas_release_task(hisi_hba, device); 1902 sas_put_local_phy(phy); 1903 } else { 1904 rc = sas_lu_reset(device, lun); 1905 if (rc == TMF_RESP_FUNC_COMPLETE) 1906 hisi_sas_release_task(hisi_hba, device); 1907 } 1908 out: 1909 if (rc != TMF_RESP_FUNC_COMPLETE) 1910 dev_err(dev, "lu_reset: for device[%d]:rc= %d\n", 1911 sas_dev->device_id, rc); 1912 return rc; 1913 } 1914 1915 static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie) 1916 { 1917 struct domain_device *device = data; 1918 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1919 int rc; 1920 1921 rc = hisi_sas_debug_I_T_nexus_reset(device); 1922 if (rc != TMF_RESP_FUNC_COMPLETE) 1923 dev_info(hisi_hba->dev, "I_T_nexus reset fail for dev:%016llx rc=%d\n", 1924 SAS_ADDR(device->sas_addr), rc); 1925 } 1926 1927 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) 1928 { 1929 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1930 HISI_SAS_DECLARE_RST_WORK_ON_STACK(r); 1931 ASYNC_DOMAIN_EXCLUSIVE(async); 1932 int i; 1933 1934 queue_work(hisi_hba->wq, &r.work); 1935 wait_for_completion(r.completion); 1936 if (!r.done) 1937 return TMF_RESP_FUNC_FAILED; 1938 1939 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1940 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1941 struct domain_device *device = sas_dev->sas_device; 1942 1943 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device || 1944 dev_is_expander(device->dev_type)) 1945 continue; 1946 1947 async_schedule_domain(hisi_sas_async_I_T_nexus_reset, 1948 device, &async); 1949 } 1950 1951 async_synchronize_full_domain(&async); 1952 hisi_sas_release_tasks(hisi_hba); 1953 1954 return TMF_RESP_FUNC_COMPLETE; 1955 } 1956 1957 static int hisi_sas_query_task(struct sas_task *task) 1958 { 1959 int rc = TMF_RESP_FUNC_FAILED; 1960 1961 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1962 struct hisi_sas_slot *slot = task->lldd_task; 1963 u32 tag = slot->idx; 1964 1965 rc = sas_query_task(task, tag); 1966 switch (rc) { 1967 /* The task is still in Lun, release it then */ 1968 case TMF_RESP_FUNC_SUCC: 1969 /* The task is not in Lun or failed, reset the phy */ 1970 case TMF_RESP_FUNC_FAILED: 1971 case TMF_RESP_FUNC_COMPLETE: 1972 break; 1973 default: 1974 rc = TMF_RESP_FUNC_FAILED; 1975 break; 1976 } 1977 } 1978 return rc; 1979 } 1980 1981 static bool hisi_sas_internal_abort_timeout(struct sas_task *task, 1982 void *data) 1983 { 1984 struct domain_device *device = task->dev; 1985 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1986 struct hisi_sas_internal_abort_data *timeout = data; 1987 1988 if (hisi_sas_debugfs_enable) { 1989 /* 1990 * If timeout occurs in device gone scenario, to avoid 1991 * circular dependency like: 1992 * hisi_sas_dev_gone() -> down() -> ... -> 1993 * hisi_sas_internal_abort_timeout() -> down(). 1994 */ 1995 if (!timeout->rst_ha_timeout) 1996 down(&hisi_hba->sem); 1997 hisi_hba->hw->debugfs_snapshot_regs(hisi_hba); 1998 if (!timeout->rst_ha_timeout) 1999 up(&hisi_hba->sem); 2000 } 2001 2002 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 2003 pr_err("Internal abort: timeout %016llx\n", 2004 SAS_ADDR(device->sas_addr)); 2005 } else { 2006 struct hisi_sas_slot *slot = task->lldd_task; 2007 2008 set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); 2009 2010 if (slot) { 2011 struct hisi_sas_cq *cq = 2012 &hisi_hba->cq[slot->dlvry_queue]; 2013 /* 2014 * sync irq or poll queue to avoid free'ing task 2015 * before using task in IO completion 2016 */ 2017 hisi_sas_sync_cq(cq); 2018 slot->task = NULL; 2019 } 2020 2021 if (timeout->rst_ha_timeout) { 2022 pr_err("Internal abort: timeout and not done %016llx. Queuing reset.\n", 2023 SAS_ADDR(device->sas_addr)); 2024 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2025 } else { 2026 pr_err("Internal abort: timeout and not done %016llx.\n", 2027 SAS_ADDR(device->sas_addr)); 2028 } 2029 2030 return true; 2031 } 2032 2033 return false; 2034 } 2035 2036 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy) 2037 { 2038 hisi_sas_port_notify_formed(sas_phy); 2039 } 2040 2041 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type, 2042 u8 reg_index, u8 reg_count, u8 *write_data) 2043 { 2044 struct hisi_hba *hisi_hba = sha->lldd_ha; 2045 2046 if (!hisi_hba->hw->write_gpio) 2047 return -EOPNOTSUPP; 2048 2049 return hisi_hba->hw->write_gpio(hisi_hba, reg_type, 2050 reg_index, reg_count, write_data); 2051 } 2052 2053 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy) 2054 { 2055 struct asd_sas_phy *sas_phy = &phy->sas_phy; 2056 struct sas_phy *sphy = sas_phy->phy; 2057 unsigned long flags; 2058 2059 phy->phy_attached = 0; 2060 phy->phy_type = 0; 2061 phy->port = NULL; 2062 2063 spin_lock_irqsave(&phy->lock, flags); 2064 if (phy->enable) 2065 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; 2066 else 2067 sphy->negotiated_linkrate = SAS_PHY_DISABLED; 2068 spin_unlock_irqrestore(&phy->lock, flags); 2069 } 2070 2071 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy, 2072 gfp_t gfp_flags) 2073 { 2074 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 2075 struct asd_sas_phy *sas_phy = &phy->sas_phy; 2076 struct device *dev = hisi_hba->dev; 2077 2078 if (rdy) { 2079 /* Phy down but ready */ 2080 hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags); 2081 hisi_sas_port_notify_formed(sas_phy); 2082 } else { 2083 struct hisi_sas_port *port = phy->port; 2084 2085 if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) || 2086 phy->in_reset) { 2087 dev_info(dev, "ignore flutter phy%d down\n", phy_no); 2088 return; 2089 } 2090 /* Phy down and not ready */ 2091 sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags); 2092 sas_phy_disconnected(sas_phy); 2093 2094 if (port) { 2095 if (phy->phy_type & PORT_TYPE_SAS) { 2096 int port_id = port->id; 2097 2098 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba, 2099 port_id)) 2100 port->port_attached = 0; 2101 } else if (phy->phy_type & PORT_TYPE_SATA) 2102 port->port_attached = 0; 2103 } 2104 hisi_sas_phy_disconnected(phy); 2105 } 2106 } 2107 EXPORT_SYMBOL_GPL(hisi_sas_phy_down); 2108 2109 void hisi_sas_phy_bcast(struct hisi_sas_phy *phy) 2110 { 2111 struct asd_sas_phy *sas_phy = &phy->sas_phy; 2112 struct hisi_hba *hisi_hba = phy->hisi_hba; 2113 2114 if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) 2115 return; 2116 2117 sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC); 2118 } 2119 EXPORT_SYMBOL_GPL(hisi_sas_phy_bcast); 2120 2121 int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type) 2122 { 2123 struct hisi_hba *hisi_hba = shost_priv(shost); 2124 2125 if (reset_type != SCSI_ADAPTER_RESET) 2126 return -EOPNOTSUPP; 2127 2128 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2129 2130 return 0; 2131 } 2132 EXPORT_SYMBOL_GPL(hisi_sas_host_reset); 2133 2134 struct scsi_transport_template *hisi_sas_stt; 2135 EXPORT_SYMBOL_GPL(hisi_sas_stt); 2136 2137 static struct sas_domain_function_template hisi_sas_transport_ops = { 2138 .lldd_dev_found = hisi_sas_dev_found, 2139 .lldd_dev_gone = hisi_sas_dev_gone, 2140 .lldd_execute_task = hisi_sas_queue_command, 2141 .lldd_control_phy = hisi_sas_control_phy, 2142 .lldd_abort_task = hisi_sas_abort_task, 2143 .lldd_abort_task_set = hisi_sas_abort_task_set, 2144 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset, 2145 .lldd_lu_reset = hisi_sas_lu_reset, 2146 .lldd_query_task = hisi_sas_query_task, 2147 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha, 2148 .lldd_port_formed = hisi_sas_port_formed, 2149 .lldd_write_gpio = hisi_sas_write_gpio, 2150 .lldd_tmf_aborted = hisi_sas_tmf_aborted, 2151 .lldd_abort_timeout = hisi_sas_internal_abort_timeout, 2152 }; 2153 2154 void hisi_sas_init_mem(struct hisi_hba *hisi_hba) 2155 { 2156 int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS; 2157 struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint; 2158 2159 for (i = 0; i < hisi_hba->queue_count; i++) { 2160 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2161 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 2162 struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i]; 2163 2164 s = sizeof(struct hisi_sas_cmd_hdr); 2165 for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++) 2166 memset(&cmd_hdr[j], 0, s); 2167 2168 dq->wr_point = 0; 2169 2170 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 2171 memset(hisi_hba->complete_hdr[i], 0, s); 2172 cq->rd_point = 0; 2173 } 2174 2175 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy; 2176 memset(hisi_hba->initial_fis, 0, s); 2177 2178 s = max_command_entries * sizeof(struct hisi_sas_iost); 2179 memset(hisi_hba->iost, 0, s); 2180 2181 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 2182 memset(hisi_hba->breakpoint, 0, s); 2183 2184 s = sizeof(struct hisi_sas_sata_breakpoint); 2185 for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++) 2186 memset(&sata_breakpoint[j], 0, s); 2187 } 2188 EXPORT_SYMBOL_GPL(hisi_sas_init_mem); 2189 2190 int hisi_sas_alloc(struct hisi_hba *hisi_hba) 2191 { 2192 struct device *dev = hisi_hba->dev; 2193 int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS; 2194 int max_command_entries_ru, sz_slot_buf_ru; 2195 int blk_cnt, slots_per_blk; 2196 2197 sema_init(&hisi_hba->sem, 1); 2198 spin_lock_init(&hisi_hba->lock); 2199 for (i = 0; i < hisi_hba->n_phy; i++) { 2200 hisi_sas_phy_init(hisi_hba, i); 2201 hisi_hba->port[i].port_attached = 0; 2202 hisi_hba->port[i].id = -1; 2203 } 2204 2205 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 2206 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED; 2207 hisi_hba->devices[i].device_id = i; 2208 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT; 2209 } 2210 2211 for (i = 0; i < hisi_hba->queue_count; i++) { 2212 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2213 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 2214 2215 /* Completion queue structure */ 2216 cq->id = i; 2217 cq->hisi_hba = hisi_hba; 2218 spin_lock_init(&cq->poll_lock); 2219 2220 /* Delivery queue structure */ 2221 spin_lock_init(&dq->lock); 2222 INIT_LIST_HEAD(&dq->list); 2223 dq->id = i; 2224 dq->hisi_hba = hisi_hba; 2225 2226 /* Delivery queue */ 2227 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 2228 hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s, 2229 &hisi_hba->cmd_hdr_dma[i], 2230 GFP_KERNEL); 2231 if (!hisi_hba->cmd_hdr[i]) 2232 goto err_out; 2233 2234 /* Completion queue */ 2235 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 2236 hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s, 2237 &hisi_hba->complete_hdr_dma[i], 2238 GFP_KERNEL); 2239 if (!hisi_hba->complete_hdr[i]) 2240 goto err_out; 2241 } 2242 2243 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); 2244 hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma, 2245 GFP_KERNEL); 2246 if (!hisi_hba->itct) 2247 goto err_out; 2248 2249 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries, 2250 sizeof(struct hisi_sas_slot), 2251 GFP_KERNEL); 2252 if (!hisi_hba->slot_info) 2253 goto err_out; 2254 2255 /* roundup to avoid overly large block size */ 2256 max_command_entries_ru = roundup(max_command_entries, 64); 2257 if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK) 2258 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table); 2259 else 2260 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table); 2261 sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64); 2262 s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE); 2263 blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s; 2264 slots_per_blk = s / sz_slot_buf_ru; 2265 2266 for (i = 0; i < blk_cnt; i++) { 2267 int slot_index = i * slots_per_blk; 2268 dma_addr_t buf_dma; 2269 void *buf; 2270 2271 buf = dmam_alloc_coherent(dev, s, &buf_dma, 2272 GFP_KERNEL); 2273 if (!buf) 2274 goto err_out; 2275 2276 for (j = 0; j < slots_per_blk; j++, slot_index++) { 2277 struct hisi_sas_slot *slot; 2278 2279 slot = &hisi_hba->slot_info[slot_index]; 2280 slot->buf = buf; 2281 slot->buf_dma = buf_dma; 2282 slot->idx = slot_index; 2283 2284 buf += sz_slot_buf_ru; 2285 buf_dma += sz_slot_buf_ru; 2286 } 2287 } 2288 2289 s = max_command_entries * sizeof(struct hisi_sas_iost); 2290 hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma, 2291 GFP_KERNEL); 2292 if (!hisi_hba->iost) 2293 goto err_out; 2294 2295 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 2296 hisi_hba->breakpoint = dmam_alloc_coherent(dev, s, 2297 &hisi_hba->breakpoint_dma, 2298 GFP_KERNEL); 2299 if (!hisi_hba->breakpoint) 2300 goto err_out; 2301 2302 s = hisi_hba->slot_index_count = max_command_entries; 2303 hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL); 2304 if (!hisi_hba->slot_index_tags) 2305 goto err_out; 2306 2307 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; 2308 hisi_hba->initial_fis = dmam_alloc_coherent(dev, s, 2309 &hisi_hba->initial_fis_dma, 2310 GFP_KERNEL); 2311 if (!hisi_hba->initial_fis) 2312 goto err_out; 2313 2314 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint); 2315 hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s, 2316 &hisi_hba->sata_breakpoint_dma, 2317 GFP_KERNEL); 2318 if (!hisi_hba->sata_breakpoint) 2319 goto err_out; 2320 2321 hisi_hba->last_slot_index = 0; 2322 2323 hisi_hba->wq = 2324 alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, dev_name(dev)); 2325 if (!hisi_hba->wq) { 2326 dev_err(dev, "sas_alloc: failed to create workqueue\n"); 2327 goto err_out; 2328 } 2329 2330 return 0; 2331 err_out: 2332 return -ENOMEM; 2333 } 2334 EXPORT_SYMBOL_GPL(hisi_sas_alloc); 2335 2336 void hisi_sas_free(struct hisi_hba *hisi_hba) 2337 { 2338 int i; 2339 2340 for (i = 0; i < hisi_hba->n_phy; i++) { 2341 struct hisi_sas_phy *phy = &hisi_hba->phy[i]; 2342 2343 del_timer_sync(&phy->timer); 2344 } 2345 2346 if (hisi_hba->wq) 2347 destroy_workqueue(hisi_hba->wq); 2348 } 2349 EXPORT_SYMBOL_GPL(hisi_sas_free); 2350 2351 void hisi_sas_rst_work_handler(struct work_struct *work) 2352 { 2353 struct hisi_hba *hisi_hba = 2354 container_of(work, struct hisi_hba, rst_work); 2355 2356 if (hisi_sas_controller_prereset(hisi_hba)) 2357 return; 2358 2359 hisi_sas_controller_reset(hisi_hba); 2360 } 2361 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler); 2362 2363 void hisi_sas_sync_rst_work_handler(struct work_struct *work) 2364 { 2365 struct hisi_sas_rst *rst = 2366 container_of(work, struct hisi_sas_rst, work); 2367 2368 if (hisi_sas_controller_prereset(rst->hisi_hba)) 2369 goto rst_complete; 2370 2371 if (!hisi_sas_controller_reset(rst->hisi_hba)) 2372 rst->done = true; 2373 rst_complete: 2374 complete(rst->completion); 2375 } 2376 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler); 2377 2378 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba) 2379 { 2380 struct device *dev = hisi_hba->dev; 2381 struct platform_device *pdev = hisi_hba->platform_dev; 2382 struct device_node *np = pdev ? pdev->dev.of_node : NULL; 2383 struct clk *refclk; 2384 2385 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr, 2386 SAS_ADDR_SIZE)) { 2387 dev_err(dev, "could not get property sas-addr\n"); 2388 return -ENOENT; 2389 } 2390 2391 if (np) { 2392 /* 2393 * These properties are only required for platform device-based 2394 * controller with DT firmware. 2395 */ 2396 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np, 2397 "hisilicon,sas-syscon"); 2398 if (IS_ERR(hisi_hba->ctrl)) { 2399 dev_err(dev, "could not get syscon\n"); 2400 return -ENOENT; 2401 } 2402 2403 if (device_property_read_u32(dev, "ctrl-reset-reg", 2404 &hisi_hba->ctrl_reset_reg)) { 2405 dev_err(dev, "could not get property ctrl-reset-reg\n"); 2406 return -ENOENT; 2407 } 2408 2409 if (device_property_read_u32(dev, "ctrl-reset-sts-reg", 2410 &hisi_hba->ctrl_reset_sts_reg)) { 2411 dev_err(dev, "could not get property ctrl-reset-sts-reg\n"); 2412 return -ENOENT; 2413 } 2414 2415 if (device_property_read_u32(dev, "ctrl-clock-ena-reg", 2416 &hisi_hba->ctrl_clock_ena_reg)) { 2417 dev_err(dev, "could not get property ctrl-clock-ena-reg\n"); 2418 return -ENOENT; 2419 } 2420 } 2421 2422 refclk = devm_clk_get(dev, NULL); 2423 if (IS_ERR(refclk)) 2424 dev_dbg(dev, "no ref clk property\n"); 2425 else 2426 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; 2427 2428 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) { 2429 dev_err(dev, "could not get property phy-count\n"); 2430 return -ENOENT; 2431 } 2432 2433 if (device_property_read_u32(dev, "queue-count", 2434 &hisi_hba->queue_count)) { 2435 dev_err(dev, "could not get property queue-count\n"); 2436 return -ENOENT; 2437 } 2438 2439 return 0; 2440 } 2441 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info); 2442 2443 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, 2444 const struct hisi_sas_hw *hw) 2445 { 2446 struct resource *res; 2447 struct Scsi_Host *shost; 2448 struct hisi_hba *hisi_hba; 2449 struct device *dev = &pdev->dev; 2450 int error; 2451 2452 shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba)); 2453 if (!shost) { 2454 dev_err(dev, "scsi host alloc failed\n"); 2455 return NULL; 2456 } 2457 hisi_hba = shost_priv(shost); 2458 2459 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); 2460 hisi_hba->hw = hw; 2461 hisi_hba->dev = dev; 2462 hisi_hba->platform_dev = pdev; 2463 hisi_hba->shost = shost; 2464 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; 2465 2466 timer_setup(&hisi_hba->timer, NULL, 0); 2467 2468 if (hisi_sas_get_fw_info(hisi_hba) < 0) 2469 goto err_out; 2470 2471 if (hisi_hba->hw->fw_info_check) { 2472 if (hisi_hba->hw->fw_info_check(hisi_hba)) 2473 goto err_out; 2474 } 2475 2476 error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 2477 if (error) { 2478 dev_err(dev, "No usable DMA addressing method\n"); 2479 goto err_out; 2480 } 2481 2482 hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0); 2483 if (IS_ERR(hisi_hba->regs)) 2484 goto err_out; 2485 2486 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2487 if (res) { 2488 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res); 2489 if (IS_ERR(hisi_hba->sgpio_regs)) 2490 goto err_out; 2491 } 2492 2493 if (hisi_sas_alloc(hisi_hba)) { 2494 hisi_sas_free(hisi_hba); 2495 goto err_out; 2496 } 2497 2498 return shost; 2499 err_out: 2500 scsi_host_put(shost); 2501 dev_err(dev, "shost alloc failed\n"); 2502 return NULL; 2503 } 2504 2505 static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba) 2506 { 2507 if (hisi_hba->hw->interrupt_preinit) 2508 return hisi_hba->hw->interrupt_preinit(hisi_hba); 2509 return 0; 2510 } 2511 2512 int hisi_sas_probe(struct platform_device *pdev, 2513 const struct hisi_sas_hw *hw) 2514 { 2515 struct Scsi_Host *shost; 2516 struct hisi_hba *hisi_hba; 2517 struct device *dev = &pdev->dev; 2518 struct asd_sas_phy **arr_phy; 2519 struct asd_sas_port **arr_port; 2520 struct sas_ha_struct *sha; 2521 int rc, phy_nr, port_nr, i; 2522 2523 shost = hisi_sas_shost_alloc(pdev, hw); 2524 if (!shost) 2525 return -ENOMEM; 2526 2527 sha = SHOST_TO_SAS_HA(shost); 2528 hisi_hba = shost_priv(shost); 2529 platform_set_drvdata(pdev, sha); 2530 2531 phy_nr = port_nr = hisi_hba->n_phy; 2532 2533 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); 2534 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); 2535 if (!arr_phy || !arr_port) { 2536 rc = -ENOMEM; 2537 goto err_out_ha; 2538 } 2539 2540 sha->sas_phy = arr_phy; 2541 sha->sas_port = arr_port; 2542 sha->lldd_ha = hisi_hba; 2543 2544 shost->transportt = hisi_sas_stt; 2545 shost->max_id = HISI_SAS_MAX_DEVICES; 2546 shost->max_lun = ~0; 2547 shost->max_channel = 1; 2548 shost->max_cmd_len = 16; 2549 if (hisi_hba->hw->slot_index_alloc) { 2550 shost->can_queue = HISI_SAS_MAX_COMMANDS; 2551 shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS; 2552 } else { 2553 shost->can_queue = HISI_SAS_UNRESERVED_IPTT; 2554 shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT; 2555 } 2556 2557 sha->sas_ha_name = DRV_NAME; 2558 sha->dev = hisi_hba->dev; 2559 sha->sas_addr = &hisi_hba->sas_addr[0]; 2560 sha->num_phys = hisi_hba->n_phy; 2561 sha->shost = hisi_hba->shost; 2562 2563 for (i = 0; i < hisi_hba->n_phy; i++) { 2564 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; 2565 sha->sas_port[i] = &hisi_hba->port[i].sas_port; 2566 } 2567 2568 rc = hisi_sas_interrupt_preinit(hisi_hba); 2569 if (rc) 2570 goto err_out_ha; 2571 2572 rc = scsi_add_host(shost, &pdev->dev); 2573 if (rc) 2574 goto err_out_ha; 2575 2576 rc = sas_register_ha(sha); 2577 if (rc) 2578 goto err_out_register_ha; 2579 2580 rc = hisi_hba->hw->hw_init(hisi_hba); 2581 if (rc) 2582 goto err_out_hw_init; 2583 2584 scsi_scan_host(shost); 2585 2586 return 0; 2587 2588 err_out_hw_init: 2589 sas_unregister_ha(sha); 2590 err_out_register_ha: 2591 scsi_remove_host(shost); 2592 err_out_ha: 2593 hisi_sas_free(hisi_hba); 2594 scsi_host_put(shost); 2595 return rc; 2596 } 2597 EXPORT_SYMBOL_GPL(hisi_sas_probe); 2598 2599 void hisi_sas_remove(struct platform_device *pdev) 2600 { 2601 struct sas_ha_struct *sha = platform_get_drvdata(pdev); 2602 struct hisi_hba *hisi_hba = sha->lldd_ha; 2603 struct Scsi_Host *shost = sha->shost; 2604 2605 del_timer_sync(&hisi_hba->timer); 2606 2607 sas_unregister_ha(sha); 2608 sas_remove_host(shost); 2609 2610 hisi_sas_free(hisi_hba); 2611 scsi_host_put(shost); 2612 } 2613 EXPORT_SYMBOL_GPL(hisi_sas_remove); 2614 2615 #if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE) 2616 #define DEBUGFS_ENABLE_DEFAULT "enabled" 2617 bool hisi_sas_debugfs_enable = true; 2618 u32 hisi_sas_debugfs_dump_count = 50; 2619 #else 2620 #define DEBUGFS_ENABLE_DEFAULT "disabled" 2621 bool hisi_sas_debugfs_enable; 2622 u32 hisi_sas_debugfs_dump_count = 1; 2623 #endif 2624 2625 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable); 2626 module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444); 2627 MODULE_PARM_DESC(hisi_sas_debugfs_enable, 2628 "Enable driver debugfs (default "DEBUGFS_ENABLE_DEFAULT")"); 2629 2630 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count); 2631 module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444); 2632 MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow"); 2633 2634 struct dentry *hisi_sas_debugfs_dir; 2635 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir); 2636 2637 static __init int hisi_sas_init(void) 2638 { 2639 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); 2640 if (!hisi_sas_stt) 2641 return -ENOMEM; 2642 2643 if (hisi_sas_debugfs_enable) { 2644 hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL); 2645 if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) { 2646 pr_info("hisi_sas: Limiting debugfs dump count\n"); 2647 hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP; 2648 } 2649 } 2650 2651 return 0; 2652 } 2653 2654 static __exit void hisi_sas_exit(void) 2655 { 2656 if (hisi_sas_debugfs_enable) 2657 debugfs_remove(hisi_sas_debugfs_dir); 2658 2659 sas_release_transport(hisi_sas_stt); 2660 } 2661 2662 module_init(hisi_sas_init); 2663 module_exit(hisi_sas_exit); 2664 2665 MODULE_LICENSE("GPL"); 2666 MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); 2667 MODULE_DESCRIPTION("HISILICON SAS controller driver"); 2668 MODULE_ALIAS("platform:" DRV_NAME); 2669