1 /* 2 * Copyright (c) 2015 Linaro Ltd. 3 * Copyright (c) 2015 Hisilicon Limited. 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation; either version 2 of the License, or 8 * (at your option) any later version. 9 * 10 */ 11 12 #include "hisi_sas.h" 13 #define DRV_NAME "hisi_sas" 14 15 #define DEV_IS_GONE(dev) \ 16 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED)) 17 18 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device, 19 u8 *lun, struct hisi_sas_tmf_task *tmf); 20 static int 21 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, 22 struct domain_device *device, 23 int abort_flag, int tag); 24 static int hisi_sas_softreset_ata_disk(struct domain_device *device); 25 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 26 void *funcdata); 27 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 28 struct domain_device *device); 29 static void hisi_sas_dev_gone(struct domain_device *device); 30 31 u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction) 32 { 33 switch (fis->command) { 34 case ATA_CMD_FPDMA_WRITE: 35 case ATA_CMD_FPDMA_READ: 36 case ATA_CMD_FPDMA_RECV: 37 case ATA_CMD_FPDMA_SEND: 38 case ATA_CMD_NCQ_NON_DATA: 39 return HISI_SAS_SATA_PROTOCOL_FPDMA; 40 41 case ATA_CMD_DOWNLOAD_MICRO: 42 case ATA_CMD_ID_ATA: 43 case ATA_CMD_PMP_READ: 44 case ATA_CMD_READ_LOG_EXT: 45 case ATA_CMD_PIO_READ: 46 case ATA_CMD_PIO_READ_EXT: 47 case ATA_CMD_PMP_WRITE: 48 case ATA_CMD_WRITE_LOG_EXT: 49 case ATA_CMD_PIO_WRITE: 50 case ATA_CMD_PIO_WRITE_EXT: 51 return HISI_SAS_SATA_PROTOCOL_PIO; 52 53 case ATA_CMD_DSM: 54 case ATA_CMD_DOWNLOAD_MICRO_DMA: 55 case ATA_CMD_PMP_READ_DMA: 56 case ATA_CMD_PMP_WRITE_DMA: 57 case ATA_CMD_READ: 58 case ATA_CMD_READ_EXT: 59 case ATA_CMD_READ_LOG_DMA_EXT: 60 case ATA_CMD_READ_STREAM_DMA_EXT: 61 case ATA_CMD_TRUSTED_RCV_DMA: 62 case ATA_CMD_TRUSTED_SND_DMA: 63 case ATA_CMD_WRITE: 64 case ATA_CMD_WRITE_EXT: 65 case ATA_CMD_WRITE_FUA_EXT: 66 case ATA_CMD_WRITE_QUEUED: 67 case ATA_CMD_WRITE_LOG_DMA_EXT: 68 case ATA_CMD_WRITE_STREAM_DMA_EXT: 69 case ATA_CMD_ZAC_MGMT_IN: 70 return HISI_SAS_SATA_PROTOCOL_DMA; 71 72 case ATA_CMD_CHK_POWER: 73 case ATA_CMD_DEV_RESET: 74 case ATA_CMD_EDD: 75 case ATA_CMD_FLUSH: 76 case ATA_CMD_FLUSH_EXT: 77 case ATA_CMD_VERIFY: 78 case ATA_CMD_VERIFY_EXT: 79 case ATA_CMD_SET_FEATURES: 80 case ATA_CMD_STANDBY: 81 case ATA_CMD_STANDBYNOW1: 82 case ATA_CMD_ZAC_MGMT_OUT: 83 return HISI_SAS_SATA_PROTOCOL_NONDATA; 84 85 case ATA_CMD_SET_MAX: 86 switch (fis->features) { 87 case ATA_SET_MAX_PASSWD: 88 case ATA_SET_MAX_LOCK: 89 return HISI_SAS_SATA_PROTOCOL_PIO; 90 91 case ATA_SET_MAX_PASSWD_DMA: 92 case ATA_SET_MAX_UNLOCK_DMA: 93 return HISI_SAS_SATA_PROTOCOL_DMA; 94 95 default: 96 return HISI_SAS_SATA_PROTOCOL_NONDATA; 97 } 98 99 default: 100 { 101 if (direction == DMA_NONE) 102 return HISI_SAS_SATA_PROTOCOL_NONDATA; 103 return HISI_SAS_SATA_PROTOCOL_PIO; 104 } 105 } 106 } 107 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); 108 109 void hisi_sas_sata_done(struct sas_task *task, 110 struct hisi_sas_slot *slot) 111 { 112 struct task_status_struct *ts = &task->task_status; 113 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf; 114 struct hisi_sas_status_buffer *status_buf = 115 hisi_sas_status_buf_addr_mem(slot); 116 u8 *iu = &status_buf->iu[0]; 117 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu; 118 119 resp->frame_len = sizeof(struct dev_to_host_fis); 120 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); 121 122 ts->buf_valid_size = sizeof(*resp); 123 } 124 EXPORT_SYMBOL_GPL(hisi_sas_sata_done); 125 126 int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag) 127 { 128 struct ata_queued_cmd *qc = task->uldd_task; 129 130 if (qc) { 131 if (qc->tf.command == ATA_CMD_FPDMA_WRITE || 132 qc->tf.command == ATA_CMD_FPDMA_READ) { 133 *tag = qc->tag; 134 return 1; 135 } 136 } 137 return 0; 138 } 139 EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag); 140 141 /* 142 * This function assumes linkrate mask fits in 8 bits, which it 143 * does for all HW versions supported. 144 */ 145 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max) 146 { 147 u16 rate = 0; 148 int i; 149 150 max -= SAS_LINK_RATE_1_5_GBPS; 151 for (i = 0; i <= max; i++) 152 rate |= 1 << (i * 2); 153 return rate; 154 } 155 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask); 156 157 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) 158 { 159 return device->port->ha->lldd_ha; 160 } 161 162 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port) 163 { 164 return container_of(sas_port, struct hisi_sas_port, sas_port); 165 } 166 EXPORT_SYMBOL_GPL(to_hisi_sas_port); 167 168 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba) 169 { 170 int phy_no; 171 172 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) 173 hisi_hba->hw->phy_disable(hisi_hba, phy_no); 174 } 175 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys); 176 177 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) 178 { 179 void *bitmap = hisi_hba->slot_index_tags; 180 181 clear_bit(slot_idx, bitmap); 182 } 183 184 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx) 185 { 186 unsigned long flags; 187 188 if (hisi_hba->hw->slot_index_alloc || (slot_idx >= 189 hisi_hba->hw->max_command_entries - HISI_SAS_RESERVED_IPTT_CNT)) { 190 spin_lock_irqsave(&hisi_hba->lock, flags); 191 hisi_sas_slot_index_clear(hisi_hba, slot_idx); 192 spin_unlock_irqrestore(&hisi_hba->lock, flags); 193 } 194 } 195 196 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx) 197 { 198 void *bitmap = hisi_hba->slot_index_tags; 199 200 set_bit(slot_idx, bitmap); 201 } 202 203 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, 204 struct scsi_cmnd *scsi_cmnd) 205 { 206 int index; 207 void *bitmap = hisi_hba->slot_index_tags; 208 unsigned long flags; 209 210 if (scsi_cmnd) 211 return scsi_cmnd->request->tag; 212 213 spin_lock_irqsave(&hisi_hba->lock, flags); 214 index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, 215 hisi_hba->last_slot_index + 1); 216 if (index >= hisi_hba->slot_index_count) { 217 index = find_next_zero_bit(bitmap, 218 hisi_hba->slot_index_count, 219 hisi_hba->hw->max_command_entries - 220 HISI_SAS_RESERVED_IPTT_CNT); 221 if (index >= hisi_hba->slot_index_count) { 222 spin_unlock_irqrestore(&hisi_hba->lock, flags); 223 return -SAS_QUEUE_FULL; 224 } 225 } 226 hisi_sas_slot_index_set(hisi_hba, index); 227 hisi_hba->last_slot_index = index; 228 spin_unlock_irqrestore(&hisi_hba->lock, flags); 229 230 return index; 231 } 232 233 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba) 234 { 235 int i; 236 237 for (i = 0; i < hisi_hba->slot_index_count; ++i) 238 hisi_sas_slot_index_clear(hisi_hba, i); 239 } 240 241 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, 242 struct hisi_sas_slot *slot) 243 { 244 struct hisi_sas_dq *dq = &hisi_hba->dq[slot->dlvry_queue]; 245 unsigned long flags; 246 247 if (task) { 248 struct device *dev = hisi_hba->dev; 249 250 if (!task->lldd_task) 251 return; 252 253 task->lldd_task = NULL; 254 255 if (!sas_protocol_ata(task->task_proto)) 256 if (slot->n_elem) 257 dma_unmap_sg(dev, task->scatter, 258 task->num_scatter, 259 task->data_dir); 260 } 261 262 263 spin_lock_irqsave(&dq->lock, flags); 264 list_del_init(&slot->entry); 265 spin_unlock_irqrestore(&dq->lock, flags); 266 267 memset(slot, 0, offsetof(struct hisi_sas_slot, buf)); 268 269 hisi_sas_slot_index_free(hisi_hba, slot->idx); 270 } 271 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); 272 273 static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba, 274 struct hisi_sas_slot *slot) 275 { 276 hisi_hba->hw->prep_smp(hisi_hba, slot); 277 } 278 279 static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba, 280 struct hisi_sas_slot *slot) 281 { 282 hisi_hba->hw->prep_ssp(hisi_hba, slot); 283 } 284 285 static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba, 286 struct hisi_sas_slot *slot) 287 { 288 hisi_hba->hw->prep_stp(hisi_hba, slot); 289 } 290 291 static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, 292 struct hisi_sas_slot *slot, 293 int device_id, int abort_flag, int tag_to_abort) 294 { 295 hisi_hba->hw->prep_abort(hisi_hba, slot, 296 device_id, abort_flag, tag_to_abort); 297 } 298 299 static int hisi_sas_task_prep(struct sas_task *task, 300 struct hisi_sas_dq **dq_pointer, 301 bool is_tmf, struct hisi_sas_tmf_task *tmf, 302 int *pass) 303 { 304 struct domain_device *device = task->dev; 305 struct hisi_hba *hisi_hba; 306 struct hisi_sas_device *sas_dev = device->lldd_dev; 307 struct hisi_sas_port *port; 308 struct hisi_sas_slot *slot; 309 struct hisi_sas_cmd_hdr *cmd_hdr_base; 310 struct asd_sas_port *sas_port = device->port; 311 struct device *dev; 312 int dlvry_queue_slot, dlvry_queue, rc, slot_idx; 313 int n_elem = 0, n_elem_req = 0, n_elem_resp = 0; 314 struct hisi_sas_dq *dq; 315 unsigned long flags; 316 int wr_q_index; 317 318 if (!sas_port) { 319 struct task_status_struct *ts = &task->task_status; 320 321 ts->resp = SAS_TASK_UNDELIVERED; 322 ts->stat = SAS_PHY_DOWN; 323 /* 324 * libsas will use dev->port, should 325 * not call task_done for sata 326 */ 327 if (device->dev_type != SAS_SATA_DEV) 328 task->task_done(task); 329 return -ECOMM; 330 } 331 332 hisi_hba = dev_to_hisi_hba(device); 333 dev = hisi_hba->dev; 334 335 if (DEV_IS_GONE(sas_dev)) { 336 if (sas_dev) 337 dev_info(dev, "task prep: device %d not ready\n", 338 sas_dev->device_id); 339 else 340 dev_info(dev, "task prep: device %016llx not ready\n", 341 SAS_ADDR(device->sas_addr)); 342 343 return -ECOMM; 344 } 345 346 *dq_pointer = dq = sas_dev->dq; 347 348 port = to_hisi_sas_port(sas_port); 349 if (port && !port->port_attached) { 350 dev_info(dev, "task prep: %s port%d not attach device\n", 351 (dev_is_sata(device)) ? 352 "SATA/STP" : "SAS", 353 device->port->id); 354 355 return -ECOMM; 356 } 357 358 if (!sas_protocol_ata(task->task_proto)) { 359 unsigned int req_len, resp_len; 360 361 if (task->num_scatter) { 362 n_elem = dma_map_sg(dev, task->scatter, 363 task->num_scatter, task->data_dir); 364 if (!n_elem) { 365 rc = -ENOMEM; 366 goto prep_out; 367 } 368 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 369 n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req, 370 1, DMA_TO_DEVICE); 371 if (!n_elem_req) { 372 rc = -ENOMEM; 373 goto prep_out; 374 } 375 req_len = sg_dma_len(&task->smp_task.smp_req); 376 if (req_len & 0x3) { 377 rc = -EINVAL; 378 goto err_out_dma_unmap; 379 } 380 n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp, 381 1, DMA_FROM_DEVICE); 382 if (!n_elem_resp) { 383 rc = -ENOMEM; 384 goto err_out_dma_unmap; 385 } 386 resp_len = sg_dma_len(&task->smp_task.smp_resp); 387 if (resp_len & 0x3) { 388 rc = -EINVAL; 389 goto err_out_dma_unmap; 390 } 391 } 392 } else 393 n_elem = task->num_scatter; 394 395 if (n_elem > HISI_SAS_SGE_PAGE_CNT) { 396 dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT", 397 n_elem); 398 rc = -EINVAL; 399 goto err_out_dma_unmap; 400 } 401 402 if (hisi_hba->hw->slot_index_alloc) 403 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device); 404 else { 405 struct scsi_cmnd *scsi_cmnd = NULL; 406 407 if (task->uldd_task) { 408 struct ata_queued_cmd *qc; 409 410 if (dev_is_sata(device)) { 411 qc = task->uldd_task; 412 scsi_cmnd = qc->scsicmd; 413 } else { 414 scsi_cmnd = task->uldd_task; 415 } 416 } 417 rc = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd); 418 } 419 if (rc < 0) 420 goto err_out_dma_unmap; 421 422 slot_idx = rc; 423 slot = &hisi_hba->slot_info[slot_idx]; 424 425 spin_lock_irqsave(&dq->lock, flags); 426 wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq); 427 if (wr_q_index < 0) { 428 spin_unlock_irqrestore(&dq->lock, flags); 429 rc = -EAGAIN; 430 goto err_out_tag; 431 } 432 433 list_add_tail(&slot->delivery, &dq->list); 434 list_add_tail(&slot->entry, &sas_dev->list); 435 spin_unlock_irqrestore(&dq->lock, flags); 436 437 dlvry_queue = dq->id; 438 dlvry_queue_slot = wr_q_index; 439 440 slot->n_elem = n_elem; 441 slot->dlvry_queue = dlvry_queue; 442 slot->dlvry_queue_slot = dlvry_queue_slot; 443 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; 444 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; 445 slot->task = task; 446 slot->port = port; 447 slot->tmf = tmf; 448 slot->is_internal = is_tmf; 449 task->lldd_task = slot; 450 451 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 452 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); 453 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ); 454 455 switch (task->task_proto) { 456 case SAS_PROTOCOL_SMP: 457 hisi_sas_task_prep_smp(hisi_hba, slot); 458 break; 459 case SAS_PROTOCOL_SSP: 460 hisi_sas_task_prep_ssp(hisi_hba, slot); 461 break; 462 case SAS_PROTOCOL_SATA: 463 case SAS_PROTOCOL_STP: 464 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 465 hisi_sas_task_prep_ata(hisi_hba, slot); 466 break; 467 default: 468 dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n", 469 task->task_proto); 470 break; 471 } 472 473 spin_lock_irqsave(&task->task_state_lock, flags); 474 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 475 spin_unlock_irqrestore(&task->task_state_lock, flags); 476 477 ++(*pass); 478 WRITE_ONCE(slot->ready, 1); 479 480 return 0; 481 482 err_out_tag: 483 hisi_sas_slot_index_free(hisi_hba, slot_idx); 484 err_out_dma_unmap: 485 if (!sas_protocol_ata(task->task_proto)) { 486 if (task->num_scatter) { 487 dma_unmap_sg(dev, task->scatter, task->num_scatter, 488 task->data_dir); 489 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 490 if (n_elem_req) 491 dma_unmap_sg(dev, &task->smp_task.smp_req, 492 1, DMA_TO_DEVICE); 493 if (n_elem_resp) 494 dma_unmap_sg(dev, &task->smp_task.smp_resp, 495 1, DMA_FROM_DEVICE); 496 } 497 } 498 prep_out: 499 dev_err(dev, "task prep: failed[%d]!\n", rc); 500 return rc; 501 } 502 503 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, 504 bool is_tmf, struct hisi_sas_tmf_task *tmf) 505 { 506 u32 rc; 507 u32 pass = 0; 508 unsigned long flags; 509 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev); 510 struct device *dev = hisi_hba->dev; 511 struct hisi_sas_dq *dq = NULL; 512 513 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) { 514 if (in_softirq()) 515 return -EINVAL; 516 517 down(&hisi_hba->sem); 518 up(&hisi_hba->sem); 519 } 520 521 /* protect task_prep and start_delivery sequence */ 522 rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass); 523 if (rc) 524 dev_err(dev, "task exec: failed[%d]!\n", rc); 525 526 if (likely(pass)) { 527 spin_lock_irqsave(&dq->lock, flags); 528 hisi_hba->hw->start_delivery(dq); 529 spin_unlock_irqrestore(&dq->lock, flags); 530 } 531 532 return rc; 533 } 534 535 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no) 536 { 537 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 538 struct asd_sas_phy *sas_phy = &phy->sas_phy; 539 struct sas_ha_struct *sas_ha; 540 541 if (!phy->phy_attached) 542 return; 543 544 sas_ha = &hisi_hba->sha; 545 sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE); 546 547 if (sas_phy->phy) { 548 struct sas_phy *sphy = sas_phy->phy; 549 550 sphy->negotiated_linkrate = sas_phy->linkrate; 551 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; 552 sphy->maximum_linkrate_hw = 553 hisi_hba->hw->phy_get_max_linkrate(); 554 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) 555 sphy->minimum_linkrate = phy->minimum_linkrate; 556 557 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) 558 sphy->maximum_linkrate = phy->maximum_linkrate; 559 } 560 561 if (phy->phy_type & PORT_TYPE_SAS) { 562 struct sas_identify_frame *id; 563 564 id = (struct sas_identify_frame *)phy->frame_rcvd; 565 id->dev_type = phy->identify.device_type; 566 id->initiator_bits = SAS_PROTOCOL_ALL; 567 id->target_bits = phy->identify.target_port_protocols; 568 } else if (phy->phy_type & PORT_TYPE_SATA) { 569 /*Nothing*/ 570 } 571 572 sas_phy->frame_rcvd_size = phy->frame_rcvd_size; 573 sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED); 574 } 575 576 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) 577 { 578 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 579 struct hisi_sas_device *sas_dev = NULL; 580 unsigned long flags; 581 int last = hisi_hba->last_dev_id; 582 int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES; 583 int i; 584 585 spin_lock_irqsave(&hisi_hba->lock, flags); 586 for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) { 587 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { 588 int queue = i % hisi_hba->queue_count; 589 struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; 590 591 hisi_hba->devices[i].device_id = i; 592 sas_dev = &hisi_hba->devices[i]; 593 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 594 sas_dev->dev_type = device->dev_type; 595 sas_dev->hisi_hba = hisi_hba; 596 sas_dev->sas_device = device; 597 sas_dev->dq = dq; 598 INIT_LIST_HEAD(&hisi_hba->devices[i].list); 599 break; 600 } 601 i++; 602 } 603 hisi_hba->last_dev_id = i; 604 spin_unlock_irqrestore(&hisi_hba->lock, flags); 605 606 return sas_dev; 607 } 608 609 #define HISI_SAS_SRST_ATA_DISK_CNT 3 610 static int hisi_sas_init_device(struct domain_device *device) 611 { 612 int rc = TMF_RESP_FUNC_COMPLETE; 613 struct scsi_lun lun; 614 struct hisi_sas_tmf_task tmf_task; 615 int retry = HISI_SAS_SRST_ATA_DISK_CNT; 616 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 617 618 switch (device->dev_type) { 619 case SAS_END_DEVICE: 620 int_to_scsilun(0, &lun); 621 622 tmf_task.tmf = TMF_CLEAR_TASK_SET; 623 rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun, 624 &tmf_task); 625 if (rc == TMF_RESP_FUNC_COMPLETE) 626 hisi_sas_release_task(hisi_hba, device); 627 break; 628 case SAS_SATA_DEV: 629 case SAS_SATA_PM: 630 case SAS_SATA_PM_PORT: 631 case SAS_SATA_PENDING: 632 while (retry-- > 0) { 633 rc = hisi_sas_softreset_ata_disk(device); 634 if (!rc) 635 break; 636 } 637 break; 638 default: 639 break; 640 } 641 642 return rc; 643 } 644 645 static int hisi_sas_dev_found(struct domain_device *device) 646 { 647 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 648 struct domain_device *parent_dev = device->parent; 649 struct hisi_sas_device *sas_dev; 650 struct device *dev = hisi_hba->dev; 651 int rc; 652 653 if (hisi_hba->hw->alloc_dev) 654 sas_dev = hisi_hba->hw->alloc_dev(device); 655 else 656 sas_dev = hisi_sas_alloc_dev(device); 657 if (!sas_dev) { 658 dev_err(dev, "fail alloc dev: max support %d devices\n", 659 HISI_SAS_MAX_DEVICES); 660 return -EINVAL; 661 } 662 663 device->lldd_dev = sas_dev; 664 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 665 666 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { 667 int phy_no; 668 u8 phy_num = parent_dev->ex_dev.num_phys; 669 struct ex_phy *phy; 670 671 for (phy_no = 0; phy_no < phy_num; phy_no++) { 672 phy = &parent_dev->ex_dev.ex_phy[phy_no]; 673 if (SAS_ADDR(phy->attached_sas_addr) == 674 SAS_ADDR(device->sas_addr)) 675 break; 676 } 677 678 if (phy_no == phy_num) { 679 dev_info(dev, "dev found: no attached " 680 "dev:%016llx at ex:%016llx\n", 681 SAS_ADDR(device->sas_addr), 682 SAS_ADDR(parent_dev->sas_addr)); 683 rc = -EINVAL; 684 goto err_out; 685 } 686 } 687 688 dev_info(dev, "dev[%d:%x] found\n", 689 sas_dev->device_id, sas_dev->dev_type); 690 691 rc = hisi_sas_init_device(device); 692 if (rc) 693 goto err_out; 694 return 0; 695 696 err_out: 697 hisi_sas_dev_gone(device); 698 return rc; 699 } 700 701 int hisi_sas_slave_configure(struct scsi_device *sdev) 702 { 703 struct domain_device *dev = sdev_to_domain_dev(sdev); 704 int ret = sas_slave_configure(sdev); 705 706 if (ret) 707 return ret; 708 if (!dev_is_sata(dev)) 709 sas_change_queue_depth(sdev, 64); 710 711 return 0; 712 } 713 EXPORT_SYMBOL_GPL(hisi_sas_slave_configure); 714 715 void hisi_sas_scan_start(struct Scsi_Host *shost) 716 { 717 struct hisi_hba *hisi_hba = shost_priv(shost); 718 719 hisi_hba->hw->phys_init(hisi_hba); 720 } 721 EXPORT_SYMBOL_GPL(hisi_sas_scan_start); 722 723 int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) 724 { 725 struct hisi_hba *hisi_hba = shost_priv(shost); 726 struct sas_ha_struct *sha = &hisi_hba->sha; 727 728 /* Wait for PHY up interrupt to occur */ 729 if (time < HZ) 730 return 0; 731 732 sas_drain_work(sha); 733 return 1; 734 } 735 EXPORT_SYMBOL_GPL(hisi_sas_scan_finished); 736 737 static void hisi_sas_phyup_work(struct work_struct *work) 738 { 739 struct hisi_sas_phy *phy = 740 container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]); 741 struct hisi_hba *hisi_hba = phy->hisi_hba; 742 struct asd_sas_phy *sas_phy = &phy->sas_phy; 743 int phy_no = sas_phy->id; 744 745 hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */ 746 hisi_sas_bytes_dmaed(hisi_hba, phy_no); 747 } 748 749 static void hisi_sas_linkreset_work(struct work_struct *work) 750 { 751 struct hisi_sas_phy *phy = 752 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]); 753 struct asd_sas_phy *sas_phy = &phy->sas_phy; 754 755 hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL); 756 } 757 758 static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = { 759 [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work, 760 [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work, 761 }; 762 763 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, 764 enum hisi_sas_phy_event event) 765 { 766 struct hisi_hba *hisi_hba = phy->hisi_hba; 767 768 if (WARN_ON(event >= HISI_PHYES_NUM)) 769 return false; 770 771 return queue_work(hisi_hba->wq, &phy->works[event]); 772 } 773 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event); 774 775 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) 776 { 777 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 778 struct asd_sas_phy *sas_phy = &phy->sas_phy; 779 int i; 780 781 phy->hisi_hba = hisi_hba; 782 phy->port = NULL; 783 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; 784 phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate(); 785 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0; 786 sas_phy->class = SAS; 787 sas_phy->iproto = SAS_PROTOCOL_ALL; 788 sas_phy->tproto = 0; 789 sas_phy->type = PHY_TYPE_PHYSICAL; 790 sas_phy->role = PHY_ROLE_INITIATOR; 791 sas_phy->oob_mode = OOB_NOT_CONNECTED; 792 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; 793 sas_phy->id = phy_no; 794 sas_phy->sas_addr = &hisi_hba->sas_addr[0]; 795 sas_phy->frame_rcvd = &phy->frame_rcvd[0]; 796 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata; 797 sas_phy->lldd_phy = phy; 798 799 for (i = 0; i < HISI_PHYES_NUM; i++) 800 INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]); 801 802 spin_lock_init(&phy->lock); 803 } 804 805 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) 806 { 807 struct sas_ha_struct *sas_ha = sas_phy->ha; 808 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 809 struct hisi_sas_phy *phy = sas_phy->lldd_phy; 810 struct asd_sas_port *sas_port = sas_phy->port; 811 struct hisi_sas_port *port = to_hisi_sas_port(sas_port); 812 unsigned long flags; 813 814 if (!sas_port) 815 return; 816 817 spin_lock_irqsave(&hisi_hba->lock, flags); 818 port->port_attached = 1; 819 port->id = phy->port_id; 820 phy->port = port; 821 sas_port->lldd_port = port; 822 spin_unlock_irqrestore(&hisi_hba->lock, flags); 823 } 824 825 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task, 826 struct hisi_sas_slot *slot) 827 { 828 if (task) { 829 unsigned long flags; 830 struct task_status_struct *ts; 831 832 ts = &task->task_status; 833 834 ts->resp = SAS_TASK_COMPLETE; 835 ts->stat = SAS_ABORTED_TASK; 836 spin_lock_irqsave(&task->task_state_lock, flags); 837 task->task_state_flags &= 838 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 839 task->task_state_flags |= SAS_TASK_STATE_DONE; 840 spin_unlock_irqrestore(&task->task_state_lock, flags); 841 } 842 843 hisi_sas_slot_task_free(hisi_hba, task, slot); 844 } 845 846 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 847 struct domain_device *device) 848 { 849 struct hisi_sas_slot *slot, *slot2; 850 struct hisi_sas_device *sas_dev = device->lldd_dev; 851 852 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) 853 hisi_sas_do_release_task(hisi_hba, slot->task, slot); 854 } 855 856 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba) 857 { 858 struct hisi_sas_device *sas_dev; 859 struct domain_device *device; 860 int i; 861 862 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 863 sas_dev = &hisi_hba->devices[i]; 864 device = sas_dev->sas_device; 865 866 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || 867 !device) 868 continue; 869 870 hisi_sas_release_task(hisi_hba, device); 871 } 872 } 873 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks); 874 875 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba, 876 struct domain_device *device) 877 { 878 if (hisi_hba->hw->dereg_device) 879 hisi_hba->hw->dereg_device(hisi_hba, device); 880 } 881 882 static void hisi_sas_dev_gone(struct domain_device *device) 883 { 884 struct hisi_sas_device *sas_dev = device->lldd_dev; 885 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 886 struct device *dev = hisi_hba->dev; 887 888 dev_info(dev, "dev[%d:%x] is gone\n", 889 sas_dev->device_id, sas_dev->dev_type); 890 891 if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) { 892 hisi_sas_internal_task_abort(hisi_hba, device, 893 HISI_SAS_INT_ABT_DEV, 0); 894 895 hisi_sas_dereg_device(hisi_hba, device); 896 897 down(&hisi_hba->sem); 898 hisi_hba->hw->clear_itct(hisi_hba, sas_dev); 899 up(&hisi_hba->sem); 900 device->lldd_dev = NULL; 901 } 902 903 if (hisi_hba->hw->free_device) 904 hisi_hba->hw->free_device(sas_dev); 905 sas_dev->dev_type = SAS_PHY_UNUSED; 906 } 907 908 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) 909 { 910 return hisi_sas_task_exec(task, gfp_flags, 0, NULL); 911 } 912 913 static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, 914 struct sas_phy_linkrates *r) 915 { 916 struct sas_phy_linkrates _r; 917 918 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 919 struct asd_sas_phy *sas_phy = &phy->sas_phy; 920 enum sas_linkrate min, max; 921 922 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { 923 max = sas_phy->phy->maximum_linkrate; 924 min = r->minimum_linkrate; 925 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { 926 max = r->maximum_linkrate; 927 min = sas_phy->phy->minimum_linkrate; 928 } else 929 return; 930 931 _r.maximum_linkrate = max; 932 _r.minimum_linkrate = min; 933 934 sas_phy->phy->maximum_linkrate = max; 935 sas_phy->phy->minimum_linkrate = min; 936 937 hisi_hba->hw->phy_disable(hisi_hba, phy_no); 938 msleep(100); 939 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r); 940 hisi_hba->hw->phy_start(hisi_hba, phy_no); 941 } 942 943 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 944 void *funcdata) 945 { 946 struct sas_ha_struct *sas_ha = sas_phy->ha; 947 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 948 int phy_no = sas_phy->id; 949 950 switch (func) { 951 case PHY_FUNC_HARD_RESET: 952 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no); 953 break; 954 955 case PHY_FUNC_LINK_RESET: 956 hisi_hba->hw->phy_disable(hisi_hba, phy_no); 957 msleep(100); 958 hisi_hba->hw->phy_start(hisi_hba, phy_no); 959 break; 960 961 case PHY_FUNC_DISABLE: 962 hisi_hba->hw->phy_disable(hisi_hba, phy_no); 963 break; 964 965 case PHY_FUNC_SET_LINK_RATE: 966 hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); 967 break; 968 case PHY_FUNC_GET_EVENTS: 969 if (hisi_hba->hw->get_events) { 970 hisi_hba->hw->get_events(hisi_hba, phy_no); 971 break; 972 } 973 /* fallthru */ 974 case PHY_FUNC_RELEASE_SPINUP_HOLD: 975 default: 976 return -EOPNOTSUPP; 977 } 978 return 0; 979 } 980 981 static void hisi_sas_task_done(struct sas_task *task) 982 { 983 del_timer(&task->slow_task->timer); 984 complete(&task->slow_task->completion); 985 } 986 987 static void hisi_sas_tmf_timedout(struct timer_list *t) 988 { 989 struct sas_task_slow *slow = from_timer(slow, t, timer); 990 struct sas_task *task = slow->task; 991 unsigned long flags; 992 bool is_completed = true; 993 994 spin_lock_irqsave(&task->task_state_lock, flags); 995 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 996 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 997 is_completed = false; 998 } 999 spin_unlock_irqrestore(&task->task_state_lock, flags); 1000 1001 if (!is_completed) 1002 complete(&task->slow_task->completion); 1003 } 1004 1005 #define TASK_TIMEOUT 20 1006 #define TASK_RETRY 3 1007 #define INTERNAL_ABORT_TIMEOUT 6 1008 static int hisi_sas_exec_internal_tmf_task(struct domain_device *device, 1009 void *parameter, u32 para_len, 1010 struct hisi_sas_tmf_task *tmf) 1011 { 1012 struct hisi_sas_device *sas_dev = device->lldd_dev; 1013 struct hisi_hba *hisi_hba = sas_dev->hisi_hba; 1014 struct device *dev = hisi_hba->dev; 1015 struct sas_task *task; 1016 int res, retry; 1017 1018 for (retry = 0; retry < TASK_RETRY; retry++) { 1019 task = sas_alloc_slow_task(GFP_KERNEL); 1020 if (!task) 1021 return -ENOMEM; 1022 1023 task->dev = device; 1024 task->task_proto = device->tproto; 1025 1026 if (dev_is_sata(device)) { 1027 task->ata_task.device_control_reg_update = 1; 1028 memcpy(&task->ata_task.fis, parameter, para_len); 1029 } else { 1030 memcpy(&task->ssp_task, parameter, para_len); 1031 } 1032 task->task_done = hisi_sas_task_done; 1033 1034 task->slow_task->timer.function = hisi_sas_tmf_timedout; 1035 task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ; 1036 add_timer(&task->slow_task->timer); 1037 1038 res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf); 1039 1040 if (res) { 1041 del_timer(&task->slow_task->timer); 1042 dev_err(dev, "abort tmf: executing internal task failed: %d\n", 1043 res); 1044 goto ex_err; 1045 } 1046 1047 wait_for_completion(&task->slow_task->completion); 1048 res = TMF_RESP_FUNC_FAILED; 1049 /* Even TMF timed out, return direct. */ 1050 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 1051 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 1052 struct hisi_sas_slot *slot = task->lldd_task; 1053 1054 dev_err(dev, "abort tmf: TMF task timeout and not done\n"); 1055 if (slot) { 1056 struct hisi_sas_cq *cq = 1057 &hisi_hba->cq[slot->dlvry_queue]; 1058 /* 1059 * flush tasklet to avoid free'ing task 1060 * before using task in IO completion 1061 */ 1062 tasklet_kill(&cq->tasklet); 1063 slot->task = NULL; 1064 } 1065 1066 goto ex_err; 1067 } else 1068 dev_err(dev, "abort tmf: TMF task timeout\n"); 1069 } 1070 1071 if (task->task_status.resp == SAS_TASK_COMPLETE && 1072 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) { 1073 res = TMF_RESP_FUNC_COMPLETE; 1074 break; 1075 } 1076 1077 if (task->task_status.resp == SAS_TASK_COMPLETE && 1078 task->task_status.stat == TMF_RESP_FUNC_SUCC) { 1079 res = TMF_RESP_FUNC_SUCC; 1080 break; 1081 } 1082 1083 if (task->task_status.resp == SAS_TASK_COMPLETE && 1084 task->task_status.stat == SAS_DATA_UNDERRUN) { 1085 /* no error, but return the number of bytes of 1086 * underrun 1087 */ 1088 dev_warn(dev, "abort tmf: task to dev %016llx " 1089 "resp: 0x%x sts 0x%x underrun\n", 1090 SAS_ADDR(device->sas_addr), 1091 task->task_status.resp, 1092 task->task_status.stat); 1093 res = task->task_status.residual; 1094 break; 1095 } 1096 1097 if (task->task_status.resp == SAS_TASK_COMPLETE && 1098 task->task_status.stat == SAS_DATA_OVERRUN) { 1099 dev_warn(dev, "abort tmf: blocked task error\n"); 1100 res = -EMSGSIZE; 1101 break; 1102 } 1103 1104 dev_warn(dev, "abort tmf: task to dev " 1105 "%016llx resp: 0x%x status 0x%x\n", 1106 SAS_ADDR(device->sas_addr), task->task_status.resp, 1107 task->task_status.stat); 1108 sas_free_task(task); 1109 task = NULL; 1110 } 1111 ex_err: 1112 if (retry == TASK_RETRY) 1113 dev_warn(dev, "abort tmf: executing internal task failed!\n"); 1114 sas_free_task(task); 1115 return res; 1116 } 1117 1118 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev, 1119 bool reset, int pmp, u8 *fis) 1120 { 1121 struct ata_taskfile tf; 1122 1123 ata_tf_init(dev, &tf); 1124 if (reset) 1125 tf.ctl |= ATA_SRST; 1126 else 1127 tf.ctl &= ~ATA_SRST; 1128 tf.command = ATA_CMD_DEV_RESET; 1129 ata_tf_to_fis(&tf, pmp, 0, fis); 1130 } 1131 1132 static int hisi_sas_softreset_ata_disk(struct domain_device *device) 1133 { 1134 u8 fis[20] = {0}; 1135 struct ata_port *ap = device->sata_dev.ap; 1136 struct ata_link *link; 1137 int rc = TMF_RESP_FUNC_FAILED; 1138 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1139 struct device *dev = hisi_hba->dev; 1140 int s = sizeof(struct host_to_dev_fis); 1141 1142 ata_for_each_link(link, ap, EDGE) { 1143 int pmp = sata_srst_pmp(link); 1144 1145 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 1146 rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL); 1147 if (rc != TMF_RESP_FUNC_COMPLETE) 1148 break; 1149 } 1150 1151 if (rc == TMF_RESP_FUNC_COMPLETE) { 1152 ata_for_each_link(link, ap, EDGE) { 1153 int pmp = sata_srst_pmp(link); 1154 1155 hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis); 1156 rc = hisi_sas_exec_internal_tmf_task(device, fis, 1157 s, NULL); 1158 if (rc != TMF_RESP_FUNC_COMPLETE) 1159 dev_err(dev, "ata disk de-reset failed\n"); 1160 } 1161 } else { 1162 dev_err(dev, "ata disk reset failed\n"); 1163 } 1164 1165 if (rc == TMF_RESP_FUNC_COMPLETE) 1166 hisi_sas_release_task(hisi_hba, device); 1167 1168 return rc; 1169 } 1170 1171 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device, 1172 u8 *lun, struct hisi_sas_tmf_task *tmf) 1173 { 1174 struct sas_ssp_task ssp_task; 1175 1176 if (!(device->tproto & SAS_PROTOCOL_SSP)) 1177 return TMF_RESP_FUNC_ESUPP; 1178 1179 memcpy(ssp_task.LUN, lun, 8); 1180 1181 return hisi_sas_exec_internal_tmf_task(device, &ssp_task, 1182 sizeof(ssp_task), tmf); 1183 } 1184 1185 static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) 1186 { 1187 u32 state = hisi_hba->hw->get_phys_state(hisi_hba); 1188 int i; 1189 1190 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1191 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1192 struct domain_device *device = sas_dev->sas_device; 1193 struct asd_sas_port *sas_port; 1194 struct hisi_sas_port *port; 1195 struct hisi_sas_phy *phy = NULL; 1196 struct asd_sas_phy *sas_phy; 1197 1198 if ((sas_dev->dev_type == SAS_PHY_UNUSED) 1199 || !device || !device->port) 1200 continue; 1201 1202 sas_port = device->port; 1203 port = to_hisi_sas_port(sas_port); 1204 1205 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) 1206 if (state & BIT(sas_phy->id)) { 1207 phy = sas_phy->lldd_phy; 1208 break; 1209 } 1210 1211 if (phy) { 1212 port->id = phy->port_id; 1213 1214 /* Update linkrate of directly attached device. */ 1215 if (!device->parent) 1216 device->linkrate = phy->sas_phy.linkrate; 1217 1218 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 1219 } else 1220 port->id = 0xff; 1221 } 1222 } 1223 1224 static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state, 1225 u32 state) 1226 { 1227 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1228 struct asd_sas_port *_sas_port = NULL; 1229 int phy_no; 1230 1231 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { 1232 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1233 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1234 struct asd_sas_port *sas_port = sas_phy->port; 1235 bool do_port_check = !!(_sas_port != sas_port); 1236 1237 if (!sas_phy->phy->enabled) 1238 continue; 1239 1240 /* Report PHY state change to libsas */ 1241 if (state & BIT(phy_no)) { 1242 if (do_port_check && sas_port && sas_port->port_dev) { 1243 struct domain_device *dev = sas_port->port_dev; 1244 1245 _sas_port = sas_port; 1246 1247 if (DEV_IS_EXPANDER(dev->dev_type)) 1248 sas_ha->notify_port_event(sas_phy, 1249 PORTE_BROADCAST_RCVD); 1250 } 1251 } else if (old_state & (1 << phy_no)) 1252 /* PHY down but was up before */ 1253 hisi_sas_phy_down(hisi_hba, phy_no, 0); 1254 1255 } 1256 } 1257 1258 static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba) 1259 { 1260 struct hisi_sas_device *sas_dev; 1261 struct domain_device *device; 1262 int i; 1263 1264 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1265 sas_dev = &hisi_hba->devices[i]; 1266 device = sas_dev->sas_device; 1267 1268 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) 1269 continue; 1270 1271 hisi_sas_init_device(device); 1272 } 1273 } 1274 1275 static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba, 1276 struct asd_sas_port *sas_port, 1277 struct domain_device *device) 1278 { 1279 struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 }; 1280 struct ata_port *ap = device->sata_dev.ap; 1281 struct device *dev = hisi_hba->dev; 1282 int s = sizeof(struct host_to_dev_fis); 1283 int rc = TMF_RESP_FUNC_FAILED; 1284 struct asd_sas_phy *sas_phy; 1285 struct ata_link *link; 1286 u8 fis[20] = {0}; 1287 u32 state; 1288 1289 state = hisi_hba->hw->get_phys_state(hisi_hba); 1290 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) { 1291 if (!(state & BIT(sas_phy->id))) 1292 continue; 1293 1294 ata_for_each_link(link, ap, EDGE) { 1295 int pmp = sata_srst_pmp(link); 1296 1297 tmf_task.phy_id = sas_phy->id; 1298 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 1299 rc = hisi_sas_exec_internal_tmf_task(device, fis, s, 1300 &tmf_task); 1301 if (rc != TMF_RESP_FUNC_COMPLETE) { 1302 dev_err(dev, "phy%d ata reset failed rc=%d\n", 1303 sas_phy->id, rc); 1304 break; 1305 } 1306 } 1307 } 1308 } 1309 1310 static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba) 1311 { 1312 struct device *dev = hisi_hba->dev; 1313 int port_no, rc, i; 1314 1315 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1316 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1317 struct domain_device *device = sas_dev->sas_device; 1318 1319 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) 1320 continue; 1321 1322 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1323 HISI_SAS_INT_ABT_DEV, 0); 1324 if (rc < 0) 1325 dev_err(dev, "STP reject: abort dev failed %d\n", rc); 1326 } 1327 1328 for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) { 1329 struct hisi_sas_port *port = &hisi_hba->port[port_no]; 1330 struct asd_sas_port *sas_port = &port->sas_port; 1331 struct domain_device *port_dev = sas_port->port_dev; 1332 struct domain_device *device; 1333 1334 if (!port_dev || !DEV_IS_EXPANDER(port_dev->dev_type)) 1335 continue; 1336 1337 /* Try to find a SATA device */ 1338 list_for_each_entry(device, &sas_port->dev_list, 1339 dev_list_node) { 1340 if (dev_is_sata(device)) { 1341 hisi_sas_send_ata_reset_each_phy(hisi_hba, 1342 sas_port, 1343 device); 1344 break; 1345 } 1346 } 1347 } 1348 } 1349 1350 void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba) 1351 { 1352 struct Scsi_Host *shost = hisi_hba->shost; 1353 1354 down(&hisi_hba->sem); 1355 hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba); 1356 1357 scsi_block_requests(shost); 1358 hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000); 1359 1360 if (timer_pending(&hisi_hba->timer)) 1361 del_timer_sync(&hisi_hba->timer); 1362 1363 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1364 } 1365 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare); 1366 1367 void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba) 1368 { 1369 struct Scsi_Host *shost = hisi_hba->shost; 1370 u32 state; 1371 1372 /* Init and wait for PHYs to come up and all libsas event finished. */ 1373 hisi_hba->hw->phys_init(hisi_hba); 1374 msleep(1000); 1375 hisi_sas_refresh_port_id(hisi_hba); 1376 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1377 up(&hisi_hba->sem); 1378 1379 if (hisi_hba->reject_stp_links_msk) 1380 hisi_sas_terminate_stp_reject(hisi_hba); 1381 hisi_sas_reset_init_all_devices(hisi_hba); 1382 scsi_unblock_requests(shost); 1383 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); 1384 1385 state = hisi_hba->hw->get_phys_state(hisi_hba); 1386 hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state, state); 1387 } 1388 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done); 1389 1390 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) 1391 { 1392 struct device *dev = hisi_hba->dev; 1393 struct Scsi_Host *shost = hisi_hba->shost; 1394 int rc; 1395 1396 if (!hisi_hba->hw->soft_reset) 1397 return -1; 1398 1399 if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) 1400 return -1; 1401 1402 dev_info(dev, "controller resetting...\n"); 1403 hisi_sas_controller_reset_prepare(hisi_hba); 1404 1405 rc = hisi_hba->hw->soft_reset(hisi_hba); 1406 if (rc) { 1407 dev_warn(dev, "controller reset failed (%d)\n", rc); 1408 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1409 up(&hisi_hba->sem); 1410 scsi_unblock_requests(shost); 1411 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); 1412 return rc; 1413 } 1414 1415 hisi_sas_controller_reset_done(hisi_hba); 1416 dev_info(dev, "controller reset complete\n"); 1417 1418 return 0; 1419 } 1420 1421 static int hisi_sas_abort_task(struct sas_task *task) 1422 { 1423 struct scsi_lun lun; 1424 struct hisi_sas_tmf_task tmf_task; 1425 struct domain_device *device = task->dev; 1426 struct hisi_sas_device *sas_dev = device->lldd_dev; 1427 struct hisi_hba *hisi_hba; 1428 struct device *dev; 1429 int rc = TMF_RESP_FUNC_FAILED; 1430 unsigned long flags; 1431 1432 if (!sas_dev) 1433 return TMF_RESP_FUNC_FAILED; 1434 1435 hisi_hba = dev_to_hisi_hba(task->dev); 1436 dev = hisi_hba->dev; 1437 1438 spin_lock_irqsave(&task->task_state_lock, flags); 1439 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1440 struct hisi_sas_slot *slot = task->lldd_task; 1441 struct hisi_sas_cq *cq; 1442 1443 if (slot) { 1444 /* 1445 * flush tasklet to avoid free'ing task 1446 * before using task in IO completion 1447 */ 1448 cq = &hisi_hba->cq[slot->dlvry_queue]; 1449 tasklet_kill(&cq->tasklet); 1450 } 1451 spin_unlock_irqrestore(&task->task_state_lock, flags); 1452 rc = TMF_RESP_FUNC_COMPLETE; 1453 goto out; 1454 } 1455 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 1456 spin_unlock_irqrestore(&task->task_state_lock, flags); 1457 1458 sas_dev->dev_status = HISI_SAS_DEV_EH; 1459 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1460 struct scsi_cmnd *cmnd = task->uldd_task; 1461 struct hisi_sas_slot *slot = task->lldd_task; 1462 u32 tag = slot->idx; 1463 int rc2; 1464 1465 int_to_scsilun(cmnd->device->lun, &lun); 1466 tmf_task.tmf = TMF_ABORT_TASK; 1467 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); 1468 1469 rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, 1470 &tmf_task); 1471 1472 rc2 = hisi_sas_internal_task_abort(hisi_hba, device, 1473 HISI_SAS_INT_ABT_CMD, tag); 1474 if (rc2 < 0) { 1475 dev_err(dev, "abort task: internal abort (%d)\n", rc2); 1476 return TMF_RESP_FUNC_FAILED; 1477 } 1478 1479 /* 1480 * If the TMF finds that the IO is not in the device and also 1481 * the internal abort does not succeed, then it is safe to 1482 * free the slot. 1483 * Note: if the internal abort succeeds then the slot 1484 * will have already been completed 1485 */ 1486 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) { 1487 if (task->lldd_task) 1488 hisi_sas_do_release_task(hisi_hba, task, slot); 1489 } 1490 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1491 task->task_proto & SAS_PROTOCOL_STP) { 1492 if (task->dev->dev_type == SAS_SATA_DEV) { 1493 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1494 HISI_SAS_INT_ABT_DEV, 0); 1495 if (rc < 0) { 1496 dev_err(dev, "abort task: internal abort failed\n"); 1497 goto out; 1498 } 1499 hisi_sas_dereg_device(hisi_hba, device); 1500 rc = hisi_sas_softreset_ata_disk(device); 1501 } 1502 } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) { 1503 /* SMP */ 1504 struct hisi_sas_slot *slot = task->lldd_task; 1505 u32 tag = slot->idx; 1506 struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; 1507 1508 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1509 HISI_SAS_INT_ABT_CMD, tag); 1510 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) && 1511 task->lldd_task) { 1512 /* 1513 * flush tasklet to avoid free'ing task 1514 * before using task in IO completion 1515 */ 1516 tasklet_kill(&cq->tasklet); 1517 slot->task = NULL; 1518 } 1519 } 1520 1521 out: 1522 if (rc != TMF_RESP_FUNC_COMPLETE) 1523 dev_notice(dev, "abort task: rc=%d\n", rc); 1524 return rc; 1525 } 1526 1527 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) 1528 { 1529 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1530 struct device *dev = hisi_hba->dev; 1531 struct hisi_sas_tmf_task tmf_task; 1532 int rc = TMF_RESP_FUNC_FAILED; 1533 1534 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1535 HISI_SAS_INT_ABT_DEV, 0); 1536 if (rc < 0) { 1537 dev_err(dev, "abort task set: internal abort rc=%d\n", rc); 1538 return TMF_RESP_FUNC_FAILED; 1539 } 1540 hisi_sas_dereg_device(hisi_hba, device); 1541 1542 tmf_task.tmf = TMF_ABORT_TASK_SET; 1543 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1544 1545 if (rc == TMF_RESP_FUNC_COMPLETE) 1546 hisi_sas_release_task(hisi_hba, device); 1547 1548 return rc; 1549 } 1550 1551 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun) 1552 { 1553 int rc = TMF_RESP_FUNC_FAILED; 1554 struct hisi_sas_tmf_task tmf_task; 1555 1556 tmf_task.tmf = TMF_CLEAR_ACA; 1557 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1558 1559 return rc; 1560 } 1561 1562 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) 1563 { 1564 struct sas_phy *local_phy = sas_get_local_phy(device); 1565 int rc, reset_type = (device->dev_type == SAS_SATA_DEV || 1566 (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; 1567 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1568 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1569 struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number]; 1570 struct hisi_sas_phy *phy = container_of(sas_phy, 1571 struct hisi_sas_phy, sas_phy); 1572 DECLARE_COMPLETION_ONSTACK(phyreset); 1573 1574 if (scsi_is_sas_phy_local(local_phy)) { 1575 phy->in_reset = 1; 1576 phy->reset_completion = &phyreset; 1577 } 1578 1579 rc = sas_phy_reset(local_phy, reset_type); 1580 sas_put_local_phy(local_phy); 1581 1582 if (scsi_is_sas_phy_local(local_phy)) { 1583 int ret = wait_for_completion_timeout(&phyreset, 2 * HZ); 1584 unsigned long flags; 1585 1586 spin_lock_irqsave(&phy->lock, flags); 1587 phy->reset_completion = NULL; 1588 phy->in_reset = 0; 1589 spin_unlock_irqrestore(&phy->lock, flags); 1590 1591 /* report PHY down if timed out */ 1592 if (!ret) 1593 hisi_sas_phy_down(hisi_hba, sas_phy->id, 0); 1594 } else 1595 msleep(2000); 1596 1597 return rc; 1598 } 1599 1600 static int hisi_sas_I_T_nexus_reset(struct domain_device *device) 1601 { 1602 struct hisi_sas_device *sas_dev = device->lldd_dev; 1603 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1604 struct device *dev = hisi_hba->dev; 1605 int rc = TMF_RESP_FUNC_FAILED; 1606 1607 if (sas_dev->dev_status != HISI_SAS_DEV_EH) 1608 return TMF_RESP_FUNC_FAILED; 1609 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 1610 1611 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1612 HISI_SAS_INT_ABT_DEV, 0); 1613 if (rc < 0) { 1614 dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc); 1615 return TMF_RESP_FUNC_FAILED; 1616 } 1617 hisi_sas_dereg_device(hisi_hba, device); 1618 1619 rc = hisi_sas_debug_I_T_nexus_reset(device); 1620 1621 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) 1622 hisi_sas_release_task(hisi_hba, device); 1623 1624 return rc; 1625 } 1626 1627 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) 1628 { 1629 struct hisi_sas_device *sas_dev = device->lldd_dev; 1630 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1631 struct device *dev = hisi_hba->dev; 1632 int rc = TMF_RESP_FUNC_FAILED; 1633 1634 sas_dev->dev_status = HISI_SAS_DEV_EH; 1635 if (dev_is_sata(device)) { 1636 struct sas_phy *phy; 1637 1638 /* Clear internal IO and then hardreset */ 1639 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1640 HISI_SAS_INT_ABT_DEV, 0); 1641 if (rc < 0) { 1642 dev_err(dev, "lu_reset: internal abort failed\n"); 1643 goto out; 1644 } 1645 hisi_sas_dereg_device(hisi_hba, device); 1646 1647 phy = sas_get_local_phy(device); 1648 1649 rc = sas_phy_reset(phy, 1); 1650 1651 if (rc == 0) 1652 hisi_sas_release_task(hisi_hba, device); 1653 sas_put_local_phy(phy); 1654 } else { 1655 struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET }; 1656 1657 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1658 HISI_SAS_INT_ABT_DEV, 0); 1659 if (rc < 0) { 1660 dev_err(dev, "lu_reset: internal abort failed\n"); 1661 goto out; 1662 } 1663 hisi_sas_dereg_device(hisi_hba, device); 1664 1665 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1666 if (rc == TMF_RESP_FUNC_COMPLETE) 1667 hisi_sas_release_task(hisi_hba, device); 1668 } 1669 out: 1670 if (rc != TMF_RESP_FUNC_COMPLETE) 1671 dev_err(dev, "lu_reset: for device[%d]:rc= %d\n", 1672 sas_dev->device_id, rc); 1673 return rc; 1674 } 1675 1676 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) 1677 { 1678 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1679 struct device *dev = hisi_hba->dev; 1680 HISI_SAS_DECLARE_RST_WORK_ON_STACK(r); 1681 int rc, i; 1682 1683 queue_work(hisi_hba->wq, &r.work); 1684 wait_for_completion(r.completion); 1685 if (!r.done) 1686 return TMF_RESP_FUNC_FAILED; 1687 1688 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1689 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1690 struct domain_device *device = sas_dev->sas_device; 1691 1692 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device || 1693 DEV_IS_EXPANDER(device->dev_type)) 1694 continue; 1695 1696 rc = hisi_sas_debug_I_T_nexus_reset(device); 1697 if (rc != TMF_RESP_FUNC_COMPLETE) 1698 dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n", 1699 sas_dev->device_id, rc); 1700 } 1701 1702 hisi_sas_release_tasks(hisi_hba); 1703 1704 return TMF_RESP_FUNC_COMPLETE; 1705 } 1706 1707 static int hisi_sas_query_task(struct sas_task *task) 1708 { 1709 struct scsi_lun lun; 1710 struct hisi_sas_tmf_task tmf_task; 1711 int rc = TMF_RESP_FUNC_FAILED; 1712 1713 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1714 struct scsi_cmnd *cmnd = task->uldd_task; 1715 struct domain_device *device = task->dev; 1716 struct hisi_sas_slot *slot = task->lldd_task; 1717 u32 tag = slot->idx; 1718 1719 int_to_scsilun(cmnd->device->lun, &lun); 1720 tmf_task.tmf = TMF_QUERY_TASK; 1721 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); 1722 1723 rc = hisi_sas_debug_issue_ssp_tmf(device, 1724 lun.scsi_lun, 1725 &tmf_task); 1726 switch (rc) { 1727 /* The task is still in Lun, release it then */ 1728 case TMF_RESP_FUNC_SUCC: 1729 /* The task is not in Lun or failed, reset the phy */ 1730 case TMF_RESP_FUNC_FAILED: 1731 case TMF_RESP_FUNC_COMPLETE: 1732 break; 1733 default: 1734 rc = TMF_RESP_FUNC_FAILED; 1735 break; 1736 } 1737 } 1738 return rc; 1739 } 1740 1741 static int 1742 hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, 1743 struct sas_task *task, int abort_flag, 1744 int task_tag) 1745 { 1746 struct domain_device *device = task->dev; 1747 struct hisi_sas_device *sas_dev = device->lldd_dev; 1748 struct device *dev = hisi_hba->dev; 1749 struct hisi_sas_port *port; 1750 struct hisi_sas_slot *slot; 1751 struct asd_sas_port *sas_port = device->port; 1752 struct hisi_sas_cmd_hdr *cmd_hdr_base; 1753 struct hisi_sas_dq *dq = sas_dev->dq; 1754 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; 1755 unsigned long flags, flags_dq = 0; 1756 int wr_q_index; 1757 1758 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) 1759 return -EINVAL; 1760 1761 if (!device->port) 1762 return -1; 1763 1764 port = to_hisi_sas_port(sas_port); 1765 1766 /* simply get a slot and send abort command */ 1767 rc = hisi_sas_slot_index_alloc(hisi_hba, NULL); 1768 if (rc < 0) 1769 goto err_out; 1770 1771 slot_idx = rc; 1772 slot = &hisi_hba->slot_info[slot_idx]; 1773 1774 spin_lock_irqsave(&dq->lock, flags_dq); 1775 wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq); 1776 if (wr_q_index < 0) { 1777 spin_unlock_irqrestore(&dq->lock, flags_dq); 1778 rc = -EAGAIN; 1779 goto err_out_tag; 1780 } 1781 list_add_tail(&slot->delivery, &dq->list); 1782 spin_unlock_irqrestore(&dq->lock, flags_dq); 1783 1784 dlvry_queue = dq->id; 1785 dlvry_queue_slot = wr_q_index; 1786 1787 slot->n_elem = n_elem; 1788 slot->dlvry_queue = dlvry_queue; 1789 slot->dlvry_queue_slot = dlvry_queue_slot; 1790 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; 1791 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; 1792 slot->task = task; 1793 slot->port = port; 1794 slot->is_internal = true; 1795 task->lldd_task = slot; 1796 1797 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 1798 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); 1799 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ); 1800 1801 hisi_sas_task_prep_abort(hisi_hba, slot, device_id, 1802 abort_flag, task_tag); 1803 1804 spin_lock_irqsave(&task->task_state_lock, flags); 1805 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 1806 spin_unlock_irqrestore(&task->task_state_lock, flags); 1807 WRITE_ONCE(slot->ready, 1); 1808 /* send abort command to the chip */ 1809 spin_lock_irqsave(&dq->lock, flags); 1810 list_add_tail(&slot->entry, &sas_dev->list); 1811 hisi_hba->hw->start_delivery(dq); 1812 spin_unlock_irqrestore(&dq->lock, flags); 1813 1814 return 0; 1815 1816 err_out_tag: 1817 hisi_sas_slot_index_free(hisi_hba, slot_idx); 1818 err_out: 1819 dev_err(dev, "internal abort task prep: failed[%d]!\n", rc); 1820 1821 return rc; 1822 } 1823 1824 /** 1825 * hisi_sas_internal_task_abort -- execute an internal 1826 * abort command for single IO command or a device 1827 * @hisi_hba: host controller struct 1828 * @device: domain device 1829 * @abort_flag: mode of operation, device or single IO 1830 * @tag: tag of IO to be aborted (only relevant to single 1831 * IO mode) 1832 */ 1833 static int 1834 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, 1835 struct domain_device *device, 1836 int abort_flag, int tag) 1837 { 1838 struct sas_task *task; 1839 struct hisi_sas_device *sas_dev = device->lldd_dev; 1840 struct device *dev = hisi_hba->dev; 1841 int res; 1842 1843 /* 1844 * The interface is not realized means this HW don't support internal 1845 * abort, or don't need to do internal abort. Then here, we return 1846 * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that 1847 * the internal abort has been executed and returned CQ. 1848 */ 1849 if (!hisi_hba->hw->prep_abort) 1850 return TMF_RESP_FUNC_FAILED; 1851 1852 task = sas_alloc_slow_task(GFP_KERNEL); 1853 if (!task) 1854 return -ENOMEM; 1855 1856 task->dev = device; 1857 task->task_proto = device->tproto; 1858 task->task_done = hisi_sas_task_done; 1859 task->slow_task->timer.function = hisi_sas_tmf_timedout; 1860 task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ; 1861 add_timer(&task->slow_task->timer); 1862 1863 res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id, 1864 task, abort_flag, tag); 1865 if (res) { 1866 del_timer(&task->slow_task->timer); 1867 dev_err(dev, "internal task abort: executing internal task failed: %d\n", 1868 res); 1869 goto exit; 1870 } 1871 wait_for_completion(&task->slow_task->completion); 1872 res = TMF_RESP_FUNC_FAILED; 1873 1874 /* Internal abort timed out */ 1875 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 1876 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 1877 struct hisi_sas_slot *slot = task->lldd_task; 1878 1879 if (slot) { 1880 struct hisi_sas_cq *cq = 1881 &hisi_hba->cq[slot->dlvry_queue]; 1882 /* 1883 * flush tasklet to avoid free'ing task 1884 * before using task in IO completion 1885 */ 1886 tasklet_kill(&cq->tasklet); 1887 slot->task = NULL; 1888 } 1889 dev_err(dev, "internal task abort: timeout and not done.\n"); 1890 res = -EIO; 1891 goto exit; 1892 } else 1893 dev_err(dev, "internal task abort: timeout.\n"); 1894 } 1895 1896 if (task->task_status.resp == SAS_TASK_COMPLETE && 1897 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) { 1898 res = TMF_RESP_FUNC_COMPLETE; 1899 goto exit; 1900 } 1901 1902 if (task->task_status.resp == SAS_TASK_COMPLETE && 1903 task->task_status.stat == TMF_RESP_FUNC_SUCC) { 1904 res = TMF_RESP_FUNC_SUCC; 1905 goto exit; 1906 } 1907 1908 exit: 1909 dev_dbg(dev, "internal task abort: task to dev %016llx task=%p " 1910 "resp: 0x%x sts 0x%x\n", 1911 SAS_ADDR(device->sas_addr), 1912 task, 1913 task->task_status.resp, /* 0 is complete, -1 is undelivered */ 1914 task->task_status.stat); 1915 sas_free_task(task); 1916 1917 return res; 1918 } 1919 1920 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy) 1921 { 1922 hisi_sas_port_notify_formed(sas_phy); 1923 } 1924 1925 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type, 1926 u8 reg_index, u8 reg_count, u8 *write_data) 1927 { 1928 struct hisi_hba *hisi_hba = sha->lldd_ha; 1929 1930 if (!hisi_hba->hw->write_gpio) 1931 return -EOPNOTSUPP; 1932 1933 return hisi_hba->hw->write_gpio(hisi_hba, reg_type, 1934 reg_index, reg_count, write_data); 1935 } 1936 1937 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy) 1938 { 1939 phy->phy_attached = 0; 1940 phy->phy_type = 0; 1941 phy->port = NULL; 1942 } 1943 1944 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy) 1945 { 1946 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1947 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1948 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1949 struct device *dev = hisi_hba->dev; 1950 1951 if (rdy) { 1952 /* Phy down but ready */ 1953 hisi_sas_bytes_dmaed(hisi_hba, phy_no); 1954 hisi_sas_port_notify_formed(sas_phy); 1955 } else { 1956 struct hisi_sas_port *port = phy->port; 1957 1958 if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) || 1959 phy->in_reset) { 1960 dev_info(dev, "ignore flutter phy%d down\n", phy_no); 1961 return; 1962 } 1963 /* Phy down and not ready */ 1964 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); 1965 sas_phy_disconnected(sas_phy); 1966 1967 if (port) { 1968 if (phy->phy_type & PORT_TYPE_SAS) { 1969 int port_id = port->id; 1970 1971 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba, 1972 port_id)) 1973 port->port_attached = 0; 1974 } else if (phy->phy_type & PORT_TYPE_SATA) 1975 port->port_attached = 0; 1976 } 1977 hisi_sas_phy_disconnected(phy); 1978 } 1979 } 1980 EXPORT_SYMBOL_GPL(hisi_sas_phy_down); 1981 1982 void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba) 1983 { 1984 int i; 1985 1986 for (i = 0; i < hisi_hba->queue_count; i++) { 1987 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 1988 1989 tasklet_kill(&cq->tasklet); 1990 } 1991 } 1992 EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets); 1993 1994 struct scsi_transport_template *hisi_sas_stt; 1995 EXPORT_SYMBOL_GPL(hisi_sas_stt); 1996 1997 struct device_attribute *host_attrs[] = { 1998 &dev_attr_phy_event_threshold, 1999 NULL, 2000 }; 2001 EXPORT_SYMBOL_GPL(host_attrs); 2002 2003 static struct sas_domain_function_template hisi_sas_transport_ops = { 2004 .lldd_dev_found = hisi_sas_dev_found, 2005 .lldd_dev_gone = hisi_sas_dev_gone, 2006 .lldd_execute_task = hisi_sas_queue_command, 2007 .lldd_control_phy = hisi_sas_control_phy, 2008 .lldd_abort_task = hisi_sas_abort_task, 2009 .lldd_abort_task_set = hisi_sas_abort_task_set, 2010 .lldd_clear_aca = hisi_sas_clear_aca, 2011 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset, 2012 .lldd_lu_reset = hisi_sas_lu_reset, 2013 .lldd_query_task = hisi_sas_query_task, 2014 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha, 2015 .lldd_port_formed = hisi_sas_port_formed, 2016 .lldd_write_gpio = hisi_sas_write_gpio, 2017 }; 2018 2019 void hisi_sas_init_mem(struct hisi_hba *hisi_hba) 2020 { 2021 int i, s, max_command_entries = hisi_hba->hw->max_command_entries; 2022 2023 for (i = 0; i < hisi_hba->queue_count; i++) { 2024 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2025 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 2026 2027 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 2028 memset(hisi_hba->cmd_hdr[i], 0, s); 2029 dq->wr_point = 0; 2030 2031 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 2032 memset(hisi_hba->complete_hdr[i], 0, s); 2033 cq->rd_point = 0; 2034 } 2035 2036 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy; 2037 memset(hisi_hba->initial_fis, 0, s); 2038 2039 s = max_command_entries * sizeof(struct hisi_sas_iost); 2040 memset(hisi_hba->iost, 0, s); 2041 2042 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 2043 memset(hisi_hba->breakpoint, 0, s); 2044 2045 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint); 2046 memset(hisi_hba->sata_breakpoint, 0, s); 2047 } 2048 EXPORT_SYMBOL_GPL(hisi_sas_init_mem); 2049 2050 int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) 2051 { 2052 struct device *dev = hisi_hba->dev; 2053 int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries; 2054 int max_command_entries_ru, sz_slot_buf_ru; 2055 int blk_cnt, slots_per_blk; 2056 2057 sema_init(&hisi_hba->sem, 1); 2058 spin_lock_init(&hisi_hba->lock); 2059 for (i = 0; i < hisi_hba->n_phy; i++) { 2060 hisi_sas_phy_init(hisi_hba, i); 2061 hisi_hba->port[i].port_attached = 0; 2062 hisi_hba->port[i].id = -1; 2063 } 2064 2065 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 2066 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED; 2067 hisi_hba->devices[i].device_id = i; 2068 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL; 2069 } 2070 2071 for (i = 0; i < hisi_hba->queue_count; i++) { 2072 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2073 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 2074 2075 /* Completion queue structure */ 2076 cq->id = i; 2077 cq->hisi_hba = hisi_hba; 2078 2079 /* Delivery queue structure */ 2080 spin_lock_init(&dq->lock); 2081 INIT_LIST_HEAD(&dq->list); 2082 dq->id = i; 2083 dq->hisi_hba = hisi_hba; 2084 2085 /* Delivery queue */ 2086 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 2087 hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s, 2088 &hisi_hba->cmd_hdr_dma[i], 2089 GFP_KERNEL); 2090 if (!hisi_hba->cmd_hdr[i]) 2091 goto err_out; 2092 2093 /* Completion queue */ 2094 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 2095 hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s, 2096 &hisi_hba->complete_hdr_dma[i], 2097 GFP_KERNEL); 2098 if (!hisi_hba->complete_hdr[i]) 2099 goto err_out; 2100 } 2101 2102 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); 2103 hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma, 2104 GFP_KERNEL); 2105 if (!hisi_hba->itct) 2106 goto err_out; 2107 memset(hisi_hba->itct, 0, s); 2108 2109 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries, 2110 sizeof(struct hisi_sas_slot), 2111 GFP_KERNEL); 2112 if (!hisi_hba->slot_info) 2113 goto err_out; 2114 2115 /* roundup to avoid overly large block size */ 2116 max_command_entries_ru = roundup(max_command_entries, 64); 2117 sz_slot_buf_ru = roundup(sizeof(struct hisi_sas_slot_buf_table), 64); 2118 s = lcm(max_command_entries_ru, sz_slot_buf_ru); 2119 blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s; 2120 slots_per_blk = s / sz_slot_buf_ru; 2121 for (i = 0; i < blk_cnt; i++) { 2122 struct hisi_sas_slot_buf_table *buf; 2123 dma_addr_t buf_dma; 2124 int slot_index = i * slots_per_blk; 2125 2126 buf = dmam_alloc_coherent(dev, s, &buf_dma, GFP_KERNEL); 2127 if (!buf) 2128 goto err_out; 2129 memset(buf, 0, s); 2130 2131 for (j = 0; j < slots_per_blk; j++, slot_index++) { 2132 struct hisi_sas_slot *slot; 2133 2134 slot = &hisi_hba->slot_info[slot_index]; 2135 slot->buf = buf; 2136 slot->buf_dma = buf_dma; 2137 slot->idx = slot_index; 2138 2139 buf++; 2140 buf_dma += sizeof(*buf); 2141 } 2142 } 2143 2144 s = max_command_entries * sizeof(struct hisi_sas_iost); 2145 hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma, 2146 GFP_KERNEL); 2147 if (!hisi_hba->iost) 2148 goto err_out; 2149 2150 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 2151 hisi_hba->breakpoint = dmam_alloc_coherent(dev, s, 2152 &hisi_hba->breakpoint_dma, 2153 GFP_KERNEL); 2154 if (!hisi_hba->breakpoint) 2155 goto err_out; 2156 2157 hisi_hba->slot_index_count = max_command_entries; 2158 s = hisi_hba->slot_index_count / BITS_PER_BYTE; 2159 hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL); 2160 if (!hisi_hba->slot_index_tags) 2161 goto err_out; 2162 2163 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; 2164 hisi_hba->initial_fis = dmam_alloc_coherent(dev, s, 2165 &hisi_hba->initial_fis_dma, 2166 GFP_KERNEL); 2167 if (!hisi_hba->initial_fis) 2168 goto err_out; 2169 2170 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint); 2171 hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s, 2172 &hisi_hba->sata_breakpoint_dma, 2173 GFP_KERNEL); 2174 if (!hisi_hba->sata_breakpoint) 2175 goto err_out; 2176 hisi_sas_init_mem(hisi_hba); 2177 2178 hisi_sas_slot_index_init(hisi_hba); 2179 hisi_hba->last_slot_index = hisi_hba->hw->max_command_entries - 2180 HISI_SAS_RESERVED_IPTT_CNT; 2181 2182 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); 2183 if (!hisi_hba->wq) { 2184 dev_err(dev, "sas_alloc: failed to create workqueue\n"); 2185 goto err_out; 2186 } 2187 2188 return 0; 2189 err_out: 2190 return -ENOMEM; 2191 } 2192 EXPORT_SYMBOL_GPL(hisi_sas_alloc); 2193 2194 void hisi_sas_free(struct hisi_hba *hisi_hba) 2195 { 2196 if (hisi_hba->wq) 2197 destroy_workqueue(hisi_hba->wq); 2198 } 2199 EXPORT_SYMBOL_GPL(hisi_sas_free); 2200 2201 void hisi_sas_rst_work_handler(struct work_struct *work) 2202 { 2203 struct hisi_hba *hisi_hba = 2204 container_of(work, struct hisi_hba, rst_work); 2205 2206 hisi_sas_controller_reset(hisi_hba); 2207 } 2208 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler); 2209 2210 void hisi_sas_sync_rst_work_handler(struct work_struct *work) 2211 { 2212 struct hisi_sas_rst *rst = 2213 container_of(work, struct hisi_sas_rst, work); 2214 2215 if (!hisi_sas_controller_reset(rst->hisi_hba)) 2216 rst->done = true; 2217 complete(rst->completion); 2218 } 2219 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler); 2220 2221 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba) 2222 { 2223 struct device *dev = hisi_hba->dev; 2224 struct platform_device *pdev = hisi_hba->platform_dev; 2225 struct device_node *np = pdev ? pdev->dev.of_node : NULL; 2226 struct clk *refclk; 2227 2228 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr, 2229 SAS_ADDR_SIZE)) { 2230 dev_err(dev, "could not get property sas-addr\n"); 2231 return -ENOENT; 2232 } 2233 2234 if (np) { 2235 /* 2236 * These properties are only required for platform device-based 2237 * controller with DT firmware. 2238 */ 2239 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np, 2240 "hisilicon,sas-syscon"); 2241 if (IS_ERR(hisi_hba->ctrl)) { 2242 dev_err(dev, "could not get syscon\n"); 2243 return -ENOENT; 2244 } 2245 2246 if (device_property_read_u32(dev, "ctrl-reset-reg", 2247 &hisi_hba->ctrl_reset_reg)) { 2248 dev_err(dev, 2249 "could not get property ctrl-reset-reg\n"); 2250 return -ENOENT; 2251 } 2252 2253 if (device_property_read_u32(dev, "ctrl-reset-sts-reg", 2254 &hisi_hba->ctrl_reset_sts_reg)) { 2255 dev_err(dev, 2256 "could not get property ctrl-reset-sts-reg\n"); 2257 return -ENOENT; 2258 } 2259 2260 if (device_property_read_u32(dev, "ctrl-clock-ena-reg", 2261 &hisi_hba->ctrl_clock_ena_reg)) { 2262 dev_err(dev, 2263 "could not get property ctrl-clock-ena-reg\n"); 2264 return -ENOENT; 2265 } 2266 } 2267 2268 refclk = devm_clk_get(dev, NULL); 2269 if (IS_ERR(refclk)) 2270 dev_dbg(dev, "no ref clk property\n"); 2271 else 2272 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; 2273 2274 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) { 2275 dev_err(dev, "could not get property phy-count\n"); 2276 return -ENOENT; 2277 } 2278 2279 if (device_property_read_u32(dev, "queue-count", 2280 &hisi_hba->queue_count)) { 2281 dev_err(dev, "could not get property queue-count\n"); 2282 return -ENOENT; 2283 } 2284 2285 return 0; 2286 } 2287 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info); 2288 2289 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, 2290 const struct hisi_sas_hw *hw) 2291 { 2292 struct resource *res; 2293 struct Scsi_Host *shost; 2294 struct hisi_hba *hisi_hba; 2295 struct device *dev = &pdev->dev; 2296 2297 shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba)); 2298 if (!shost) { 2299 dev_err(dev, "scsi host alloc failed\n"); 2300 return NULL; 2301 } 2302 hisi_hba = shost_priv(shost); 2303 2304 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); 2305 hisi_hba->hw = hw; 2306 hisi_hba->dev = dev; 2307 hisi_hba->platform_dev = pdev; 2308 hisi_hba->shost = shost; 2309 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; 2310 2311 timer_setup(&hisi_hba->timer, NULL, 0); 2312 2313 if (hisi_sas_get_fw_info(hisi_hba) < 0) 2314 goto err_out; 2315 2316 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) && 2317 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) { 2318 dev_err(dev, "No usable DMA addressing method\n"); 2319 goto err_out; 2320 } 2321 2322 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2323 hisi_hba->regs = devm_ioremap_resource(dev, res); 2324 if (IS_ERR(hisi_hba->regs)) 2325 goto err_out; 2326 2327 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2328 if (res) { 2329 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res); 2330 if (IS_ERR(hisi_hba->sgpio_regs)) 2331 goto err_out; 2332 } 2333 2334 if (hisi_sas_alloc(hisi_hba, shost)) { 2335 hisi_sas_free(hisi_hba); 2336 goto err_out; 2337 } 2338 2339 return shost; 2340 err_out: 2341 scsi_host_put(shost); 2342 dev_err(dev, "shost alloc failed\n"); 2343 return NULL; 2344 } 2345 2346 int hisi_sas_probe(struct platform_device *pdev, 2347 const struct hisi_sas_hw *hw) 2348 { 2349 struct Scsi_Host *shost; 2350 struct hisi_hba *hisi_hba; 2351 struct device *dev = &pdev->dev; 2352 struct asd_sas_phy **arr_phy; 2353 struct asd_sas_port **arr_port; 2354 struct sas_ha_struct *sha; 2355 int rc, phy_nr, port_nr, i; 2356 2357 shost = hisi_sas_shost_alloc(pdev, hw); 2358 if (!shost) 2359 return -ENOMEM; 2360 2361 sha = SHOST_TO_SAS_HA(shost); 2362 hisi_hba = shost_priv(shost); 2363 platform_set_drvdata(pdev, sha); 2364 2365 phy_nr = port_nr = hisi_hba->n_phy; 2366 2367 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); 2368 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); 2369 if (!arr_phy || !arr_port) { 2370 rc = -ENOMEM; 2371 goto err_out_ha; 2372 } 2373 2374 sha->sas_phy = arr_phy; 2375 sha->sas_port = arr_port; 2376 sha->lldd_ha = hisi_hba; 2377 2378 shost->transportt = hisi_sas_stt; 2379 shost->max_id = HISI_SAS_MAX_DEVICES; 2380 shost->max_lun = ~0; 2381 shost->max_channel = 1; 2382 shost->max_cmd_len = 16; 2383 shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT); 2384 if (hisi_hba->hw->slot_index_alloc) { 2385 shost->can_queue = hisi_hba->hw->max_command_entries; 2386 shost->cmd_per_lun = hisi_hba->hw->max_command_entries; 2387 } else { 2388 shost->can_queue = hisi_hba->hw->max_command_entries - 2389 HISI_SAS_RESERVED_IPTT_CNT; 2390 shost->cmd_per_lun = hisi_hba->hw->max_command_entries - 2391 HISI_SAS_RESERVED_IPTT_CNT; 2392 } 2393 2394 sha->sas_ha_name = DRV_NAME; 2395 sha->dev = hisi_hba->dev; 2396 sha->lldd_module = THIS_MODULE; 2397 sha->sas_addr = &hisi_hba->sas_addr[0]; 2398 sha->num_phys = hisi_hba->n_phy; 2399 sha->core.shost = hisi_hba->shost; 2400 2401 for (i = 0; i < hisi_hba->n_phy; i++) { 2402 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; 2403 sha->sas_port[i] = &hisi_hba->port[i].sas_port; 2404 } 2405 2406 rc = scsi_add_host(shost, &pdev->dev); 2407 if (rc) 2408 goto err_out_ha; 2409 2410 rc = sas_register_ha(sha); 2411 if (rc) 2412 goto err_out_register_ha; 2413 2414 rc = hisi_hba->hw->hw_init(hisi_hba); 2415 if (rc) 2416 goto err_out_register_ha; 2417 2418 scsi_scan_host(shost); 2419 2420 return 0; 2421 2422 err_out_register_ha: 2423 scsi_remove_host(shost); 2424 err_out_ha: 2425 hisi_sas_free(hisi_hba); 2426 scsi_host_put(shost); 2427 return rc; 2428 } 2429 EXPORT_SYMBOL_GPL(hisi_sas_probe); 2430 2431 int hisi_sas_remove(struct platform_device *pdev) 2432 { 2433 struct sas_ha_struct *sha = platform_get_drvdata(pdev); 2434 struct hisi_hba *hisi_hba = sha->lldd_ha; 2435 struct Scsi_Host *shost = sha->core.shost; 2436 2437 if (timer_pending(&hisi_hba->timer)) 2438 del_timer(&hisi_hba->timer); 2439 2440 sas_unregister_ha(sha); 2441 sas_remove_host(sha->core.shost); 2442 2443 hisi_sas_free(hisi_hba); 2444 scsi_host_put(shost); 2445 return 0; 2446 } 2447 EXPORT_SYMBOL_GPL(hisi_sas_remove); 2448 2449 static __init int hisi_sas_init(void) 2450 { 2451 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); 2452 if (!hisi_sas_stt) 2453 return -ENOMEM; 2454 2455 return 0; 2456 } 2457 2458 static __exit void hisi_sas_exit(void) 2459 { 2460 sas_release_transport(hisi_sas_stt); 2461 } 2462 2463 module_init(hisi_sas_init); 2464 module_exit(hisi_sas_exit); 2465 2466 MODULE_LICENSE("GPL"); 2467 MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); 2468 MODULE_DESCRIPTION("HISILICON SAS controller driver"); 2469 MODULE_ALIAS("platform:" DRV_NAME); 2470