/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include "hisi_sas.h"
#include "../libsas/sas_internal.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);

u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	case ATA_CMD_SET_MAX:
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
	{
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
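/*
 * Copy the D2H FIS that the hardware wrote to the slot's status buffer
 * into the libsas ata_task_resp, so the ATA layer can decode the result.
 */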
void hisi_sas_sata_done(struct sas_task *task,
			struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);

int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		    qc->tf.command == ATA_CMD_FPDMA_READ) {
			*tag = qc->tag;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);

/*
 * This function assumes linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
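/*
 * For example, assuming the usual libsas enum ordering (1.5G, then 3.0G,
 * then 6.0G), a max of SAS_LINK_RATE_6_0_GBPS walks i = 0..2 and sets
 * bits 0, 2 and 4, giving a mask of 0x15.
 */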
u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
{
	u8 rate = 0;
	int i;

	max -= SAS_LINK_RATE_1_5_GBPS;
	for (i = 0; i <= max; i++)
		rate |= 1 << (i * 2);
	return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	unsigned long flags;

	if (hisi_hba->hw->slot_index_alloc || (slot_idx >=
	    hisi_hba->hw->max_command_entries - HISI_SAS_RESERVED_IPTT_CNT)) {
		spin_lock_irqsave(&hisi_hba->lock, flags);
		hisi_sas_slot_index_clear(hisi_hba, slot_idx);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
	}
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}

static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
				     struct scsi_cmnd *scsi_cmnd)
{
	int index;
	void *bitmap = hisi_hba->slot_index_tags;
	unsigned long flags;

	if (scsi_cmnd)
		return scsi_cmnd->request->tag;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
				   hisi_hba->last_slot_index + 1);
	if (index >= hisi_hba->slot_index_count) {
		index = find_next_zero_bit(bitmap,
					   hisi_hba->slot_index_count,
					   hisi_hba->hw->max_command_entries -
					   HISI_SAS_RESERVED_IPTT_CNT);
		if (index >= hisi_hba->slot_index_count) {
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
			return -SAS_QUEUE_FULL;
		}
	}
	hisi_sas_slot_index_set(hisi_hba, index);
	hisi_hba->last_slot_index = index;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return index;
}

static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->slot_index_count; ++i)
		hisi_sas_slot_index_clear(hisi_hba, i);
}
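/*
 * Tear down a completed or aborted slot: unmap any data and protection
 * scatterlists, unlink the slot from its device's list and free the IPTT.
 */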
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	unsigned long flags;
	int device_id = slot->device_id;
	struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];

	if (task) {
		struct device *dev = hisi_hba->dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto)) {
			struct sas_ssp_task *ssp_task = &task->ssp_task;
			struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
			if (slot->n_elem_dif)
				dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     task->data_dir);
		}
	}

	spin_lock_irqsave(&sas_dev->lock, flags);
	list_del_init(&slot->entry);
	spin_unlock_irqrestore(&sas_dev->lock, flags);

	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));

	hisi_sas_slot_index_free(hisi_hba, slot->idx);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_ssp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
				     struct hisi_sas_slot *slot,
				     int device_id, int abort_flag, int tag_to_abort)
{
	hisi_hba->hw->prep_abort(hisi_hba, slot,
				 device_id, abort_flag, tag_to_abort);
}

static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
			       struct sas_task *task, int n_elem,
			       int n_elem_req, int n_elem_resp)
{
	struct device *dev = hisi_hba->dev;

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			if (n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			if (n_elem_req)
				dma_unmap_sg(dev, &task->smp_task.smp_req,
					     1, DMA_TO_DEVICE);
			if (n_elem_resp)
				dma_unmap_sg(dev, &task->smp_task.smp_resp,
					     1, DMA_FROM_DEVICE);
		}
	}
}

static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
			    struct sas_task *task, int *n_elem,
			    int *n_elem_req, int *n_elem_resp)
{
	struct device *dev = hisi_hba->dev;
	int rc;

	if (sas_protocol_ata(task->task_proto)) {
		*n_elem = task->num_scatter;
	} else {
		unsigned int req_len, resp_len;

		if (task->num_scatter) {
			*n_elem = dma_map_sg(dev, task->scatter,
					     task->num_scatter, task->data_dir);
			if (!*n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			*n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
						 1, DMA_TO_DEVICE);
			if (!*n_elem_req) {
				rc = -ENOMEM;
				goto prep_out;
			}
			req_len = sg_dma_len(&task->smp_task.smp_req);
			if (req_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
			*n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
						  1, DMA_FROM_DEVICE);
			if (!*n_elem_resp) {
				rc = -ENOMEM;
				goto err_out_dma_unmap;
			}
			resp_len = sg_dma_len(&task->smp_task.smp_resp);
			if (resp_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
		}
	}

	if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
			*n_elem);
		rc = -EINVAL;
		goto err_out_dma_unmap;
	}
	return 0;

err_out_dma_unmap:
	/* It would be better to call dma_unmap_sg() here, but it's messy */
	hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
			   *n_elem_req, *n_elem_resp);
prep_out:
	return rc;
}
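/*
 * DIF/DIX protection scatterlists are mapped and unmapped separately from
 * the data scatterlist; they exist only for SSP commands that carry
 * protection information.
 */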
static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba,
				   struct sas_task *task, int n_elem_dif)
{
	struct device *dev = hisi_hba->dev;

	if (n_elem_dif) {
		struct sas_ssp_task *ssp_task = &task->ssp_task;
		struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

		dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     task->data_dir);
	}
}

static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba,
				int *n_elem_dif, struct sas_task *task)
{
	struct device *dev = hisi_hba->dev;
	struct sas_ssp_task *ssp_task;
	struct scsi_cmnd *scsi_cmnd;
	int rc;

	if (task->num_scatter) {
		ssp_task = &task->ssp_task;
		scsi_cmnd = ssp_task->cmd;

		if (scsi_prot_sg_count(scsi_cmnd)) {
			*n_elem_dif = dma_map_sg(dev,
						 scsi_prot_sglist(scsi_cmnd),
						 scsi_prot_sg_count(scsi_cmnd),
						 task->data_dir);

			if (!*n_elem_dif)
				return -ENOMEM;

			if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) {
				dev_err(dev, "task prep: n_elem_dif(%d) too large\n",
					*n_elem_dif);
				rc = -EINVAL;
				goto err_out_dif_dma_unmap;
			}
		}
	}

	return 0;

err_out_dif_dma_unmap:
	dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
		     scsi_prot_sg_count(scsi_cmnd), task->data_dir);
	return rc;
}
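/*
 * Prepare a sas_task for delivery: map its DMA buffers, allocate an IPTT
 * and a delivery-queue entry, fill in the slot and command header for the
 * protocol in use, and mark the slot ready. On failure the mappings and
 * the tag are unwound in reverse order.
 */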
static int hisi_sas_task_prep(struct sas_task *task,
			      struct hisi_sas_dq **dq_pointer,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf,
			      int *pass)
{
	struct domain_device *device = task->dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
	int n_elem = 0, n_elem_dif = 0, n_elem_req = 0, n_elem_resp = 0;
	struct hisi_sas_dq *dq;
	unsigned long flags;
	int wr_q_index;

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return -ECOMM;
	}

	if (hisi_hba->reply_map) {
		int cpu = raw_smp_processor_id();
		unsigned int dq_index = hisi_hba->reply_map[cpu];

		*dq_pointer = dq = &hisi_hba->dq[dq_index];
	} else {
		*dq_pointer = dq = sas_dev->dq;
	}

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d has no attached device\n",
			 (dev_is_sata(device)) ? "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
			      &n_elem_req, &n_elem_resp);
	if (rc < 0)
		goto prep_out;

	if (!sas_protocol_ata(task->task_proto)) {
		rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
		if (rc < 0)
			goto err_out_dma_unmap;
	}

	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
	else {
		struct scsi_cmnd *scsi_cmnd = NULL;

		if (task->uldd_task) {
			struct ata_queued_cmd *qc;

			if (dev_is_sata(device)) {
				qc = task->uldd_task;
				scsi_cmnd = qc->scsicmd;
			} else {
				scsi_cmnd = task->uldd_task;
			}
		}
		rc = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd);
	}
	if (rc < 0)
		goto err_out_dif_dma_unmap;

	slot_idx = rc;
	slot = &hisi_hba->slot_info[slot_idx];

	spin_lock_irqsave(&dq->lock, flags);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags);
		rc = -EAGAIN;
		goto err_out_tag;
	}

	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags);
	spin_lock_irqsave(&sas_dev->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&sas_dev->lock, flags);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->n_elem = n_elem;
	slot->n_elem_dif = n_elem_dif;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->tmf = tmf;
	slot->is_internal = is_tmf;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		hisi_sas_task_prep_ssp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		break;
	}

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	++(*pass);
	WRITE_ONCE(slot->ready, 1);

	return 0;

err_out_tag:
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out_dif_dma_unmap:
	if (!sas_protocol_ata(task->task_proto))
		hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
err_out_dma_unmap:
	hisi_sas_dma_unmap(hisi_hba, task, n_elem,
			   n_elem_req, n_elem_resp);
prep_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	return rc;
}
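/*
 * If the controller is currently rejecting commands (e.g. during reset),
 * the down()/up() pair on hisi_hba->sem below simply blocks the caller
 * until the reset path releases the semaphore.
 */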
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf)
{
	u32 rc;
	u32 pass = 0;
	unsigned long flags;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	struct domain_device *device = task->dev;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_dq *dq = NULL;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port; we should not call
		 * task_done for SATA.
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	hisi_hba = dev_to_hisi_hba(device);
	dev = hisi_hba->dev;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
		if (in_softirq())
			return -EINVAL;

		down(&hisi_hba->sem);
		up(&hisi_hba->sem);
	}

	/* protect task_prep and start_delivery sequence */
	rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
	if (rc)
		dev_err(dev, "task exec: failed[%d]!\n", rc);

	if (likely(pass)) {
		spin_lock_irqsave(&dq->lock, flags);
		hisi_hba->hw->start_delivery(dq);
		spin_unlock_irqrestore(&dq->lock, flags);
	}

	return rc;
}

static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}

static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	unsigned long flags;
	int last = hisi_hba->last_dev_id;
	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
	int i;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_INIT;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			spin_lock_init(&sas_dev->lock);
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
		i++;
	}
	hisi_hba->last_dev_id = i;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return sas_dev;
}
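/*
 * Initialise a newly found device. For SATA devices attached via an
 * expander, a hard reset first clears any previous STP affiliation before
 * the soft-reset retries below.
 */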
#define HISI_SAS_SRST_ATA_DISK_CNT 3
static int hisi_sas_init_device(struct domain_device *device)
{
	int rc = TMF_RESP_FUNC_COMPLETE;
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int retry = HISI_SAS_SRST_ATA_DISK_CNT;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct sas_phy *local_phy;

	switch (device->dev_type) {
	case SAS_END_DEVICE:
		int_to_scsilun(0, &lun);

		tmf_task.tmf = TMF_CLEAR_TASK_SET;
		rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
						  &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PM:
	case SAS_SATA_PM_PORT:
	case SAS_SATA_PENDING:
		/*
		 * send HARD RESET to clear previous affiliation of
		 * STP target port
		 */
		local_phy = sas_get_local_phy(device);
		if (!scsi_is_sas_phy_local(local_phy)) {
			unsigned long deadline = ata_deadline(jiffies, 20000);
			struct sata_device *sata_dev = &device->sata_dev;
			struct ata_host *ata_host = sata_dev->ata_host;
			struct ata_port_operations *ops = ata_host->ops;
			struct ata_port *ap = sata_dev->ap;
			struct ata_link *link;
			unsigned int classes;

			ata_for_each_link(link, ap, EDGE)
				rc = ops->hardreset(link, &classes,
						    deadline);
		}
		sas_put_local_phy(local_phy);
		if (rc) {
			dev_warn(dev, "SATA disk hardreset fail: 0x%x\n", rc);
			return rc;
		}

		while (retry-- > 0) {
			rc = hisi_sas_softreset_ata_disk(device);
			if (!rc)
				break;
		}
		break;
	default:
		break;
	}

	return rc;
}

static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	int rc;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
			    SAS_ADDR(device->sas_addr))
				break;
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			rc = -EINVAL;
			goto err_out;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		 sas_dev->device_id, sas_dev->dev_type);

	rc = hisi_sas_init_device(device);
	if (rc)
		goto err_out;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
	return 0;

err_out:
	hisi_sas_dev_gone(device);
	return rc;
}

int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);

void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_start);
int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);
	return 1;
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);

static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
		hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}

static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}

static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};

bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
			       enum hisi_sas_phy_event event)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);

static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
{
	struct hisi_sas_phy *phy = from_timer(phy, t, timer);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct device *dev = hisi_hba->dev;
	int phy_no = phy->sas_phy.id;

	dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no);
	hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
}

void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct device *dev = hisi_hba->dev;

	if (!timer_pending(&phy->timer)) {
		dev_dbg(dev, "phy%d OOB ready\n", phy_no);
		phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ;
		add_timer(&phy->timer);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);

	spin_lock_init(&phy->lock);

	timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0);
}

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	unsigned long flags;

	if (!sas_port)
		return;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
			task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}

static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}
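/*
 * A device has gone away: abort everything still outstanding on it and
 * clear its ITCT entry, unless a controller reset is in progress, in which
 * case the reset path does the cleanup instead.
 */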
static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
					     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		down(&hisi_hba->sem);
		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		up(&hisi_hba->sem);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);
	sas_dev->dev_type = SAS_PHY_UNUSED;
}

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}

static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
				     struct sas_phy_linkrates *r)
{
	struct sas_phy_linkrates _r;

	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	enum sas_linkrate min, max;

	if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS)
		return -EINVAL;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else
		return -EINVAL;

	_r.maximum_linkrate = max;
	_r.minimum_linkrate = min;

	sas_phy->phy->maximum_linkrate = max;
	sas_phy->phy->minimum_linkrate = min;

	hisi_hba->hw->phy_disable(hisi_hba, phy_no);
	msleep(100);
	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
	hisi_hba->hw->phy_start(hisi_hba, phy_no);

	return 0;
}

static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		msleep(100);
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
		break;

	case PHY_FUNC_DISABLE:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		return hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		/* fall through */
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static void hisi_sas_task_done(struct sas_task *task)
{
	del_timer(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}

static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;
	bool is_completed = true;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		is_completed = false;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (!is_completed)
		complete(&task->slow_task->completion);
}

#define TASK_TIMEOUT 20
#define TASK_RETRY 3
#define INTERNAL_ABORT_TIMEOUT 6
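/*
 * Issue a TMF (or an ATA device-reset FIS) as an internal slow task:
 * retry up to TASK_RETRY times under a TASK_TIMEOUT-second timer and
 * translate the completion status into a TMF_RESP_* code.
 */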
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT * HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				if (slot) {
					struct hisi_sas_cq *cq =
						&hisi_hba->cq[slot->dlvry_queue];
					/*
					 * flush tasklet to avoid free'ing task
					 * before using task in IO completion
					 */
					tasklet_kill(&cq->tasklet);
					slot->task = NULL;
				}

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/*
			 * no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}

static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
					bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}
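/*
 * Soft-reset an ATA disk: send an SRST-asserting device-reset FIS on each
 * link, then a second FIS with SRST cleared to de-assert the reset.
 */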
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
					u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
					       sizeof(ssp_task), tmf);
}
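/*
 * After a controller reset the hardware may hand out new port IDs; rebind
 * each registered device's port to a phy that is still up, according to
 * the phy state bitmap.
 */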
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}

static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
				     u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		bool do_port_check = !!(_sas_port != sas_port);

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (DEV_IS_EXPANDER(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else if (old_state & (1 << phy_no))
			/* PHY down but was up before */
			hisi_sas_phy_down(hisi_hba, phy_no, 0);

	}
}

static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		hisi_sas_init_device(device);
	}
}

static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
					     struct asd_sas_port *sas_port,
					     struct domain_device *device)
{
	struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
	struct ata_port *ap = device->sata_dev.ap;
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	int rc = TMF_RESP_FUNC_FAILED;
	struct asd_sas_phy *sas_phy;
	struct ata_link *link;
	u8 fis[20] = {0};
	u32 state;

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
		if (!(state & BIT(sas_phy->id)))
			continue;

		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			tmf_task.phy_id = sas_phy->id;
			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
							     &tmf_task);
			if (rc != TMF_RESP_FUNC_COMPLETE) {
				dev_err(dev, "phy%d ata reset failed rc=%d\n",
					sas_phy->id, rc);
				break;
			}
		}
	}
}
static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int port_no, rc, i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0)
			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
	}

	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
		struct hisi_sas_port *port = &hisi_hba->port[port_no];
		struct asd_sas_port *sas_port = &port->sas_port;
		struct domain_device *port_dev = sas_port->port_dev;
		struct domain_device *device;

		if (!port_dev || !DEV_IS_EXPANDER(port_dev->dev_type))
			continue;

		/* Try to find a SATA device */
		list_for_each_entry(device, &sas_port->dev_list,
				    dev_list_node) {
			if (dev_is_sata(device)) {
				hisi_sas_send_ata_reset_each_phy(hisi_hba,
								 sas_port,
								 device);
				break;
			}
		}
	}
}

void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	down(&hisi_hba->sem);
	hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

	if (timer_pending(&hisi_hba->timer))
		del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);

void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 state;

	/* Init and wait for PHYs to come up and all libsas events to finish. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	up(&hisi_hba->sem);

	if (hisi_hba->reject_stp_links_msk)
		hisi_sas_terminate_stp_reject(hisi_hba);
	hisi_sas_reset_init_all_devices(hisi_hba);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state, state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);

static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	int rc;

	if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct)
		queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

	if (!hisi_hba->hw->soft_reset)
		return -1;

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_info(dev, "controller resetting...\n");
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		up(&hisi_hba->sem);
		scsi_unblock_requests(shost);
		clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
		return rc;
	}

	hisi_sas_controller_reset_done(hisi_hba);
	dev_info(dev, "controller reset complete\n");

	return 0;
}
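/*
 * Abort a single task. For SSP, issue an ABORT TASK TMF plus an internal
 * abort of the matching IPTT; for SATA/STP, abort all I/O on the device
 * and soft-reset the disk; for SMP, abort the slot by tag.
 */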
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		struct hisi_sas_slot *slot = task->lldd_task;
		struct hisi_sas_cq *cq;

		if (slot) {
			/*
			 * flush tasklet to avoid free'ing task
			 * before using task in IO completion
			 */
			cq = &hisi_hba->cq[slot->dlvry_queue];
			tasklet_kill(&cq->tasklet);
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u16 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task)
				hisi_sas_do_release_task(hisi_hba, task, slot);
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
							  HISI_SAS_INT_ABT_DEV,
							  0);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_CMD, tag);
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
		    task->lldd_task) {
			/*
			 * flush tasklet to avoid free'ing task
			 * before using task in IO completion
			 */
			tasklet_kill(&cq->tasklet);
			slot->task = NULL;
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_tmf_task tmf_task;
	int rc;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}
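/*
 * Reset the I_T nexus by resetting the phy attached to the device. For a
 * local phy, wait (with a timeout) for the resulting phy-up event;
 * otherwise give the remote link time to come back up.
 */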
static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *local_phy = sas_get_local_phy(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number];
	struct hisi_sas_phy *phy = container_of(sas_phy,
						struct hisi_sas_phy, sas_phy);
	DECLARE_COMPLETION_ONSTACK(phyreset);
	int rc, reset_type;

	if (scsi_is_sas_phy_local(local_phy)) {
		phy->in_reset = 1;
		phy->reset_completion = &phyreset;
	}

	reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
		      !dev_is_sata(device)) ? 1 : 0;

	rc = sas_phy_reset(local_phy, reset_type);
	sas_put_local_phy(local_phy);

	if (scsi_is_sas_phy_local(local_phy)) {
		int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
		unsigned long flags;

		spin_lock_irqsave(&phy->lock, flags);
		phy->reset_completion = NULL;
		phy->in_reset = 0;
		spin_unlock_irqrestore(&phy->lock, flags);

		/* report PHY down if timed out */
		if (!ret)
			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
	} else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) {
		/*
		 * If in init state, we rely on caller to wait for link to be
		 * ready; otherwise, delay.
		 */
		msleep(2000);
	}

	return rc;
}

static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	if (dev_is_sata(device)) {
		rc = hisi_sas_softreset_ata_disk(device);
		if (rc)
			return TMF_RESP_FUNC_FAILED;
	}

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, 1);

		if (rc == 0)
			hisi_sas_release_task(hisi_hba, device);
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			sas_dev->device_id, rc);
	return rc;
}

static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
	int rc, i;

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (!r.done)
		return TMF_RESP_FUNC_FAILED;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
		    DEV_IS_EXPANDER(device->dev_type))
			continue;

		rc = hisi_sas_debug_I_T_nexus_reset(device);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
				 sas_dev->device_id, rc);
	}

	hisi_sas_release_tasks(hisi_hba);

	return TMF_RESP_FUNC_COMPLETE;
}

static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}
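/*
 * Build and deliver an internal abort command: allocate a slot and a
 * delivery-queue entry just as for a normal task, but fill the command
 * header through the hardware's prep_abort() hook.
 */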
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag, struct hisi_sas_dq *dq)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags, flags_dq = 0;
	int wr_q_index;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
	if (rc < 0)
		goto err_out;

	slot_idx = rc;
	slot = &hisi_hba->slot_info[slot_idx];

	spin_lock_irqsave(&dq->lock, flags_dq);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags_dq);
		rc = -EAGAIN;
		goto err_out_tag;
	}
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	spin_lock_irqsave(&sas_dev->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&sas_dev->lock, flags);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->is_internal = true;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				 abort_flag, task_tag);

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	WRITE_ONCE(slot->ready, 1);
	/* send abort command to the chip */
	spin_lock_irqsave(&dq->lock, flags);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags);

	return 0;

err_out_tag:
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}
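
/*
 * An internal abort can target either a single command (by IPTT tag, mode
 * HISI_SAS_INT_ABT_CMD) or everything for a device (HISI_SAS_INT_ABT_DEV).
 * For a single command the abort is queued on the same delivery queue the
 * command was sent on; for a device it is issued on every queue whose
 * completion interrupt can still fire, as hisi_sas_internal_task_abort()
 * further below implements.
 */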

/**
 * _hisi_sas_internal_task_abort -- execute an internal
 * abort command for a single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 * @dq: delivery queue for this internal abort command
 */
static int
_hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			      struct domain_device *device, int abort_flag,
			      int tag, struct hisi_sas_dq *dq)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	/*
	 * If the hw->prep_abort interface is not implemented, this HW either
	 * doesn't support internal abort or doesn't need one. Return
	 * TMF_RESP_FUNC_FAILED and let the remaining steps proceed as if the
	 * internal abort had been executed and completed on the CQ.
	 */
	if (!hisi_hba->hw->prep_abort)
		return TMF_RESP_FUNC_FAILED;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT * HZ;
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag, dq);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			if (slot) {
				struct hisi_sas_cq *cq =
					&hisi_hba->cq[slot->dlvry_queue];
				/*
				 * flush tasklet to avoid freeing the task
				 * before it is used in IO completion
				 */
				tasklet_kill(&cq->tasklet);
				slot->task = NULL;
			}
			dev_err(dev, "internal task abort: timeout and not done.\n");

			res = -EIO;
			goto exit;
		} else
			dev_err(dev, "internal task abort: timeout.\n");
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr),
		task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}

static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct hisi_sas_slot *slot;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_dq *dq;
	int i, rc;

	switch (abort_flag) {
	case HISI_SAS_INT_ABT_CMD:
		slot = &hisi_hba->slot_info[tag];
		dq = &hisi_hba->dq[slot->dlvry_queue];
		return _hisi_sas_internal_task_abort(hisi_hba, device,
						     abort_flag, tag, dq);
	case HISI_SAS_INT_ABT_DEV:
		for (i = 0; i < hisi_hba->cq_nvecs; i++) {
			struct hisi_sas_cq *cq = &hisi_hba->cq[i];
			const struct cpumask *mask = cq->pci_irq_mask;

			if (mask && !cpumask_intersects(cpu_online_mask, mask))
				continue;
			dq = &hisi_hba->dq[i];
			rc = _hisi_sas_internal_task_abort(hisi_hba, device,
							   abort_flag, tag,
							   dq);
			if (rc)
				return rc;
		}
		break;
	default:
		dev_err(dev, "Unrecognised internal abort flag (%d)\n",
			abort_flag);
		return -EINVAL;
	}

	return 0;
}

static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
			       u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	if (!hisi_hba->hw->write_gpio)
		return -EOPNOTSUPP;

	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
					reg_index, reg_count, write_data);
}
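
/*
 * PHY down handling: the "rdy" argument to hisi_sas_phy_down() below
 * distinguishes a link that dropped but came straight back ready (re-notify
 * libsas that the port formed) from a true loss of signal (tear the port
 * down). During a controller reset or an explicit PHY reset the down event
 * is expected flutter and is ignored.
 */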
static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_phy *sphy = sas_phy->phy;
	struct sas_phy_data *d = sphy->hostdata;

	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;

	if (d->enable)
		sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
	else
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
}

void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct device *dev = hisi_hba->dev;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
		    phy->in_reset) {
			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
			return;
		}
		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		tasklet_kill(&cq->tasklet);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);

struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found		= hisi_sas_dev_found,
	.lldd_dev_gone		= hisi_sas_dev_gone,
	.lldd_execute_task	= hisi_sas_queue_command,
	.lldd_control_phy	= hisi_sas_control_phy,
	.lldd_abort_task	= hisi_sas_abort_task,
	.lldd_abort_task_set	= hisi_sas_abort_task_set,
	.lldd_clear_aca		= hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset		= hisi_sas_lu_reset,
	.lldd_query_task	= hisi_sas_query_task,
	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
	.lldd_port_formed	= hisi_sas_port_formed,
	.lldd_write_gpio	= hisi_sas_write_gpio,
};

void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
	int i, s, j, max_command_entries = hisi_hba->hw->max_command_entries;
	struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
		struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i];

		s = sizeof(struct hisi_sas_cmd_hdr);
		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
			memset(&cmd_hdr[j], 0, s);

		dq->wr_point = 0;

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->complete_hdr[i], 0, s);

		cq->rd_point = 0;
	}

	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
	memset(hisi_hba->initial_fis, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	memset(hisi_hba->iost, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	memset(hisi_hba->breakpoint, 0, s);

	s = sizeof(struct hisi_sas_sata_breakpoint);
	for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++)
		memset(&sata_breakpoint[j], 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);

int hisi_sas_alloc(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries;
	int max_command_entries_ru, sz_slot_buf_ru;
	int blk_cnt, slots_per_blk;

	sema_init(&hisi_hba->sem, 1);
	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		spin_lock_init(&dq->lock);
		INIT_LIST_HEAD(&dq->list);
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->cmd_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->complete_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					     GFP_KERNEL | __GFP_ZERO);
	if (!hisi_hba->itct)
		goto err_out;

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;
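
	/*
	 * Slot buffers are carved out of a handful of DMA-coherent blocks
	 * rather than one allocation per slot. Choosing the block size s as
	 * lcm(max_command_entries_ru, sz_slot_buf_ru) makes both the block
	 * count and the slots-per-block exact integers. A worked example,
	 * assuming (hypothetically) max_command_entries_ru = 4096 and a
	 * 576-byte rounded slot buffer: s = lcm(4096, 576) = 36864, so
	 * blk_cnt = (4096 * 576) / 36864 = 64 blocks, each holding
	 * slots_per_blk = 36864 / 576 = 64 slots.
	 */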
	/* roundup to avoid overly large block size */
	max_command_entries_ru = roundup(max_command_entries, 64);
	if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
	else
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
	sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
	s = lcm(max_command_entries_ru, sz_slot_buf_ru);
	blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
	slots_per_blk = s / sz_slot_buf_ru;

	for (i = 0; i < blk_cnt; i++) {
		int slot_index = i * slots_per_blk;
		dma_addr_t buf_dma;
		void *buf;

		buf = dmam_alloc_coherent(dev, s, &buf_dma,
					  GFP_KERNEL | __GFP_ZERO);
		if (!buf)
			goto err_out;

		for (j = 0; j < slots_per_blk; j++, slot_index++) {
			struct hisi_sas_slot *slot;

			slot = &hisi_hba->slot_info[slot_index];
			slot->buf = buf;
			slot->buf_dma = buf_dma;
			slot->idx = slot_index;

			buf += sz_slot_buf_ru;
			buf_dma += sz_slot_buf_ru;
		}
	}

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					     GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
						   &hisi_hba->breakpoint_dma,
						   GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	hisi_hba->slot_index_count = max_command_entries;
	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
						    &hisi_hba->initial_fis_dma,
						    GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
						&hisi_hba->sata_breakpoint_dma,
						GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;
	hisi_sas_init_mem(hisi_hba);

	hisi_sas_slot_index_init(hisi_hba);
	hisi_hba->last_slot_index = hisi_hba->hw->max_command_entries -
		HISI_SAS_RESERVED_IPTT_CNT;

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);

void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);

void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);

void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
	struct hisi_sas_rst *rst =
		container_of(work, struct hisi_sas_rst, work);

	if (!hisi_sas_controller_reset(rst->hisi_hba))
		rst->done = true;
	complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
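
/*
 * Firmware-described configuration. All controllers must provide "sas-addr",
 * "phy-count" and "queue-count"; controllers probed via DT additionally need
 * the "hisilicon,sas-syscon" phandle plus the three ctrl-*-reg offsets into
 * that syscon. The reference clock is optional and is only used to derive
 * refclk_frequency_mhz.
 */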
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform
		 * device-based controllers with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev,
				"could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);

static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	int error;

	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (error) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hisi_hba->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(hisi_hba->sgpio_regs))
			goto err_out;
	}

	if (hisi_sas_alloc(hisi_hba)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}
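
/*
 * Platform probe glue. The ordering below matters: the Scsi_Host and all DMA
 * memories are set up first, the host is registered with the SCSI and libsas
 * layers, and only then is the hardware brought up and the bus scanned; the
 * intent, presumably, is that any event raised from hw_init() onwards lands
 * on a fully registered host.
 */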
int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	if (hisi_hba->hw->slot_index_alloc) {
		shost->can_queue = hisi_hba->hw->max_command_entries;
		shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
	} else {
		shost->can_queue = hisi_hba->hw->max_command_entries -
			HISI_SAS_RESERVED_IPTT_CNT;
		shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
			HISI_SAS_RESERVED_IPTT_CNT;
	}

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);

struct dentry *hisi_sas_debugfs_dir;

static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba)
{
	int queue_entry_size = hisi_hba->hw->complete_hdr_size;
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++)
		memcpy(hisi_hba->debugfs_complete_hdr[i],
		       hisi_hba->complete_hdr[i],
		       HISI_SAS_QUEUE_SLOTS * queue_entry_size);
}

static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba)
{
	int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr);
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;
		int j;

		debugfs_cmd_hdr = hisi_hba->debugfs_cmd_hdr[i];
		cmd_hdr = hisi_hba->cmd_hdr[i];

		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
			memcpy(&debugfs_cmd_hdr[j], &cmd_hdr[j],
			       queue_entry_size);
	}
}

static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
{
	const struct hisi_sas_debugfs_reg *port =
		hisi_hba->hw->debugfs_reg_port;
	int i, phy_cnt;
	u32 offset;
	u32 *databuf;

	for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
		databuf = (u32 *)hisi_hba->debugfs_port_reg[phy_cnt];
		for (i = 0; i < port->count; i++, databuf++) {
			offset = port->base_off + 4 * i;
			*databuf = port->read_port_reg(hisi_hba, phy_cnt,
						       offset);
		}
	}
}

static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
{
	u32 *databuf = (u32 *)hisi_hba->debugfs_global_reg;
	const struct hisi_sas_debugfs_reg *global =
		hisi_hba->hw->debugfs_reg_global;
	int i;

	for (i = 0; i < global->count; i++, databuf++)
		*databuf = global->read_global_reg(hisi_hba, 4 * i);
}

static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
{
	void *databuf = hisi_hba->debugfs_itct;
	struct hisi_sas_itct *itct;
	int i;

	itct = hisi_hba->itct;

	for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
		memcpy(databuf, itct, sizeof(struct hisi_sas_itct));
		databuf += sizeof(struct hisi_sas_itct);
	}
}

static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba)
{
	int max_command_entries = hisi_hba->hw->max_command_entries;
	void *databuf = hisi_hba->debugfs_iost;
	struct hisi_sas_iost *iost;
	int i;

	iost = hisi_hba->iost;

	for (i = 0; i < max_command_entries; i++, iost++) {
		memcpy(databuf, iost, sizeof(struct hisi_sas_iost));
		databuf += sizeof(struct hisi_sas_iost);
	}
}

static const char *
hisi_sas_debugfs_to_reg_name(int off, int base_off,
			     const struct hisi_sas_debugfs_reg_lu *lu)
{
	for (; lu->name; lu++) {
		if (off == lu->off - base_off)
			return lu->name;
	}

	return NULL;
}

static void hisi_sas_debugfs_print_reg(u32 *regs_val, const void *ptr,
				       struct seq_file *s)
{
	const struct hisi_sas_debugfs_reg *reg = ptr;
	int i;

	for (i = 0; i < reg->count; i++) {
		int off = i * 4;
		const char *name;

		name = hisi_sas_debugfs_to_reg_name(off, reg->base_off,
						    reg->lu);

		if (name)
			seq_printf(s, "0x%08x 0x%08x %s\n", off,
				   regs_val[i], name);
		else
			seq_printf(s, "0x%08x 0x%08x\n", off,
				   regs_val[i]);
	}
}

static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const struct hisi_sas_debugfs_reg *reg_global = hw->debugfs_reg_global;

	hisi_sas_debugfs_print_reg(hisi_hba->debugfs_global_reg,
				   reg_global, s);

	return 0;
}

static int hisi_sas_debugfs_global_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_global_show,
			   inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_global_fops = {
	.open = hisi_sas_debugfs_global_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p)
{
	struct hisi_sas_phy *phy = s->private;
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const struct hisi_sas_debugfs_reg *reg_port = hw->debugfs_reg_port;
	u32 *databuf = hisi_hba->debugfs_port_reg[phy->sas_phy.id];

	hisi_sas_debugfs_print_reg(databuf, reg_port, s);

	return 0;
}

static int hisi_sas_debugfs_port_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_port_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_port_fops = {
	.open = hisi_sas_debugfs_port_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};
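
/*
 * The row helpers below dump one queue/table entry per "index NNNN:" stanza,
 * as 64-bit or 32-bit little-endian words. For example, a 16-byte entry
 * printed by hisi_sas_show_row_64() would look roughly like:
 *
 *	index 0003:
 *		 0x0000000000000001 0x00000000deadbeef
 *
 * (the values here are illustrative only).
 */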
static int hisi_sas_show_row_64(struct seq_file *s, int index,
				int sz, __le64 *ptr)
{
	int i;

	/* completion header size not fixed per HW version */
	seq_printf(s, "index %04d:\n\t", index);
	for (i = 1; i <= sz / 8; i++, ptr++) {
		seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr));
		if (!(i % 2))
			seq_puts(s, "\n\t");
	}

	seq_puts(s, "\n");

	return 0;
}

static int hisi_sas_show_row_32(struct seq_file *s, int index,
				int sz, __le32 *ptr)
{
	int i;

	/* completion header size not fixed per HW version */
	seq_printf(s, "index %04d:\n\t", index);
	for (i = 1; i <= sz / 4; i++, ptr++) {
		seq_printf(s, " 0x%08x", le32_to_cpu(*ptr));
		if (!(i % 4))
			seq_puts(s, "\n\t");
	}
	seq_puts(s, "\n");

	return 0;
}

static int hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
{
	struct hisi_sas_cq *cq = cq_ptr;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	void *complete_queue = hisi_hba->debugfs_complete_hdr[cq->id];
	__le32 *complete_hdr = complete_queue +
			(hisi_hba->hw->complete_hdr_size * slot);

	return hisi_sas_show_row_32(s, slot,
				    hisi_hba->hw->complete_hdr_size,
				    complete_hdr);
}

static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p)
{
	struct hisi_sas_cq *cq = s->private;
	int slot, ret;

	for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
		ret = hisi_sas_cq_show_slot(s, slot, cq);
		if (ret)
			return ret;
	}
	return 0;
}

static int hisi_sas_debugfs_cq_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_cq_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_cq_fops = {
	.open = hisi_sas_debugfs_cq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

static int hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr)
{
	struct hisi_sas_dq *dq = dq_ptr;
	struct hisi_hba *hisi_hba = dq->hisi_hba;
	void *cmd_queue = hisi_hba->debugfs_cmd_hdr[dq->id];
	__le32 *cmd_hdr = cmd_queue +
		sizeof(struct hisi_sas_cmd_hdr) * slot;

	return hisi_sas_show_row_32(s, slot, sizeof(struct hisi_sas_cmd_hdr),
				    cmd_hdr);
}

static int hisi_sas_debugfs_dq_show(struct seq_file *s, void *p)
{
	int slot, ret;

	for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
		ret = hisi_sas_dq_show_slot(s, slot, s->private);
		if (ret)
			return ret;
	}
	return 0;
}

static int hisi_sas_debugfs_dq_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_dq_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_dq_fops = {
	.open = hisi_sas_debugfs_dq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;
	struct hisi_sas_iost *debugfs_iost = hisi_hba->debugfs_iost;
	int i, ret, max_command_entries = hisi_hba->hw->max_command_entries;

	for (i = 0; i < max_command_entries; i++, debugfs_iost++) {
		/* recompute the qw0 pointer as debugfs_iost advances */
		__le64 *iost = &debugfs_iost->qw0;

		ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_iost),
					   iost);
		if (ret)
			return ret;
	}

	return 0;
}

static int hisi_sas_debugfs_iost_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_iost_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_iost_fops = {
	.open = hisi_sas_debugfs_iost_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p)
{
	int i, ret;
	struct hisi_hba *hisi_hba = s->private;
	struct hisi_sas_itct *debugfs_itct = hisi_hba->debugfs_itct;

	for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, debugfs_itct++) {
		/* recompute the qw0 pointer as debugfs_itct advances */
		__le64 *itct = &debugfs_itct->qw0;

		ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_itct),
					   itct);
		if (ret)
			return ret;
	}

	return 0;
}

static int hisi_sas_debugfs_itct_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_itct_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_itct_fops = {
	.open = hisi_sas_debugfs_itct_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
{
	struct dentry *dump_dentry;
	struct dentry *dentry;
	char name[256];
	int p;
	int c;
	int d;

	/* Create dump dir inside device dir */
	dump_dentry = debugfs_create_dir("dump", hisi_hba->debugfs_dir);
	hisi_hba->debugfs_dump_dentry = dump_dentry;

	debugfs_create_file("global", 0400, dump_dentry, hisi_hba,
			    &hisi_sas_debugfs_global_fops);

	/* Create port dir and files */
	dentry = debugfs_create_dir("port", dump_dentry);
	for (p = 0; p < hisi_hba->n_phy; p++) {
		snprintf(name, 256, "%d", p);

		debugfs_create_file(name, 0400, dentry, &hisi_hba->phy[p],
				    &hisi_sas_debugfs_port_fops);
	}

	/* Create CQ dir and files */
	dentry = debugfs_create_dir("cq", dump_dentry);
	for (c = 0; c < hisi_hba->queue_count; c++) {
		snprintf(name, 256, "%d", c);

		debugfs_create_file(name, 0400, dentry, &hisi_hba->cq[c],
				    &hisi_sas_debugfs_cq_fops);
	}

	/* Create DQ dir and files */
	dentry = debugfs_create_dir("dq", dump_dentry);
	for (d = 0; d < hisi_hba->queue_count; d++) {
		snprintf(name, 256, "%d", d);

		debugfs_create_file(name, 0400, dentry, &hisi_hba->dq[d],
				    &hisi_sas_debugfs_dq_fops);
	}

	debugfs_create_file("iost", 0400, dump_dentry, hisi_hba,
			    &hisi_sas_debugfs_iost_fops);

	debugfs_create_file("itct", 0400, dump_dentry, hisi_hba,
			    &hisi_sas_debugfs_itct_fops);
}

static void hisi_sas_debugfs_snapshot_regs(struct hisi_hba *hisi_hba)
{
	hisi_hba->hw->snapshot_prepare(hisi_hba);

	hisi_sas_debugfs_snapshot_global_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_port_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_cq_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_dq_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_itct_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_iost_reg(hisi_hba);

	hisi_sas_debugfs_create_files(hisi_hba);

	hisi_hba->hw->snapshot_restore(hisi_hba);
}
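
/*
 * Dumps are user-triggered. With the debugfs_enable module parameter set,
 * writing '1' to the trigger_dump file kicks the snapshot work; for example
 * (path assuming a standard debugfs mount at /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/hisi_sas/<dev>/trigger_dump
 *
 * The snapshot is then browsable under the adjacent "dump" directory.
 */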
static ssize_t hisi_sas_debugfs_trigger_dump_write(struct file *file,
						   const char __user *user_buf,
						   size_t count, loff_t *ppos)
{
	struct hisi_hba *hisi_hba = file->f_inode->i_private;
	char buf[8];

	/* A bit racy, but don't care too much since it's only debugfs */
	if (hisi_hba->debugfs_snapshot)
		return -EFAULT;

	if (count > 8)
		return -EFAULT;

	if (copy_from_user(buf, user_buf, count))
		return -EFAULT;

	if (buf[0] != '1')
		return -EFAULT;

	queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

	return count;
}

static const struct file_operations hisi_sas_debugfs_trigger_dump_fops = {
	.write = &hisi_sas_debugfs_trigger_dump_write,
	.owner = THIS_MODULE,
};

void hisi_sas_debugfs_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, debugfs_work);

	if (hisi_hba->debugfs_snapshot)
		return;
	hisi_hba->debugfs_snapshot = true;

	hisi_sas_debugfs_snapshot_regs(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler);

void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
{
	int max_command_entries = hisi_hba->hw->max_command_entries;
	struct device *dev = hisi_hba->dev;
	int p, i, c, d;
	size_t sz;

	hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
						   hisi_sas_debugfs_dir);
	debugfs_create_file("trigger_dump", 0600,
			    hisi_hba->debugfs_dir,
			    hisi_hba,
			    &hisi_sas_debugfs_trigger_dump_fops);

	/* Alloc buffer for global */
	sz = hisi_hba->hw->debugfs_reg_global->count * 4;
	hisi_hba->debugfs_global_reg =
		devm_kmalloc(dev, sz, GFP_KERNEL);

	if (!hisi_hba->debugfs_global_reg)
		goto fail_global;

	/* Alloc buffer for port */
	sz = hisi_hba->hw->debugfs_reg_port->count * 4;
	for (p = 0; p < hisi_hba->n_phy; p++) {
		hisi_hba->debugfs_port_reg[p] =
			devm_kmalloc(dev, sz, GFP_KERNEL);

		if (!hisi_hba->debugfs_port_reg[p])
			goto fail_port;
	}

	/* Alloc buffer for cq */
	sz = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
	for (c = 0; c < hisi_hba->queue_count; c++) {
		hisi_hba->debugfs_complete_hdr[c] =
			devm_kmalloc(dev, sz, GFP_KERNEL);

		if (!hisi_hba->debugfs_complete_hdr[c])
			goto fail_cq;
	}

	/* Alloc buffer for dq */
	sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
	for (d = 0; d < hisi_hba->queue_count; d++) {
		hisi_hba->debugfs_cmd_hdr[d] =
			devm_kmalloc(dev, sz, GFP_KERNEL);

		if (!hisi_hba->debugfs_cmd_hdr[d])
			goto fail_iost_dq;
	}

	/* Alloc buffer for iost */
	sz = max_command_entries * sizeof(struct hisi_sas_iost);

	hisi_hba->debugfs_iost = devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_iost)
		goto fail_iost_dq;

	/* Alloc buffer for itct */
	/* New memory allocations must be placed before the itct buffer */
	sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);

	hisi_hba->debugfs_itct = devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_itct)
		goto fail_itct;

	return;
fail_itct:
	devm_kfree(dev, hisi_hba->debugfs_iost);
fail_iost_dq:
	for (i = 0; i < d; i++)
		devm_kfree(dev, hisi_hba->debugfs_cmd_hdr[i]);
fail_cq:
	for (i = 0; i < c; i++)
		devm_kfree(dev, hisi_hba->debugfs_complete_hdr[i]);
fail_port:
	for (i = 0; i < p; i++)
		devm_kfree(dev, hisi_hba->debugfs_port_reg[i]);
	devm_kfree(dev, hisi_hba->debugfs_global_reg);
fail_global:
	debugfs_remove_recursive(hisi_hba->debugfs_dir);
	dev_dbg(dev, "failed to init debugfs!\n");
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init);

void hisi_sas_debugfs_exit(struct hisi_hba *hisi_hba)
{
	debugfs_remove_recursive(hisi_hba->debugfs_dir);
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_exit);

int hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	if (timer_pending(&hisi_hba->timer))
		del_timer(&hisi_hba->timer);

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);

bool hisi_sas_debugfs_enable;
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_enable, "Enable driver debugfs (default disabled)");

static __init int hisi_sas_init(void)
{
	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	if (hisi_sas_debugfs_enable)
		hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);

	debugfs_remove(hisi_sas_debugfs_dir);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);