1 /* 2 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver 3 * 4 * Copyright (c) 2008-2009 USI Co., Ltd. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions, and the following disclaimer, 12 * without modification. 13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 14 * substantially similar to the "NO WARRANTY" disclaimer below 15 * ("Disclaimer") and any redistribution must be conditioned upon 16 * including a substantially similar Disclaimer requirement for further 17 * binary redistribution. 18 * 3. Neither the names of the above-listed copyright holders nor the names 19 * of any contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * Alternatively, this software may be distributed under the terms of the 23 * GNU General Public License ("GPL") version 2 as published by the Free 24 * Software Foundation. 25 * 26 * NO WARRANTY 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * POSSIBILITY OF SUCH DAMAGES. 
38 * 39 */ 40 41 #include <linux/slab.h> 42 #include "pm8001_sas.h" 43 #include "pm80xx_tracepoints.h" 44 45 /** 46 * pm8001_find_tag - from sas task to find out tag that belongs to this task 47 * @task: the task sent to the LLDD 48 * @tag: the found tag associated with the task 49 */ 50 static int pm8001_find_tag(struct sas_task *task, u32 *tag) 51 { 52 if (task->lldd_task) { 53 struct pm8001_ccb_info *ccb; 54 ccb = task->lldd_task; 55 *tag = ccb->ccb_tag; 56 return 1; 57 } 58 return 0; 59 } 60 61 /** 62 * pm8001_tag_free - free the no more needed tag 63 * @pm8001_ha: our hba struct 64 * @tag: the found tag associated with the task 65 */ 66 void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) 67 { 68 void *bitmap = pm8001_ha->rsvd_tags; 69 unsigned long flags; 70 71 if (tag >= PM8001_RESERVE_SLOT) 72 return; 73 74 spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags); 75 __clear_bit(tag, bitmap); 76 spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags); 77 } 78 79 /** 80 * pm8001_tag_alloc - allocate a empty tag for task used. 81 * @pm8001_ha: our hba struct 82 * @tag_out: the found empty tag . 
83 */ 84 int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out) 85 { 86 void *bitmap = pm8001_ha->rsvd_tags; 87 unsigned long flags; 88 unsigned int tag; 89 90 spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags); 91 tag = find_first_zero_bit(bitmap, PM8001_RESERVE_SLOT); 92 if (tag >= PM8001_RESERVE_SLOT) { 93 spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags); 94 return -SAS_QUEUE_FULL; 95 } 96 __set_bit(tag, bitmap); 97 spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags); 98 99 /* reserved tags are in the lower region of the tagset */ 100 *tag_out = tag; 101 return 0; 102 } 103 104 static void pm80xx_get_tag_opcodes(struct sas_task *task, int *ata_op, 105 int *ata_tag, bool *task_aborted) 106 { 107 unsigned long flags; 108 struct ata_queued_cmd *qc = NULL; 109 110 *ata_op = 0; 111 *ata_tag = -1; 112 *task_aborted = false; 113 114 if (!task) 115 return; 116 117 spin_lock_irqsave(&task->task_state_lock, flags); 118 if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) 119 *task_aborted = true; 120 spin_unlock_irqrestore(&task->task_state_lock, flags); 121 122 if (task->task_proto == SAS_PROTOCOL_STP) { 123 // sas_ata_qc_issue path uses SAS_PROTOCOL_STP. 124 // This only works for scsi + libsas + libata users. 
125 qc = task->uldd_task; 126 if (qc) { 127 *ata_op = qc->tf.command; 128 *ata_tag = qc->tag; 129 } 130 } 131 } 132 133 u32 pm80xx_get_local_phy_id(struct domain_device *dev) 134 { 135 struct pm8001_device *pm8001_dev = dev->lldd_dev; 136 137 if (dev_parent_is_expander(dev)) 138 return dev->parent->ex_dev.ex_phy->phy_id; 139 140 return pm8001_dev->attached_phy; 141 } 142 143 void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha, 144 struct pm8001_device *target_pm8001_dev) 145 { 146 int i = 0, ata_op = 0, ata_tag = -1; 147 struct pm8001_ccb_info *ccb = NULL; 148 struct sas_task *task = NULL; 149 struct pm8001_device *pm8001_dev = NULL; 150 bool task_aborted; 151 152 for (i = 0; i < pm8001_ha->ccb_count; i++) { 153 ccb = &pm8001_ha->ccb_info[i]; 154 if (ccb->ccb_tag == PM8001_INVALID_TAG) 155 continue; 156 pm8001_dev = ccb->device; 157 if (target_pm8001_dev && pm8001_dev && 158 target_pm8001_dev != pm8001_dev) 159 continue; 160 task = ccb->task; 161 pm80xx_get_tag_opcodes(task, &ata_op, &ata_tag, &task_aborted); 162 pm8001_dbg(pm8001_ha, FAIL, 163 "tag %#x, device %#x task %p task aborted %d ata opcode %#x ata tag %d\n", 164 ccb->ccb_tag, 165 (pm8001_dev ? pm8001_dev->device_id : 0), 166 task, task_aborted, 167 ata_op, ata_tag); 168 } 169 } 170 171 /** 172 * pm8001_mem_alloc - allocate memory for pm8001. 173 * @pdev: pci device. 174 * @virt_addr: the allocated virtual address 175 * @pphys_addr: DMA address for this device 176 * @pphys_addr_hi: the physical address high byte address. 177 * @pphys_addr_lo: the physical address low byte address. 178 * @mem_size: memory size. 
 * @align: requested byte alignment
 *
 * Over-allocates by @align bytes so the returned DMA address can be rounded
 * up to the requested alignment; @virt_addr is offset by the same amount so
 * it stays paired with the aligned DMA address.
 *
 * Return: 0 on success, -ENOMEM if the coherent allocation fails.
 */
int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
	dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
	u32 *pphys_addr_lo, u32 mem_size, u32 align)
{
	caddr_t mem_virt_alloc;
	dma_addr_t mem_dma_handle;
	u64 phys_align;
	u64 align_offset = 0;
	if (align)
		align_offset = (dma_addr_t)align - 1;
	mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
			&mem_dma_handle, GFP_KERNEL);
	if (!mem_virt_alloc)
		return -ENOMEM;
	*pphys_addr = mem_dma_handle;
	/* Round the DMA address up to the requested alignment. */
	phys_align = (*pphys_addr + align_offset) & ~align_offset;
	/* Apply the identical offset to the virtual address. */
	*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
	*pphys_addr_hi = upper_32_bits(phys_align);
	*pphys_addr_lo = lower_32_bits(phys_align);
	return 0;
}

/**
 * pm8001_find_ha_by_dev - from domain device which come from sas layer to
 * find out our hba struct.
 * @dev: the domain device which from sas layer.
 */
static
struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
{
	struct sas_ha_struct *sha = dev->port->ha;
	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
	return pm8001_ha;
}

/**
 * pm8001_phy_control - this function should be registered to
 * sas_domain_function_template to provide libsas used, note: this is just
 * control the HBA phy rather than other expander phy if you want control
 * other phy, you should use SMP command.
 * @sas_phy: which phy in HBA phys.
 * @func: the operation.
 * @funcdata: always NULL.
 */
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
	void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct sas_phy_linkrates *rates;
	struct pm8001_phy *phy;
	DECLARE_COMPLETION_ONSTACK(completion);
	unsigned long flags;
	pm8001_ha = sas_phy->ha->lldd_ha;
	phy = &pm8001_ha->phy[phy_id];

	if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
		/*
		 * If the controller is in fatal error state,
		 * we will not get a response from the controller
		 */
		pm8001_dbg(pm8001_ha, FAIL,
			   "Phy control failed due to fatal errors\n");
		return -EFAULT;
	}

	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		rates = funcdata;
		if (rates->minimum_linkrate) {
			pm8001_ha->phy[phy_id].minimum_linkrate =
				rates->minimum_linkrate;
		}
		if (rates->maximum_linkrate) {
			pm8001_ha->phy[phy_id].maximum_linkrate =
				rates->maximum_linkrate;
		}
		/* A disabled phy must be started (and the start acknowledged
		 * by the firmware via the completion) before a link reset. */
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			pm8001_ha->phy[phy_id].enable_completion = &completion;
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_HARD_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			pm8001_ha->phy[phy_id].enable_completion = &completion;
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_HARD_RESET);
		break;
	case PHY_FUNC_LINK_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			pm8001_ha->phy[phy_id].enable_completion = &completion;
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_DISABLE:
		/* SPC (chip_8001) and SPCv report link-up with different
		 * state values; only notify loss of signal if currently up. */
		if (pm8001_ha->chip_id != chip_8001) {
			if (pm8001_ha->phy[phy_id].phy_state ==
			    PHY_STATE_LINK_UP_SPCV) {
				sas_phy_disconnected(&phy->sas_phy);
				sas_notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
				phy->phy_attached = 0;
			}
		} else {
			if (pm8001_ha->phy[phy_id].phy_state ==
			    PHY_STATE_LINK_UP_SPC) {
				sas_phy_disconnected(&phy->sas_phy);
				sas_notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
				phy->phy_attached = 0;
			}
		}
		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
		break;
	case PHY_FUNC_GET_EVENTS:
		spin_lock_irqsave(&pm8001_ha->lock, flags);
		/* SPC needs BAR4 shifted to the phy's register window;
		 * it is shifted back to 0 before the lock is released. */
		if (pm8001_ha->chip_id == chip_8001) {
			if (-1 == pm8001_bar4_shift(pm8001_ha,
					(phy_id < 4) ? 0x30000 : 0x40000)) {
				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
				return -EINVAL;
			}
		}
		{
			struct sas_phy *phy = sas_phy->phy;
			u32 __iomem *qp = pm8001_ha->io_mem[2].memvirtaddr
				+ 0x1034 + (0x4000 * (phy_id & 3));

			phy->invalid_dword_count = readl(qp);
			phy->running_disparity_error_count = readl(&qp[1]);
			phy->loss_of_dword_sync_count = readl(&qp[3]);
			phy->phy_reset_problem_count = readl(&qp[4]);
		}
		if (pm8001_ha->chip_id == chip_8001)
			pm8001_bar4_shift(pm8001_ha, 0);
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		return 0;
	default:
		pm8001_dbg(pm8001_ha, DEVIO, "func 0x%x\n", func);
		rc = -EOPNOTSUPP;
	}
	/* Give the phy operation time to take effect before returning. */
	msleep(300);
	return rc;
}

/**
 * pm8001_scan_start - we should enable all HBA phys by sending the phy_start
 * command to HBA.
 * @shost: the scsi host data.
 */
void pm8001_scan_start(struct Scsi_Host *shost)
{
	int i;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	DECLARE_COMPLETION_ONSTACK(completion);
	pm8001_ha = sha->lldd_ha;
	/* SAS_RE_INITIALIZATION not available in SPCv/ve */
	if (pm8001_ha->chip_id == chip_8001)
		PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
	/* Start phys one at a time, waiting for each enable completion. */
	for (i = 0; i < pm8001_ha->chip->n_phy; ++i) {
		pm8001_ha->phy[i].enable_completion = &completion;
		PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
		wait_for_completion(&completion);
		msleep(300);
	}
}

/*
 * libsas scan_finished hook: report scan complete once the phy enable
 * events have had time to arrive and discovery work has drained.
 */
int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);

	/* give the phy enabling interrupt event time to come in (1s
	 * is empirically about all it takes) */
	if (time < HZ)
		return 0;
	/* Wait for discovery to finish */
	sas_drain_work(ha);
	return 1;
}

/**
 * pm8001_task_prep_smp - the dispatcher function, prepare data for smp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb which attached to smp task
 */
static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
}

/*
 * Return 1 and store the NCQ tag if the task's queued command uses an
 * NCQ protocol, otherwise 0.
 */
u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc && ata_is_ncq(qc->tf.protocol)) {
		*tag = qc->tag;
		return 1;
	}

	return 0;
}

/**
 * pm8001_task_prep_ata - the dispatcher function, prepare data for sata task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb which attached to sata task
 */
static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
}

/**
 * pm8001_task_prep_internal_abort - the dispatcher function, prepare data
 * for internal abort task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb which attached to sata task
 */
static int pm8001_task_prep_internal_abort(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->task_abort(pm8001_ha, ccb);
}

/**
 * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
 * @pm8001_ha: our hba card information
 * @ccb: the ccb which attached to TM
 * @tmf: the task management IU
 */
static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf)
{
	return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
}

/**
 * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for ssp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb which attached to ssp task
 */
static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
}

/* A device is "gone" if it is absent or its slot was recycled. */
#define DEV_IS_GONE(pm8001_dev)	\
	((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))


/* Route a prepared ccb to the protocol-specific request builder. */
static int pm8001_deliver_command(struct pm8001_hba_info *pm8001_ha,
				  struct pm8001_ccb_info *ccb)
{
	struct sas_task *task = ccb->task;
	enum sas_protocol task_proto = task->task_proto;
	struct sas_tmf_task *tmf = task->tmf;
	int is_tmf = !!tmf;

	switch (task_proto) {
	case SAS_PROTOCOL_SMP:
		return pm8001_task_prep_smp(pm8001_ha, ccb);
	case SAS_PROTOCOL_SSP:
		if (is_tmf)
			return pm8001_task_prep_ssp_tm(pm8001_ha, ccb, tmf);
		return pm8001_task_prep_ssp(pm8001_ha, ccb);
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
		return pm8001_task_prep_ata(pm8001_ha, ccb);
	case SAS_PROTOCOL_INTERNAL_ABORT:
		return pm8001_task_prep_internal_abort(pm8001_ha, ccb);
	default:
		dev_err(pm8001_ha->dev, "unknown sas_task proto: 0x%x\n",
			task_proto);
	}

	return -EINVAL;
}

/**
 * pm8001_queue_command - register for upper layer used, all IO commands sent
 * to HBA are from this interface.
 * @task: the task to be execute.
 * @gfp_flags: gfp_flags
 */
int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	struct task_status_struct *ts = &task->task_status;
	enum sas_protocol task_proto = task->task_proto;
	struct domain_device *dev = task->dev;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	bool internal_abort = sas_is_internal_abort(task);
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_port *port;
	struct pm8001_ccb_info *ccb;
	unsigned long flags;
	u32 n_elem = 0;
	int rc = 0;

	/* No port yet: fail the task as undelivered (phy down). */
	if (!internal_abort && !dev->port) {
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		if (dev->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return 0;
	}

	pm8001_ha = pm8001_find_ha_by_dev(dev);
	if (pm8001_ha->controller_fatal_error) {
		ts->resp = SAS_TASK_UNDELIVERED;
		task->task_done(task);
		return 0;
	}

	pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device\n");

	spin_lock_irqsave(&pm8001_ha->lock, flags);

	port = dev->port->lldd_port;

	if (!internal_abort &&
	    (DEV_IS_GONE(pm8001_dev) || !port || !port->port_attached)) {
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/* For ATA, task_done must be called without the ha lock held;
		 * drop it around the callback and re-take it afterwards. */
		if (sas_protocol_ata(task_proto)) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			task->task_done(task);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		} else {
			task->task_done(task);
		}
		rc = -ENODEV;
		goto err_out;
	}

	ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
	if (!ccb) {
		rc = -SAS_QUEUE_FULL;
		goto err_out;
	}

	/* Non-ATA: map the scatterlist here; ATA sg is handled elsewhere
	 * (NOTE(review): presumably by the sata_req path — confirm). */
	if (!sas_protocol_ata(task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(pm8001_ha->dev, task->scatter,
					    task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto err_out_ccb;
			}
		}
	} else {
		n_elem = task->num_scatter;
	}

	task->lldd_task = ccb;
	ccb->n_elem = n_elem;

	atomic_inc(&pm8001_dev->running_req);

	rc = pm8001_deliver_command(pm8001_ha, ccb);
	if (rc) {
		/* Unwind: drop the running count, unmap, free the ccb. */
		atomic_dec(&pm8001_dev->running_req);
		if (!sas_protocol_ata(task_proto) && n_elem)
			dma_unmap_sg(pm8001_ha->dev, task->scatter,
				     task->num_scatter, task->data_dir);
err_out_ccb:
		pm8001_ccb_free(pm8001_ha, ccb);

err_out:
		pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec failed[%d]!\n", rc);
	}

	spin_unlock_irqrestore(&pm8001_ha->lock, flags);

	return rc;
}

/**
 * pm8001_ccb_task_free - free the sg for ssp and smp command, free the ccb.
 * @pm8001_ha: our hba card information
 * @ccb: the ccb which attached to ssp task to free
 */
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	struct sas_task *task = ccb->task;
	struct ata_queued_cmd *qc;
	struct pm8001_device *pm8001_dev;

	if (!task)
		return;

	/* Undo the mapping done in pm8001_queue_command(). */
	if (!sas_protocol_ata(task->task_proto) && ccb->n_elem)
		dma_unmap_sg(pm8001_ha->dev, task->scatter,
			     task->num_scatter, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		/* SMP request/response buffers are mapped separately. */
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
			     DMA_TO_DEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}

	if (sas_protocol_ata(task->task_proto)) {
		/* For SCSI/ATA commands uldd_task points to ata_queued_cmd */
		qc = task->uldd_task;
		pm8001_dev = ccb->device;
		trace_pm80xx_request_complete(pm8001_ha->id,
			pm8001_dev ? pm8001_dev->attached_phy : PM8001_MAX_PHYS,
			ccb->ccb_tag, 0 /* ctlr_opcode not known */,
			qc ? qc->tf.command : 0, // ata opcode
			pm8001_dev ? atomic_read(&pm8001_dev->running_req) : -1);
	}

	task->lldd_task = NULL;
	pm8001_ccb_free(pm8001_ha, ccb);
}

/* Reset a device slot to its freshly-allocated state. */
static void pm8001_init_dev(struct pm8001_device *pm8001_dev, int id)
{
	pm8001_dev->id = id;
	/* PM8001_MAX_DEVICES doubles as "no firmware device id yet". */
	pm8001_dev->device_id = PM8001_MAX_DEVICES;
	atomic_set(&pm8001_dev->running_req, 0);
}

/**
 * pm8001_alloc_dev - find a empty pm8001_device
 * @pm8001_ha: our hba card information
 */
static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
{
	u32 dev;
	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
		struct pm8001_device *pm8001_dev = &pm8001_ha->devices[dev];

		if (pm8001_dev->dev_type == SAS_PHY_UNUSED) {
			pm8001_init_dev(pm8001_dev, dev);
			return pm8001_dev;
		}
	}
	if (dev == PM8001_MAX_DEVICES) {
		pm8001_dbg(pm8001_ha, FAIL,
			   "max support %d devices, ignore ..\n",
			   PM8001_MAX_DEVICES);
	}
	return NULL;
}
/**
 * pm8001_find_dev - find a matching pm8001_device
 * @pm8001_ha: our hba card information
 * @device_id: device ID to match against
 */
struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
				      u32 device_id)
{
	u32 dev;
	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
		if (pm8001_ha->devices[dev].device_id == device_id)
			return &pm8001_ha->devices[dev];
	}
	if (dev == PM8001_MAX_DEVICES) {
		pm8001_dbg(pm8001_ha, FAIL, "NO MATCHING DEVICE FOUND !!!\n");
	}
	return NULL;
}

/* Return a device slot to the free pool (dev_type back to SAS_PHY_UNUSED). */
void pm8001_free_dev(struct pm8001_device *pm8001_dev)
{
	memset(pm8001_dev, 0, sizeof(*pm8001_dev));
	pm8001_dev->dev_type = SAS_PHY_UNUSED;
	pm8001_dev->device_id = PM8001_MAX_DEVICES;
	pm8001_dev->sas_device = NULL;
}

/**
 * pm8001_dev_found_notify - libsas notify a device is found.
 * @dev: the device structure which sas layer used.
 *
 * when libsas find a sas domain device, it should tell the LLDD that
 * device is found, and then LLDD register this device to HBA firmware
 * by the command "OPC_INB_REG_DEV", after that the HBA will assign a
 * device ID(according to device's sas address) and returned it to LLDD. From
 * now on, we communicate with HBA FW with the device ID which HBA assigned
 * rather than sas address. it is the necessary step for our HBA but it is
 * the optional for other HBA driver.
 */
static int pm8001_dev_found_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	int res = 0;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct pm8001_device *pm8001_device;
	DECLARE_COMPLETION_ONSTACK(completion);
	u32 flag = 0;	/* 1 = directly attached SATA */
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);

	pm8001_device = pm8001_alloc_dev(pm8001_ha);
	if (!pm8001_device) {
		res = -1;
		goto found_out;
	}
	pm8001_device->sas_device = dev;
	dev->lldd_dev = pm8001_device;
	pm8001_device->dev_type = dev->dev_type;
	pm8001_device->dcompletion = &completion;
	if (dev_parent_is_expander(dev)) {
		int phy_id;

		/* Behind an expander: record the expander phy we hang off. */
		phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev);
		if (phy_id < 0) {
			pm8001_dbg(pm8001_ha, FAIL,
				   "Error: no attached dev:%016llx at ex:%016llx.\n",
				   SAS_ADDR(dev->sas_addr),
				   SAS_ADDR(parent_dev->sas_addr));
			res = phy_id;
		} else {
			pm8001_device->attached_phy = phy_id;
		}
	} else {
		if (dev->dev_type == SAS_SATA_DEV) {
			pm8001_device->attached_phy =
				dev->rphy->identify.phy_identifier;
			flag = 1; /* directly sata */
		}
	} /*register this device to HBA*/
	pm8001_dbg(pm8001_ha, DISC, "Found device\n");
	PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
	/* Drop the lock before waiting for the registration completion. */
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	wait_for_completion(&completion);
	if (dev->dev_type == SAS_END_DEVICE)
		msleep(50);
	pm8001_ha->flags = PM8001F_RUN_TIME;
	return 0;
found_out:
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return res;
}

/* libsas dev_found hook: thin wrapper around the notify helper. */
int pm8001_dev_found(struct domain_device *dev)
{
	return pm8001_dev_found_notify(dev);
}

/* Timeout, in seconds, used for abort/reset waits below. */
#define PM8001_TASK_TIMEOUT 20

/**
 * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
 * @dev: the device structure which sas layer used.
 */
static void pm8001_dev_gone_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;

	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	if (pm8001_dev) {
		u32 device_id = pm8001_dev->device_id;

		pm8001_dbg(pm8001_ha, DISC, "found dev[%d:%x] is gone.\n",
			   pm8001_dev->device_id, pm8001_dev->dev_type);
		/* Abort outstanding I/O and wait for it to drain, with the
		 * ha lock dropped so completions can run. */
		if (atomic_read(&pm8001_dev->running_req)) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			sas_execute_internal_abort_dev(dev, 0, NULL);
			while (atomic_read(&pm8001_dev->running_req))
				msleep(20);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
		PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);

		/*
		 * The phy array only contains local phys. Thus, we cannot clear
		 * phy_attached for a device behind an expander.
		 */
		if (!dev_parent_is_expander(dev)) {
			u32 phy_id = pm80xx_get_local_phy_id(dev);

			pm8001_ha->phy[phy_id].phy_attached = 0;
		}
		pm8001_free_dev(pm8001_dev);
	} else {
		pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
	}
	dev->lldd_dev = NULL;
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}

/* libsas dev_gone hook: thin wrapper around the notify helper. */
void pm8001_dev_gone(struct domain_device *dev)
{
	pm8001_dev_gone_notify(dev);
}

/* retry commands by ha, by task and/or by device */
void pm8001_open_reject_retry(
	struct pm8001_hba_info *pm8001_ha,
	struct sas_task *task_to_close,
	struct pm8001_device *device_to_close)
{
	int i;
	unsigned long flags;

	if (pm8001_ha == NULL)
		return;

	spin_lock_irqsave(&pm8001_ha->lock, flags);

	for (i = 0; i < PM8001_MAX_CCB; i++) {
		struct sas_task *task;
		struct task_status_struct *ts;
		struct pm8001_device *pm8001_dev;
		unsigned long flags1;
		struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];

		if (ccb->ccb_tag == PM8001_INVALID_TAG)
			continue;

		pm8001_dev = ccb->device;
		if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
			continue;
		if (!device_to_close) {
			/* No device filter: sanity-check that the device
			 * pointer really lies inside the ha's devices[]
			 * array before touching it. */
			uintptr_t d = (uintptr_t)pm8001_dev
				- (uintptr_t)&pm8001_ha->devices;
			if (((d % sizeof(*pm8001_dev)) != 0)
			    || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
				continue;
		} else if (pm8001_dev != device_to_close)
			continue;
		task = ccb->task;
		if (!task || !task->task_done)
			continue;
		if (task_to_close && (task != task_to_close))
			continue;
		ts = &task->task_status;
		ts->resp = SAS_TASK_COMPLETE;
		/* Force the midlayer to retry */
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		spin_lock_irqsave(&task->task_state_lock, flags1);
		task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		if (unlikely((task->task_state_flags
			      & SAS_TASK_STATE_ABORTED))) {
			/* Aborted elsewhere: free the ccb but do not call
			 * task_done. */
			spin_unlock_irqrestore(&task->task_state_lock,
					       flags1);
			pm8001_ccb_task_free(pm8001_ha, ccb);
		} else {
			spin_unlock_irqrestore(&task->task_state_lock,
					       flags1);
			pm8001_ccb_task_free(pm8001_ha, ccb);
			mb();/* in order to force CPU ordering */
			/* task_done must run without the ha lock held. */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			task->task_done(task);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
	}

	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}

/**
 * pm8001_I_T_nexus_reset() - reset the initiator/target connection
 * @dev: the device structure for the device to reset.
 *
 * Standard mandates link reset for ATA (type 0) and hard reset for
 * SSP (type 1), only for RECOVERY
 */
int pm8001_I_T_nexus_reset(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;

	if (!dev || !dev->lldd_dev)
		return -ENODEV;

	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy = sas_get_local_phy(dev);

	if (dev_is_sata(dev)) {
		/* Directly attached SATA: nothing to do here. */
		if (scsi_is_sas_phy_local(phy)) {
			rc = 0;
			goto out;
		}
		rc = sas_phy_reset(phy, 1);
		if (rc) {
			pm8001_dbg(pm8001_ha, EH,
				   "phy reset failed for device %x\n"
				   "with rc %d\n", pm8001_dev->device_id, rc);
			rc = TMF_RESP_FUNC_FAILED;
			goto out;
		}
		msleep(2000);
		rc = sas_execute_internal_abort_dev(dev, 0, NULL);
		if (rc) {
			pm8001_dbg(pm8001_ha, EH, "task abort failed %x\n"
				   "with rc %d\n", pm8001_dev->device_id, rc);
			rc = TMF_RESP_FUNC_FAILED;
		}
	} else {
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
		   pm8001_dev->device_id, rc);
out:
	sas_put_local_phy(phy);
	return rc;
}

/*
 * This function
 *   handle the IT_NEXUS_XXX event or completion
 * status code for SSP/SATA/SMP I/O request.
 */
int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;

	if (!dev || !dev->lldd_dev)
		return -1;

	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);

	pm8001_dbg(pm8001_ha, EH, "I_T_Nexus handler invoked !!\n");

	phy = sas_get_local_phy(dev);

	if (dev_is_sata(dev)) {
		DECLARE_COMPLETION_ONSTACK(completion_setstate);
		/* Directly attached SATA: nothing to do here. */
		if (scsi_is_sas_phy_local(phy)) {
			rc = 0;
			goto out;
		}
		/* send internal ssp/sata/smp abort command to FW */
		sas_execute_internal_abort_dev(dev, 0, NULL);
		msleep(100);

		/* deregister the target device */
		pm8001_dev_gone_notify(dev);
		msleep(200);

		/*send phy reset to hard reset target */
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
		pm8001_dev->setds_completion = &completion_setstate;

		/* NOTE(review): nothing here visibly issues a set-dev-state
		 * request before this wait — confirm who completes it. */
		wait_for_completion(&completion_setstate);
	} else {
		/* send internal ssp/sata/smp abort command to FW */
		sas_execute_internal_abort_dev(dev, 0, NULL);
		msleep(100);

		/* deregister the target device */
		pm8001_dev_gone_notify(dev);
		msleep(200);

		/*send phy reset to hard reset target */
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
		   pm8001_dev->device_id, rc);
out:
	sas_put_local_phy(phy);

	return rc;
}
/* mandatory SAM-3, the task reset the specified LUN*/
int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	DECLARE_COMPLETION_ONSTACK(completion_setstate);

	if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
		/*
		 * If the controller is in fatal error state,
		 * we will not get a response from the controller
		 */
		pm8001_dbg(pm8001_ha, FAIL,
			   "LUN reset failed due to fatal errors\n");
		return rc;
	}

	if (dev_is_sata(dev)) {
		/* SATA has no LU reset TMF: abort, reset the phy, then put
		 * the device back to the operational state. */
		struct sas_phy *phy = sas_get_local_phy(dev);
		sas_execute_internal_abort_dev(dev, 0, NULL);
		rc = sas_phy_reset(phy, 1);
		sas_put_local_phy(phy);
		pm8001_dev->setds_completion = &completion_setstate;
		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
			pm8001_dev, DS_OPERATIONAL);
		wait_for_completion(&completion_setstate);
	} else {
		rc = sas_lu_reset(dev, lun);
	}
	/* If failed, fall-through I_T_Nexus reset */
	pm8001_dbg(pm8001_ha, EH, "for device[%x]:rc=%d\n",
		   pm8001_dev->device_id, rc);
	return rc;
}

/* optional SAM-3 */
int pm8001_query_task(struct sas_task *task)
{
	u32 tag = 0xdeadbeef;	/* poison value; overwritten on success */
	int rc = TMF_RESP_FUNC_FAILED;
	if (unlikely(!task || !task->lldd_task || !task->dev))
		return rc;

	/* Only SSP tasks can be queried via the QUERY TASK TMF. */
	if (task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *dev = task->dev;
		struct pm8001_hba_info *pm8001_ha =
			pm8001_find_ha_by_dev(dev);

		rc = pm8001_find_tag(task, &tag);
		if (rc == 0) {
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}
		pm8001_dbg(pm8001_ha, EH, "Query:[%16ph]\n", cmnd->cmnd);

		rc = sas_query_task(task, tag);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
			pm8001_dbg(pm8001_ha, EH,
				   "The task is still in Lun\n");
			break;
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			pm8001_dbg(pm8001_ha, EH,
				   "The task is not in Lun or failed, reset the phy\n");
			break;
		}
	}
	pr_err("pm80xx: rc= %d\n",
rc); 1056 return rc; 1057 } 1058 1059 /* mandatory SAM-3, still need free task/ccb info, abort the specified task */ 1060 int pm8001_abort_task(struct sas_task *task) 1061 { 1062 struct pm8001_ccb_info *ccb = task->lldd_task; 1063 unsigned long flags; 1064 u32 tag; 1065 struct domain_device *dev ; 1066 struct pm8001_hba_info *pm8001_ha; 1067 struct pm8001_device *pm8001_dev; 1068 int rc = TMF_RESP_FUNC_FAILED, ret; 1069 u32 port_id; 1070 struct sas_task_slow slow_task; 1071 1072 if (!task->lldd_task || !task->dev) 1073 return TMF_RESP_FUNC_FAILED; 1074 1075 dev = task->dev; 1076 pm8001_dev = dev->lldd_dev; 1077 pm8001_ha = pm8001_find_ha_by_dev(dev); 1078 1079 if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) { 1080 // If the controller is seeing fatal errors 1081 // abort task will not get a response from the controller 1082 return TMF_RESP_FUNC_FAILED; 1083 } 1084 1085 ret = pm8001_find_tag(task, &tag); 1086 if (ret == 0) { 1087 pm8001_info(pm8001_ha, "no tag for task:%p\n", task); 1088 return TMF_RESP_FUNC_FAILED; 1089 } 1090 spin_lock_irqsave(&task->task_state_lock, flags); 1091 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1092 spin_unlock_irqrestore(&task->task_state_lock, flags); 1093 return TMF_RESP_FUNC_COMPLETE; 1094 } 1095 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 1096 if (task->slow_task == NULL) { 1097 init_completion(&slow_task.completion); 1098 task->slow_task = &slow_task; 1099 } 1100 spin_unlock_irqrestore(&task->task_state_lock, flags); 1101 if (task->task_proto & SAS_PROTOCOL_SSP) { 1102 rc = sas_abort_task(task, tag); 1103 sas_execute_internal_abort_single(dev, tag, 0, NULL); 1104 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1105 task->task_proto & SAS_PROTOCOL_STP) { 1106 if (pm8001_ha->chip_id == chip_8006) { 1107 DECLARE_COMPLETION_ONSTACK(completion_reset); 1108 DECLARE_COMPLETION_ONSTACK(completion); 1109 u32 phy_id = pm80xx_get_local_phy_id(dev); 1110 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; 1111 port_id = 
phy->port->port_id; 1112 1113 /* 1. Set Device state as Recovery */ 1114 pm8001_dev->setds_completion = &completion; 1115 PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, 1116 pm8001_dev, DS_IN_RECOVERY); 1117 wait_for_completion(&completion); 1118 1119 /* 2. Send Phy Control Hard Reset */ 1120 reinit_completion(&completion); 1121 phy->port_reset_status = PORT_RESET_TMO; 1122 phy->reset_success = false; 1123 phy->enable_completion = &completion; 1124 phy->reset_completion = &completion_reset; 1125 ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, 1126 PHY_HARD_RESET); 1127 if (ret) { 1128 phy->enable_completion = NULL; 1129 phy->reset_completion = NULL; 1130 goto out; 1131 } 1132 1133 /* In the case of the reset timeout/fail we still 1134 * abort the command at the firmware. The assumption 1135 * here is that the drive is off doing something so 1136 * that it's not processing requests, and we want to 1137 * avoid getting a completion for this and either 1138 * leaking the task in libsas or losing the race and 1139 * getting a double free. 1140 */ 1141 pm8001_dbg(pm8001_ha, MSG, 1142 "Waiting for local phy ctl\n"); 1143 ret = wait_for_completion_timeout(&completion, 1144 PM8001_TASK_TIMEOUT * HZ); 1145 if (!ret || !phy->reset_success) { 1146 phy->enable_completion = NULL; 1147 phy->reset_completion = NULL; 1148 } else { 1149 /* 3. Wait for Port Reset complete or 1150 * Port reset TMO 1151 */ 1152 pm8001_dbg(pm8001_ha, MSG, 1153 "Waiting for Port reset\n"); 1154 ret = wait_for_completion_timeout( 1155 &completion_reset, 1156 PM8001_TASK_TIMEOUT * HZ); 1157 if (!ret) 1158 phy->reset_completion = NULL; 1159 WARN_ON(phy->port_reset_status == 1160 PORT_RESET_TMO); 1161 if (phy->port_reset_status == PORT_RESET_TMO) { 1162 pm8001_dev_gone_notify(dev); 1163 PM8001_CHIP_DISP->hw_event_ack_req( 1164 pm8001_ha, 0, 1165 0x07, /*HW_EVENT_PHY_DOWN ack*/ 1166 port_id, phy_id, 0, 0); 1167 goto out; 1168 } 1169 } 1170 1171 /* 1172 * 4. 
SATA Abort ALL 1173 * we wait for the task to be aborted so that the task 1174 * is removed from the ccb. on success the caller is 1175 * going to free the task. 1176 */ 1177 ret = sas_execute_internal_abort_dev(dev, 0, NULL); 1178 if (ret) 1179 goto out; 1180 ret = wait_for_completion_timeout( 1181 &task->slow_task->completion, 1182 PM8001_TASK_TIMEOUT * HZ); 1183 if (!ret) 1184 goto out; 1185 1186 /* 5. Set Device State as Operational */ 1187 reinit_completion(&completion); 1188 pm8001_dev->setds_completion = &completion; 1189 PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, 1190 pm8001_dev, DS_OPERATIONAL); 1191 wait_for_completion(&completion); 1192 } else { 1193 /* 1194 * Ensure that if we see a completion for the ccb 1195 * associated with the task which we are trying to 1196 * abort then we should not touch the sas_task as it 1197 * may race with libsas freeing it when return here. 1198 */ 1199 ccb->task = NULL; 1200 ret = sas_execute_internal_abort_single(dev, tag, 0, NULL); 1201 } 1202 rc = TMF_RESP_FUNC_COMPLETE; 1203 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 1204 /* SMP */ 1205 rc = sas_execute_internal_abort_single(dev, tag, 0, NULL); 1206 1207 } 1208 out: 1209 spin_lock_irqsave(&task->task_state_lock, flags); 1210 if (task->slow_task == &slow_task) 1211 task->slow_task = NULL; 1212 spin_unlock_irqrestore(&task->task_state_lock, flags); 1213 if (rc != TMF_RESP_FUNC_COMPLETE) 1214 pm8001_info(pm8001_ha, "rc= %d\n", rc); 1215 return rc; 1216 } 1217 1218 int pm8001_clear_task_set(struct domain_device *dev, u8 *lun) 1219 { 1220 struct pm8001_device *pm8001_dev = dev->lldd_dev; 1221 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); 1222 1223 pm8001_dbg(pm8001_ha, EH, "I_T_L_Q clear task set[%x]\n", 1224 pm8001_dev->device_id); 1225 return sas_clear_task_set(dev, lun); 1226 } 1227 1228 void pm8001_port_formed(struct asd_sas_phy *sas_phy) 1229 { 1230 struct sas_ha_struct *sas_ha = sas_phy->ha; 1231 struct pm8001_hba_info *pm8001_ha = 
sas_ha->lldd_ha; 1232 struct pm8001_phy *phy = sas_phy->lldd_phy; 1233 struct asd_sas_port *sas_port = sas_phy->port; 1234 struct pm8001_port *port = phy->port; 1235 1236 if (!sas_port) { 1237 pm8001_dbg(pm8001_ha, FAIL, "Received null port\n"); 1238 return; 1239 } 1240 sas_port->lldd_port = port; 1241 } 1242 1243 void pm8001_setds_completion(struct domain_device *dev) 1244 { 1245 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); 1246 struct pm8001_device *pm8001_dev = dev->lldd_dev; 1247 DECLARE_COMPLETION_ONSTACK(completion_setstate); 1248 1249 if (pm8001_ha->chip_id != chip_8001) { 1250 pm8001_dev->setds_completion = &completion_setstate; 1251 PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, 1252 pm8001_dev, DS_OPERATIONAL); 1253 wait_for_completion(&completion_setstate); 1254 } 1255 } 1256 1257 void pm8001_tmf_aborted(struct sas_task *task) 1258 { 1259 struct pm8001_ccb_info *ccb = task->lldd_task; 1260 1261 if (ccb) 1262 ccb->task = NULL; 1263 } 1264