// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell 88SE64xx/88SE94xx main function
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 */

#include "mv_sas.h"

static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
{
	if (task->lldd_task) {
		struct mvs_slot_info *slot;
		slot = task->lldd_task;
		*tag = slot->slot_tag;
		return 1;
	}
	return 0;
}

void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
{
	void *bitmap = mvi->tags;
	clear_bit(tag, bitmap);
}

void mvs_tag_free(struct mvs_info *mvi, u32 tag)
{
	mvs_tag_clear(mvi, tag);
}

void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
{
	void *bitmap = mvi->tags;
	set_bit(tag, bitmap);
}

inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
{
	unsigned int index, tag;
	void *bitmap = mvi->tags;

	index = find_first_zero_bit(bitmap, mvi->tags_num);
	tag = index;
	if (tag >= mvi->tags_num)
		return -SAS_QUEUE_FULL;
	mvs_tag_set(mvi, tag);
	*tag_out = tag;
	return 0;
}

void mvs_tag_init(struct mvs_info *mvi)
{
	int i;
	for (i = 0; i < mvi->tags_num; ++i)
		mvs_tag_clear(mvi, i);
}

static struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
{
	unsigned long i = 0, j = 0, hi = 0;
	struct sas_ha_struct *sha = dev->port->ha;
	struct mvs_info *mvi = NULL;
	struct asd_sas_phy *phy;

	while (sha->sas_port[i]) {
		if (sha->sas_port[i] == dev->port) {
			phy = container_of(sha->sas_port[i]->phy_list.next,
				struct asd_sas_phy, port_phy_el);
			j = 0;
			while (sha->sas_phy[j]) {
				if (sha->sas_phy[j] == phy)
					break;
				j++;
			}
			break;
		}
		i++;
	}
	hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

	return mvi;
}

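/*
 * mvs_find_dev_phyno() - map a domain device's port back to host phy
 * numbers.  Walks the libsas phy list of the port the device hangs off
 * and records, for each member phy, its index relative to this chip
 * (indices belonging to a second chip are folded back into the
 * 0..n_phy-1 range).  Returns the number of entries written to @phyno.
 */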
static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
{
	unsigned long i = 0, j = 0, n = 0, num = 0;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;
	struct sas_ha_struct *sha = dev->port->ha;

	while (sha->sas_port[i]) {
		if (sha->sas_port[i] == dev->port) {
			struct asd_sas_phy *phy;

			list_for_each_entry(phy,
				&sha->sas_port[i]->phy_list, port_phy_el) {
				j = 0;
				while (sha->sas_phy[j]) {
					if (sha->sas_phy[j] == phy)
						break;
					j++;
				}
				phyno[n] = (j >= mvi->chip->n_phy) ?
					(j - mvi->chip->n_phy) : j;
				num++;
				n++;
			}
			break;
		}
		i++;
	}
	return num;
}

struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi,
						u8 reg_set)
{
	u32 dev_no;
	for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) {
		if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED)
			continue;

		if (mvi->devices[dev_no].taskfileset == reg_set)
			return &mvi->devices[dev_no];
	}
	return NULL;
}

static inline void mvs_free_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (!dev) {
		mv_printk("device has already been freed.\n");
		return;
	}
	if (dev->taskfileset == MVS_ID_NOT_MAPPED)
		return;
	MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
}

static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (dev->taskfileset != MVS_ID_NOT_MAPPED)
		return 0;
	return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
}

void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
{
	u32 no;
	for_each_phy(phy_mask, phy_mask, no) {
		if (!(phy_mask & 1))
			continue;
		MVS_CHIP_DISP->phy_reset(mvi, no, hard);
	}
}

int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
			void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	u32 tmp, i = 0, hi;
	struct sas_ha_struct *sha = sas_phy->ha;
	struct mvs_info *mvi = NULL;

	while (sha->sas_phy[i]) {
		if (sha->sas_phy[i] == sas_phy)
			break;
		i++;
	}
	hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
		break;

	case PHY_FUNC_HARD_RESET:
		tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
		if (tmp & PHY_RST_HARD)
			break;
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET);
		break;

	case PHY_FUNC_LINK_RESET:
		MVS_CHIP_DISP->phy_enable(mvi, phy_id);
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET);
		break;

	case PHY_FUNC_DISABLE:
		MVS_CHIP_DISP->phy_disable(mvi, phy_id);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		rc = -ENOSYS;
	}
	msleep(200);
	return rc;
}

void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
				u32 off_hi, u64 sas_addr)
{
	u32 lo = (u32)sas_addr;
	u32 hi = (u32)(sas_addr>>32);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
}

static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;
	if (!phy->phy_attached)
		return;

	if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
		&& phy->phy_type & PORT_TYPE_SAS) {
		return;
	}

	sas_ha = mvi->sas;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate = phy->minimum_linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate = phy->maximum_linkrate;
		sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;

		/* direct attached SAS device */
		if (phy->att_dev_info & PORT_SSP_TRGT_MASK) {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x00);
		}
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}
	mv_dprintk("phy %d bytes dmaed.\n", i + mvi->id * mvi->chip->n_phy);

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;

	mvi->sas->notify_port_event(sas_phy,
				   PORTE_BYTES_DMAED);
}

void mvs_scan_start(struct Scsi_Host *shost)
{
	int i, j;
	unsigned short core_nr;
	struct mvs_info *mvi;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	struct mvs_prv_info *mvs_prv = sha->lldd_ha;

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;

	for (j = 0; j < core_nr; j++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
		for (i = 0; i < mvi->chip->n_phy; ++i)
			mvs_bytes_dmaed(mvi, i);
	}
	mvs_prv->scan_finished = 1;
}

int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	struct mvs_prv_info *mvs_prv = sha->lldd_ha;

	if (mvs_prv->scan_finished == 0)
		return 0;

	sas_drain_work(sha);
	return 1;
}

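/*
 * Each command slot owns one MVS_SLOT_BUF_SZ DMA buffer, which the
 * mvs_task_prep_*() helpers below carve into four regions:
 *   1. command table (SSP/ATA command frame; SMP requests are instead
 *      DMA-mapped straight from the request scatterlist),
 *   2. open address frame (OAF),
 *   3. PRD (scatter/gather) table,
 *   4. status/response buffer (whatever space remains).
 * The command header in the TX ring is then pointed at each region.
 */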
static int mvs_task_prep_smp(struct mvs_info *mvi,
				struct mvs_task_exec_info *tei)
{
	int elem, rc, i;
	struct sas_ha_struct *sha = mvi->sas;
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct domain_device *dev = task->dev;
	struct asd_sas_port *sas_port = dev->port;
	struct sas_phy *sphy = dev->phy;
	struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len, tag = tei->tag;
	void *buf_tmp;
	u8 *buf_oaf;
	dma_addr_t buf_tmp_dma;
	void *buf_prd;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);

	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(mvi->dev, sg_req, 1, DMA_TO_DEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(mvi->dev, sg_resp, 1, DMA_FROM_DEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = SB_RFB_MAX;

	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
	buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table *********************************** */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (the larger the PRD, the smaller this buffer) */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	/*
	 * Fill in TX ring and command slot header
	 */
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
					TXQ_MODE_I | tag |
					(MVS_PHY_ID << TXQ_PHY_SHIFT));

	hdr->flags |= flags;
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = 0;

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SMP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
	buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);

	return 0;

err_out_2:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
		     DMA_FROM_DEVICE);
err_out:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
		     DMA_TO_DEVICE);
	return rc;
}

static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		    qc->tf.command == ATA_CMD_FPDMA_READ ||
		    qc->tf.command == ATA_CMD_FPDMA_RECV ||
		    qc->tf.command == ATA_CMD_FPDMA_SEND ||
		    qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
			*tag = qc->tag;
			return 1;
		}
	}

	return 0;
}

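/*
 * mvs_task_prep_ata() - build a STP/SATA command slot.  Requires a free
 * SATA register set for the device, copies the host-to-device FIS (and
 * the ATAPI CDB when needed) into region 1, and encodes the NCQ tag in
 * the FIS sector count for queued commands.
 */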
static int mvs_task_prep_ata(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	void *buf_prd;
	u32 tag = tei->tag, hdr_tag;
	u32 flags, del_q;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf;
	dma_addr_t buf_tmp_dma;
	u32 i, req_len, resp_len;
	const u32 max_resp_len = SB_RFB_MAX;

	if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
		mv_dprintk("No free register set for dev %d.\n",
			mvi_dev->device_id);
		return -EBUSY;
	}
	slot = &mvi->slot_info[tag];
	slot->tx = mvi->tx_prod;
	del_q = TXQ_MODE_I | tag |
		(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
		((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) |
		(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
	mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);

	if (task->data_dir == DMA_FROM_DEVICE)
		flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
	else
		flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);

	if (task->ata_task.use_ncq)
		flags |= MCH_FPDMA;
	if (dev->sata_dev.class == ATA_DEV_ATAPI) {
		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
			flags |= MCH_ATAPI;
	}

	hdr->flags = cpu_to_le32(flags);

	if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
	else
		hdr_tag = tag;

	hdr->tags = cpu_to_le32(hdr_tag);

	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_ATA_CMD_SZ;
	buf_tmp_dma += MVS_ATA_CMD_SZ;

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	/* used for STP.  unused for SATA? */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;

	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;
	i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();

	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (the larger the PRD, the smaller this buffer) */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	req_len = sizeof(struct host_to_dev_fis);
	resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
		sizeof(struct mvs_err_info) - i;

	/* request, response lengths */
	resp_len = min(resp_len, max_resp_len);
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS and ATAPI CDB */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
	if (dev->sata_dev.class == ATA_DEV_ATAPI)
		memcpy(buf_cmd + STP_ATAPI_CMD,
			task->ata_task.atapi_packet, 16);

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, STP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
	buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);

	if (task->data_dir == DMA_FROM_DEVICE)
		MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask,
				TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);

	return 0;
}

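/*
 * mvs_task_prep_ssp() - build an SSP command slot.  Lays out the SSP
 * frame header plus either a COMMAND IU or, for TMFs, a TASK IU in
 * region 1, addressing the frame with the device's hashed SAS
 * addresses and the slot tag.
 */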
static int mvs_task_prep_ssp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei, int is_tmf,
			     struct mvs_tmf_task *tmf)
{
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct mvs_port *port = tei->port;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	void *buf_prd;
	struct ssp_frame_hdr *ssp_hdr;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf, fburst = 0;
	dma_addr_t buf_tmp_dma;
	u32 flags;
	u32 resp_len, req_len, i, tag = tei->tag;
	const u32 max_resp_len = SB_RFB_MAX;
	u32 phy_mask;

	slot = &mvi->slot_info[tag];

	phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
		sas_port->phy_mask) & TXQ_PHY_MASK;

	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
				(phy_mask << TXQ_PHY_SHIFT));

	flags = MCH_RETRY;
	if (task->ssp_task.enable_first_burst) {
		flags |= MCH_FBURST;
		fburst = (1 << 7);
	}
	if (is_tmf)
		flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
	else
		flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);

	hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_SSP_CMD_SZ;
	buf_tmp_dma += MVS_SSP_CMD_SZ;

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (the larger the PRD, the smaller this buffer) */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
	    sizeof(struct mvs_err_info) - i;
	resp_len = min(resp_len, max_resp_len);

	req_len = sizeof(struct ssp_frame_hdr) + 28;

	/* request, response lengths */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SSP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
	buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in SSP frame header (Command Table.SSP frame header) */
	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;

	if (is_tmf)
		ssp_hdr->frame_type = SSP_TASK;
	else
		ssp_hdr->frame_type = SSP_COMMAND;

	memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
	       HASHED_SAS_ADDR_SIZE);
	memcpy(ssp_hdr->hashed_src_addr,
	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	ssp_hdr->tag = cpu_to_be16(tag);

	/* fill in IU for TASK and Command Frame */
	buf_cmd += sizeof(*ssp_hdr);
	memcpy(buf_cmd, &task->ssp_task.LUN, 8);

	if (ssp_hdr->frame_type != SSP_TASK) {
		buf_cmd[9] = fburst | task->ssp_task.task_attr |
				(task->ssp_task.task_prio << 3);
		memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
		       task->ssp_task.cmd->cmd_len);
	} else {
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}
	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
	return 0;
}

#define	DEV_IS_GONE(mvi_dev)	((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED)))

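/*
 * mvs_task_prep() - common preparation path for all protocols.  Checks
 * that the port and device are usable, DMA-maps the scatterlist (ATA
 * scatterlists are not mapped here), allocates a slot tag and buffer,
 * then dispatches to the protocol-specific prep helper.  On success the
 * slot is queued on the port list and *pass is bumped so the caller
 * kicks the delivery queue.
 */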
static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
				struct mvs_tmf_task *tmf, int *pass)
{
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_task_exec_info tei;
	struct mvs_slot_info *slot;
	u32 tag = 0xdeadbeef, n_elem = 0;
	int rc = 0;

	if (!dev->port) {
		struct task_status_struct *tsm = &task->task_status;

		tsm->resp = SAS_TASK_UNDELIVERED;
		tsm->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (dev->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return rc;
	}

	if (DEV_IS_GONE(mvi_dev)) {
		if (mvi_dev)
			mv_dprintk("device %d not ready.\n",
				   mvi_dev->device_id);
		else
			mv_dprintk("device %016llx not ready.\n",
				   SAS_ADDR(dev->sas_addr));

		rc = SAS_PHY_DOWN;
		return rc;
	}
	tei.port = dev->port->lldd_port;
	if (tei.port && !tei.port->port_attached && !tmf) {
		if (sas_protocol_ata(task->task_proto)) {
			struct task_status_struct *ts = &task->task_status;
			mv_dprintk("SATA/STP port %d does not attach"
					" device.\n", dev->port->id);
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_PHY_DOWN;

			task->task_done(task);

		} else {
			struct task_status_struct *ts = &task->task_status;
			mv_dprintk("SAS port %d does not attach"
				" device.\n", dev->port->id);
			ts->resp = SAS_TASK_UNDELIVERED;
			ts->stat = SAS_PHY_DOWN;
			task->task_done(task);
		}
		return rc;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(mvi->dev,
					    task->scatter,
					    task->num_scatter,
					    task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		}
	} else {
		n_elem = task->num_scatter;
	}

	rc = mvs_tag_alloc(mvi, &tag);
	if (rc)
		goto err_out;

	slot = &mvi->slot_info[tag];

	task->lldd_task = NULL;
	slot->n_elem = n_elem;
	slot->slot_tag = tag;

	slot->buf = dma_pool_zalloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_tag;
	}

	tei.task = task;
	tei.hdr = &mvi->slot[tag];
	tei.tag = tag;
	tei.n_elem = n_elem;
	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		rc = mvs_task_prep_smp(mvi, &tei);
		break;
	case SAS_PROTOCOL_SSP:
		rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		rc = mvs_task_prep_ata(mvi, &tei);
		break;
	default:
		dev_printk(KERN_ERR, mvi->dev,
			"unknown sas_task proto: 0x%x\n",
			task->task_proto);
		rc = -EINVAL;
		break;
	}

	if (rc) {
		mv_dprintk("rc is %x\n", rc);
		goto err_out_slot_buf;
	}
	slot->task = task;
	slot->port = tei.port;
	task->lldd_task = slot;
	list_add_tail(&slot->entry, &tei.port->list);
	spin_lock(&task->task_state_lock);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock(&task->task_state_lock);

	mvi_dev->running_req++;
	++(*pass);
	mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);

	return rc;

err_out_slot_buf:
	dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
err_out_tag:
	mvs_tag_free(mvi, tag);
err_out:

	dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc);
	if (!sas_protocol_ata(task->task_proto))
		if (n_elem)
			dma_unmap_sg(mvi->dev, task->scatter, n_elem,
				     task->data_dir);
prep_out:
	return rc;
}

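/*
 * mvs_task_exec() - queue a sas_task to the controller.  Takes the host
 * lock, preps the task, and rings the TX delivery register if at least
 * one slot was produced.
 */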
static int mvs_task_exec(struct sas_task *task, gfp_t gfp_flags,
				struct completion *completion, int is_tmf,
				struct mvs_tmf_task *tmf)
{
	struct mvs_info *mvi = NULL;
	u32 rc = 0;
	u32 pass = 0;
	unsigned long flags = 0;

	mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;

	spin_lock_irqsave(&mvi->lock, flags);
	rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass);
	if (rc)
		dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);

	if (likely(pass))
		MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
			(MVS_CHIP_SLOT_SZ - 1));
	spin_unlock_irqrestore(&mvi->lock, flags);

	return rc;
}

int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return mvs_task_exec(task, gfp_flags, NULL, 0, NULL);
}

static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	mvs_tag_clear(mvi, slot_idx);
}

static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
			  struct mvs_slot_info *slot, u32 slot_idx)
{
	if (!slot)
		return;
	if (!slot->task)
		return;
	if (!sas_protocol_ata(task->task_proto))
		if (slot->n_elem)
			dma_unmap_sg(mvi->dev, task->scatter,
				     slot->n_elem, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
			     DMA_TO_DEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}

	if (slot->buf) {
		dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
		slot->buf = NULL;
	}
	list_del_init(&slot->entry);
	task->lldd_task = NULL;
	slot->task = NULL;
	slot->port = NULL;
	slot->slot_tag = 0xFFFFFFFF;
	mvs_slot_free(mvi, slot_idx);
}

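/*
 * mvs_update_wideport() - push the current wide-port phy map to the
 * PHYR_WIDE_PORT register of every phy: member phys get the map,
 * non-members get zero, so the hardware keeps the port routed over
 * exactly the phys still in the map.
 */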
static void mvs_update_wideport(struct mvs_info *mvi, int phy_no)
{
	struct mvs_phy *phy = &mvi->phy[phy_no];
	struct mvs_port *port = phy->port;
	int j, no;

	for_each_phy(port->wide_port_phymap, j, no) {
		if (j & 1) {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
						PHYR_WIDE_PORT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
						port->wide_port_phymap);
		} else {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
						PHYR_WIDE_PORT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
						0);
		}
	}
}

static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;

	tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
		if (!port)
			phy->phy_attached = 1;
		return tmp;
	}

	if (port) {
		if (phy->phy_type & PORT_TYPE_SAS) {
			port->wide_port_phymap &= ~(1U << i);
			if (!port->wide_port_phymap)
				port->port_attached = 0;
			mvs_update_wideport(mvi, i);
		} else if (phy->phy_type & PORT_TYPE_SATA)
			port->port_attached = 0;
		phy->port = NULL;
		phy->phy_attached = 0;
		phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	}

	return 0;
}

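/*
 * mvs_get_d2h_reg() - reassemble the SATA signature from the four
 * PHYR_SATA_SIG registers into @buf, which callers use as the
 * received-frame contents (a D2H register FIS) for a directly attached
 * SATA device.
 */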
static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
{
	u32 *s = (u32 *) buf;

	if (!s)
		return NULL;

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
	s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
	s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
	s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
	s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
		s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);

	return s;
}

static u32 mvs_is_sig_fis_received(u32 irq_status)
{
	return irq_status & PHYEV_SIG_FIS;
}

static void mvs_sig_remove_timer(struct mvs_phy *phy)
{
	if (phy->timer.function)
		del_timer(&phy->timer);
	phy->timer.function = NULL;
}

void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct sas_identify_frame *id;

	id = (struct sas_identify_frame *)phy->frame_rcvd;

	if (get_st) {
		phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
		phy->phy_status = mvs_is_phy_ready(mvi, i);
	}

	if (phy->phy_status) {
		int oob_done = 0;
		struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;

		oob_done = MVS_CHIP_DISP->oob_done(mvi, i);

		MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
		if (phy->phy_type & PORT_TYPE_SATA) {
			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
			if (mvs_is_sig_fis_received(phy->irq_status)) {
				mvs_sig_remove_timer(phy);
				phy->phy_attached = 1;
				phy->att_dev_sas_addr =
					i + mvi->id * mvi->chip->n_phy;
				if (oob_done)
					sas_phy->oob_mode = SATA_OOB_MODE;
				phy->frame_rcvd_size =
					sizeof(struct dev_to_host_fis);
				mvs_get_d2h_reg(mvi, i, id);
			} else {
				u32 tmp;
				dev_printk(KERN_DEBUG, mvi->dev,
					"Phy%d : No sig fis\n", i);
				tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
				MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
						tmp | PHYEV_SIG_FIS);
				phy->phy_attached = 0;
				phy->phy_type &= ~PORT_TYPE_SATA;
				goto out_done;
			}
		} else if (phy->phy_type & PORT_TYPE_SAS
			|| phy->att_dev_info & PORT_SSP_INIT_MASK) {
			phy->phy_attached = 1;
			phy->identify.device_type =
				phy->att_dev_info & PORT_DEV_TYPE_MASK;

			if (phy->identify.device_type == SAS_END_DEVICE)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SSP;
			else if (phy->identify.device_type != SAS_PHY_UNUSED)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SMP;
			if (oob_done)
				sas_phy->oob_mode = SAS_OOB_MODE;
			phy->frame_rcvd_size =
			    sizeof(struct sas_identify_frame);
		}
		memcpy(sas_phy->attached_sas_addr,
			&phy->att_dev_sas_addr, SAS_ADDR_SIZE);

		if (MVS_CHIP_DISP->phy_work_around)
			MVS_CHIP_DISP->phy_work_around(mvi, i);
	}
	mv_dprintk("phy %d attach dev info is %x\n",
		i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
	mv_dprintk("phy %d attach sas addr is %llx\n",
		i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
out_done:
	if (get_st)
		MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
}

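/*
 * mvs_port_notify_formed() - bind a freshly formed libsas port to its
 * mvs_port, program the wide-port phy map for SAS, and flag direct
 * attached SSP targets via PHYR_PHY_STAT.  @lock says whether the
 * caller already holds mvi->lock.
 */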
static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct mvs_info *mvi = NULL;
	int i = 0, hi;
	struct mvs_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct mvs_port *port;
	unsigned long flags = 0;

	if (!sas_port)
		return;

	while (sas_ha->sas_phy[i]) {
		if (sas_ha->sas_phy[i] == sas_phy)
			break;
		i++;
	}
	hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
	if (i >= mvi->chip->n_phy)
		port = &mvi->port[i - mvi->chip->n_phy];
	else
		port = &mvi->port[i];
	if (lock)
		spin_lock_irqsave(&mvi->lock, flags);
	port->port_attached = 1;
	phy->port = port;
	sas_port->lldd_port = port;
	if (phy->phy_type & PORT_TYPE_SAS) {
		port->wide_port_phymap = sas_port->phy_mask;
		mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
		mvs_update_wideport(mvi, sas_phy->id);

		/* direct attached SAS device */
		if (phy->att_dev_info & PORT_SSP_TRGT_MASK) {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x04);
		}
	}
	if (lock)
		spin_unlock_irqrestore(&mvi->lock, flags);
}

static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
{
	struct domain_device *dev;
	struct mvs_phy *phy = sas_phy->lldd_phy;
	struct mvs_info *mvi = phy->mvi;
	struct asd_sas_port *port = sas_phy->port;
	int phy_no = 0;

	while (phy != &mvi->phy[phy_no]) {
		phy_no++;
		if (phy_no >= MVS_MAX_PHYS)
			return;
	}
	list_for_each_entry(dev, &port->dev_list, dev_list_node)
		mvs_do_release_task(phy->mvi, phy_no, dev);
}

void mvs_port_formed(struct asd_sas_phy *sas_phy)
{
	mvs_port_notify_formed(sas_phy, 1);
}

void mvs_port_deformed(struct asd_sas_phy *sas_phy)
{
	mvs_port_notify_deformed(sas_phy, 1);
}

static struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
{
	u32 dev;
	for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
		if (mvi->devices[dev].dev_type == SAS_PHY_UNUSED) {
			mvi->devices[dev].device_id = dev;
			return &mvi->devices[dev];
		}
	}

	if (dev == MVS_MAX_DEVICES)
		mv_printk("supports at most %d devices, ignoring ..\n",
			  MVS_MAX_DEVICES);

	return NULL;
}

static void mvs_free_dev(struct mvs_device *mvi_dev)
{
	u32 id = mvi_dev->device_id;
	memset(mvi_dev, 0, sizeof(*mvi_dev));
	mvi_dev->device_id = id;
	mvi_dev->dev_type = SAS_PHY_UNUSED;
	mvi_dev->dev_status = MVS_DEV_NORMAL;
	mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
}

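/*
 * mvs_dev_found_notify() - allocate an mvs_device for a newly found
 * domain device and, for expander-attached devices, record which
 * expander phy leads to it.
 */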
static int mvs_dev_found_notify(struct domain_device *dev, int lock)
{
	unsigned long flags = 0;
	int res = 0;
	struct mvs_info *mvi = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct mvs_device *mvi_device;

	mvi = mvs_find_dev_mvi(dev);

	if (lock)
		spin_lock_irqsave(&mvi->lock, flags);

	mvi_device = mvs_alloc_dev(mvi);
	if (!mvi_device) {
		res = -1;
		goto found_out;
	}
	dev->lldd_dev = mvi_device;
	mvi_device->dev_status = MVS_DEV_NORMAL;
	mvi_device->dev_type = dev->dev_type;
	mvi_device->mvi_info = mvi;
	mvi_device->sas_device = dev;
	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_id;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;
		for (phy_id = 0; phy_id < phy_num; phy_id++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_id];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(dev->sas_addr)) {
				mvi_device->attached_phy = phy_id;
				break;
			}
		}

		if (phy_id == phy_num) {
			mv_printk("Error: no attached dev:%016llx"
				" at ex:%016llx.\n",
				SAS_ADDR(dev->sas_addr),
				SAS_ADDR(parent_dev->sas_addr));
			res = -1;
		}
	}

found_out:
	if (lock)
		spin_unlock_irqrestore(&mvi->lock, flags);
	return res;
}

int mvs_dev_found(struct domain_device *dev)
{
	return mvs_dev_found_notify(dev, 1);
}

static void mvs_dev_gone_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_info *mvi;

	if (!mvi_dev) {
		mv_dprintk("device is already gone.\n");
		return;
	}

	mvi = mvi_dev->mvi_info;

	spin_lock_irqsave(&mvi->lock, flags);

	mv_dprintk("found dev[%d:%x] is gone.\n",
		mvi_dev->device_id, mvi_dev->dev_type);
	mvs_release_task(mvi, dev);
	mvs_free_reg_set(mvi, mvi_dev);
	mvs_free_dev(mvi_dev);

	dev->lldd_dev = NULL;
	mvi_dev->sas_device = NULL;

	spin_unlock_irqrestore(&mvi->lock, flags);
}

void mvs_dev_gone(struct domain_device *dev)
{
	mvs_dev_gone_notify(dev);
}

static void mvs_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
}

static void mvs_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;

	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	complete(&task->slow_task->completion);
}

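/*
 * mvs_exec_internal_tmf_task() - issue a task management function as an
 * internal slow task and wait for it, retrying up to three times.  A
 * 20-second timer aborts the wait; a data underrun returns the residual
 * byte count instead of a TMF status code.
 */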
#define MVS_TASK_TIMEOUT 20
static int mvs_exec_internal_tmf_task(struct domain_device *dev,
			void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
{
	int res, retry;
	struct sas_task *task = NULL;

	for (retry = 0; retry < 3; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = dev;
		task->task_proto = dev->tproto;

		memcpy(&task->ssp_task, parameter, para_len);
		task->task_done = mvs_task_done;

		task->slow_task->timer.function = mvs_tmf_timedout;
		task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = mvs_task_exec(task, GFP_KERNEL, NULL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			mv_printk("executing internal task failed:%d\n", res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun */
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			mv_dprintk("blocked task error.\n");
			res = -EMSGSIZE;
			break;
		} else {
			mv_dprintk(" task to dev %016llx response: 0x%x "
					"status 0x%x\n",
					SAS_ADDR(dev->sas_addr),
					task->task_status.resp,
					task->task_status.stat);
			sas_free_task(task);
			task = NULL;
		}
	}
ex_err:
	BUG_ON(retry == 3 && task != NULL);
	sas_free_task(task);
	return res;
}

static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
				u8 *lun, struct mvs_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;
	if (!(dev->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return mvs_exec_internal_tmf_task(dev, &ssp_task,
				sizeof(ssp_task), tmf);
}

/*
 * The standard mandates a link reset for ATA (type 0) and a hard reset
 * for SSP (type 1), only for RECOVERY.
 */
static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
{
	int rc;
	struct sas_phy *phy = sas_get_local_phy(dev);
	int reset_type = (dev->dev_type == SAS_SATA_DEV ||
			(dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
	rc = sas_phy_reset(phy, reset_type);
	sas_put_local_phy(phy);
	msleep(2000);
	return rc;
}

/* mandatory SAM-3 */
int mvs_lu_reset(struct domain_device *dev, u8 *lun)
{
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	tmf_task.tmf = TMF_LU_RESET;
	mvi_dev->dev_status = MVS_DEV_EH;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&mvi->lock, flags);
		mvs_release_task(mvi, dev);
		spin_unlock_irqrestore(&mvi->lock, flags);
	}
	/* If failed, fall through to I_T_Nexus reset */
	mv_printk("%s for device[%x]:rc= %d\n", __func__,
			mvi_dev->device_id, rc);
	return rc;
}

int mvs_I_T_nexus_reset(struct domain_device *dev)
{
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	if (mvi_dev->dev_status != MVS_DEV_EH)
		return TMF_RESP_FUNC_COMPLETE;
	else
		mvi_dev->dev_status = MVS_DEV_NORMAL;
	rc = mvs_debug_I_T_nexus_reset(dev);
	mv_printk("%s for device[%x]:rc= %d\n",
			__func__, mvi_dev->device_id, rc);

	spin_lock_irqsave(&mvi->lock, flags);
	mvs_release_task(mvi, dev);
	spin_unlock_irqrestore(&mvi->lock, flags);

	return rc;
}

/* optional SAM-3 */
int mvs_query_task(struct sas_task *task)
{
	u32 tag;
	struct scsi_lun lun;
	struct mvs_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = (struct scsi_cmnd *)task->uldd_task;
		struct domain_device *dev = task->dev;
		struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
		struct mvs_info *mvi = mvi_dev->mvi_info;

		int_to_scsilun(cmnd->device->lun, &lun);
		rc = mvs_find_tag(mvi, task, &tag);
		if (rc == 0) {
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}

		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
		switch (rc) {
		/* The task is still in the LUN, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in the LUN or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		}
	}
	mv_printk("%s:rc= %d\n", __func__, rc);
	return rc;
}

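/*
 * mvs_abort_task() - abort one outstanding sas_task.  For SSP this
 * issues an ABORT TASK TMF and, on success, completes the slot locally;
 * for SATA/STP it simply tears the slot down, since the TMF path does
 * not apply.
 */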
/* mandatory SAM-3, still need to free task/slot info */
int mvs_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct mvs_tmf_task tmf_task;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;
	u32 tag;

	if (!mvi_dev) {
		mv_printk("Device has been removed\n");
		return TMF_RESP_FUNC_FAILED;
	}

	mvi = mvi_dev->mvi_info;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	mvi_dev->dev_status = MVS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = (struct scsi_cmnd *)task->uldd_task;

		int_to_scsilun(cmnd->device->lun, &lun);
		rc = mvs_find_tag(mvi, task, &tag);
		if (rc == 0) {
			mv_printk("No such tag in %s\n", __func__);
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}

		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);

		/* if successful, clear the task and callback forwards. */
		if (rc == TMF_RESP_FUNC_COMPLETE) {
			u32 slot_no;
			struct mvs_slot_info *slot;

			if (task->lldd_task) {
				slot = task->lldd_task;
				slot_no = (u32) (slot - mvi->slot_info);
				spin_lock_irqsave(&mvi->lock, flags);
				mvs_slot_complete(mvi, slot_no, 1);
				spin_unlock_irqrestore(&mvi->lock, flags);
			}
		}

	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (SAS_SATA_DEV == dev->dev_type) {
			struct mvs_slot_info *slot = task->lldd_task;
			u32 slot_idx = (u32)(slot - mvi->slot_info);
			mv_dprintk("mvs_abort_task() mvi=%p task=%p "
				   "slot=%p slot_idx=x%x\n",
				   mvi, task, slot, slot_idx);
			task->task_state_flags |= SAS_TASK_STATE_ABORTED;
			mvs_slot_task_free(mvi, task, slot, slot_idx);
			rc = TMF_RESP_FUNC_COMPLETE;
			goto out;
		}

	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		mv_printk("%s:rc= %d\n", __func__, rc);
	return rc;
}

int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
{
	int rc;
	struct mvs_tmf_task tmf_task;

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}

int mvs_clear_aca(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}

int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_TASK_SET;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}

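/*
 * mvs_sata_done() - copy the received D2H FIS out of the device's SATA
 * register set into the task's response buffer and translate the error
 * word into a SAS status code (open reject for stopped commands,
 * protocol response otherwise).
 */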
static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
			u32 slot_idx, int err)
{
	struct mvs_device *mvi_dev = task->dev->lldd_dev;
	struct task_status_struct *tstat = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
	int stat = SAM_STAT_GOOD;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0],
	       SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
	       sizeof(struct dev_to_host_fis));
	tstat->buf_valid_size = sizeof(*resp);
	if (unlikely(err)) {
		if (unlikely(err & CMD_ISS_STPD))
			stat = SAS_OPEN_REJECT;
		else
			stat = SAS_PROTO_RESPONSE;
	}

	return stat;
}

static void mvs_set_sense(u8 *buffer, int len, int d_sense,
		int key, int asc, int ascq)
{
	memset(buffer, 0, len);

	if (d_sense) {
		/* Descriptor format */
		if (len < 4) {
			mv_printk("Length %d of sense buffer too small to "
				"fit sense %x:%x:%x", len, key, asc, ascq);
		}

		buffer[0] = 0x72;		/* Response Code */
		if (len > 1)
			buffer[1] = key;	/* Sense Key */
		if (len > 2)
			buffer[2] = asc;	/* ASC */
		if (len > 3)
			buffer[3] = ascq;	/* ASCQ */
	} else {
		if (len < 14) {
			mv_printk("Length %d of sense buffer too small to "
				"fit sense %x:%x:%x", len, key, asc, ascq);
		}

		buffer[0] = 0x70;		/* Response Code */
		if (len > 2)
			buffer[2] = key;	/* Sense Key */
		if (len > 7)
			buffer[7] = 0x0a;	/* Additional Sense Length */
		if (len > 12)
			buffer[12] = asc;	/* ASC */
		if (len > 13)
			buffer[13] = ascq;	/* ASCQ */
	}

	return;
}

static void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
				u8 key, u8 asc, u8 asc_q)
{
	iu->datapres = 2;
	iu->response_data_len = 0;
	iu->sense_data_len = 17;
	iu->status = 0x02;	/* CHECK CONDITION */
	mvs_set_sense(iu->sense_data, 17, 0,
			key, asc, asc_q);
}

static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
			u32 slot_idx)
{
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	int stat;
	u32 err_dw0 = le32_to_cpu(*(u32 *)slot->response);
	u32 err_dw1 = le32_to_cpu(*((u32 *)slot->response + 1));
	u32 tfs = 0;
	enum mvs_port_type type = PORT_TYPE_SAS;

	if (err_dw0 & CMD_ISS_STPD)
		MVS_CHIP_DISP->issue_stop(mvi, type, tfs);

	MVS_CHIP_DISP->command_active(mvi, slot_idx);

	stat = SAM_STAT_CHECK_CONDITION;
	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
	{
		stat = SAS_ABORTED_TASK;
		if ((err_dw0 & NO_DEST) || err_dw1 & bit(31)) {
			struct ssp_response_iu *iu = slot->response +
				sizeof(struct mvs_err_info);
			mvs_fill_ssp_resp_iu(iu, NOT_READY, 0x04, 0x01);
			sas_ssp_task_response(mvi->dev, task, iu);
			stat = SAM_STAT_CHECK_CONDITION;
		}
		if (err_dw1 & bit(31))
			mv_printk("reuse same slot, retry command.\n");
		break;
	}
	case SAS_PROTOCOL_SMP:
		stat = SAM_STAT_CHECK_CONDITION;
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
	{
		task->ata_task.use_ncq = 0;
		stat = SAS_PROTO_RESPONSE;
		mvs_sata_done(mvi, task, slot_idx, err_dw0);
	}
		break;
	default:
		break;
	}

	return stat;
}

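/*
 * mvs_slot_complete() - completion path for one RX descriptor.  Decodes
 * the descriptor and any error-info record, fills in the libsas task
 * status per protocol, frees the slot, and calls task_done() with
 * mvi->lock dropped.
 */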
int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	struct sas_task *task = slot->task;
	struct mvs_device *mvi_dev = NULL;
	struct task_status_struct *tstat;
	struct domain_device *dev;
	u32 aborted;

	void *to;
	enum exec_status sts;

	if (unlikely(!task || !task->lldd_task || !task->dev))
		return -1;

	tstat = &task->task_status;
	dev = task->dev;
	mvi_dev = dev->lldd_dev;

	spin_lock(&task->task_state_lock);
	task->task_state_flags &=
		~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	/* race condition */
	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
	spin_unlock(&task->task_state_lock);

	memset(tstat, 0, sizeof(*tstat));
	tstat->resp = SAS_TASK_COMPLETE;

	if (unlikely(aborted)) {
		tstat->stat = SAS_ABORTED_TASK;
		if (mvi_dev && mvi_dev->running_req)
			mvi_dev->running_req--;
		if (sas_protocol_ata(task->task_proto))
			mvs_free_reg_set(mvi, mvi_dev);

		mvs_slot_task_free(mvi, task, slot, slot_idx);
		return -1;
	}

	/* when no device is attached, go ahead and complete by error handling */
	if (unlikely(!mvi_dev || flags)) {
		if (!mvi_dev)
			mv_dprintk("port has no device.\n");
		tstat->stat = SAS_PHY_DOWN;
		goto out;
	}

	/*
	 * error info record present; slot->response is 32 bit aligned but may
	 * not be 64 bit aligned, so check for zero in two 32 bit reads
	 */
	if (unlikely((rx_desc & RXQ_ERR)
		     && (*((u32 *)slot->response)
			 || *(((u32 *)slot->response) + 1)))) {
		mv_dprintk("port %d slot %d rx_desc %X has error info"
			"%016llX.\n", slot->port->sas_port.id, slot_idx,
			 rx_desc, get_unaligned_le64(slot->response));
		tstat->stat = mvs_slot_err(mvi, task, slot_idx);
		tstat->resp = SAS_TASK_COMPLETE;
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		/* hw says status == 0, datapres == 0 */
		if (rx_desc & RXQ_GOOD) {
			tstat->stat = SAM_STAT_GOOD;
			tstat->resp = SAS_TASK_COMPLETE;
		}
		/* response frame present */
		else if (rx_desc & RXQ_RSP) {
			struct ssp_response_iu *iu = slot->response +
						sizeof(struct mvs_err_info);
			sas_ssp_task_response(mvi->dev, task, iu);
		} else
			tstat->stat = SAM_STAT_CHECK_CONDITION;
		break;

	case SAS_PROTOCOL_SMP: {
			struct scatterlist *sg_resp = &task->smp_task.smp_resp;
			tstat->stat = SAM_STAT_GOOD;
			to = kmap_atomic(sg_page(sg_resp));
			memcpy(to + sg_resp->offset,
				slot->response + sizeof(struct mvs_err_info),
				sg_dma_len(sg_resp));
			kunmap_atomic(to);
			break;
		}

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
			tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
			break;
		}

	default:
		tstat->stat = SAM_STAT_CHECK_CONDITION;
		break;
	}
	if (!slot->port->port_attached) {
		mv_dprintk("port %d has been removed.\n", slot->port->sas_port.id);
		tstat->stat = SAS_PHY_DOWN;
	}

out:
	if (mvi_dev && mvi_dev->running_req) {
		mvi_dev->running_req--;
		if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req)
			mvs_free_reg_set(mvi, mvi_dev);
	}
	mvs_slot_task_free(mvi, task, slot, slot_idx);
	sts = tstat->stat;

	spin_unlock(&mvi->lock);
	if (task->task_done)
		task->task_done(task);

	spin_lock(&mvi->lock);

	return sts;
}

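/*
 * mvs_do_release_task() - force-complete every slot still queued on the
 * port behind @phy_no, optionally filtered to one domain device.  The
 * completion queue is drained first so already-finished requests are
 * not completed twice.
 */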
void mvs_do_release_task(struct mvs_info *mvi,
		int phy_no, struct domain_device *dev)
{
	u32 slot_idx;
	struct mvs_phy *phy;
	struct mvs_port *port;
	struct mvs_slot_info *slot, *slot2;

	phy = &mvi->phy[phy_no];
	port = phy->port;
	if (!port)
		return;
	/* clean cmpl queue in case request is already finished */
	mvs_int_rx(mvi, false);

	list_for_each_entry_safe(slot, slot2, &port->list, entry) {
		struct sas_task *task;
		slot_idx = (u32) (slot - mvi->slot_info);
		task = slot->task;

		if (dev && task->dev != dev)
			continue;

		mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
			slot_idx, slot->slot_tag, task);
		MVS_CHIP_DISP->command_active(mvi, slot_idx);

		mvs_slot_complete(mvi, slot_idx, 1);
	}
}

void mvs_release_task(struct mvs_info *mvi,
		      struct domain_device *dev)
{
	int i, phyno[WIDE_PORT_MAX_PHY], num;
	num = mvs_find_dev_phyno(dev, phyno);
	for (i = 0; i < num; i++)
		mvs_do_release_task(mvi, phyno[i], dev);
}

static void mvs_phy_disconnected(struct mvs_phy *phy)
{
	phy->phy_attached = 0;
	phy->att_dev_info = 0;
	phy->att_dev_sas_addr = 0;
}

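/*
 * mvs_work_queue() - delayed-work handler for hot-plug and broadcast
 * events queued by mvs_handle_event().  Re-checks phy state under
 * mvi->lock and notifies libsas of loss of signal, re-attachment, or a
 * broadcast change.
 */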
static void mvs_work_queue(struct work_struct *work)
{
	struct delayed_work *dw = container_of(work, struct delayed_work, work);
	struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
	struct mvs_info *mvi = mwq->mvi;
	unsigned long flags;
	u32 phy_no = (unsigned long) mwq->data;
	struct sas_ha_struct *sas_ha = mvi->sas;
	struct mvs_phy *phy = &mvi->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	spin_lock_irqsave(&mvi->lock, flags);
	if (mwq->handler & PHY_PLUG_EVENT) {

		if (phy->phy_event & PHY_PLUG_OUT) {
			u32 tmp;

			tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
			phy->phy_event &= ~PHY_PLUG_OUT;
			if (!(tmp & PHY_READY_MASK)) {
				sas_phy_disconnected(sas_phy);
				mvs_phy_disconnected(phy);
				sas_ha->notify_phy_event(sas_phy,
					PHYE_LOSS_OF_SIGNAL);
				mv_dprintk("phy%d Removed Device\n", phy_no);
			} else {
				MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
				mvs_update_phyinfo(mvi, phy_no, 1);
				mvs_bytes_dmaed(mvi, phy_no);
				mvs_port_notify_formed(sas_phy, 0);
				mv_dprintk("phy%d Attached Device\n", phy_no);
			}
		}
	} else if (mwq->handler & EXP_BRCT_CHG) {
		phy->phy_event &= ~EXP_BRCT_CHG;
		sas_ha->notify_port_event(sas_phy,
				PORTE_BROADCAST_RCVD);
		mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
	}
	list_del(&mwq->entry);
	spin_unlock_irqrestore(&mvi->lock, flags);
	kfree(mwq);
}

static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
{
	struct mvs_wq *mwq;
	int ret = 0;

	mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
	if (mwq) {
		mwq->mvi = mvi;
		mwq->data = data;
		mwq->handler = handler;
		MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
		list_add_tail(&mwq->entry, &mvi->wq_list);
		schedule_delayed_work(&mwq->work_q, HZ * 2);
	} else
		ret = -ENOMEM;

	return ret;
}

static void mvs_sig_time_out(struct timer_list *t)
{
	struct mvs_phy *phy = from_timer(phy, t, timer);
	struct mvs_info *mvi = phy->mvi;
	u8 phy_no;

	for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
		if (&mvi->phy[phy_no] == phy) {
			mv_dprintk("Get signature time out, reset phy %d\n",
				phy_no+mvi->id*mvi->chip->n_phy);
			MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_HARD_RESET);
		}
	}
}

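/*
 * mvs_int_port() - per-phy interrupt handler.  Reads and acks the port
 * interrupt status, then handles decoding errors, phy loss (POOF),
 * COMWAKE/signature-FIS arrival, and broadcast changes, deferring
 * hot-plug and broadcast work to mvs_handle_event().
 */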
void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[phy_no];

	phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
	MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
	mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy,
		MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
	mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy,
		phy->irq_status);

	/*
	 * 'events' carries port events now; we need to check the
	 * interrupt status belonging to each port.
	 */

	if (phy->irq_status & PHYEV_DCDR_ERR) {
		mv_dprintk("phy %d STP decoding error.\n",
			phy_no + mvi->id*mvi->chip->n_phy);
	}

	if (phy->irq_status & PHYEV_POOF) {
		mdelay(500);
		if (!(phy->phy_event & PHY_PLUG_OUT)) {
			int dev_sata = phy->phy_type & PORT_TYPE_SATA;
			int ready;
			mvs_do_release_task(mvi, phy_no, NULL);
			phy->phy_event |= PHY_PLUG_OUT;
			MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1);
			mvs_handle_event(mvi,
				(void *)(unsigned long)phy_no,
				PHY_PLUG_EVENT);
			ready = mvs_is_phy_ready(mvi, phy_no);
			if (ready || dev_sata) {
				if (MVS_CHIP_DISP->stp_reset)
					MVS_CHIP_DISP->stp_reset(mvi,
							phy_no);
				else
					MVS_CHIP_DISP->phy_reset(mvi,
							phy_no, MVS_SOFT_RESET);
				return;
			}
		}
	}

	if (phy->irq_status & PHYEV_COMWAKE) {
		tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
		MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
					tmp | PHYEV_SIG_FIS);
		if (phy->timer.function == NULL) {
			phy->timer.function = mvs_sig_time_out;
			phy->timer.expires = jiffies + 5*HZ;
			add_timer(&phy->timer);
		}
	}
	if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
		phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
		mv_dprintk("notify plug in on phy[%d]\n", phy_no);
		if (phy->phy_status) {
			mdelay(10);
			MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
			if (phy->phy_type & PORT_TYPE_SATA) {
				tmp = MVS_CHIP_DISP->read_port_irq_mask(
						mvi, phy_no);
				tmp &= ~PHYEV_SIG_FIS;
				MVS_CHIP_DISP->write_port_irq_mask(mvi,
						phy_no, tmp);
			}
			mvs_update_phyinfo(mvi, phy_no, 0);
			if (phy->phy_type & PORT_TYPE_SAS) {
				MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE);
				mdelay(10);
			}

			mvs_bytes_dmaed(mvi, phy_no);
			/* whether driver is going to handle hot plug */
			if (phy->phy_event & PHY_PLUG_OUT) {
				mvs_port_notify_formed(&phy->sas_phy, 0);
				phy->phy_event &= ~PHY_PLUG_OUT;
			}
		} else {
			mv_dprintk("plugin interrupt but phy%d is gone\n",
				phy_no + mvi->id*mvi->chip->n_phy);
		}
	} else if (phy->irq_status & PHYEV_BROAD_CH) {
		mv_dprintk("phy %d broadcast change.\n",
			phy_no + mvi->id*mvi->chip->n_phy);
		mvs_handle_event(mvi, (void *)(unsigned long)phy_no,
				EXP_BRCT_CHG);
	}
}

int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
{
	u32 rx_prod_idx, rx_desc;
	bool attn = false;

	/* the first dword in the RX ring is special: it contains
	 * a mirror of the hardware's RX producer index, so that
	 * we don't have to stall the CPU reading that register.
	 * The actual RX ring is offset by one dword, due to this.
	 */
	rx_prod_idx = mvi->rx_cons;
	mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
	if (mvi->rx_cons == 0xfff)	/* h/w hasn't touched RX ring yet */
		return 0;

	/* The CMPL_Q may come late, read from register and try again
	 * note: if coalescing is enabled,
	 * it will need to read from the register every time for sure
	 */
	if (unlikely(mvi->rx_cons == rx_prod_idx))
		mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;

	if (mvi->rx_cons == rx_prod_idx)
		return 0;

	while (mvi->rx_cons != rx_prod_idx) {
		/* increment our internal RX consumer pointer */
		rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
		rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);

		if (likely(rx_desc & RXQ_DONE))
			mvs_slot_complete(mvi, rx_desc, 0);
		if (rx_desc & RXQ_ATTN) {
			attn = true;
		} else if (rx_desc & RXQ_ERR) {
			if (!(rx_desc & RXQ_DONE))
				mvs_slot_complete(mvi, rx_desc, 0);
		} else if (rx_desc & RXQ_SLOT_RESET) {
			mvs_slot_free(mvi, rx_desc);
		}
	}

	if (attn && self_clear)
		MVS_CHIP_DISP->int_full(mvi);
	return 0;
}

int mvs_gpio_write(struct sas_ha_struct *sha, u8 reg_type, u8 reg_index,
			u8 reg_count, u8 *write_data)
{
	struct mvs_prv_info *mvs_prv = sha->lldd_ha;
	struct mvs_info *mvi = mvs_prv->mvi[0];

	if (MVS_CHIP_DISP->gpio_write) {
		return MVS_CHIP_DISP->gpio_write(mvs_prv, reg_type,
			reg_index, reg_count, write_data);
	}

	return -ENOSYS;
}