Lines matching +full:sata +full:- +full:cold (from sata_nv.c, the NVIDIA nForce SATA driver)

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * sata_nv.c - NVIDIA nForce SATA
9 * as Documentation/driver-api/libata.rst
12 * This driver programs the NVIDIA SATA controller in a similar
14 * NV-specific details such as register offsets, SATA phy location,
19 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
84 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
188 /* ADMA Physical Region Descriptor - one SG segment */
199 WNB = (1 << 14), /* wait-not-BSY */
219 __le16 reserved2; /* 6-7 */
220 __le16 tf[12]; /* 8-31 */
221 struct nv_adma_prd aprd[5]; /* 32-111 */
222 __le64 next_aprd; /* 112-119 */
223 __le64 reserved3; /* 120-127 */
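A quick byte-count on the offsets in these comments confirms they describe the 128-byte command parameter block (CPB) mentioned later in nv_adma_port_start(). The first six bytes are not part of this excerpt; the field names for them are an assumption inferred from the single-byte fields touched in nv_adma_check_cpb() and nv_adma_qc_prep() below, and the 16-byte NV_ADMA_APRD_SZ divisor in the truncated NV_ADMA_SGTBL_LEN expression above is likewise an assumption (though 16 bytes also follows from aprd[5] spanning bytes 32-111).

    /* Worked layout, derived from the offset comments above:
     *   bytes   0-5    six u8 fields (inferred: resp_flags, reserved,
     *                  ctl_flags, len, tag, next_cpb_idx)      =   6 bytes
     *   bytes   6-7    __le16 reserved2                        =   2 bytes
     *   bytes   8-31   __le16 tf[12]                           =  24 bytes
     *   bytes  32-111  struct nv_adma_prd aprd[5]              =  80 bytes (5 * 16)
     *   bytes 112-119  __le64 next_aprd                        =   8 bytes
     *   bytes 120-127  __le64 reserved3                        =   8 bytes
     *                                                    total = 128 bytes
     *
     * Assuming the truncated NV_ADMA_SGTBL_LEN expression divides by a
     * 16-byte NV_ADMA_APRD_SZ, each tag also gets (1024 - 128) / 16 = 56
     * external APRD entries on top of the five inline aprd[] slots.
     */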
393 .can_queue = ATA_MAX_QUEUE - 1,
403 * NV SATA controllers have various different problems with hardreset
410 * linux-ide.
419 * failure on cold boot with the standard debouncing timing.
446 * - Softreset during boot always works.
448 * - Hardreset during boot sometimes fails to bring up the link on
452 * - Hardreset is often necessary after hotplug.
456 * post-boot probing should work around the above issues in most
457 * cases. Define nv_hardreset() which only kicks in for post-boot
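A minimal sketch of the policy this comment describes, paraphrasing the nv_hardreset() fragment shown further down in this listing. The name nv_hardreset_policy and the bare return values are illustrative stand-ins, not the driver's actual shape.

    /* Sketch only: skip hardreset for boot-time probing or for links whose
     * device is already up; allow it for post-boot (e.g. hotplug) probing.
     */
    static int nv_hardreset_policy(struct ata_link *link)
    {
            if ((link->ap->pflags & ATA_PFLAG_LOADING) ||
                ata_dev_enabled(link->device))
                    return -EAGAIN;     /* fall back to softreset */

            return 0;                   /* post-boot probing: hardreset is OK */
    }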
589 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
600 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_register_mode()
601 void __iomem *mmio = pp->ctl_block; in nv_adma_register_mode()
605 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) in nv_adma_register_mode()
633 pp->flags |= NV_ADMA_PORT_REGISTER_MODE; in nv_adma_register_mode()
638 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_mode()
639 void __iomem *mmio = pp->ctl_block; in nv_adma_mode()
643 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) in nv_adma_mode()
646 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE); in nv_adma_mode()
663 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE; in nv_adma_mode()
669 struct ata_port *ap = ata_shost_to_port(sdev->host); in nv_adma_device_configure()
670 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_device_configure()
672 struct pci_dev *pdev = to_pci_dev(ap->host->dev); in nv_adma_device_configure()
681 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun) in nv_adma_device_configure()
685 spin_lock_irqsave(ap->lock, flags); in nv_adma_device_configure()
687 if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) { in nv_adma_device_configure()
691 * However, the legacy interface only supports 32-bit DMA. in nv_adma_device_configure()
697 libata-scsi.c */ in nv_adma_device_configure()
698 sg_tablesize = LIBATA_MAX_PRD - 1; in nv_adma_device_configure()
712 if (ap->port_no == 1) in nv_adma_device_configure()
721 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE; in nv_adma_device_configure()
724 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE; in nv_adma_device_configure()
730 port0 = ap->host->ports[0]->private_data; in nv_adma_device_configure()
731 port1 = ap->host->ports[1]->private_data; in nv_adma_device_configure()
732 if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) || in nv_adma_device_configure()
733 (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) { in nv_adma_device_configure()
735 * We have to set the DMA mask to 32-bit if either port is in in nv_adma_device_configure()
741 rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); in nv_adma_device_configure()
743 rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask); in nv_adma_device_configure()
746 lim->seg_boundary_mask = segment_boundary; in nv_adma_device_configure()
747 lim->max_segments = sg_tablesize; in nv_adma_device_configure()
750 (unsigned long long)*ap->host->dev->dma_mask, in nv_adma_device_configure()
753 spin_unlock_irqrestore(ap->lock, flags); in nv_adma_device_configure()
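Condensing the device-configure logic shown above: if either port has completed ATAPI setup, the whole controller is limited to the legacy 32-bit DMA mask (alongside the legacy segment limits set earlier in the function); otherwise the ADMA-capable mask saved at port-start time is restored. The helper name nv_choose_dma_mask below is hypothetical; the fields and calls are the ones visible above.

    /* Sketch of the DMA-mask decision in nv_adma_device_configure();
     * queue-limit plumbing and locking are omitted.
     */
    static int nv_choose_dma_mask(struct ata_port *ap, struct pci_dev *pdev,
                                  struct nv_adma_port_priv *pp)
    {
            struct nv_adma_port_priv *port0 = ap->host->ports[0]->private_data;
            struct nv_adma_port_priv *port1 = ap->host->ports[1]->private_data;

            if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
                (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE))
                    /* legacy interface: 32-bit DMA only */
                    return dma_set_mask(&pdev->dev, ATA_DMA_MASK);

            /* both ports in ADMA mode: restore the wider mask */
            return dma_set_mask(&pdev->dev, pp->adma_dma_mask);
    }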
760 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_check_atapi_dma()
761 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE); in nv_adma_check_atapi_dma()
766 /* Other than when internal or pass-through commands are executed, in nv_adma_tf_read()
782 if (tf->flags & ATA_TFLAG_ISADDR) { in nv_adma_tf_to_cpb()
783 if (tf->flags & ATA_TFLAG_LBA48) { in nv_adma_tf_to_cpb()
784 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB); in nv_adma_tf_to_cpb()
785 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect); in nv_adma_tf_to_cpb()
786 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal); in nv_adma_tf_to_cpb()
787 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam); in nv_adma_tf_to_cpb()
788 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah); in nv_adma_tf_to_cpb()
789 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature); in nv_adma_tf_to_cpb()
791 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB); in nv_adma_tf_to_cpb()
793 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect); in nv_adma_tf_to_cpb()
794 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal); in nv_adma_tf_to_cpb()
795 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam); in nv_adma_tf_to_cpb()
796 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah); in nv_adma_tf_to_cpb()
799 if (tf->flags & ATA_TFLAG_DEVICE) in nv_adma_tf_to_cpb()
800 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device); in nv_adma_tf_to_cpb()
802 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND); in nv_adma_tf_to_cpb()
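Each CPB taskfile word packs a shadow-register index in the high byte and the register value in the low byte, with WNB on the first word and CMDEND on the last. Counting the words emitted on the LBA48 path is a useful cross-check against the tf[12] array in the CPB layout above; the list below simply restates the assignments shown.

    /* LBA48 example (e.g. WRITE DMA EXT), words in emission order:
     *   (ATA_REG_ERR    << 8) | hob_feature | WNB
     *   (ATA_REG_NSECT  << 8) | hob_nsect
     *   (ATA_REG_LBAL   << 8) | hob_lbal
     *   (ATA_REG_LBAM   << 8) | hob_lbam
     *   (ATA_REG_LBAH   << 8) | hob_lbah
     *   (ATA_REG_ERR    << 8) | feature
     *   (ATA_REG_NSECT  << 8) | nsect
     *   (ATA_REG_LBAL   << 8) | lbal
     *   (ATA_REG_LBAM   << 8) | lbam
     *   (ATA_REG_LBAH   << 8) | lbah
     *   (ATA_REG_DEVICE << 8) | device            (if ATA_TFLAG_DEVICE is set)
     *   (ATA_REG_CMD    << 8) | command | CMDEND
     * i.e. at most 12 little-endian 16-bit words, exactly filling tf[12].
     */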
812 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_check_cpb()
813 u8 flags = pp->cpb[cpb_num].resp_flags; in nv_adma_check_cpb()
821 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_adma_check_cpb()
828 ehi->err_mask |= AC_ERR_DEV; in nv_adma_check_cpb()
831 ehi->err_mask |= AC_ERR_DEV; in nv_adma_check_cpb()
834 ehi->err_mask |= AC_ERR_SYSTEM; in nv_adma_check_cpb()
839 ehi->err_mask |= AC_ERR_OTHER; in nv_adma_check_cpb()
847 return -1; in nv_adma_check_cpb()
857 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); in nv_host_intr()
870 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { in nv_host_intr()
885 spin_lock(&host->lock); in nv_adma_interrupt()
887 for (i = 0; i < host->n_ports; i++) { in nv_adma_interrupt()
888 struct ata_port *ap = host->ports[i]; in nv_adma_interrupt()
889 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_interrupt()
890 void __iomem *mmio = pp->ctl_block; in nv_adma_interrupt()
898 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { in nv_adma_interrupt()
899 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) in nv_adma_interrupt()
906 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { in nv_adma_interrupt()
907 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) in nv_adma_interrupt()
909 if (ata_tag_valid(ap->link.active_tag)) in nv_adma_interrupt()
922 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); in nv_adma_interrupt()
924 if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && in nv_adma_interrupt()
948 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_adma_interrupt()
953 ehi->err_mask |= AC_ERR_SYSTEM; in nv_adma_interrupt()
979 if (ata_tag_valid(ap->link.active_tag)) in nv_adma_interrupt()
981 ap->link.active_tag; in nv_adma_interrupt()
983 check_commands = ap->link.sactive; in nv_adma_interrupt()
988 pos--; in nv_adma_interrupt()
1004 struct nv_adma_port_priv *pp = host->ports[0]->private_data; in nv_adma_interrupt()
1005 writel(notifier_clears[0], pp->notifier_clear_block); in nv_adma_interrupt()
1006 pp = host->ports[1]->private_data; in nv_adma_interrupt()
1007 writel(notifier_clears[1], pp->notifier_clear_block); in nv_adma_interrupt()
1010 spin_unlock(&host->lock); in nv_adma_interrupt()
1017 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_freeze()
1018 void __iomem *mmio = pp->ctl_block; in nv_adma_freeze()
1023 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) in nv_adma_freeze()
1027 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), in nv_adma_freeze()
1028 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); in nv_adma_freeze()
1039 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_thaw()
1040 void __iomem *mmio = pp->ctl_block; in nv_adma_thaw()
1045 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) in nv_adma_thaw()
1057 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_irq_clear()
1058 void __iomem *mmio = pp->ctl_block; in nv_adma_irq_clear()
1061 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { in nv_adma_irq_clear()
1067 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), in nv_adma_irq_clear()
1068 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); in nv_adma_irq_clear()
1073 /* clear notifiers - note both ports need to be written with in nv_adma_irq_clear()
1075 if (ap->port_no == 0) { in nv_adma_irq_clear()
1082 pp = ap->host->ports[0]->private_data; in nv_adma_irq_clear()
1083 writel(notifier_clears[0], pp->notifier_clear_block); in nv_adma_irq_clear()
1084 pp = ap->host->ports[1]->private_data; in nv_adma_irq_clear()
1085 writel(notifier_clears[1], pp->notifier_clear_block); in nv_adma_irq_clear()
1090 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_post_internal_cmd()
1092 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) in nv_adma_post_internal_cmd()
1098 struct device *dev = ap->host->dev; in nv_adma_port_start()
1108 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and in nv_adma_port_start()
1111 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in nv_adma_port_start()
1122 return -ENOMEM; in nv_adma_port_start()
1124 mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT + in nv_adma_port_start()
1125 ap->port_no * NV_ADMA_PORT_SIZE; in nv_adma_port_start()
1126 pp->ctl_block = mmio; in nv_adma_port_start()
1127 pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN; in nv_adma_port_start()
1128 pp->notifier_clear_block = pp->gen_block + in nv_adma_port_start()
1129 NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no); in nv_adma_port_start()
1135 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in nv_adma_port_start()
1137 pp->adma_dma_mask = *dev->dma_mask; in nv_adma_port_start()
1142 return -ENOMEM; in nv_adma_port_start()
1146 * 128-byte command parameter block (CPB) in nv_adma_port_start()
1149 pp->cpb = mem; in nv_adma_port_start()
1150 pp->cpb_dma = mem_dma; in nv_adma_port_start()
1161 pp->aprd = mem; in nv_adma_port_start()
1162 pp->aprd_dma = mem_dma; in nv_adma_port_start()
1164 ap->private_data = pp; in nv_adma_port_start()
1170 pp->flags = NV_ADMA_PORT_REGISTER_MODE; in nv_adma_port_start()
1192 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_port_stop()
1193 void __iomem *mmio = pp->ctl_block; in nv_adma_port_stop()
1201 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_port_suspend()
1202 void __iomem *mmio = pp->ctl_block; in nv_adma_port_suspend()
1204 /* Go to register mode - clears GO */ in nv_adma_port_suspend()
1218 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_port_resume()
1219 void __iomem *mmio = pp->ctl_block; in nv_adma_port_resume()
1223 writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW); in nv_adma_port_resume()
1224 writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH); in nv_adma_port_resume()
1230 pp->flags |= NV_ADMA_PORT_REGISTER_MODE; in nv_adma_port_resume()
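One detail in the resume path above is worth an annotation: the CPB base is a DMA address that may be wider than 32 bits, so it is split across two registers, and the high half uses two 16-bit shifts rather than a single ">> 32", a common kernel idiom that presumably keeps the expression well-defined even when dma_addr_t is a 32-bit type. Restated with a comment (same two writes as above):

    /* Program the CPB base in two 32-bit halves; the double 16-bit shift
     * avoids an undefined 32-bit shift when dma_addr_t is 32 bits wide.
     */
    writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
    writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);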
1253 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; in nv_adma_setup_port()
1254 struct ata_ioports *ioport = &ap->ioaddr; in nv_adma_setup_port()
1256 mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE; in nv_adma_setup_port()
1258 ioport->cmd_addr = mmio; in nv_adma_setup_port()
1259 ioport->data_addr = mmio + (ATA_REG_DATA * 4); in nv_adma_setup_port()
1260 ioport->error_addr = in nv_adma_setup_port()
1261 ioport->feature_addr = mmio + (ATA_REG_ERR * 4); in nv_adma_setup_port()
1262 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4); in nv_adma_setup_port()
1263 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4); in nv_adma_setup_port()
1264 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4); in nv_adma_setup_port()
1265 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4); in nv_adma_setup_port()
1266 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4); in nv_adma_setup_port()
1267 ioport->status_addr = in nv_adma_setup_port()
1268 ioport->command_addr = mmio + (ATA_REG_STATUS * 4); in nv_adma_setup_port()
1269 ioport->altstatus_addr = in nv_adma_setup_port()
1270 ioport->ctl_addr = mmio + 0x20; in nv_adma_setup_port()
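The register-mode taskfile sits in the port's ADMA MMIO window on a 4-byte stride. Assuming the conventional libata taskfile indices (ATA_REG_DATA = 0 through ATA_REG_STATUS = 7, with ATA_REG_FEATURE sharing index 1 and ATA_REG_CMD sharing index 7), the "ATA_REG_x * 4" arithmetic above gives:

    /* Implied register-mode offsets from the start of the port window:
     *   0x00 data     0x04 error/feature   0x08 nsect    0x0c lbal
     *   0x10 lbam     0x14 lbah            0x18 device   0x1c status/command
     *   0x20 altstatus/device control (fixed offset, per ctl_addr above)
     */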
1275 struct pci_dev *pdev = to_pci_dev(host->dev); in nv_adma_host_init()
1288 for (i = 0; i < host->n_ports; i++) in nv_adma_host_init()
1289 nv_adma_setup_port(host->ports[i]); in nv_adma_host_init()
1300 if (qc->tf.flags & ATA_TFLAG_WRITE) in nv_adma_fill_aprd()
1302 if (idx == qc->n_elem - 1) in nv_adma_fill_aprd()
1307 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg))); in nv_adma_fill_aprd()
1308 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */ in nv_adma_fill_aprd()
1309 aprd->flags = flags; in nv_adma_fill_aprd()
1310 aprd->packet_len = 0; in nv_adma_fill_aprd()
1315 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_fill_sg()
1320 for_each_sg(qc->sg, sg, qc->n_elem, si) { in nv_adma_fill_sg()
1321 aprd = (si < 5) ? &cpb->aprd[si] : in nv_adma_fill_sg()
1322 &pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)]; in nv_adma_fill_sg()
1326 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag))); in nv_adma_fill_sg()
1328 cpb->next_aprd = cpu_to_le64(0); in nv_adma_fill_sg()
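The scatter/gather placement rule implied by nv_adma_fill_sg(): the first five segments use the aprd[] slots inside the CPB itself, later segments spill into this tag's slice of the shared external APRD table, and next_aprd advertises that slice's DMA address (the exact condition guarding the next_aprd assignment is not part of this excerpt). In condensed form:

    /* Placement of segment si for hardware tag t:
     *   si < 5 : cpb->aprd[si]                               (inline in the CPB)
     *   si >= 5: pp->aprd[NV_ADMA_SGTBL_LEN * t + (si - 5)]  (external table)
     *
     * cpb->next_aprd = pp->aprd_dma + NV_ADMA_SGTBL_SZ * t when the external
     * table is needed, and 0 when all segments fit in the inline slots.
     */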
1333 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_use_reg_mode()
1335 /* ADMA engine can only be used for non-ATAPI DMA commands, in nv_adma_use_reg_mode()
1336 or interrupt-driven no-data commands. */ in nv_adma_use_reg_mode()
1337 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) || in nv_adma_use_reg_mode()
1338 (qc->tf.flags & ATA_TFLAG_POLLING)) in nv_adma_use_reg_mode()
1341 if ((qc->flags & ATA_QCFLAG_DMAMAP) || in nv_adma_use_reg_mode()
1342 (qc->tf.protocol == ATA_PROT_NODATA)) in nv_adma_use_reg_mode()
1350 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_qc_prep()
1351 struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag]; in nv_adma_qc_prep()
1356 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && in nv_adma_qc_prep()
1357 (qc->flags & ATA_QCFLAG_DMAMAP)); in nv_adma_qc_prep()
1358 nv_adma_register_mode(qc->ap); in nv_adma_qc_prep()
1363 cpb->resp_flags = NV_CPB_RESP_DONE; in nv_adma_qc_prep()
1365 cpb->ctl_flags = 0; in nv_adma_qc_prep()
1368 cpb->len = 3; in nv_adma_qc_prep()
1369 cpb->tag = qc->hw_tag; in nv_adma_qc_prep()
1370 cpb->next_cpb_idx = 0; in nv_adma_qc_prep()
1373 if (qc->tf.protocol == ATA_PROT_NCQ) in nv_adma_qc_prep()
1376 nv_adma_tf_to_cpb(&qc->tf, cpb->tf); in nv_adma_qc_prep()
1378 if (qc->flags & ATA_QCFLAG_DMAMAP) { in nv_adma_qc_prep()
1382 memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5); in nv_adma_qc_prep()
1387 cpb->ctl_flags = ctl_flags; in nv_adma_qc_prep()
1389 cpb->resp_flags = 0; in nv_adma_qc_prep()
1396 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_qc_issue()
1397 void __iomem *mmio = pp->ctl_block; in nv_adma_qc_issue()
1398 int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ); in nv_adma_qc_issue()
1403 if (unlikely(qc->tf.protocol == ATA_PROT_NCQ && in nv_adma_qc_issue()
1404 (qc->flags & ATA_QCFLAG_RESULT_TF))) { in nv_adma_qc_issue()
1405 ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n"); in nv_adma_qc_issue()
1411 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) && in nv_adma_qc_issue()
1412 (qc->flags & ATA_QCFLAG_DMAMAP)); in nv_adma_qc_issue()
1413 nv_adma_register_mode(qc->ap); in nv_adma_qc_issue()
1416 nv_adma_mode(qc->ap); in nv_adma_qc_issue()
1419 and (number of cpbs to append -1) in top 8 bits */ in nv_adma_qc_issue()
1422 if (curr_ncq != pp->last_issue_ncq) { in nv_adma_qc_issue()
1424 non-NCQ commands, else we get command timeouts and such. */ in nv_adma_qc_issue()
1426 pp->last_issue_ncq = curr_ncq; in nv_adma_qc_issue()
1429 writew(qc->hw_tag, mmio + NV_ADMA_APPEND); in nv_adma_qc_issue()
1441 spin_lock_irqsave(&host->lock, flags); in nv_generic_interrupt()
1443 for (i = 0; i < host->n_ports; i++) { in nv_generic_interrupt()
1444 struct ata_port *ap = host->ports[i]; in nv_generic_interrupt()
1447 qc = ata_qc_from_tag(ap, ap->link.active_tag); in nv_generic_interrupt()
1448 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { in nv_generic_interrupt()
1455 ap->ops->sff_check_status(ap); in nv_generic_interrupt()
1459 spin_unlock_irqrestore(&host->lock, flags); in nv_generic_interrupt()
1468 for (i = 0; i < host->n_ports; i++) { in nv_do_interrupt()
1469 handled += nv_host_intr(host->ports[i], irq_stat); in nv_do_interrupt()
1482 spin_lock(&host->lock); in nv_nf2_interrupt()
1483 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS); in nv_nf2_interrupt()
1485 spin_unlock(&host->lock); in nv_nf2_interrupt()
1496 spin_lock(&host->lock); in nv_ck804_interrupt()
1497 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); in nv_ck804_interrupt()
1499 spin_unlock(&host->lock); in nv_ck804_interrupt()
1507 return -EINVAL; in nv_scr_read()
1509 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4)); in nv_scr_read()
1516 return -EINVAL; in nv_scr_write()
1518 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4)); in nv_scr_write()
1525 struct ata_eh_context *ehc = &link->eh_context; in nv_hardreset()
1527 /* Do hardreset iff it's post-boot probing, please read the in nv_hardreset()
1530 if (!(link->ap->pflags & ATA_PFLAG_LOADING) && in nv_hardreset()
1531 !ata_dev_enabled(link->device)) in nv_hardreset()
1538 if (!(ehc->i.flags & ATA_EHI_QUIET)) in nv_hardreset()
1545 if (rc && rc != -EOPNOTSUPP) in nv_hardreset()
1551 return -EAGAIN; in nv_hardreset()
1556 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr; in nv_nf2_freeze()
1557 int shift = ap->port_no * NV_INT_PORT_SHIFT; in nv_nf2_freeze()
1567 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr; in nv_nf2_thaw()
1568 int shift = ap->port_no * NV_INT_PORT_SHIFT; in nv_nf2_thaw()
1580 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; in nv_ck804_freeze()
1581 int shift = ap->port_no * NV_INT_PORT_SHIFT; in nv_ck804_freeze()
1591 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; in nv_ck804_thaw()
1592 int shift = ap->port_no * NV_INT_PORT_SHIFT; in nv_ck804_thaw()
1604 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; in nv_mcp55_freeze()
1605 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55; in nv_mcp55_freeze()
1617 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR]; in nv_mcp55_thaw()
1618 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55; in nv_mcp55_thaw()
1630 struct nv_adma_port_priv *pp = ap->private_data; in nv_adma_error_handler()
1631 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) { in nv_adma_error_handler()
1632 void __iomem *mmio = pp->ctl_block; in nv_adma_error_handler()
1636 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) { in nv_adma_error_handler()
1639 u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); in nv_adma_error_handler()
1652 struct nv_adma_cpb *cpb = &pp->cpb[i]; in nv_adma_error_handler()
1653 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) || in nv_adma_error_handler()
1654 ap->link.sactive & (1 << i)) in nv_adma_error_handler()
1657 i, cpb->ctl_flags, cpb->resp_flags); in nv_adma_error_handler()
1667 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID; in nv_adma_error_handler()
1686 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_qc_to_dq()
1687 struct defer_queue *dq = &pp->defer_queue; in nv_swncq_qc_to_dq()
1690 WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE); in nv_swncq_qc_to_dq()
1691 dq->defer_bits |= (1 << qc->hw_tag); in nv_swncq_qc_to_dq()
1692 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag; in nv_swncq_qc_to_dq()
1697 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_qc_from_dq()
1698 struct defer_queue *dq = &pp->defer_queue; in nv_swncq_qc_from_dq()
1701 if (dq->head == dq->tail) /* null queue */ in nv_swncq_qc_from_dq()
1704 tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)]; in nv_swncq_qc_from_dq()
1705 dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON; in nv_swncq_qc_from_dq()
1706 WARN_ON(!(dq->defer_bits & (1 << tag))); in nv_swncq_qc_from_dq()
1707 dq->defer_bits &= ~(1 << tag); in nv_swncq_qc_from_dq()
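The SWNCQ defer queue is a small ring of tags plus a mirroring bitmap: nv_swncq_qc_to_dq() pushes at tail, nv_swncq_qc_from_dq() pops at head, both indices masked with ATA_MAX_QUEUE - 1, and defer_bits records which tags are currently parked. Below is a stand-alone model of just that mechanism, not driver code; dq_push/dq_pop/main are illustrative names and ATA_MAX_QUEUE = 32 is an assumption (its usual libata value, a power of two so the masking works).

    #include <stdio.h>

    #define ATA_MAX_QUEUE 32

    struct defer_queue {
            unsigned int head, tail;
            unsigned int defer_bits;
            unsigned char tag[ATA_MAX_QUEUE];
    };

    static void dq_push(struct defer_queue *dq, unsigned char tag)
    {
            dq->defer_bits |= 1u << tag;
            dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = tag;
    }

    static int dq_pop(struct defer_queue *dq)
    {
            unsigned char tag;

            if (dq->head == dq->tail)       /* empty queue */
                    return -1;
            tag = dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)];
            dq->defer_bits &= ~(1u << tag);
            return tag;
    }

    int main(void)
    {
            struct defer_queue dq = { 0 };

            dq_push(&dq, 3);
            dq_push(&dq, 7);
            printf("defer_bits=0x%x\n", dq.defer_bits);  /* 0x88 */
            printf("pop=%d\n", dq_pop(&dq));             /* 3 */
            printf("pop=%d\n", dq_pop(&dq));             /* 7 */
            printf("pop=%d\n", dq_pop(&dq));             /* -1 (empty) */
            return 0;
    }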
1714 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_fis_reinit()
1716 pp->dhfis_bits = 0; in nv_swncq_fis_reinit()
1717 pp->dmafis_bits = 0; in nv_swncq_fis_reinit()
1718 pp->sdbfis_bits = 0; in nv_swncq_fis_reinit()
1719 pp->ncq_flags = 0; in nv_swncq_fis_reinit()
1724 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_pp_reinit()
1725 struct defer_queue *dq = &pp->defer_queue; in nv_swncq_pp_reinit()
1727 dq->head = 0; in nv_swncq_pp_reinit()
1728 dq->tail = 0; in nv_swncq_pp_reinit()
1729 dq->defer_bits = 0; in nv_swncq_pp_reinit()
1730 pp->qc_active = 0; in nv_swncq_pp_reinit()
1731 pp->last_issue_tag = ATA_TAG_POISON; in nv_swncq_pp_reinit()
1737 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_irq_clear()
1739 writew(fis, pp->irq_block); in nv_swncq_irq_clear()
1752 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_ncq_stop()
1758 ap->qc_active, ap->link.sactive); in nv_swncq_ncq_stop()
1762 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag, in nv_swncq_ncq_stop()
1763 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits); in nv_swncq_ncq_stop()
1766 ap->ops->sff_check_status(ap), in nv_swncq_ncq_stop()
1767 ioread8(ap->ioaddr.error_addr)); in nv_swncq_ncq_stop()
1769 sactive = readl(pp->sactive_block); in nv_swncq_ncq_stop()
1770 done_mask = pp->qc_active ^ sactive; in nv_swncq_ncq_stop()
1775 if (pp->qc_active & (1 << i)) in nv_swncq_ncq_stop()
1784 (pp->dhfis_bits >> i) & 0x1, in nv_swncq_ncq_stop()
1785 (pp->dmafis_bits >> i) & 0x1, in nv_swncq_ncq_stop()
1786 (pp->sdbfis_bits >> i) & 0x1, in nv_swncq_ncq_stop()
1792 ap->ops->sff_irq_clear(ap); in nv_swncq_ncq_stop()
1799 struct ata_eh_context *ehc = &ap->link.eh_context; in nv_swncq_error_handler()
1801 if (ap->link.sactive) { in nv_swncq_error_handler()
1803 ehc->i.action |= ATA_EH_RESET; in nv_swncq_error_handler()
1812 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; in nv_swncq_port_suspend()
1831 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; in nv_swncq_port_resume()
1851 void __iomem *mmio = host->iomap[NV_MMIO_BAR]; in nv_swncq_host_init()
1852 struct pci_dev *pdev = to_pci_dev(host->dev); in nv_swncq_host_init()
1862 dev_dbg(&pdev->dev, "HOST_CTL:0x%X\n", tmp); in nv_swncq_host_init()
1867 dev_dbg(&pdev->dev, "HOST_ENABLE:0x%X\n", tmp); in nv_swncq_host_init()
1877 struct ata_port *ap = ata_shost_to_port(sdev->host); in nv_swncq_device_configure()
1878 struct pci_dev *pdev = to_pci_dev(ap->host->dev); in nv_swncq_device_configure()
1886 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun) in nv_swncq_device_configure()
1890 dev = &ap->link.device[sdev->id]; in nv_swncq_device_configure()
1891 if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI) in nv_swncq_device_configure()
1895 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA || in nv_swncq_device_configure()
1896 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2) in nv_swncq_device_configure()
1900 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA || in nv_swncq_device_configure()
1901 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) { in nv_swncq_device_configure()
1910 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); in nv_swncq_device_configure()
1915 sdev->queue_depth); in nv_swncq_device_configure()
1923 struct device *dev = ap->host->dev; in nv_swncq_port_start()
1924 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR]; in nv_swncq_port_start()
1935 return -ENOMEM; in nv_swncq_port_start()
1937 pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE, in nv_swncq_port_start()
1938 &pp->prd_dma, GFP_KERNEL); in nv_swncq_port_start()
1939 if (!pp->prd) in nv_swncq_port_start()
1940 return -ENOMEM; in nv_swncq_port_start()
1942 ap->private_data = pp; in nv_swncq_port_start()
1943 pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE; in nv_swncq_port_start()
1944 pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2; in nv_swncq_port_start()
1945 pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2; in nv_swncq_port_start()
1952 if (qc->tf.protocol != ATA_PROT_NCQ) { in nv_swncq_qc_prep()
1957 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) in nv_swncq_qc_prep()
1967 struct ata_port *ap = qc->ap; in nv_swncq_fill_sg()
1969 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_fill_sg()
1973 prd = pp->prd + ATA_MAX_PRD * qc->hw_tag; in nv_swncq_fill_sg()
1976 for_each_sg(qc->sg, sg, qc->n_elem, si) { in nv_swncq_fill_sg()
1987 len = 0x10000 - offset; in nv_swncq_fill_sg()
1993 sg_len -= len; in nv_swncq_fill_sg()
1998 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); in nv_swncq_fill_sg()
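This loop enforces the classic BMDMA PRD rule that a single entry must not cross a 64 KiB boundary: when a segment would cross, the entry is clipped to 0x10000 - offset bytes and the remainder carries into the next entry (the offset = addr & 0xffff computation and the crossing test are implied but not shown in this excerpt). A worked example:

    /* Example: a 0x5000-byte (20 KiB) segment at bus address 0x1234F000.
     *   offset  = 0x1234F000 & 0xffff = 0xF000
     *   crosses? 0xF000 + 0x5000 = 0x14000 > 0x10000 -> clip
     *   entry 0: addr = 0x1234F000, len = 0x10000 - 0xF000 = 0x1000 (4 KiB)
     *   entry 1: addr = 0x12350000, len = 0x5000 - 0x1000  = 0x4000 (16 KiB)
     * The last entry written gets ATA_PRD_EOT, as in the line above.
     */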
2004 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_issue_atacmd()
2009 writel((1 << qc->hw_tag), pp->sactive_block); in nv_swncq_issue_atacmd()
2010 pp->last_issue_tag = qc->hw_tag; in nv_swncq_issue_atacmd()
2011 pp->dhfis_bits &= ~(1 << qc->hw_tag); in nv_swncq_issue_atacmd()
2012 pp->dmafis_bits &= ~(1 << qc->hw_tag); in nv_swncq_issue_atacmd()
2013 pp->qc_active |= (0x1 << qc->hw_tag); in nv_swncq_issue_atacmd()
2015 trace_ata_tf_load(ap, &qc->tf); in nv_swncq_issue_atacmd()
2016 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ in nv_swncq_issue_atacmd()
2017 trace_ata_exec_command(ap, &qc->tf, qc->hw_tag); in nv_swncq_issue_atacmd()
2018 ap->ops->sff_exec_command(ap, &qc->tf); in nv_swncq_issue_atacmd()
2025 struct ata_port *ap = qc->ap; in nv_swncq_qc_issue()
2026 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_qc_issue()
2028 if (qc->tf.protocol != ATA_PROT_NCQ) in nv_swncq_qc_issue()
2031 if (!pp->qc_active) in nv_swncq_qc_issue()
2042 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_swncq_hotplug()
2047 sata_scr_read(&ap->link, SCR_ERROR, &serror); in nv_swncq_hotplug()
2048 sata_scr_write(&ap->link, SCR_ERROR, serror); in nv_swncq_hotplug()
2059 ehi->serror |= serror; in nv_swncq_hotplug()
2067 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_sdbfis()
2068 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_swncq_sdbfis()
2074 host_stat = ap->ops->bmdma_status(ap); in nv_swncq_sdbfis()
2080 ehi->err_mask |= AC_ERR_HOST_BUS; in nv_swncq_sdbfis()
2081 ehi->action |= ATA_EH_RESET; in nv_swncq_sdbfis()
2082 return -EINVAL; in nv_swncq_sdbfis()
2085 ap->ops->sff_irq_clear(ap); in nv_swncq_sdbfis()
2088 sactive = readl(pp->sactive_block); in nv_swncq_sdbfis()
2089 done_mask = pp->qc_active ^ sactive; in nv_swncq_sdbfis()
2091 pp->qc_active &= ~done_mask; in nv_swncq_sdbfis()
2092 pp->dhfis_bits &= ~done_mask; in nv_swncq_sdbfis()
2093 pp->dmafis_bits &= ~done_mask; in nv_swncq_sdbfis()
2094 pp->sdbfis_bits |= done_mask; in nv_swncq_sdbfis()
2097 if (!ap->qc_active) { in nv_swncq_sdbfis()
2103 if (pp->qc_active & pp->dhfis_bits) in nv_swncq_sdbfis()
2106 if ((pp->ncq_flags & ncq_saw_backout) || in nv_swncq_sdbfis()
2107 (pp->qc_active ^ pp->dhfis_bits)) in nv_swncq_sdbfis()
2116 ap->qc_active, pp->qc_active, in nv_swncq_sdbfis()
2117 pp->defer_queue.defer_bits, pp->dhfis_bits, in nv_swncq_sdbfis()
2118 pp->dmafis_bits, pp->last_issue_tag); in nv_swncq_sdbfis()
2123 qc = ata_qc_from_tag(ap, pp->last_issue_tag); in nv_swncq_sdbfis()
2128 if (pp->defer_queue.defer_bits) { in nv_swncq_sdbfis()
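Completion detection in the SDB-FIS handler is a bitmap diff: pp->qc_active tracks tags the driver has issued, the SActive register tracks tags the device still owns, so their XOR is exactly the set of tags that just finished. A worked example consistent with the updates shown above:

    /* Example:
     *   pp->qc_active = 0b1011   (tags 0, 1, 3 issued)
     *   sactive       = 0b0010   (device still owns tag 1)
     *   done_mask     = 0b1011 ^ 0b0010 = 0b1001  -> tags 0 and 3 completed
     * Those bits are cleared from qc_active/dhfis_bits/dmafis_bits and set
     * in sdbfis_bits, mirroring the lines above.
     */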
2140 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_tag()
2143 tag = readb(pp->tag_block) >> 2; in nv_swncq_tag()
2153 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_dmafis()
2164 rw = qc->tf.flags & ATA_TFLAG_WRITE; in nv_swncq_dmafis()
2167 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag, in nv_swncq_dmafis()
2168 ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); in nv_swncq_dmafis()
2170 /* specify data direction, triple-check start bit is clear */ in nv_swncq_dmafis()
2171 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); in nv_swncq_dmafis()
2176 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); in nv_swncq_dmafis()
2181 struct nv_swncq_port_priv *pp = ap->private_data; in nv_swncq_host_interrupt()
2183 struct ata_eh_info *ehi = &ap->link.eh_info; in nv_swncq_host_interrupt()
2187 ata_stat = ap->ops->sff_check_status(ap); in nv_swncq_host_interrupt()
2200 if (!pp->qc_active) in nv_swncq_host_interrupt()
2203 if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror)) in nv_swncq_host_interrupt()
2205 ap->ops->scr_write(&ap->link, SCR_ERROR, serror); in nv_swncq_host_interrupt()
2210 ehi->err_mask |= AC_ERR_DEV; in nv_swncq_host_interrupt()
2211 ehi->serror |= serror; in nv_swncq_host_interrupt()
2212 ehi->action |= ATA_EH_RESET; in nv_swncq_host_interrupt()
2221 pp->ncq_flags |= ncq_saw_backout; in nv_swncq_host_interrupt()
2225 pp->ncq_flags |= ncq_saw_sdb; in nv_swncq_host_interrupt()
2228 pp->qc_active, pp->dhfis_bits, in nv_swncq_host_interrupt()
2229 pp->dmafis_bits, readl(pp->sactive_block)); in nv_swncq_host_interrupt()
2238 pp->dhfis_bits |= (0x1 << pp->last_issue_tag); in nv_swncq_host_interrupt()
2239 pp->ncq_flags |= ncq_saw_d2h; in nv_swncq_host_interrupt()
2240 if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) { in nv_swncq_host_interrupt()
2242 ehi->err_mask |= AC_ERR_HSM; in nv_swncq_host_interrupt()
2243 ehi->action |= ATA_EH_RESET; in nv_swncq_host_interrupt()
2248 !(pp->ncq_flags & ncq_saw_dmas)) { in nv_swncq_host_interrupt()
2249 ata_stat = ap->ops->sff_check_status(ap); in nv_swncq_host_interrupt()
2253 if (pp->defer_queue.defer_bits) { in nv_swncq_host_interrupt()
2265 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap)); in nv_swncq_host_interrupt()
2266 pp->ncq_flags |= ncq_saw_dmas; in nv_swncq_host_interrupt()
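Reading the handler above together with nv_swncq_sdbfis() and nv_swncq_issue_atacmd(), the per-tag bitmaps appear to track how far each queued command has progressed. The summary below is a reconstruction from these fragments, not a comment taken from the driver.

    /* Per-tag SWNCQ bookkeeping (one bit per hardware tag):
     *   pp->qc_active    issued by the driver, not yet completed
     *   pp->dhfis_bits   Register D2H FIS seen: the device accepted the
     *                    command, so the next deferred command can usually
     *                    be sent once the port is no longer busy
     *   pp->dmafis_bits  DMA Setup FIS seen: nv_swncq_dmafis() has
     *                    programmed the BMDMA engine for this tag
     *   pp->sdbfis_bits  completion reported via Set Device Bits FIS
     * The ncq_saw_* flags (backout, sdb, d2h, dmas) record protocol events
     * used by the error checks above.
     */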
2286 spin_lock_irqsave(&host->lock, flags); in nv_swncq_interrupt()
2288 irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55); in nv_swncq_interrupt()
2290 for (i = 0; i < host->n_ports; i++) { in nv_swncq_interrupt()
2291 struct ata_port *ap = host->ports[i]; in nv_swncq_interrupt()
2293 if (ap->link.sactive) { in nv_swncq_interrupt()
2305 spin_unlock_irqrestore(&host->lock, flags); in nv_swncq_interrupt()
2319 unsigned long type = ent->driver_data; in nv_init_one()
2321 // Make sure this is a SATA controller by counting the number of bars in nv_init_one()
2322 // (NVIDIA SATA controllers will always have six bars). Otherwise, in nv_init_one()
2326 return -ENODEV; in nv_init_one()
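The comment describes a probe-time sanity check: bail out unless all six standard PCI BARs are populated. The loop below is a hypothetical reconstruction (only the comment and the -ENODEV return are part of this excerpt); pci_resource_start() is the usual way to test whether a BAR is assigned.

    /* Hypothetical sketch of the six-BAR sanity check described above. */
    unsigned int bar;

    for (bar = 0; bar < 6; bar++)
            if (pci_resource_start(pdev, bar) == 0)
                    return -ENODEV;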
2328 ata_print_version_once(&pdev->dev, DRV_VERSION); in nv_init_one()
2336 dev_notice(&pdev->dev, "Using ADMA mode\n"); in nv_init_one()
2339 dev_notice(&pdev->dev, "Using SWNCQ mode\n"); in nv_init_one()
2344 ipriv = ppi[0]->private_data; in nv_init_one()
2349 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); in nv_init_one()
2351 return -ENOMEM; in nv_init_one()
2352 hpriv->type = type; in nv_init_one()
2353 host->private_data = hpriv; in nv_init_one()
2361 base = host->iomap[NV_MMIO_BAR]; in nv_init_one()
2362 host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET; in nv_init_one()
2363 host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET; in nv_init_one()
2365 /* enable SATA space for CK804 */ in nv_init_one()
2383 dev_notice(&pdev->dev, "Using MSI\n"); in nv_init_one()
2388 return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht); in nv_init_one()
2395 struct nv_host_priv *hpriv = host->private_data; in nv_pci_device_resume()
2402 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { in nv_pci_device_resume()
2403 if (hpriv->type >= CK804) { in nv_pci_device_resume()
2410 if (hpriv->type == ADMA) { in nv_pci_device_resume()
2416 pp = host->ports[0]->private_data; in nv_pci_device_resume()
2417 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) in nv_pci_device_resume()
2423 pp = host->ports[1]->private_data; in nv_pci_device_resume()
2424 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) in nv_pci_device_resume()
2443 struct pci_dev *pdev = to_pci_dev(host->dev); in nv_ck804_host_stop()
2446 /* disable SATA space for CK804 */ in nv_ck804_host_stop()
2454 struct pci_dev *pdev = to_pci_dev(host->dev); in nv_adma_host_stop()