1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * sata_promise.c - Promise SATA
7 * Please ALWAYS copy linux-ide@vger.kernel.org
10 * Copyright 2003-2004 Red Hat, Inc.
13 * as Documentation/driver-api/libata.rst
38 PDC_MAX_PRD = LIBATA_MAX_PRD - 1, /* -1 for ASIC PRD bug workaround */
40 /* host register offsets (from host->iomap[PDC_MMIO_BAR]) */
46 PDC_TBG_MODE = 0x41C, /* TBG mode (not SATAII) */
49 /* per-port ATA register offsets (from ap->ioaddr.cmd_addr) */
57 PDC_ALTSTATUS = 0x38, /* Alternate-status/device-control reg (per port) */
62 /* per-port SATA register offsets (from ap->ioaddr.scr_addr) */
77 PDC_SH_ERR = (1 << 9), /* PCI error while loading S/G table */
81 PDC2_ATA_DMA_CNT_ERR = (1 << 14), /* DMA DATA FIS size differs from S/G count */
82 PDC_OVERRUN_ERR = (1 << 19), /* S/G byte count larger than HD requires */
83 PDC_UNDERRUN_ERR = (1 << 20), /* S/G byte count less than HD requires */
122 /* ap->flags bits */
194 /* First-generation chips need a more restrictive ->check_atapi_dma op,
195 and ->freeze/thaw that ignore the hotplug controls. */
316 struct device *dev = ap->host->dev; in pdc_common_port_start()
327 return -ENOMEM; in pdc_common_port_start()
329 pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL); in pdc_common_port_start()
330 if (!pp->pkt) in pdc_common_port_start()
331 return -ENOMEM; in pdc_common_port_start()
333 ap->private_data = pp; in pdc_common_port_start()
347 if (ap->flags & PDC_FLAG_GEN_II) { in pdc_sata_port_start()
348 void __iomem *sata_mmio = ap->ioaddr.scr_addr; in pdc_sata_port_start()
361 void __iomem *sata_mmio = ap->ioaddr.scr_addr; in pdc_fpdma_clear_interrupt_flag()
368 /* It's not allowed to write to the entire FPDMA_CTLSTAT register in pdc_fpdma_clear_interrupt_flag()
369 when NCQ is running. So do a byte-sized write to bits 10 and 11. */ in pdc_fpdma_clear_interrupt_flag()
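The byte-sized write works because bits 10 and 11 sit in byte 1 of the little-endian FPDMA_CTLSTAT register, so a writeb() to (register + 1) can update them without touching the rest of the register while NCQ is active. A minimal sketch of that idea, assuming the driver's PDC_FPDMA_CTLSTAT offset and treating the two bits as write-1-to-clear flags (the helper name and masks are illustrative, not the driver's own code):

#include <linux/io.h>
#include <linux/types.h>

static void example_clear_fpdma_int_flags(void __iomem *sata_mmio)
{
	u32 ctlstat = readl(sata_mmio + PDC_FPDMA_CTLSTAT);

	ctlstat |= (1 << 10) | (1 << 11);	/* assumed write-1-to-clear interrupt flags */

	/* update only byte 1 so the rest of FPDMA_CTLSTAT is left untouched */
	writeb((u8)(ctlstat >> 8), sata_mmio + PDC_FPDMA_CTLSTAT + 1);
	readb(sata_mmio + PDC_FPDMA_CTLSTAT + 1);	/* flush the posted write */
}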
376 void __iomem *sata_mmio = ap->ioaddr.scr_addr; in pdc_fpdma_reset()
394 void __iomem *sata_mmio = ap->ioaddr.scr_addr; in pdc_not_at_command_packet_phase()
410 void __iomem *sata_mmio = ap->ioaddr.scr_addr; in pdc_clear_internal_debug_record_error_register()
418 void __iomem *ata_ctlstat_mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT; in pdc_reset_port()
422 if (ap->flags & PDC_FLAG_GEN_II) in pdc_reset_port()
429 for (i = 11; i > 0; i--) { in pdc_reset_port()
444 if (sata_scr_valid(&ap->link) && (ap->flags & PDC_FLAG_GEN_II)) { in pdc_reset_port()
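The bounded loop above retries the port reset a fixed number of times rather than waiting forever. The full sequence is not visible in this excerpt; as a rough, assumed reconstruction, the idea is to keep asserting the reset bit in PDC_CTLSTAT until it reads back set (or the retry budget is exhausted), then deassert it and flush:

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/types.h>

/* Assumed sketch of the retry pattern only; not the driver's code. */
static void example_toggle_port_reset(void __iomem *ata_ctlstat_mmio, u32 reset_bit)
{
	unsigned int i;
	u32 tmp = 0;

	for (i = 11; i > 0; i--) {
		tmp = readl(ata_ctlstat_mmio);
		if (tmp & reset_bit)
			break;			/* reset seen asserted */

		udelay(100);
		writel(tmp | reset_bit, ata_ctlstat_mmio);
	}

	writel(tmp & ~reset_bit, ata_ctlstat_mmio);	/* deassert reset */
	readl(ata_ctlstat_mmio);			/* flush */
}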
453 void __iomem *ata_mmio = ap->ioaddr.cmd_addr; in pdc_pata_cable_detect()
465 return -EINVAL; in pdc_sata_scr_read()
466 *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4)); in pdc_sata_scr_read()
474 return -EINVAL; in pdc_sata_scr_write()
475 writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4)); in pdc_sata_scr_write()
481 struct ata_port *ap = qc->ap; in pdc_atapi_pkt()
482 dma_addr_t sg_table = ap->bmdma_prd_dma; in pdc_atapi_pkt()
483 unsigned int cdb_len = qc->dev->cdb_len; in pdc_atapi_pkt()
484 u8 *cdb = qc->cdb; in pdc_atapi_pkt()
485 struct pdc_port_priv *pp = ap->private_data; in pdc_atapi_pkt()
486 u8 *buf = pp->pkt; in pdc_atapi_pkt()
493 switch (qc->tf.protocol) { in pdc_atapi_pkt()
495 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) in pdc_atapi_pkt()
507 buf32[1] = cpu_to_le32(sg_table); /* S/G table addr */ in pdc_atapi_pkt()
508 buf32[2] = 0; /* no next-packet */ in pdc_atapi_pkt()
511 if (sata_scr_valid(&ap->link)) in pdc_atapi_pkt()
514 dev_sel = qc->tf.device; in pdc_atapi_pkt()
522 buf[17] = qc->tf.nsect; in pdc_atapi_pkt()
524 buf[19] = qc->tf.lbal; in pdc_atapi_pkt()
527 if (qc->tf.protocol != ATAPI_PROT_DMA) in pdc_atapi_pkt()
535 buf[23] = qc->tf.lbam; in pdc_atapi_pkt()
537 buf[25] = qc->tf.lbah; in pdc_atapi_pkt()
541 buf[27] = qc->tf.command; in pdc_atapi_pkt()
556 * pdc_fill_sg - Fill PCI IDE PRD table
559 * Fill PCI IDE PRD (scatter-gather) table with segments
569 struct ata_port *ap = qc->ap; in pdc_fill_sg()
570 struct ata_bmdma_prd *prd = ap->bmdma_prd; in pdc_fill_sg()
576 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) in pdc_fill_sg()
580 for_each_sg(qc->sg, sg, qc->n_elem, si) { in pdc_fill_sg()
585 * Note h/w doesn't support 64-bit, so we unconditionally in pdc_fill_sg()
595 len = 0x10000 - offset; in pdc_fill_sg()
603 sg_len -= len; in pdc_fill_sg()
608 len = le32_to_cpu(prd[idx - 1].flags_len); in pdc_fill_sg()
613 addr = le32_to_cpu(prd[idx - 1].addr); in pdc_fill_sg()
614 prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG); in pdc_fill_sg()
616 idx - 1, addr, SG_COUNT_ASIC_BUG); in pdc_fill_sg()
618 addr = addr + len - SG_COUNT_ASIC_BUG; in pdc_fill_sg()
627 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); in pdc_fill_sg()
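Two constraints shape this table: no PRD entry may cross a 64 KiB boundary, hence the 0x10000 - offset split, and the ASIC bug workaround trims SG_COUNT_ASIC_BUG bytes off the final entry and re-adds them as an extra entry before ATA_PRD_EOT is set, which is why PDC_MAX_PRD reserves one slot. A stand-alone, runnable sketch of just the boundary-split arithmetic, using a simplified stand-in for struct ata_bmdma_prd:

#include <stdint.h>
#include <stdio.h>

struct demo_prd {
	uint32_t addr;
	uint32_t len;
};

/* Split one DMA segment so that no entry crosses a 64 KiB boundary. */
static int demo_fill_prd(struct demo_prd *prd, uint32_t addr, uint32_t sg_len)
{
	int idx = 0;

	while (sg_len) {
		uint32_t offset = addr & 0xffff;
		uint32_t len = sg_len;

		if (offset + sg_len > 0x10000)
			len = 0x10000 - offset;

		prd[idx].addr = addr;
		prd[idx].len = len;
		idx++;

		addr += len;
		sg_len -= len;
	}
	return idx;
}

int main(void)
{
	struct demo_prd prd[4];
	/* a 100000-byte segment that starts 0x100 below a 64 KiB boundary */
	int n = demo_fill_prd(prd, 0x1fff00, 100000);

	for (int i = 0; i < n; i++)
		printf("PRD %d: addr=0x%08x len=%u\n", i,
		       (unsigned)prd[i].addr, (unsigned)prd[i].len);
	return 0;
}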
632 struct pdc_port_priv *pp = qc->ap->private_data; in pdc_qc_prep()
635 switch (qc->tf.protocol) { in pdc_qc_prep()
640 i = pdc_pkt_header(&qc->tf, qc->ap->bmdma_prd_dma, in pdc_qc_prep()
641 qc->dev->devno, pp->pkt); in pdc_qc_prep()
642 if (qc->tf.flags & ATA_TFLAG_LBA48) in pdc_qc_prep()
643 i = pdc_prep_lba48(&qc->tf, pp->pkt, i); in pdc_qc_prep()
645 i = pdc_prep_lba28(&qc->tf, pp->pkt, i); in pdc_qc_prep()
646 pdc_pkt_footer(&qc->tf, pp->pkt, i); in pdc_qc_prep()
679 return (ap->flags & PDC_FLAG_4_PORTS) ? 4 : 2; in pdc_sata_nr_ports()
684 const struct ata_host *host = ap->host; in pdc_sata_ata_port_to_ata_no()
688 for (i = 0; i < nr_ports && host->ports[i] != ap; ++i) in pdc_sata_ata_port_to_ata_no()
691 return pdc_port_no_to_ata_no(i, pdc_is_sataii_tx4(ap->flags)); in pdc_sata_ata_port_to_ata_no()
696 void __iomem *ata_mmio = ap->ioaddr.cmd_addr; in pdc_freeze()
708 struct ata_host *host = ap->host; in pdc_sata_freeze()
709 void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR]; in pdc_sata_freeze()
717 * 1) hotplug register accesses must be serialised via host->lock in pdc_sata_freeze()
718 * 2) ap->lock == &ap->host->lock in pdc_sata_freeze()
719 * 3) ->freeze() and ->thaw() are called with ap->lock held in pdc_sata_freeze()
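Given rules 1-3, the read-modify-write of the shared hotplug register inside ->freeze()/->thaw() needs no extra locking of its own (the hard-reset path instead guards its register access with hpriv->hard_reset_lock, as seen further down). A sketch of the masking step under those assumptions; the per-port bit layout used here is illustrative, not taken from the driver:

#include <linux/io.h>
#include <linux/types.h>

static void example_mask_port_hotplug(void __iomem *host_mmio,
				      unsigned int hotplug_offset,
				      unsigned int ata_no)
{
	/* caller holds ap->lock == &ap->host->lock, so no extra locking here */
	u32 csr = readl(host_mmio + hotplug_offset);

	csr |= 0x11 << (ata_no + 16);	/* assumed plug/unplug mask bits for this port */
	writel(csr, host_mmio + hotplug_offset);
	readl(host_mmio + hotplug_offset);	/* flush */
}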
731 void __iomem *ata_mmio = ap->ioaddr.cmd_addr; in pdc_thaw()
746 struct ata_host *host = ap->host; in pdc_sata_thaw()
747 void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR]; in pdc_sata_thaw()
767 pdc_reset_port(link->ap); in pdc_pata_softreset()
773 void __iomem *ata_mmio = ap->ioaddr.cmd_addr; in pdc_ata_port_to_ata_no()
774 void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR]; in pdc_ata_port_to_ata_no()
777 return (ata_mmio - host_mmio - 0x200) / 0x80; in pdc_ata_port_to_ata_no()
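The division recovers the hardware port number because each port's ATA taskfile block is laid out at host-BAR offset 0x200 + 0x80 * port, which is exactly what the 0x200 base and 0x80 stride in this expression imply. A quick user-space check of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* per-port ATA blocks assumed at 0x200 + 0x80 * port, per the formula above */
	for (unsigned long port = 0; port < 4; port++) {
		unsigned long ata_off = 0x200 + 0x80 * port;

		printf("offset 0x%lx -> port %lu\n",
		       ata_off, (ata_off - 0x200) / 0x80);
	}
	return 0;
}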
782 void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR]; in pdc_hard_reset_port()
785 struct pdc_host_priv *hpriv = ap->host->private_data; in pdc_hard_reset_port()
788 spin_lock(&hpriv->hard_reset_lock); in pdc_hard_reset_port()
799 spin_unlock(&hpriv->hard_reset_lock); in pdc_hard_reset_port()
805 if (link->ap->flags & PDC_FLAG_GEN_II) in pdc_sata_hardreset()
806 pdc_not_at_command_packet_phase(link->ap); in pdc_sata_hardreset()
808 pdc_hard_reset_port(link->ap); in pdc_sata_hardreset()
809 pdc_reset_port(link->ap); in pdc_sata_hardreset()
812 * after hardreset. Do non-waiting hardreset and request in pdc_sata_hardreset()
813 * follow-up SRST. in pdc_sata_hardreset()
828 struct ata_port *ap = qc->ap; in pdc_post_internal_cmd()
831 if (qc->flags & ATA_QCFLAG_EH) in pdc_post_internal_cmd()
838 struct ata_eh_info *ehi = &ap->link.eh_info; in pdc_error_intr()
855 if (sata_scr_valid(&ap->link)) { in pdc_error_intr()
858 pdc_sata_scr_read(&ap->link, SCR_ERROR, &serror); in pdc_error_intr()
859 ehi->serror |= serror; in pdc_error_intr()
862 qc->err_mask |= ac_err_mask; in pdc_error_intr()
873 void __iomem *ata_mmio = ap->ioaddr.cmd_addr; in pdc_host_intr()
877 if (ap->flags & PDC_FLAG_GEN_II) in pdc_host_intr()
887 switch (qc->tf.protocol) { in pdc_host_intr()
892 qc->err_mask |= ac_err_mask(ata_wait_idle(ap)); in pdc_host_intr()
897 ap->stats.idle_irq++; in pdc_host_intr()
906 void __iomem *ata_mmio = ap->ioaddr.cmd_addr; in pdc_irq_clear()
923 if (!host || !host->iomap[PDC_MMIO_BAR]) in pdc_interrupt()
926 host_mmio = host->iomap[PDC_MMIO_BAR]; in pdc_interrupt()
928 spin_lock(&host->lock); in pdc_interrupt()
931 if (host->ports[0]->flags & PDC_FLAG_GEN_II) { in pdc_interrupt()
952 is_sataii_tx4 = pdc_is_sataii_tx4(host->ports[0]->flags); in pdc_interrupt()
954 for (i = 0; i < host->n_ports; i++) { in pdc_interrupt()
955 ap = host->ports[i]; in pdc_interrupt()
961 struct ata_eh_info *ehi = &ap->link.eh_info; in pdc_interrupt()
975 qc = ata_qc_from_tag(ap, ap->link.active_tag); in pdc_interrupt()
976 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) in pdc_interrupt()
982 spin_unlock(&host->lock); in pdc_interrupt()
988 struct ata_port *ap = qc->ap; in pdc_packet_start()
989 struct pdc_port_priv *pp = ap->private_data; in pdc_packet_start()
990 void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR]; in pdc_packet_start()
991 void __iomem *ata_mmio = ap->ioaddr.cmd_addr; in pdc_packet_start()
992 unsigned int port_no = ap->port_no; in pdc_packet_start()
998 pp->pkt[2] = seq; in pdc_packet_start()
1000 writel(pp->pkt_dma, ata_mmio + PDC_PKT_SUBMIT); in pdc_packet_start()
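The packet written through pp->pkt lives in the coherent buffer allocated in pdc_common_port_start(), so the CPU's packet and PRD updates must be globally visible before the controller is told to fetch them via PDC_PKT_SUBMIT. A sketch of that ordering, assuming a write barrier plus a read-back flush around the doorbell write shown above (the exact submit sequence beyond that line is not part of this excerpt):

#include <linux/io.h>
#include <linux/types.h>

/* Assumed doorbell pattern, for illustration only. */
static void example_submit_packet(void __iomem *ata_mmio, dma_addr_t pkt_dma)
{
	wmb();			/* order packet/PRD stores before the doorbell */

	/* the driver sets a 32-bit DMA mask at probe time, so the
	 * packet address fits in a single dword */
	writel((u32)pkt_dma, ata_mmio + PDC_PKT_SUBMIT);
	readl(ata_mmio + PDC_PKT_SUBMIT);	/* flush the posted write */
}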
1006 switch (qc->tf.protocol) { in pdc_qc_issue()
1008 if (qc->dev->flags & ATA_DFLAG_CDB_INTR) in pdc_qc_issue()
1012 if (qc->tf.flags & ATA_TFLAG_POLLING) in pdc_qc_issue()
1027 WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA); in pdc_tf_load_mmio()
1034 WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA); in pdc_exec_command_mmio()
1040 u8 *scsicmd = qc->scsicmd->cmnd; in pdc_check_atapi_dma()
1055 /* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */ in pdc_check_atapi_dma()
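The range in the comment is the 32-bit LBA interpreted as a signed value: 0xFFFF4FA2 is -45150 and 0xFFFFFFFF is -1, so any LBA at or above 0xFFFF4FA2 is steered to PIO. Assuming the check applies to a WRITE(10) CDB (opcode 0x2a), whose LBA sits big-endian in bytes 2..5, a stand-alone sketch of the range test:

#include <stdint.h>
#include <stdio.h>

/* Returns nonzero when the CDB's LBA falls in the PIO-only range. */
static int demo_lba_needs_pio(const uint8_t *cdb)
{
	uint32_t lba = ((uint32_t)cdb[2] << 24) | ((uint32_t)cdb[3] << 16) |
		       ((uint32_t)cdb[4] << 8) | cdb[5];

	return lba >= 0xFFFF4FA2u;	/* -45150 .. -1 as signed 32-bit */
}

int main(void)
{
	uint8_t low[10]  = { 0x2a, 0, 0x00, 0x10, 0x00, 0x00 };	/* LBA 0x00100000 */
	uint8_t high[10] = { 0x2a, 0, 0xff, 0xff, 0x4f, 0xa2 };	/* LBA 0xFFFF4FA2 */

	printf("low  LBA -> %s\n", demo_lba_needs_pio(low) ? "PIO" : "DMA ok");
	printf("high LBA -> %s\n", demo_lba_needs_pio(high) ? "PIO" : "DMA ok");
	return 0;
}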
1077 ap->ioaddr.cmd_addr = base; in pdc_ata_setup_port()
1078 ap->ioaddr.data_addr = base; in pdc_ata_setup_port()
1079 ap->ioaddr.feature_addr = in pdc_ata_setup_port()
1080 ap->ioaddr.error_addr = base + 0x4; in pdc_ata_setup_port()
1081 ap->ioaddr.nsect_addr = base + 0x8; in pdc_ata_setup_port()
1082 ap->ioaddr.lbal_addr = base + 0xc; in pdc_ata_setup_port()
1083 ap->ioaddr.lbam_addr = base + 0x10; in pdc_ata_setup_port()
1084 ap->ioaddr.lbah_addr = base + 0x14; in pdc_ata_setup_port()
1085 ap->ioaddr.device_addr = base + 0x18; in pdc_ata_setup_port()
1086 ap->ioaddr.command_addr = in pdc_ata_setup_port()
1087 ap->ioaddr.status_addr = base + 0x1c; in pdc_ata_setup_port()
1088 ap->ioaddr.altstatus_addr = in pdc_ata_setup_port()
1089 ap->ioaddr.ctl_addr = base + 0x38; in pdc_ata_setup_port()
1090 ap->ioaddr.scr_addr = scr_addr; in pdc_ata_setup_port()
1095 void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR]; in pdc_host_init()
1096 int is_gen2 = host->ports[0]->flags & PDC_FLAG_GEN_II; in pdc_host_init()
1128 /* don't initialise TBG or SLEW on 2nd generation chips */ in pdc_host_init()
1132 /* reduce TBG clock to 133 MHz. */ in pdc_host_init()
1144 tmp |= 0x00000900; /* set bits 11-9 = 100b, bits 8-6 = 100b */ in pdc_host_init()
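Both the TBG clock reduction and the slew-rate tweak follow the same init-time read-modify-write pattern on first-generation chips. A sketch under those comments' assumptions: PDC_TBG_MODE is the host register listed near the top of this file, the 2-bit clock-select field is assumed to sit at bits 17:16, and the slew-control register offset is passed in because it is not part of this excerpt:

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/types.h>

static void example_gen1_analog_tweaks(void __iomem *host_mmio,
				       unsigned int slew_ctl_offset)
{
	u32 tmp;

	/* reduce the TBG clock to 133 MHz (assumed field at bits 17:16) */
	tmp = readl(host_mmio + PDC_TBG_MODE);
	tmp &= ~0x30000;
	tmp |= 0x10000;
	writel(tmp, host_mmio + PDC_TBG_MODE);

	udelay(10);	/* let the clock settle */

	/* slew-rate control: bits 11-9 = 100b, bits 8-6 = 100b -> 0x900 */
	tmp = readl(host_mmio + slew_ctl_offset);
	tmp &= ~0xFC0;			/* clear bits 11..6 first */
	tmp |= 0x00000900;
	writel(tmp, host_mmio + slew_ctl_offset);
}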
1151 const struct ata_port_info *pi = &pdc_port_info[ent->driver_data]; in pdc_ata_init_one()
1159 ata_print_version_once(&pdev->dev, DRV_VERSION); in pdc_ata_init_one()
1167 if (rc == -EBUSY) in pdc_ata_init_one()
1175 if (pi->flags & PDC_FLAG_4_PORTS) in pdc_ata_init_one()
1180 if (pi->flags & PDC_FLAG_SATA_PATA) { in pdc_ata_init_one()
1186 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); in pdc_ata_init_one()
1188 dev_err(&pdev->dev, "failed to allocate host\n"); in pdc_ata_init_one()
1189 return -ENOMEM; in pdc_ata_init_one()
1191 hpriv = devm_kzalloc(&pdev->dev, sizeof *hpriv, GFP_KERNEL); in pdc_ata_init_one()
1193 return -ENOMEM; in pdc_ata_init_one()
1194 spin_lock_init(&hpriv->hard_reset_lock); in pdc_ata_init_one()
1195 host->private_data = hpriv; in pdc_ata_init_one()
1196 host->iomap = pcim_iomap_table(pdev); in pdc_ata_init_one()
1198 is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags); in pdc_ata_init_one()
1199 for (i = 0; i < host->n_ports; i++) { in pdc_ata_init_one()
1200 struct ata_port *ap = host->ports[i]; in pdc_ata_init_one()
1207 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio"); in pdc_ata_init_one()
1214 rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK); in pdc_ata_init_one()
1220 return ata_host_activate(host, pdev->irq, pdc_interrupt, IRQF_SHARED, in pdc_ata_init_one()
1227 MODULE_DESCRIPTION("Promise ATA TX2/TX4/TX4000 low-level driver");