/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * sata_mv TODO list:
 *
 * --> Errata workaround for NCQ device errors.
 *
 * --> More errata workarounds for PCI-X.
 *
 * --> Complete a full errata audit for all chipsets to identify others.
 *
 * --> ATAPI support (Marvell claims the 60xx/70xx chips can do it).
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> [Experiment, low priority] Investigate interrupt coalescing.
 *       Quite often, especially with PCI Message Signalled Interrupts (MSI),
 *       the overhead reduced by interrupt mitigation is not worth the
 *       latency cost.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *       creating LibATA target mode support would be very interesting.
 *
 *       Target mode, for those without docs, is the ability to directly
 *       connect two SATA ports.
49 */ 50 51 #include <linux/kernel.h> 52 #include <linux/module.h> 53 #include <linux/pci.h> 54 #include <linux/init.h> 55 #include <linux/blkdev.h> 56 #include <linux/delay.h> 57 #include <linux/interrupt.h> 58 #include <linux/dmapool.h> 59 #include <linux/dma-mapping.h> 60 #include <linux/device.h> 61 #include <linux/platform_device.h> 62 #include <linux/ata_platform.h> 63 #include <linux/mbus.h> 64 #include <linux/bitops.h> 65 #include <scsi/scsi_host.h> 66 #include <scsi/scsi_cmnd.h> 67 #include <scsi/scsi_device.h> 68 #include <linux/libata.h> 69 70 #define DRV_NAME "sata_mv" 71 #define DRV_VERSION "1.25" 72 73 enum { 74 /* BAR's are enumerated in terms of pci_resource_start() terms */ 75 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */ 76 MV_IO_BAR = 2, /* offset 0x18: IO space */ 77 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */ 78 79 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */ 80 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */ 81 82 MV_PCI_REG_BASE = 0, 83 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */ 84 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08), 85 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88), 86 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c), 87 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc), 88 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0), 89 90 MV_SATAHC0_REG_BASE = 0x20000, 91 MV_FLASH_CTL_OFS = 0x1046c, 92 MV_GPIO_PORT_CTL_OFS = 0x104f0, 93 MV_RESET_CFG_OFS = 0x180d8, 94 95 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ, 96 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ, 97 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */ 98 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ, 99 100 MV_MAX_Q_DEPTH = 32, 101 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1, 102 103 /* CRQB needs alignment on a 1KB boundary. Size == 1KB 104 * CRPB needs alignment on a 256B boundary. Size == 256B 105 * ePRD (SG) entries need alignment on a 16B boundary. 
Size == 16B 106 */ 107 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH), 108 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH), 109 MV_MAX_SG_CT = 256, 110 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT), 111 112 /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */ 113 MV_PORT_HC_SHIFT = 2, 114 MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */ 115 /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */ 116 MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */ 117 118 /* Host Flags */ 119 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ 120 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ 121 122 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 123 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | 124 ATA_FLAG_PIO_POLLING, 125 126 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE, 127 128 MV_GENIIE_FLAGS = MV_COMMON_FLAGS | MV_6XXX_FLAGS | 129 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA | 130 ATA_FLAG_NCQ | ATA_FLAG_AN, 131 132 CRQB_FLAG_READ = (1 << 0), 133 CRQB_TAG_SHIFT = 1, 134 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */ 135 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */ 136 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */ 137 CRQB_CMD_ADDR_SHIFT = 8, 138 CRQB_CMD_CS = (0x2 << 11), 139 CRQB_CMD_LAST = (1 << 15), 140 141 CRPB_FLAG_STATUS_SHIFT = 8, 142 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */ 143 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */ 144 145 EPRD_FLAG_END_OF_TBL = (1 << 31), 146 147 /* PCI interface registers */ 148 149 PCI_COMMAND_OFS = 0xc00, 150 PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */ 151 152 PCI_MAIN_CMD_STS_OFS = 0xd30, 153 STOP_PCI_MASTER = (1 << 2), 154 PCI_MASTER_EMPTY = (1 << 3), 155 GLOB_SFT_RST = (1 << 4), 156 157 MV_PCI_MODE_OFS = 0xd00, 158 MV_PCI_MODE_MASK = 0x30, 159 160 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c, 161 MV_PCI_DISC_TIMER = 0xd04, 162 MV_PCI_MSI_TRIGGER = 0xc38, 163 MV_PCI_SERR_MASK = 0xc28, 164 MV_PCI_XBAR_TMOUT_OFS = 0x1d04, 165 MV_PCI_ERR_LOW_ADDRESS = 0x1d40, 166 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44, 167 MV_PCI_ERR_ATTRIBUTE = 0x1d48, 168 MV_PCI_ERR_COMMAND = 0x1d50, 169 170 PCI_IRQ_CAUSE_OFS = 0x1d58, 171 PCI_IRQ_MASK_OFS = 0x1d5c, 172 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */ 173 174 PCIE_IRQ_CAUSE_OFS = 0x1900, 175 PCIE_IRQ_MASK_OFS = 0x1910, 176 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */ 177 178 /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */ 179 PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60, 180 PCI_HC_MAIN_IRQ_MASK_OFS = 0x1d64, 181 SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020, 182 SOC_HC_MAIN_IRQ_MASK_OFS = 0x20024, 183 ERR_IRQ = (1 << 0), /* shift by port # */ 184 DONE_IRQ = (1 << 1), /* shift by port # */ 185 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */ 186 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */ 187 PCI_ERR = (1 << 18), 188 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */ 189 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */ 190 PORTS_0_3_COAL_DONE = (1 << 8), 191 PORTS_4_7_COAL_DONE = (1 << 17), 192 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */ 193 GPIO_INT = (1 << 22), 194 SELF_INT = (1 << 23), 195 TWSI_INT = (1 << 24), 196 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */ 197 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */ 198 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */ 199 200 /* SATAHC registers */ 201 HC_CFG_OFS = 0, 202 203 HC_IRQ_CAUSE_OFS = 0x14, 204 DMA_IRQ = (1 << 0), /* shift by port # */ 205 HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */ 206 DEV_IRQ = (1 << 8), /* shift by port # */ 207 208 /* Shadow block registers */ 209 SHD_BLK_OFS = 
0x100, 210 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */ 211 212 /* SATA registers */ 213 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ 214 SATA_ACTIVE_OFS = 0x350, 215 SATA_FIS_IRQ_CAUSE_OFS = 0x364, 216 SATA_FIS_IRQ_AN = (1 << 9), /* async notification */ 217 218 LTMODE_OFS = 0x30c, 219 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */ 220 221 PHY_MODE3 = 0x310, 222 PHY_MODE4 = 0x314, 223 PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */ 224 PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */ 225 PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */ 226 PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */ 227 228 PHY_MODE2 = 0x330, 229 SATA_IFCTL_OFS = 0x344, 230 SATA_TESTCTL_OFS = 0x348, 231 SATA_IFSTAT_OFS = 0x34c, 232 VENDOR_UNIQUE_FIS_OFS = 0x35c, 233 234 FISCFG_OFS = 0x360, 235 FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */ 236 FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */ 237 238 MV5_PHY_MODE = 0x74, 239 MV5_LTMODE_OFS = 0x30, 240 MV5_PHY_CTL_OFS = 0x0C, 241 SATA_INTERFACE_CFG_OFS = 0x050, 242 243 MV_M2_PREAMP_MASK = 0x7e0, 244 245 /* Port registers */ 246 EDMA_CFG_OFS = 0, 247 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */ 248 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */ 249 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ 250 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */ 251 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ 252 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */ 253 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */ 254 255 EDMA_ERR_IRQ_CAUSE_OFS = 0x8, 256 EDMA_ERR_IRQ_MASK_OFS = 0xc, 257 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */ 258 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */ 259 EDMA_ERR_DEV = (1 << 2), /* device error */ 260 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */ 261 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */ 262 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */ 263 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */ 264 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */ 265 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */ 266 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */ 267 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */ 268 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */ 269 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */ 270 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */ 271 272 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */ 273 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */ 274 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */ 275 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */ 276 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */ 277 278 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */ 279 280 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */ 281 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */ 282 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */ 283 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */ 284 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */ 285 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */ 286 287 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */ 288 289 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */ 290 EDMA_ERR_OVERRUN_5 = (1 << 5), 291 EDMA_ERR_UNDERRUN_5 = (1 << 
6), 292 293 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 | 294 EDMA_ERR_LNK_CTRL_RX_1 | 295 EDMA_ERR_LNK_CTRL_RX_3 | 296 EDMA_ERR_LNK_CTRL_TX, 297 298 EDMA_EH_FREEZE = EDMA_ERR_D_PAR | 299 EDMA_ERR_PRD_PAR | 300 EDMA_ERR_DEV_DCON | 301 EDMA_ERR_DEV_CON | 302 EDMA_ERR_SERR | 303 EDMA_ERR_SELF_DIS | 304 EDMA_ERR_CRQB_PAR | 305 EDMA_ERR_CRPB_PAR | 306 EDMA_ERR_INTRL_PAR | 307 EDMA_ERR_IORDY | 308 EDMA_ERR_LNK_CTRL_RX_2 | 309 EDMA_ERR_LNK_DATA_RX | 310 EDMA_ERR_LNK_DATA_TX | 311 EDMA_ERR_TRANS_PROTO, 312 313 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR | 314 EDMA_ERR_PRD_PAR | 315 EDMA_ERR_DEV_DCON | 316 EDMA_ERR_DEV_CON | 317 EDMA_ERR_OVERRUN_5 | 318 EDMA_ERR_UNDERRUN_5 | 319 EDMA_ERR_SELF_DIS_5 | 320 EDMA_ERR_CRQB_PAR | 321 EDMA_ERR_CRPB_PAR | 322 EDMA_ERR_INTRL_PAR | 323 EDMA_ERR_IORDY, 324 325 EDMA_REQ_Q_BASE_HI_OFS = 0x10, 326 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */ 327 328 EDMA_REQ_Q_OUT_PTR_OFS = 0x18, 329 EDMA_REQ_Q_PTR_SHIFT = 5, 330 331 EDMA_RSP_Q_BASE_HI_OFS = 0x1c, 332 EDMA_RSP_Q_IN_PTR_OFS = 0x20, 333 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */ 334 EDMA_RSP_Q_PTR_SHIFT = 3, 335 336 EDMA_CMD_OFS = 0x28, /* EDMA command register */ 337 EDMA_EN = (1 << 0), /* enable EDMA */ 338 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */ 339 EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */ 340 341 EDMA_STATUS_OFS = 0x30, /* EDMA engine status */ 342 EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */ 343 EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */ 344 345 EDMA_IORDY_TMOUT_OFS = 0x34, 346 EDMA_ARB_CFG_OFS = 0x38, 347 348 EDMA_HALTCOND_OFS = 0x60, /* GenIIe halt conditions */ 349 350 /* Host private flags (hp_flags) */ 351 MV_HP_FLAG_MSI = (1 << 0), 352 MV_HP_ERRATA_50XXB0 = (1 << 1), 353 MV_HP_ERRATA_50XXB2 = (1 << 2), 354 MV_HP_ERRATA_60X1B2 = (1 << 3), 355 MV_HP_ERRATA_60X1C0 = (1 << 4), 356 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */ 357 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ 358 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ 359 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ 360 MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */ 361 MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */ 362 363 /* Port private flags (pp_flags) */ 364 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ 365 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ 366 MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */ 367 MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */ 368 }; 369 370 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) 371 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) 372 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) 373 #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE) 374 #define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC) 375 376 #define WINDOW_CTRL(i) (0x20030 + ((i) << 4)) 377 #define WINDOW_BASE(i) (0x20034 + ((i) << 4)) 378 379 enum { 380 /* DMA boundary 0xffff is required by the s/g splitting 381 * we need on /length/ in mv_fill-sg(). 
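 *
 * Concretely, the splitting loop in mv_fill_sg() clamps each ePRD so
 * that it never crosses a 64KB address boundary: a segment of length
 * 0x3000 at bus address 0x1234f000 would be emitted as two ePRDs,
 * 0x1000 bytes up to the boundary at 0x12350000 and then the
 * remaining 0x2000 bytes.  That worst case of two ePRDs per segment
 * is why .sg_tablesize in the scsi_host_templates below is only
 * MV_MAX_SG_CT / 2.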
382 */ 383 MV_DMA_BOUNDARY = 0xffffU, 384 385 /* mask of register bits containing lower 32 bits 386 * of EDMA request queue DMA address 387 */ 388 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U, 389 390 /* ditto, for response queue */ 391 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U, 392 }; 393 394 enum chip_type { 395 chip_504x, 396 chip_508x, 397 chip_5080, 398 chip_604x, 399 chip_608x, 400 chip_6042, 401 chip_7042, 402 chip_soc, 403 }; 404 405 /* Command ReQuest Block: 32B */ 406 struct mv_crqb { 407 __le32 sg_addr; 408 __le32 sg_addr_hi; 409 __le16 ctrl_flags; 410 __le16 ata_cmd[11]; 411 }; 412 413 struct mv_crqb_iie { 414 __le32 addr; 415 __le32 addr_hi; 416 __le32 flags; 417 __le32 len; 418 __le32 ata_cmd[4]; 419 }; 420 421 /* Command ResPonse Block: 8B */ 422 struct mv_crpb { 423 __le16 id; 424 __le16 flags; 425 __le32 tmstmp; 426 }; 427 428 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */ 429 struct mv_sg { 430 __le32 addr; 431 __le32 flags_size; 432 __le32 addr_hi; 433 __le32 reserved; 434 }; 435 436 struct mv_port_priv { 437 struct mv_crqb *crqb; 438 dma_addr_t crqb_dma; 439 struct mv_crpb *crpb; 440 dma_addr_t crpb_dma; 441 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH]; 442 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH]; 443 444 unsigned int req_idx; 445 unsigned int resp_idx; 446 447 u32 pp_flags; 448 unsigned int delayed_eh_pmp_map; 449 }; 450 451 struct mv_port_signal { 452 u32 amps; 453 u32 pre; 454 }; 455 456 struct mv_host_priv { 457 u32 hp_flags; 458 u32 main_irq_mask; 459 struct mv_port_signal signal[8]; 460 const struct mv_hw_ops *ops; 461 int n_ports; 462 void __iomem *base; 463 void __iomem *main_irq_cause_addr; 464 void __iomem *main_irq_mask_addr; 465 u32 irq_cause_ofs; 466 u32 irq_mask_ofs; 467 u32 unmask_all_irqs; 468 /* 469 * These consistent DMA memory pools give us guaranteed 470 * alignment for hardware-accessed data structures, 471 * and less memory waste in accomplishing the alignment. 
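 *
 * A minimal sketch of how such a pool can be set up, with the element
 * size doubling as the alignment so every dma_pool_alloc() result is
 * suitably aligned:
 *
 *	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev,
 *					    MV_CRQB_Q_SZ, MV_CRQB_Q_SZ, 0);
 *
 * Compared with dma_alloc_coherent(), this avoids over-allocating
 * just to round each queue up to its required boundary.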
472 */ 473 struct dma_pool *crqb_pool; 474 struct dma_pool *crpb_pool; 475 struct dma_pool *sg_tbl_pool; 476 }; 477 478 struct mv_hw_ops { 479 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio, 480 unsigned int port); 481 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio); 482 void (*read_preamp)(struct mv_host_priv *hpriv, int idx, 483 void __iomem *mmio); 484 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio, 485 unsigned int n_hc); 486 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio); 487 void (*reset_bus)(struct ata_host *host, void __iomem *mmio); 488 }; 489 490 static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val); 491 static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val); 492 static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val); 493 static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val); 494 static int mv_port_start(struct ata_port *ap); 495 static void mv_port_stop(struct ata_port *ap); 496 static int mv_qc_defer(struct ata_queued_cmd *qc); 497 static void mv_qc_prep(struct ata_queued_cmd *qc); 498 static void mv_qc_prep_iie(struct ata_queued_cmd *qc); 499 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); 500 static int mv_hardreset(struct ata_link *link, unsigned int *class, 501 unsigned long deadline); 502 static void mv_eh_freeze(struct ata_port *ap); 503 static void mv_eh_thaw(struct ata_port *ap); 504 static void mv6_dev_config(struct ata_device *dev); 505 506 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 507 unsigned int port); 508 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio); 509 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, 510 void __iomem *mmio); 511 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 512 unsigned int n_hc); 513 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); 514 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio); 515 516 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 517 unsigned int port); 518 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio); 519 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, 520 void __iomem *mmio); 521 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 522 unsigned int n_hc); 523 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); 524 static void mv_soc_enable_leds(struct mv_host_priv *hpriv, 525 void __iomem *mmio); 526 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx, 527 void __iomem *mmio); 528 static int mv_soc_reset_hc(struct mv_host_priv *hpriv, 529 void __iomem *mmio, unsigned int n_hc); 530 static void mv_soc_reset_flash(struct mv_host_priv *hpriv, 531 void __iomem *mmio); 532 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio); 533 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio); 534 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, 535 unsigned int port_no); 536 static int mv_stop_edma(struct ata_port *ap); 537 static int mv_stop_edma_engine(void __iomem *port_mmio); 538 static void mv_edma_cfg(struct ata_port *ap, int want_ncq); 539 540 static void mv_pmp_select(struct ata_port *ap, int pmp); 541 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, 542 unsigned long deadline); 
543 static int mv_softreset(struct ata_link *link, unsigned int *class, 544 unsigned long deadline); 545 static void mv_pmp_error_handler(struct ata_port *ap); 546 static void mv_process_crpb_entries(struct ata_port *ap, 547 struct mv_port_priv *pp); 548 549 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below 550 * because we have to allow room for worst case splitting of 551 * PRDs for 64K boundaries in mv_fill_sg(). 552 */ 553 static struct scsi_host_template mv5_sht = { 554 ATA_BASE_SHT(DRV_NAME), 555 .sg_tablesize = MV_MAX_SG_CT / 2, 556 .dma_boundary = MV_DMA_BOUNDARY, 557 }; 558 559 static struct scsi_host_template mv6_sht = { 560 ATA_NCQ_SHT(DRV_NAME), 561 .can_queue = MV_MAX_Q_DEPTH - 1, 562 .sg_tablesize = MV_MAX_SG_CT / 2, 563 .dma_boundary = MV_DMA_BOUNDARY, 564 }; 565 566 static struct ata_port_operations mv5_ops = { 567 .inherits = &ata_sff_port_ops, 568 569 .qc_defer = mv_qc_defer, 570 .qc_prep = mv_qc_prep, 571 .qc_issue = mv_qc_issue, 572 573 .freeze = mv_eh_freeze, 574 .thaw = mv_eh_thaw, 575 .hardreset = mv_hardreset, 576 .error_handler = ata_std_error_handler, /* avoid SFF EH */ 577 .post_internal_cmd = ATA_OP_NULL, 578 579 .scr_read = mv5_scr_read, 580 .scr_write = mv5_scr_write, 581 582 .port_start = mv_port_start, 583 .port_stop = mv_port_stop, 584 }; 585 586 static struct ata_port_operations mv6_ops = { 587 .inherits = &mv5_ops, 588 .dev_config = mv6_dev_config, 589 .scr_read = mv_scr_read, 590 .scr_write = mv_scr_write, 591 592 .pmp_hardreset = mv_pmp_hardreset, 593 .pmp_softreset = mv_softreset, 594 .softreset = mv_softreset, 595 .error_handler = mv_pmp_error_handler, 596 }; 597 598 static struct ata_port_operations mv_iie_ops = { 599 .inherits = &mv6_ops, 600 .dev_config = ATA_OP_NULL, 601 .qc_prep = mv_qc_prep_iie, 602 }; 603 604 static const struct ata_port_info mv_port_info[] = { 605 { /* chip_504x */ 606 .flags = MV_COMMON_FLAGS, 607 .pio_mask = 0x1f, /* pio0-4 */ 608 .udma_mask = ATA_UDMA6, 609 .port_ops = &mv5_ops, 610 }, 611 { /* chip_508x */ 612 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC, 613 .pio_mask = 0x1f, /* pio0-4 */ 614 .udma_mask = ATA_UDMA6, 615 .port_ops = &mv5_ops, 616 }, 617 { /* chip_5080 */ 618 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC, 619 .pio_mask = 0x1f, /* pio0-4 */ 620 .udma_mask = ATA_UDMA6, 621 .port_ops = &mv5_ops, 622 }, 623 { /* chip_604x */ 624 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | 625 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA | 626 ATA_FLAG_NCQ, 627 .pio_mask = 0x1f, /* pio0-4 */ 628 .udma_mask = ATA_UDMA6, 629 .port_ops = &mv6_ops, 630 }, 631 { /* chip_608x */ 632 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS | 633 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA | 634 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC, 635 .pio_mask = 0x1f, /* pio0-4 */ 636 .udma_mask = ATA_UDMA6, 637 .port_ops = &mv6_ops, 638 }, 639 { /* chip_6042 */ 640 .flags = MV_GENIIE_FLAGS, 641 .pio_mask = 0x1f, /* pio0-4 */ 642 .udma_mask = ATA_UDMA6, 643 .port_ops = &mv_iie_ops, 644 }, 645 { /* chip_7042 */ 646 .flags = MV_GENIIE_FLAGS, 647 .pio_mask = 0x1f, /* pio0-4 */ 648 .udma_mask = ATA_UDMA6, 649 .port_ops = &mv_iie_ops, 650 }, 651 { /* chip_soc */ 652 .flags = MV_GENIIE_FLAGS, 653 .pio_mask = 0x1f, /* pio0-4 */ 654 .udma_mask = ATA_UDMA6, 655 .port_ops = &mv_iie_ops, 656 }, 657 }; 658 659 static const struct pci_device_id mv_pci_tbl[] = { 660 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x }, 661 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x }, 662 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 }, 663 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x }, 664 /* RocketRAID 1720/174x have different 
identifiers */ 665 { PCI_VDEVICE(TTI, 0x1720), chip_6042 }, 666 { PCI_VDEVICE(TTI, 0x1740), chip_6042 }, 667 { PCI_VDEVICE(TTI, 0x1742), chip_6042 }, 668 669 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x }, 670 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x }, 671 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 }, 672 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x }, 673 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x }, 674 675 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x }, 676 677 /* Adaptec 1430SA */ 678 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 }, 679 680 /* Marvell 7042 support */ 681 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 }, 682 683 /* Highpoint RocketRAID PCIe series */ 684 { PCI_VDEVICE(TTI, 0x2300), chip_7042 }, 685 { PCI_VDEVICE(TTI, 0x2310), chip_7042 }, 686 687 { } /* terminate list */ 688 }; 689 690 static const struct mv_hw_ops mv5xxx_ops = { 691 .phy_errata = mv5_phy_errata, 692 .enable_leds = mv5_enable_leds, 693 .read_preamp = mv5_read_preamp, 694 .reset_hc = mv5_reset_hc, 695 .reset_flash = mv5_reset_flash, 696 .reset_bus = mv5_reset_bus, 697 }; 698 699 static const struct mv_hw_ops mv6xxx_ops = { 700 .phy_errata = mv6_phy_errata, 701 .enable_leds = mv6_enable_leds, 702 .read_preamp = mv6_read_preamp, 703 .reset_hc = mv6_reset_hc, 704 .reset_flash = mv6_reset_flash, 705 .reset_bus = mv_reset_pci_bus, 706 }; 707 708 static const struct mv_hw_ops mv_soc_ops = { 709 .phy_errata = mv6_phy_errata, 710 .enable_leds = mv_soc_enable_leds, 711 .read_preamp = mv_soc_read_preamp, 712 .reset_hc = mv_soc_reset_hc, 713 .reset_flash = mv_soc_reset_flash, 714 .reset_bus = mv_soc_reset_bus, 715 }; 716 717 /* 718 * Functions 719 */ 720 721 static inline void writelfl(unsigned long data, void __iomem *addr) 722 { 723 writel(data, addr); 724 (void) readl(addr); /* flush to avoid PCI posted write */ 725 } 726 727 static inline unsigned int mv_hc_from_port(unsigned int port) 728 { 729 return port >> MV_PORT_HC_SHIFT; 730 } 731 732 static inline unsigned int mv_hardport_from_port(unsigned int port) 733 { 734 return port & MV_PORT_MASK; 735 } 736 737 /* 738 * Consolidate some rather tricky bit shift calculations. 739 * This is hot-path stuff, so not a function. 740 * Simple code, with two return values, so macro rather than inline. 741 * 742 * port is the sole input, in range 0..7. 743 * shift is one output, for use with main_irq_cause / main_irq_mask registers. 744 * hardport is the other output, in range 0..3. 745 * 746 * Note that port and hardport may be the same variable in some cases. 
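 *
 * Worked example: for port 5, mv_hc_from_port() yields hc 1 and
 * mv_hardport_from_port() yields hardport 1, so
 * shift = 1 * HC_SHIFT + 1 * 2 = 11.  ERR_IRQ << shift and
 * DONE_IRQ << shift then select bits 11 and 12 of the main
 * cause/mask registers, inside the "bits 9-17 = HC1's ports" range.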
747 */ 748 #define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \ 749 { \ 750 shift = mv_hc_from_port(port) * HC_SHIFT; \ 751 hardport = mv_hardport_from_port(port); \ 752 shift += hardport * 2; \ 753 } 754 755 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc) 756 { 757 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ)); 758 } 759 760 static inline void __iomem *mv_hc_base_from_port(void __iomem *base, 761 unsigned int port) 762 { 763 return mv_hc_base(base, mv_hc_from_port(port)); 764 } 765 766 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port) 767 { 768 return mv_hc_base_from_port(base, port) + 769 MV_SATAHC_ARBTR_REG_SZ + 770 (mv_hardport_from_port(port) * MV_PORT_REG_SZ); 771 } 772 773 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port) 774 { 775 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port); 776 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL; 777 778 return hc_mmio + ofs; 779 } 780 781 static inline void __iomem *mv_host_base(struct ata_host *host) 782 { 783 struct mv_host_priv *hpriv = host->private_data; 784 return hpriv->base; 785 } 786 787 static inline void __iomem *mv_ap_base(struct ata_port *ap) 788 { 789 return mv_port_base(mv_host_base(ap->host), ap->port_no); 790 } 791 792 static inline int mv_get_hc_count(unsigned long port_flags) 793 { 794 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1); 795 } 796 797 static void mv_set_edma_ptrs(void __iomem *port_mmio, 798 struct mv_host_priv *hpriv, 799 struct mv_port_priv *pp) 800 { 801 u32 index; 802 803 /* 804 * initialize request queue 805 */ 806 pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */ 807 index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; 808 809 WARN_ON(pp->crqb_dma & 0x3ff); 810 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); 811 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index, 812 port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 813 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); 814 815 /* 816 * initialize response queue 817 */ 818 pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */ 819 index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT; 820 821 WARN_ON(pp->crpb_dma & 0xff); 822 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); 823 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); 824 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index, 825 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 826 } 827 828 static void mv_set_main_irq_mask(struct ata_host *host, 829 u32 disable_bits, u32 enable_bits) 830 { 831 struct mv_host_priv *hpriv = host->private_data; 832 u32 old_mask, new_mask; 833 834 old_mask = hpriv->main_irq_mask; 835 new_mask = (old_mask & ~disable_bits) | enable_bits; 836 if (new_mask != old_mask) { 837 hpriv->main_irq_mask = new_mask; 838 writelfl(new_mask, hpriv->main_irq_mask_addr); 839 } 840 } 841 842 static void mv_enable_port_irqs(struct ata_port *ap, 843 unsigned int port_bits) 844 { 845 unsigned int shift, hardport, port = ap->port_no; 846 u32 disable_bits, enable_bits; 847 848 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); 849 850 disable_bits = (DONE_IRQ | ERR_IRQ) << shift; 851 enable_bits = port_bits << shift; 852 mv_set_main_irq_mask(ap->host, disable_bits, enable_bits); 853 } 854 855 /** 856 * mv_start_dma - Enable eDMA engine 857 * @base: port base address 858 * @pp: port private data 859 * 860 * Verify the local cache of the eDMA state is accurate with a 861 * WARN_ON. 862 * 863 * LOCKING: 864 * Inherited from caller. 
865 */ 866 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio, 867 struct mv_port_priv *pp, u8 protocol) 868 { 869 int want_ncq = (protocol == ATA_PROT_NCQ); 870 871 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { 872 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0); 873 if (want_ncq != using_ncq) 874 mv_stop_edma(ap); 875 } 876 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { 877 struct mv_host_priv *hpriv = ap->host->private_data; 878 int hardport = mv_hardport_from_port(ap->port_no); 879 void __iomem *hc_mmio = mv_hc_base_from_port( 880 mv_host_base(ap->host), ap->port_no); 881 u32 hc_irq_cause; 882 883 /* clear EDMA event indicators, if any */ 884 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 885 886 /* clear pending irq events */ 887 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport); 888 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); 889 890 mv_edma_cfg(ap, want_ncq); 891 892 /* clear FIS IRQ Cause */ 893 if (IS_GEN_IIE(hpriv)) 894 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS); 895 896 mv_set_edma_ptrs(port_mmio, hpriv, pp); 897 mv_enable_port_irqs(ap, DONE_IRQ|ERR_IRQ); 898 899 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS); 900 pp->pp_flags |= MV_PP_FLAG_EDMA_EN; 901 } 902 } 903 904 static void mv_wait_for_edma_empty_idle(struct ata_port *ap) 905 { 906 void __iomem *port_mmio = mv_ap_base(ap); 907 const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE); 908 const int per_loop = 5, timeout = (15 * 1000 / per_loop); 909 int i; 910 911 /* 912 * Wait for the EDMA engine to finish transactions in progress. 913 * No idea what a good "timeout" value might be, but measurements 914 * indicate that it often requires hundreds of microseconds 915 * with two drives in-use. So we use the 15msec value above 916 * as a rough guess at what even more drives might require. 917 */ 918 for (i = 0; i < timeout; ++i) { 919 u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS); 920 if ((edma_stat & empty_idle) == empty_idle) 921 break; 922 udelay(per_loop); 923 } 924 /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */ 925 } 926 927 /** 928 * mv_stop_edma_engine - Disable eDMA engine 929 * @port_mmio: io base address 930 * 931 * LOCKING: 932 * Inherited from caller. 933 */ 934 static int mv_stop_edma_engine(void __iomem *port_mmio) 935 { 936 int i; 937 938 /* Disable eDMA. The disable bit auto clears. */ 939 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); 940 941 /* Wait for the chip to confirm eDMA is off. 
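 * The polling loop below checks EDMA_EN every 10 microseconds, up to
 * 10000 times, so the worst case is roughly 100 ms of waiting before
 * we give up and report -EIO.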
*/ 942 for (i = 10000; i > 0; i--) { 943 u32 reg = readl(port_mmio + EDMA_CMD_OFS); 944 if (!(reg & EDMA_EN)) 945 return 0; 946 udelay(10); 947 } 948 return -EIO; 949 } 950 951 static int mv_stop_edma(struct ata_port *ap) 952 { 953 void __iomem *port_mmio = mv_ap_base(ap); 954 struct mv_port_priv *pp = ap->private_data; 955 956 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) 957 return 0; 958 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 959 mv_wait_for_edma_empty_idle(ap); 960 if (mv_stop_edma_engine(port_mmio)) { 961 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); 962 return -EIO; 963 } 964 return 0; 965 } 966 967 #ifdef ATA_DEBUG 968 static void mv_dump_mem(void __iomem *start, unsigned bytes) 969 { 970 int b, w; 971 for (b = 0; b < bytes; ) { 972 DPRINTK("%p: ", start + b); 973 for (w = 0; b < bytes && w < 4; w++) { 974 printk("%08x ", readl(start + b)); 975 b += sizeof(u32); 976 } 977 printk("\n"); 978 } 979 } 980 #endif 981 982 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes) 983 { 984 #ifdef ATA_DEBUG 985 int b, w; 986 u32 dw; 987 for (b = 0; b < bytes; ) { 988 DPRINTK("%02x: ", b); 989 for (w = 0; b < bytes && w < 4; w++) { 990 (void) pci_read_config_dword(pdev, b, &dw); 991 printk("%08x ", dw); 992 b += sizeof(u32); 993 } 994 printk("\n"); 995 } 996 #endif 997 } 998 static void mv_dump_all_regs(void __iomem *mmio_base, int port, 999 struct pci_dev *pdev) 1000 { 1001 #ifdef ATA_DEBUG 1002 void __iomem *hc_base = mv_hc_base(mmio_base, 1003 port >> MV_PORT_HC_SHIFT); 1004 void __iomem *port_base; 1005 int start_port, num_ports, p, start_hc, num_hcs, hc; 1006 1007 if (0 > port) { 1008 start_hc = start_port = 0; 1009 num_ports = 8; /* shld be benign for 4 port devs */ 1010 num_hcs = 2; 1011 } else { 1012 start_hc = port >> MV_PORT_HC_SHIFT; 1013 start_port = port; 1014 num_ports = num_hcs = 1; 1015 } 1016 DPRINTK("All registers for port(s) %u-%u:\n", start_port, 1017 num_ports > 1 ? 
num_ports - 1 : start_port); 1018 1019 if (NULL != pdev) { 1020 DPRINTK("PCI config space regs:\n"); 1021 mv_dump_pci_cfg(pdev, 0x68); 1022 } 1023 DPRINTK("PCI regs:\n"); 1024 mv_dump_mem(mmio_base+0xc00, 0x3c); 1025 mv_dump_mem(mmio_base+0xd00, 0x34); 1026 mv_dump_mem(mmio_base+0xf00, 0x4); 1027 mv_dump_mem(mmio_base+0x1d00, 0x6c); 1028 for (hc = start_hc; hc < start_hc + num_hcs; hc++) { 1029 hc_base = mv_hc_base(mmio_base, hc); 1030 DPRINTK("HC regs (HC %i):\n", hc); 1031 mv_dump_mem(hc_base, 0x1c); 1032 } 1033 for (p = start_port; p < start_port + num_ports; p++) { 1034 port_base = mv_port_base(mmio_base, p); 1035 DPRINTK("EDMA regs (port %i):\n", p); 1036 mv_dump_mem(port_base, 0x54); 1037 DPRINTK("SATA regs (port %i):\n", p); 1038 mv_dump_mem(port_base+0x300, 0x60); 1039 } 1040 #endif 1041 } 1042 1043 static unsigned int mv_scr_offset(unsigned int sc_reg_in) 1044 { 1045 unsigned int ofs; 1046 1047 switch (sc_reg_in) { 1048 case SCR_STATUS: 1049 case SCR_CONTROL: 1050 case SCR_ERROR: 1051 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32)); 1052 break; 1053 case SCR_ACTIVE: 1054 ofs = SATA_ACTIVE_OFS; /* active is not with the others */ 1055 break; 1056 default: 1057 ofs = 0xffffffffU; 1058 break; 1059 } 1060 return ofs; 1061 } 1062 1063 static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val) 1064 { 1065 unsigned int ofs = mv_scr_offset(sc_reg_in); 1066 1067 if (ofs != 0xffffffffU) { 1068 *val = readl(mv_ap_base(link->ap) + ofs); 1069 return 0; 1070 } else 1071 return -EINVAL; 1072 } 1073 1074 static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) 1075 { 1076 unsigned int ofs = mv_scr_offset(sc_reg_in); 1077 1078 if (ofs != 0xffffffffU) { 1079 writelfl(val, mv_ap_base(link->ap) + ofs); 1080 return 0; 1081 } else 1082 return -EINVAL; 1083 } 1084 1085 static void mv6_dev_config(struct ata_device *adev) 1086 { 1087 /* 1088 * Deal with Gen-II ("mv6") hardware quirks/restrictions: 1089 * 1090 * Gen-II does not support NCQ over a port multiplier 1091 * (no FIS-based switching). 1092 */ 1093 if (adev->flags & ATA_DFLAG_NCQ) { 1094 if (sata_pmp_attached(adev->link->ap)) { 1095 adev->flags &= ~ATA_DFLAG_NCQ; 1096 ata_dev_printk(adev, KERN_INFO, 1097 "NCQ disabled for command-based switching\n"); 1098 } 1099 } 1100 } 1101 1102 static int mv_qc_defer(struct ata_queued_cmd *qc) 1103 { 1104 struct ata_link *link = qc->dev->link; 1105 struct ata_port *ap = link->ap; 1106 struct mv_port_priv *pp = ap->private_data; 1107 1108 /* 1109 * Don't allow new commands if we're in a delayed EH state 1110 * for NCQ and/or FIS-based switching. 1111 */ 1112 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) 1113 return ATA_DEFER_PORT; 1114 /* 1115 * If the port is completely idle, then allow the new qc. 1116 */ 1117 if (ap->nr_active_links == 0) 1118 return 0; 1119 1120 /* 1121 * The port is operating in host queuing mode (EDMA) with NCQ 1122 * enabled, allow multiple NCQ commands. EDMA also allows 1123 * queueing multiple DMA commands but libata core currently 1124 * doesn't allow it. 
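 *
 * To summarize the cases handled here: defer everything while delayed
 * EH is pending, accept anything on an idle port, and otherwise accept
 * only NCQ commands while EDMA is running with NCQ enabled.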
1125 */ 1126 if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) && 1127 (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol)) 1128 return 0; 1129 1130 return ATA_DEFER_PORT; 1131 } 1132 1133 static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs) 1134 { 1135 u32 new_fiscfg, old_fiscfg; 1136 u32 new_ltmode, old_ltmode; 1137 u32 new_haltcond, old_haltcond; 1138 1139 old_fiscfg = readl(port_mmio + FISCFG_OFS); 1140 old_ltmode = readl(port_mmio + LTMODE_OFS); 1141 old_haltcond = readl(port_mmio + EDMA_HALTCOND_OFS); 1142 1143 new_fiscfg = old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR); 1144 new_ltmode = old_ltmode & ~LTMODE_BIT8; 1145 new_haltcond = old_haltcond | EDMA_ERR_DEV; 1146 1147 if (want_fbs) { 1148 new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC; 1149 new_ltmode = old_ltmode | LTMODE_BIT8; 1150 if (want_ncq) 1151 new_haltcond &= ~EDMA_ERR_DEV; 1152 else 1153 new_fiscfg |= FISCFG_WAIT_DEV_ERR; 1154 } 1155 1156 if (new_fiscfg != old_fiscfg) 1157 writelfl(new_fiscfg, port_mmio + FISCFG_OFS); 1158 if (new_ltmode != old_ltmode) 1159 writelfl(new_ltmode, port_mmio + LTMODE_OFS); 1160 if (new_haltcond != old_haltcond) 1161 writelfl(new_haltcond, port_mmio + EDMA_HALTCOND_OFS); 1162 } 1163 1164 static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq) 1165 { 1166 struct mv_host_priv *hpriv = ap->host->private_data; 1167 u32 old, new; 1168 1169 /* workaround for 88SX60x1 FEr SATA#25 (part 1) */ 1170 old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS); 1171 if (want_ncq) 1172 new = old | (1 << 22); 1173 else 1174 new = old & ~(1 << 22); 1175 if (new != old) 1176 writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS); 1177 } 1178 1179 static void mv_edma_cfg(struct ata_port *ap, int want_ncq) 1180 { 1181 u32 cfg; 1182 struct mv_port_priv *pp = ap->private_data; 1183 struct mv_host_priv *hpriv = ap->host->private_data; 1184 void __iomem *port_mmio = mv_ap_base(ap); 1185 1186 /* set up non-NCQ EDMA configuration */ 1187 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ 1188 pp->pp_flags &= ~MV_PP_FLAG_FBS_EN; 1189 1190 if (IS_GEN_I(hpriv)) 1191 cfg |= (1 << 8); /* enab config burst size mask */ 1192 1193 else if (IS_GEN_II(hpriv)) { 1194 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; 1195 mv_60x1_errata_sata25(ap, want_ncq); 1196 1197 } else if (IS_GEN_IIE(hpriv)) { 1198 int want_fbs = sata_pmp_attached(ap); 1199 /* 1200 * Possible future enhancement: 1201 * 1202 * The chip can use FBS with non-NCQ, if we allow it, 1203 * But first we need to have the error handling in place 1204 * for this mode (datasheet section 7.3.15.4.2.3). 1205 * So disallow non-NCQ FBS for now. 
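 *
 * When FBS is enabled below (NCQ behind a port multiplier),
 * mv_config_fbs() sets FISCFG_SINGLE_SYNC and LTMODE_BIT8, and for
 * the NCQ case also drops EDMA_ERR_DEV from the halt conditions so a
 * device error does not stop EDMA; the delayed-EH handling in
 * mv_handle_fbs_ncq_dev_err() relies on that to drain the queue first.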
1206 */ 1207 want_fbs &= want_ncq; 1208 1209 mv_config_fbs(port_mmio, want_ncq, want_fbs); 1210 1211 if (want_fbs) { 1212 pp->pp_flags |= MV_PP_FLAG_FBS_EN; 1213 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */ 1214 } 1215 1216 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ 1217 cfg |= (1 << 22); /* enab 4-entry host queue cache */ 1218 if (!IS_SOC(hpriv)) 1219 cfg |= (1 << 18); /* enab early completion */ 1220 if (hpriv->hp_flags & MV_HP_CUT_THROUGH) 1221 cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */ 1222 } 1223 1224 if (want_ncq) { 1225 cfg |= EDMA_CFG_NCQ; 1226 pp->pp_flags |= MV_PP_FLAG_NCQ_EN; 1227 } else 1228 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN; 1229 1230 writelfl(cfg, port_mmio + EDMA_CFG_OFS); 1231 } 1232 1233 static void mv_port_free_dma_mem(struct ata_port *ap) 1234 { 1235 struct mv_host_priv *hpriv = ap->host->private_data; 1236 struct mv_port_priv *pp = ap->private_data; 1237 int tag; 1238 1239 if (pp->crqb) { 1240 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma); 1241 pp->crqb = NULL; 1242 } 1243 if (pp->crpb) { 1244 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma); 1245 pp->crpb = NULL; 1246 } 1247 /* 1248 * For GEN_I, there's no NCQ, so we have only a single sg_tbl. 1249 * For later hardware, we have one unique sg_tbl per NCQ tag. 1250 */ 1251 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { 1252 if (pp->sg_tbl[tag]) { 1253 if (tag == 0 || !IS_GEN_I(hpriv)) 1254 dma_pool_free(hpriv->sg_tbl_pool, 1255 pp->sg_tbl[tag], 1256 pp->sg_tbl_dma[tag]); 1257 pp->sg_tbl[tag] = NULL; 1258 } 1259 } 1260 } 1261 1262 /** 1263 * mv_port_start - Port specific init/start routine. 1264 * @ap: ATA channel to manipulate 1265 * 1266 * Allocate and point to DMA memory, init port private memory, 1267 * zero indices. 1268 * 1269 * LOCKING: 1270 * Inherited from caller. 1271 */ 1272 static int mv_port_start(struct ata_port *ap) 1273 { 1274 struct device *dev = ap->host->dev; 1275 struct mv_host_priv *hpriv = ap->host->private_data; 1276 struct mv_port_priv *pp; 1277 int tag; 1278 1279 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 1280 if (!pp) 1281 return -ENOMEM; 1282 ap->private_data = pp; 1283 1284 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma); 1285 if (!pp->crqb) 1286 return -ENOMEM; 1287 memset(pp->crqb, 0, MV_CRQB_Q_SZ); 1288 1289 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma); 1290 if (!pp->crpb) 1291 goto out_port_free_dma_mem; 1292 memset(pp->crpb, 0, MV_CRPB_Q_SZ); 1293 1294 /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */ 1295 if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0) 1296 ap->flags |= ATA_FLAG_AN; 1297 /* 1298 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl. 1299 * For later hardware, we need one unique sg_tbl per NCQ tag. 1300 */ 1301 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { 1302 if (tag == 0 || !IS_GEN_I(hpriv)) { 1303 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool, 1304 GFP_KERNEL, &pp->sg_tbl_dma[tag]); 1305 if (!pp->sg_tbl[tag]) 1306 goto out_port_free_dma_mem; 1307 } else { 1308 pp->sg_tbl[tag] = pp->sg_tbl[0]; 1309 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0]; 1310 } 1311 } 1312 return 0; 1313 1314 out_port_free_dma_mem: 1315 mv_port_free_dma_mem(ap); 1316 return -ENOMEM; 1317 } 1318 1319 /** 1320 * mv_port_stop - Port specific cleanup/stop routine. 1321 * @ap: ATA channel to manipulate 1322 * 1323 * Stop DMA, cleanup port memory. 1324 * 1325 * LOCKING: 1326 * This routine uses the host lock to protect the DMA stop. 
1327 */ 1328 static void mv_port_stop(struct ata_port *ap) 1329 { 1330 mv_stop_edma(ap); 1331 mv_enable_port_irqs(ap, 0); 1332 mv_port_free_dma_mem(ap); 1333 } 1334 1335 /** 1336 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries 1337 * @qc: queued command whose SG list to source from 1338 * 1339 * Populate the SG list and mark the last entry. 1340 * 1341 * LOCKING: 1342 * Inherited from caller. 1343 */ 1344 static void mv_fill_sg(struct ata_queued_cmd *qc) 1345 { 1346 struct mv_port_priv *pp = qc->ap->private_data; 1347 struct scatterlist *sg; 1348 struct mv_sg *mv_sg, *last_sg = NULL; 1349 unsigned int si; 1350 1351 mv_sg = pp->sg_tbl[qc->tag]; 1352 for_each_sg(qc->sg, sg, qc->n_elem, si) { 1353 dma_addr_t addr = sg_dma_address(sg); 1354 u32 sg_len = sg_dma_len(sg); 1355 1356 while (sg_len) { 1357 u32 offset = addr & 0xffff; 1358 u32 len = sg_len; 1359 1360 if ((offset + sg_len > 0x10000)) 1361 len = 0x10000 - offset; 1362 1363 mv_sg->addr = cpu_to_le32(addr & 0xffffffff); 1364 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16); 1365 mv_sg->flags_size = cpu_to_le32(len & 0xffff); 1366 1367 sg_len -= len; 1368 addr += len; 1369 1370 last_sg = mv_sg; 1371 mv_sg++; 1372 } 1373 } 1374 1375 if (likely(last_sg)) 1376 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL); 1377 } 1378 1379 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) 1380 { 1381 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | 1382 (last ? CRQB_CMD_LAST : 0); 1383 *cmdw = cpu_to_le16(tmp); 1384 } 1385 1386 /** 1387 * mv_qc_prep - Host specific command preparation. 1388 * @qc: queued command to prepare 1389 * 1390 * This routine simply redirects to the general purpose routine 1391 * if command is not DMA. Else, it handles prep of the CRQB 1392 * (command request block), does some sanity checking, and calls 1393 * the SG load routine. 1394 * 1395 * LOCKING: 1396 * Inherited from caller. 1397 */ 1398 static void mv_qc_prep(struct ata_queued_cmd *qc) 1399 { 1400 struct ata_port *ap = qc->ap; 1401 struct mv_port_priv *pp = ap->private_data; 1402 __le16 *cw; 1403 struct ata_taskfile *tf; 1404 u16 flags = 0; 1405 unsigned in_index; 1406 1407 if ((qc->tf.protocol != ATA_PROT_DMA) && 1408 (qc->tf.protocol != ATA_PROT_NCQ)) 1409 return; 1410 1411 /* Fill in command request block 1412 */ 1413 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) 1414 flags |= CRQB_FLAG_READ; 1415 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); 1416 flags |= qc->tag << CRQB_TAG_SHIFT; 1417 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; 1418 1419 /* get current queue index from software */ 1420 in_index = pp->req_idx; 1421 1422 pp->crqb[in_index].sg_addr = 1423 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); 1424 pp->crqb[in_index].sg_addr_hi = 1425 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); 1426 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); 1427 1428 cw = &pp->crqb[in_index].ata_cmd[0]; 1429 tf = &qc->tf; 1430 1431 /* Sadly, the CRQB cannot accomodate all registers--there are 1432 * only 11 bytes...so we must pick and choose required 1433 * registers based on the command. So, we drop feature and 1434 * hob_feature for [RW] DMA commands, but they are needed for 1435 * NCQ. NCQ will drop hob_nsect, which is not needed there 1436 * (nsect is used only for the tag; feat/hob_feat hold true nsect). 
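 *
 * Counting it out: NCQ packs hob_feature, feature, nsect, hob_lbal,
 * lbal, hob_lbam, lbam, hob_lbah, lbah, device and command (exactly
 * the eleven slots available), while [RW] DMA [EXT] fits in ten by
 * dropping the feature registers and keeping hob_nsect instead.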
1437 */ 1438 switch (tf->command) { 1439 case ATA_CMD_READ: 1440 case ATA_CMD_READ_EXT: 1441 case ATA_CMD_WRITE: 1442 case ATA_CMD_WRITE_EXT: 1443 case ATA_CMD_WRITE_FUA_EXT: 1444 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0); 1445 break; 1446 case ATA_CMD_FPDMA_READ: 1447 case ATA_CMD_FPDMA_WRITE: 1448 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0); 1449 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0); 1450 break; 1451 default: 1452 /* The only other commands EDMA supports in non-queued and 1453 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none 1454 * of which are defined/used by Linux. If we get here, this 1455 * driver needs work. 1456 * 1457 * FIXME: modify libata to give qc_prep a return value and 1458 * return error here. 1459 */ 1460 BUG_ON(tf->command); 1461 break; 1462 } 1463 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0); 1464 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0); 1465 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0); 1466 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0); 1467 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0); 1468 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0); 1469 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0); 1470 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0); 1471 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ 1472 1473 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 1474 return; 1475 mv_fill_sg(qc); 1476 } 1477 1478 /** 1479 * mv_qc_prep_iie - Host specific command preparation. 1480 * @qc: queued command to prepare 1481 * 1482 * This routine simply redirects to the general purpose routine 1483 * if command is not DMA. Else, it handles prep of the CRQB 1484 * (command request block), does some sanity checking, and calls 1485 * the SG load routine. 1486 * 1487 * LOCKING: 1488 * Inherited from caller. 
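 *
 * Unlike the Gen-I/II CRQB above, the Gen-IIE CRQB packs the taskfile
 * into four 32-bit words rather than eleven 16-bit register writes:
 * ata_cmd[0] carries command and feature, ata_cmd[1] lbal/lbam/lbah/
 * device, ata_cmd[2] the hob_lba* bytes plus hob_feature, and
 * ata_cmd[3] nsect/hob_nsect, exactly as filled in below.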
1489 */ 1490 static void mv_qc_prep_iie(struct ata_queued_cmd *qc) 1491 { 1492 struct ata_port *ap = qc->ap; 1493 struct mv_port_priv *pp = ap->private_data; 1494 struct mv_crqb_iie *crqb; 1495 struct ata_taskfile *tf; 1496 unsigned in_index; 1497 u32 flags = 0; 1498 1499 if ((qc->tf.protocol != ATA_PROT_DMA) && 1500 (qc->tf.protocol != ATA_PROT_NCQ)) 1501 return; 1502 1503 /* Fill in Gen IIE command request block */ 1504 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) 1505 flags |= CRQB_FLAG_READ; 1506 1507 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); 1508 flags |= qc->tag << CRQB_TAG_SHIFT; 1509 flags |= qc->tag << CRQB_HOSTQ_SHIFT; 1510 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; 1511 1512 /* get current queue index from software */ 1513 in_index = pp->req_idx; 1514 1515 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; 1516 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); 1517 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); 1518 crqb->flags = cpu_to_le32(flags); 1519 1520 tf = &qc->tf; 1521 crqb->ata_cmd[0] = cpu_to_le32( 1522 (tf->command << 16) | 1523 (tf->feature << 24) 1524 ); 1525 crqb->ata_cmd[1] = cpu_to_le32( 1526 (tf->lbal << 0) | 1527 (tf->lbam << 8) | 1528 (tf->lbah << 16) | 1529 (tf->device << 24) 1530 ); 1531 crqb->ata_cmd[2] = cpu_to_le32( 1532 (tf->hob_lbal << 0) | 1533 (tf->hob_lbam << 8) | 1534 (tf->hob_lbah << 16) | 1535 (tf->hob_feature << 24) 1536 ); 1537 crqb->ata_cmd[3] = cpu_to_le32( 1538 (tf->nsect << 0) | 1539 (tf->hob_nsect << 8) 1540 ); 1541 1542 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 1543 return; 1544 mv_fill_sg(qc); 1545 } 1546 1547 /** 1548 * mv_qc_issue - Initiate a command to the host 1549 * @qc: queued command to start 1550 * 1551 * This routine simply redirects to the general purpose routine 1552 * if command is not DMA. Else, it sanity checks our local 1553 * caches of the request producer/consumer indices then enables 1554 * DMA and bumps the request producer index. 1555 * 1556 * LOCKING: 1557 * Inherited from caller. 1558 */ 1559 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) 1560 { 1561 struct ata_port *ap = qc->ap; 1562 void __iomem *port_mmio = mv_ap_base(ap); 1563 struct mv_port_priv *pp = ap->private_data; 1564 u32 in_index; 1565 1566 if ((qc->tf.protocol != ATA_PROT_DMA) && 1567 (qc->tf.protocol != ATA_PROT_NCQ)) { 1568 static int limit_warnings = 10; 1569 /* 1570 * Errata SATA#16, SATA#24: warn if multiple DRQs expected. 1571 * 1572 * Someday, we might implement special polling workarounds 1573 * for these, but it all seems rather unnecessary since we 1574 * normally use only DMA for commands which transfer more 1575 * than a single block of data. 1576 * 1577 * Much of the time, this could just work regardless. 1578 * So for now, just log the incident, and allow the attempt. 1579 */ 1580 if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) { 1581 --limit_warnings; 1582 ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME 1583 ": attempting PIO w/multiple DRQ: " 1584 "this may fail due to h/w errata\n"); 1585 } 1586 /* 1587 * We're about to send a non-EDMA capable command to the 1588 * port. Turn off EDMA so there won't be problems accessing 1589 * shadow block, etc registers. 
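 *
 * (The shadow block is the conventional taskfile register window at
 * SHD_BLK_OFS within the port's register space; it is what the
 * ata_sff_qc_issue() path below pokes, and it is only safe to access
 * while EDMA is disabled.)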
1590 */ 1591 mv_stop_edma(ap); 1592 mv_enable_port_irqs(ap, ERR_IRQ); 1593 mv_pmp_select(ap, qc->dev->link->pmp); 1594 return ata_sff_qc_issue(qc); 1595 } 1596 1597 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol); 1598 1599 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; 1600 in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; 1601 1602 /* and write the request in pointer to kick the EDMA to life */ 1603 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index, 1604 port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 1605 1606 return 0; 1607 } 1608 1609 static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) 1610 { 1611 struct mv_port_priv *pp = ap->private_data; 1612 struct ata_queued_cmd *qc; 1613 1614 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) 1615 return NULL; 1616 qc = ata_qc_from_tag(ap, ap->link.active_tag); 1617 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) 1618 qc = NULL; 1619 return qc; 1620 } 1621 1622 static void mv_pmp_error_handler(struct ata_port *ap) 1623 { 1624 unsigned int pmp, pmp_map; 1625 struct mv_port_priv *pp = ap->private_data; 1626 1627 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) { 1628 /* 1629 * Perform NCQ error analysis on failed PMPs 1630 * before we freeze the port entirely. 1631 * 1632 * The failed PMPs are marked earlier by mv_pmp_eh_prep(). 1633 */ 1634 pmp_map = pp->delayed_eh_pmp_map; 1635 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH; 1636 for (pmp = 0; pmp_map != 0; pmp++) { 1637 unsigned int this_pmp = (1 << pmp); 1638 if (pmp_map & this_pmp) { 1639 struct ata_link *link = &ap->pmp_link[pmp]; 1640 pmp_map &= ~this_pmp; 1641 ata_eh_analyze_ncq_error(link); 1642 } 1643 } 1644 ata_port_freeze(ap); 1645 } 1646 sata_pmp_error_handler(ap); 1647 } 1648 1649 static unsigned int mv_get_err_pmp_map(struct ata_port *ap) 1650 { 1651 void __iomem *port_mmio = mv_ap_base(ap); 1652 1653 return readl(port_mmio + SATA_TESTCTL_OFS) >> 16; 1654 } 1655 1656 static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map) 1657 { 1658 struct ata_eh_info *ehi; 1659 unsigned int pmp; 1660 1661 /* 1662 * Initialize EH info for PMPs which saw device errors 1663 */ 1664 ehi = &ap->link.eh_info; 1665 for (pmp = 0; pmp_map != 0; pmp++) { 1666 unsigned int this_pmp = (1 << pmp); 1667 if (pmp_map & this_pmp) { 1668 struct ata_link *link = &ap->pmp_link[pmp]; 1669 1670 pmp_map &= ~this_pmp; 1671 ehi = &link->eh_info; 1672 ata_ehi_clear_desc(ehi); 1673 ata_ehi_push_desc(ehi, "dev err"); 1674 ehi->err_mask |= AC_ERR_DEV; 1675 ehi->action |= ATA_EH_RESET; 1676 ata_link_abort(link); 1677 } 1678 } 1679 } 1680 1681 static int mv_req_q_empty(struct ata_port *ap) 1682 { 1683 void __iomem *port_mmio = mv_ap_base(ap); 1684 u32 in_ptr, out_ptr; 1685 1686 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS) 1687 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; 1688 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) 1689 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; 1690 return (in_ptr == out_ptr); /* 1 == queue_is_empty */ 1691 } 1692 1693 static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap) 1694 { 1695 struct mv_port_priv *pp = ap->private_data; 1696 int failed_links; 1697 unsigned int old_map, new_map; 1698 1699 /* 1700 * Device error during FBS+NCQ operation: 1701 * 1702 * Set a port flag to prevent further I/O being enqueued. 1703 * Leave the EDMA running to drain outstanding commands from this port. 1704 * Perform the post-mortem/EH only when all responses are complete. 1705 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2). 
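 *
 * In practice that sequence is what the code below implements:
 * 1) latch MV_PP_FLAG_DELAYED_EH so mv_qc_defer() blocks new commands;
 * 2) accumulate the failed-PMP bitmap from mv_get_err_pmp_map() and
 *    mark those links for EH via mv_pmp_eh_prep();
 * 3) once the active links have drained down to the failed ones and
 *    the request queue is empty, reap the remaining responses, stop
 *    EDMA and freeze the port, leaving EH to do the rest.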
1706 */ 1707 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) { 1708 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH; 1709 pp->delayed_eh_pmp_map = 0; 1710 } 1711 old_map = pp->delayed_eh_pmp_map; 1712 new_map = old_map | mv_get_err_pmp_map(ap); 1713 1714 if (old_map != new_map) { 1715 pp->delayed_eh_pmp_map = new_map; 1716 mv_pmp_eh_prep(ap, new_map & ~old_map); 1717 } 1718 failed_links = hweight16(new_map); 1719 1720 ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x " 1721 "failed_links=%d nr_active_links=%d\n", 1722 __func__, pp->delayed_eh_pmp_map, 1723 ap->qc_active, failed_links, 1724 ap->nr_active_links); 1725 1726 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) { 1727 mv_process_crpb_entries(ap, pp); 1728 mv_stop_edma(ap); 1729 mv_eh_freeze(ap); 1730 ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__); 1731 return 1; /* handled */ 1732 } 1733 ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__); 1734 return 1; /* handled */ 1735 } 1736 1737 static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap) 1738 { 1739 /* 1740 * Possible future enhancement: 1741 * 1742 * FBS+non-NCQ operation is not yet implemented. 1743 * See related notes in mv_edma_cfg(). 1744 * 1745 * Device error during FBS+non-NCQ operation: 1746 * 1747 * We need to snapshot the shadow registers for each failed command. 1748 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3). 1749 */ 1750 return 0; /* not handled */ 1751 } 1752 1753 static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause) 1754 { 1755 struct mv_port_priv *pp = ap->private_data; 1756 1757 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) 1758 return 0; /* EDMA was not active: not handled */ 1759 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN)) 1760 return 0; /* FBS was not active: not handled */ 1761 1762 if (!(edma_err_cause & EDMA_ERR_DEV)) 1763 return 0; /* non DEV error: not handled */ 1764 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT; 1765 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS)) 1766 return 0; /* other problems: not handled */ 1767 1768 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) { 1769 /* 1770 * EDMA should NOT have self-disabled for this case. 1771 * If it did, then something is wrong elsewhere, 1772 * and we cannot handle it here. 1773 */ 1774 if (edma_err_cause & EDMA_ERR_SELF_DIS) { 1775 ata_port_printk(ap, KERN_WARNING, 1776 "%s: err_cause=0x%x pp_flags=0x%x\n", 1777 __func__, edma_err_cause, pp->pp_flags); 1778 return 0; /* not handled */ 1779 } 1780 return mv_handle_fbs_ncq_dev_err(ap); 1781 } else { 1782 /* 1783 * EDMA should have self-disabled for this case. 1784 * If it did not, then something is wrong elsewhere, 1785 * and we cannot handle it here. 
1786 */ 1787 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) { 1788 ata_port_printk(ap, KERN_WARNING, 1789 "%s: err_cause=0x%x pp_flags=0x%x\n", 1790 __func__, edma_err_cause, pp->pp_flags); 1791 return 0; /* not handled */ 1792 } 1793 return mv_handle_fbs_non_ncq_dev_err(ap); 1794 } 1795 return 0; /* not handled */ 1796 } 1797 1798 static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) 1799 { 1800 struct ata_eh_info *ehi = &ap->link.eh_info; 1801 char *when = "idle"; 1802 1803 ata_ehi_clear_desc(ehi); 1804 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) { 1805 when = "disabled"; 1806 } else if (edma_was_enabled) { 1807 when = "EDMA enabled"; 1808 } else { 1809 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); 1810 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) 1811 when = "polling"; 1812 } 1813 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when); 1814 ehi->err_mask |= AC_ERR_OTHER; 1815 ehi->action |= ATA_EH_RESET; 1816 ata_port_freeze(ap); 1817 } 1818 1819 /** 1820 * mv_err_intr - Handle error interrupts on the port 1821 * @ap: ATA channel to manipulate 1822 * 1823 * Most cases require a full reset of the chip's state machine, 1824 * which also performs a COMRESET. 1825 * Also, if the port disabled DMA, update our cached copy to match. 1826 * 1827 * LOCKING: 1828 * Inherited from caller. 1829 */ 1830 static void mv_err_intr(struct ata_port *ap) 1831 { 1832 void __iomem *port_mmio = mv_ap_base(ap); 1833 u32 edma_err_cause, eh_freeze_mask, serr = 0; 1834 u32 fis_cause = 0; 1835 struct mv_port_priv *pp = ap->private_data; 1836 struct mv_host_priv *hpriv = ap->host->private_data; 1837 unsigned int action = 0, err_mask = 0; 1838 struct ata_eh_info *ehi = &ap->link.eh_info; 1839 struct ata_queued_cmd *qc; 1840 int abort = 0; 1841 1842 /* 1843 * Read and clear the SError and err_cause bits. 1844 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear 1845 * the FIS_IRQ_CAUSE register before clearing edma_err_cause. 1846 */ 1847 sata_scr_read(&ap->link, SCR_ERROR, &serr); 1848 sata_scr_write_flush(&ap->link, SCR_ERROR, serr); 1849 1850 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1851 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { 1852 fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS); 1853 writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS); 1854 } 1855 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1856 1857 if (edma_err_cause & EDMA_ERR_DEV) { 1858 /* 1859 * Device errors during FIS-based switching operation 1860 * require special handling. 
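 *
 * mv_handle_dev_err() returns nonzero when it has fully dealt with
 * the error (the delayed-EH FBS+NCQ path), in which case there is
 * nothing further for this routine to do.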
1861 */ 1862 if (mv_handle_dev_err(ap, edma_err_cause)) 1863 return; 1864 } 1865 1866 qc = mv_get_active_qc(ap); 1867 ata_ehi_clear_desc(ehi); 1868 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x", 1869 edma_err_cause, pp->pp_flags); 1870 1871 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { 1872 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause); 1873 if (fis_cause & SATA_FIS_IRQ_AN) { 1874 u32 ec = edma_err_cause & 1875 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT); 1876 sata_async_notification(ap); 1877 if (!ec) 1878 return; /* Just an AN; no need for the nukes */ 1879 ata_ehi_push_desc(ehi, "SDB notify"); 1880 } 1881 } 1882 /* 1883 * All generations share these EDMA error cause bits: 1884 */ 1885 if (edma_err_cause & EDMA_ERR_DEV) { 1886 err_mask |= AC_ERR_DEV; 1887 action |= ATA_EH_RESET; 1888 ata_ehi_push_desc(ehi, "dev error"); 1889 } 1890 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | 1891 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | 1892 EDMA_ERR_INTRL_PAR)) { 1893 err_mask |= AC_ERR_ATA_BUS; 1894 action |= ATA_EH_RESET; 1895 ata_ehi_push_desc(ehi, "parity error"); 1896 } 1897 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) { 1898 ata_ehi_hotplugged(ehi); 1899 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? 1900 "dev disconnect" : "dev connect"); 1901 action |= ATA_EH_RESET; 1902 } 1903 1904 /* 1905 * Gen-I has a different SELF_DIS bit, 1906 * different FREEZE bits, and no SERR bit: 1907 */ 1908 if (IS_GEN_I(hpriv)) { 1909 eh_freeze_mask = EDMA_EH_FREEZE_5; 1910 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) { 1911 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 1912 ata_ehi_push_desc(ehi, "EDMA self-disable"); 1913 } 1914 } else { 1915 eh_freeze_mask = EDMA_EH_FREEZE; 1916 if (edma_err_cause & EDMA_ERR_SELF_DIS) { 1917 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 1918 ata_ehi_push_desc(ehi, "EDMA self-disable"); 1919 } 1920 if (edma_err_cause & EDMA_ERR_SERR) { 1921 ata_ehi_push_desc(ehi, "SError=%08x", serr); 1922 err_mask |= AC_ERR_ATA_BUS; 1923 action |= ATA_EH_RESET; 1924 } 1925 } 1926 1927 if (!err_mask) { 1928 err_mask = AC_ERR_OTHER; 1929 action |= ATA_EH_RESET; 1930 } 1931 1932 ehi->serror |= serr; 1933 ehi->action |= action; 1934 1935 if (qc) 1936 qc->err_mask |= err_mask; 1937 else 1938 ehi->err_mask |= err_mask; 1939 1940 if (err_mask == AC_ERR_DEV) { 1941 /* 1942 * Cannot do ata_port_freeze() here, 1943 * because it would kill PIO access, 1944 * which is needed for further diagnosis. 1945 */ 1946 mv_eh_freeze(ap); 1947 abort = 1; 1948 } else if (edma_err_cause & eh_freeze_mask) { 1949 /* 1950 * Note to self: ata_port_freeze() calls ata_port_abort() 1951 */ 1952 ata_port_freeze(ap); 1953 } else { 1954 abort = 1; 1955 } 1956 1957 if (abort) { 1958 if (qc) 1959 ata_link_abort(qc->dev->link); 1960 else 1961 ata_port_abort(ap); 1962 } 1963 } 1964 1965 static void mv_process_crpb_response(struct ata_port *ap, 1966 struct mv_crpb *response, unsigned int tag, int ncq_enabled) 1967 { 1968 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); 1969 1970 if (qc) { 1971 u8 ata_status; 1972 u16 edma_status = le16_to_cpu(response->flags); 1973 /* 1974 * edma_status from a response queue entry: 1975 * LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only). 1976 * MSB is saved ATA status from command completion. 1977 */ 1978 if (!ncq_enabled) { 1979 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV; 1980 if (err_cause) { 1981 /* 1982 * Error will be seen/handled by mv_err_intr(). 1983 * So do nothing at all here. 
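 * Leaving the qc uncompleted here is deliberate: mv_err_intr() will
 * pick it up and route it through libata error handling.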
1984 */ 1985 return; 1986 } 1987 } 1988 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT; 1989 if (!ac_err_mask(ata_status)) 1990 ata_qc_complete(qc); 1991 /* else: leave it for mv_err_intr() */ 1992 } else { 1993 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n", 1994 __func__, tag); 1995 } 1996 } 1997 1998 static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp) 1999 { 2000 void __iomem *port_mmio = mv_ap_base(ap); 2001 struct mv_host_priv *hpriv = ap->host->private_data; 2002 u32 in_index; 2003 bool work_done = false; 2004 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN); 2005 2006 /* Get the hardware queue position index */ 2007 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) 2008 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; 2009 2010 /* Process new responses received since the last time we looked */ 2011 while (in_index != pp->resp_idx) { 2012 unsigned int tag; 2013 struct mv_crpb *response = &pp->crpb[pp->resp_idx]; 2014 2015 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK; 2016 2017 if (IS_GEN_I(hpriv)) { 2018 /* 50xx: no NCQ, only one command active at a time */ 2019 tag = ap->link.active_tag; 2020 } else { 2021 /* Gen II/IIE: get command tag from CRPB entry */ 2022 tag = le16_to_cpu(response->id) & 0x1f; 2023 } 2024 mv_process_crpb_response(ap, response, tag, ncq_enabled); 2025 work_done = true; 2026 } 2027 2028 /* Update the software queue position index in hardware */ 2029 if (work_done) 2030 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | 2031 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT), 2032 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 2033 } 2034 2035 static void mv_port_intr(struct ata_port *ap, u32 port_cause) 2036 { 2037 struct mv_port_priv *pp; 2038 int edma_was_enabled; 2039 2040 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) { 2041 mv_unexpected_intr(ap, 0); 2042 return; 2043 } 2044 /* 2045 * Grab a snapshot of the EDMA_EN flag setting, 2046 * so that we have a consistent view for this port, 2047 * even if one of the routines we call changes it. 2048 */ 2049 pp = ap->private_data; 2050 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN); 2051 /* 2052 * Process completed CRPB response(s) before other events. 2053 */ 2054 if (edma_was_enabled && (port_cause & DONE_IRQ)) { 2055 mv_process_crpb_entries(ap, pp); 2056 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) 2057 mv_handle_fbs_ncq_dev_err(ap); 2058 } 2059 /* 2060 * Handle chip-reported errors, or continue on to handle PIO. 2061 */ 2062 if (unlikely(port_cause & ERR_IRQ)) { 2063 mv_err_intr(ap); 2064 } else if (!edma_was_enabled) { 2065 struct ata_queued_cmd *qc = mv_get_active_qc(ap); 2066 if (qc) 2067 ata_sff_host_intr(ap, qc); 2068 else 2069 mv_unexpected_intr(ap, edma_was_enabled); 2070 } 2071 } 2072 2073 /** 2074 * mv_host_intr - Handle all interrupts on the given host controller 2075 * @host: host specific structure 2076 * @main_irq_cause: Main interrupt cause register for the chip. 2077 * 2078 * LOCKING: 2079 * Inherited from caller.
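 *
 * Returns nonzero when at least one host controller had pending port
 * interrupts to acknowledge and service, otherwise zero.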
2080 */ 2081 static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) 2082 { 2083 struct mv_host_priv *hpriv = host->private_data; 2084 void __iomem *mmio = hpriv->base, *hc_mmio; 2085 unsigned int handled = 0, port; 2086 2087 for (port = 0; port < hpriv->n_ports; port++) { 2088 struct ata_port *ap = host->ports[port]; 2089 unsigned int p, shift, hardport, port_cause; 2090 2091 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); 2092 /* 2093 * Each hc within the host has its own hc_irq_cause register, 2094 * where the interrupting ports bits get ack'd. 2095 */ 2096 if (hardport == 0) { /* first port on this hc ? */ 2097 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND; 2098 u32 port_mask, ack_irqs; 2099 /* 2100 * Skip this entire hc if nothing pending for any ports 2101 */ 2102 if (!hc_cause) { 2103 port += MV_PORTS_PER_HC - 1; 2104 continue; 2105 } 2106 /* 2107 * We don't need/want to read the hc_irq_cause register, 2108 * because doing so hurts performance, and 2109 * main_irq_cause already gives us everything we need. 2110 * 2111 * But we do have to *write* to the hc_irq_cause to ack 2112 * the ports that we are handling this time through. 2113 * 2114 * This requires that we create a bitmap for those 2115 * ports which interrupted us, and use that bitmap 2116 * to ack (only) those ports via hc_irq_cause. 2117 */ 2118 ack_irqs = 0; 2119 for (p = 0; p < MV_PORTS_PER_HC; ++p) { 2120 if ((port + p) >= hpriv->n_ports) 2121 break; 2122 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2); 2123 if (hc_cause & port_mask) 2124 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p; 2125 } 2126 hc_mmio = mv_hc_base_from_port(mmio, port); 2127 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS); 2128 handled = 1; 2129 } 2130 /* 2131 * Handle interrupts signalled for this port: 2132 */ 2133 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ); 2134 if (port_cause) 2135 mv_port_intr(ap, port_cause); 2136 } 2137 return handled; 2138 } 2139 2140 static int mv_pci_error(struct ata_host *host, void __iomem *mmio) 2141 { 2142 struct mv_host_priv *hpriv = host->private_data; 2143 struct ata_port *ap; 2144 struct ata_queued_cmd *qc; 2145 struct ata_eh_info *ehi; 2146 unsigned int i, err_mask, printed = 0; 2147 u32 err_cause; 2148 2149 err_cause = readl(mmio + hpriv->irq_cause_ofs); 2150 2151 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", 2152 err_cause); 2153 2154 DPRINTK("All regs @ PCI error\n"); 2155 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); 2156 2157 writelfl(0, mmio + hpriv->irq_cause_ofs); 2158 2159 for (i = 0; i < host->n_ports; i++) { 2160 ap = host->ports[i]; 2161 if (!ata_link_offline(&ap->link)) { 2162 ehi = &ap->link.eh_info; 2163 ata_ehi_clear_desc(ehi); 2164 if (!printed++) 2165 ata_ehi_push_desc(ehi, 2166 "PCI err cause 0x%08x", err_cause); 2167 err_mask = AC_ERR_HOST_BUS; 2168 ehi->action = ATA_EH_RESET; 2169 qc = ata_qc_from_tag(ap, ap->link.active_tag); 2170 if (qc) 2171 qc->err_mask |= err_mask; 2172 else 2173 ehi->err_mask |= err_mask; 2174 2175 ata_port_freeze(ap); 2176 } 2177 } 2178 return 1; /* handled */ 2179 } 2180 2181 /** 2182 * mv_interrupt - Main interrupt event handler 2183 * @irq: unused 2184 * @dev_instance: private data; in this case the host structure 2185 * 2186 * Read the read only register to determine if any host 2187 * controllers have pending interrupts. If so, call lower level 2188 * routine to handle. Also check for PCI errors which are only 2189 * reported here. 
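 *
 * When MSI is in use, the main interrupt mask is cleared on entry and
 * restored on exit, so that any cause bits which become set while we
 * are in here will retrigger a fresh interrupt once we return.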
2190 * 2191 * LOCKING: 2192 * This routine holds the host lock while processing pending 2193 * interrupts. 2194 */ 2195 static irqreturn_t mv_interrupt(int irq, void *dev_instance) 2196 { 2197 struct ata_host *host = dev_instance; 2198 struct mv_host_priv *hpriv = host->private_data; 2199 unsigned int handled = 0; 2200 int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI; 2201 u32 main_irq_cause, pending_irqs; 2202 2203 spin_lock(&host->lock); 2204 2205 /* for MSI: block new interrupts while in here */ 2206 if (using_msi) 2207 writel(0, hpriv->main_irq_mask_addr); 2208 2209 main_irq_cause = readl(hpriv->main_irq_cause_addr); 2210 pending_irqs = main_irq_cause & hpriv->main_irq_mask; 2211 /* 2212 * Deal with cases where we either have nothing pending, or have read 2213 * a bogus register value which can indicate HW removal or PCI fault. 2214 */ 2215 if (pending_irqs && main_irq_cause != 0xffffffffU) { 2216 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv))) 2217 handled = mv_pci_error(host, hpriv->base); 2218 else 2219 handled = mv_host_intr(host, pending_irqs); 2220 } 2221 2222 /* for MSI: unmask; interrupt cause bits will retrigger now */ 2223 if (using_msi) 2224 writel(hpriv->main_irq_mask, hpriv->main_irq_mask_addr); 2225 2226 spin_unlock(&host->lock); 2227 2228 return IRQ_RETVAL(handled); 2229 } 2230 2231 static unsigned int mv5_scr_offset(unsigned int sc_reg_in) 2232 { 2233 unsigned int ofs; 2234 2235 switch (sc_reg_in) { 2236 case SCR_STATUS: 2237 case SCR_ERROR: 2238 case SCR_CONTROL: 2239 ofs = sc_reg_in * sizeof(u32); 2240 break; 2241 default: 2242 ofs = 0xffffffffU; 2243 break; 2244 } 2245 return ofs; 2246 } 2247 2248 static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val) 2249 { 2250 struct mv_host_priv *hpriv = link->ap->host->private_data; 2251 void __iomem *mmio = hpriv->base; 2252 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no); 2253 unsigned int ofs = mv5_scr_offset(sc_reg_in); 2254 2255 if (ofs != 0xffffffffU) { 2256 *val = readl(addr + ofs); 2257 return 0; 2258 } else 2259 return -EINVAL; 2260 } 2261 2262 static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) 2263 { 2264 struct mv_host_priv *hpriv = link->ap->host->private_data; 2265 void __iomem *mmio = hpriv->base; 2266 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no); 2267 unsigned int ofs = mv5_scr_offset(sc_reg_in); 2268 2269 if (ofs != 0xffffffffU) { 2270 writelfl(val, addr + ofs); 2271 return 0; 2272 } else 2273 return -EINVAL; 2274 } 2275 2276 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio) 2277 { 2278 struct pci_dev *pdev = to_pci_dev(host->dev); 2279 int early_5080; 2280 2281 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0); 2282 2283 if (!early_5080) { 2284 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); 2285 tmp |= (1 << 0); 2286 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); 2287 } 2288 2289 mv_reset_pci_bus(host, mmio); 2290 } 2291 2292 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) 2293 { 2294 writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS); 2295 } 2296 2297 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, 2298 void __iomem *mmio) 2299 { 2300 void __iomem *phy_mmio = mv5_phy_base(mmio, idx); 2301 u32 tmp; 2302 2303 tmp = readl(phy_mmio + MV5_PHY_MODE); 2304 2305 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */ 2306 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */ 2307 } 2308 2309 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem 
*mmio) 2310 { 2311 u32 tmp; 2312 2313 writel(0, mmio + MV_GPIO_PORT_CTL_OFS); 2314 2315 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */ 2316 2317 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); 2318 tmp |= ~(1 << 0); 2319 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); 2320 } 2321 2322 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 2323 unsigned int port) 2324 { 2325 void __iomem *phy_mmio = mv5_phy_base(mmio, port); 2326 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5); 2327 u32 tmp; 2328 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0); 2329 2330 if (fix_apm_sq) { 2331 tmp = readl(phy_mmio + MV5_LTMODE_OFS); 2332 tmp |= (1 << 19); 2333 writel(tmp, phy_mmio + MV5_LTMODE_OFS); 2334 2335 tmp = readl(phy_mmio + MV5_PHY_CTL_OFS); 2336 tmp &= ~0x3; 2337 tmp |= 0x1; 2338 writel(tmp, phy_mmio + MV5_PHY_CTL_OFS); 2339 } 2340 2341 tmp = readl(phy_mmio + MV5_PHY_MODE); 2342 tmp &= ~mask; 2343 tmp |= hpriv->signal[port].pre; 2344 tmp |= hpriv->signal[port].amps; 2345 writel(tmp, phy_mmio + MV5_PHY_MODE); 2346 } 2347 2348 2349 #undef ZERO 2350 #define ZERO(reg) writel(0, port_mmio + (reg)) 2351 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, 2352 unsigned int port) 2353 { 2354 void __iomem *port_mmio = mv_port_base(mmio, port); 2355 2356 mv_reset_channel(hpriv, mmio, port); 2357 2358 ZERO(0x028); /* command */ 2359 writel(0x11f, port_mmio + EDMA_CFG_OFS); 2360 ZERO(0x004); /* timer */ 2361 ZERO(0x008); /* irq err cause */ 2362 ZERO(0x00c); /* irq err mask */ 2363 ZERO(0x010); /* rq bah */ 2364 ZERO(0x014); /* rq inp */ 2365 ZERO(0x018); /* rq outp */ 2366 ZERO(0x01c); /* respq bah */ 2367 ZERO(0x024); /* respq outp */ 2368 ZERO(0x020); /* respq inp */ 2369 ZERO(0x02c); /* test control */ 2370 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS); 2371 } 2372 #undef ZERO 2373 2374 #define ZERO(reg) writel(0, hc_mmio + (reg)) 2375 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 2376 unsigned int hc) 2377 { 2378 void __iomem *hc_mmio = mv_hc_base(mmio, hc); 2379 u32 tmp; 2380 2381 ZERO(0x00c); 2382 ZERO(0x010); 2383 ZERO(0x014); 2384 ZERO(0x018); 2385 2386 tmp = readl(hc_mmio + 0x20); 2387 tmp &= 0x1c1c1c1c; 2388 tmp |= 0x03030303; 2389 writel(tmp, hc_mmio + 0x20); 2390 } 2391 #undef ZERO 2392 2393 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 2394 unsigned int n_hc) 2395 { 2396 unsigned int hc, port; 2397 2398 for (hc = 0; hc < n_hc; hc++) { 2399 for (port = 0; port < MV_PORTS_PER_HC; port++) 2400 mv5_reset_hc_port(hpriv, mmio, 2401 (hc * MV_PORTS_PER_HC) + port); 2402 2403 mv5_reset_one_hc(hpriv, mmio, hc); 2404 } 2405 2406 return 0; 2407 } 2408 2409 #undef ZERO 2410 #define ZERO(reg) writel(0, mmio + (reg)) 2411 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio) 2412 { 2413 struct mv_host_priv *hpriv = host->private_data; 2414 u32 tmp; 2415 2416 tmp = readl(mmio + MV_PCI_MODE_OFS); 2417 tmp &= 0xff00ffff; 2418 writel(tmp, mmio + MV_PCI_MODE_OFS); 2419 2420 ZERO(MV_PCI_DISC_TIMER); 2421 ZERO(MV_PCI_MSI_TRIGGER); 2422 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS); 2423 ZERO(MV_PCI_SERR_MASK); 2424 ZERO(hpriv->irq_cause_ofs); 2425 ZERO(hpriv->irq_mask_ofs); 2426 ZERO(MV_PCI_ERR_LOW_ADDRESS); 2427 ZERO(MV_PCI_ERR_HIGH_ADDRESS); 2428 ZERO(MV_PCI_ERR_ATTRIBUTE); 2429 ZERO(MV_PCI_ERR_COMMAND); 2430 } 2431 #undef ZERO 2432 2433 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) 2434 { 2435 u32 tmp; 2436 2437 mv5_reset_flash(hpriv, mmio); 2438 
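	/*
	 * In addition to the 5xxx flash-control setup above, the 6xxx
	 * parts tweak the GPIO port control register below: only bits
	 * 1:0 are preserved and bits 6:5 are set (descriptive note
	 * only; the individual bit meanings are not documented here).
	 */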
2439 tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS); 2440 tmp &= 0x3; 2441 tmp |= (1 << 5) | (1 << 6); 2442 writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS); 2443 } 2444 2445 /** 2446 * mv6_reset_hc - Perform the 6xxx global soft reset 2447 * @mmio: base address of the HBA 2448 * 2449 * This routine only applies to 6xxx parts. 2450 * 2451 * LOCKING: 2452 * Inherited from caller. 2453 */ 2454 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 2455 unsigned int n_hc) 2456 { 2457 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS; 2458 int i, rc = 0; 2459 u32 t; 2460 2461 /* Following procedure defined in PCI "main command and status 2462 * register" table. 2463 */ 2464 t = readl(reg); 2465 writel(t | STOP_PCI_MASTER, reg); 2466 2467 for (i = 0; i < 1000; i++) { 2468 udelay(1); 2469 t = readl(reg); 2470 if (PCI_MASTER_EMPTY & t) 2471 break; 2472 } 2473 if (!(PCI_MASTER_EMPTY & t)) { 2474 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); 2475 rc = 1; 2476 goto done; 2477 } 2478 2479 /* set reset */ 2480 i = 5; 2481 do { 2482 writel(t | GLOB_SFT_RST, reg); 2483 t = readl(reg); 2484 udelay(1); 2485 } while (!(GLOB_SFT_RST & t) && (i-- > 0)); 2486 2487 if (!(GLOB_SFT_RST & t)) { 2488 printk(KERN_ERR DRV_NAME ": can't set global reset\n"); 2489 rc = 1; 2490 goto done; 2491 } 2492 2493 /* clear reset and *reenable the PCI master* (not mentioned in spec) */ 2494 i = 5; 2495 do { 2496 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg); 2497 t = readl(reg); 2498 udelay(1); 2499 } while ((GLOB_SFT_RST & t) && (i-- > 0)); 2500 2501 if (GLOB_SFT_RST & t) { 2502 printk(KERN_ERR DRV_NAME ": can't clear global reset\n"); 2503 rc = 1; 2504 } 2505 done: 2506 return rc; 2507 } 2508 2509 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, 2510 void __iomem *mmio) 2511 { 2512 void __iomem *port_mmio; 2513 u32 tmp; 2514 2515 tmp = readl(mmio + MV_RESET_CFG_OFS); 2516 if ((tmp & (1 << 0)) == 0) { 2517 hpriv->signal[idx].amps = 0x7 << 8; 2518 hpriv->signal[idx].pre = 0x1 << 5; 2519 return; 2520 } 2521 2522 port_mmio = mv_port_base(mmio, idx); 2523 tmp = readl(port_mmio + PHY_MODE2); 2524 2525 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ 2526 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ 2527 } 2528 2529 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) 2530 { 2531 writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS); 2532 } 2533 2534 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 2535 unsigned int port) 2536 { 2537 void __iomem *port_mmio = mv_port_base(mmio, port); 2538 2539 u32 hp_flags = hpriv->hp_flags; 2540 int fix_phy_mode2 = 2541 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); 2542 int fix_phy_mode4 = 2543 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); 2544 u32 m2, m3; 2545 2546 if (fix_phy_mode2) { 2547 m2 = readl(port_mmio + PHY_MODE2); 2548 m2 &= ~(1 << 16); 2549 m2 |= (1 << 31); 2550 writel(m2, port_mmio + PHY_MODE2); 2551 2552 udelay(200); 2553 2554 m2 = readl(port_mmio + PHY_MODE2); 2555 m2 &= ~((1 << 16) | (1 << 31)); 2556 writel(m2, port_mmio + PHY_MODE2); 2557 2558 udelay(200); 2559 } 2560 2561 /* 2562 * Gen-II/IIe PHY_MODE3 errata RM#2: 2563 * Achieves better receiver noise performance than the h/w default: 2564 */ 2565 m3 = readl(port_mmio + PHY_MODE3); 2566 m3 = (m3 & 0x1f) | (0x5555601 << 5); 2567 2568 /* Guideline 88F5182 (GL# SATA-S11) */ 2569 if (IS_SOC(hpriv)) 2570 m3 &= ~0x1c; 2571 2572 if (fix_phy_mode4) { 2573 u32 m4 = readl(port_mmio + PHY_MODE4); 2574 /* 2575 * Enforce reserved-bit 
restrictions on GenIIe devices only. 2576 * For earlier chipsets, force only the internal config field 2577 * (workaround for errata FEr SATA#10 part 1). 2578 */ 2579 if (IS_GEN_IIE(hpriv)) 2580 m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES; 2581 else 2582 m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE; 2583 writel(m4, port_mmio + PHY_MODE4); 2584 } 2585 /* 2586 * Workaround for 60x1-B2 errata SATA#13: 2587 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3, 2588 * so we must always rewrite PHY_MODE3 after PHY_MODE4. 2589 */ 2590 writel(m3, port_mmio + PHY_MODE3); 2591 2592 /* Revert values of pre-emphasis and signal amps to the saved ones */ 2593 m2 = readl(port_mmio + PHY_MODE2); 2594 2595 m2 &= ~MV_M2_PREAMP_MASK; 2596 m2 |= hpriv->signal[port].amps; 2597 m2 |= hpriv->signal[port].pre; 2598 m2 &= ~(1 << 16); 2599 2600 /* according to mvSata 3.6.1, some IIE values are fixed */ 2601 if (IS_GEN_IIE(hpriv)) { 2602 m2 &= ~0xC30FF01F; 2603 m2 |= 0x0000900F; 2604 } 2605 2606 writel(m2, port_mmio + PHY_MODE2); 2607 } 2608 2609 /* TODO: use the generic LED interface to configure the SATA Presence */ 2610 /* & Acitivy LEDs on the board */ 2611 static void mv_soc_enable_leds(struct mv_host_priv *hpriv, 2612 void __iomem *mmio) 2613 { 2614 return; 2615 } 2616 2617 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx, 2618 void __iomem *mmio) 2619 { 2620 void __iomem *port_mmio; 2621 u32 tmp; 2622 2623 port_mmio = mv_port_base(mmio, idx); 2624 tmp = readl(port_mmio + PHY_MODE2); 2625 2626 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ 2627 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ 2628 } 2629 2630 #undef ZERO 2631 #define ZERO(reg) writel(0, port_mmio + (reg)) 2632 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv, 2633 void __iomem *mmio, unsigned int port) 2634 { 2635 void __iomem *port_mmio = mv_port_base(mmio, port); 2636 2637 mv_reset_channel(hpriv, mmio, port); 2638 2639 ZERO(0x028); /* command */ 2640 writel(0x101f, port_mmio + EDMA_CFG_OFS); 2641 ZERO(0x004); /* timer */ 2642 ZERO(0x008); /* irq err cause */ 2643 ZERO(0x00c); /* irq err mask */ 2644 ZERO(0x010); /* rq bah */ 2645 ZERO(0x014); /* rq inp */ 2646 ZERO(0x018); /* rq outp */ 2647 ZERO(0x01c); /* respq bah */ 2648 ZERO(0x024); /* respq outp */ 2649 ZERO(0x020); /* respq inp */ 2650 ZERO(0x02c); /* test control */ 2651 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS); 2652 } 2653 2654 #undef ZERO 2655 2656 #define ZERO(reg) writel(0, hc_mmio + (reg)) 2657 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv, 2658 void __iomem *mmio) 2659 { 2660 void __iomem *hc_mmio = mv_hc_base(mmio, 0); 2661 2662 ZERO(0x00c); 2663 ZERO(0x010); 2664 ZERO(0x014); 2665 2666 } 2667 2668 #undef ZERO 2669 2670 static int mv_soc_reset_hc(struct mv_host_priv *hpriv, 2671 void __iomem *mmio, unsigned int n_hc) 2672 { 2673 unsigned int port; 2674 2675 for (port = 0; port < hpriv->n_ports; port++) 2676 mv_soc_reset_hc_port(hpriv, mmio, port); 2677 2678 mv_soc_reset_one_hc(hpriv, mmio); 2679 2680 return 0; 2681 } 2682 2683 static void mv_soc_reset_flash(struct mv_host_priv *hpriv, 2684 void __iomem *mmio) 2685 { 2686 return; 2687 } 2688 2689 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio) 2690 { 2691 return; 2692 } 2693 2694 static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i) 2695 { 2696 u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS); 2697 2698 ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */ 2699 if (want_gen2i) 2700 ifcfg |= (1 << 7); 
/* enable gen2i speed */ 2701 writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS); 2702 } 2703 2704 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, 2705 unsigned int port_no) 2706 { 2707 void __iomem *port_mmio = mv_port_base(mmio, port_no); 2708 2709 /* 2710 * The datasheet warns against setting EDMA_RESET when EDMA is active 2711 * (but doesn't say what the problem might be). So we first try 2712 * to disable the EDMA engine before doing the EDMA_RESET operation. 2713 */ 2714 mv_stop_edma_engine(port_mmio); 2715 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS); 2716 2717 if (!IS_GEN_I(hpriv)) { 2718 /* Enable 3.0gb/s link speed: this survives EDMA_RESET */ 2719 mv_setup_ifcfg(port_mmio, 1); 2720 } 2721 /* 2722 * Strobing EDMA_RESET here causes a hard reset of the SATA transport, 2723 * link, and physical layers. It resets all SATA interface registers 2724 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev. 2725 */ 2726 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS); 2727 udelay(25); /* allow reset propagation */ 2728 writelfl(0, port_mmio + EDMA_CMD_OFS); 2729 2730 hpriv->ops->phy_errata(hpriv, mmio, port_no); 2731 2732 if (IS_GEN_I(hpriv)) 2733 mdelay(1); 2734 } 2735 2736 static void mv_pmp_select(struct ata_port *ap, int pmp) 2737 { 2738 if (sata_pmp_supported(ap)) { 2739 void __iomem *port_mmio = mv_ap_base(ap); 2740 u32 reg = readl(port_mmio + SATA_IFCTL_OFS); 2741 int old = reg & 0xf; 2742 2743 if (old != pmp) { 2744 reg = (reg & ~0xf) | pmp; 2745 writelfl(reg, port_mmio + SATA_IFCTL_OFS); 2746 } 2747 } 2748 } 2749 2750 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, 2751 unsigned long deadline) 2752 { 2753 mv_pmp_select(link->ap, sata_srst_pmp(link)); 2754 return sata_std_hardreset(link, class, deadline); 2755 } 2756 2757 static int mv_softreset(struct ata_link *link, unsigned int *class, 2758 unsigned long deadline) 2759 { 2760 mv_pmp_select(link->ap, sata_srst_pmp(link)); 2761 return ata_sff_softreset(link, class, deadline); 2762 } 2763 2764 static int mv_hardreset(struct ata_link *link, unsigned int *class, 2765 unsigned long deadline) 2766 { 2767 struct ata_port *ap = link->ap; 2768 struct mv_host_priv *hpriv = ap->host->private_data; 2769 struct mv_port_priv *pp = ap->private_data; 2770 void __iomem *mmio = hpriv->base; 2771 int rc, attempts = 0, extra = 0; 2772 u32 sstatus; 2773 bool online; 2774 2775 mv_reset_channel(hpriv, mmio, ap->port_no); 2776 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 2777 2778 /* Workaround for errata FEr SATA#10 (part 2) */ 2779 do { 2780 const unsigned long *timing = 2781 sata_ehc_deb_timing(&link->eh_context); 2782 2783 rc = sata_link_hardreset(link, timing, deadline + extra, 2784 &online, NULL); 2785 rc = online ? 
-EAGAIN : rc; 2786 if (rc) 2787 return rc; 2788 sata_scr_read(link, SCR_STATUS, &sstatus); 2789 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) { 2790 /* Force 1.5gb/s link speed and try again */ 2791 mv_setup_ifcfg(mv_ap_base(ap), 0); 2792 if (time_after(jiffies + HZ, deadline)) 2793 extra = HZ; /* only extend it once, max */ 2794 } 2795 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123); 2796 2797 return rc; 2798 } 2799 2800 static void mv_eh_freeze(struct ata_port *ap) 2801 { 2802 mv_stop_edma(ap); 2803 mv_enable_port_irqs(ap, 0); 2804 } 2805 2806 static void mv_eh_thaw(struct ata_port *ap) 2807 { 2808 struct mv_host_priv *hpriv = ap->host->private_data; 2809 unsigned int port = ap->port_no; 2810 unsigned int hardport = mv_hardport_from_port(port); 2811 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port); 2812 void __iomem *port_mmio = mv_ap_base(ap); 2813 u32 hc_irq_cause; 2814 2815 /* clear EDMA errors on this port */ 2816 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 2817 2818 /* clear pending irq events */ 2819 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport); 2820 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); 2821 2822 mv_enable_port_irqs(ap, ERR_IRQ); 2823 } 2824 2825 /** 2826 * mv_port_init - Perform some early initialization on a single port. 2827 * @port: libata data structure storing shadow register addresses 2828 * @port_mmio: base address of the port 2829 * 2830 * Initialize shadow register mmio addresses, clear outstanding 2831 * interrupts on the port, and unmask interrupts for the future 2832 * start of the port. 2833 * 2834 * LOCKING: 2835 * Inherited from caller. 2836 */ 2837 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) 2838 { 2839 void __iomem *shd_base = port_mmio + SHD_BLK_OFS; 2840 unsigned serr_ofs; 2841 2842 /* PIO related setup 2843 */ 2844 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA); 2845 port->error_addr = 2846 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR); 2847 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT); 2848 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL); 2849 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM); 2850 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH); 2851 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE); 2852 port->status_addr = 2853 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS); 2854 /* special case: control/altstatus doesn't have ATA_REG_ address */ 2855 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS; 2856 2857 /* unused: */ 2858 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL; 2859 2860 /* Clear any currently outstanding port interrupt conditions */ 2861 serr_ofs = mv_scr_offset(SCR_ERROR); 2862 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs); 2863 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 2864 2865 /* unmask all non-transient EDMA error interrupts */ 2866 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS); 2867 2868 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", 2869 readl(port_mmio + EDMA_CFG_OFS), 2870 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS), 2871 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS)); 2872 } 2873 2874 static unsigned int mv_in_pcix_mode(struct ata_host *host) 2875 { 2876 struct mv_host_priv *hpriv = host->private_data; 2877 void __iomem *mmio = hpriv->base; 2878 u32 reg; 2879 2880 if (IS_SOC(hpriv) || !IS_PCIE(hpriv)) 2881 return 0; /* not PCI-X capable */ 2882 reg = 
readl(mmio + MV_PCI_MODE_OFS); 2883 if ((reg & MV_PCI_MODE_MASK) == 0) 2884 return 0; /* conventional PCI mode */ 2885 return 1; /* chip is in PCI-X mode */ 2886 } 2887 2888 static int mv_pci_cut_through_okay(struct ata_host *host) 2889 { 2890 struct mv_host_priv *hpriv = host->private_data; 2891 void __iomem *mmio = hpriv->base; 2892 u32 reg; 2893 2894 if (!mv_in_pcix_mode(host)) { 2895 reg = readl(mmio + PCI_COMMAND_OFS); 2896 if (reg & PCI_COMMAND_MRDTRIG) 2897 return 0; /* not okay */ 2898 } 2899 return 1; /* okay */ 2900 } 2901 2902 static int mv_chip_id(struct ata_host *host, unsigned int board_idx) 2903 { 2904 struct pci_dev *pdev = to_pci_dev(host->dev); 2905 struct mv_host_priv *hpriv = host->private_data; 2906 u32 hp_flags = hpriv->hp_flags; 2907 2908 switch (board_idx) { 2909 case chip_5080: 2910 hpriv->ops = &mv5xxx_ops; 2911 hp_flags |= MV_HP_GEN_I; 2912 2913 switch (pdev->revision) { 2914 case 0x1: 2915 hp_flags |= MV_HP_ERRATA_50XXB0; 2916 break; 2917 case 0x3: 2918 hp_flags |= MV_HP_ERRATA_50XXB2; 2919 break; 2920 default: 2921 dev_printk(KERN_WARNING, &pdev->dev, 2922 "Applying 50XXB2 workarounds to unknown rev\n"); 2923 hp_flags |= MV_HP_ERRATA_50XXB2; 2924 break; 2925 } 2926 break; 2927 2928 case chip_504x: 2929 case chip_508x: 2930 hpriv->ops = &mv5xxx_ops; 2931 hp_flags |= MV_HP_GEN_I; 2932 2933 switch (pdev->revision) { 2934 case 0x0: 2935 hp_flags |= MV_HP_ERRATA_50XXB0; 2936 break; 2937 case 0x3: 2938 hp_flags |= MV_HP_ERRATA_50XXB2; 2939 break; 2940 default: 2941 dev_printk(KERN_WARNING, &pdev->dev, 2942 "Applying B2 workarounds to unknown rev\n"); 2943 hp_flags |= MV_HP_ERRATA_50XXB2; 2944 break; 2945 } 2946 break; 2947 2948 case chip_604x: 2949 case chip_608x: 2950 hpriv->ops = &mv6xxx_ops; 2951 hp_flags |= MV_HP_GEN_II; 2952 2953 switch (pdev->revision) { 2954 case 0x7: 2955 hp_flags |= MV_HP_ERRATA_60X1B2; 2956 break; 2957 case 0x9: 2958 hp_flags |= MV_HP_ERRATA_60X1C0; 2959 break; 2960 default: 2961 dev_printk(KERN_WARNING, &pdev->dev, 2962 "Applying B2 workarounds to unknown rev\n"); 2963 hp_flags |= MV_HP_ERRATA_60X1B2; 2964 break; 2965 } 2966 break; 2967 2968 case chip_7042: 2969 hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH; 2970 if (pdev->vendor == PCI_VENDOR_ID_TTI && 2971 (pdev->device == 0x2300 || pdev->device == 0x2310)) 2972 { 2973 /* 2974 * Highpoint RocketRAID PCIe 23xx series cards: 2975 * 2976 * Unconfigured drives are treated as "Legacy" 2977 * by the BIOS, and it overwrites sector 8 with 2978 * a "Lgcy" metadata block prior to Linux boot. 2979 * 2980 * Configured drives (RAID or JBOD) leave sector 8 2981 * alone, but instead overwrite a high numbered 2982 * sector for the RAID metadata. This sector can 2983 * be determined exactly, by truncating the physical 2984 * drive capacity to a nice even GB value. 2985 * 2986 * RAID metadata is at: (dev->n_sectors & ~0xfffff) 2987 * 2988 * Warn the user, lest they think we're just buggy. 2989 */ 2990 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID" 2991 " BIOS CORRUPTS DATA on all attached drives," 2992 " regardless of if/how they are configured." 
2993 " BEWARE!\n"); 2994 printk(KERN_WARNING DRV_NAME ": For data safety, do not" 2995 " use sectors 8-9 on \"Legacy\" drives," 2996 " and avoid the final two gigabytes on" 2997 " all RocketRAID BIOS initialized drives.\n"); 2998 } 2999 /* drop through */ 3000 case chip_6042: 3001 hpriv->ops = &mv6xxx_ops; 3002 hp_flags |= MV_HP_GEN_IIE; 3003 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host)) 3004 hp_flags |= MV_HP_CUT_THROUGH; 3005 3006 switch (pdev->revision) { 3007 case 0x2: /* Rev.B0: the first/only public release */ 3008 hp_flags |= MV_HP_ERRATA_60X1C0; 3009 break; 3010 default: 3011 dev_printk(KERN_WARNING, &pdev->dev, 3012 "Applying 60X1C0 workarounds to unknown rev\n"); 3013 hp_flags |= MV_HP_ERRATA_60X1C0; 3014 break; 3015 } 3016 break; 3017 case chip_soc: 3018 hpriv->ops = &mv_soc_ops; 3019 hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE | 3020 MV_HP_ERRATA_60X1C0; 3021 break; 3022 3023 default: 3024 dev_printk(KERN_ERR, host->dev, 3025 "BUG: invalid board index %u\n", board_idx); 3026 return 1; 3027 } 3028 3029 hpriv->hp_flags = hp_flags; 3030 if (hp_flags & MV_HP_PCIE) { 3031 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS; 3032 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS; 3033 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS; 3034 } else { 3035 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS; 3036 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS; 3037 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS; 3038 } 3039 3040 return 0; 3041 } 3042 3043 /** 3044 * mv_init_host - Perform some early initialization of the host. 3045 * @host: ATA host to initialize 3046 * @board_idx: controller index 3047 * 3048 * If possible, do an early global reset of the host. Then do 3049 * our port init and clear/unmask all/relevant host interrupts. 3050 * 3051 * LOCKING: 3052 * Inherited from caller. 
3053 */ 3054 static int mv_init_host(struct ata_host *host, unsigned int board_idx) 3055 { 3056 int rc = 0, n_hc, port, hc; 3057 struct mv_host_priv *hpriv = host->private_data; 3058 void __iomem *mmio = hpriv->base; 3059 3060 rc = mv_chip_id(host, board_idx); 3061 if (rc) 3062 goto done; 3063 3064 if (IS_SOC(hpriv)) { 3065 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS; 3066 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK_OFS; 3067 } else { 3068 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS; 3069 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK_OFS; 3070 } 3071 3072 /* initialize shadow irq mask with register's value */ 3073 hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr); 3074 3075 /* global interrupt mask: 0 == mask everything */ 3076 mv_set_main_irq_mask(host, ~0, 0); 3077 3078 n_hc = mv_get_hc_count(host->ports[0]->flags); 3079 3080 for (port = 0; port < host->n_ports; port++) 3081 hpriv->ops->read_preamp(hpriv, port, mmio); 3082 3083 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc); 3084 if (rc) 3085 goto done; 3086 3087 hpriv->ops->reset_flash(hpriv, mmio); 3088 hpriv->ops->reset_bus(host, mmio); 3089 hpriv->ops->enable_leds(hpriv, mmio); 3090 3091 for (port = 0; port < host->n_ports; port++) { 3092 struct ata_port *ap = host->ports[port]; 3093 void __iomem *port_mmio = mv_port_base(mmio, port); 3094 3095 mv_port_init(&ap->ioaddr, port_mmio); 3096 3097 #ifdef CONFIG_PCI 3098 if (!IS_SOC(hpriv)) { 3099 unsigned int offset = port_mmio - mmio; 3100 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio"); 3101 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port"); 3102 } 3103 #endif 3104 } 3105 3106 for (hc = 0; hc < n_hc; hc++) { 3107 void __iomem *hc_mmio = mv_hc_base(mmio, hc); 3108 3109 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause " 3110 "(before clear)=0x%08x\n", hc, 3111 readl(hc_mmio + HC_CFG_OFS), 3112 readl(hc_mmio + HC_IRQ_CAUSE_OFS)); 3113 3114 /* Clear any currently outstanding hc interrupt conditions */ 3115 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); 3116 } 3117 3118 /* Clear any currently outstanding host interrupt conditions */ 3119 writelfl(0, mmio + hpriv->irq_cause_ofs); 3120 3121 /* and unmask interrupt generation for host regs */ 3122 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs); 3123 3124 /* 3125 * enable only global host interrupts for now. 3126 * The per-port interrupts get done later as ports are set up. 
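 * (The per-port DONE_IRQ/ERR_IRQ bits are toggled later through
 * mv_enable_port_irqs(), e.g. from mv_eh_freeze()/mv_eh_thaw() and
 * the command issue path.)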
3127 */ 3128 mv_set_main_irq_mask(host, 0, PCI_ERR); 3129 done: 3130 return rc; 3131 } 3132 3133 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev) 3134 { 3135 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ, 3136 MV_CRQB_Q_SZ, 0); 3137 if (!hpriv->crqb_pool) 3138 return -ENOMEM; 3139 3140 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ, 3141 MV_CRPB_Q_SZ, 0); 3142 if (!hpriv->crpb_pool) 3143 return -ENOMEM; 3144 3145 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ, 3146 MV_SG_TBL_SZ, 0); 3147 if (!hpriv->sg_tbl_pool) 3148 return -ENOMEM; 3149 3150 return 0; 3151 } 3152 3153 static void mv_conf_mbus_windows(struct mv_host_priv *hpriv, 3154 struct mbus_dram_target_info *dram) 3155 { 3156 int i; 3157 3158 for (i = 0; i < 4; i++) { 3159 writel(0, hpriv->base + WINDOW_CTRL(i)); 3160 writel(0, hpriv->base + WINDOW_BASE(i)); 3161 } 3162 3163 for (i = 0; i < dram->num_cs; i++) { 3164 struct mbus_dram_window *cs = dram->cs + i; 3165 3166 writel(((cs->size - 1) & 0xffff0000) | 3167 (cs->mbus_attr << 8) | 3168 (dram->mbus_dram_target_id << 4) | 1, 3169 hpriv->base + WINDOW_CTRL(i)); 3170 writel(cs->base, hpriv->base + WINDOW_BASE(i)); 3171 } 3172 } 3173 3174 /** 3175 * mv_platform_probe - handle a positive probe of an soc Marvell 3176 * host 3177 * @pdev: platform device found 3178 * 3179 * LOCKING: 3180 * Inherited from caller. 3181 */ 3182 static int mv_platform_probe(struct platform_device *pdev) 3183 { 3184 static int printed_version; 3185 const struct mv_sata_platform_data *mv_platform_data; 3186 const struct ata_port_info *ppi[] = 3187 { &mv_port_info[chip_soc], NULL }; 3188 struct ata_host *host; 3189 struct mv_host_priv *hpriv; 3190 struct resource *res; 3191 int n_ports, rc; 3192 3193 if (!printed_version++) 3194 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); 3195 3196 /* 3197 * Simple resource validation .. 3198 */ 3199 if (unlikely(pdev->num_resources != 2)) { 3200 dev_err(&pdev->dev, "invalid number of resources\n"); 3201 return -EINVAL; 3202 } 3203 3204 /* 3205 * Get the register base first 3206 */ 3207 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 3208 if (res == NULL) 3209 return -EINVAL; 3210 3211 /* allocate host */ 3212 mv_platform_data = pdev->dev.platform_data; 3213 n_ports = mv_platform_data->n_ports; 3214 3215 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); 3216 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); 3217 3218 if (!host || !hpriv) 3219 return -ENOMEM; 3220 host->private_data = hpriv; 3221 hpriv->n_ports = n_ports; 3222 3223 host->iomap = NULL; 3224 hpriv->base = devm_ioremap(&pdev->dev, res->start, 3225 res->end - res->start + 1); 3226 hpriv->base -= MV_SATAHC0_REG_BASE; 3227 3228 /* 3229 * (Re-)program MBUS remapping windows if we are asked to. 3230 */ 3231 if (mv_platform_data->dram != NULL) 3232 mv_conf_mbus_windows(hpriv, mv_platform_data->dram); 3233 3234 rc = mv_create_dma_pools(hpriv, &pdev->dev); 3235 if (rc) 3236 return rc; 3237 3238 /* initialize adapter */ 3239 rc = mv_init_host(host, chip_soc); 3240 if (rc) 3241 return rc; 3242 3243 dev_printk(KERN_INFO, &pdev->dev, 3244 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH, 3245 host->n_ports); 3246 3247 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt, 3248 IRQF_SHARED, &mv6_sht); 3249 } 3250 3251 /* 3252 * 3253 * mv_platform_remove - unplug a platform interface 3254 * @pdev: platform device 3255 * 3256 * A platform bus SATA device has been unplugged. 
Perform the needed 3257 * cleanup. Also called on module unload for any active devices. 3258 */ 3259 static int __devexit mv_platform_remove(struct platform_device *pdev) 3260 { 3261 struct device *dev = &pdev->dev; 3262 struct ata_host *host = dev_get_drvdata(dev); 3263 3264 ata_host_detach(host); 3265 return 0; 3266 } 3267 3268 static struct platform_driver mv_platform_driver = { 3269 .probe = mv_platform_probe, 3270 .remove = __devexit_p(mv_platform_remove), 3271 .driver = { 3272 .name = DRV_NAME, 3273 .owner = THIS_MODULE, 3274 }, 3275 }; 3276 3277 3278 #ifdef CONFIG_PCI 3279 static int mv_pci_init_one(struct pci_dev *pdev, 3280 const struct pci_device_id *ent); 3281 3282 3283 static struct pci_driver mv_pci_driver = { 3284 .name = DRV_NAME, 3285 .id_table = mv_pci_tbl, 3286 .probe = mv_pci_init_one, 3287 .remove = ata_pci_remove_one, 3288 }; 3289 3290 /* 3291 * module options 3292 */ 3293 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */ 3294 3295 3296 /* move to PCI layer or libata core? */ 3297 static int pci_go_64(struct pci_dev *pdev) 3298 { 3299 int rc; 3300 3301 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 3302 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 3303 if (rc) { 3304 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 3305 if (rc) { 3306 dev_printk(KERN_ERR, &pdev->dev, 3307 "64-bit DMA enable failed\n"); 3308 return rc; 3309 } 3310 } 3311 } else { 3312 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 3313 if (rc) { 3314 dev_printk(KERN_ERR, &pdev->dev, 3315 "32-bit DMA enable failed\n"); 3316 return rc; 3317 } 3318 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 3319 if (rc) { 3320 dev_printk(KERN_ERR, &pdev->dev, 3321 "32-bit consistent DMA enable failed\n"); 3322 return rc; 3323 } 3324 } 3325 3326 return rc; 3327 } 3328 3329 /** 3330 * mv_print_info - Dump key info to kernel log for perusal. 3331 * @host: ATA host to print info about 3332 * 3333 * FIXME: complete this. 3334 * 3335 * LOCKING: 3336 * Inherited from caller. 3337 */ 3338 static void mv_print_info(struct ata_host *host) 3339 { 3340 struct pci_dev *pdev = to_pci_dev(host->dev); 3341 struct mv_host_priv *hpriv = host->private_data; 3342 u8 scc; 3343 const char *scc_s, *gen; 3344 3345 /* Use this to determine the HW stepping of the chip so we know 3346 * what errata to workaround 3347 */ 3348 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc); 3349 if (scc == 0) 3350 scc_s = "SCSI"; 3351 else if (scc == 0x01) 3352 scc_s = "RAID"; 3353 else 3354 scc_s = "?"; 3355 3356 if (IS_GEN_I(hpriv)) 3357 gen = "I"; 3358 else if (IS_GEN_II(hpriv)) 3359 gen = "II"; 3360 else if (IS_GEN_IIE(hpriv)) 3361 gen = "IIE"; 3362 else 3363 gen = "?"; 3364 3365 dev_printk(KERN_INFO, &pdev->dev, 3366 "Gen-%s %u slots %u ports %s mode IRQ via %s\n", 3367 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports, 3368 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); 3369 } 3370 3371 /** 3372 * mv_pci_init_one - handle a positive probe of a PCI Marvell host 3373 * @pdev: PCI device found 3374 * @ent: PCI device ID entry for the matched host 3375 * 3376 * LOCKING: 3377 * Inherited from caller. 
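 *
 * Rough sequence below: allocate the host and private data, enable
 * and map the PCI device, set the DMA mask via pci_go_64(), create
 * the DMA pools, initialize the adapter with mv_init_host(), enable
 * MSI if requested, then hand off to ata_host_activate().  Returns
 * zero on success, nonzero on failure.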
3378 */ 3379 static int mv_pci_init_one(struct pci_dev *pdev, 3380 const struct pci_device_id *ent) 3381 { 3382 static int printed_version; 3383 unsigned int board_idx = (unsigned int)ent->driver_data; 3384 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL }; 3385 struct ata_host *host; 3386 struct mv_host_priv *hpriv; 3387 int n_ports, rc; 3388 3389 if (!printed_version++) 3390 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); 3391 3392 /* allocate host */ 3393 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC; 3394 3395 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); 3396 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); 3397 if (!host || !hpriv) 3398 return -ENOMEM; 3399 host->private_data = hpriv; 3400 hpriv->n_ports = n_ports; 3401 3402 /* acquire resources */ 3403 rc = pcim_enable_device(pdev); 3404 if (rc) 3405 return rc; 3406 3407 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME); 3408 if (rc == -EBUSY) 3409 pcim_pin_device(pdev); 3410 if (rc) 3411 return rc; 3412 host->iomap = pcim_iomap_table(pdev); 3413 hpriv->base = host->iomap[MV_PRIMARY_BAR]; 3414 3415 rc = pci_go_64(pdev); 3416 if (rc) 3417 return rc; 3418 3419 rc = mv_create_dma_pools(hpriv, &pdev->dev); 3420 if (rc) 3421 return rc; 3422 3423 /* initialize adapter */ 3424 rc = mv_init_host(host, board_idx); 3425 if (rc) 3426 return rc; 3427 3428 /* Enable message-switched interrupts, if requested */ 3429 if (msi && pci_enable_msi(pdev) == 0) 3430 hpriv->hp_flags |= MV_HP_FLAG_MSI; 3431 3432 mv_dump_pci_cfg(pdev, 0x68); 3433 mv_print_info(host); 3434 3435 pci_set_master(pdev); 3436 pci_try_set_mwi(pdev); 3437 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED, 3438 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht); 3439 } 3440 #endif 3441 3442 static int mv_platform_probe(struct platform_device *pdev); 3443 static int __devexit mv_platform_remove(struct platform_device *pdev); 3444 3445 static int __init mv_init(void) 3446 { 3447 int rc = -ENODEV; 3448 #ifdef CONFIG_PCI 3449 rc = pci_register_driver(&mv_pci_driver); 3450 if (rc < 0) 3451 return rc; 3452 #endif 3453 rc = platform_driver_register(&mv_platform_driver); 3454 3455 #ifdef CONFIG_PCI 3456 if (rc < 0) 3457 pci_unregister_driver(&mv_pci_driver); 3458 #endif 3459 return rc; 3460 } 3461 3462 static void __exit mv_exit(void) 3463 { 3464 #ifdef CONFIG_PCI 3465 pci_unregister_driver(&mv_pci_driver); 3466 #endif 3467 platform_driver_unregister(&mv_platform_driver); 3468 } 3469 3470 MODULE_AUTHOR("Brett Russ"); 3471 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers"); 3472 MODULE_LICENSE("GPL"); 3473 MODULE_DEVICE_TABLE(pci, mv_pci_tbl); 3474 MODULE_VERSION(DRV_VERSION); 3475 MODULE_ALIAS("platform:" DRV_NAME); 3476 3477 #ifdef CONFIG_PCI 3478 module_param(msi, int, 0444); 3479 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)"); 3480 #endif 3481 3482 module_init(mv_init); 3483 module_exit(mv_exit); 3484
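/*
 * Usage note (illustration only): on PCI hosts, the "msi" module
 * parameter declared above can be used to request message-signaled
 * interrupts at load time, e.g.:
 *
 *	modprobe sata_mv msi=1
 *
 * If pci_enable_msi() fails, the driver keeps using legacy INTx
 * interrupts.
 */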