/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * sata_mv TODO list:
 *
 * --> Errata workaround for NCQ device errors.
 *
 * --> More errata workarounds for PCI-X.
 *
 * --> Complete a full errata audit for all chipsets to identify others.
 *
 * --> ATAPI support (Marvell claims the 60xx/70xx chips can do it).
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> [Experiment, low priority] Investigate interrupt coalescing.
 *       Quite often, especially with PCI Message Signalled Interrupts (MSI),
 *       the overhead reduced by interrupt mitigation is not worth the
 *       latency cost.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *       creating LibATA target mode support would be very interesting.
 *
 *       Target mode, for those without docs, is the ability to directly
 *       connect two SATA ports.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.25"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE	= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO	= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI	= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL_OFS	= 0x1046c,
	MV_GPIO_PORT_CTL_OFS	= 0x104f0,
	MV_RESET_CFG_OFS	= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary.
	 * Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1),   /* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,

	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	MV_GENIIE_FLAGS		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | ATA_FLAG_AN,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,
	PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE_OFS		= 0xd00,
	MV_PCI_MODE_MASK	= 0x30,

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT_OFS	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	PCI_HC_MAIN_IRQ_MASK_OFS  = 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	SOC_HC_MAIN_IRQ_MASK_OFS  = 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by port # */
	DONE_IRQ		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	SATA_FIS_IRQ_AN		= (1 << 9),	/* async notification */

	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */

	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_TESTCTL_OFS	= 0x348,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FISCFG_OFS		= 0x360,
	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	MV5_PHY_MODE		= 0x74,
	MV5_LTMODE_OFS		= 0x30,
	MV5_PHY_CTL_OFS		= 0x0C,
	SATA_INTERFACE_CFG_OFS	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT	= EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */

	EDMA_STATUS_OFS		= 0x30,		/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */

	EDMA_IORDY_TMOUT_OFS	= 0x34,
	EDMA_ARB_CFG_OFS	= 0x38,

	EDMA_HALTCOND_OFS	= 0x60,		/* GenIIe halt conditions */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
	unsigned int		delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	u32			main_irq_mask;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
				    struct mv_port_priv *pp);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_GENIIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_GENIIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_GENIIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
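 *
 * Worked example (derived from the macro below, not from the datasheet):
 * port 5 gives hardport = 5 & 3 = 1 and shift = (5 >> 2) * 9 + 1 * 2 = 11,
 * so that port's ERR_IRQ/DONE_IRQ bits land at main-cause bits 11 and 12.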
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		writelfl(new_mask, hpriv->main_irq_mask_addr);
	}
}

static void mv_enable_port_irqs(struct ata_port *ap,
				unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits  = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}

/**
 *	mv_start_dma - Enable eDMA engine
 *	@base: port base address
 *	@pp: port private data
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	WARN_ON.
 *
 *	LOCKING:
 *	Inherited from caller.
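 *
 *	(As implemented below, the caller passes qc->tf.protocol so that the
 *	NCQ vs. non-NCQ EDMA configuration can be re-checked, and the engine
 *	is stopped and reconfigured first if the mode needs to change.)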
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hardport = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), ap->port_no);
		u32 hc_irq_cause;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear pending irq events */
		hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
		writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		if (IS_GEN_IIE(hpriv))
			writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		mv_enable_port_irqs(ap, DONE_IRQ|ERR_IRQ);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}

static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	int i;

	/*
	 * Wait for the EDMA engine to finish transactions in progress.
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use.  So we use the 15msec value above
	 * as a rough guess at what even more drives might require.
	 */
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
		if ((edma_stat & empty_idle) == empty_idle)
			break;
		udelay(per_loop);
	}
	/* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
}

/**
 *	mv_stop_edma_engine - Disable eDMA engine
 *	@port_mmio: io base address
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off.
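	 * (EDMA_EN in EDMA_CMD_OFS is polled below until it clears: up to
	 * 10000 iterations of udelay(10), i.e. roughly 100ms worst case.)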
	 */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}

static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		return -EIO;
	}
	return 0;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4-port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		}
	}
}

static int mv_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

	/*
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	 */
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;
	/*
	 * If the port is completely idle, then allow the new qc.
	 */
	if (ap->nr_active_links == 0)
		return 0;

	/*
	 * The port is operating in host queuing mode (EDMA) with NCQ
	 * enabled, allow multiple NCQ commands.  EDMA also allows
	 * queueing multiple DMA commands but libata core currently
	 * doesn't allow it.
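	 *
	 * (That is why the test just below only returns 0 when both EDMA and
	 * NCQ are enabled for this port and the new command itself is NCQ.)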
	 */
	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol))
		return 0;

	return ATA_DEFER_PORT;
}

static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs)
{
	u32 new_fiscfg, old_fiscfg;
	u32 new_ltmode, old_ltmode;
	u32 new_haltcond, old_haltcond;

	old_fiscfg   = readl(port_mmio + FISCFG_OFS);
	old_ltmode   = readl(port_mmio + LTMODE_OFS);
	old_haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);

	new_fiscfg   = old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
	new_ltmode   = old_ltmode & ~LTMODE_BIT8;
	new_haltcond = old_haltcond | EDMA_ERR_DEV;

	if (want_fbs) {
		new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC;
		new_ltmode = old_ltmode | LTMODE_BIT8;
		if (want_ncq)
			new_haltcond &= ~EDMA_ERR_DEV;
		else
			new_fiscfg |=  FISCFG_WAIT_DEV_ERR;
	}

	if (new_fiscfg != old_fiscfg)
		writelfl(new_fiscfg, port_mmio + FISCFG_OFS);
	if (new_ltmode != old_ltmode)
		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
	if (new_haltcond != old_haltcond)
		writelfl(new_haltcond, port_mmio + EDMA_HALTCOND_OFS);
}

static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 old, new;

	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
	old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS);
	if (want_ncq)
		new = old | (1 << 22);
	else
		new = old & ~(1 << 22);
	if (new != old)
		writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS);
}

static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
	pp->pp_flags &= ~MV_PP_FLAG_FBS_EN;

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv)) {
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		mv_60x1_errata_sata25(ap, want_ncq);

	} else if (IS_GEN_IIE(hpriv)) {
		int want_fbs = sata_pmp_attached(ap);
		/*
		 * Possible future enhancement:
		 *
		 * The chip can use FBS with non-NCQ, if we allow it.
		 * But first we need to have the error handling in place
		 * for this mode (datasheet section 7.3.15.4.2.3).
		 * So disallow non-NCQ FBS for now.
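		 *
		 * (want_fbs is therefore ANDed with want_ncq immediately
		 * below, so FBS is only configured for the NCQ case.)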
		 */
		want_fbs &= want_ncq;

		mv_config_fbs(port_mmio, want_ncq, want_fbs);

		if (want_fbs) {
			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
		}

		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		if (!IS_SOC(hpriv))
			cfg |= (1 << 18); /* enab early completion */
		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 *	mv_port_start - Port specific init/start routine.
 *	@ap: ATA channel to manipulate
 *
 *	Allocate and point to DMA memory, init port private memory,
 *	zero indices.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
		ap->flags |= ATA_FLAG_AN;
	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/**
 *	mv_port_stop - Port specific cleanup/stop routine.
 *	@ap: ATA channel to manipulate
 *
 *	Stop DMA, cleanup port memory.
 *
 *	LOCKING:
 *	This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
	mv_port_free_dma_mem(ap);
}

/**
 *	mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *	@qc: queued command whose SG list to source from
 *
 *	Populate the SG list and mark the last entry.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if (offset + sg_len > 0x10000)
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}

static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

/**
 *	mv_qc_prep - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect, which is not needed there
	 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
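	 *
	 * (Concretely, in the switch below READ/WRITE [FUA] [EXT] keep
	 * hob_nsect, while FPDMA READ/WRITE keep feature/hob_feature
	 * instead; the remaining registers are packed unconditionally.)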
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *	mv_qc_prep_iie - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
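 *
 *	(Unlike mv_qc_prep(), the Gen-IIE CRQB packs the taskfile into four
 *	32-bit words -- see struct mv_crqb_iie -- rather than into eleven
 *	16-bit register/value pairs.)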
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *	mv_qc_issue - Initiate a command to the host
 *	@qc: queued command to start
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it sanity checks our local
 *	caches of the request producer/consumer indices then enables
 *	DMA and bumps the request producer index.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		static int limit_warnings = 10;
		/*
		 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
		 *
		 * Someday, we might implement special polling workarounds
		 * for these, but it all seems rather unnecessary since we
		 * normally use only DMA for commands which transfer more
		 * than a single block of data.
		 *
		 * Much of the time, this could just work regardless.
		 * So for now, just log the incident, and allow the attempt.
		 */
		if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
			--limit_warnings;
			ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
					": attempting PIO w/multiple DRQ: "
					"this may fail due to h/w errata\n");
		}
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
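		 *
		 * (Done just below via mv_stop_edma(); the command is then
		 * handed to ata_sff_qc_issue() for normal shadow-register
		 * issue.)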
		 */
		mv_stop_edma(ap);
		mv_enable_port_irqs(ap, ERR_IRQ);
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
	in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
		return NULL;
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
		qc = NULL;
	return qc;
}

static void mv_pmp_error_handler(struct ata_port *ap)
{
	unsigned int pmp, pmp_map;
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
		/*
		 * Perform NCQ error analysis on failed PMPs
		 * before we freeze the port entirely.
		 *
		 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
		 */
		pmp_map = pp->delayed_eh_pmp_map;
		pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
		for (pmp = 0; pmp_map != 0; pmp++) {
			unsigned int this_pmp = (1 << pmp);
			if (pmp_map & this_pmp) {
				struct ata_link *link = &ap->pmp_link[pmp];
				pmp_map &= ~this_pmp;
				ata_eh_analyze_ncq_error(link);
			}
		}
		ata_port_freeze(ap);
	}
	sata_pmp_error_handler(ap);
}

static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);

	return readl(port_mmio + SATA_TESTCTL_OFS) >> 16;
}

static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
{
	struct ata_eh_info *ehi;
	unsigned int pmp;

	/*
	 * Initialize EH info for PMPs which saw device errors
	 */
	ehi = &ap->link.eh_info;
	for (pmp = 0; pmp_map != 0; pmp++) {
		unsigned int this_pmp = (1 << pmp);
		if (pmp_map & this_pmp) {
			struct ata_link *link = &ap->pmp_link[pmp];

			pmp_map &= ~this_pmp;
			ehi = &link->eh_info;
			ata_ehi_clear_desc(ehi);
			ata_ehi_push_desc(ehi, "dev err");
			ehi->err_mask |= AC_ERR_DEV;
			ehi->action |= ATA_EH_RESET;
			ata_link_abort(link);
		}
	}
}

static int mv_req_q_empty(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 in_ptr, out_ptr;

	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	return (in_ptr == out_ptr);	/* 1 == queue_is_empty */
}

static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	int failed_links;
	unsigned int old_map, new_map;

	/*
	 * Device error during FBS+NCQ operation:
	 *
	 * Set a port flag to prevent further I/O being enqueued.
	 * Leave the EDMA running to drain outstanding commands from this port.
	 * Perform the post-mortem/EH only when all responses are complete.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
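	 *
	 * (Accordingly, the code below defers mv_stop_edma()/mv_eh_freeze()
	 * until nr_active_links <= failed_links and mv_req_q_empty() reports
	 * the request queue drained.)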
	 */
	if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
		pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
		pp->delayed_eh_pmp_map = 0;
	}
	old_map = pp->delayed_eh_pmp_map;
	new_map = old_map | mv_get_err_pmp_map(ap);

	if (old_map != new_map) {
		pp->delayed_eh_pmp_map = new_map;
		mv_pmp_eh_prep(ap, new_map & ~old_map);
	}
	failed_links = hweight16(new_map);

	ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
			"failed_links=%d nr_active_links=%d\n",
			__func__, pp->delayed_eh_pmp_map,
			ap->qc_active, failed_links,
			ap->nr_active_links);

	if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
		mv_process_crpb_entries(ap, pp);
		mv_stop_edma(ap);
		mv_eh_freeze(ap);
		ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
		return 1;	/* handled */
	}
	ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
	return 1;	/* handled */
}

static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
{
	/*
	 * Possible future enhancement:
	 *
	 * FBS+non-NCQ operation is not yet implemented.
	 * See related notes in mv_edma_cfg().
	 *
	 * Device error during FBS+non-NCQ operation:
	 *
	 * We need to snapshot the shadow registers for each failed command.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
	 */
	return 0;	/* not handled */
}

static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
{
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;	/* EDMA was not active: not handled */
	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
		return 0;	/* FBS was not active: not handled */

	if (!(edma_err_cause & EDMA_ERR_DEV))
		return 0;	/* non DEV error: not handled */
	edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
	if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
		return 0;	/* other problems: not handled */

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
		/*
		 * EDMA should NOT have self-disabled for this case.
		 * If it did, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			ata_port_printk(ap, KERN_WARNING,
				"%s: err_cause=0x%x pp_flags=0x%x\n",
				__func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_ncq_dev_err(ap);
	} else {
		/*
		 * EDMA should have self-disabled for this case.
		 * If it did not, then something is wrong elsewhere,
		 * and we cannot handle it here.
1786 */ 1787 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) { 1788 ata_port_printk(ap, KERN_WARNING, 1789 "%s: err_cause=0x%x pp_flags=0x%x\n", 1790 __func__, edma_err_cause, pp->pp_flags); 1791 return 0; /* not handled */ 1792 } 1793 return mv_handle_fbs_non_ncq_dev_err(ap); 1794 } 1795 return 0; /* not handled */ 1796 } 1797 1798 static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) 1799 { 1800 struct ata_eh_info *ehi = &ap->link.eh_info; 1801 char *when = "idle"; 1802 1803 ata_ehi_clear_desc(ehi); 1804 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) { 1805 when = "disabled"; 1806 } else if (edma_was_enabled) { 1807 when = "EDMA enabled"; 1808 } else { 1809 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); 1810 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) 1811 when = "polling"; 1812 } 1813 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when); 1814 ehi->err_mask |= AC_ERR_OTHER; 1815 ehi->action |= ATA_EH_RESET; 1816 ata_port_freeze(ap); 1817 } 1818 1819 /** 1820 * mv_err_intr - Handle error interrupts on the port 1821 * @ap: ATA channel to manipulate 1822 * 1823 * Most cases require a full reset of the chip's state machine, 1824 * which also performs a COMRESET. 1825 * Also, if the port disabled DMA, update our cached copy to match. 1826 * 1827 * LOCKING: 1828 * Inherited from caller. 1829 */ 1830 static void mv_err_intr(struct ata_port *ap) 1831 { 1832 void __iomem *port_mmio = mv_ap_base(ap); 1833 u32 edma_err_cause, eh_freeze_mask, serr = 0; 1834 u32 fis_cause = 0; 1835 struct mv_port_priv *pp = ap->private_data; 1836 struct mv_host_priv *hpriv = ap->host->private_data; 1837 unsigned int action = 0, err_mask = 0; 1838 struct ata_eh_info *ehi = &ap->link.eh_info; 1839 struct ata_queued_cmd *qc; 1840 int abort = 0; 1841 1842 /* 1843 * Read and clear the SError and err_cause bits. 1844 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear 1845 * the FIS_IRQ_CAUSE register before clearing edma_err_cause. 1846 */ 1847 sata_scr_read(&ap->link, SCR_ERROR, &serr); 1848 sata_scr_write_flush(&ap->link, SCR_ERROR, serr); 1849 1850 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1851 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { 1852 fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS); 1853 writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS); 1854 } 1855 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1856 1857 if (edma_err_cause & EDMA_ERR_DEV) { 1858 /* 1859 * Device errors during FIS-based switching operation 1860 * require special handling. 
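 *
 * (mv_handle_dev_err() claims the event only when EDMA and FBS were
 *  both active and the cause is purely a device error; a sketch of
 *  the gating it applies, mirroring the checks at the top of that
 *  function:
 *
 *	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
 *		return 0;
 *	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
 *		return 0;
 *	if (!(edma_err_cause & EDMA_ERR_DEV))
 *		return 0;
 *
 *  It then dispatches to the NCQ or non-NCQ FBS handler above.)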
1861 */ 1862 if (mv_handle_dev_err(ap, edma_err_cause)) 1863 return; 1864 } 1865 1866 qc = mv_get_active_qc(ap); 1867 ata_ehi_clear_desc(ehi); 1868 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x", 1869 edma_err_cause, pp->pp_flags); 1870 1871 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { 1872 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause); 1873 if (fis_cause & SATA_FIS_IRQ_AN) { 1874 u32 ec = edma_err_cause & 1875 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT); 1876 sata_async_notification(ap); 1877 if (!ec) 1878 return; /* Just an AN; no need for the nukes */ 1879 ata_ehi_push_desc(ehi, "SDB notify"); 1880 } 1881 } 1882 /* 1883 * All generations share these EDMA error cause bits: 1884 */ 1885 if (edma_err_cause & EDMA_ERR_DEV) { 1886 err_mask |= AC_ERR_DEV; 1887 action |= ATA_EH_RESET; 1888 ata_ehi_push_desc(ehi, "dev error"); 1889 } 1890 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | 1891 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | 1892 EDMA_ERR_INTRL_PAR)) { 1893 err_mask |= AC_ERR_ATA_BUS; 1894 action |= ATA_EH_RESET; 1895 ata_ehi_push_desc(ehi, "parity error"); 1896 } 1897 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) { 1898 ata_ehi_hotplugged(ehi); 1899 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? 1900 "dev disconnect" : "dev connect"); 1901 action |= ATA_EH_RESET; 1902 } 1903 1904 /* 1905 * Gen-I has a different SELF_DIS bit, 1906 * different FREEZE bits, and no SERR bit: 1907 */ 1908 if (IS_GEN_I(hpriv)) { 1909 eh_freeze_mask = EDMA_EH_FREEZE_5; 1910 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) { 1911 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 1912 ata_ehi_push_desc(ehi, "EDMA self-disable"); 1913 } 1914 } else { 1915 eh_freeze_mask = EDMA_EH_FREEZE; 1916 if (edma_err_cause & EDMA_ERR_SELF_DIS) { 1917 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 1918 ata_ehi_push_desc(ehi, "EDMA self-disable"); 1919 } 1920 if (edma_err_cause & EDMA_ERR_SERR) { 1921 ata_ehi_push_desc(ehi, "SError=%08x", serr); 1922 err_mask |= AC_ERR_ATA_BUS; 1923 action |= ATA_EH_RESET; 1924 } 1925 } 1926 1927 if (!err_mask) { 1928 err_mask = AC_ERR_OTHER; 1929 action |= ATA_EH_RESET; 1930 } 1931 1932 ehi->serror |= serr; 1933 ehi->action |= action; 1934 1935 if (qc) 1936 qc->err_mask |= err_mask; 1937 else 1938 ehi->err_mask |= err_mask; 1939 1940 if (err_mask == AC_ERR_DEV) { 1941 /* 1942 * Cannot do ata_port_freeze() here, 1943 * because it would kill PIO access, 1944 * which is needed for further diagnosis. 1945 */ 1946 mv_eh_freeze(ap); 1947 abort = 1; 1948 } else if (edma_err_cause & eh_freeze_mask) { 1949 /* 1950 * Note to self: ata_port_freeze() calls ata_port_abort() 1951 */ 1952 ata_port_freeze(ap); 1953 } else { 1954 abort = 1; 1955 } 1956 1957 if (abort) { 1958 if (qc) 1959 ata_link_abort(qc->dev->link); 1960 else 1961 ata_port_abort(ap); 1962 } 1963 } 1964 1965 static void mv_process_crpb_response(struct ata_port *ap, 1966 struct mv_crpb *response, unsigned int tag, int ncq_enabled) 1967 { 1968 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); 1969 1970 if (qc) { 1971 u8 ata_status; 1972 u16 edma_status = le16_to_cpu(response->flags); 1973 /* 1974 * edma_status from a response queue entry: 1975 * LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only). 1976 * MSB is saved ATA status from command completion. 1977 */ 1978 if (!ncq_enabled) { 1979 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV; 1980 if (err_cause) { 1981 /* 1982 * Error will be seen/handled by mv_err_intr(). 1983 * So do nothing at all here. 
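 *
 * (For reference, a sketch of how the 16-bit CRPB flags word is
 *  consumed here, matching the field description above:
 *
 *	err_cause  = edma_status & 0xff & ~EDMA_ERR_DEV;      low byte
 *	ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;   high byte
 *
 *  EDMA_ERR_DEV is masked off so that a pure device error falls
 *  through to the ata_status/ac_err_mask() check below.)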
1984 */ 1985 return; 1986 } 1987 } 1988 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT; 1989 if (!ac_err_mask(ata_status)) 1990 ata_qc_complete(qc); 1991 /* else: leave it for mv_err_intr() */ 1992 } else { 1993 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n", 1994 __func__, tag); 1995 } 1996 } 1997 1998 static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp) 1999 { 2000 void __iomem *port_mmio = mv_ap_base(ap); 2001 struct mv_host_priv *hpriv = ap->host->private_data; 2002 u32 in_index; 2003 bool work_done = false; 2004 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN); 2005 2006 /* Get the hardware queue position index */ 2007 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) 2008 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; 2009 2010 /* Process new responses since the last time we looked */ 2011 while (in_index != pp->resp_idx) { 2012 unsigned int tag; 2013 struct mv_crpb *response = &pp->crpb[pp->resp_idx]; 2014 2015 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK; 2016 2017 if (IS_GEN_I(hpriv)) { 2018 /* 50xx: no NCQ, only one command active at a time */ 2019 tag = ap->link.active_tag; 2020 } else { 2021 /* Gen II/IIE: get command tag from CRPB entry */ 2022 tag = le16_to_cpu(response->id) & 0x1f; 2023 } 2024 mv_process_crpb_response(ap, response, tag, ncq_enabled); 2025 work_done = true; 2026 } 2027 2028 /* Update the software queue position index in hardware */ 2029 if (work_done) 2030 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | 2031 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT), 2032 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 2033 } 2034 2035 static void mv_port_intr(struct ata_port *ap, u32 port_cause) 2036 { 2037 struct mv_port_priv *pp; 2038 int edma_was_enabled; 2039 2040 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) { 2041 mv_unexpected_intr(ap, 0); 2042 return; 2043 } 2044 /* 2045 * Grab a snapshot of the EDMA_EN flag setting, 2046 * so that we have a consistent view for this port, 2047 * even if one of the routines we call changes it. 2048 */ 2049 pp = ap->private_data; 2050 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN); 2051 /* 2052 * Process completed CRPB response(s) before other events. 2053 */ 2054 if (edma_was_enabled && (port_cause & DONE_IRQ)) { 2055 mv_process_crpb_entries(ap, pp); 2056 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) 2057 mv_handle_fbs_ncq_dev_err(ap); 2058 } 2059 /* 2060 * Handle chip-reported errors, or continue on to handle PIO. 2061 */ 2062 if (unlikely(port_cause & ERR_IRQ)) { 2063 mv_err_intr(ap); 2064 } else if (!edma_was_enabled) { 2065 struct ata_queued_cmd *qc = mv_get_active_qc(ap); 2066 if (qc) 2067 ata_sff_host_intr(ap, qc); 2068 else 2069 mv_unexpected_intr(ap, edma_was_enabled); 2070 } 2071 } 2072 2073 /** 2074 * mv_host_intr - Handle all interrupts on the given host controller 2075 * @host: host specific structure 2076 * @main_irq_cause: Main interrupt cause register for the chip. 2077 * 2078 * LOCKING: 2079 * Inherited from caller.
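 *
 * Each port contributes a DONE_IRQ and an ERR_IRQ bit to
 * @main_irq_cause; a sketch of the per-port extraction performed in
 * the loop below:
 *
 *	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
 *	port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);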
2080 */ 2081 static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) 2082 { 2083 struct mv_host_priv *hpriv = host->private_data; 2084 void __iomem *mmio = hpriv->base, *hc_mmio; 2085 unsigned int handled = 0, port; 2086 2087 for (port = 0; port < hpriv->n_ports; port++) { 2088 struct ata_port *ap = host->ports[port]; 2089 unsigned int p, shift, hardport, port_cause; 2090 2091 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); 2092 /* 2093 * Each hc within the host has its own hc_irq_cause register, 2094 * where the interrupting ports bits get ack'd. 2095 */ 2096 if (hardport == 0) { /* first port on this hc ? */ 2097 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND; 2098 u32 port_mask, ack_irqs; 2099 /* 2100 * Skip this entire hc if nothing pending for any ports 2101 */ 2102 if (!hc_cause) { 2103 port += MV_PORTS_PER_HC - 1; 2104 continue; 2105 } 2106 /* 2107 * We don't need/want to read the hc_irq_cause register, 2108 * because doing so hurts performance, and 2109 * main_irq_cause already gives us everything we need. 2110 * 2111 * But we do have to *write* to the hc_irq_cause to ack 2112 * the ports that we are handling this time through. 2113 * 2114 * This requires that we create a bitmap for those 2115 * ports which interrupted us, and use that bitmap 2116 * to ack (only) those ports via hc_irq_cause. 2117 */ 2118 ack_irqs = 0; 2119 for (p = 0; p < MV_PORTS_PER_HC; ++p) { 2120 if ((port + p) >= hpriv->n_ports) 2121 break; 2122 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2); 2123 if (hc_cause & port_mask) 2124 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p; 2125 } 2126 hc_mmio = mv_hc_base_from_port(mmio, port); 2127 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS); 2128 handled = 1; 2129 } 2130 /* 2131 * Handle interrupts signalled for this port: 2132 */ 2133 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ); 2134 if (port_cause) 2135 mv_port_intr(ap, port_cause); 2136 } 2137 return handled; 2138 } 2139 2140 static int mv_pci_error(struct ata_host *host, void __iomem *mmio) 2141 { 2142 struct mv_host_priv *hpriv = host->private_data; 2143 struct ata_port *ap; 2144 struct ata_queued_cmd *qc; 2145 struct ata_eh_info *ehi; 2146 unsigned int i, err_mask, printed = 0; 2147 u32 err_cause; 2148 2149 err_cause = readl(mmio + hpriv->irq_cause_ofs); 2150 2151 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", 2152 err_cause); 2153 2154 DPRINTK("All regs @ PCI error\n"); 2155 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); 2156 2157 writelfl(0, mmio + hpriv->irq_cause_ofs); 2158 2159 for (i = 0; i < host->n_ports; i++) { 2160 ap = host->ports[i]; 2161 if (!ata_link_offline(&ap->link)) { 2162 ehi = &ap->link.eh_info; 2163 ata_ehi_clear_desc(ehi); 2164 if (!printed++) 2165 ata_ehi_push_desc(ehi, 2166 "PCI err cause 0x%08x", err_cause); 2167 err_mask = AC_ERR_HOST_BUS; 2168 ehi->action = ATA_EH_RESET; 2169 qc = ata_qc_from_tag(ap, ap->link.active_tag); 2170 if (qc) 2171 qc->err_mask |= err_mask; 2172 else 2173 ehi->err_mask |= err_mask; 2174 2175 ata_port_freeze(ap); 2176 } 2177 } 2178 return 1; /* handled */ 2179 } 2180 2181 /** 2182 * mv_interrupt - Main interrupt event handler 2183 * @irq: unused 2184 * @dev_instance: private data; in this case the host structure 2185 * 2186 * Read the read only register to determine if any host 2187 * controllers have pending interrupts. If so, call lower level 2188 * routine to handle. Also check for PCI errors which are only 2189 * reported here. 
2190 * 2191 * LOCKING: 2192 * This routine holds the host lock while processing pending 2193 * interrupts. 2194 */ 2195 static irqreturn_t mv_interrupt(int irq, void *dev_instance) 2196 { 2197 struct ata_host *host = dev_instance; 2198 struct mv_host_priv *hpriv = host->private_data; 2199 unsigned int handled = 0; 2200 int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI; 2201 u32 main_irq_cause, pending_irqs; 2202 2203 spin_lock(&host->lock); 2204 2205 /* for MSI: block new interrupts while in here */ 2206 if (using_msi) 2207 writel(0, hpriv->main_irq_mask_addr); 2208 2209 main_irq_cause = readl(hpriv->main_irq_cause_addr); 2210 pending_irqs = main_irq_cause & hpriv->main_irq_mask; 2211 /* 2212 * Deal with cases where we either have nothing pending, or have read 2213 * a bogus register value which can indicate HW removal or PCI fault. 2214 */ 2215 if (pending_irqs && main_irq_cause != 0xffffffffU) { 2216 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv))) 2217 handled = mv_pci_error(host, hpriv->base); 2218 else 2219 handled = mv_host_intr(host, pending_irqs); 2220 } 2221 spin_unlock(&host->lock); 2222 2223 /* for MSI: unmask; interrupt cause bits will retrigger now */ 2224 if (using_msi) 2225 writel(hpriv->main_irq_mask, hpriv->main_irq_mask_addr); 2226 2227 return IRQ_RETVAL(handled); 2228 } 2229 2230 static unsigned int mv5_scr_offset(unsigned int sc_reg_in) 2231 { 2232 unsigned int ofs; 2233 2234 switch (sc_reg_in) { 2235 case SCR_STATUS: 2236 case SCR_ERROR: 2237 case SCR_CONTROL: 2238 ofs = sc_reg_in * sizeof(u32); 2239 break; 2240 default: 2241 ofs = 0xffffffffU; 2242 break; 2243 } 2244 return ofs; 2245 } 2246 2247 static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val) 2248 { 2249 struct mv_host_priv *hpriv = link->ap->host->private_data; 2250 void __iomem *mmio = hpriv->base; 2251 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no); 2252 unsigned int ofs = mv5_scr_offset(sc_reg_in); 2253 2254 if (ofs != 0xffffffffU) { 2255 *val = readl(addr + ofs); 2256 return 0; 2257 } else 2258 return -EINVAL; 2259 } 2260 2261 static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) 2262 { 2263 struct mv_host_priv *hpriv = link->ap->host->private_data; 2264 void __iomem *mmio = hpriv->base; 2265 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no); 2266 unsigned int ofs = mv5_scr_offset(sc_reg_in); 2267 2268 if (ofs != 0xffffffffU) { 2269 writelfl(val, addr + ofs); 2270 return 0; 2271 } else 2272 return -EINVAL; 2273 } 2274 2275 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio) 2276 { 2277 struct pci_dev *pdev = to_pci_dev(host->dev); 2278 int early_5080; 2279 2280 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0); 2281 2282 if (!early_5080) { 2283 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); 2284 tmp |= (1 << 0); 2285 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); 2286 } 2287 2288 mv_reset_pci_bus(host, mmio); 2289 } 2290 2291 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) 2292 { 2293 writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS); 2294 } 2295 2296 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, 2297 void __iomem *mmio) 2298 { 2299 void __iomem *phy_mmio = mv5_phy_base(mmio, idx); 2300 u32 tmp; 2301 2302 tmp = readl(phy_mmio + MV5_PHY_MODE); 2303 2304 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */ 2305 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */ 2306 } 2307 2308 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) 
2309 { 2310 u32 tmp; 2311 2312 writel(0, mmio + MV_GPIO_PORT_CTL_OFS); 2313 2314 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */ 2315 2316 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); 2317 tmp |= ~(1 << 0); 2318 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); 2319 } 2320 2321 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 2322 unsigned int port) 2323 { 2324 void __iomem *phy_mmio = mv5_phy_base(mmio, port); 2325 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5); 2326 u32 tmp; 2327 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0); 2328 2329 if (fix_apm_sq) { 2330 tmp = readl(phy_mmio + MV5_LTMODE_OFS); 2331 tmp |= (1 << 19); 2332 writel(tmp, phy_mmio + MV5_LTMODE_OFS); 2333 2334 tmp = readl(phy_mmio + MV5_PHY_CTL_OFS); 2335 tmp &= ~0x3; 2336 tmp |= 0x1; 2337 writel(tmp, phy_mmio + MV5_PHY_CTL_OFS); 2338 } 2339 2340 tmp = readl(phy_mmio + MV5_PHY_MODE); 2341 tmp &= ~mask; 2342 tmp |= hpriv->signal[port].pre; 2343 tmp |= hpriv->signal[port].amps; 2344 writel(tmp, phy_mmio + MV5_PHY_MODE); 2345 } 2346 2347 2348 #undef ZERO 2349 #define ZERO(reg) writel(0, port_mmio + (reg)) 2350 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, 2351 unsigned int port) 2352 { 2353 void __iomem *port_mmio = mv_port_base(mmio, port); 2354 2355 mv_reset_channel(hpriv, mmio, port); 2356 2357 ZERO(0x028); /* command */ 2358 writel(0x11f, port_mmio + EDMA_CFG_OFS); 2359 ZERO(0x004); /* timer */ 2360 ZERO(0x008); /* irq err cause */ 2361 ZERO(0x00c); /* irq err mask */ 2362 ZERO(0x010); /* rq bah */ 2363 ZERO(0x014); /* rq inp */ 2364 ZERO(0x018); /* rq outp */ 2365 ZERO(0x01c); /* respq bah */ 2366 ZERO(0x024); /* respq outp */ 2367 ZERO(0x020); /* respq inp */ 2368 ZERO(0x02c); /* test control */ 2369 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS); 2370 } 2371 #undef ZERO 2372 2373 #define ZERO(reg) writel(0, hc_mmio + (reg)) 2374 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 2375 unsigned int hc) 2376 { 2377 void __iomem *hc_mmio = mv_hc_base(mmio, hc); 2378 u32 tmp; 2379 2380 ZERO(0x00c); 2381 ZERO(0x010); 2382 ZERO(0x014); 2383 ZERO(0x018); 2384 2385 tmp = readl(hc_mmio + 0x20); 2386 tmp &= 0x1c1c1c1c; 2387 tmp |= 0x03030303; 2388 writel(tmp, hc_mmio + 0x20); 2389 } 2390 #undef ZERO 2391 2392 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 2393 unsigned int n_hc) 2394 { 2395 unsigned int hc, port; 2396 2397 for (hc = 0; hc < n_hc; hc++) { 2398 for (port = 0; port < MV_PORTS_PER_HC; port++) 2399 mv5_reset_hc_port(hpriv, mmio, 2400 (hc * MV_PORTS_PER_HC) + port); 2401 2402 mv5_reset_one_hc(hpriv, mmio, hc); 2403 } 2404 2405 return 0; 2406 } 2407 2408 #undef ZERO 2409 #define ZERO(reg) writel(0, mmio + (reg)) 2410 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio) 2411 { 2412 struct mv_host_priv *hpriv = host->private_data; 2413 u32 tmp; 2414 2415 tmp = readl(mmio + MV_PCI_MODE_OFS); 2416 tmp &= 0xff00ffff; 2417 writel(tmp, mmio + MV_PCI_MODE_OFS); 2418 2419 ZERO(MV_PCI_DISC_TIMER); 2420 ZERO(MV_PCI_MSI_TRIGGER); 2421 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS); 2422 ZERO(MV_PCI_SERR_MASK); 2423 ZERO(hpriv->irq_cause_ofs); 2424 ZERO(hpriv->irq_mask_ofs); 2425 ZERO(MV_PCI_ERR_LOW_ADDRESS); 2426 ZERO(MV_PCI_ERR_HIGH_ADDRESS); 2427 ZERO(MV_PCI_ERR_ATTRIBUTE); 2428 ZERO(MV_PCI_ERR_COMMAND); 2429 } 2430 #undef ZERO 2431 2432 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) 2433 { 2434 u32 tmp; 2435 2436 mv5_reset_flash(hpriv, mmio); 2437 2438 tmp 
= readl(mmio + MV_GPIO_PORT_CTL_OFS); 2439 tmp &= 0x3; 2440 tmp |= (1 << 5) | (1 << 6); 2441 writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS); 2442 } 2443 2444 /** 2445 * mv6_reset_hc - Perform the 6xxx global soft reset 2446 * @mmio: base address of the HBA 2447 * 2448 * This routine only applies to 6xxx parts. 2449 * 2450 * LOCKING: 2451 * Inherited from caller. 2452 */ 2453 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 2454 unsigned int n_hc) 2455 { 2456 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS; 2457 int i, rc = 0; 2458 u32 t; 2459 2460 /* Following procedure defined in PCI "main command and status 2461 * register" table. 2462 */ 2463 t = readl(reg); 2464 writel(t | STOP_PCI_MASTER, reg); 2465 2466 for (i = 0; i < 1000; i++) { 2467 udelay(1); 2468 t = readl(reg); 2469 if (PCI_MASTER_EMPTY & t) 2470 break; 2471 } 2472 if (!(PCI_MASTER_EMPTY & t)) { 2473 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); 2474 rc = 1; 2475 goto done; 2476 } 2477 2478 /* set reset */ 2479 i = 5; 2480 do { 2481 writel(t | GLOB_SFT_RST, reg); 2482 t = readl(reg); 2483 udelay(1); 2484 } while (!(GLOB_SFT_RST & t) && (i-- > 0)); 2485 2486 if (!(GLOB_SFT_RST & t)) { 2487 printk(KERN_ERR DRV_NAME ": can't set global reset\n"); 2488 rc = 1; 2489 goto done; 2490 } 2491 2492 /* clear reset and *reenable the PCI master* (not mentioned in spec) */ 2493 i = 5; 2494 do { 2495 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg); 2496 t = readl(reg); 2497 udelay(1); 2498 } while ((GLOB_SFT_RST & t) && (i-- > 0)); 2499 2500 if (GLOB_SFT_RST & t) { 2501 printk(KERN_ERR DRV_NAME ": can't clear global reset\n"); 2502 rc = 1; 2503 } 2504 done: 2505 return rc; 2506 } 2507 2508 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, 2509 void __iomem *mmio) 2510 { 2511 void __iomem *port_mmio; 2512 u32 tmp; 2513 2514 tmp = readl(mmio + MV_RESET_CFG_OFS); 2515 if ((tmp & (1 << 0)) == 0) { 2516 hpriv->signal[idx].amps = 0x7 << 8; 2517 hpriv->signal[idx].pre = 0x1 << 5; 2518 return; 2519 } 2520 2521 port_mmio = mv_port_base(mmio, idx); 2522 tmp = readl(port_mmio + PHY_MODE2); 2523 2524 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ 2525 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ 2526 } 2527 2528 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) 2529 { 2530 writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS); 2531 } 2532 2533 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 2534 unsigned int port) 2535 { 2536 void __iomem *port_mmio = mv_port_base(mmio, port); 2537 2538 u32 hp_flags = hpriv->hp_flags; 2539 int fix_phy_mode2 = 2540 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); 2541 int fix_phy_mode4 = 2542 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); 2543 u32 m2, m3; 2544 2545 if (fix_phy_mode2) { 2546 m2 = readl(port_mmio + PHY_MODE2); 2547 m2 &= ~(1 << 16); 2548 m2 |= (1 << 31); 2549 writel(m2, port_mmio + PHY_MODE2); 2550 2551 udelay(200); 2552 2553 m2 = readl(port_mmio + PHY_MODE2); 2554 m2 &= ~((1 << 16) | (1 << 31)); 2555 writel(m2, port_mmio + PHY_MODE2); 2556 2557 udelay(200); 2558 } 2559 2560 /* 2561 * Gen-II/IIe PHY_MODE3 errata RM#2: 2562 * Achieves better receiver noise performance than the h/w default: 2563 */ 2564 m3 = readl(port_mmio + PHY_MODE3); 2565 m3 = (m3 & 0x1f) | (0x5555601 << 5); 2566 2567 /* Guideline 88F5182 (GL# SATA-S11) */ 2568 if (IS_SOC(hpriv)) 2569 m3 &= ~0x1c; 2570 2571 if (fix_phy_mode4) { 2572 u32 m4 = readl(port_mmio + PHY_MODE4); 2573 /* 2574 * Enforce reserved-bit 
restrictions on GenIIe devices only. 2575 * For earlier chipsets, force only the internal config field 2576 * (workaround for errata FEr SATA#10 part 1). 2577 */ 2578 if (IS_GEN_IIE(hpriv)) 2579 m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES; 2580 else 2581 m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE; 2582 writel(m4, port_mmio + PHY_MODE4); 2583 } 2584 /* 2585 * Workaround for 60x1-B2 errata SATA#13: 2586 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3, 2587 * so we must always rewrite PHY_MODE3 after PHY_MODE4. 2588 */ 2589 writel(m3, port_mmio + PHY_MODE3); 2590 2591 /* Revert values of pre-emphasis and signal amps to the saved ones */ 2592 m2 = readl(port_mmio + PHY_MODE2); 2593 2594 m2 &= ~MV_M2_PREAMP_MASK; 2595 m2 |= hpriv->signal[port].amps; 2596 m2 |= hpriv->signal[port].pre; 2597 m2 &= ~(1 << 16); 2598 2599 /* according to mvSata 3.6.1, some IIE values are fixed */ 2600 if (IS_GEN_IIE(hpriv)) { 2601 m2 &= ~0xC30FF01F; 2602 m2 |= 0x0000900F; 2603 } 2604 2605 writel(m2, port_mmio + PHY_MODE2); 2606 } 2607 2608 /* TODO: use the generic LED interface to configure the SATA Presence */ 2609 /* & Acitivy LEDs on the board */ 2610 static void mv_soc_enable_leds(struct mv_host_priv *hpriv, 2611 void __iomem *mmio) 2612 { 2613 return; 2614 } 2615 2616 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx, 2617 void __iomem *mmio) 2618 { 2619 void __iomem *port_mmio; 2620 u32 tmp; 2621 2622 port_mmio = mv_port_base(mmio, idx); 2623 tmp = readl(port_mmio + PHY_MODE2); 2624 2625 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ 2626 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ 2627 } 2628 2629 #undef ZERO 2630 #define ZERO(reg) writel(0, port_mmio + (reg)) 2631 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv, 2632 void __iomem *mmio, unsigned int port) 2633 { 2634 void __iomem *port_mmio = mv_port_base(mmio, port); 2635 2636 mv_reset_channel(hpriv, mmio, port); 2637 2638 ZERO(0x028); /* command */ 2639 writel(0x101f, port_mmio + EDMA_CFG_OFS); 2640 ZERO(0x004); /* timer */ 2641 ZERO(0x008); /* irq err cause */ 2642 ZERO(0x00c); /* irq err mask */ 2643 ZERO(0x010); /* rq bah */ 2644 ZERO(0x014); /* rq inp */ 2645 ZERO(0x018); /* rq outp */ 2646 ZERO(0x01c); /* respq bah */ 2647 ZERO(0x024); /* respq outp */ 2648 ZERO(0x020); /* respq inp */ 2649 ZERO(0x02c); /* test control */ 2650 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS); 2651 } 2652 2653 #undef ZERO 2654 2655 #define ZERO(reg) writel(0, hc_mmio + (reg)) 2656 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv, 2657 void __iomem *mmio) 2658 { 2659 void __iomem *hc_mmio = mv_hc_base(mmio, 0); 2660 2661 ZERO(0x00c); 2662 ZERO(0x010); 2663 ZERO(0x014); 2664 2665 } 2666 2667 #undef ZERO 2668 2669 static int mv_soc_reset_hc(struct mv_host_priv *hpriv, 2670 void __iomem *mmio, unsigned int n_hc) 2671 { 2672 unsigned int port; 2673 2674 for (port = 0; port < hpriv->n_ports; port++) 2675 mv_soc_reset_hc_port(hpriv, mmio, port); 2676 2677 mv_soc_reset_one_hc(hpriv, mmio); 2678 2679 return 0; 2680 } 2681 2682 static void mv_soc_reset_flash(struct mv_host_priv *hpriv, 2683 void __iomem *mmio) 2684 { 2685 return; 2686 } 2687 2688 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio) 2689 { 2690 return; 2691 } 2692 2693 static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i) 2694 { 2695 u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS); 2696 2697 ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */ 2698 if (want_gen2i) 2699 ifcfg |= (1 << 7); 
/* enable gen2i speed */ 2700 writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS); 2701 } 2702 2703 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, 2704 unsigned int port_no) 2705 { 2706 void __iomem *port_mmio = mv_port_base(mmio, port_no); 2707 2708 /* 2709 * The datasheet warns against setting EDMA_RESET when EDMA is active 2710 * (but doesn't say what the problem might be). So we first try 2711 * to disable the EDMA engine before doing the EDMA_RESET operation. 2712 */ 2713 mv_stop_edma_engine(port_mmio); 2714 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS); 2715 2716 if (!IS_GEN_I(hpriv)) { 2717 /* Enable 3.0gb/s link speed: this survives EDMA_RESET */ 2718 mv_setup_ifcfg(port_mmio, 1); 2719 } 2720 /* 2721 * Strobing EDMA_RESET here causes a hard reset of the SATA transport, 2722 * link, and physical layers. It resets all SATA interface registers 2723 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev. 2724 */ 2725 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS); 2726 udelay(25); /* allow reset propagation */ 2727 writelfl(0, port_mmio + EDMA_CMD_OFS); 2728 2729 hpriv->ops->phy_errata(hpriv, mmio, port_no); 2730 2731 if (IS_GEN_I(hpriv)) 2732 mdelay(1); 2733 } 2734 2735 static void mv_pmp_select(struct ata_port *ap, int pmp) 2736 { 2737 if (sata_pmp_supported(ap)) { 2738 void __iomem *port_mmio = mv_ap_base(ap); 2739 u32 reg = readl(port_mmio + SATA_IFCTL_OFS); 2740 int old = reg & 0xf; 2741 2742 if (old != pmp) { 2743 reg = (reg & ~0xf) | pmp; 2744 writelfl(reg, port_mmio + SATA_IFCTL_OFS); 2745 } 2746 } 2747 } 2748 2749 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, 2750 unsigned long deadline) 2751 { 2752 mv_pmp_select(link->ap, sata_srst_pmp(link)); 2753 return sata_std_hardreset(link, class, deadline); 2754 } 2755 2756 static int mv_softreset(struct ata_link *link, unsigned int *class, 2757 unsigned long deadline) 2758 { 2759 mv_pmp_select(link->ap, sata_srst_pmp(link)); 2760 return ata_sff_softreset(link, class, deadline); 2761 } 2762 2763 static int mv_hardreset(struct ata_link *link, unsigned int *class, 2764 unsigned long deadline) 2765 { 2766 struct ata_port *ap = link->ap; 2767 struct mv_host_priv *hpriv = ap->host->private_data; 2768 struct mv_port_priv *pp = ap->private_data; 2769 void __iomem *mmio = hpriv->base; 2770 int rc, attempts = 0, extra = 0; 2771 u32 sstatus; 2772 bool online; 2773 2774 mv_reset_channel(hpriv, mmio, ap->port_no); 2775 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 2776 2777 /* Workaround for errata FEr SATA#10 (part 2) */ 2778 do { 2779 const unsigned long *timing = 2780 sata_ehc_deb_timing(&link->eh_context); 2781 2782 rc = sata_link_hardreset(link, timing, deadline + extra, 2783 &online, NULL); 2784 rc = online ? 
-EAGAIN : rc; 2785 if (rc) 2786 return rc; 2787 sata_scr_read(link, SCR_STATUS, &sstatus); 2788 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) { 2789 /* Force 1.5gb/s link speed and try again */ 2790 mv_setup_ifcfg(mv_ap_base(ap), 0); 2791 if (time_after(jiffies + HZ, deadline)) 2792 extra = HZ; /* only extend it once, max */ 2793 } 2794 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123); 2795 2796 return rc; 2797 } 2798 2799 static void mv_eh_freeze(struct ata_port *ap) 2800 { 2801 mv_stop_edma(ap); 2802 mv_enable_port_irqs(ap, 0); 2803 } 2804 2805 static void mv_eh_thaw(struct ata_port *ap) 2806 { 2807 struct mv_host_priv *hpriv = ap->host->private_data; 2808 unsigned int port = ap->port_no; 2809 unsigned int hardport = mv_hardport_from_port(port); 2810 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port); 2811 void __iomem *port_mmio = mv_ap_base(ap); 2812 u32 hc_irq_cause; 2813 2814 /* clear EDMA errors on this port */ 2815 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 2816 2817 /* clear pending irq events */ 2818 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport); 2819 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); 2820 2821 mv_enable_port_irqs(ap, ERR_IRQ); 2822 } 2823 2824 /** 2825 * mv_port_init - Perform some early initialization on a single port. 2826 * @port: libata data structure storing shadow register addresses 2827 * @port_mmio: base address of the port 2828 * 2829 * Initialize shadow register mmio addresses, clear outstanding 2830 * interrupts on the port, and unmask interrupts for the future 2831 * start of the port. 2832 * 2833 * LOCKING: 2834 * Inherited from caller. 2835 */ 2836 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) 2837 { 2838 void __iomem *shd_base = port_mmio + SHD_BLK_OFS; 2839 unsigned serr_ofs; 2840 2841 /* PIO related setup 2842 */ 2843 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA); 2844 port->error_addr = 2845 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR); 2846 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT); 2847 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL); 2848 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM); 2849 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH); 2850 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE); 2851 port->status_addr = 2852 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS); 2853 /* special case: control/altstatus doesn't have ATA_REG_ address */ 2854 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS; 2855 2856 /* unused: */ 2857 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL; 2858 2859 /* Clear any currently outstanding port interrupt conditions */ 2860 serr_ofs = mv_scr_offset(SCR_ERROR); 2861 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs); 2862 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 2863 2864 /* unmask all non-transient EDMA error interrupts */ 2865 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS); 2866 2867 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", 2868 readl(port_mmio + EDMA_CFG_OFS), 2869 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS), 2870 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS)); 2871 } 2872 2873 static unsigned int mv_in_pcix_mode(struct ata_host *host) 2874 { 2875 struct mv_host_priv *hpriv = host->private_data; 2876 void __iomem *mmio = hpriv->base; 2877 u32 reg; 2878 2879 if (IS_SOC(hpriv) || !IS_PCIE(hpriv)) 2880 return 0; /* not PCI-X capable */ 2881 reg = 
readl(mmio + MV_PCI_MODE_OFS); 2882 if ((reg & MV_PCI_MODE_MASK) == 0) 2883 return 0; /* conventional PCI mode */ 2884 return 1; /* chip is in PCI-X mode */ 2885 } 2886 2887 static int mv_pci_cut_through_okay(struct ata_host *host) 2888 { 2889 struct mv_host_priv *hpriv = host->private_data; 2890 void __iomem *mmio = hpriv->base; 2891 u32 reg; 2892 2893 if (!mv_in_pcix_mode(host)) { 2894 reg = readl(mmio + PCI_COMMAND_OFS); 2895 if (reg & PCI_COMMAND_MRDTRIG) 2896 return 0; /* not okay */ 2897 } 2898 return 1; /* okay */ 2899 } 2900 2901 static int mv_chip_id(struct ata_host *host, unsigned int board_idx) 2902 { 2903 struct pci_dev *pdev = to_pci_dev(host->dev); 2904 struct mv_host_priv *hpriv = host->private_data; 2905 u32 hp_flags = hpriv->hp_flags; 2906 2907 switch (board_idx) { 2908 case chip_5080: 2909 hpriv->ops = &mv5xxx_ops; 2910 hp_flags |= MV_HP_GEN_I; 2911 2912 switch (pdev->revision) { 2913 case 0x1: 2914 hp_flags |= MV_HP_ERRATA_50XXB0; 2915 break; 2916 case 0x3: 2917 hp_flags |= MV_HP_ERRATA_50XXB2; 2918 break; 2919 default: 2920 dev_printk(KERN_WARNING, &pdev->dev, 2921 "Applying 50XXB2 workarounds to unknown rev\n"); 2922 hp_flags |= MV_HP_ERRATA_50XXB2; 2923 break; 2924 } 2925 break; 2926 2927 case chip_504x: 2928 case chip_508x: 2929 hpriv->ops = &mv5xxx_ops; 2930 hp_flags |= MV_HP_GEN_I; 2931 2932 switch (pdev->revision) { 2933 case 0x0: 2934 hp_flags |= MV_HP_ERRATA_50XXB0; 2935 break; 2936 case 0x3: 2937 hp_flags |= MV_HP_ERRATA_50XXB2; 2938 break; 2939 default: 2940 dev_printk(KERN_WARNING, &pdev->dev, 2941 "Applying B2 workarounds to unknown rev\n"); 2942 hp_flags |= MV_HP_ERRATA_50XXB2; 2943 break; 2944 } 2945 break; 2946 2947 case chip_604x: 2948 case chip_608x: 2949 hpriv->ops = &mv6xxx_ops; 2950 hp_flags |= MV_HP_GEN_II; 2951 2952 switch (pdev->revision) { 2953 case 0x7: 2954 hp_flags |= MV_HP_ERRATA_60X1B2; 2955 break; 2956 case 0x9: 2957 hp_flags |= MV_HP_ERRATA_60X1C0; 2958 break; 2959 default: 2960 dev_printk(KERN_WARNING, &pdev->dev, 2961 "Applying B2 workarounds to unknown rev\n"); 2962 hp_flags |= MV_HP_ERRATA_60X1B2; 2963 break; 2964 } 2965 break; 2966 2967 case chip_7042: 2968 hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH; 2969 if (pdev->vendor == PCI_VENDOR_ID_TTI && 2970 (pdev->device == 0x2300 || pdev->device == 0x2310)) 2971 { 2972 /* 2973 * Highpoint RocketRAID PCIe 23xx series cards: 2974 * 2975 * Unconfigured drives are treated as "Legacy" 2976 * by the BIOS, and it overwrites sector 8 with 2977 * a "Lgcy" metadata block prior to Linux boot. 2978 * 2979 * Configured drives (RAID or JBOD) leave sector 8 2980 * alone, but instead overwrite a high numbered 2981 * sector for the RAID metadata. This sector can 2982 * be determined exactly, by truncating the physical 2983 * drive capacity to a nice even GB value. 2984 * 2985 * RAID metadata is at: (dev->n_sectors & ~0xfffff) 2986 * 2987 * Warn the user, lest they think we're just buggy. 2988 */ 2989 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID" 2990 " BIOS CORRUPTS DATA on all attached drives," 2991 " regardless of if/how they are configured." 
2992 " BEWARE!\n"); 2993 printk(KERN_WARNING DRV_NAME ": For data safety, do not" 2994 " use sectors 8-9 on \"Legacy\" drives," 2995 " and avoid the final two gigabytes on" 2996 " all RocketRAID BIOS initialized drives.\n"); 2997 } 2998 /* drop through */ 2999 case chip_6042: 3000 hpriv->ops = &mv6xxx_ops; 3001 hp_flags |= MV_HP_GEN_IIE; 3002 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host)) 3003 hp_flags |= MV_HP_CUT_THROUGH; 3004 3005 switch (pdev->revision) { 3006 case 0x2: /* Rev.B0: the first/only public release */ 3007 hp_flags |= MV_HP_ERRATA_60X1C0; 3008 break; 3009 default: 3010 dev_printk(KERN_WARNING, &pdev->dev, 3011 "Applying 60X1C0 workarounds to unknown rev\n"); 3012 hp_flags |= MV_HP_ERRATA_60X1C0; 3013 break; 3014 } 3015 break; 3016 case chip_soc: 3017 hpriv->ops = &mv_soc_ops; 3018 hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE | 3019 MV_HP_ERRATA_60X1C0; 3020 break; 3021 3022 default: 3023 dev_printk(KERN_ERR, host->dev, 3024 "BUG: invalid board index %u\n", board_idx); 3025 return 1; 3026 } 3027 3028 hpriv->hp_flags = hp_flags; 3029 if (hp_flags & MV_HP_PCIE) { 3030 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS; 3031 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS; 3032 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS; 3033 } else { 3034 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS; 3035 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS; 3036 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS; 3037 } 3038 3039 return 0; 3040 } 3041 3042 /** 3043 * mv_init_host - Perform some early initialization of the host. 3044 * @host: ATA host to initialize 3045 * @board_idx: controller index 3046 * 3047 * If possible, do an early global reset of the host. Then do 3048 * our port init and clear/unmask all/relevant host interrupts. 3049 * 3050 * LOCKING: 3051 * Inherited from caller. 
3052 */ 3053 static int mv_init_host(struct ata_host *host, unsigned int board_idx) 3054 { 3055 int rc = 0, n_hc, port, hc; 3056 struct mv_host_priv *hpriv = host->private_data; 3057 void __iomem *mmio = hpriv->base; 3058 3059 rc = mv_chip_id(host, board_idx); 3060 if (rc) 3061 goto done; 3062 3063 if (IS_SOC(hpriv)) { 3064 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS; 3065 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK_OFS; 3066 } else { 3067 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS; 3068 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK_OFS; 3069 } 3070 3071 /* initialize shadow irq mask with register's value */ 3072 hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr); 3073 3074 /* global interrupt mask: 0 == mask everything */ 3075 mv_set_main_irq_mask(host, ~0, 0); 3076 3077 n_hc = mv_get_hc_count(host->ports[0]->flags); 3078 3079 for (port = 0; port < host->n_ports; port++) 3080 hpriv->ops->read_preamp(hpriv, port, mmio); 3081 3082 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc); 3083 if (rc) 3084 goto done; 3085 3086 hpriv->ops->reset_flash(hpriv, mmio); 3087 hpriv->ops->reset_bus(host, mmio); 3088 hpriv->ops->enable_leds(hpriv, mmio); 3089 3090 for (port = 0; port < host->n_ports; port++) { 3091 struct ata_port *ap = host->ports[port]; 3092 void __iomem *port_mmio = mv_port_base(mmio, port); 3093 3094 mv_port_init(&ap->ioaddr, port_mmio); 3095 3096 #ifdef CONFIG_PCI 3097 if (!IS_SOC(hpriv)) { 3098 unsigned int offset = port_mmio - mmio; 3099 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio"); 3100 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port"); 3101 } 3102 #endif 3103 } 3104 3105 for (hc = 0; hc < n_hc; hc++) { 3106 void __iomem *hc_mmio = mv_hc_base(mmio, hc); 3107 3108 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause " 3109 "(before clear)=0x%08x\n", hc, 3110 readl(hc_mmio + HC_CFG_OFS), 3111 readl(hc_mmio + HC_IRQ_CAUSE_OFS)); 3112 3113 /* Clear any currently outstanding hc interrupt conditions */ 3114 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); 3115 } 3116 3117 if (!IS_SOC(hpriv)) { 3118 /* Clear any currently outstanding host interrupt conditions */ 3119 writelfl(0, mmio + hpriv->irq_cause_ofs); 3120 3121 /* and unmask interrupt generation for host regs */ 3122 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs); 3123 3124 /* 3125 * enable only global host interrupts for now. 3126 * The per-port interrupts get done later as ports are set up. 
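 *
 * (Judging from the call sites, mv_set_main_irq_mask() takes the bits
 *  to disable followed by the bits to enable: the earlier call with
 *  (~0, 0) masked everything, so the call below leaves only PCI_ERR
 *  enabled until the ports are brought up.)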
3127 */ 3128 mv_set_main_irq_mask(host, 0, PCI_ERR); 3129 } 3130 done: 3131 return rc; 3132 } 3133 3134 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev) 3135 { 3136 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ, 3137 MV_CRQB_Q_SZ, 0); 3138 if (!hpriv->crqb_pool) 3139 return -ENOMEM; 3140 3141 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ, 3142 MV_CRPB_Q_SZ, 0); 3143 if (!hpriv->crpb_pool) 3144 return -ENOMEM; 3145 3146 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ, 3147 MV_SG_TBL_SZ, 0); 3148 if (!hpriv->sg_tbl_pool) 3149 return -ENOMEM; 3150 3151 return 0; 3152 } 3153 3154 static void mv_conf_mbus_windows(struct mv_host_priv *hpriv, 3155 struct mbus_dram_target_info *dram) 3156 { 3157 int i; 3158 3159 for (i = 0; i < 4; i++) { 3160 writel(0, hpriv->base + WINDOW_CTRL(i)); 3161 writel(0, hpriv->base + WINDOW_BASE(i)); 3162 } 3163 3164 for (i = 0; i < dram->num_cs; i++) { 3165 struct mbus_dram_window *cs = dram->cs + i; 3166 3167 writel(((cs->size - 1) & 0xffff0000) | 3168 (cs->mbus_attr << 8) | 3169 (dram->mbus_dram_target_id << 4) | 1, 3170 hpriv->base + WINDOW_CTRL(i)); 3171 writel(cs->base, hpriv->base + WINDOW_BASE(i)); 3172 } 3173 } 3174 3175 /** 3176 * mv_platform_probe - handle a positive probe of an soc Marvell 3177 * host 3178 * @pdev: platform device found 3179 * 3180 * LOCKING: 3181 * Inherited from caller. 3182 */ 3183 static int mv_platform_probe(struct platform_device *pdev) 3184 { 3185 static int printed_version; 3186 const struct mv_sata_platform_data *mv_platform_data; 3187 const struct ata_port_info *ppi[] = 3188 { &mv_port_info[chip_soc], NULL }; 3189 struct ata_host *host; 3190 struct mv_host_priv *hpriv; 3191 struct resource *res; 3192 int n_ports, rc; 3193 3194 if (!printed_version++) 3195 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); 3196 3197 /* 3198 * Simple resource validation .. 3199 */ 3200 if (unlikely(pdev->num_resources != 2)) { 3201 dev_err(&pdev->dev, "invalid number of resources\n"); 3202 return -EINVAL; 3203 } 3204 3205 /* 3206 * Get the register base first 3207 */ 3208 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 3209 if (res == NULL) 3210 return -EINVAL; 3211 3212 /* allocate host */ 3213 mv_platform_data = pdev->dev.platform_data; 3214 n_ports = mv_platform_data->n_ports; 3215 3216 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); 3217 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); 3218 3219 if (!host || !hpriv) 3220 return -ENOMEM; 3221 host->private_data = hpriv; 3222 hpriv->n_ports = n_ports; 3223 3224 host->iomap = NULL; 3225 hpriv->base = devm_ioremap(&pdev->dev, res->start, 3226 res->end - res->start + 1); 3227 hpriv->base -= MV_SATAHC0_REG_BASE; 3228 3229 /* 3230 * (Re-)program MBUS remapping windows if we are asked to. 3231 */ 3232 if (mv_platform_data->dram != NULL) 3233 mv_conf_mbus_windows(hpriv, mv_platform_data->dram); 3234 3235 rc = mv_create_dma_pools(hpriv, &pdev->dev); 3236 if (rc) 3237 return rc; 3238 3239 /* initialize adapter */ 3240 rc = mv_init_host(host, chip_soc); 3241 if (rc) 3242 return rc; 3243 3244 dev_printk(KERN_INFO, &pdev->dev, 3245 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH, 3246 host->n_ports); 3247 3248 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt, 3249 IRQF_SHARED, &mv6_sht); 3250 } 3251 3252 /* 3253 * 3254 * mv_platform_remove - unplug a platform interface 3255 * @pdev: platform device 3256 * 3257 * A platform bus SATA device has been unplugged. 
Perform the needed 3258 * cleanup. Also called on module unload for any active devices. 3259 */ 3260 static int __devexit mv_platform_remove(struct platform_device *pdev) 3261 { 3262 struct device *dev = &pdev->dev; 3263 struct ata_host *host = dev_get_drvdata(dev); 3264 3265 ata_host_detach(host); 3266 return 0; 3267 } 3268 3269 static struct platform_driver mv_platform_driver = { 3270 .probe = mv_platform_probe, 3271 .remove = __devexit_p(mv_platform_remove), 3272 .driver = { 3273 .name = DRV_NAME, 3274 .owner = THIS_MODULE, 3275 }, 3276 }; 3277 3278 3279 #ifdef CONFIG_PCI 3280 static int mv_pci_init_one(struct pci_dev *pdev, 3281 const struct pci_device_id *ent); 3282 3283 3284 static struct pci_driver mv_pci_driver = { 3285 .name = DRV_NAME, 3286 .id_table = mv_pci_tbl, 3287 .probe = mv_pci_init_one, 3288 .remove = ata_pci_remove_one, 3289 }; 3290 3291 /* 3292 * module options 3293 */ 3294 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */ 3295 3296 3297 /* move to PCI layer or libata core? */ 3298 static int pci_go_64(struct pci_dev *pdev) 3299 { 3300 int rc; 3301 3302 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 3303 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 3304 if (rc) { 3305 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 3306 if (rc) { 3307 dev_printk(KERN_ERR, &pdev->dev, 3308 "64-bit DMA enable failed\n"); 3309 return rc; 3310 } 3311 } 3312 } else { 3313 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 3314 if (rc) { 3315 dev_printk(KERN_ERR, &pdev->dev, 3316 "32-bit DMA enable failed\n"); 3317 return rc; 3318 } 3319 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 3320 if (rc) { 3321 dev_printk(KERN_ERR, &pdev->dev, 3322 "32-bit consistent DMA enable failed\n"); 3323 return rc; 3324 } 3325 } 3326 3327 return rc; 3328 } 3329 3330 /** 3331 * mv_print_info - Dump key info to kernel log for perusal. 3332 * @host: ATA host to print info about 3333 * 3334 * FIXME: complete this. 3335 * 3336 * LOCKING: 3337 * Inherited from caller. 3338 */ 3339 static void mv_print_info(struct ata_host *host) 3340 { 3341 struct pci_dev *pdev = to_pci_dev(host->dev); 3342 struct mv_host_priv *hpriv = host->private_data; 3343 u8 scc; 3344 const char *scc_s, *gen; 3345 3346 /* Use this to determine the HW stepping of the chip so we know 3347 * what errata to workaround 3348 */ 3349 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc); 3350 if (scc == 0) 3351 scc_s = "SCSI"; 3352 else if (scc == 0x01) 3353 scc_s = "RAID"; 3354 else 3355 scc_s = "?"; 3356 3357 if (IS_GEN_I(hpriv)) 3358 gen = "I"; 3359 else if (IS_GEN_II(hpriv)) 3360 gen = "II"; 3361 else if (IS_GEN_IIE(hpriv)) 3362 gen = "IIE"; 3363 else 3364 gen = "?"; 3365 3366 dev_printk(KERN_INFO, &pdev->dev, 3367 "Gen-%s %u slots %u ports %s mode IRQ via %s\n", 3368 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports, 3369 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); 3370 } 3371 3372 /** 3373 * mv_pci_init_one - handle a positive probe of a PCI Marvell host 3374 * @pdev: PCI device found 3375 * @ent: PCI device ID entry for the matched host 3376 * 3377 * LOCKING: 3378 * Inherited from caller. 
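 *
 * A rough sketch of the probe flow implemented below:
 *
 *	pcim_enable_device(pdev);
 *	pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
 *	pci_go_64(pdev);                      (pick 64- or 32-bit DMA masks)
 *	mv_create_dma_pools(hpriv, &pdev->dev);
 *	mv_init_host(host, board_idx);
 *	ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
 *			  IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);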
3379 */ 3380 static int mv_pci_init_one(struct pci_dev *pdev, 3381 const struct pci_device_id *ent) 3382 { 3383 static int printed_version; 3384 unsigned int board_idx = (unsigned int)ent->driver_data; 3385 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL }; 3386 struct ata_host *host; 3387 struct mv_host_priv *hpriv; 3388 int n_ports, rc; 3389 3390 if (!printed_version++) 3391 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); 3392 3393 /* allocate host */ 3394 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC; 3395 3396 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); 3397 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); 3398 if (!host || !hpriv) 3399 return -ENOMEM; 3400 host->private_data = hpriv; 3401 hpriv->n_ports = n_ports; 3402 3403 /* acquire resources */ 3404 rc = pcim_enable_device(pdev); 3405 if (rc) 3406 return rc; 3407 3408 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME); 3409 if (rc == -EBUSY) 3410 pcim_pin_device(pdev); 3411 if (rc) 3412 return rc; 3413 host->iomap = pcim_iomap_table(pdev); 3414 hpriv->base = host->iomap[MV_PRIMARY_BAR]; 3415 3416 rc = pci_go_64(pdev); 3417 if (rc) 3418 return rc; 3419 3420 rc = mv_create_dma_pools(hpriv, &pdev->dev); 3421 if (rc) 3422 return rc; 3423 3424 /* initialize adapter */ 3425 rc = mv_init_host(host, board_idx); 3426 if (rc) 3427 return rc; 3428 3429 /* Enable message-switched interrupts, if requested */ 3430 if (msi && pci_enable_msi(pdev) == 0) 3431 hpriv->hp_flags |= MV_HP_FLAG_MSI; 3432 3433 mv_dump_pci_cfg(pdev, 0x68); 3434 mv_print_info(host); 3435 3436 pci_set_master(pdev); 3437 pci_try_set_mwi(pdev); 3438 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED, 3439 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht); 3440 } 3441 #endif 3442 3443 static int mv_platform_probe(struct platform_device *pdev); 3444 static int __devexit mv_platform_remove(struct platform_device *pdev); 3445 3446 static int __init mv_init(void) 3447 { 3448 int rc = -ENODEV; 3449 #ifdef CONFIG_PCI 3450 rc = pci_register_driver(&mv_pci_driver); 3451 if (rc < 0) 3452 return rc; 3453 #endif 3454 rc = platform_driver_register(&mv_platform_driver); 3455 3456 #ifdef CONFIG_PCI 3457 if (rc < 0) 3458 pci_unregister_driver(&mv_pci_driver); 3459 #endif 3460 return rc; 3461 } 3462 3463 static void __exit mv_exit(void) 3464 { 3465 #ifdef CONFIG_PCI 3466 pci_unregister_driver(&mv_pci_driver); 3467 #endif 3468 platform_driver_unregister(&mv_platform_driver); 3469 } 3470 3471 MODULE_AUTHOR("Brett Russ"); 3472 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers"); 3473 MODULE_LICENSE("GPL"); 3474 MODULE_DEVICE_TABLE(pci, mv_pci_tbl); 3475 MODULE_VERSION(DRV_VERSION); 3476 MODULE_ALIAS("platform:" DRV_NAME); 3477 3478 #ifdef CONFIG_PCI 3479 module_param(msi, int, 0444); 3480 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)"); 3481 #endif 3482 3483 module_init(mv_init); 3484 module_exit(mv_exit); 3485
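
/*
 * Usage note (illustrative, not part of the driver proper): when built
 * as a module, the "msi" option declared above selects PCI Message
 * Signaled Interrupts at load time, e.g.:
 *
 *	modprobe sata_mv msi=1
 */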