/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include <asm/io.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.7"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary.
	 * Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				   ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_50XX		= (1 << 6),
	MV_HP_GEN_IIE		= (1 << 7),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
};

#define IS_50XX(hpriv)	((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv)	(((hpriv)->hp_flags & MV_HP_50XX) == 0)
#define IS_GEN_I(hpriv)		IS_50XX(hpriv)
#define IS_GEN_II(hpriv)	IS_60XX(hpriv)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	/* Our DMA boundary is determined by an ePRD being unable to handle
	 * anything larger than 64KB
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};
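/*
 * For illustration (derived from the queue/SG constants above): the single
 * per-port DMA chunk carved up later by mv_port_start() works out to
 * exactly one 4KB page:
 *
 *   MV_CRQB_Q_SZ = 32 slots * 32 B  = 1024 B  (CRQB request ring)
 *   MV_CRPB_Q_SZ = 32 slots *  8 B  =  256 B  (CRPB response ring)
 *   MV_SG_TBL_SZ = 176 ePRDs * 16 B = 2816 B  (scatter/gather table)
 *   ------------------------------------------------
 *   MV_PORT_PRIV_DMA_SZ             = 4096 B
 */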
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;
	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv;
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};

static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
static void mv_host_stop(struct ata_host *host);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t mv_interrupt(int irq, void *dev_instance);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_stop_and_reset(struct ata_port *ap);

static struct scsi_host_template mv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= MV_USE_Q_DEPTH,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_mmio_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};

static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_mmio_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_mmio_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.sht		= &mv_sht,
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */


/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->mmio_base, ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
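/*
 * For illustration, the address arithmetic in the helpers above resolves
 * global port 5 (assuming an 8-port, dual-HC chip) as:
 *
 *   hc        = 5 >> MV_PORT_HC_SHIFT = 1
 *   hard port = 5 &  MV_PORT_MASK     = 1
 *   port base = mmio + MV_SATAHC0_REG_BASE + 1 * MV_SATAHC_REG_SZ
 *                    + MV_SATAHC_ARBTR_REG_SZ + 1 * MV_PORT_REG_SZ
 *             = mmio + 0x20000 + 0x10000 + 0x2000 + 0x2000
 *             = mmio + 0x34000
 */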
static void mv_irq_clear(struct ata_port *ap)
{
}

/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
{
	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}

/**
 *      mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i;

	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(EDMA_EN & reg)) {
			break;
		}
		udelay(100);
	}

	if (EDMA_EN & reg) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		/* FIXME: Consider doing a reset here to recover */
	}
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);
	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		return readl(mv_ap_base(ap) + ofs);
	} else {
		return (u32) ofs;
	}
}

static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		writelfl(val, mv_ap_base(ap) + ofs);
	}
}

/**
 *      mv_host_stop - Host specific cleanup/stop routine.
 *      @host: host data structure
 *
 *      Disable ints, cleanup host memory, call general purpose
 *      host_stop.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_stop(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct pci_dev *pdev = to_pci_dev(host->dev);

	if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
		pci_disable_msi(pdev);
	} else {
		pci_intx(pdev, 0);
	}
	kfree(hpriv);
	ata_host_stop(host);
}

static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
{
	dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
}

static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~0x1f;		/* clear queue depth */
	cfg &= ~EDMA_CFG_NCQ;	/* clear NCQ mode */
	cfg &= ~(1 << 9);	/* disable equeue */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* dis RX PM port mask */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab host q cache */
		cfg |= (1 << 22);	/* enab cutthrough */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc = -ENOMEM;

	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp)
		goto err_out;
	memset(pp, 0, sizeof(*pp));

	mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				 GFP_KERNEL);
	if (!mem)
		goto err_out_pp;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		goto err_out_priv;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	mv_edma_cfg(hpriv, port_mmio);

	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crqb_dma & 0xffffffff,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crpb_dma & 0xffffffff,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;

err_out_priv:
	mv_priv_free(pp, dev);
err_out_pp:
	kfree(pp);
err_out:
	return rc;
}

/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_port_priv *pp = ap->private_data;
	unsigned long flags;

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	ap->private_data = NULL;
	ata_pad_free(ap, dev);
	mv_priv_free(pp, dev);
	kfree(pp);
}

/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int i = 0;
	struct scatterlist *sg;

	ata_for_each_sg(sg, qc) {
		dma_addr_t addr;
		u32 sg_len, len, offset;

		addr = sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & MV_DMA_BOUNDARY;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
			pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
			pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			if (!sg_len && ata_sg_is_last(sg, qc))
				pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

			i++;
		}
	}
}

static inline unsigned mv_inc_q_index(unsigned index)
{
	return (index + 1) & MV_MAX_Q_DEPTH_MASK;
}

static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		    >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		    >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(qc->ap);
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned in_index;
	u32 in_ptr;

	if (ATA_PROT_DMA != qc->tf.protocol) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(qc->ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	in_index = mv_inc_q_index(in_index);	/* now incr producer index */

	mv_start_dma(port_mmio, pp);

	/* and write the request in pointer to kick the EDMA to life */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
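/*
 * For illustration, the producer-index update in mv_qc_issue() above packs
 * two fields into EDMA_REQ_Q_IN_PTR_OFS: bits 31:10 keep the ring base
 * (EDMA_REQ_Q_BASE_LO_MASK) and bits 9:5 carry the slot index
 * (EDMA_REQ_Q_PTR_SHIFT).  E.g. with a ring at 0x1f400000 and a new index
 * of 3, the value written would be 0x1f400000 | (3 << 5) = 0x1f400060.
 * The example address is made up; only the masks and shifts come from the
 * constants above.
 */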
/**
 *      mv_get_crpb_status - get status from most recently completed cmd
 *      @ap: ATA channel to manipulate
 *
 *      This routine is for use when the port is in DMA mode, when it
 *      will be using the CRPB (command response block) method of
 *      returning command completion information.  We check indices
 *      are good, grab status, and bump the response consumer index to
 *      prove that we're up to date.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	unsigned out_index;
	u32 out_ptr;
	u8 ata_status;

	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
					>> CRPB_FLAG_STATUS_SHIFT;

	/* increment our consumer index... */
	out_index = mv_inc_q_index(out_index);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
		>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return ata_status;
}

/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @reset_allowed: bool: 0 == don't trigger from reset here
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, int reset_allowed)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		struct mv_port_priv *pp = ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
		mv_stop_and_reset(ap);
}
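/*
 * For illustration, the per-port bit position computed in mv_host_intr()
 * below is shift = port * 2, plus one extra to skip bit 8 once past HC0
 * (ports 4-7).  So port 1 maps to err/done bits 2/3, and port 5 maps to
 * err/done bits 11/12 of the main IRQ cause register, consistent with
 * HC0_IRQ_PEND (bits 0-8) and the HC_SHIFT = 9 window used for HC1.
 */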
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->mmio_base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int shift, port, port0, hard_port, handled;
	unsigned int err_mask;

	if (hc == 0) {
		port0 = 0;
	} else {
		port0 = MV_PORTS_PER_HC;
	}

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (hc_irq_cause) {
		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		u8 ata_status = 0;
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;

		hard_port = mv_hardport_from_port(port); /* range 0..3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		/* Note that DEV_IRQ might happen spuriously during EDMA,
		 * and should be ignored in such cases.
		 * The cause of this is still under investigation.
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			/* EDMA: check for response queue interrupt */
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
				ata_status = mv_get_crpb_status(ap);
				handled = 1;
			}
		} else {
			/* PIO: check for device (drive) interrupt */
			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
				ata_status = readb((void __iomem *)
					   ap->ioaddr.status_addr);
				handled = 1;
				/* ignore spurious intr if drive still BUSY */
				if (ata_status & ATA_BUSY) {
					ata_status = 0;
					handled = 0;
				}
			}
		}

		if (ap && (ap->flags & ATA_FLAG_DISABLED))
			continue;

		err_mask = ac_err_mask(ata_status);

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap, 1);
			err_mask |= AC_ERR_OTHER;
			handled = 1;
		}

		if (handled) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port, ata_status);
				/* mark qc status appropriately */
				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
					qc->err_mask |= err_mask;
					ata_qc_complete(qc);
				}
			}
		}
	}
	VPRINTK("EXIT\n");
}

/**
 *      mv_interrupt -
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *      @regs: unused
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->mmio_base;
	struct mv_host_priv *hpriv;
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat)) {
		return IRQ_NONE;
	}

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled++;
		}
	}

	hpriv = host->private_data;
	if (IS_60XX(hpriv)) {
		/* deal with the interrupt coalescing bits */
		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
		}
	}

	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		return readl(mmio + ofs);
	else
		return (u32) ofs;
}

static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		writelfl(val, mmio + ofs);
}

static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u8 rev_id;
	int early_5080;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;
	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}


#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}

/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);
	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_50XX(hpriv))
		mdelay(1);
}

static void mv_stop_and_reset(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->mmio_base;

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	__mv_phy_reset(ap, 0);
}

static inline void __msleep(unsigned int msec, int can_sleep)
{
	if (can_sleep)
		msleep(msec);
	else
		mdelay(msec);
}

/**
 *      __mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to be safe to call at
 *      interrupt level, i.e. it does not sleep.

/**
 * __mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 * @can_sleep: non-zero if this routine may sleep, zero to busy-wait
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 *
 * LOCKING:
 * Inherited from caller.  This is coded to be safe to call at
 * interrupt level, i.e. it does not sleep.
 */
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long timeout;
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
	__msleep(1, can_sleep);

	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
	__msleep(20, can_sleep);

	timeout = jiffies + msecs_to_jiffies(200);
	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		__msleep(1, can_sleep);
	} while (time_before(jiffies, timeout));

	/* work around errata */
	if (IS_60XX(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (ata_port_online(ap)) {
		ata_port_probe(ap);
	} else {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		ata_port_printk(ap, KERN_INFO,
				"no device found (phy stat %08x)\n", sstatus);
		ata_port_disable(ap);
		return;
	}
	ap->cbl = ATA_CBL_SATA;

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		__msleep(500, can_sleep);
		if (retry-- <= 0)
			break;
	}

	tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
	tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
	tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
	tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_enabled(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	VPRINTK("EXIT\n");
}

static void mv_phy_reset(struct ata_port *ap)
{
	__mv_phy_reset(ap, 1);
}
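
/*
 * Note (illustrative): the COMRESET above follows the usual SControl
 * sequence: write DET=1 (0x301) to force interface re-initialization,
 * drop back to DET=0 (0x300), then poll SStatus until DET reads 0
 * (no device) or 3 (device present, phy communication established),
 * with a 60XX errata retry if SStatus settles on an unexpected value.
 * mv_phy_reset() is the sleeping entry point; mv_stop_and_reset()
 * calls __mv_phy_reset(ap, 0) so the delays busy-wait instead.
 */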

/**
 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
 * @ap: ATA channel to manipulate
 *
 * Intent is to clear all pending error conditions, reset the
 * chip/bus, fail the command, and move on.
 *
 * LOCKING:
 * This routine holds the host lock while failing the command.
 */
static void mv_eng_timeout(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;

	ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
	DPRINTK("All regs @ start of eng_timeout\n");
	mv_dump_all_regs(ap->host->mmio_base, ap->port_no,
			 to_pci_dev(ap->host->dev));

	/* ata_qc_from_tag() may return NULL when no command is active on
	 * this tag; guard the dereferences below.
	 */
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (qc)
		printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
		       ap->host->mmio_base, ap, qc, qc->scsicmd,
		       &qc->scsicmd->cmnd);

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_err_intr(ap, 0);
	mv_stop_and_reset(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	if (qc == NULL)
		return;

	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
	if (qc->flags & ATA_QCFLAG_ACTIVE) {
		qc->err_mask |= AC_ERR_TIMEOUT;
		ata_eh_qc_complete(qc);
	}
}

/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
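
/*
 * Example (illustrative): with the layout set up in mv_port_init(),
 * each taskfile shadow register occupies a 32-bit slot in the port's
 * shadow block, e.g.:
 *
 *	nsect_addr  == port_mmio + SHD_BLK_OFS + sizeof(u32) * ATA_REG_NSECT
 *	status_addr == port_mmio + SHD_BLK_OFS + sizeof(u32) * ATA_REG_STATUS
 *
 * which is why __mv_phy_reset() above can classify the device with
 * plain readb() calls on ap->ioaddr.
 */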

static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
		      unsigned int board_idx)
{
	u8 rev_id;
	u32 hp_flags = hpriv->hp_flags;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;

		switch (rev_id) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;

		hp_flags |= MV_HP_GEN_IIE;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n",
		       board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}
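
/*
 * Note (illustrative): mv_chip_id() only selects the per-chip hook
 * table and errata flags; for example a 5080 at revision 0x1 ends up
 * with &mv5xxx_ops plus MV_HP_50XX | MV_HP_ERRATA_50XXB0.  A revision
 * it does not recognize is not rejected; it falls back to the family's
 * default workaround set after logging a warning.
 */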

/**
 * mv_init_host - Perform some early initialization of the host.
 * @pdev: host PCI device
 * @probe_ent: early data struct representing the host
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
			unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	void __iomem *mmio = probe_ent->mmio_base;
	struct mv_host_priv *hpriv = probe_ent->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(pdev, hpriv, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(probe_ent->port_flags);
	probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;

	for (port = 0; port < probe_ent->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < probe_ent->n_ports; port++) {
		if (IS_60XX(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < probe_ent->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&probe_ent->port[port], port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
	writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
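
/*
 * Note (illustrative): the bring-up order in mv_init_host() is
 * deliberate: mask the main IRQ register first, identify the chip,
 * read the per-port preamp/signal values, perform the global soft
 * reset, apply flash/bus/LED setup and per-port phy errata, let
 * mv_port_init() wire up the shadow registers, and only then clear
 * and unmask the PCI and main interrupt registers.
 */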
"MSI" : "INTx"); 2332 } 2333 2334 /** 2335 * mv_init_one - handle a positive probe of a Marvell host 2336 * @pdev: PCI device found 2337 * @ent: PCI device ID entry for the matched host 2338 * 2339 * LOCKING: 2340 * Inherited from caller. 2341 */ 2342 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 2343 { 2344 static int printed_version = 0; 2345 struct ata_probe_ent *probe_ent = NULL; 2346 struct mv_host_priv *hpriv; 2347 unsigned int board_idx = (unsigned int)ent->driver_data; 2348 void __iomem *mmio_base; 2349 int pci_dev_busy = 0, rc; 2350 2351 if (!printed_version++) 2352 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); 2353 2354 rc = pci_enable_device(pdev); 2355 if (rc) { 2356 return rc; 2357 } 2358 pci_set_master(pdev); 2359 2360 rc = pci_request_regions(pdev, DRV_NAME); 2361 if (rc) { 2362 pci_dev_busy = 1; 2363 goto err_out; 2364 } 2365 2366 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 2367 if (probe_ent == NULL) { 2368 rc = -ENOMEM; 2369 goto err_out_regions; 2370 } 2371 2372 memset(probe_ent, 0, sizeof(*probe_ent)); 2373 probe_ent->dev = pci_dev_to_dev(pdev); 2374 INIT_LIST_HEAD(&probe_ent->node); 2375 2376 mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0); 2377 if (mmio_base == NULL) { 2378 rc = -ENOMEM; 2379 goto err_out_free_ent; 2380 } 2381 2382 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL); 2383 if (!hpriv) { 2384 rc = -ENOMEM; 2385 goto err_out_iounmap; 2386 } 2387 memset(hpriv, 0, sizeof(*hpriv)); 2388 2389 probe_ent->sht = mv_port_info[board_idx].sht; 2390 probe_ent->port_flags = mv_port_info[board_idx].flags; 2391 probe_ent->pio_mask = mv_port_info[board_idx].pio_mask; 2392 probe_ent->udma_mask = mv_port_info[board_idx].udma_mask; 2393 probe_ent->port_ops = mv_port_info[board_idx].port_ops; 2394 2395 probe_ent->irq = pdev->irq; 2396 probe_ent->irq_flags = IRQF_SHARED; 2397 probe_ent->mmio_base = mmio_base; 2398 probe_ent->private_data = hpriv; 2399 2400 /* initialize adapter */ 2401 rc = mv_init_host(pdev, probe_ent, board_idx); 2402 if (rc) { 2403 goto err_out_hpriv; 2404 } 2405 2406 /* Enable interrupts */ 2407 if (msi && pci_enable_msi(pdev) == 0) { 2408 hpriv->hp_flags |= MV_HP_FLAG_MSI; 2409 } else { 2410 pci_intx(pdev, 1); 2411 } 2412 2413 mv_dump_pci_cfg(pdev, 0x68); 2414 mv_print_info(probe_ent); 2415 2416 if (ata_device_add(probe_ent) == 0) { 2417 rc = -ENODEV; /* No devices discovered */ 2418 goto err_out_dev_add; 2419 } 2420 2421 kfree(probe_ent); 2422 return 0; 2423 2424 err_out_dev_add: 2425 if (MV_HP_FLAG_MSI & hpriv->hp_flags) { 2426 pci_disable_msi(pdev); 2427 } else { 2428 pci_intx(pdev, 0); 2429 } 2430 err_out_hpriv: 2431 kfree(hpriv); 2432 err_out_iounmap: 2433 pci_iounmap(pdev, mmio_base); 2434 err_out_free_ent: 2435 kfree(probe_ent); 2436 err_out_regions: 2437 pci_release_regions(pdev); 2438 err_out: 2439 if (!pci_dev_busy) { 2440 pci_disable_device(pdev); 2441 } 2442 2443 return rc; 2444 } 2445 2446 static int __init mv_init(void) 2447 { 2448 return pci_register_driver(&mv_pci_driver); 2449 } 2450 2451 static void __exit mv_exit(void) 2452 { 2453 pci_unregister_driver(&mv_pci_driver); 2454 } 2455 2456 MODULE_AUTHOR("Brett Russ"); 2457 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers"); 2458 MODULE_LICENSE("GPL"); 2459 MODULE_DEVICE_TABLE(pci, mv_pci_tbl); 2460 MODULE_VERSION(DRV_VERSION); 2461 2462 module_param(msi, int, 0444); 2463 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)"); 2464 2465 module_init(mv_init); 2466 module_exit(mv_exit); 2467