1 /*
2  * sata_mv.c - Marvell SATA support
3  *
4  * Copyright 2005: EMC Corporation, all rights reserved.
5  * Copyright 2005 Red Hat, Inc.  All rights reserved.
6  *
7  * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; version 2 of the License.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23 
24 #include <linux/kernel.h>
25 #include <linux/module.h>
26 #include <linux/pci.h>
27 #include <linux/init.h>
28 #include <linux/blkdev.h>
29 #include <linux/delay.h>
30 #include <linux/interrupt.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/device.h>
33 #include <scsi/scsi_host.h>
34 #include <scsi/scsi_cmnd.h>
35 #include <linux/libata.h>
36 
37 #define DRV_NAME	"sata_mv"
38 #define DRV_VERSION	"0.8"
39 
40 enum {
41 	/* BARs are enumerated in pci_resource_start() terms */
42 	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
43 	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
44 	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */
45 
46 	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
47 	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */
48 
49 	MV_PCI_REG_BASE		= 0,
50 	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
51 	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
52 	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
53 	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
54 	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
55 	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),
56 
57 	MV_SATAHC0_REG_BASE	= 0x20000,
58 	MV_FLASH_CTL		= 0x1046c,
59 	MV_GPIO_PORT_CTL	= 0x104f0,
60 	MV_RESET_CFG		= 0x180d8,
61 
62 	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
63 	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
64 	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
65 	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
66 
67 	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,
68 
69 	MV_MAX_Q_DEPTH		= 32,
70 	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
71 
72 	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
73 	 * CRPB needs alignment on a 256B boundary. Size == 256B
74 	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
75 	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
76 	 */
77 	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
78 	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
79 	MV_MAX_SG_CT		= 176,
80 	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
81 	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
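	/* With the values above: 32 CRQBs * 32 B + 32 CRPBs * 8 B +
	 * 176 ePRDs * 16 B = 1024 + 256 + 2816 = 4096 B, i.e. the 4KB
	 * noted in the comment above.
	 */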
82 
83 	MV_PORTS_PER_HC		= 4,
84 	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
85 	MV_PORT_HC_SHIFT	= 2,
86 	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
87 	MV_PORT_MASK		= 3,
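	/* Worked example: global port 6 maps to HC 6 >> MV_PORT_HC_SHIFT = 1
	 * and hard port 6 & MV_PORT_MASK = 2.
	 */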
88 
89 	/* Host Flags */
90 	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
91 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
92 	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
93 				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
94 				   ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
95 	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,
96 
97 	CRQB_FLAG_READ		= (1 << 0),
98 	CRQB_TAG_SHIFT		= 1,
99 	CRQB_CMD_ADDR_SHIFT	= 8,
100 	CRQB_CMD_CS		= (0x2 << 11),
101 	CRQB_CMD_LAST		= (1 << 15),
102 
103 	CRPB_FLAG_STATUS_SHIFT	= 8,
104 
105 	EPRD_FLAG_END_OF_TBL	= (1 << 31),
106 
107 	/* PCI interface registers */
108 
109 	PCI_COMMAND_OFS		= 0xc00,
110 
111 	PCI_MAIN_CMD_STS_OFS	= 0xd30,
112 	STOP_PCI_MASTER		= (1 << 2),
113 	PCI_MASTER_EMPTY	= (1 << 3),
114 	GLOB_SFT_RST		= (1 << 4),
115 
116 	MV_PCI_MODE		= 0xd00,
117 	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
118 	MV_PCI_DISC_TIMER	= 0xd04,
119 	MV_PCI_MSI_TRIGGER	= 0xc38,
120 	MV_PCI_SERR_MASK	= 0xc28,
121 	MV_PCI_XBAR_TMOUT	= 0x1d04,
122 	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
123 	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
124 	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
125 	MV_PCI_ERR_COMMAND	= 0x1d50,
126 
127 	PCI_IRQ_CAUSE_OFS		= 0x1d58,
128 	PCI_IRQ_MASK_OFS		= 0x1d5c,
129 	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */
130 
131 	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
132 	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
133 	PORT0_ERR		= (1 << 0),	/* shift by port # */
134 	PORT0_DONE		= (1 << 1),	/* shift by port # */
135 	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
136 	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
137 	PCI_ERR			= (1 << 18),
138 	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
139 	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
140 	PORTS_0_3_COAL_DONE	= (1 << 8),
141 	PORTS_4_7_COAL_DONE	= (1 << 17),
142 	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
143 	GPIO_INT		= (1 << 22),
144 	SELF_INT		= (1 << 23),
145 	TWSI_INT		= (1 << 24),
146 	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
147 	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
148 	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
149 				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
150 				   HC_MAIN_RSVD),
151 	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
152 				   HC_MAIN_RSVD_5),
153 
154 	/* SATAHC registers */
155 	HC_CFG_OFS		= 0,
156 
157 	HC_IRQ_CAUSE_OFS	= 0x14,
158 	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
159 	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
160 	DEV_IRQ			= (1 << 8),	/* shift by port # */
161 
162 	/* Shadow block registers */
163 	SHD_BLK_OFS		= 0x100,
164 	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */
165 
166 	/* SATA registers */
167 	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
168 	SATA_ACTIVE_OFS		= 0x350,
169 	PHY_MODE3		= 0x310,
170 	PHY_MODE4		= 0x314,
171 	PHY_MODE2		= 0x330,
172 	MV5_PHY_MODE		= 0x74,
173 	MV5_LT_MODE		= 0x30,
174 	MV5_PHY_CTL		= 0x0C,
175 	SATA_INTERFACE_CTL	= 0x050,
176 
177 	MV_M2_PREAMP_MASK	= 0x7e0,
178 
179 	/* Port registers */
180 	EDMA_CFG_OFS		= 0,
181 	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
182 	EDMA_CFG_NCQ		= (1 << 5),
183 	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
184 	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
185 	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */
186 
187 	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
188 	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
189 	EDMA_ERR_D_PAR		= (1 << 0),
190 	EDMA_ERR_PRD_PAR	= (1 << 1),
191 	EDMA_ERR_DEV		= (1 << 2),
192 	EDMA_ERR_DEV_DCON	= (1 << 3),
193 	EDMA_ERR_DEV_CON	= (1 << 4),
194 	EDMA_ERR_SERR		= (1 << 5),
195 	EDMA_ERR_SELF_DIS	= (1 << 7),
196 	EDMA_ERR_BIST_ASYNC	= (1 << 8),
197 	EDMA_ERR_CRBQ_PAR	= (1 << 9),
198 	EDMA_ERR_CRPB_PAR	= (1 << 10),
199 	EDMA_ERR_INTRL_PAR	= (1 << 11),
200 	EDMA_ERR_IORDY		= (1 << 12),
201 	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
202 	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
203 	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
204 	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
205 	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
206 	EDMA_ERR_TRANS_PROTO	= (1 << 31),
207 	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
208 				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
209 				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
210 				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
211 				   EDMA_ERR_LNK_DATA_RX |
212 				   EDMA_ERR_LNK_DATA_TX |
213 				   EDMA_ERR_TRANS_PROTO),
214 
215 	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
216 	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
217 
218 	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
219 	EDMA_REQ_Q_PTR_SHIFT	= 5,
220 
221 	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
222 	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
223 	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
224 	EDMA_RSP_Q_PTR_SHIFT	= 3,
225 
226 	EDMA_CMD_OFS		= 0x28,
227 	EDMA_EN			= (1 << 0),
228 	EDMA_DS			= (1 << 1),
229 	ATA_RST			= (1 << 2),
230 
231 	EDMA_IORDY_TMOUT	= 0x34,
232 	EDMA_ARB_CFG		= 0x38,
233 
234 	/* Host private flags (hp_flags) */
235 	MV_HP_FLAG_MSI		= (1 << 0),
236 	MV_HP_ERRATA_50XXB0	= (1 << 1),
237 	MV_HP_ERRATA_50XXB2	= (1 << 2),
238 	MV_HP_ERRATA_60X1B2	= (1 << 3),
239 	MV_HP_ERRATA_60X1C0	= (1 << 4),
240 	MV_HP_ERRATA_XX42A0	= (1 << 5),
241 	MV_HP_50XX		= (1 << 6),
242 	MV_HP_GEN_IIE		= (1 << 7),
243 
244 	/* Port private flags (pp_flags) */
245 	MV_PP_FLAG_EDMA_EN	= (1 << 0),
246 	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
247 };
248 
249 #define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
250 #define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
251 #define IS_GEN_I(hpriv) IS_50XX(hpriv)
252 #define IS_GEN_II(hpriv) IS_60XX(hpriv)
253 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
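/* Note: as defined above, IS_GEN_II() simply means "not a 50xx part", so it
 * is also true for Gen IIE chips; IS_GEN_IIE() narrows that via the
 * MV_HP_GEN_IIE flag, which is set by chip-identification code outside this
 * excerpt.  The 6042 and 7042 entries below are the ones wired to mv_iie_ops.
 */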
254 
255 enum {
256 	/* Our DMA boundary is determined by an ePRD being unable to handle
257 	 * anything larger than 64KB
258 	 */
259 	MV_DMA_BOUNDARY		= 0xffffU,
260 
261 	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,
262 
263 	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
264 };
265 
266 enum chip_type {
267 	chip_504x,
268 	chip_508x,
269 	chip_5080,
270 	chip_604x,
271 	chip_608x,
272 	chip_6042,
273 	chip_7042,
274 };
275 
276 /* Command ReQuest Block: 32B */
277 struct mv_crqb {
278 	__le32			sg_addr;
279 	__le32			sg_addr_hi;
280 	__le16			ctrl_flags;
281 	__le16			ata_cmd[11];
282 };
283 
284 struct mv_crqb_iie {
285 	__le32			addr;
286 	__le32			addr_hi;
287 	__le32			flags;
288 	__le32			len;
289 	__le32			ata_cmd[4];
290 };
291 
292 /* Command ResPonse Block: 8B */
293 struct mv_crpb {
294 	__le16			id;
295 	__le16			flags;
296 	__le32			tmstmp;
297 };
298 
299 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
300 struct mv_sg {
301 	__le32			addr;
302 	__le32			flags_size;
303 	__le32			addr_hi;
304 	__le32			reserved;
305 };
306 
307 struct mv_port_priv {
308 	struct mv_crqb		*crqb;
309 	dma_addr_t		crqb_dma;
310 	struct mv_crpb		*crpb;
311 	dma_addr_t		crpb_dma;
312 	struct mv_sg		*sg_tbl;
313 	dma_addr_t		sg_tbl_dma;
314 	u32			pp_flags;
315 };
316 
317 struct mv_port_signal {
318 	u32			amps;
319 	u32			pre;
320 };
321 
322 struct mv_host_priv;
323 struct mv_hw_ops {
324 	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
325 			   unsigned int port);
326 	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
327 	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
328 			   void __iomem *mmio);
329 	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
330 			unsigned int n_hc);
331 	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
332 	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
333 };
334 
335 struct mv_host_priv {
336 	u32			hp_flags;
337 	struct mv_port_signal	signal[8];
338 	const struct mv_hw_ops	*ops;
339 };
340 
341 static void mv_irq_clear(struct ata_port *ap);
342 static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
343 static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
344 static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
345 static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
346 static void mv_phy_reset(struct ata_port *ap);
347 static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
348 static int mv_port_start(struct ata_port *ap);
349 static void mv_port_stop(struct ata_port *ap);
350 static void mv_qc_prep(struct ata_queued_cmd *qc);
351 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
352 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
353 static irqreturn_t mv_interrupt(int irq, void *dev_instance);
354 static void mv_eng_timeout(struct ata_port *ap);
355 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
356 
357 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
358 			   unsigned int port);
359 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
360 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
361 			   void __iomem *mmio);
362 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
363 			unsigned int n_hc);
364 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
365 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
366 
367 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
368 			   unsigned int port);
369 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
370 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
371 			   void __iomem *mmio);
372 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
373 			unsigned int n_hc);
374 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
375 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
376 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
377 			     unsigned int port_no);
378 static void mv_stop_and_reset(struct ata_port *ap);
379 
380 static struct scsi_host_template mv_sht = {
381 	.module			= THIS_MODULE,
382 	.name			= DRV_NAME,
383 	.ioctl			= ata_scsi_ioctl,
384 	.queuecommand		= ata_scsi_queuecmd,
385 	.can_queue		= MV_USE_Q_DEPTH,
386 	.this_id		= ATA_SHT_THIS_ID,
387 	.sg_tablesize		= MV_MAX_SG_CT / 2,
388 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
389 	.emulated		= ATA_SHT_EMULATED,
390 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
391 	.proc_name		= DRV_NAME,
392 	.dma_boundary		= MV_DMA_BOUNDARY,
393 	.slave_configure	= ata_scsi_slave_config,
394 	.slave_destroy		= ata_scsi_slave_destroy,
395 	.bios_param		= ata_std_bios_param,
396 };
397 
398 static const struct ata_port_operations mv5_ops = {
399 	.port_disable		= ata_port_disable,
400 
401 	.tf_load		= ata_tf_load,
402 	.tf_read		= ata_tf_read,
403 	.check_status		= ata_check_status,
404 	.exec_command		= ata_exec_command,
405 	.dev_select		= ata_std_dev_select,
406 
407 	.phy_reset		= mv_phy_reset,
408 
409 	.qc_prep		= mv_qc_prep,
410 	.qc_issue		= mv_qc_issue,
411 	.data_xfer		= ata_data_xfer,
412 
413 	.eng_timeout		= mv_eng_timeout,
414 
415 	.irq_handler		= mv_interrupt,
416 	.irq_clear		= mv_irq_clear,
417 	.irq_on			= ata_irq_on,
418 	.irq_ack		= ata_irq_ack,
419 
420 	.scr_read		= mv5_scr_read,
421 	.scr_write		= mv5_scr_write,
422 
423 	.port_start		= mv_port_start,
424 	.port_stop		= mv_port_stop,
425 };
426 
427 static const struct ata_port_operations mv6_ops = {
428 	.port_disable		= ata_port_disable,
429 
430 	.tf_load		= ata_tf_load,
431 	.tf_read		= ata_tf_read,
432 	.check_status		= ata_check_status,
433 	.exec_command		= ata_exec_command,
434 	.dev_select		= ata_std_dev_select,
435 
436 	.phy_reset		= mv_phy_reset,
437 
438 	.qc_prep		= mv_qc_prep,
439 	.qc_issue		= mv_qc_issue,
440 	.data_xfer		= ata_data_xfer,
441 
442 	.eng_timeout		= mv_eng_timeout,
443 
444 	.irq_handler		= mv_interrupt,
445 	.irq_clear		= mv_irq_clear,
446 	.irq_on			= ata_irq_on,
447 	.irq_ack		= ata_irq_ack,
448 
449 	.scr_read		= mv_scr_read,
450 	.scr_write		= mv_scr_write,
451 
452 	.port_start		= mv_port_start,
453 	.port_stop		= mv_port_stop,
454 };
455 
456 static const struct ata_port_operations mv_iie_ops = {
457 	.port_disable		= ata_port_disable,
458 
459 	.tf_load		= ata_tf_load,
460 	.tf_read		= ata_tf_read,
461 	.check_status		= ata_check_status,
462 	.exec_command		= ata_exec_command,
463 	.dev_select		= ata_std_dev_select,
464 
465 	.phy_reset		= mv_phy_reset,
466 
467 	.qc_prep		= mv_qc_prep_iie,
468 	.qc_issue		= mv_qc_issue,
469 	.data_xfer		= ata_data_xfer,
470 
471 	.eng_timeout		= mv_eng_timeout,
472 
473 	.irq_handler		= mv_interrupt,
474 	.irq_clear		= mv_irq_clear,
475 	.irq_on			= ata_irq_on,
476 	.irq_ack		= ata_irq_ack,
477 
478 	.scr_read		= mv_scr_read,
479 	.scr_write		= mv_scr_write,
480 
481 	.port_start		= mv_port_start,
482 	.port_stop		= mv_port_stop,
483 };
484 
485 static const struct ata_port_info mv_port_info[] = {
486 	{  /* chip_504x */
487 		.sht		= &mv_sht,
488 		.flags		= MV_COMMON_FLAGS,
489 		.pio_mask	= 0x1f,	/* pio0-4 */
490 		.udma_mask	= 0x7f,	/* udma0-6 */
491 		.port_ops	= &mv5_ops,
492 	},
493 	{  /* chip_508x */
494 		.sht		= &mv_sht,
495 		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
496 		.pio_mask	= 0x1f,	/* pio0-4 */
497 		.udma_mask	= 0x7f,	/* udma0-6 */
498 		.port_ops	= &mv5_ops,
499 	},
500 	{  /* chip_5080 */
501 		.sht		= &mv_sht,
502 		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
503 		.pio_mask	= 0x1f,	/* pio0-4 */
504 		.udma_mask	= 0x7f,	/* udma0-6 */
505 		.port_ops	= &mv5_ops,
506 	},
507 	{  /* chip_604x */
508 		.sht		= &mv_sht,
509 		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
510 		.pio_mask	= 0x1f,	/* pio0-4 */
511 		.udma_mask	= 0x7f,	/* udma0-6 */
512 		.port_ops	= &mv6_ops,
513 	},
514 	{  /* chip_608x */
515 		.sht		= &mv_sht,
516 		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
517 				   MV_FLAG_DUAL_HC),
518 		.pio_mask	= 0x1f,	/* pio0-4 */
519 		.udma_mask	= 0x7f,	/* udma0-6 */
520 		.port_ops	= &mv6_ops,
521 	},
522 	{  /* chip_6042 */
523 		.sht		= &mv_sht,
524 		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
525 		.pio_mask	= 0x1f,	/* pio0-4 */
526 		.udma_mask	= 0x7f,	/* udma0-6 */
527 		.port_ops	= &mv_iie_ops,
528 	},
529 	{  /* chip_7042 */
530 		.sht		= &mv_sht,
531 		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
532 		.pio_mask	= 0x1f,	/* pio0-4 */
533 		.udma_mask	= 0x7f,	/* udma0-6 */
534 		.port_ops	= &mv_iie_ops,
535 	},
536 };
537 
538 static const struct pci_device_id mv_pci_tbl[] = {
539 	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
540 	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
541 	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
542 	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
543 
544 	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
545 	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
546 	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
547 	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
548 	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
549 
550 	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
551 
552 	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
553 
554 	{ }			/* terminate list */
555 };
556 
557 static struct pci_driver mv_pci_driver = {
558 	.name			= DRV_NAME,
559 	.id_table		= mv_pci_tbl,
560 	.probe			= mv_init_one,
561 	.remove			= ata_pci_remove_one,
562 };
563 
564 static const struct mv_hw_ops mv5xxx_ops = {
565 	.phy_errata		= mv5_phy_errata,
566 	.enable_leds		= mv5_enable_leds,
567 	.read_preamp		= mv5_read_preamp,
568 	.reset_hc		= mv5_reset_hc,
569 	.reset_flash		= mv5_reset_flash,
570 	.reset_bus		= mv5_reset_bus,
571 };
572 
573 static const struct mv_hw_ops mv6xxx_ops = {
574 	.phy_errata		= mv6_phy_errata,
575 	.enable_leds		= mv6_enable_leds,
576 	.read_preamp		= mv6_read_preamp,
577 	.reset_hc		= mv6_reset_hc,
578 	.reset_flash		= mv6_reset_flash,
579 	.reset_bus		= mv_reset_pci_bus,
580 };
581 
582 /*
583  * module options
584  */
585 static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
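/* Typically set on the module command line, e.g. "modprobe sata_mv msi=1",
 * assuming the usual module_param(msi, ...) declaration later in the file
 * (outside this excerpt).
 */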
586 
587 
588 /*
589  * Functions
590  */
591 
592 static inline void writelfl(unsigned long data, void __iomem *addr)
593 {
594 	writel(data, addr);
595 	(void) readl(addr);	/* read back to flush the PCI posted write */
596 }
597 
598 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
599 {
600 	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
601 }
602 
603 static inline unsigned int mv_hc_from_port(unsigned int port)
604 {
605 	return port >> MV_PORT_HC_SHIFT;
606 }
607 
608 static inline unsigned int mv_hardport_from_port(unsigned int port)
609 {
610 	return port & MV_PORT_MASK;
611 }
612 
613 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
614 						 unsigned int port)
615 {
616 	return mv_hc_base(base, mv_hc_from_port(port));
617 }
618 
619 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
620 {
621 	return  mv_hc_base_from_port(base, port) +
622 		MV_SATAHC_ARBTR_REG_SZ +
623 		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
624 }
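/* Worked example of the layout encoded above (offsets relative to the BAR):
 * port 5 is HC 1, hard port 1, so its port registers start at
 * 0x20000 + 1 * 0x10000 + 0x2000 + 1 * 0x2000 = 0x34000.
 */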
625 
626 static inline void __iomem *mv_ap_base(struct ata_port *ap)
627 {
628 	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
629 }
630 
631 static inline int mv_get_hc_count(unsigned long port_flags)
632 {
633 	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
634 }
635 
636 static void mv_irq_clear(struct ata_port *ap)
637 {
638 }
639 
640 /**
641  *      mv_start_dma - Enable eDMA engine
642  *      @base: port base address
643  *      @pp: port private data
644  *
645  *      Enable the eDMA engine if it is not already running, and verify
646  *      with a WARN_ON that the local cache of the eDMA state is accurate.
647  *
648  *      LOCKING:
649  *      Inherited from caller.
650  */
651 static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
652 {
653 	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
654 		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
655 		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
656 	}
657 	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
658 }
659 
660 /**
661  *      mv_stop_dma - Disable eDMA engine
662  *      @ap: ATA channel to manipulate
663  *
664  *      Disable the eDMA engine if it is running, wait for it to actually
665  *      stop, and WARN_ON if the local cache of the eDMA state was stale.
666  *
667  *      LOCKING:
668  *      Inherited from caller.
669  */
670 static void mv_stop_dma(struct ata_port *ap)
671 {
672 	void __iomem *port_mmio = mv_ap_base(ap);
673 	struct mv_port_priv *pp	= ap->private_data;
674 	u32 reg;
675 	int i;
676 
677 	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
678 		/* Disable EDMA if active.   The disable bit auto clears.
679 		 */
680 		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
681 		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
682 	} else {
683 		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
684   	}
685 
686 	/* now properly wait for the eDMA to stop */
687 	for (i = 1000; i > 0; i--) {
688 		reg = readl(port_mmio + EDMA_CMD_OFS);
689 		if (!(EDMA_EN & reg)) {
690 			break;
691 		}
692 		udelay(100);
693 	}
694 
695 	if (EDMA_EN & reg) {
696 		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
697 		/* FIXME: Consider doing a reset here to recover */
698 	}
699 }
700 
701 #ifdef ATA_DEBUG
702 static void mv_dump_mem(void __iomem *start, unsigned bytes)
703 {
704 	int b, w;
705 	for (b = 0; b < bytes; ) {
706 		DPRINTK("%p: ", start + b);
707 		for (w = 0; b < bytes && w < 4; w++) {
708 			printk("%08x ",readl(start + b));
709 			b += sizeof(u32);
710 		}
711 		printk("\n");
712 	}
713 }
714 #endif
715 
716 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
717 {
718 #ifdef ATA_DEBUG
719 	int b, w;
720 	u32 dw;
721 	for (b = 0; b < bytes; ) {
722 		DPRINTK("%02x: ", b);
723 		for (w = 0; b < bytes && w < 4; w++) {
724 			(void) pci_read_config_dword(pdev,b,&dw);
725 			printk("%08x ",dw);
726 			b += sizeof(u32);
727 		}
728 		printk("\n");
729 	}
730 #endif
731 }
732 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
733 			     struct pci_dev *pdev)
734 {
735 #ifdef ATA_DEBUG
736 	void __iomem *hc_base = mv_hc_base(mmio_base,
737 					   port >> MV_PORT_HC_SHIFT);
738 	void __iomem *port_base;
739 	int start_port, num_ports, p, start_hc, num_hcs, hc;
740 
741 	if (0 > port) {
742 		start_hc = start_port = 0;
743 		num_ports = 8;		/* should be benign for 4-port devices */
744 		num_hcs = 2;
745 	} else {
746 		start_hc = port >> MV_PORT_HC_SHIFT;
747 		start_port = port;
748 		num_ports = num_hcs = 1;
749 	}
750 	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
751 		num_ports > 1 ? num_ports - 1 : start_port);
752 
753 	if (NULL != pdev) {
754 		DPRINTK("PCI config space regs:\n");
755 		mv_dump_pci_cfg(pdev, 0x68);
756 	}
757 	DPRINTK("PCI regs:\n");
758 	mv_dump_mem(mmio_base+0xc00, 0x3c);
759 	mv_dump_mem(mmio_base+0xd00, 0x34);
760 	mv_dump_mem(mmio_base+0xf00, 0x4);
761 	mv_dump_mem(mmio_base+0x1d00, 0x6c);
762 	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
763 		hc_base = mv_hc_base(mmio_base, hc);
764 		DPRINTK("HC regs (HC %i):\n", hc);
765 		mv_dump_mem(hc_base, 0x1c);
766 	}
767 	for (p = start_port; p < start_port + num_ports; p++) {
768 		port_base = mv_port_base(mmio_base, p);
769 		DPRINTK("EDMA regs (port %i):\n",p);
770 		mv_dump_mem(port_base, 0x54);
771 		DPRINTK("SATA regs (port %i):\n",p);
772 		mv_dump_mem(port_base+0x300, 0x60);
773 	}
774 #endif
775 }
776 
777 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
778 {
779 	unsigned int ofs;
780 
781 	switch (sc_reg_in) {
782 	case SCR_STATUS:
783 	case SCR_CONTROL:
784 	case SCR_ERROR:
785 		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
786 		break;
787 	case SCR_ACTIVE:
788 		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
789 		break;
790 	default:
791 		ofs = 0xffffffffU;
792 		break;
793 	}
794 	return ofs;
795 }
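/* With the standard libata SCR indices (status 0, error 1, control 2,
 * active 3), this yields SStatus at 0x300, SError at 0x304, SControl at
 * 0x308 and SActive at 0x350 within the port register block.
 */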
796 
797 static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
798 {
799 	unsigned int ofs = mv_scr_offset(sc_reg_in);
800 
801 	if (0xffffffffU != ofs) {
802 		return readl(mv_ap_base(ap) + ofs);
803 	} else {
804 		return (u32) ofs;
805 	}
806 }
807 
808 static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
809 {
810 	unsigned int ofs = mv_scr_offset(sc_reg_in);
811 
812 	if (0xffffffffU != ofs) {
813 		writelfl(val, mv_ap_base(ap) + ofs);
814 	}
815 }
816 
817 static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
818 {
819 	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
820 
821 	/* set up non-NCQ EDMA configuration */
822 	cfg &= ~(1 << 9);	/* disable EDMA device queuing */
823 
824 	if (IS_GEN_I(hpriv)) {
825 		cfg &= ~0x1f;		/* clear queue depth */
826 		cfg |= (1 << 8);	/* enab config burst size mask */
827 	}
828 
829 	else if (IS_GEN_II(hpriv)) {
830 		cfg &= ~0x1f;		/* clear queue depth */
831 		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
832 		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
833 	}
834 
835 	else if (IS_GEN_IIE(hpriv)) {
836 		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
837 		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
838 		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
839 		cfg |= (1 << 18);	/* enab early completion */
840 		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
841 		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
842 		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
843 	}
844 
845 	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
846 }
847 
848 /**
849  *      mv_port_start - Port specific init/start routine.
850  *      @ap: ATA channel to manipulate
851  *
852  *      Allocate and point to DMA memory, init port private memory,
853  *      zero indices.
854  *
855  *      LOCKING:
856  *      Inherited from caller.
857  */
858 static int mv_port_start(struct ata_port *ap)
859 {
860 	struct device *dev = ap->host->dev;
861 	struct mv_host_priv *hpriv = ap->host->private_data;
862 	struct mv_port_priv *pp;
863 	void __iomem *port_mmio = mv_ap_base(ap);
864 	void *mem;
865 	dma_addr_t mem_dma;
866 	int rc;
867 
868 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
869 	if (!pp)
870 		return -ENOMEM;
871 
872 	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
873 				  GFP_KERNEL);
874 	if (!mem)
875 		return -ENOMEM;
876 	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
877 
878 	rc = ata_pad_alloc(ap, dev);
879 	if (rc)
880 		return rc;
881 
882 	/* First item in chunk of DMA memory:
883 	 * 32-slot command request table (CRQB), 32 bytes each in size
884 	 */
885 	pp->crqb = mem;
886 	pp->crqb_dma = mem_dma;
887 	mem += MV_CRQB_Q_SZ;
888 	mem_dma += MV_CRQB_Q_SZ;
889 
890 	/* Second item:
891 	 * 32-slot command response table (CRPB), 8 bytes each in size
892 	 */
893 	pp->crpb = mem;
894 	pp->crpb_dma = mem_dma;
895 	mem += MV_CRPB_Q_SZ;
896 	mem_dma += MV_CRPB_Q_SZ;
897 
898 	/* Third item:
899 	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
900 	 */
901 	pp->sg_tbl = mem;
902 	pp->sg_tbl_dma = mem_dma;
903 
904 	mv_edma_cfg(hpriv, port_mmio);
905 
906 	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
907 	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
908 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
909 
910 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
911 		writelfl(pp->crqb_dma & 0xffffffff,
912 			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
913 	else
914 		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
915 
916 	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
917 
918 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
919 		writelfl(pp->crpb_dma & 0xffffffff,
920 			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
921 	else
922 		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
923 
924 	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
925 		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
926 
927 	/* Don't turn on EDMA here...do it before DMA commands only.  Else
928 	 * we'll be unable to send non-data, PIO, etc due to restricted access
929 	 * to shadow regs.
930 	 */
931 	ap->private_data = pp;
932 	return 0;
933 }
934 
935 /**
936  *      mv_port_stop - Port specific cleanup/stop routine.
937  *      @ap: ATA channel to manipulate
938  *
939  *      Stop DMA, cleanup port memory.
940  *
941  *      LOCKING:
942  *      This routine uses the host lock to protect the DMA stop.
943  */
944 static void mv_port_stop(struct ata_port *ap)
945 {
946 	unsigned long flags;
947 
948 	spin_lock_irqsave(&ap->host->lock, flags);
949 	mv_stop_dma(ap);
950 	spin_unlock_irqrestore(&ap->host->lock, flags);
951 }
952 
953 /**
954  *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
955  *      @qc: queued command whose SG list to source from
956  *
957  *      Populate the SG list and mark the last entry.
958  *
959  *      LOCKING:
960  *      Inherited from caller.
961  */
962 static void mv_fill_sg(struct ata_queued_cmd *qc)
963 {
964 	struct mv_port_priv *pp = qc->ap->private_data;
965 	unsigned int i = 0;
966 	struct scatterlist *sg;
967 
968 	ata_for_each_sg(sg, qc) {
969 		dma_addr_t addr;
970 		u32 sg_len, len, offset;
971 
972 		addr = sg_dma_address(sg);
973 		sg_len = sg_dma_len(sg);
974 
975 		while (sg_len) {
976 			offset = addr & MV_DMA_BOUNDARY;
977 			len = sg_len;
978 			if ((offset + sg_len) > 0x10000)
979 				len = 0x10000 - offset;
980 
981 			pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
982 			pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
983 			pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);
984 
985 			sg_len -= len;
986 			addr += len;
987 
988 			if (!sg_len && ata_sg_is_last(sg, qc))
989 				pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
990 
991 			i++;
992 		}
993 	}
994 }
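/* Example of the 64KB splitting above (a sketch, not from the original
 * source): an SG segment of 0x3000 bytes starting at a bus address whose
 * low 16 bits are 0xF000 becomes two ePRDs, one of 0x1000 bytes up to the
 * 64KB boundary and one of 0x2000 bytes after it.  A full 0x10000-byte
 * chunk is stored as a length field of 0 (flags_size keeps only the low
 * 16 bits), which the EDMA hardware presumably interprets as 64KB, as
 * with standard PRDs.
 */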
995 
996 static inline unsigned mv_inc_q_index(unsigned index)
997 {
998 	return (index + 1) & MV_MAX_Q_DEPTH_MASK;
999 }
1000 
1001 static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1002 {
1003 	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1004 		(last ? CRQB_CMD_LAST : 0);
1005 	*cmdw = cpu_to_le16(tmp);
1006 }
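/* Layout of each packed CRQB command word, as implied by the constants
 * above: bits 7:0 hold the register value, bits 10:8 the shadow register
 * address (the ATA_REG_* values used here are all 0-7), CRQB_CMD_CS sets a
 * fixed control field at bits 12:11, and bit 15 marks the last word.
 */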
1007 
1008 /**
1009  *      mv_qc_prep - Host specific command preparation.
1010  *      @qc: queued command to prepare
1011  *
1012  *      This routine returns early if the command is not DMA, leaving it
1013  *      to the generic path.  Else, it handles prep of the CRQB
1014  *      (command request block), does some sanity checking, and calls
1015  *      the SG load routine.
1016  *
1017  *      LOCKING:
1018  *      Inherited from caller.
1019  */
1020 static void mv_qc_prep(struct ata_queued_cmd *qc)
1021 {
1022 	struct ata_port *ap = qc->ap;
1023 	struct mv_port_priv *pp = ap->private_data;
1024 	__le16 *cw;
1025 	struct ata_taskfile *tf;
1026 	u16 flags = 0;
1027 	unsigned in_index;
1028 
1029  	if (ATA_PROT_DMA != qc->tf.protocol)
1030 		return;
1031 
1032 	/* Fill in command request block
1033 	 */
1034 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1035 		flags |= CRQB_FLAG_READ;
1036 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1037 	flags |= qc->tag << CRQB_TAG_SHIFT;
1038 
1039 	/* get current queue index from hardware */
1040 	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
1041 			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1042 
1043 	pp->crqb[in_index].sg_addr =
1044 		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1045 	pp->crqb[in_index].sg_addr_hi =
1046 		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1047 	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1048 
1049 	cw = &pp->crqb[in_index].ata_cmd[0];
1050 	tf = &qc->tf;
1051 
1052 	/* Sadly, the CRQB cannot accommodate all registers--there are
1053 	 * only 11 bytes...so we must pick and choose required
1054 	 * registers based on the command.  So, we drop feature and
1055 	 * hob_feature for [RW] DMA commands, but they are needed for
1056 	 * NCQ.  NCQ will drop hob_nsect.
1057 	 */
1058 	switch (tf->command) {
1059 	case ATA_CMD_READ:
1060 	case ATA_CMD_READ_EXT:
1061 	case ATA_CMD_WRITE:
1062 	case ATA_CMD_WRITE_EXT:
1063 	case ATA_CMD_WRITE_FUA_EXT:
1064 		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1065 		break;
1066 #ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
1067 	case ATA_CMD_FPDMA_READ:
1068 	case ATA_CMD_FPDMA_WRITE:
1069 		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1070 		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1071 		break;
1072 #endif				/* FIXME: remove this line when NCQ added */
1073 	default:
1074 		/* The only other commands EDMA supports in non-queued and
1075 		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1076 		 * of which are defined/used by Linux.  If we get here, this
1077 		 * driver needs work.
1078 		 *
1079 		 * FIXME: modify libata to give qc_prep a return value and
1080 		 * return error here.
1081 		 */
1082 		BUG_ON(tf->command);
1083 		break;
1084 	}
1085 	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1086 	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1087 	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1088 	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1089 	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1090 	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1091 	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1092 	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1093 	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
1094 
1095 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1096 		return;
1097 	mv_fill_sg(qc);
1098 }
1099 
1100 /**
1101  *      mv_qc_prep_iie - Host specific command preparation.
1102  *      @qc: queued command to prepare
1103  *
1104  *      This routine returns early if the command is not DMA, leaving it
1105  *      to the generic path.  Else, it handles prep of the CRQB
1106  *      (command request block), does some sanity checking, and calls
1107  *      the SG load routine.
1108  *
1109  *      LOCKING:
1110  *      Inherited from caller.
1111  */
1112 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1113 {
1114 	struct ata_port *ap = qc->ap;
1115 	struct mv_port_priv *pp = ap->private_data;
1116 	struct mv_crqb_iie *crqb;
1117 	struct ata_taskfile *tf;
1118 	unsigned in_index;
1119 	u32 flags = 0;
1120 
1121  	if (ATA_PROT_DMA != qc->tf.protocol)
1122 		return;
1123 
1124 	/* Fill in Gen IIE command request block
1125 	 */
1126 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1127 		flags |= CRQB_FLAG_READ;
1128 
1129 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1130 	flags |= qc->tag << CRQB_TAG_SHIFT;
1131 
1132 	/* get current queue index from hardware */
1133 	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
1134 			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1135 
1136 	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1137 	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1138 	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1139 	crqb->flags = cpu_to_le32(flags);
1140 
1141 	tf = &qc->tf;
1142 	crqb->ata_cmd[0] = cpu_to_le32(
1143 			(tf->command << 16) |
1144 			(tf->feature << 24)
1145 		);
1146 	crqb->ata_cmd[1] = cpu_to_le32(
1147 			(tf->lbal << 0) |
1148 			(tf->lbam << 8) |
1149 			(tf->lbah << 16) |
1150 			(tf->device << 24)
1151 		);
1152 	crqb->ata_cmd[2] = cpu_to_le32(
1153 			(tf->hob_lbal << 0) |
1154 			(tf->hob_lbam << 8) |
1155 			(tf->hob_lbah << 16) |
1156 			(tf->hob_feature << 24)
1157 		);
1158 	crqb->ata_cmd[3] = cpu_to_le32(
1159 			(tf->nsect << 0) |
1160 			(tf->hob_nsect << 8)
1161 		);
1162 
1163 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1164 		return;
1165 	mv_fill_sg(qc);
1166 }
1167 
1168 /**
1169  *      mv_qc_issue - Initiate a command to the host
1170  *      @qc: queued command to start
1171  *
1172  *      This routine simply redirects to the general purpose routine
1173  *      if command is not DMA.  Else, it sanity checks our local
1174  *      caches of the request producer/consumer indices then enables
1175  *      DMA and bumps the request producer index.
1176  *
1177  *      LOCKING:
1178  *      Inherited from caller.
1179  */
1180 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1181 {
1182 	void __iomem *port_mmio = mv_ap_base(qc->ap);
1183 	struct mv_port_priv *pp = qc->ap->private_data;
1184 	unsigned in_index;
1185 	u32 in_ptr;
1186 
1187 	if (ATA_PROT_DMA != qc->tf.protocol) {
1188 		/* We're about to send a non-EDMA capable command to the
1189 		 * port.  Turn off EDMA so there won't be problems accessing
1190 		 * shadow block, etc registers.
1191 		 */
1192 		mv_stop_dma(qc->ap);
1193 		return ata_qc_issue_prot(qc);
1194 	}
1195 
1196 	in_ptr   = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1197 	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1198 
1199 	/* until we do queuing, the queue should be empty at this point */
1200 	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1201 		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1202 
1203 	in_index = mv_inc_q_index(in_index);	/* now incr producer index */
1204 
1205 	mv_start_dma(port_mmio, pp);
1206 
1207 	/* and write the request in pointer to kick the EDMA to life */
1208 	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
1209 	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
1210 	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1211 
1212 	return 0;
1213 }
1214 
1215 /**
1216  *      mv_get_crpb_status - get status from most recently completed cmd
1217  *      @ap: ATA channel to manipulate
1218  *
1219  *      This routine is for use when the port is in DMA mode, when it
1220  *      will be using the CRPB (command response block) method of
1221  *      returning command completion information.  We check indices
1222  *      are good, grab status, and bump the response consumer index to
1223  *      prove that we're up to date.
1224  *
1225  *      LOCKING:
1226  *      Inherited from caller.
1227  */
1228 static u8 mv_get_crpb_status(struct ata_port *ap)
1229 {
1230 	void __iomem *port_mmio = mv_ap_base(ap);
1231 	struct mv_port_priv *pp = ap->private_data;
1232 	unsigned out_index;
1233 	u32 out_ptr;
1234 	u8 ata_status;
1235 
1236 	out_ptr   = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1237 	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1238 
1239 	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
1240 					>> CRPB_FLAG_STATUS_SHIFT;
1241 
1242 	/* increment our consumer index... */
1243 	out_index = mv_inc_q_index(out_index);
1244 
1245 	/* and, until we do NCQ, there should only be 1 CRPB waiting */
1246 	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1247 		>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1248 
1249 	/* write out our inc'd consumer index so EDMA knows we're caught up */
1250 	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
1251 	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
1252 	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1253 
1254 	/* Return ATA status register for completed CRPB */
1255 	return ata_status;
1256 }
1257 
1258 /**
1259  *      mv_err_intr - Handle error interrupts on the port
1260  *      @ap: ATA channel to manipulate
1261  *      @reset_allowed: bool: 0 == don't trigger from reset here
1262  *
1263  *      In most cases, just clear the interrupt and move on.  However,
1264  *      some cases require an eDMA reset, which is done right before
1265  *      the COMRESET in mv_phy_reset().  The SERR case requires a
1266  *      clear of pending errors in the SATA SERROR register.  Finally,
1267  *      if the port disabled DMA, update our cached copy to match.
1268  *
1269  *      LOCKING:
1270  *      Inherited from caller.
1271  */
1272 static void mv_err_intr(struct ata_port *ap, int reset_allowed)
1273 {
1274 	void __iomem *port_mmio = mv_ap_base(ap);
1275 	u32 edma_err_cause, serr = 0;
1276 
1277 	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1278 
1279 	if (EDMA_ERR_SERR & edma_err_cause) {
1280 		sata_scr_read(ap, SCR_ERROR, &serr);
1281 		sata_scr_write_flush(ap, SCR_ERROR, serr);
1282 	}
1283 	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
1284 		struct mv_port_priv *pp	= ap->private_data;
1285 		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1286 	}
1287 	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
1288 		"SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr);
1289 
1290 	/* Clear EDMA now that SERR cleanup done */
1291 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1292 
1293 	/* check for fatal here and recover if needed */
1294 	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
1295 		mv_stop_and_reset(ap);
1296 }
1297 
1298 /**
1299  *      mv_host_intr - Handle all interrupts on the given host controller
1300  *      @host: host specific structure
1301  *      @relevant: port error bits relevant to this host controller
1302  *      @hc: which host controller we're to look at
1303  *
1304  *      Read then write clear the HC interrupt status then walk each
1305  *      port connected to the HC and see if it needs servicing.  Port
1306  *      success ints are reported in the HC interrupt status reg, the
1307  *      port error ints are reported in the higher level main
1308  *      interrupt status register and thus are passed in via the
1309  *      'relevant' argument.
1310  *
1311  *      LOCKING:
1312  *      Inherited from caller.
1313  */
1314 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1315 {
1316 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1317 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1318 	struct ata_queued_cmd *qc;
1319 	u32 hc_irq_cause;
1320 	int shift, port, port0, hard_port, handled;
1321 	unsigned int err_mask;
1322 
1323 	if (hc == 0) {
1324 		port0 = 0;
1325 	} else {
1326 		port0 = MV_PORTS_PER_HC;
1327 	}
1328 
1329 	/* we'll need the HC success int register in most cases */
1330 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1331 	if (hc_irq_cause) {
1332 		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1333 	}
1334 
1335 	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1336 		hc,relevant,hc_irq_cause);
1337 
1338 	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1339 		u8 ata_status = 0;
1340 		struct ata_port *ap = host->ports[port];
1341 		struct mv_port_priv *pp = ap->private_data;
1342 
1343 		hard_port = mv_hardport_from_port(port); /* range 0..3 */
1344 		handled = 0;	/* ensure ata_status is set if handled++ */
1345 
1346 		/* Note that DEV_IRQ might happen spuriously during EDMA,
1347 		 * and should be ignored in such cases.
1348 		 * The cause of this is still under investigation.
1349 		 */
1350 		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1351 			/* EDMA: check for response queue interrupt */
1352 			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
1353 				ata_status = mv_get_crpb_status(ap);
1354 				handled = 1;
1355 			}
1356 		} else {
1357 			/* PIO: check for device (drive) interrupt */
1358 			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
1359 				ata_status = readb(ap->ioaddr.status_addr);
1360 				handled = 1;
1361 				/* ignore spurious intr if drive still BUSY */
1362 				if (ata_status & ATA_BUSY) {
1363 					ata_status = 0;
1364 					handled = 0;
1365 				}
1366 			}
1367 		}
1368 
1369 		if (ap && (ap->flags & ATA_FLAG_DISABLED))
1370 			continue;
1371 
1372 		err_mask = ac_err_mask(ata_status);
1373 
1374 		shift = port << 1;		/* (port * 2) */
1375 		if (port >= MV_PORTS_PER_HC) {
1376 			shift++;	/* skip bit 8 in the HC Main IRQ reg */
1377 		}
1378 		if ((PORT0_ERR << shift) & relevant) {
1379 			mv_err_intr(ap, 1);
1380 			err_mask |= AC_ERR_OTHER;
1381 			handled = 1;
1382 		}
1383 
1384 		if (handled) {
1385 			qc = ata_qc_from_tag(ap, ap->active_tag);
1386 			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
1387 				VPRINTK("port %u IRQ found for qc, "
1388 					"ata_status 0x%x\n", port,ata_status);
1389 				/* mark qc status appropriately */
1390 				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
1391 					qc->err_mask |= err_mask;
1392 					ata_qc_complete(qc);
1393 				}
1394 			}
1395 		}
1396 	}
1397 	VPRINTK("EXIT\n");
1398 }
1399 
1400 /**
1401  *      mv_interrupt -
1402  *      @irq: unused
1403  *      @dev_instance: private data; in this case the host structure
1404  *      @regs: unused
1405  *
1406  *      Read the read only register to determine if any host
1407  *      controllers have pending interrupts.  If so, call lower level
1408  *      routine to handle.  Also check for PCI errors which are only
1409  *      reported here.
1410  *
1411  *      LOCKING:
1412  *      This routine holds the host lock while processing pending
1413  *      interrupts.
1414  */
1415 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1416 {
1417 	struct ata_host *host = dev_instance;
1418 	unsigned int hc, handled = 0, n_hcs;
1419 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1420 	struct mv_host_priv *hpriv;
1421 	u32 irq_stat;
1422 
1423 	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1424 
1425 	/* check the cases where we either have nothing pending or have read
1426 	 * a bogus register value which can indicate HW removal or PCI fault
1427 	 */
1428 	if (!irq_stat || (0xffffffffU == irq_stat)) {
1429 		return IRQ_NONE;
1430 	}
1431 
1432 	n_hcs = mv_get_hc_count(host->ports[0]->flags);
1433 	spin_lock(&host->lock);
1434 
1435 	for (hc = 0; hc < n_hcs; hc++) {
1436 		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1437 		if (relevant) {
1438 			mv_host_intr(host, relevant, hc);
1439 			handled++;
1440 		}
1441 	}
1442 
1443 	hpriv = host->private_data;
1444 	if (IS_60XX(hpriv)) {
1445 		/* deal with the interrupt coalescing bits */
1446 		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
1447 			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
1448 			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
1449 			writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
1450 		}
1451 	}
1452 
1453 	if (PCI_ERR & irq_stat) {
1454 		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
1455 		       readl(mmio + PCI_IRQ_CAUSE_OFS));
1456 
1457 		DPRINTK("All regs @ PCI error\n");
1458 		mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1459 
1460 		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1461 		handled++;
1462 	}
1463 	spin_unlock(&host->lock);
1464 
1465 	return IRQ_RETVAL(handled);
1466 }
1467 
1468 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1469 {
1470 	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1471 	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1472 
1473 	return hc_mmio + ofs;
1474 }
1475 
1476 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1477 {
1478 	unsigned int ofs;
1479 
1480 	switch (sc_reg_in) {
1481 	case SCR_STATUS:
1482 	case SCR_ERROR:
1483 	case SCR_CONTROL:
1484 		ofs = sc_reg_in * sizeof(u32);
1485 		break;
1486 	default:
1487 		ofs = 0xffffffffU;
1488 		break;
1489 	}
1490 	return ofs;
1491 }
1492 
1493 static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1494 {
1495 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1496 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1497 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1498 
1499 	if (ofs != 0xffffffffU)
1500 		return readl(addr + ofs);
1501 	else
1502 		return (u32) ofs;
1503 }
1504 
1505 static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1506 {
1507 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1508 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1509 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1510 
1511 	if (ofs != 0xffffffffU)
1512 		writelfl(val, addr + ofs);
1513 }
1514 
1515 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1516 {
1517 	u8 rev_id;
1518 	int early_5080;
1519 
1520 	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
1521 
1522 	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);
1523 
1524 	if (!early_5080) {
1525 		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1526 		tmp |= (1 << 0);
1527 		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1528 	}
1529 
1530 	mv_reset_pci_bus(pdev, mmio);
1531 }
1532 
1533 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1534 {
1535 	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1536 }
1537 
1538 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1539 			   void __iomem *mmio)
1540 {
1541 	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1542 	u32 tmp;
1543 
1544 	tmp = readl(phy_mmio + MV5_PHY_MODE);
1545 
1546 	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
1547 	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
1548 }
1549 
1550 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1551 {
1552 	u32 tmp;
1553 
1554 	writel(0, mmio + MV_GPIO_PORT_CTL);
1555 
1556 	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1557 
1558 	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1559 	tmp |= ~(1 << 0);
1560 	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1561 }
1562 
1563 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1564 			   unsigned int port)
1565 {
1566 	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1567 	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1568 	u32 tmp;
1569 	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1570 
1571 	if (fix_apm_sq) {
1572 		tmp = readl(phy_mmio + MV5_LT_MODE);
1573 		tmp |= (1 << 19);
1574 		writel(tmp, phy_mmio + MV5_LT_MODE);
1575 
1576 		tmp = readl(phy_mmio + MV5_PHY_CTL);
1577 		tmp &= ~0x3;
1578 		tmp |= 0x1;
1579 		writel(tmp, phy_mmio + MV5_PHY_CTL);
1580 	}
1581 
1582 	tmp = readl(phy_mmio + MV5_PHY_MODE);
1583 	tmp &= ~mask;
1584 	tmp |= hpriv->signal[port].pre;
1585 	tmp |= hpriv->signal[port].amps;
1586 	writel(tmp, phy_mmio + MV5_PHY_MODE);
1587 }
1588 
1589 
1590 #undef ZERO
1591 #define ZERO(reg) writel(0, port_mmio + (reg))
1592 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1593 			     unsigned int port)
1594 {
1595 	void __iomem *port_mmio = mv_port_base(mmio, port);
1596 
1597 	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1598 
1599 	mv_channel_reset(hpriv, mmio, port);
1600 
1601 	ZERO(0x028);	/* command */
1602 	writel(0x11f, port_mmio + EDMA_CFG_OFS);
1603 	ZERO(0x004);	/* timer */
1604 	ZERO(0x008);	/* irq err cause */
1605 	ZERO(0x00c);	/* irq err mask */
1606 	ZERO(0x010);	/* rq bah */
1607 	ZERO(0x014);	/* rq inp */
1608 	ZERO(0x018);	/* rq outp */
1609 	ZERO(0x01c);	/* respq bah */
1610 	ZERO(0x024);	/* respq outp */
1611 	ZERO(0x020);	/* respq inp */
1612 	ZERO(0x02c);	/* test control */
1613 	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1614 }
1615 #undef ZERO
1616 
1617 #define ZERO(reg) writel(0, hc_mmio + (reg))
1618 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1619 			unsigned int hc)
1620 {
1621 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1622 	u32 tmp;
1623 
1624 	ZERO(0x00c);
1625 	ZERO(0x010);
1626 	ZERO(0x014);
1627 	ZERO(0x018);
1628 
1629 	tmp = readl(hc_mmio + 0x20);
1630 	tmp &= 0x1c1c1c1c;
1631 	tmp |= 0x03030303;
1632 	writel(tmp, hc_mmio + 0x20);
1633 }
1634 #undef ZERO
1635 
1636 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1637 			unsigned int n_hc)
1638 {
1639 	unsigned int hc, port;
1640 
1641 	for (hc = 0; hc < n_hc; hc++) {
1642 		for (port = 0; port < MV_PORTS_PER_HC; port++)
1643 			mv5_reset_hc_port(hpriv, mmio,
1644 					  (hc * MV_PORTS_PER_HC) + port);
1645 
1646 		mv5_reset_one_hc(hpriv, mmio, hc);
1647 	}
1648 
1649 	return 0;
1650 }
1651 
1652 #undef ZERO
1653 #define ZERO(reg) writel(0, mmio + (reg))
1654 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1655 {
1656 	u32 tmp;
1657 
1658 	tmp = readl(mmio + MV_PCI_MODE);
1659 	tmp &= 0xff00ffff;
1660 	writel(tmp, mmio + MV_PCI_MODE);
1661 
1662 	ZERO(MV_PCI_DISC_TIMER);
1663 	ZERO(MV_PCI_MSI_TRIGGER);
1664 	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1665 	ZERO(HC_MAIN_IRQ_MASK_OFS);
1666 	ZERO(MV_PCI_SERR_MASK);
1667 	ZERO(PCI_IRQ_CAUSE_OFS);
1668 	ZERO(PCI_IRQ_MASK_OFS);
1669 	ZERO(MV_PCI_ERR_LOW_ADDRESS);
1670 	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1671 	ZERO(MV_PCI_ERR_ATTRIBUTE);
1672 	ZERO(MV_PCI_ERR_COMMAND);
1673 }
1674 #undef ZERO
1675 
1676 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1677 {
1678 	u32 tmp;
1679 
1680 	mv5_reset_flash(hpriv, mmio);
1681 
1682 	tmp = readl(mmio + MV_GPIO_PORT_CTL);
1683 	tmp &= 0x3;
1684 	tmp |= (1 << 5) | (1 << 6);
1685 	writel(tmp, mmio + MV_GPIO_PORT_CTL);
1686 }
1687 
1688 /**
1689  *      mv6_reset_hc - Perform the 6xxx global soft reset
1690  *      @mmio: base address of the HBA
1691  *
1692  *      This routine only applies to 6xxx parts.
1693  *
1694  *      LOCKING:
1695  *      Inherited from caller.
1696  */
1697 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1698 			unsigned int n_hc)
1699 {
1700 	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1701 	int i, rc = 0;
1702 	u32 t;
1703 
1704 	/* Following procedure defined in PCI "main command and status
1705 	 * register" table.
1706 	 */
1707 	t = readl(reg);
1708 	writel(t | STOP_PCI_MASTER, reg);
1709 
1710 	for (i = 0; i < 1000; i++) {
1711 		udelay(1);
1712 		t = readl(reg);
1713 		if (PCI_MASTER_EMPTY & t) {
1714 			break;
1715 		}
1716 	}
1717 	if (!(PCI_MASTER_EMPTY & t)) {
1718 		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
1719 		rc = 1;
1720 		goto done;
1721 	}
1722 
1723 	/* set reset */
1724 	i = 5;
1725 	do {
1726 		writel(t | GLOB_SFT_RST, reg);
1727 		t = readl(reg);
1728 		udelay(1);
1729 	} while (!(GLOB_SFT_RST & t) && (i-- > 0));
1730 
1731 	if (!(GLOB_SFT_RST & t)) {
1732 		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
1733 		rc = 1;
1734 		goto done;
1735 	}
1736 
1737 	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
1738 	i = 5;
1739 	do {
1740 		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
1741 		t = readl(reg);
1742 		udelay(1);
1743 	} while ((GLOB_SFT_RST & t) && (i-- > 0));
1744 
1745 	if (GLOB_SFT_RST & t) {
1746 		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
1747 		rc = 1;
1748 	}
1749 done:
1750 	return rc;
1751 }
1752 
1753 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
1754 			   void __iomem *mmio)
1755 {
1756 	void __iomem *port_mmio;
1757 	u32 tmp;
1758 
1759 	tmp = readl(mmio + MV_RESET_CFG);
1760 	if ((tmp & (1 << 0)) == 0) {
1761 		hpriv->signal[idx].amps = 0x7 << 8;
1762 		hpriv->signal[idx].pre = 0x1 << 5;
1763 		return;
1764 	}
1765 
1766 	port_mmio = mv_port_base(mmio, idx);
1767 	tmp = readl(port_mmio + PHY_MODE2);
1768 
1769 	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
1770 	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
1771 }
1772 
1773 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1774 {
1775 	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
1776 }
1777 
1778 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1779 			   unsigned int port)
1780 {
1781 	void __iomem *port_mmio = mv_port_base(mmio, port);
1782 
1783 	u32 hp_flags = hpriv->hp_flags;
1784 	int fix_phy_mode2 =
1785 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
1786 	int fix_phy_mode4 =
1787 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
1788 	u32 m2, tmp;
1789 
1790 	if (fix_phy_mode2) {
1791 		m2 = readl(port_mmio + PHY_MODE2);
1792 		m2 &= ~(1 << 16);
1793 		m2 |= (1 << 31);
1794 		writel(m2, port_mmio + PHY_MODE2);
1795 
1796 		udelay(200);
1797 
1798 		m2 = readl(port_mmio + PHY_MODE2);
1799 		m2 &= ~((1 << 16) | (1 << 31));
1800 		writel(m2, port_mmio + PHY_MODE2);
1801 
1802 		udelay(200);
1803 	}
1804 
1805 	/* who knows what this magic does */
1806 	tmp = readl(port_mmio + PHY_MODE3);
1807 	tmp &= ~0x7F800000;
1808 	tmp |= 0x2A800000;
1809 	writel(tmp, port_mmio + PHY_MODE3);
1810 
1811 	if (fix_phy_mode4) {
1812 		u32 m4;
1813 
1814 		m4 = readl(port_mmio + PHY_MODE4);
1815 
1816 		if (hp_flags & MV_HP_ERRATA_60X1B2)
1817 			tmp = readl(port_mmio + 0x310);
1818 
1819 		m4 = (m4 & ~(1 << 1)) | (1 << 0);
1820 
1821 		writel(m4, port_mmio + PHY_MODE4);
1822 
1823 		if (hp_flags & MV_HP_ERRATA_60X1B2)
1824 			writel(tmp, port_mmio + 0x310);
1825 	}
1826 
1827 	/* Revert values of pre-emphasis and signal amps to the saved ones */
1828 	m2 = readl(port_mmio + PHY_MODE2);
1829 
1830 	m2 &= ~MV_M2_PREAMP_MASK;
1831 	m2 |= hpriv->signal[port].amps;
1832 	m2 |= hpriv->signal[port].pre;
1833 	m2 &= ~(1 << 16);
1834 
1835 	/* according to mvSata 3.6.1, some IIE values are fixed */
1836 	if (IS_GEN_IIE(hpriv)) {
1837 		m2 &= ~0xC30FF01F;
1838 		m2 |= 0x0000900F;
1839 	}
1840 
1841 	writel(m2, port_mmio + PHY_MODE2);
1842 }
1843 
1844 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
1845 			     unsigned int port_no)
1846 {
1847 	void __iomem *port_mmio = mv_port_base(mmio, port_no);
1848 
1849 	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
1850 
1851 	if (IS_60XX(hpriv)) {
1852 		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
1853 		ifctl |= (1 << 7);		/* enable gen2i speed */
1854 		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
1855 		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
1856 	}
1857 
1858 	udelay(25);		/* allow reset propagation */
1859 
1860 	/* Spec never mentions clearing the bit.  Marvell's driver does
1861 	 * clear the bit, however.
1862 	 */
1863 	writelfl(0, port_mmio + EDMA_CMD_OFS);
1864 
1865 	hpriv->ops->phy_errata(hpriv, mmio, port_no);
1866 
1867 	if (IS_50XX(hpriv))
1868 		mdelay(1);
1869 }
1870 
1871 static void mv_stop_and_reset(struct ata_port *ap)
1872 {
1873 	struct mv_host_priv *hpriv = ap->host->private_data;
1874 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1875 
1876 	mv_stop_dma(ap);
1877 
1878 	mv_channel_reset(hpriv, mmio, ap->port_no);
1879 
1880 	__mv_phy_reset(ap, 0);
1881 }
1882 
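/* Sleep with msleep() when the caller is allowed to sleep; otherwise
 * busy-wait with mdelay(), e.g. when running at interrupt level.
 */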
1883 static inline void __msleep(unsigned int msec, int can_sleep)
1884 {
1885 	if (can_sleep)
1886 		msleep(msec);
1887 	else
1888 		mdelay(msec);
1889 }
1890 
1891 /**
1892  *      __mv_phy_reset - Perform eDMA reset followed by COMRESET
1893  *      @ap: ATA channel to manipulate
 *      @can_sleep: non-zero if the caller is allowed to sleep
1894  *
1895  *      Part of this is taken from __sata_phy_reset and modified so
1896  *      that it can avoid sleeping, since this routine may be called
 *      from interrupt level.
1897  *
1898  *      LOCKING:
1899  *      Inherited from caller.  This is coded to be safe to call at
1900  *      interrupt level, i.e. it does not sleep when @can_sleep is zero.
1901  */
1902 static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
1903 {
1904 	struct mv_port_priv *pp	= ap->private_data;
1905 	struct mv_host_priv *hpriv = ap->host->private_data;
1906 	void __iomem *port_mmio = mv_ap_base(ap);
1907 	struct ata_taskfile tf;
1908 	struct ata_device *dev = &ap->device[0];
1909 	unsigned long timeout;
1910 	int retry = 5;
1911 	u32 sstatus;
1912 
1913 	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
1914 
1915 	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
1916 		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1917 		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1918 
1919 	/* Issue COMRESET via SControl */
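	/*
	 * SControl layout (per the SATA spec): DET in bits 3:0, SPD in
	 * bits 7:4, IPM in bits 11:8.  Writing 0x301 asserts DET=1
	 * (re-initialize the interface, i.e. issue COMRESET) with IPM=3
	 * (partial/slumber transitions disabled); writing 0x300 then
	 * releases DET so the link can renegotiate.
	 */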
1920 comreset_retry:
1921 	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1922 	__msleep(1, can_sleep);
1923 
1924 	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1925 	__msleep(20, can_sleep);
1926 
1927 	timeout = jiffies + msecs_to_jiffies(200);
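	/* Poll SStatus for up to 200ms until DET settles at either 3
	 * (device present, phy communication established) or 0 (no
	 * device detected).
	 */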
1928 	do {
1929 		sata_scr_read(ap, SCR_STATUS, &sstatus);
1930 		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
1931 			break;
1932 
1933 		__msleep(1, can_sleep);
1934 	} while (time_before(jiffies, timeout));
1935 
1936 	/* work around errata */
1937 	if (IS_60XX(hpriv) &&
1938 	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
1939 	    (retry-- > 0))
1940 		goto comreset_retry;
1941 
1942 	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
1943 		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1944 		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1945 
1946 	if (ata_port_online(ap)) {
1947 		ata_port_probe(ap);
1948 	} else {
1949 		sata_scr_read(ap, SCR_STATUS, &sstatus);
1950 		ata_port_printk(ap, KERN_INFO,
1951 				"no device found (phy stat %08x)\n", sstatus);
1952 		ata_port_disable(ap);
1953 		return;
1954 	}
1955 	ap->cbl = ATA_CBL_SATA;
1956 
1957 	/* even after SStatus reflects that device is ready,
1958 	 * it seems to take a while for link to be fully
1959 	 * established (and thus Status no longer 0x80/0x7F),
1960 	 * so we poll a bit for that, here.
1961 	 */
1962 	retry = 20;
1963 	while (1) {
1964 		u8 drv_stat = ata_check_status(ap);
1965 		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
1966 			break;
1967 		__msleep(500, can_sleep);
1968 		if (retry-- <= 0)
1969 			break;
1970 	}
1971 
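	/* Read the device signature from the shadow taskfile registers;
	 * ata_dev_classify() uses it below to tell ATA, ATAPI and
	 * "nothing attached" apart.
	 */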
1972 	tf.lbah = readb(ap->ioaddr.lbah_addr);
1973 	tf.lbam = readb(ap->ioaddr.lbam_addr);
1974 	tf.lbal = readb(ap->ioaddr.lbal_addr);
1975 	tf.nsect = readb(ap->ioaddr.nsect_addr);
1976 
1977 	dev->class = ata_dev_classify(&tf);
1978 	if (!ata_dev_enabled(dev)) {
1979 		VPRINTK("Port disabled post-sig: No device present.\n");
1980 		ata_port_disable(ap);
1981 	}
1982 
1983 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1984 
1985 	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1986 
1987 	VPRINTK("EXIT\n");
1988 }
1989 
1990 static void mv_phy_reset(struct ata_port *ap)
1991 {
1992 	__mv_phy_reset(ap, 1);
1993 }
1994 
1995 /**
1996  *      mv_eng_timeout - Routine called by libata when SCSI times out I/O
1997  *      @ap: ATA channel to manipulate
1998  *
1999  *      Intent is to clear all pending error conditions, reset the
2000  *      chip/bus, fail the command, and move on.
2001  *
2002  *      LOCKING:
2003  *      This routine holds the host lock while failing the command.
2004  */
2005 static void mv_eng_timeout(struct ata_port *ap)
2006 {
2007 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2008 	struct ata_queued_cmd *qc;
2009 	unsigned long flags;
2010 
2011 	ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
2012 	DPRINTK("All regs @ start of eng_timeout\n");
2013 	mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));
2014 
2015 	qc = ata_qc_from_tag(ap, ap->active_tag);
2016 	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
2017 	       mmio, ap, qc, qc ? qc->scsicmd : NULL,
	       qc ? &qc->scsicmd->cmnd : NULL);
2018 
2019 	spin_lock_irqsave(&ap->host->lock, flags);
2020 	mv_err_intr(ap, 0);
2021 	mv_stop_and_reset(ap);
2022 	spin_unlock_irqrestore(&ap->host->lock, flags);
2023 
2024 	WARN_ON(qc == NULL || !(qc->flags & ATA_QCFLAG_ACTIVE));
2025 	if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
2026 		qc->err_mask |= AC_ERR_TIMEOUT;
2027 		ata_eh_qc_complete(qc);
2028 	}
2029 }
2030 
2031 /**
2032  *      mv_port_init - Perform some early initialization on a single port.
2033  *      @port: libata data structure storing shadow register addresses
2034  *      @port_mmio: base address of the port
2035  *
2036  *      Initialize shadow register mmio addresses, clear outstanding
2037  *      interrupts on the port, and unmask interrupts for the future
2038  *      start of the port.
2039  *
2040  *      LOCKING:
2041  *      Inherited from caller.
2042  */
2043 static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
2044 {
2045 	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2046 	unsigned serr_ofs;
2047 
2048 	/* PIO related setup
2049 	 */
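	/* Each shadow taskfile register occupies its own 32-bit slot in
	 * the block at SHD_BLK_OFS, hence the sizeof(u32) stride.
	 */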
2050 	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2051 	port->error_addr =
2052 		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2053 	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2054 	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2055 	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2056 	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2057 	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2058 	port->status_addr =
2059 		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2060 	/* special case: control/altstatus doesn't have ATA_REG_ address */
2061 	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2062 
2063 	/* unused: */
2064 	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2065 
2066 	/* Clear any currently outstanding port interrupt conditions */
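	/* SError is write-1-to-clear, so writing back the value we just
	 * read wipes every error bit that is currently latched.
	 */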
2067 	serr_ofs = mv_scr_offset(SCR_ERROR);
2068 	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2069 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2070 
2071 	/* unmask all EDMA error interrupts */
2072 	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2073 
2074 	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2075 		readl(port_mmio + EDMA_CFG_OFS),
2076 		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2077 		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2078 }
2079 
2080 static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
2081 		      unsigned int board_idx)
2082 {
2083 	u8 rev_id;
2084 	u32 hp_flags = hpriv->hp_flags;
2085 
2086 	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2087 
2088 	switch (board_idx) {
2089 	case chip_5080:
2090 		hpriv->ops = &mv5xxx_ops;
2091 		hp_flags |= MV_HP_50XX;
2092 
2093 		switch (rev_id) {
2094 		case 0x1:
2095 			hp_flags |= MV_HP_ERRATA_50XXB0;
2096 			break;
2097 		case 0x3:
2098 			hp_flags |= MV_HP_ERRATA_50XXB2;
2099 			break;
2100 		default:
2101 			dev_printk(KERN_WARNING, &pdev->dev,
2102 			   "Applying 50XXB2 workarounds to unknown rev\n");
2103 			hp_flags |= MV_HP_ERRATA_50XXB2;
2104 			break;
2105 		}
2106 		break;
2107 
2108 	case chip_504x:
2109 	case chip_508x:
2110 		hpriv->ops = &mv5xxx_ops;
2111 		hp_flags |= MV_HP_50XX;
2112 
2113 		switch (rev_id) {
2114 		case 0x0:
2115 			hp_flags |= MV_HP_ERRATA_50XXB0;
2116 			break;
2117 		case 0x3:
2118 			hp_flags |= MV_HP_ERRATA_50XXB2;
2119 			break;
2120 		default:
2121 			dev_printk(KERN_WARNING, &pdev->dev,
2122 			   "Applying 50XXB2 workarounds to unknown rev\n");
2123 			hp_flags |= MV_HP_ERRATA_50XXB2;
2124 			break;
2125 		}
2126 		break;
2127 
2128 	case chip_604x:
2129 	case chip_608x:
2130 		hpriv->ops = &mv6xxx_ops;
2131 
2132 		switch (rev_id) {
2133 		case 0x7:
2134 			hp_flags |= MV_HP_ERRATA_60X1B2;
2135 			break;
2136 		case 0x9:
2137 			hp_flags |= MV_HP_ERRATA_60X1C0;
2138 			break;
2139 		default:
2140 			dev_printk(KERN_WARNING, &pdev->dev,
2141 				   "Applying 60X1B2 workarounds to unknown rev\n");
2142 			hp_flags |= MV_HP_ERRATA_60X1B2;
2143 			break;
2144 		}
2145 		break;
2146 
2147 	case chip_7042:
2148 	case chip_6042:
2149 		hpriv->ops = &mv6xxx_ops;
2150 
2151 		hp_flags |= MV_HP_GEN_IIE;
2152 
2153 		switch (rev_id) {
2154 		case 0x0:
2155 			hp_flags |= MV_HP_ERRATA_XX42A0;
2156 			break;
2157 		case 0x1:
2158 			hp_flags |= MV_HP_ERRATA_60X1C0;
2159 			break;
2160 		default:
2161 			dev_printk(KERN_WARNING, &pdev->dev,
2162 			   "Applying 60X1C0 workarounds to unknown rev\n");
2163 			hp_flags |= MV_HP_ERRATA_60X1C0;
2164 			break;
2165 		}
2166 		break;
2167 
2168 	default:
2169 		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2170 		return 1;
2171 	}
2172 
2173 	hpriv->hp_flags = hp_flags;
2174 
2175 	return 0;
2176 }
2177 
2178 /**
2179  *      mv_init_host - Perform some early initialization of the host.
2180  *      @pdev: host PCI device
2181  *      @probe_ent: early data struct representing the host
 *      @board_idx: index into mv_port_info[] for this board type
2182  *
2183  *      If possible, do an early global reset of the host.  Then do
2184  *      our port init and clear/unmask all/relevant host interrupts.
2185  *
2186  *      LOCKING:
2187  *      Inherited from caller.
2188  */
2189 static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
2190 			unsigned int board_idx)
2191 {
2192 	int rc = 0, n_hc, port, hc;
2193 	void __iomem *mmio = probe_ent->iomap[MV_PRIMARY_BAR];
2194 	struct mv_host_priv *hpriv = probe_ent->private_data;
2195 
2196 	/* global interrupt mask */
2197 	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2198 
2199 	rc = mv_chip_id(pdev, hpriv, board_idx);
2200 	if (rc)
2201 		goto done;
2202 
2203 	n_hc = mv_get_hc_count(probe_ent->port_flags);
2204 	probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;
2205 
2206 	for (port = 0; port < probe_ent->n_ports; port++)
2207 		hpriv->ops->read_preamp(hpriv, port, mmio);
2208 
2209 	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2210 	if (rc)
2211 		goto done;
2212 
2213 	hpriv->ops->reset_flash(hpriv, mmio);
2214 	hpriv->ops->reset_bus(pdev, mmio);
2215 	hpriv->ops->enable_leds(hpriv, mmio);
2216 
2217 	for (port = 0; port < probe_ent->n_ports; port++) {
2218 		if (IS_60XX(hpriv)) {
2219 			void __iomem *port_mmio = mv_port_base(mmio, port);
2220 
2221 			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2222 			ifctl |= (1 << 7);		/* enable gen2i speed */
2223 			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2224 			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2225 		}
2226 
2227 		hpriv->ops->phy_errata(hpriv, mmio, port);
2228 	}
2229 
2230 	for (port = 0; port < probe_ent->n_ports; port++) {
2231 		void __iomem *port_mmio = mv_port_base(mmio, port);
2232 		mv_port_init(&probe_ent->port[port], port_mmio);
2233 	}
2234 
2235 	for (hc = 0; hc < n_hc; hc++) {
2236 		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2237 
2238 		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2239 			"(before clear)=0x%08x\n", hc,
2240 			readl(hc_mmio + HC_CFG_OFS),
2241 			readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2242 
2243 		/* Clear any currently outstanding hc interrupt conditions */
2244 		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2245 	}
2246 
2247 	/* Clear any currently outstanding host interrupt conditions */
2248 	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2249 
2250 	/* and unmask interrupt generation for host regs */
2251 	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
2252 
2253 	if (IS_50XX(hpriv))
2254 		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2255 	else
2256 		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2257 
2258 	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2259 		"PCI int cause/mask=0x%08x/0x%08x\n",
2260 		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2261 		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2262 		readl(mmio + PCI_IRQ_CAUSE_OFS),
2263 		readl(mmio + PCI_IRQ_MASK_OFS));
2264 
2265 done:
2266 	return rc;
2267 }
2268 
2269 /**
2270  *      mv_print_info - Dump key info to kernel log for perusal.
2271  *      @probe_ent: early data struct representing the host
2272  *
2273  *      FIXME: complete this.
2274  *
2275  *      LOCKING:
2276  *      Inherited from caller.
2277  */
2278 static void mv_print_info(struct ata_probe_ent *probe_ent)
2279 {
2280 	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
2281 	struct mv_host_priv *hpriv = probe_ent->private_data;
2282 	u8 rev_id, scc;
2283 	const char *scc_s;
2284 
2285 	/* Use this to determine the HW stepping of the chip so we know
2286 	 * what errata to work around
2287 	 */
2288 	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2289 
2290 	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2291 	if (scc == 0)
2292 		scc_s = "SCSI";
2293 	else if (scc == 0x01)
2294 		scc_s = "RAID";
2295 	else
2296 		scc_s = "unknown";
2297 
2298 	dev_printk(KERN_INFO, &pdev->dev,
2299 	       "%u slots %u ports %s mode IRQ via %s\n",
2300 	       (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
2301 	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2302 }
2303 
2304 /**
2305  *      mv_init_one - handle a positive probe of a Marvell host
2306  *      @pdev: PCI device found
2307  *      @ent: PCI device ID entry for the matched host
2308  *
2309  *      LOCKING:
2310  *      Inherited from caller.
2311  */
2312 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2313 {
2314 	static int printed_version = 0;
2315 	struct device *dev = &pdev->dev;
2316 	struct ata_probe_ent *probe_ent;
2317 	struct mv_host_priv *hpriv;
2318 	unsigned int board_idx = (unsigned int)ent->driver_data;
2319 	int rc;
2320 
2321 	if (!printed_version++)
2322 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2323 
2324 	rc = pcim_enable_device(pdev);
2325 	if (rc)
2326 		return rc;
2327 	pci_set_master(pdev);
2328 
2329 	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2330 	if (rc == -EBUSY)
2331 		pcim_pin_device(pdev);
2332 	if (rc)
2333 		return rc;
2334 
2335 	probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
2336 	if (probe_ent == NULL)
2337 		return -ENOMEM;
2338 
2339 	probe_ent->dev = pci_dev_to_dev(pdev);
2340 	INIT_LIST_HEAD(&probe_ent->node);
2341 
2342 	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
2343 	if (!hpriv)
2344 		return -ENOMEM;
2345 
2346 	probe_ent->sht = mv_port_info[board_idx].sht;
2347 	probe_ent->port_flags = mv_port_info[board_idx].flags;
2348 	probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
2349 	probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
2350 	probe_ent->port_ops = mv_port_info[board_idx].port_ops;
2351 
2352 	probe_ent->irq = pdev->irq;
2353 	probe_ent->irq_flags = IRQF_SHARED;
2354 	probe_ent->iomap = pcim_iomap_table(pdev);
2355 	probe_ent->private_data = hpriv;
2356 
2357 	/* initialize adapter */
2358 	rc = mv_init_host(pdev, probe_ent, board_idx);
2359 	if (rc)
2360 		return rc;
2361 
2362 	/* Enable interrupts */
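	/* If MSI was requested but cannot be enabled, make sure legacy
	 * INTx delivery is turned on instead.
	 */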
2363 	if (msi && pci_enable_msi(pdev))
2364 		pci_intx(pdev, 1);
2365 
2366 	mv_dump_pci_cfg(pdev, 0x68);
2367 	mv_print_info(probe_ent);
2368 
2369 	if (ata_device_add(probe_ent) == 0)
2370 		return -ENODEV;
2371 
2372 	devm_kfree(dev, probe_ent);
2373 	return 0;
2374 }
2375 
2376 static int __init mv_init(void)
2377 {
2378 	return pci_register_driver(&mv_pci_driver);
2379 }
2380 
2381 static void __exit mv_exit(void)
2382 {
2383 	pci_unregister_driver(&mv_pci_driver);
2384 }
2385 
2386 MODULE_AUTHOR("Brett Russ");
2387 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2388 MODULE_LICENSE("GPL");
2389 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2390 MODULE_VERSION(DRV_VERSION);
2391 
2392 module_param(msi, int, 0444);
2393 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2394 
2395 module_init(mv_init);
2396 module_exit(mv_exit);
2397