1 /*
2  * sata_mv.c - Marvell SATA support
3  *
4  * Copyright 2005: EMC Corporation, all rights reserved.
5  * Copyright 2005 Red Hat, Inc.  All rights reserved.
6  *
7  * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; version 2 of the License.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23 
24 /*
25   sata_mv TODO list:
26 
27   1) Needs a full errata audit for all chipsets.  I implemented most
28   of the errata workarounds found in the Marvell vendor driver, but
29   I distinctly remember that a couple of workarounds (one related to
30   PCI-X) are still needed.
31 
32   4) Add NCQ support (easy to intermediate, once new-EH support appears)
33 
34   5) Investigate problems with PCI Message Signalled Interrupts (MSI).
35 
36   6) Add port multiplier support (intermediate)
37 
38   8) Develop a low-power-consumption strategy, and implement it.
39 
40   9) [Experiment, low priority] See if ATAPI can be supported using
41   "unknown FIS" or "vendor-specific FIS" support, or something creative
42   like that.
43 
44   10) [Experiment, low priority] Investigate interrupt coalescing.
45   Quite often, especially with PCI Message Signalled Interrupts (MSI),
46   the overhead reduced by interrupt mitigation is not worth the
47   latency cost.
48 
49   11) [Experiment, Marvell value added] Is it possible to use target
50   mode to cross-connect two Linux boxes with Marvell cards?  If so,
51   creating LibATA target mode support would be very interesting.
52 
53   Target mode, for those without docs, is the ability to directly
54   connect two SATA controllers.
55 
56   13) Verify that 7042 is fully supported.  I only have a 6042.
57 
58 */
59 
60 
61 #include <linux/kernel.h>
62 #include <linux/module.h>
63 #include <linux/pci.h>
64 #include <linux/init.h>
65 #include <linux/blkdev.h>
66 #include <linux/delay.h>
67 #include <linux/interrupt.h>
68 #include <linux/dma-mapping.h>
69 #include <linux/device.h>
70 #include <scsi/scsi_host.h>
71 #include <scsi/scsi_cmnd.h>
72 #include <scsi/scsi_device.h>
73 #include <linux/libata.h>
74 
75 #define DRV_NAME	"sata_mv"
76 #define DRV_VERSION	"1.01"
77 
78 enum {
79 	/* BARs are enumerated in pci_resource_start() terms */
80 	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
81 	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
82 	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */
83 
84 	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
85 	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */
86 
87 	MV_PCI_REG_BASE		= 0,
88 	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
89 	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
90 	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
91 	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
92 	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
93 	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),
94 
95 	MV_SATAHC0_REG_BASE	= 0x20000,
96 	MV_FLASH_CTL		= 0x1046c,
97 	MV_GPIO_PORT_CTL	= 0x104f0,
98 	MV_RESET_CFG		= 0x180d8,
99 
100 	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
101 	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
102 	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
103 	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
104 
105 	MV_MAX_Q_DEPTH		= 32,
106 	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
107 
108 	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
109 	 * CRPB needs alignment on a 256B boundary. Size == 256B
110 	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
111 	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
112 	 */
113 	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
114 	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
115 	MV_MAX_SG_CT		= 176,
116 	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
117 	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
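	/* arithmetic check: 32*32 + 8*32 + 176*16 = 1024 + 256 + 2816 = 4096B */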
118 
119 	MV_PORTS_PER_HC		= 4,
120 	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
121 	MV_PORT_HC_SHIFT	= 2,
122 	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
123 	MV_PORT_MASK		= 3,
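	/* e.g. port 6: HC = 6 >> 2 = 1, hard port = 6 & 3 = 2 */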
124 
125 	/* Host Flags */
126 	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
127 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
128 	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
129 				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
130 				  ATA_FLAG_PIO_POLLING,
131 	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,
132 
133 	CRQB_FLAG_READ		= (1 << 0),
134 	CRQB_TAG_SHIFT		= 1,
135 	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
136 	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
137 	CRQB_CMD_ADDR_SHIFT	= 8,
138 	CRQB_CMD_CS		= (0x2 << 11),
139 	CRQB_CMD_LAST		= (1 << 15),
140 
141 	CRPB_FLAG_STATUS_SHIFT	= 8,
142 	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
143 	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */
144 
145 	EPRD_FLAG_END_OF_TBL	= (1 << 31),
146 
147 	/* PCI interface registers */
148 
149 	PCI_COMMAND_OFS		= 0xc00,
150 
151 	PCI_MAIN_CMD_STS_OFS	= 0xd30,
152 	STOP_PCI_MASTER		= (1 << 2),
153 	PCI_MASTER_EMPTY	= (1 << 3),
154 	GLOB_SFT_RST		= (1 << 4),
155 
156 	MV_PCI_MODE		= 0xd00,
157 	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
158 	MV_PCI_DISC_TIMER	= 0xd04,
159 	MV_PCI_MSI_TRIGGER	= 0xc38,
160 	MV_PCI_SERR_MASK	= 0xc28,
161 	MV_PCI_XBAR_TMOUT	= 0x1d04,
162 	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
163 	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
164 	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
165 	MV_PCI_ERR_COMMAND	= 0x1d50,
166 
167 	PCI_IRQ_CAUSE_OFS		= 0x1d58,
168 	PCI_IRQ_MASK_OFS		= 0x1d5c,
169 	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */
170 
171 	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
172 	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
173 	PORT0_ERR		= (1 << 0),	/* shift by port # */
174 	PORT0_DONE		= (1 << 1),	/* shift by port # */
175 	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
176 	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
177 	PCI_ERR			= (1 << 18),
178 	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
179 	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
180 	PORTS_0_3_COAL_DONE	= (1 << 8),
181 	PORTS_4_7_COAL_DONE	= (1 << 17),
182 	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
183 	GPIO_INT		= (1 << 22),
184 	SELF_INT		= (1 << 23),
185 	TWSI_INT		= (1 << 24),
186 	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
187 	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
188 	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
189 				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
190 				   HC_MAIN_RSVD),
191 	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
192 				   HC_MAIN_RSVD_5),
193 
194 	/* SATAHC registers */
195 	HC_CFG_OFS		= 0,
196 
197 	HC_IRQ_CAUSE_OFS	= 0x14,
198 	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
199 	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
200 	DEV_IRQ			= (1 << 8),	/* shift by port # */
201 
202 	/* Shadow block registers */
203 	SHD_BLK_OFS		= 0x100,
204 	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */
205 
206 	/* SATA registers */
207 	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
208 	SATA_ACTIVE_OFS		= 0x350,
209 	PHY_MODE3		= 0x310,
210 	PHY_MODE4		= 0x314,
211 	PHY_MODE2		= 0x330,
212 	MV5_PHY_MODE		= 0x74,
213 	MV5_LT_MODE		= 0x30,
214 	MV5_PHY_CTL		= 0x0C,
215 	SATA_INTERFACE_CTL	= 0x050,
216 
217 	MV_M2_PREAMP_MASK	= 0x7e0,
218 
219 	/* Port registers */
220 	EDMA_CFG_OFS		= 0,
221 	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
222 	EDMA_CFG_NCQ		= (1 << 5),
223 	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
224 	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
225 	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */
226 
227 	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
228 	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
229 	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
230 	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
231 	EDMA_ERR_DEV		= (1 << 2),	/* device error */
232 	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
233 	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
234 	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
235 	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
236 	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
237 	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
238 	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
239 	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
240 	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
241 	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
242 	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
243 	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
244 	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
245 	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
246 	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
247 	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
248 	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
249 	EDMA_ERR_OVERRUN_5	= (1 << 5),
250 	EDMA_ERR_UNDERRUN_5	= (1 << 6),
251 	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
252 				  EDMA_ERR_PRD_PAR |
253 				  EDMA_ERR_DEV_DCON |
254 				  EDMA_ERR_DEV_CON |
255 				  EDMA_ERR_SERR |
256 				  EDMA_ERR_SELF_DIS |
257 				  EDMA_ERR_CRQB_PAR |
258 				  EDMA_ERR_CRPB_PAR |
259 				  EDMA_ERR_INTRL_PAR |
260 				  EDMA_ERR_IORDY |
261 				  EDMA_ERR_LNK_CTRL_RX_2 |
262 				  EDMA_ERR_LNK_DATA_RX |
263 				  EDMA_ERR_LNK_DATA_TX |
264 				  EDMA_ERR_TRANS_PROTO,
265 	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
266 				  EDMA_ERR_PRD_PAR |
267 				  EDMA_ERR_DEV_DCON |
268 				  EDMA_ERR_DEV_CON |
269 				  EDMA_ERR_OVERRUN_5 |
270 				  EDMA_ERR_UNDERRUN_5 |
271 				  EDMA_ERR_SELF_DIS_5 |
272 				  EDMA_ERR_CRQB_PAR |
273 				  EDMA_ERR_CRPB_PAR |
274 				  EDMA_ERR_INTRL_PAR |
275 				  EDMA_ERR_IORDY,
276 
277 	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
278 	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
279 
280 	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
281 	EDMA_REQ_Q_PTR_SHIFT	= 5,
282 
283 	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
284 	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
285 	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
286 	EDMA_RSP_Q_PTR_SHIFT	= 3,
287 
288 	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
289 	EDMA_EN			= (1 << 0),	/* enable EDMA */
290 	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
291 	ATA_RST			= (1 << 2),	/* reset trans/link/phy */
292 
293 	EDMA_IORDY_TMOUT	= 0x34,
294 	EDMA_ARB_CFG		= 0x38,
295 
296 	/* Host private flags (hp_flags) */
297 	MV_HP_FLAG_MSI		= (1 << 0),
298 	MV_HP_ERRATA_50XXB0	= (1 << 1),
299 	MV_HP_ERRATA_50XXB2	= (1 << 2),
300 	MV_HP_ERRATA_60X1B2	= (1 << 3),
301 	MV_HP_ERRATA_60X1C0	= (1 << 4),
302 	MV_HP_ERRATA_XX42A0	= (1 << 5),
303 	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
304 	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
305 	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
306 
307 	/* Port private flags (pp_flags) */
308 	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
309 	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
310 };
311 
312 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
313 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
314 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
315 
316 enum {
317 	/* DMA boundary 0xffff is required by the s/g splitting
318 	 * we need on /length/ in mv_fill_sg().
319 	 */
320 	MV_DMA_BOUNDARY		= 0xffffU,
321 
322 	/* mask of register bits containing lower 32 bits
323 	 * of EDMA request queue DMA address
324 	 */
325 	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,
326 
327 	/* ditto, for response queue */
328 	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
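	/* the low 10 (request) / 8 (response) bits are left free for the
	 * queue pointer fields, since the CRQB queue is 1KB-aligned and
	 * the CRPB queue is 256B-aligned
	 */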
329 };
330 
331 enum chip_type {
332 	chip_504x,
333 	chip_508x,
334 	chip_5080,
335 	chip_604x,
336 	chip_608x,
337 	chip_6042,
338 	chip_7042,
339 };
340 
341 /* Command ReQuest Block: 32B */
342 struct mv_crqb {
343 	__le32			sg_addr;
344 	__le32			sg_addr_hi;
345 	__le16			ctrl_flags;
346 	__le16			ata_cmd[11];
347 };
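/* 4 + 4 + 2 + 2*11 = 32 bytes, matching the 32B slot size noted above */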
348 
349 struct mv_crqb_iie {
350 	__le32			addr;
351 	__le32			addr_hi;
352 	__le32			flags;
353 	__le32			len;
354 	__le32			ata_cmd[4];
355 };
356 
357 /* Command ResPonse Block: 8B */
358 struct mv_crpb {
359 	__le16			id;
360 	__le16			flags;
361 	__le32			tmstmp;
362 };
363 
364 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
365 struct mv_sg {
366 	__le32			addr;
367 	__le32			flags_size;
368 	__le32			addr_hi;
369 	__le32			reserved;
370 };
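/* 16 bytes per entry; MV_SG_TBL_SZ allows for 176 of these per port */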
371 
372 struct mv_port_priv {
373 	struct mv_crqb		*crqb;
374 	dma_addr_t		crqb_dma;
375 	struct mv_crpb		*crpb;
376 	dma_addr_t		crpb_dma;
377 	struct mv_sg		*sg_tbl;
378 	dma_addr_t		sg_tbl_dma;
379 
380 	unsigned int		req_idx;
381 	unsigned int		resp_idx;
382 
383 	u32			pp_flags;
384 };
385 
386 struct mv_port_signal {
387 	u32			amps;
388 	u32			pre;
389 };
390 
391 struct mv_host_priv;
392 struct mv_hw_ops {
393 	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
394 			   unsigned int port);
395 	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
396 	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
397 			   void __iomem *mmio);
398 	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
399 			unsigned int n_hc);
400 	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
401 	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
402 };
403 
404 struct mv_host_priv {
405 	u32			hp_flags;
406 	struct mv_port_signal	signal[8];
407 	const struct mv_hw_ops	*ops;
408 };
409 
410 static void mv_irq_clear(struct ata_port *ap);
411 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
412 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
413 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
414 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
415 static int mv_port_start(struct ata_port *ap);
416 static void mv_port_stop(struct ata_port *ap);
417 static void mv_qc_prep(struct ata_queued_cmd *qc);
418 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
419 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
420 static void mv_error_handler(struct ata_port *ap);
421 static void mv_post_int_cmd(struct ata_queued_cmd *qc);
422 static void mv_eh_freeze(struct ata_port *ap);
423 static void mv_eh_thaw(struct ata_port *ap);
424 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
425 
426 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
427 			   unsigned int port);
428 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
429 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
430 			   void __iomem *mmio);
431 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
432 			unsigned int n_hc);
433 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
434 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
435 
436 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
437 			   unsigned int port);
438 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
439 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
440 			   void __iomem *mmio);
441 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
442 			unsigned int n_hc);
443 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
444 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
445 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
446 			     unsigned int port_no);
447 
448 static struct scsi_host_template mv5_sht = {
449 	.module			= THIS_MODULE,
450 	.name			= DRV_NAME,
451 	.ioctl			= ata_scsi_ioctl,
452 	.queuecommand		= ata_scsi_queuecmd,
453 	.can_queue		= ATA_DEF_QUEUE,
454 	.this_id		= ATA_SHT_THIS_ID,
455 	.sg_tablesize		= MV_MAX_SG_CT / 2,
456 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
457 	.emulated		= ATA_SHT_EMULATED,
458 	.use_clustering		= 1,
459 	.proc_name		= DRV_NAME,
460 	.dma_boundary		= MV_DMA_BOUNDARY,
461 	.slave_configure	= ata_scsi_slave_config,
462 	.slave_destroy		= ata_scsi_slave_destroy,
463 	.bios_param		= ata_std_bios_param,
464 };
465 
466 static struct scsi_host_template mv6_sht = {
467 	.module			= THIS_MODULE,
468 	.name			= DRV_NAME,
469 	.ioctl			= ata_scsi_ioctl,
470 	.queuecommand		= ata_scsi_queuecmd,
471 	.can_queue		= ATA_DEF_QUEUE,
472 	.this_id		= ATA_SHT_THIS_ID,
473 	.sg_tablesize		= MV_MAX_SG_CT / 2,
474 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
475 	.emulated		= ATA_SHT_EMULATED,
476 	.use_clustering		= 1,
477 	.proc_name		= DRV_NAME,
478 	.dma_boundary		= MV_DMA_BOUNDARY,
479 	.slave_configure	= ata_scsi_slave_config,
480 	.slave_destroy		= ata_scsi_slave_destroy,
481 	.bios_param		= ata_std_bios_param,
482 };
483 
484 static const struct ata_port_operations mv5_ops = {
485 	.tf_load		= ata_tf_load,
486 	.tf_read		= ata_tf_read,
487 	.check_status		= ata_check_status,
488 	.exec_command		= ata_exec_command,
489 	.dev_select		= ata_std_dev_select,
490 
491 	.cable_detect		= ata_cable_sata,
492 
493 	.qc_prep		= mv_qc_prep,
494 	.qc_issue		= mv_qc_issue,
495 	.data_xfer		= ata_data_xfer,
496 
497 	.irq_clear		= mv_irq_clear,
498 	.irq_on			= ata_irq_on,
499 
500 	.error_handler		= mv_error_handler,
501 	.post_internal_cmd	= mv_post_int_cmd,
502 	.freeze			= mv_eh_freeze,
503 	.thaw			= mv_eh_thaw,
504 
505 	.scr_read		= mv5_scr_read,
506 	.scr_write		= mv5_scr_write,
507 
508 	.port_start		= mv_port_start,
509 	.port_stop		= mv_port_stop,
510 };
511 
512 static const struct ata_port_operations mv6_ops = {
513 	.tf_load		= ata_tf_load,
514 	.tf_read		= ata_tf_read,
515 	.check_status		= ata_check_status,
516 	.exec_command		= ata_exec_command,
517 	.dev_select		= ata_std_dev_select,
518 
519 	.cable_detect		= ata_cable_sata,
520 
521 	.qc_prep		= mv_qc_prep,
522 	.qc_issue		= mv_qc_issue,
523 	.data_xfer		= ata_data_xfer,
524 
525 	.irq_clear		= mv_irq_clear,
526 	.irq_on			= ata_irq_on,
527 
528 	.error_handler		= mv_error_handler,
529 	.post_internal_cmd	= mv_post_int_cmd,
530 	.freeze			= mv_eh_freeze,
531 	.thaw			= mv_eh_thaw,
532 
533 	.scr_read		= mv_scr_read,
534 	.scr_write		= mv_scr_write,
535 
536 	.port_start		= mv_port_start,
537 	.port_stop		= mv_port_stop,
538 };
539 
540 static const struct ata_port_operations mv_iie_ops = {
541 	.tf_load		= ata_tf_load,
542 	.tf_read		= ata_tf_read,
543 	.check_status		= ata_check_status,
544 	.exec_command		= ata_exec_command,
545 	.dev_select		= ata_std_dev_select,
546 
547 	.cable_detect		= ata_cable_sata,
548 
549 	.qc_prep		= mv_qc_prep_iie,
550 	.qc_issue		= mv_qc_issue,
551 	.data_xfer		= ata_data_xfer,
552 
553 	.irq_clear		= mv_irq_clear,
554 	.irq_on			= ata_irq_on,
555 
556 	.error_handler		= mv_error_handler,
557 	.post_internal_cmd	= mv_post_int_cmd,
558 	.freeze			= mv_eh_freeze,
559 	.thaw			= mv_eh_thaw,
560 
561 	.scr_read		= mv_scr_read,
562 	.scr_write		= mv_scr_write,
563 
564 	.port_start		= mv_port_start,
565 	.port_stop		= mv_port_stop,
566 };
567 
568 static const struct ata_port_info mv_port_info[] = {
569 	{  /* chip_504x */
570 		.flags		= MV_COMMON_FLAGS,
571 		.pio_mask	= 0x1f,	/* pio0-4 */
572 		.udma_mask	= ATA_UDMA6,
573 		.port_ops	= &mv5_ops,
574 	},
575 	{  /* chip_508x */
576 		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
577 		.pio_mask	= 0x1f,	/* pio0-4 */
578 		.udma_mask	= ATA_UDMA6,
579 		.port_ops	= &mv5_ops,
580 	},
581 	{  /* chip_5080 */
582 		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
583 		.pio_mask	= 0x1f,	/* pio0-4 */
584 		.udma_mask	= ATA_UDMA6,
585 		.port_ops	= &mv5_ops,
586 	},
587 	{  /* chip_604x */
588 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
589 		.pio_mask	= 0x1f,	/* pio0-4 */
590 		.udma_mask	= ATA_UDMA6,
591 		.port_ops	= &mv6_ops,
592 	},
593 	{  /* chip_608x */
594 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
595 				  MV_FLAG_DUAL_HC,
596 		.pio_mask	= 0x1f,	/* pio0-4 */
597 		.udma_mask	= ATA_UDMA6,
598 		.port_ops	= &mv6_ops,
599 	},
600 	{  /* chip_6042 */
601 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
602 		.pio_mask	= 0x1f,	/* pio0-4 */
603 		.udma_mask	= ATA_UDMA6,
604 		.port_ops	= &mv_iie_ops,
605 	},
606 	{  /* chip_7042 */
607 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
608 		.pio_mask	= 0x1f,	/* pio0-4 */
609 		.udma_mask	= ATA_UDMA6,
610 		.port_ops	= &mv_iie_ops,
611 	},
612 };
613 
614 static const struct pci_device_id mv_pci_tbl[] = {
615 	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
616 	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
617 	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
618 	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
619 	/* RocketRAID 1740/174x have different identifiers */
620 	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
621 	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },
622 
623 	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
624 	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
625 	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
626 	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
627 	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
628 
629 	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
630 
631 	/* Adaptec 1430SA */
632 	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
633 
634 	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
635 
636 	/* add Marvell 7042 support */
637 	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
638 
639 	{ }			/* terminate list */
640 };
641 
642 static struct pci_driver mv_pci_driver = {
643 	.name			= DRV_NAME,
644 	.id_table		= mv_pci_tbl,
645 	.probe			= mv_init_one,
646 	.remove			= ata_pci_remove_one,
647 };
648 
649 static const struct mv_hw_ops mv5xxx_ops = {
650 	.phy_errata		= mv5_phy_errata,
651 	.enable_leds		= mv5_enable_leds,
652 	.read_preamp		= mv5_read_preamp,
653 	.reset_hc		= mv5_reset_hc,
654 	.reset_flash		= mv5_reset_flash,
655 	.reset_bus		= mv5_reset_bus,
656 };
657 
658 static const struct mv_hw_ops mv6xxx_ops = {
659 	.phy_errata		= mv6_phy_errata,
660 	.enable_leds		= mv6_enable_leds,
661 	.read_preamp		= mv6_read_preamp,
662 	.reset_hc		= mv6_reset_hc,
663 	.reset_flash		= mv6_reset_flash,
664 	.reset_bus		= mv_reset_pci_bus,
665 };
666 
667 /*
668  * module options
669  */
670 static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
671 
672 
673 /* move to PCI layer or libata core? */
674 static int pci_go_64(struct pci_dev *pdev)
675 {
676 	int rc;
677 
678 	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
679 		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
680 		if (rc) {
681 			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
682 			if (rc) {
683 				dev_printk(KERN_ERR, &pdev->dev,
684 					   "64-bit DMA enable failed\n");
685 				return rc;
686 			}
687 		}
688 	} else {
689 		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
690 		if (rc) {
691 			dev_printk(KERN_ERR, &pdev->dev,
692 				   "32-bit DMA enable failed\n");
693 			return rc;
694 		}
695 		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
696 		if (rc) {
697 			dev_printk(KERN_ERR, &pdev->dev,
698 				   "32-bit consistent DMA enable failed\n");
699 			return rc;
700 		}
701 	}
702 
703 	return rc;
704 }
705 
706 /*
707  * Functions
708  */
709 
710 static inline void writelfl(unsigned long data, void __iomem *addr)
711 {
712 	writel(data, addr);
713 	(void) readl(addr);	/* flush to avoid PCI posted write */
714 }
715 
716 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
717 {
718 	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
719 }
720 
721 static inline unsigned int mv_hc_from_port(unsigned int port)
722 {
723 	return port >> MV_PORT_HC_SHIFT;
724 }
725 
726 static inline unsigned int mv_hardport_from_port(unsigned int port)
727 {
728 	return port & MV_PORT_MASK;
729 }
730 
731 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
732 						 unsigned int port)
733 {
734 	return mv_hc_base(base, mv_hc_from_port(port));
735 }
736 
737 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
738 {
739 	return  mv_hc_base_from_port(base, port) +
740 		MV_SATAHC_ARBTR_REG_SZ +
741 		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
742 }
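/* e.g. port 5: base + 0x20000 (SATAHC0) + 0x10000 (HC1) + 0x2000 (arbiter)
 * + 1 * 0x2000 (hard port 1) = base + 0x34000
 */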
743 
744 static inline void __iomem *mv_ap_base(struct ata_port *ap)
745 {
746 	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
747 }
748 
749 static inline int mv_get_hc_count(unsigned long port_flags)
750 {
751 	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
752 }
753 
754 static void mv_irq_clear(struct ata_port *ap)
755 {
756 }
757 
758 static void mv_set_edma_ptrs(void __iomem *port_mmio,
759 			     struct mv_host_priv *hpriv,
760 			     struct mv_port_priv *pp)
761 {
762 	u32 index;
763 
764 	/*
765 	 * initialize request queue
766 	 */
767 	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
768 
769 	WARN_ON(pp->crqb_dma & 0x3ff);
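	/* (dma >> 16) >> 16 below avoids an undefined 32-bit shift-by-32
	 * when dma_addr_t is only 32 bits wide
	 */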
770 	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
771 	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
772 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
773 
774 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
775 		writelfl((pp->crqb_dma & 0xffffffff) | index,
776 			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
777 	else
778 		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
779 
780 	/*
781 	 * initialize response queue
782 	 */
783 	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
784 
785 	WARN_ON(pp->crpb_dma & 0xff);
786 	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
787 
788 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
789 		writelfl((pp->crpb_dma & 0xffffffff) | index,
790 			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
791 	else
792 		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
793 
794 	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
795 		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
796 }
797 
798 /**
799  *      mv_start_dma - Enable eDMA engine
800  *      @base: port base address
 *      @hpriv: host private data
801  *      @pp: port private data
802  *
803  *      Verify the local cache of the eDMA state is accurate with a
804  *      WARN_ON.
805  *
806  *      LOCKING:
807  *      Inherited from caller.
808  */
809 static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
810 			 struct mv_port_priv *pp)
811 {
812 	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
813 		/* clear EDMA event indicators, if any */
814 		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);
815 
816 		mv_set_edma_ptrs(base, hpriv, pp);
817 
818 		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
819 		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
820 	}
821 	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
822 }
823 
824 /**
825  *      __mv_stop_dma - Disable eDMA engine
826  *      @ap: ATA channel to manipulate
827  *
828  *      Verify the local cache of the eDMA state is accurate with a
829  *      WARN_ON.
830  *
831  *      LOCKING:
832  *      Inherited from caller.
833  */
834 static int __mv_stop_dma(struct ata_port *ap)
835 {
836 	void __iomem *port_mmio = mv_ap_base(ap);
837 	struct mv_port_priv *pp	= ap->private_data;
838 	u32 reg;
839 	int i, err = 0;
840 
841 	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
842 		/* Disable EDMA if active.   The disable bit auto clears.
843 		 */
844 		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
845 		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
846 	} else {
847 		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
848 	}
849 
850 	/* now properly wait for the eDMA to stop */
851 	for (i = 1000; i > 0; i--) {
852 		reg = readl(port_mmio + EDMA_CMD_OFS);
853 		if (!(reg & EDMA_EN))
854 			break;
855 
856 		udelay(100);
857 	}
858 
859 	if (reg & EDMA_EN) {
860 		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
861 		err = -EIO;
862 	}
863 
864 	return err;
865 }
866 
867 static int mv_stop_dma(struct ata_port *ap)
868 {
869 	unsigned long flags;
870 	int rc;
871 
872 	spin_lock_irqsave(&ap->host->lock, flags);
873 	rc = __mv_stop_dma(ap);
874 	spin_unlock_irqrestore(&ap->host->lock, flags);
875 
876 	return rc;
877 }
878 
879 #ifdef ATA_DEBUG
880 static void mv_dump_mem(void __iomem *start, unsigned bytes)
881 {
882 	int b, w;
883 	for (b = 0; b < bytes; ) {
884 		DPRINTK("%p: ", start + b);
885 		for (w = 0; b < bytes && w < 4; w++) {
886 			printk("%08x ", readl(start + b));
887 			b += sizeof(u32);
888 		}
889 		printk("\n");
890 	}
891 }
892 #endif
893 
894 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
895 {
896 #ifdef ATA_DEBUG
897 	int b, w;
898 	u32 dw;
899 	for (b = 0; b < bytes; ) {
900 		DPRINTK("%02x: ", b);
901 		for (w = 0; b < bytes && w < 4; w++) {
902 			(void) pci_read_config_dword(pdev, b, &dw);
903 			printk("%08x ", dw);
904 			b += sizeof(u32);
905 		}
906 		printk("\n");
907 	}
908 #endif
909 }
910 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
911 			     struct pci_dev *pdev)
912 {
913 #ifdef ATA_DEBUG
914 	void __iomem *hc_base = mv_hc_base(mmio_base,
915 					   port >> MV_PORT_HC_SHIFT);
916 	void __iomem *port_base;
917 	int start_port, num_ports, p, start_hc, num_hcs, hc;
918 
919 	if (0 > port) {
920 		start_hc = start_port = 0;
921 		num_ports = 8;		/* should be benign for 4-port devs */
922 		num_hcs = 2;
923 	} else {
924 		start_hc = port >> MV_PORT_HC_SHIFT;
925 		start_port = port;
926 		num_ports = num_hcs = 1;
927 	}
928 	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
929 		num_ports > 1 ? num_ports - 1 : start_port);
930 
931 	if (NULL != pdev) {
932 		DPRINTK("PCI config space regs:\n");
933 		mv_dump_pci_cfg(pdev, 0x68);
934 	}
935 	DPRINTK("PCI regs:\n");
936 	mv_dump_mem(mmio_base+0xc00, 0x3c);
937 	mv_dump_mem(mmio_base+0xd00, 0x34);
938 	mv_dump_mem(mmio_base+0xf00, 0x4);
939 	mv_dump_mem(mmio_base+0x1d00, 0x6c);
940 	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
941 		hc_base = mv_hc_base(mmio_base, hc);
942 		DPRINTK("HC regs (HC %i):\n", hc);
943 		mv_dump_mem(hc_base, 0x1c);
944 	}
945 	for (p = start_port; p < start_port + num_ports; p++) {
946 		port_base = mv_port_base(mmio_base, p);
947 		DPRINTK("EDMA regs (port %i):\n", p);
948 		mv_dump_mem(port_base, 0x54);
949 		DPRINTK("SATA regs (port %i):\n", p);
950 		mv_dump_mem(port_base+0x300, 0x60);
951 	}
952 #endif
953 }
954 
955 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
956 {
957 	unsigned int ofs;
958 
959 	switch (sc_reg_in) {
960 	case SCR_STATUS:
961 	case SCR_CONTROL:
962 	case SCR_ERROR:
963 		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
964 		break;
965 	case SCR_ACTIVE:
966 		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
967 		break;
968 	default:
969 		ofs = 0xffffffffU;
970 		break;
971 	}
972 	return ofs;
973 }
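/* with libata's SCR_* numbering: SCR_STATUS -> 0x300, SCR_ERROR -> 0x304,
 * SCR_CONTROL -> 0x308
 */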
974 
975 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
976 {
977 	unsigned int ofs = mv_scr_offset(sc_reg_in);
978 
979 	if (ofs != 0xffffffffU) {
980 		*val = readl(mv_ap_base(ap) + ofs);
981 		return 0;
982 	} else
983 		return -EINVAL;
984 }
985 
986 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
987 {
988 	unsigned int ofs = mv_scr_offset(sc_reg_in);
989 
990 	if (ofs != 0xffffffffU) {
991 		writelfl(val, mv_ap_base(ap) + ofs);
992 		return 0;
993 	} else
994 		return -EINVAL;
995 }
996 
997 static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
998 			void __iomem *port_mmio)
999 {
1000 	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
1001 
1002 	/* set up non-NCQ EDMA configuration */
1003 	cfg &= ~(1 << 9);	/* disable eQue */
1004 
1005 	if (IS_GEN_I(hpriv)) {
1006 		cfg &= ~0x1f;		/* clear queue depth */
1007 		cfg |= (1 << 8);	/* enab config burst size mask */
1008 	}
1009 
1010 	else if (IS_GEN_II(hpriv)) {
1011 		cfg &= ~0x1f;		/* clear queue depth */
1012 		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1013 		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
1014 	}
1015 
1016 	else if (IS_GEN_IIE(hpriv)) {
1017 		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
1018 		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
1019 		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
1020 		cfg |= (1 << 18);	/* enab early completion */
1021 		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
1022 		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
1023 		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
1024 	}
1025 
1026 	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1027 }
1028 
1029 /**
1030  *      mv_port_start - Port specific init/start routine.
1031  *      @ap: ATA channel to manipulate
1032  *
1033  *      Allocate and point to DMA memory, init port private memory,
1034  *      zero indices.
1035  *
1036  *      LOCKING:
1037  *      Inherited from caller.
1038  */
1039 static int mv_port_start(struct ata_port *ap)
1040 {
1041 	struct device *dev = ap->host->dev;
1042 	struct mv_host_priv *hpriv = ap->host->private_data;
1043 	struct mv_port_priv *pp;
1044 	void __iomem *port_mmio = mv_ap_base(ap);
1045 	void *mem;
1046 	dma_addr_t mem_dma;
1047 	unsigned long flags;
1048 	int rc;
1049 
1050 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1051 	if (!pp)
1052 		return -ENOMEM;
1053 
1054 	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
1055 				  GFP_KERNEL);
1056 	if (!mem)
1057 		return -ENOMEM;
1058 	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
1059 
1060 	rc = ata_pad_alloc(ap, dev);
1061 	if (rc)
1062 		return rc;
1063 
1064 	/* First item in chunk of DMA memory:
1065 	 * 32-slot command request table (CRQB), 32 bytes each in size
1066 	 */
1067 	pp->crqb = mem;
1068 	pp->crqb_dma = mem_dma;
1069 	mem += MV_CRQB_Q_SZ;
1070 	mem_dma += MV_CRQB_Q_SZ;
1071 
1072 	/* Second item:
1073 	 * 32-slot command response table (CRPB), 8 bytes each in size
1074 	 */
1075 	pp->crpb = mem;
1076 	pp->crpb_dma = mem_dma;
1077 	mem += MV_CRPB_Q_SZ;
1078 	mem_dma += MV_CRPB_Q_SZ;
1079 
1080 	/* Third item:
1081 	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
1082 	 */
1083 	pp->sg_tbl = mem;
1084 	pp->sg_tbl_dma = mem_dma;
1085 
1086 	spin_lock_irqsave(&ap->host->lock, flags);
1087 
1088 	mv_edma_cfg(ap, hpriv, port_mmio);
1089 
1090 	mv_set_edma_ptrs(port_mmio, hpriv, pp);
1091 
1092 	spin_unlock_irqrestore(&ap->host->lock, flags);
1093 
1094 	/* Don't turn on EDMA here...do it before DMA commands only.  Else
1095 	 * we'll be unable to send non-data, PIO, etc due to restricted access
1096 	 * to shadow regs.
1097 	 */
1098 	ap->private_data = pp;
1099 	return 0;
1100 }
1101 
1102 /**
1103  *      mv_port_stop - Port specific cleanup/stop routine.
1104  *      @ap: ATA channel to manipulate
1105  *
1106  *      Stop DMA, cleanup port memory.
1107  *
1108  *      LOCKING:
1109  *      This routine uses the host lock to protect the DMA stop.
1110  */
1111 static void mv_port_stop(struct ata_port *ap)
1112 {
1113 	mv_stop_dma(ap);
1114 }
1115 
1116 /**
1117  *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1118  *      @qc: queued command whose SG list to source from
1119  *
1120  *      Populate the SG list and mark the last entry.
1121  *
1122  *      LOCKING:
1123  *      Inherited from caller.
1124  */
1125 static void mv_fill_sg(struct ata_queued_cmd *qc)
1126 {
1127 	struct mv_port_priv *pp = qc->ap->private_data;
1128 	struct scatterlist *sg;
1129 	struct mv_sg *mv_sg, *last_sg = NULL;
1130 
1131 	mv_sg = pp->sg_tbl;
1132 	ata_for_each_sg(sg, qc) {
1133 		dma_addr_t addr = sg_dma_address(sg);
1134 		u32 sg_len = sg_dma_len(sg);
1135 
1136 		while (sg_len) {
1137 			u32 offset = addr & 0xffff;
1138 			u32 len = sg_len;
1139 
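			/* break large segments at 64KB-aligned boundaries,
			 * e.g. addr 0x1fff0, sg_len 0x20 becomes two
			 * 0x10-byte entries
			 */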
1140 			if ((offset + sg_len > 0x10000))
1141 				len = 0x10000 - offset;
1142 
1143 			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1144 			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1145 			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1146 
1147 			sg_len -= len;
1148 			addr += len;
1149 
1150 			last_sg = mv_sg;
1151 			mv_sg++;
1152 		}
1153 	}
1154 
1155 	if (likely(last_sg))
1156 		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1157 }
1158 
1159 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1160 {
1161 	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1162 		(last ? CRQB_CMD_LAST : 0);
1163 	*cmdw = cpu_to_le16(tmp);
1164 }
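/* builds one CRQB command halfword: data in bits 7:0, the ATA register
 * address in bits 10:8, CRQB_CMD_CS in bits 12:11, and CRQB_CMD_LAST
 * (bit 15) set on the final word
 */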
1165 
1166 /**
1167  *      mv_qc_prep - Host specific command preparation.
1168  *      @qc: queued command to prepare
1169  *
1170  *      This routine simply redirects to the general purpose routine
1171  *      if command is not DMA.  Else, it handles prep of the CRQB
1172  *      (command request block), does some sanity checking, and calls
1173  *      the SG load routine.
1174  *
1175  *      LOCKING:
1176  *      Inherited from caller.
1177  */
1178 static void mv_qc_prep(struct ata_queued_cmd *qc)
1179 {
1180 	struct ata_port *ap = qc->ap;
1181 	struct mv_port_priv *pp = ap->private_data;
1182 	__le16 *cw;
1183 	struct ata_taskfile *tf;
1184 	u16 flags = 0;
1185 	unsigned in_index;
1186 
1187 	if (qc->tf.protocol != ATA_PROT_DMA)
1188 		return;
1189 
1190 	/* Fill in command request block
1191 	 */
1192 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1193 		flags |= CRQB_FLAG_READ;
1194 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1195 	flags |= qc->tag << CRQB_TAG_SHIFT;
1196 	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/
1197 
1198 	/* get current queue index from software */
1199 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1200 
1201 	pp->crqb[in_index].sg_addr =
1202 		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1203 	pp->crqb[in_index].sg_addr_hi =
1204 		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1205 	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1206 
1207 	cw = &pp->crqb[in_index].ata_cmd[0];
1208 	tf = &qc->tf;
1209 
1210 	/* Sadly, the CRQB cannot accommodate all registers--there are
1211 	 * only 11 bytes...so we must pick and choose required
1212 	 * registers based on the command.  So, we drop feature and
1213 	 * hob_feature for [RW] DMA commands, but they are needed for
1214 	 * NCQ.  NCQ will drop hob_nsect.
1215 	 */
1216 	switch (tf->command) {
1217 	case ATA_CMD_READ:
1218 	case ATA_CMD_READ_EXT:
1219 	case ATA_CMD_WRITE:
1220 	case ATA_CMD_WRITE_EXT:
1221 	case ATA_CMD_WRITE_FUA_EXT:
1222 		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1223 		break;
1224 #ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
1225 	case ATA_CMD_FPDMA_READ:
1226 	case ATA_CMD_FPDMA_WRITE:
1227 		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1228 		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1229 		break;
1230 #endif				/* FIXME: remove this line when NCQ added */
1231 	default:
1232 		/* The only other commands EDMA supports in non-queued and
1233 		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1234 		 * of which are defined/used by Linux.  If we get here, this
1235 		 * driver needs work.
1236 		 *
1237 		 * FIXME: modify libata to give qc_prep a return value and
1238 		 * return error here.
1239 		 */
1240 		BUG_ON(tf->command);
1241 		break;
1242 	}
1243 	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1244 	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1245 	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1246 	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1247 	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1248 	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1249 	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1250 	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1251 	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
1252 
1253 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1254 		return;
1255 	mv_fill_sg(qc);
1256 }
1257 
1258 /**
1259  *      mv_qc_prep_iie - Host specific command preparation.
1260  *      @qc: queued command to prepare
1261  *
1262  *      This routine simply redirects to the general purpose routine
1263  *      if command is not DMA.  Else, it handles prep of the CRQB
1264  *      (command request block), does some sanity checking, and calls
1265  *      the SG load routine.
1266  *
1267  *      LOCKING:
1268  *      Inherited from caller.
1269  */
1270 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1271 {
1272 	struct ata_port *ap = qc->ap;
1273 	struct mv_port_priv *pp = ap->private_data;
1274 	struct mv_crqb_iie *crqb;
1275 	struct ata_taskfile *tf;
1276 	unsigned in_index;
1277 	u32 flags = 0;
1278 
1279 	if (qc->tf.protocol != ATA_PROT_DMA)
1280 		return;
1281 
1282 	/* Fill in Gen IIE command request block
1283 	 */
1284 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1285 		flags |= CRQB_FLAG_READ;
1286 
1287 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1288 	flags |= qc->tag << CRQB_TAG_SHIFT;
1289 	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
1290 						   what we use as our tag */
1291 
1292 	/* get current queue index from software */
1293 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1294 
1295 	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1296 	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1297 	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1298 	crqb->flags = cpu_to_le32(flags);
1299 
1300 	tf = &qc->tf;
1301 	crqb->ata_cmd[0] = cpu_to_le32(
1302 			(tf->command << 16) |
1303 			(tf->feature << 24)
1304 		);
1305 	crqb->ata_cmd[1] = cpu_to_le32(
1306 			(tf->lbal << 0) |
1307 			(tf->lbam << 8) |
1308 			(tf->lbah << 16) |
1309 			(tf->device << 24)
1310 		);
1311 	crqb->ata_cmd[2] = cpu_to_le32(
1312 			(tf->hob_lbal << 0) |
1313 			(tf->hob_lbam << 8) |
1314 			(tf->hob_lbah << 16) |
1315 			(tf->hob_feature << 24)
1316 		);
1317 	crqb->ata_cmd[3] = cpu_to_le32(
1318 			(tf->nsect << 0) |
1319 			(tf->hob_nsect << 8)
1320 		);
1321 
1322 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1323 		return;
1324 	mv_fill_sg(qc);
1325 }
1326 
1327 /**
1328  *      mv_qc_issue - Initiate a command to the host
1329  *      @qc: queued command to start
1330  *
1331  *      This routine simply redirects to the general purpose routine
1332  *      if command is not DMA.  Else, it sanity checks our local
1333  *      caches of the request producer/consumer indices then enables
1334  *      DMA and bumps the request producer index.
1335  *
1336  *      LOCKING:
1337  *      Inherited from caller.
1338  */
1339 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1340 {
1341 	struct ata_port *ap = qc->ap;
1342 	void __iomem *port_mmio = mv_ap_base(ap);
1343 	struct mv_port_priv *pp = ap->private_data;
1344 	struct mv_host_priv *hpriv = ap->host->private_data;
1345 	u32 in_index;
1346 
1347 	if (qc->tf.protocol != ATA_PROT_DMA) {
1348 		/* We're about to send a non-EDMA capable command to the
1349 		 * port.  Turn off EDMA so there won't be problems accessing
1350 		 * shadow block, etc registers.
1351 		 */
1352 		__mv_stop_dma(ap);
1353 		return ata_qc_issue_prot(qc);
1354 	}
1355 
1356 	mv_start_dma(port_mmio, hpriv, pp);
1357 
1358 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1359 
1360 	/* until we do queuing, the queue should be empty at this point */
1361 	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1362 		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1363 
1364 	pp->req_idx++;
1365 
1366 	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1367 
1368 	/* and write the request in pointer to kick the EDMA to life */
1369 	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1370 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1371 
1372 	return 0;
1373 }
1374 
1375 /**
1376  *      mv_err_intr - Handle error interrupts on the port
1377  *      @ap: ATA channel to manipulate
1378  *      @qc: active queued command at the time of the error, if any
1379  *
1380  *      In most cases, just clear the interrupt and move on.  However,
1381  *      some cases require an eDMA reset, which is done right before
1382  *      the COMRESET in mv_phy_reset().  The SERR case requires a
1383  *      clear of pending errors in the SATA SERROR register.  Finally,
1384  *      if the port disabled DMA, update our cached copy to match.
1385  *
1386  *      LOCKING:
1387  *      Inherited from caller.
1388  */
1389 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1390 {
1391 	void __iomem *port_mmio = mv_ap_base(ap);
1392 	u32 edma_err_cause, eh_freeze_mask, serr = 0;
1393 	struct mv_port_priv *pp = ap->private_data;
1394 	struct mv_host_priv *hpriv = ap->host->private_data;
1395 	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1396 	unsigned int action = 0, err_mask = 0;
1397 	struct ata_eh_info *ehi = &ap->link.eh_info;
1398 
1399 	ata_ehi_clear_desc(ehi);
1400 
1401 	if (!edma_enabled) {
1402 		/* just a guess: do we need to do this? should we
1403 		 * expand this, and do it in all cases?
1404 		 */
1405 		sata_scr_read(&ap->link, SCR_ERROR, &serr);
1406 		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1407 	}
1408 
1409 	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1410 
1411 	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1412 
1413 	/*
1414 	 * all generations share these EDMA error cause bits
1415 	 */
1416 
1417 	if (edma_err_cause & EDMA_ERR_DEV)
1418 		err_mask |= AC_ERR_DEV;
1419 	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1420 			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1421 			EDMA_ERR_INTRL_PAR)) {
1422 		err_mask |= AC_ERR_ATA_BUS;
1423 		action |= ATA_EH_HARDRESET;
1424 		ata_ehi_push_desc(ehi, "parity error");
1425 	}
1426 	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1427 		ata_ehi_hotplugged(ehi);
1428 		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1429 			"dev disconnect" : "dev connect");
1430 	}
1431 
1432 	if (IS_GEN_I(hpriv)) {
1433 		eh_freeze_mask = EDMA_EH_FREEZE_5;
1434 
1435 		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1436 			struct mv_port_priv *pp	= ap->private_data;
1437 			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1438 			ata_ehi_push_desc(ehi, "EDMA self-disable");
1439 		}
1440 	} else {
1441 		eh_freeze_mask = EDMA_EH_FREEZE;
1442 
1443 		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1444 			struct mv_port_priv *pp	= ap->private_data;
1445 			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1446 			ata_ehi_push_desc(ehi, "EDMA self-disable");
1447 		}
1448 
1449 		if (edma_err_cause & EDMA_ERR_SERR) {
1450 			sata_scr_read(&ap->link, SCR_ERROR, &serr);
1451 			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1452 			err_mask = AC_ERR_ATA_BUS;
1453 			action |= ATA_EH_HARDRESET;
1454 		}
1455 	}
1456 
1457 	/* Clear EDMA now that SERR cleanup done */
1458 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1459 
1460 	if (!err_mask) {
1461 		err_mask = AC_ERR_OTHER;
1462 		action |= ATA_EH_HARDRESET;
1463 	}
1464 
1465 	ehi->serror |= serr;
1466 	ehi->action |= action;
1467 
1468 	if (qc)
1469 		qc->err_mask |= err_mask;
1470 	else
1471 		ehi->err_mask |= err_mask;
1472 
1473 	if (edma_err_cause & eh_freeze_mask)
1474 		ata_port_freeze(ap);
1475 	else
1476 		ata_port_abort(ap);
1477 }
1478 
1479 static void mv_intr_pio(struct ata_port *ap)
1480 {
1481 	struct ata_queued_cmd *qc;
1482 	u8 ata_status;
1483 
1484 	/* ignore spurious intr if drive still BUSY */
1485 	ata_status = readb(ap->ioaddr.status_addr);
1486 	if (unlikely(ata_status & ATA_BUSY))
1487 		return;
1488 
1489 	/* get active ATA command */
1490 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
1491 	if (unlikely(!qc))			/* no active tag */
1492 		return;
1493 	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
1494 		return;
1495 
1496 	/* and finally, complete the ATA command */
1497 	qc->err_mask |= ac_err_mask(ata_status);
1498 	ata_qc_complete(qc);
1499 }
1500 
1501 static void mv_intr_edma(struct ata_port *ap)
1502 {
1503 	void __iomem *port_mmio = mv_ap_base(ap);
1504 	struct mv_host_priv *hpriv = ap->host->private_data;
1505 	struct mv_port_priv *pp = ap->private_data;
1506 	struct ata_queued_cmd *qc;
1507 	u32 out_index, in_index;
1508 	bool work_done = false;
1509 
1510 	/* get h/w response queue pointer */
1511 	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1512 			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1513 
1514 	while (1) {
1515 		u16 status;
1516 		unsigned int tag;
1517 
1518 		/* get s/w response queue last-read pointer, and compare */
1519 		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1520 		if (in_index == out_index)
1521 			break;
1522 
1523 		/* 50xx: get active ATA command */
1524 		if (IS_GEN_I(hpriv))
1525 			tag = ap->link.active_tag;
1526 
1527 		/* Gen II/IIE: get active ATA command via tag, to enable
1528 		 * support for queueing.  this works transparently for
1529 		 * queued and non-queued modes.
1530 		 */
1531 		else if (IS_GEN_II(hpriv))
1532 			tag = (le16_to_cpu(pp->crpb[out_index].id)
1533 				>> CRPB_IOID_SHIFT_6) & 0x3f;
1534 
1535 		else /* IS_GEN_IIE */
1536 			tag = (le16_to_cpu(pp->crpb[out_index].id)
1537 				>> CRPB_IOID_SHIFT_7) & 0x3f;
1538 
1539 		qc = ata_qc_from_tag(ap, tag);
1540 
1541 		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
1542 		 * bits (WARNING: might not necessarily be associated
1543 		 * with this command), which -should- be clear
1544 		 * if all is well
1545 		 */
1546 		status = le16_to_cpu(pp->crpb[out_index].flags);
1547 		if (unlikely(status & 0xff)) {
1548 			mv_err_intr(ap, qc);
1549 			return;
1550 		}
1551 
1552 		/* and finally, complete the ATA command */
1553 		if (qc) {
1554 			qc->err_mask |=
1555 				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1556 			ata_qc_complete(qc);
1557 		}
1558 
1559 		/* advance software response queue pointer, to
1560 		 * indicate (after the loop completes) to hardware
1561 		 * that we have consumed a response queue entry.
1562 		 */
1563 		work_done = true;
1564 		pp->resp_idx++;
1565 	}
1566 
1567 	if (work_done)
1568 		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1569 			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1570 			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1571 }
1572 
1573 /**
1574  *      mv_host_intr - Handle all interrupts on the given host controller
1575  *      @host: host specific structure
1576  *      @relevant: port error bits relevant to this host controller
1577  *      @hc: which host controller we're to look at
1578  *
1579  *      Read then write clear the HC interrupt status then walk each
1580  *      port connected to the HC and see if it needs servicing.  Port
1581  *      success ints are reported in the HC interrupt status reg, the
1582  *      port error ints are reported in the higher level main
1583  *      interrupt status register and thus are passed in via the
1584  *      'relevant' argument.
1585  *
1586  *      LOCKING:
1587  *      Inherited from caller.
1588  */
1589 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1590 {
1591 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1592 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1593 	u32 hc_irq_cause;
1594 	int port, port0;
1595 
1596 	if (hc == 0)
1597 		port0 = 0;
1598 	else
1599 		port0 = MV_PORTS_PER_HC;
1600 
1601 	/* we'll need the HC success int register in most cases */
1602 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1603 	if (!hc_irq_cause)
1604 		return;
1605 
1606 	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1607 
1608 	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1609 		hc, relevant, hc_irq_cause);
1610 
1611 	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1612 		struct ata_port *ap = host->ports[port];
1613 		struct mv_port_priv *pp = ap->private_data;
1614 		int have_err_bits, hard_port, shift;
1615 
1616 		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1617 			continue;
1618 
1619 		shift = port << 1;		/* (port * 2) */
1620 		if (port >= MV_PORTS_PER_HC) {
1621 			shift++;	/* skip bit 8 in the HC Main IRQ reg */
1622 		}
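		/* e.g. port 5 yields shift 11: its err/done bits sit above
		 * PORTS_0_3_COAL_DONE (bit 8) in the main cause register
		 */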
1623 		have_err_bits = ((PORT0_ERR << shift) & relevant);
1624 
1625 		if (unlikely(have_err_bits)) {
1626 			struct ata_queued_cmd *qc;
1627 
1628 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
1629 			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1630 				continue;
1631 
1632 			mv_err_intr(ap, qc);
1633 			continue;
1634 		}
1635 
1636 		hard_port = mv_hardport_from_port(port); /* range 0..3 */
1637 
1638 		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1639 			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1640 				mv_intr_edma(ap);
1641 		} else {
1642 			if ((DEV_IRQ << hard_port) & hc_irq_cause)
1643 				mv_intr_pio(ap);
1644 		}
1645 	}
1646 	VPRINTK("EXIT\n");
1647 }
1648 
1649 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1650 {
1651 	struct ata_port *ap;
1652 	struct ata_queued_cmd *qc;
1653 	struct ata_eh_info *ehi;
1654 	unsigned int i, err_mask, printed = 0;
1655 	u32 err_cause;
1656 
1657 	err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);
1658 
1659 	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1660 		   err_cause);
1661 
1662 	DPRINTK("All regs @ PCI error\n");
1663 	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1664 
1665 	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1666 
1667 	for (i = 0; i < host->n_ports; i++) {
1668 		ap = host->ports[i];
1669 		if (!ata_link_offline(&ap->link)) {
1670 			ehi = &ap->link.eh_info;
1671 			ata_ehi_clear_desc(ehi);
1672 			if (!printed++)
1673 				ata_ehi_push_desc(ehi,
1674 					"PCI err cause 0x%08x", err_cause);
1675 			err_mask = AC_ERR_HOST_BUS;
1676 			ehi->action = ATA_EH_HARDRESET;
1677 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
1678 			if (qc)
1679 				qc->err_mask |= err_mask;
1680 			else
1681 				ehi->err_mask |= err_mask;
1682 
1683 			ata_port_freeze(ap);
1684 		}
1685 	}
1686 }
1687 
1688 /**
1689  *      mv_interrupt - Main interrupt event handler
1690  *      @irq: unused
1691  *      @dev_instance: private data; in this case the host structure
1692  *
1693  *      Read the read-only register to determine if any host
1694  *      controllers have pending interrupts.  If so, call lower level
1695  *      routine to handle.  Also check for PCI errors which are only
1696  *      reported here.
1697  *
1698  *      LOCKING:
1699  *      This routine holds the host lock while processing pending
1700  *      interrupts.
1701  */
1702 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1703 {
1704 	struct ata_host *host = dev_instance;
1705 	unsigned int hc, handled = 0, n_hcs;
1706 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1707 	u32 irq_stat;
1708 
1709 	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1710 
1711 	/* check the cases where we either have nothing pending or have read
1712 	 * a bogus register value which can indicate HW removal or PCI fault
1713 	 */
1714 	if (!irq_stat || (0xffffffffU == irq_stat))
1715 		return IRQ_NONE;
1716 
1717 	n_hcs = mv_get_hc_count(host->ports[0]->flags);
1718 	spin_lock(&host->lock);
1719 
1720 	if (unlikely(irq_stat & PCI_ERR)) {
1721 		mv_pci_error(host, mmio);
1722 		handled = 1;
1723 		goto out_unlock;	/* skip all other HC irq handling */
1724 	}
1725 
1726 	for (hc = 0; hc < n_hcs; hc++) {
1727 		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1728 		if (relevant) {
1729 			mv_host_intr(host, relevant, hc);
1730 			handled = 1;
1731 		}
1732 	}
1733 
1734 out_unlock:
1735 	spin_unlock(&host->lock);
1736 
1737 	return IRQ_RETVAL(handled);
1738 }
1739 
1740 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1741 {
1742 	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1743 	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1744 
1745 	return hc_mmio + ofs;
1746 }
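/* e.g. hard port 0 -> HC base + 0x100, hard port 1 -> HC base + 0x200 */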
1747 
1748 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1749 {
1750 	unsigned int ofs;
1751 
1752 	switch (sc_reg_in) {
1753 	case SCR_STATUS:
1754 	case SCR_ERROR:
1755 	case SCR_CONTROL:
1756 		ofs = sc_reg_in * sizeof(u32);
1757 		break;
1758 	default:
1759 		ofs = 0xffffffffU;
1760 		break;
1761 	}
1762 	return ofs;
1763 }
1764 
1765 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1766 {
1767 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1768 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1769 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1770 
1771 	if (ofs != 0xffffffffU) {
1772 		*val = readl(addr + ofs);
1773 		return 0;
1774 	} else
1775 		return -EINVAL;
1776 }
1777 
1778 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1779 {
1780 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1781 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1782 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1783 
1784 	if (ofs != 0xffffffffU) {
1785 		writelfl(val, addr + ofs);
1786 		return 0;
1787 	} else
1788 		return -EINVAL;
1789 }
1790 
1791 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1792 {
1793 	int early_5080;
1794 
1795 	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1796 
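	/* every chip except the earliest (revision 0) 5080 parts gets
	 * bit 0 of the expansion ROM BAR control set before the bus reset
	 */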
1797 	if (!early_5080) {
1798 		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1799 		tmp |= (1 << 0);
1800 		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1801 	}
1802 
1803 	mv_reset_pci_bus(pdev, mmio);
1804 }
1805 
1806 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1807 {
1808 	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1809 }
1810 
1811 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1812 			   void __iomem *mmio)
1813 {
1814 	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1815 	u32 tmp;
1816 
1817 	tmp = readl(phy_mmio + MV5_PHY_MODE);
1818 
1819 	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
1820 	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
1821 }
1822 
1823 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1824 {
1825 	u32 tmp;
1826 
1827 	writel(0, mmio + MV_GPIO_PORT_CTL);
1828 
1829 	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1830 
1831 	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
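	/* FIXME: this sets every bit except bit 0; "&= ~(1 << 0)" (clear
	 * only bit 0) may have been intended
	 */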
1832 	tmp |= ~(1 << 0);
1833 	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1834 }
1835 
1836 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1837 			   unsigned int port)
1838 {
1839 	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1840 	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1841 	u32 tmp;
1842 	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1843 
1844 	if (fix_apm_sq) {
1845 		tmp = readl(phy_mmio + MV5_LT_MODE);
1846 		tmp |= (1 << 19);
1847 		writel(tmp, phy_mmio + MV5_LT_MODE);
1848 
1849 		tmp = readl(phy_mmio + MV5_PHY_CTL);
1850 		tmp &= ~0x3;
1851 		tmp |= 0x1;
1852 		writel(tmp, phy_mmio + MV5_PHY_CTL);
1853 	}
1854 
1855 	tmp = readl(phy_mmio + MV5_PHY_MODE);
1856 	tmp &= ~mask;
1857 	tmp |= hpriv->signal[port].pre;
1858 	tmp |= hpriv->signal[port].amps;
1859 	writel(tmp, phy_mmio + MV5_PHY_MODE);
1860 }
1861 
1862 
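/*
 * ZERO() is redefined around each reset helper below so the same
 * shorthand can clear registers relative to whichever base that helper
 * operates on (port_mmio, hc_mmio or mmio).
 */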
1863 #undef ZERO
1864 #define ZERO(reg) writel(0, port_mmio + (reg))
1865 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1866 			     unsigned int port)
1867 {
1868 	void __iomem *port_mmio = mv_port_base(mmio, port);
1869 
1870 	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1871 
1872 	mv_channel_reset(hpriv, mmio, port);
1873 
1874 	ZERO(0x028);	/* command */
1875 	writel(0x11f, port_mmio + EDMA_CFG_OFS);
1876 	ZERO(0x004);	/* timer */
1877 	ZERO(0x008);	/* irq err cause */
1878 	ZERO(0x00c);	/* irq err mask */
1879 	ZERO(0x010);	/* rq bah */
1880 	ZERO(0x014);	/* rq inp */
1881 	ZERO(0x018);	/* rq outp */
1882 	ZERO(0x01c);	/* respq bah */
1883 	ZERO(0x024);	/* respq outp */
1884 	ZERO(0x020);	/* respq inp */
1885 	ZERO(0x02c);	/* test control */
1886 	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1887 }
1888 #undef ZERO
1889 
1890 #define ZERO(reg) writel(0, hc_mmio + (reg))
1891 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1892 			unsigned int hc)
1893 {
1894 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1895 	u32 tmp;
1896 
1897 	ZERO(0x00c);
1898 	ZERO(0x010);
1899 	ZERO(0x014);
1900 	ZERO(0x018);
1901 
1902 	tmp = readl(hc_mmio + 0x20);
1903 	tmp &= 0x1c1c1c1c;
1904 	tmp |= 0x03030303;
1905 	writel(tmp, hc_mmio + 0x20);
1906 }
1907 #undef ZERO
1908 
1909 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1910 			unsigned int n_hc)
1911 {
1912 	unsigned int hc, port;
1913 
1914 	for (hc = 0; hc < n_hc; hc++) {
1915 		for (port = 0; port < MV_PORTS_PER_HC; port++)
1916 			mv5_reset_hc_port(hpriv, mmio,
1917 					  (hc * MV_PORTS_PER_HC) + port);
1918 
1919 		mv5_reset_one_hc(hpriv, mmio, hc);
1920 	}
1921 
1922 	return 0;
1923 }
1924 
1925 #undef ZERO
1926 #define ZERO(reg) writel(0, mmio + (reg))
1927 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1928 {
1929 	u32 tmp;
1930 
1931 	tmp = readl(mmio + MV_PCI_MODE);
1932 	tmp &= 0xff00ffff;
1933 	writel(tmp, mmio + MV_PCI_MODE);
1934 
1935 	ZERO(MV_PCI_DISC_TIMER);
1936 	ZERO(MV_PCI_MSI_TRIGGER);
1937 	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1938 	ZERO(HC_MAIN_IRQ_MASK_OFS);
1939 	ZERO(MV_PCI_SERR_MASK);
1940 	ZERO(PCI_IRQ_CAUSE_OFS);
1941 	ZERO(PCI_IRQ_MASK_OFS);
1942 	ZERO(MV_PCI_ERR_LOW_ADDRESS);
1943 	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1944 	ZERO(MV_PCI_ERR_ATTRIBUTE);
1945 	ZERO(MV_PCI_ERR_COMMAND);
1946 }
1947 #undef ZERO
1948 
1949 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1950 {
1951 	u32 tmp;
1952 
1953 	mv5_reset_flash(hpriv, mmio);
1954 
1955 	tmp = readl(mmio + MV_GPIO_PORT_CTL);
1956 	tmp &= 0x3;
1957 	tmp |= (1 << 5) | (1 << 6);
1958 	writel(tmp, mmio + MV_GPIO_PORT_CTL);
1959 }
1960 
1961 /**
1962  *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @hpriv: host private data (unused here)
1963  *      @mmio: base address of the HBA
 *      @n_hc: number of host controllers (unused here)
1964  *
1965  *      This routine only applies to 6xxx parts.
1966  *
1967  *      LOCKING:
1968  *      Inherited from caller.
1969  */
1970 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1971 			unsigned int n_hc)
1972 {
1973 	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1974 	int i, rc = 0;
1975 	u32 t;
1976 
1977 	/* Following procedure defined in PCI "main command and status
1978 	 * register" table.
1979 	 */
1980 	t = readl(reg);
1981 	writel(t | STOP_PCI_MASTER, reg);
1982 
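	/* wait up to ~1ms (1000 x 1us) for the PCI master to go idle */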
1983 	for (i = 0; i < 1000; i++) {
1984 		udelay(1);
1985 		t = readl(reg);
1986 		if (PCI_MASTER_EMPTY & t)
1987 			break;
1988 	}
1989 	if (!(PCI_MASTER_EMPTY & t)) {
1990 		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
1991 		rc = 1;
1992 		goto done;
1993 	}
1994 
1995 	/* set reset */
1996 	i = 5;
1997 	do {
1998 		writel(t | GLOB_SFT_RST, reg);
1999 		t = readl(reg);
2000 		udelay(1);
2001 	} while (!(GLOB_SFT_RST & t) && (i-- > 0));
2002 
2003 	if (!(GLOB_SFT_RST & t)) {
2004 		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2005 		rc = 1;
2006 		goto done;
2007 	}
2008 
2009 	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
2010 	i = 5;
2011 	do {
2012 		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2013 		t = readl(reg);
2014 		udelay(1);
2015 	} while ((GLOB_SFT_RST & t) && (i-- > 0));
2016 
2017 	if (GLOB_SFT_RST & t) {
2018 		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2019 		rc = 1;
2020 	}
2021 done:
2022 	return rc;
2023 }
2024 
2025 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2026 			   void __iomem *mmio)
2027 {
2028 	void __iomem *port_mmio;
2029 	u32 tmp;
2030 
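	/* if bit 0 of the reset config register is clear, fall back to
	 * fixed default amplitude/pre-emphasis values instead of reading
	 * them back from PHY_MODE2
	 */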
2031 	tmp = readl(mmio + MV_RESET_CFG);
2032 	if ((tmp & (1 << 0)) == 0) {
2033 		hpriv->signal[idx].amps = 0x7 << 8;
2034 		hpriv->signal[idx].pre = 0x1 << 5;
2035 		return;
2036 	}
2037 
2038 	port_mmio = mv_port_base(mmio, idx);
2039 	tmp = readl(port_mmio + PHY_MODE2);
2040 
2041 	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
2042 	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
2043 }
2044 
2045 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2046 {
2047 	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2048 }
2049 
2050 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2051 			   unsigned int port)
2052 {
2053 	void __iomem *port_mmio = mv_port_base(mmio, port);
2054 
2055 	u32 hp_flags = hpriv->hp_flags;
2056 	int fix_phy_mode2 =
2057 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2058 	int fix_phy_mode4 =
2059 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2060 	u32 m2, tmp;
2061 
2062 	if (fix_phy_mode2) {
2063 		m2 = readl(port_mmio + PHY_MODE2);
2064 		m2 &= ~(1 << 16);
2065 		m2 |= (1 << 31);
2066 		writel(m2, port_mmio + PHY_MODE2);
2067 
2068 		udelay(200);
2069 
2070 		m2 = readl(port_mmio + PHY_MODE2);
2071 		m2 &= ~((1 << 16) | (1 << 31));
2072 		writel(m2, port_mmio + PHY_MODE2);
2073 
2074 		udelay(200);
2075 	}
2076 
2077 	/* who knows what this magic does */
2078 	tmp = readl(port_mmio + PHY_MODE3);
2079 	tmp &= ~0x7F800000;
2080 	tmp |= 0x2A800000;
2081 	writel(tmp, port_mmio + PHY_MODE3);
2082 
2083 	if (fix_phy_mode4) {
2084 		u32 m4;
2085 
2086 		m4 = readl(port_mmio + PHY_MODE4);
2087 
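		/* 60X1B2 parts: save register 0x310 (apparently PHY_MODE3)
		 * across the PHY_MODE4 write below, then restore it
		 */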
2088 		if (hp_flags & MV_HP_ERRATA_60X1B2)
2089 			tmp = readl(port_mmio + 0x310);
2090 
2091 		m4 = (m4 & ~(1 << 1)) | (1 << 0);
2092 
2093 		writel(m4, port_mmio + PHY_MODE4);
2094 
2095 		if (hp_flags & MV_HP_ERRATA_60X1B2)
2096 			writel(tmp, port_mmio + 0x310);
2097 	}
2098 
2099 	/* Revert values of pre-emphasis and signal amps to the saved ones */
2100 	m2 = readl(port_mmio + PHY_MODE2);
2101 
2102 	m2 &= ~MV_M2_PREAMP_MASK;
2103 	m2 |= hpriv->signal[port].amps;
2104 	m2 |= hpriv->signal[port].pre;
2105 	m2 &= ~(1 << 16);
2106 
2107 	/* according to mvSata 3.6.1, some IIE values are fixed */
2108 	if (IS_GEN_IIE(hpriv)) {
2109 		m2 &= ~0xC30FF01F;
2110 		m2 |= 0x0000900F;
2111 	}
2112 
2113 	writel(m2, port_mmio + PHY_MODE2);
2114 }
2115 
2116 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2117 			     unsigned int port_no)
2118 {
2119 	void __iomem *port_mmio = mv_port_base(mmio, port_no);
2120 
2121 	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2122 
2123 	if (IS_GEN_II(hpriv)) {
2124 		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2125 		ifctl |= (1 << 7);		/* enable gen2i speed */
2126 		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2127 		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2128 	}
2129 
2130 	udelay(25);		/* allow reset propagation */
2131 
2132 	/* Spec never mentions clearing the bit.  Marvell's driver does
2133 	 * clear the bit, however.
2134 	 */
2135 	writelfl(0, port_mmio + EDMA_CMD_OFS);
2136 
2137 	hpriv->ops->phy_errata(hpriv, mmio, port_no);
2138 
2139 	if (IS_GEN_I(hpriv))
2140 		mdelay(1);
2141 }
2142 
2143 /**
2144  *      mv_phy_reset - Perform eDMA reset followed by COMRESET
2145  *      @ap: ATA channel to manipulate
 *      @class: resulting classification of the attached device
 *      @deadline: deadline jiffies for the operation
2146  *
2147  *      Part of this is taken from __sata_phy_reset, reworked for use
2148  *      as part of the libata EH reset path.
2149  *
2150  *      LOCKING:
2151  *      Inherited from caller.  Called from EH context, so this
2152  *      routine may sleep (it uses msleep while waiting on the link).
2153  */
2154 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2155 			 unsigned long deadline)
2156 {
2157 	struct mv_port_priv *pp	= ap->private_data;
2158 	struct mv_host_priv *hpriv = ap->host->private_data;
2159 	void __iomem *port_mmio = mv_ap_base(ap);
2160 	int retry = 5;
2161 	u32 sstatus;
2162 
2163 	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2164 
2165 #ifdef DEBUG
2166 	{
2167 		u32 sstatus, serror, scontrol;
2168 
2169 		mv_scr_read(ap, SCR_STATUS, &sstatus);
2170 		mv_scr_read(ap, SCR_ERROR, &serror);
2171 		mv_scr_read(ap, SCR_CONTROL, &scontrol);
2172 		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2173 			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
2174 	}
2175 #endif
2176 
2177 	/* Issue COMRESET via SControl */
2178 comreset_retry:
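	/* DET=1 kicks off the interface init sequence (COMRESET);
	 * writing DET=0 afterwards releases it so the link can come up
	 */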
2179 	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2180 	msleep(1);
2181 
2182 	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2183 	msleep(20);
2184 
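	/* poll until the low DET bits of SStatus read back as 3 (phy
	 * established) or 0 (nothing attached), or the deadline passes
	 */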
2185 	do {
2186 		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2187 		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2188 			break;
2189 
2190 		msleep(1);
2191 	} while (time_before(jiffies, deadline));
2192 
2193 	/* work around errata */
2194 	if (IS_GEN_II(hpriv) &&
2195 	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2196 	    (retry-- > 0))
2197 		goto comreset_retry;
2198 
2199 #ifdef DEBUG
2200 	{
2201 		u32 sstatus, serror, scontrol;
2202 
2203 		mv_scr_read(ap, SCR_STATUS, &sstatus);
2204 		mv_scr_read(ap, SCR_ERROR, &serror);
2205 		mv_scr_read(ap, SCR_CONTROL, &scontrol);
2206 		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2207 			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
2208 	}
2209 #endif
2210 
2211 	if (ata_link_offline(&ap->link)) {
2212 		*class = ATA_DEV_NONE;
2213 		return;
2214 	}
2215 
2216 	/* Even after SStatus reflects that the device is ready,
2217 	 * it seems to take a while for the link to be fully
2218 	 * established (and thus for Status to stop reading 0x80/0x7F),
2219 	 * so we poll a bit for that here.
2220 	 */
2221 	retry = 20;
2222 	while (1) {
2223 		u8 drv_stat = ata_check_status(ap);
2224 		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2225 			break;
2226 		msleep(500);
2227 		if (retry-- <= 0)
2228 			break;
2229 		if (time_after(jiffies, deadline))
2230 			break;
2231 	}
2232 
2233 	/* FIXME: if we passed the deadline, the following
2234 	 * code probably produces an invalid result
2235 	 */
2236 
2237 	/* finally, read device signature from TF registers */
2238 	*class = ata_dev_try_classify(ap->link.device, 1, NULL);
2239 
2240 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2241 
2242 	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2243 
2244 	VPRINTK("EXIT\n");
2245 }
2246 
2247 static int mv_prereset(struct ata_link *link, unsigned long deadline)
2248 {
2249 	struct ata_port *ap = link->ap;
2250 	struct mv_port_priv *pp	= ap->private_data;
2251 	struct ata_eh_context *ehc = &link->eh_context;
2252 	int rc;
2253 
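	/* force a hardreset if eDMA cannot be stopped cleanly, or if this
	 * is the first reset attempt on this port
	 */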
2254 	rc = mv_stop_dma(ap);
2255 	if (rc)
2256 		ehc->i.action |= ATA_EH_HARDRESET;
2257 
2258 	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2259 		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2260 		ehc->i.action |= ATA_EH_HARDRESET;
2261 	}
2262 
2263 	/* if we're about to do hardreset, nothing more to do */
2264 	if (ehc->i.action & ATA_EH_HARDRESET)
2265 		return 0;
2266 
2267 	if (ata_link_online(link))
2268 		rc = ata_wait_ready(ap, deadline);
2269 	else
2270 		rc = -ENODEV;
2271 
2272 	return rc;
2273 }
2274 
2275 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2276 			unsigned long deadline)
2277 {
2278 	struct ata_port *ap = link->ap;
2279 	struct mv_host_priv *hpriv = ap->host->private_data;
2280 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2281 
2282 	mv_stop_dma(ap);
2283 
2284 	mv_channel_reset(hpriv, mmio, ap->port_no);
2285 
2286 	mv_phy_reset(ap, class, deadline);
2287 
2288 	return 0;
2289 }
2290 
2291 static void mv_postreset(struct ata_link *link, unsigned int *classes)
2292 {
2293 	struct ata_port *ap = link->ap;
2294 	u32 serr;
2295 
2296 	/* print link status */
2297 	sata_print_link_status(link);
2298 
2299 	/* clear SError */
2300 	sata_scr_read(link, SCR_ERROR, &serr);
2301 	sata_scr_write_flush(link, SCR_ERROR, serr);
2302 
2303 	/* bail out if no device is present */
2304 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2305 		DPRINTK("EXIT, no device\n");
2306 		return;
2307 	}
2308 
2309 	/* set up device control */
2310 	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2311 }
2312 
2313 static void mv_error_handler(struct ata_port *ap)
2314 {
2315 	ata_do_eh(ap, mv_prereset, ata_std_softreset,
2316 		  mv_hardreset, mv_postreset);
2317 }
2318 
2319 static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2320 {
2321 	mv_stop_dma(qc->ap);
2322 }
2323 
2324 static void mv_eh_freeze(struct ata_port *ap)
2325 {
2326 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2327 	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2328 	u32 tmp, mask;
2329 	unsigned int shift;
2330 
2331 	/* FIXME: handle coalescing completion events properly */
2332 
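	/* each port owns an (err, done) bit pair in the main IRQ mask;
	 * ports on the second HC sit one extra bit up, apparently to skip
	 * the ports 0-3 coalescing-done bit between the two groups
	 */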
2333 	shift = ap->port_no * 2;
2334 	if (hc > 0)
2335 		shift++;
2336 
2337 	mask = 0x3 << shift;
2338 
2339 	/* disable assertion of portN err, done events */
2340 	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2341 	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2342 }
2343 
2344 static void mv_eh_thaw(struct ata_port *ap)
2345 {
2346 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2347 	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2348 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2349 	void __iomem *port_mmio = mv_ap_base(ap);
2350 	u32 tmp, mask, hc_irq_cause;
2351 	unsigned int shift, hc_port_no = ap->port_no;
2352 
2353 	/* FIXME: handle coalescing completion events properly */
2354 
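	/* hc_port_no is this port's index (0-3) within its own HC */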
2355 	shift = ap->port_no * 2;
2356 	if (hc > 0) {
2357 		shift++;
2358 		hc_port_no -= 4;
2359 	}
2360 
2361 	mask = 0x3 << shift;
2362 
2363 	/* clear EDMA errors on this port */
2364 	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2365 
2366 	/* clear pending irq events */
2367 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2368 	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
2369 	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2370 	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2371 
2372 	/* enable assertion of portN err, done events */
2373 	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2374 	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2375 }
2376 
2377 /**
2378  *      mv_port_init - Perform some early initialization on a single port.
2379  *      @port: libata data structure storing shadow register addresses
2380  *      @port_mmio: base address of the port
2381  *
2382  *      Initialize shadow register mmio addresses, clear outstanding
2383  *      interrupts on the port, and unmask interrupts for the future
2384  *      start of the port.
2385  *
2386  *      LOCKING:
2387  *      Inherited from caller.
2388  */
2389 static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
2390 {
2391 	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2392 	unsigned serr_ofs;
2393 
2394 	/* PIO related setup
2395 	 */
2396 	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2397 	port->error_addr =
2398 		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2399 	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2400 	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2401 	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2402 	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2403 	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2404 	port->status_addr =
2405 		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2406 	/* special case: control/altstatus doesn't have ATA_REG_ address */
2407 	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2408 
2409 	/* unused: */
2410 	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2411 
2412 	/* Clear any currently outstanding port interrupt conditions */
2413 	serr_ofs = mv_scr_offset(SCR_ERROR);
2414 	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2415 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2416 
2417 	/* unmask all EDMA error interrupts */
2418 	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2419 
2420 	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2421 		readl(port_mmio + EDMA_CFG_OFS),
2422 		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2423 		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2424 }
2425 
2426 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2427 {
2428 	struct pci_dev *pdev = to_pci_dev(host->dev);
2429 	struct mv_host_priv *hpriv = host->private_data;
2430 	u32 hp_flags = hpriv->hp_flags;
2431 
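	/* pick the ops vector, chip generation and errata flags based on
	 * the board index from the PCI ID table and the PCI revision ID
	 */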
2432 	switch (board_idx) {
2433 	case chip_5080:
2434 		hpriv->ops = &mv5xxx_ops;
2435 		hp_flags |= MV_HP_GEN_I;
2436 
2437 		switch (pdev->revision) {
2438 		case 0x1:
2439 			hp_flags |= MV_HP_ERRATA_50XXB0;
2440 			break;
2441 		case 0x3:
2442 			hp_flags |= MV_HP_ERRATA_50XXB2;
2443 			break;
2444 		default:
2445 			dev_printk(KERN_WARNING, &pdev->dev,
2446 			   "Applying 50XXB2 workarounds to unknown rev\n");
2447 			hp_flags |= MV_HP_ERRATA_50XXB2;
2448 			break;
2449 		}
2450 		break;
2451 
2452 	case chip_504x:
2453 	case chip_508x:
2454 		hpriv->ops = &mv5xxx_ops;
2455 		hp_flags |= MV_HP_GEN_I;
2456 
2457 		switch (pdev->revision) {
2458 		case 0x0:
2459 			hp_flags |= MV_HP_ERRATA_50XXB0;
2460 			break;
2461 		case 0x3:
2462 			hp_flags |= MV_HP_ERRATA_50XXB2;
2463 			break;
2464 		default:
2465 			dev_printk(KERN_WARNING, &pdev->dev,
2466 			   "Applying 50XXB2 workarounds to unknown rev\n");
2467 			hp_flags |= MV_HP_ERRATA_50XXB2;
2468 			break;
2469 		}
2470 		break;
2471 
2472 	case chip_604x:
2473 	case chip_608x:
2474 		hpriv->ops = &mv6xxx_ops;
2475 		hp_flags |= MV_HP_GEN_II;
2476 
2477 		switch (pdev->revision) {
2478 		case 0x7:
2479 			hp_flags |= MV_HP_ERRATA_60X1B2;
2480 			break;
2481 		case 0x9:
2482 			hp_flags |= MV_HP_ERRATA_60X1C0;
2483 			break;
2484 		default:
2485 			dev_printk(KERN_WARNING, &pdev->dev,
2486 				   "Applying 60X1B2 workarounds to unknown rev\n");
2487 			hp_flags |= MV_HP_ERRATA_60X1B2;
2488 			break;
2489 		}
2490 		break;
2491 
2492 	case chip_7042:
2493 	case chip_6042:
2494 		hpriv->ops = &mv6xxx_ops;
2495 		hp_flags |= MV_HP_GEN_IIE;
2496 
2497 		switch (pdev->revision) {
2498 		case 0x0:
2499 			hp_flags |= MV_HP_ERRATA_XX42A0;
2500 			break;
2501 		case 0x1:
2502 			hp_flags |= MV_HP_ERRATA_60X1C0;
2503 			break;
2504 		default:
2505 			dev_printk(KERN_WARNING, &pdev->dev,
2506 			   "Applying 60X1C0 workarounds to unknown rev\n");
2507 			hp_flags |= MV_HP_ERRATA_60X1C0;
2508 			break;
2509 		}
2510 		break;
2511 
2512 	default:
2513 		dev_printk(KERN_ERR, &pdev->dev,
2514 			   "BUG: invalid board index %u\n", board_idx);
2515 		return 1;
2516 	}
2517 
2518 	hpriv->hp_flags = hp_flags;
2519 
2520 	return 0;
2521 }
2522 
2523 /**
2524  *      mv_init_host - Perform some early initialization of the host.
2525  *      @host: ATA host to initialize
2526  *      @board_idx: controller index
2527  *
2528  *      If possible, do an early global reset of the host.  Then do
2529  *      our port init and clear/unmask all/relevant host interrupts.
2530  *
2531  *      LOCKING:
2532  *      Inherited from caller.
2533  */
2534 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2535 {
2536 	int rc = 0, n_hc, port, hc;
2537 	struct pci_dev *pdev = to_pci_dev(host->dev);
2538 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2539 	struct mv_host_priv *hpriv = host->private_data;
2540 
2541 	/* global interrupt mask */
2542 	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2543 
2544 	rc = mv_chip_id(host, board_idx);
2545 	if (rc)
2546 		goto done;
2547 
2548 	n_hc = mv_get_hc_count(host->ports[0]->flags);
2549 
2550 	for (port = 0; port < host->n_ports; port++)
2551 		hpriv->ops->read_preamp(hpriv, port, mmio);
2552 
2553 	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2554 	if (rc)
2555 		goto done;
2556 
2557 	hpriv->ops->reset_flash(hpriv, mmio);
2558 	hpriv->ops->reset_bus(pdev, mmio);
2559 	hpriv->ops->enable_leds(hpriv, mmio);
2560 
2561 	for (port = 0; port < host->n_ports; port++) {
2562 		if (IS_GEN_II(hpriv)) {
2563 			void __iomem *port_mmio = mv_port_base(mmio, port);
2564 
2565 			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2566 			ifctl |= (1 << 7);		/* enable gen2i speed */
2567 			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2568 			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2569 		}
2570 
2571 		hpriv->ops->phy_errata(hpriv, mmio, port);
2572 	}
2573 
2574 	for (port = 0; port < host->n_ports; port++) {
2575 		struct ata_port *ap = host->ports[port];
2576 		void __iomem *port_mmio = mv_port_base(mmio, port);
2577 		unsigned int offset = port_mmio - mmio;
2578 
2579 		mv_port_init(&ap->ioaddr, port_mmio);
2580 
2581 		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2582 		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2583 	}
2584 
2585 	for (hc = 0; hc < n_hc; hc++) {
2586 		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2587 
2588 		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2589 			"(before clear)=0x%08x\n", hc,
2590 			readl(hc_mmio + HC_CFG_OFS),
2591 			readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2592 
2593 		/* Clear any currently outstanding hc interrupt conditions */
2594 		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2595 	}
2596 
2597 	/* Clear any currently outstanding host interrupt conditions */
2598 	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2599 
2600 	/* and unmask interrupt generation for host regs */
2601 	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
2602 
2603 	if (IS_GEN_I(hpriv))
2604 		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2605 	else
2606 		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2607 
2608 	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2609 		"PCI int cause/mask=0x%08x/0x%08x\n",
2610 		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2611 		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2612 		readl(mmio + PCI_IRQ_CAUSE_OFS),
2613 		readl(mmio + PCI_IRQ_MASK_OFS));
2614 
2615 done:
2616 	return rc;
2617 }
2618 
2619 /**
2620  *      mv_print_info - Dump key info to kernel log for perusal.
2621  *      @host: ATA host to print info about
2622  *
2623  *      FIXME: complete this.
2624  *
2625  *      LOCKING:
2626  *      Inherited from caller.
2627  */
2628 static void mv_print_info(struct ata_host *host)
2629 {
2630 	struct pci_dev *pdev = to_pci_dev(host->dev);
2631 	struct mv_host_priv *hpriv = host->private_data;
2632 	u8 scc;
2633 	const char *scc_s, *gen;
2634 
2635 	/* Read the PCI class code so we can report whether the part
2636 	 * presents itself as a plain SCSI or as a RAID class device
2637 	 */
2638 	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2639 	if (scc == 0)
2640 		scc_s = "SCSI";
2641 	else if (scc == 0x01)
2642 		scc_s = "RAID";
2643 	else
2644 		scc_s = "?";
2645 
2646 	if (IS_GEN_I(hpriv))
2647 		gen = "I";
2648 	else if (IS_GEN_II(hpriv))
2649 		gen = "II";
2650 	else if (IS_GEN_IIE(hpriv))
2651 		gen = "IIE";
2652 	else
2653 		gen = "?";
2654 
2655 	dev_printk(KERN_INFO, &pdev->dev,
2656 	       "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2657 	       gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2658 	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2659 }
2660 
2661 /**
2662  *      mv_init_one - handle a positive probe of a Marvell host
2663  *      @pdev: PCI device found
2664  *      @ent: PCI device ID entry for the matched host
2665  *
2666  *      LOCKING:
2667  *      Inherited from caller.
2668  */
2669 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2670 {
2671 	static int printed_version;
2672 	unsigned int board_idx = (unsigned int)ent->driver_data;
2673 	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2674 	struct ata_host *host;
2675 	struct mv_host_priv *hpriv;
2676 	int n_ports, rc;
2677 
2678 	if (!printed_version++)
2679 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2680 
2681 	/* allocate host */
2682 	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2683 
2684 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2685 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2686 	if (!host || !hpriv)
2687 		return -ENOMEM;
2688 	host->private_data = hpriv;
2689 
2690 	/* acquire resources */
2691 	rc = pcim_enable_device(pdev);
2692 	if (rc)
2693 		return rc;
2694 
2695 	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2696 	if (rc == -EBUSY)
2697 		pcim_pin_device(pdev);
2698 	if (rc)
2699 		return rc;
2700 	host->iomap = pcim_iomap_table(pdev);
2701 
2702 	rc = pci_go_64(pdev);
2703 	if (rc)
2704 		return rc;
2705 
2706 	/* initialize adapter */
2707 	rc = mv_init_host(host, board_idx);
2708 	if (rc)
2709 		return rc;
2710 
2711 	/* Enable interrupts */
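	/* if MSI was requested but cannot be enabled, make sure legacy
	 * INTx generation is turned on
	 */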
2712 	if (msi && pci_enable_msi(pdev))
2713 		pci_intx(pdev, 1);
2714 
2715 	mv_dump_pci_cfg(pdev, 0x68);
2716 	mv_print_info(host);
2717 
2718 	pci_set_master(pdev);
2719 	pci_try_set_mwi(pdev);
2720 	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2721 				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
2722 }
2723 
2724 static int __init mv_init(void)
2725 {
2726 	return pci_register_driver(&mv_pci_driver);
2727 }
2728 
2729 static void __exit mv_exit(void)
2730 {
2731 	pci_unregister_driver(&mv_pci_driver);
2732 }
2733 
2734 MODULE_AUTHOR("Brett Russ");
2735 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2736 MODULE_LICENSE("GPL");
2737 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2738 MODULE_VERSION(DRV_VERSION);
2739 
2740 module_param(msi, int, 0444);
2741 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2742 
2743 module_init(mv_init);
2744 module_exit(mv_exit);
2745