xref: /linux/drivers/ata/sata_mv.c (revision 2b8232ce512105e28453f301d1510de8363bccd1)
1 /*
2  * sata_mv.c - Marvell SATA support
3  *
4  * Copyright 2005: EMC Corporation, all rights reserved.
5  * Copyright 2005 Red Hat, Inc.  All rights reserved.
6  *
7  * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; version 2 of the License.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23 
24 /*
25   sata_mv TODO list:
26 
27   1) Needs a full errata audit for all chipsets.  I implemented most
28   of the errata workarounds found in the Marvell vendor driver, but
29   I distinctly remember that a couple of workarounds (one related to
30   PCI-X) are still needed.
31 
32   4) Add NCQ support (easy to intermediate, once new-EH support appears)
33 
34   5) Investigate problems with PCI Message Signalled Interrupts (MSI).
35 
36   6) Add port multiplier support (intermediate)
37 
38   8) Develop a low-power-consumption strategy, and implement it.
39 
40   9) [Experiment, low priority] See if ATAPI can be supported using
41   "unknown FIS" or "vendor-specific FIS" support, or something creative
42   like that.
43 
44   10) [Experiment, low priority] Investigate interrupt coalescing.
45   Quite often, especially with PCI Message Signalled Interrupts (MSI),
46   the overhead reduced by interrupt mitigation is not worth the
47   latency cost.
48 
49   11) [Experiment, Marvell value added] Is it possible to use target
50   mode to cross-connect two Linux boxes with Marvell cards?  If so,
51   creating LibATA target mode support would be very interesting.
52 
53   Target mode, for those without docs, is the ability to directly
54   connect two SATA controllers.
55 
56   13) Verify that 7042 is fully supported.  I only have a 6042.
57 
58 */
59 
60 
61 #include <linux/kernel.h>
62 #include <linux/module.h>
63 #include <linux/pci.h>
64 #include <linux/init.h>
65 #include <linux/blkdev.h>
66 #include <linux/delay.h>
67 #include <linux/interrupt.h>
68 #include <linux/dma-mapping.h>
69 #include <linux/device.h>
70 #include <scsi/scsi_host.h>
71 #include <scsi/scsi_cmnd.h>
72 #include <scsi/scsi_device.h>
73 #include <linux/libata.h>
74 
75 #define DRV_NAME	"sata_mv"
76 #define DRV_VERSION	"1.01"
77 
78 enum {
79 	/* BARs are enumerated in terms of pci_resource_start() */
80 	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
81 	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
82 	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */
83 
84 	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
85 	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */
86 
87 	MV_PCI_REG_BASE		= 0,
88 	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
89 	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
90 	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
91 	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
92 	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
93 	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),
94 
95 	MV_SATAHC0_REG_BASE	= 0x20000,
96 	MV_FLASH_CTL		= 0x1046c,
97 	MV_GPIO_PORT_CTL	= 0x104f0,
98 	MV_RESET_CFG		= 0x180d8,
99 
100 	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
101 	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
102 	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
103 	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
104 
105 	MV_MAX_Q_DEPTH		= 32,
106 	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
107 
108 	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
109 	 * CRPB needs alignment on a 256B boundary. Size == 256B
110 	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
111 	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
112 	 */
113 	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
114 	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
115 	MV_MAX_SG_CT		= 176,
116 	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
117 	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
118 
119 	MV_PORTS_PER_HC		= 4,
120 	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
121 	MV_PORT_HC_SHIFT	= 2,
122 	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
123 	MV_PORT_MASK		= 3,
124 
125 	/* Host Flags */
126 	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
127 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
128 	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
129 				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
130 				  ATA_FLAG_PIO_POLLING,
131 	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,
132 
133 	CRQB_FLAG_READ		= (1 << 0),
134 	CRQB_TAG_SHIFT		= 1,
135 	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
136 	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
137 	CRQB_CMD_ADDR_SHIFT	= 8,
138 	CRQB_CMD_CS		= (0x2 << 11),
139 	CRQB_CMD_LAST		= (1 << 15),
140 
141 	CRPB_FLAG_STATUS_SHIFT	= 8,
142 	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
143 	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */
144 
145 	EPRD_FLAG_END_OF_TBL	= (1 << 31),
146 
147 	/* PCI interface registers */
148 
149 	PCI_COMMAND_OFS		= 0xc00,
150 
151 	PCI_MAIN_CMD_STS_OFS	= 0xd30,
152 	STOP_PCI_MASTER		= (1 << 2),
153 	PCI_MASTER_EMPTY	= (1 << 3),
154 	GLOB_SFT_RST		= (1 << 4),
155 
156 	MV_PCI_MODE		= 0xd00,
157 	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
158 	MV_PCI_DISC_TIMER	= 0xd04,
159 	MV_PCI_MSI_TRIGGER	= 0xc38,
160 	MV_PCI_SERR_MASK	= 0xc28,
161 	MV_PCI_XBAR_TMOUT	= 0x1d04,
162 	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
163 	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
164 	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
165 	MV_PCI_ERR_COMMAND	= 0x1d50,
166 
167 	PCI_IRQ_CAUSE_OFS		= 0x1d58,
168 	PCI_IRQ_MASK_OFS		= 0x1d5c,
169 	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */
170 
171 	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
172 	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
173 	PORT0_ERR		= (1 << 0),	/* shift by port # */
174 	PORT0_DONE		= (1 << 1),	/* shift by port # */
175 	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
176 	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
177 	PCI_ERR			= (1 << 18),
178 	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
179 	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
180 	PORTS_0_3_COAL_DONE	= (1 << 8),
181 	PORTS_4_7_COAL_DONE	= (1 << 17),
182 	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
183 	GPIO_INT		= (1 << 22),
184 	SELF_INT		= (1 << 23),
185 	TWSI_INT		= (1 << 24),
186 	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
187 	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
188 	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
189 				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
190 				   HC_MAIN_RSVD),
191 	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
192 				   HC_MAIN_RSVD_5),
193 
194 	/* SATAHC registers */
195 	HC_CFG_OFS		= 0,
196 
197 	HC_IRQ_CAUSE_OFS	= 0x14,
198 	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
199 	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
200 	DEV_IRQ			= (1 << 8),	/* shift by port # */
201 
202 	/* Shadow block registers */
203 	SHD_BLK_OFS		= 0x100,
204 	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */
205 
206 	/* SATA registers */
207 	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
208 	SATA_ACTIVE_OFS		= 0x350,
209 	PHY_MODE3		= 0x310,
210 	PHY_MODE4		= 0x314,
211 	PHY_MODE2		= 0x330,
212 	MV5_PHY_MODE		= 0x74,
213 	MV5_LT_MODE		= 0x30,
214 	MV5_PHY_CTL		= 0x0C,
215 	SATA_INTERFACE_CTL	= 0x050,
216 
217 	MV_M2_PREAMP_MASK	= 0x7e0,
218 
219 	/* Port registers */
220 	EDMA_CFG_OFS		= 0,
221 	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
222 	EDMA_CFG_NCQ		= (1 << 5),
223 	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
224 	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
225 	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */
226 
227 	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
228 	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
229 	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
230 	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
231 	EDMA_ERR_DEV		= (1 << 2),	/* device error */
232 	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
233 	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
234 	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
235 	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
236 	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
237 	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
238 	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
239 	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
240 	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
241 	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
242 	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
243 	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
244 	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
245 	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
246 	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
247 	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
248 	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
249 	EDMA_ERR_OVERRUN_5	= (1 << 5),
250 	EDMA_ERR_UNDERRUN_5	= (1 << 6),
251 	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
252 				  EDMA_ERR_PRD_PAR |
253 				  EDMA_ERR_DEV_DCON |
254 				  EDMA_ERR_DEV_CON |
255 				  EDMA_ERR_SERR |
256 				  EDMA_ERR_SELF_DIS |
257 				  EDMA_ERR_CRQB_PAR |
258 				  EDMA_ERR_CRPB_PAR |
259 				  EDMA_ERR_INTRL_PAR |
260 				  EDMA_ERR_IORDY |
261 				  EDMA_ERR_LNK_CTRL_RX_2 |
262 				  EDMA_ERR_LNK_DATA_RX |
263 				  EDMA_ERR_LNK_DATA_TX |
264 				  EDMA_ERR_TRANS_PROTO,
265 	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
266 				  EDMA_ERR_PRD_PAR |
267 				  EDMA_ERR_DEV_DCON |
268 				  EDMA_ERR_DEV_CON |
269 				  EDMA_ERR_OVERRUN_5 |
270 				  EDMA_ERR_UNDERRUN_5 |
271 				  EDMA_ERR_SELF_DIS_5 |
272 				  EDMA_ERR_CRQB_PAR |
273 				  EDMA_ERR_CRPB_PAR |
274 				  EDMA_ERR_INTRL_PAR |
275 				  EDMA_ERR_IORDY,
276 
277 	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
278 	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
279 
280 	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
281 	EDMA_REQ_Q_PTR_SHIFT	= 5,
282 
283 	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
284 	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
285 	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
286 	EDMA_RSP_Q_PTR_SHIFT	= 3,
287 
288 	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
289 	EDMA_EN			= (1 << 0),	/* enable EDMA */
290 	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
291 	ATA_RST			= (1 << 2),	/* reset trans/link/phy */
292 
293 	EDMA_IORDY_TMOUT	= 0x34,
294 	EDMA_ARB_CFG		= 0x38,
295 
296 	/* Host private flags (hp_flags) */
297 	MV_HP_FLAG_MSI		= (1 << 0),
298 	MV_HP_ERRATA_50XXB0	= (1 << 1),
299 	MV_HP_ERRATA_50XXB2	= (1 << 2),
300 	MV_HP_ERRATA_60X1B2	= (1 << 3),
301 	MV_HP_ERRATA_60X1C0	= (1 << 4),
302 	MV_HP_ERRATA_XX42A0	= (1 << 5),
303 	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
304 	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
305 	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
306 
307 	/* Port private flags (pp_flags) */
308 	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
309 	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
310 };
311 
312 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
313 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
314 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
315 
316 enum {
317 	/* DMA boundary 0xffff is required by the s/g splitting
318 	 * we need on /length/ in mv_fill_sg().
319 	 */
320 	MV_DMA_BOUNDARY		= 0xffffU,
321 
322 	/* mask of register bits containing lower 32 bits
323 	 * of EDMA request queue DMA address
324 	 */
325 	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,
326 
327 	/* ditto, for response queue */
328 	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
329 };
330 
331 enum chip_type {
332 	chip_504x,
333 	chip_508x,
334 	chip_5080,
335 	chip_604x,
336 	chip_608x,
337 	chip_6042,
338 	chip_7042,
339 };
340 
341 /* Command ReQuest Block: 32B */
342 struct mv_crqb {
343 	__le32			sg_addr;
344 	__le32			sg_addr_hi;
345 	__le16			ctrl_flags;
346 	__le16			ata_cmd[11];
347 };
348 
349 struct mv_crqb_iie {
350 	__le32			addr;
351 	__le32			addr_hi;
352 	__le32			flags;
353 	__le32			len;
354 	__le32			ata_cmd[4];
355 };
356 
357 /* Command ResPonse Block: 8B */
358 struct mv_crpb {
359 	__le16			id;
360 	__le16			flags;
361 	__le32			tmstmp;
362 };
363 
364 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
365 struct mv_sg {
366 	__le32			addr;
367 	__le32			flags_size;
368 	__le32			addr_hi;
369 	__le32			reserved;
370 };
371 
372 struct mv_port_priv {
373 	struct mv_crqb		*crqb;
374 	dma_addr_t		crqb_dma;
375 	struct mv_crpb		*crpb;
376 	dma_addr_t		crpb_dma;
377 	struct mv_sg		*sg_tbl;
378 	dma_addr_t		sg_tbl_dma;
379 
380 	unsigned int		req_idx;
381 	unsigned int		resp_idx;
382 
383 	u32			pp_flags;
384 };
385 
386 struct mv_port_signal {
387 	u32			amps;
388 	u32			pre;
389 };
390 
391 struct mv_host_priv;
392 struct mv_hw_ops {
393 	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
394 			   unsigned int port);
395 	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
396 	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
397 			   void __iomem *mmio);
398 	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
399 			unsigned int n_hc);
400 	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
401 	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
402 };
403 
404 struct mv_host_priv {
405 	u32			hp_flags;
406 	struct mv_port_signal	signal[8];
407 	const struct mv_hw_ops	*ops;
408 };
409 
410 static void mv_irq_clear(struct ata_port *ap);
411 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
412 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
413 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
414 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
415 static int mv_port_start(struct ata_port *ap);
416 static void mv_port_stop(struct ata_port *ap);
417 static void mv_qc_prep(struct ata_queued_cmd *qc);
418 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
419 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
420 static void mv_error_handler(struct ata_port *ap);
421 static void mv_post_int_cmd(struct ata_queued_cmd *qc);
422 static void mv_eh_freeze(struct ata_port *ap);
423 static void mv_eh_thaw(struct ata_port *ap);
424 static int mv_slave_config(struct scsi_device *sdev);
425 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
426 
427 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
428 			   unsigned int port);
429 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
430 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
431 			   void __iomem *mmio);
432 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
433 			unsigned int n_hc);
434 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
435 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
436 
437 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
438 			   unsigned int port);
439 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
440 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
441 			   void __iomem *mmio);
442 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
443 			unsigned int n_hc);
444 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
445 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
446 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
447 			     unsigned int port_no);
448 
449 static struct scsi_host_template mv5_sht = {
450 	.module			= THIS_MODULE,
451 	.name			= DRV_NAME,
452 	.ioctl			= ata_scsi_ioctl,
453 	.queuecommand		= ata_scsi_queuecmd,
454 	.can_queue		= ATA_DEF_QUEUE,
455 	.this_id		= ATA_SHT_THIS_ID,
456 	.sg_tablesize		= MV_MAX_SG_CT / 2,
457 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
458 	.emulated		= ATA_SHT_EMULATED,
459 	.use_clustering		= 1,
460 	.proc_name		= DRV_NAME,
461 	.dma_boundary		= MV_DMA_BOUNDARY,
462 	.slave_configure	= mv_slave_config,
463 	.slave_destroy		= ata_scsi_slave_destroy,
464 	.bios_param		= ata_std_bios_param,
465 };
466 
467 static struct scsi_host_template mv6_sht = {
468 	.module			= THIS_MODULE,
469 	.name			= DRV_NAME,
470 	.ioctl			= ata_scsi_ioctl,
471 	.queuecommand		= ata_scsi_queuecmd,
472 	.can_queue		= ATA_DEF_QUEUE,
473 	.this_id		= ATA_SHT_THIS_ID,
474 	.sg_tablesize		= MV_MAX_SG_CT / 2,
475 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
476 	.emulated		= ATA_SHT_EMULATED,
477 	.use_clustering		= 1,
478 	.proc_name		= DRV_NAME,
479 	.dma_boundary		= MV_DMA_BOUNDARY,
480 	.slave_configure	= mv_slave_config,
481 	.slave_destroy		= ata_scsi_slave_destroy,
482 	.bios_param		= ata_std_bios_param,
483 };
484 
485 static const struct ata_port_operations mv5_ops = {
486 	.tf_load		= ata_tf_load,
487 	.tf_read		= ata_tf_read,
488 	.check_status		= ata_check_status,
489 	.exec_command		= ata_exec_command,
490 	.dev_select		= ata_std_dev_select,
491 
492 	.cable_detect		= ata_cable_sata,
493 
494 	.qc_prep		= mv_qc_prep,
495 	.qc_issue		= mv_qc_issue,
496 	.data_xfer		= ata_data_xfer,
497 
498 	.irq_clear		= mv_irq_clear,
499 	.irq_on			= ata_irq_on,
500 
501 	.error_handler		= mv_error_handler,
502 	.post_internal_cmd	= mv_post_int_cmd,
503 	.freeze			= mv_eh_freeze,
504 	.thaw			= mv_eh_thaw,
505 
506 	.scr_read		= mv5_scr_read,
507 	.scr_write		= mv5_scr_write,
508 
509 	.port_start		= mv_port_start,
510 	.port_stop		= mv_port_stop,
511 };
512 
513 static const struct ata_port_operations mv6_ops = {
514 	.tf_load		= ata_tf_load,
515 	.tf_read		= ata_tf_read,
516 	.check_status		= ata_check_status,
517 	.exec_command		= ata_exec_command,
518 	.dev_select		= ata_std_dev_select,
519 
520 	.cable_detect		= ata_cable_sata,
521 
522 	.qc_prep		= mv_qc_prep,
523 	.qc_issue		= mv_qc_issue,
524 	.data_xfer		= ata_data_xfer,
525 
526 	.irq_clear		= mv_irq_clear,
527 	.irq_on			= ata_irq_on,
528 
529 	.error_handler		= mv_error_handler,
530 	.post_internal_cmd	= mv_post_int_cmd,
531 	.freeze			= mv_eh_freeze,
532 	.thaw			= mv_eh_thaw,
533 
534 	.scr_read		= mv_scr_read,
535 	.scr_write		= mv_scr_write,
536 
537 	.port_start		= mv_port_start,
538 	.port_stop		= mv_port_stop,
539 };
540 
541 static const struct ata_port_operations mv_iie_ops = {
542 	.tf_load		= ata_tf_load,
543 	.tf_read		= ata_tf_read,
544 	.check_status		= ata_check_status,
545 	.exec_command		= ata_exec_command,
546 	.dev_select		= ata_std_dev_select,
547 
548 	.cable_detect		= ata_cable_sata,
549 
550 	.qc_prep		= mv_qc_prep_iie,
551 	.qc_issue		= mv_qc_issue,
552 	.data_xfer		= ata_data_xfer,
553 
554 	.irq_clear		= mv_irq_clear,
555 	.irq_on			= ata_irq_on,
556 
557 	.error_handler		= mv_error_handler,
558 	.post_internal_cmd	= mv_post_int_cmd,
559 	.freeze			= mv_eh_freeze,
560 	.thaw			= mv_eh_thaw,
561 
562 	.scr_read		= mv_scr_read,
563 	.scr_write		= mv_scr_write,
564 
565 	.port_start		= mv_port_start,
566 	.port_stop		= mv_port_stop,
567 };
568 
569 static const struct ata_port_info mv_port_info[] = {
570 	{  /* chip_504x */
571 		.flags		= MV_COMMON_FLAGS,
572 		.pio_mask	= 0x1f,	/* pio0-4 */
573 		.udma_mask	= ATA_UDMA6,
574 		.port_ops	= &mv5_ops,
575 	},
576 	{  /* chip_508x */
577 		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
578 		.pio_mask	= 0x1f,	/* pio0-4 */
579 		.udma_mask	= ATA_UDMA6,
580 		.port_ops	= &mv5_ops,
581 	},
582 	{  /* chip_5080 */
583 		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
584 		.pio_mask	= 0x1f,	/* pio0-4 */
585 		.udma_mask	= ATA_UDMA6,
586 		.port_ops	= &mv5_ops,
587 	},
588 	{  /* chip_604x */
589 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
590 		.pio_mask	= 0x1f,	/* pio0-4 */
591 		.udma_mask	= ATA_UDMA6,
592 		.port_ops	= &mv6_ops,
593 	},
594 	{  /* chip_608x */
595 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
596 				  MV_FLAG_DUAL_HC,
597 		.pio_mask	= 0x1f,	/* pio0-4 */
598 		.udma_mask	= ATA_UDMA6,
599 		.port_ops	= &mv6_ops,
600 	},
601 	{  /* chip_6042 */
602 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
603 		.pio_mask	= 0x1f,	/* pio0-4 */
604 		.udma_mask	= ATA_UDMA6,
605 		.port_ops	= &mv_iie_ops,
606 	},
607 	{  /* chip_7042 */
608 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
609 		.pio_mask	= 0x1f,	/* pio0-4 */
610 		.udma_mask	= ATA_UDMA6,
611 		.port_ops	= &mv_iie_ops,
612 	},
613 };
614 
615 static const struct pci_device_id mv_pci_tbl[] = {
616 	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
617 	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
618 	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
619 	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
620 	/* RocketRAID 1740/174x have different identifiers */
621 	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
622 	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },
623 
624 	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
625 	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
626 	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
627 	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
628 	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
629 
630 	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
631 
632 	/* Adaptec 1430SA */
633 	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
634 
635 	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
636 
637 	/* add Marvell 7042 support */
638 	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
639 
640 	{ }			/* terminate list */
641 };
642 
643 static struct pci_driver mv_pci_driver = {
644 	.name			= DRV_NAME,
645 	.id_table		= mv_pci_tbl,
646 	.probe			= mv_init_one,
647 	.remove			= ata_pci_remove_one,
648 };
649 
650 static const struct mv_hw_ops mv5xxx_ops = {
651 	.phy_errata		= mv5_phy_errata,
652 	.enable_leds		= mv5_enable_leds,
653 	.read_preamp		= mv5_read_preamp,
654 	.reset_hc		= mv5_reset_hc,
655 	.reset_flash		= mv5_reset_flash,
656 	.reset_bus		= mv5_reset_bus,
657 };
658 
659 static const struct mv_hw_ops mv6xxx_ops = {
660 	.phy_errata		= mv6_phy_errata,
661 	.enable_leds		= mv6_enable_leds,
662 	.read_preamp		= mv6_read_preamp,
663 	.reset_hc		= mv6_reset_hc,
664 	.reset_flash		= mv6_reset_flash,
665 	.reset_bus		= mv_reset_pci_bus,
666 };
667 
668 /*
669  * module options
670  */
671 static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
672 
673 
674 /* move to PCI layer or libata core? */
675 static int pci_go_64(struct pci_dev *pdev)
676 {
677 	int rc;
678 
679 	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
680 		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
681 		if (rc) {
682 			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
683 			if (rc) {
684 				dev_printk(KERN_ERR, &pdev->dev,
685 					   "64-bit DMA enable failed\n");
686 				return rc;
687 			}
688 		}
689 	} else {
690 		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
691 		if (rc) {
692 			dev_printk(KERN_ERR, &pdev->dev,
693 				   "32-bit DMA enable failed\n");
694 			return rc;
695 		}
696 		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
697 		if (rc) {
698 			dev_printk(KERN_ERR, &pdev->dev,
699 				   "32-bit consistent DMA enable failed\n");
700 			return rc;
701 		}
702 	}
703 
704 	return rc;
705 }
706 
707 /*
708  * Functions
709  */
710 
711 static inline void writelfl(unsigned long data, void __iomem *addr)
712 {
713 	writel(data, addr);
714 	(void) readl(addr);	/* flush to avoid PCI posted write */
715 }
716 
717 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
718 {
719 	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
720 }
721 
722 static inline unsigned int mv_hc_from_port(unsigned int port)
723 {
724 	return port >> MV_PORT_HC_SHIFT;
725 }
726 
727 static inline unsigned int mv_hardport_from_port(unsigned int port)
728 {
729 	return port & MV_PORT_MASK;
730 }
731 
732 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
733 						 unsigned int port)
734 {
735 	return mv_hc_base(base, mv_hc_from_port(port));
736 }
737 
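/* Worked example of the address arithmetic below: port 5 sits on HC1
 * (5 >> MV_PORT_HC_SHIFT) as hard port 1 (5 & MV_PORT_MASK), so its
 * registers start at base + 0x30000 + 0x2000 + (1 * 0x2000) = base + 0x34000.
 */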
738 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
739 {
740 	return  mv_hc_base_from_port(base, port) +
741 		MV_SATAHC_ARBTR_REG_SZ +
742 		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
743 }
744 
745 static inline void __iomem *mv_ap_base(struct ata_port *ap)
746 {
747 	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
748 }
749 
750 static inline int mv_get_hc_count(unsigned long port_flags)
751 {
752 	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
753 }
754 
755 static void mv_irq_clear(struct ata_port *ap)
756 {
757 }
758 
759 static int mv_slave_config(struct scsi_device *sdev)
760 {
761 	int rc = ata_scsi_slave_config(sdev);
762 	if (rc)
763 		return rc;
764 
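	/* mv_fill_sg() may split an S/G entry in two at a 64KB boundary,
	 * so only half of MV_MAX_SG_CT is advertised to the block layer.
	 */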
765 	blk_queue_max_phys_segments(sdev->request_queue, MV_MAX_SG_CT / 2);
766 
767 	return 0;	/* scsi layer doesn't check return value, sigh */
768 }
769 
770 static void mv_set_edma_ptrs(void __iomem *port_mmio,
771 			     struct mv_host_priv *hpriv,
772 			     struct mv_port_priv *pp)
773 {
774 	u32 index;
775 
776 	/*
777 	 * initialize request queue
778 	 */
779 	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
780 
781 	WARN_ON(pp->crqb_dma & 0x3ff);
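	/* Program the upper 32 bits of the 1KB-aligned CRQB base; the double
	 * 16-bit shift keeps the expression well-defined even when dma_addr_t
	 * is only 32 bits wide.
	 */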
782 	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
783 	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
784 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
785 
786 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
787 		writelfl((pp->crqb_dma & 0xffffffff) | index,
788 			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
789 	else
790 		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
791 
792 	/*
793 	 * initialize response queue
794 	 */
795 	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
796 
797 	WARN_ON(pp->crpb_dma & 0xff);
798 	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
799 
800 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
801 		writelfl((pp->crpb_dma & 0xffffffff) | index,
802 			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
803 	else
804 		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
805 
806 	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
807 		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
808 }
809 
810 /**
811  *      mv_start_dma - Enable eDMA engine
812  *      @base: port base address
813  *      @pp: port private data
814  *
815  *      Verify the local cache of the eDMA state is accurate with a
816  *      WARN_ON.
817  *
818  *      LOCKING:
819  *      Inherited from caller.
820  */
821 static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
822 			 struct mv_port_priv *pp)
823 {
824 	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
825 		/* clear EDMA event indicators, if any */
826 		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);
827 
828 		mv_set_edma_ptrs(base, hpriv, pp);
829 
830 		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
831 		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
832 	}
833 	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
834 }
835 
836 /**
837  *      __mv_stop_dma - Disable eDMA engine
838  *      @ap: ATA channel to manipulate
839  *
840  *      Verify the local cache of the eDMA state is accurate with a
841  *      WARN_ON.
842  *
843  *      LOCKING:
844  *      Inherited from caller.
845  */
846 static int __mv_stop_dma(struct ata_port *ap)
847 {
848 	void __iomem *port_mmio = mv_ap_base(ap);
849 	struct mv_port_priv *pp	= ap->private_data;
850 	u32 reg;
851 	int i, err = 0;
852 
853 	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
854 		/* Disable EDMA if active.   The disable bit auto clears.
855 		 */
856 		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
857 		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
858 	} else {
859 		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
860   	}
861 
862 	/* now properly wait for the eDMA to stop */
863 	for (i = 1000; i > 0; i--) {
864 		reg = readl(port_mmio + EDMA_CMD_OFS);
865 		if (!(reg & EDMA_EN))
866 			break;
867 
868 		udelay(100);
869 	}
870 
871 	if (reg & EDMA_EN) {
872 		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
873 		err = -EIO;
874 	}
875 
876 	return err;
877 }
878 
879 static int mv_stop_dma(struct ata_port *ap)
880 {
881 	unsigned long flags;
882 	int rc;
883 
884 	spin_lock_irqsave(&ap->host->lock, flags);
885 	rc = __mv_stop_dma(ap);
886 	spin_unlock_irqrestore(&ap->host->lock, flags);
887 
888 	return rc;
889 }
890 
891 #ifdef ATA_DEBUG
892 static void mv_dump_mem(void __iomem *start, unsigned bytes)
893 {
894 	int b, w;
895 	for (b = 0; b < bytes; ) {
896 		DPRINTK("%p: ", start + b);
897 		for (w = 0; b < bytes && w < 4; w++) {
898 			printk("%08x ",readl(start + b));
899 			b += sizeof(u32);
900 		}
901 		printk("\n");
902 	}
903 }
904 #endif
905 
906 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
907 {
908 #ifdef ATA_DEBUG
909 	int b, w;
910 	u32 dw;
911 	for (b = 0; b < bytes; ) {
912 		DPRINTK("%02x: ", b);
913 		for (w = 0; b < bytes && w < 4; w++) {
914 			(void) pci_read_config_dword(pdev,b,&dw);
915 			printk("%08x ",dw);
916 			b += sizeof(u32);
917 		}
918 		printk("\n");
919 	}
920 #endif
921 }
922 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
923 			     struct pci_dev *pdev)
924 {
925 #ifdef ATA_DEBUG
926 	void __iomem *hc_base = mv_hc_base(mmio_base,
927 					   port >> MV_PORT_HC_SHIFT);
928 	void __iomem *port_base;
929 	int start_port, num_ports, p, start_hc, num_hcs, hc;
930 
931 	if (0 > port) {
932 		start_hc = start_port = 0;
933 		num_ports = 8;		/* should be benign for 4 port devs */
934 		num_hcs = 2;
935 	} else {
936 		start_hc = port >> MV_PORT_HC_SHIFT;
937 		start_port = port;
938 		num_ports = num_hcs = 1;
939 	}
940 	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
941 		num_ports > 1 ? num_ports - 1 : start_port);
942 
943 	if (NULL != pdev) {
944 		DPRINTK("PCI config space regs:\n");
945 		mv_dump_pci_cfg(pdev, 0x68);
946 	}
947 	DPRINTK("PCI regs:\n");
948 	mv_dump_mem(mmio_base+0xc00, 0x3c);
949 	mv_dump_mem(mmio_base+0xd00, 0x34);
950 	mv_dump_mem(mmio_base+0xf00, 0x4);
951 	mv_dump_mem(mmio_base+0x1d00, 0x6c);
952 	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
953 		hc_base = mv_hc_base(mmio_base, hc);
954 		DPRINTK("HC regs (HC %i):\n", hc);
955 		mv_dump_mem(hc_base, 0x1c);
956 	}
957 	for (p = start_port; p < start_port + num_ports; p++) {
958 		port_base = mv_port_base(mmio_base, p);
959 		DPRINTK("EDMA regs (port %i):\n",p);
960 		mv_dump_mem(port_base, 0x54);
961 		DPRINTK("SATA regs (port %i):\n",p);
962 		mv_dump_mem(port_base+0x300, 0x60);
963 	}
964 #endif
965 }
966 
967 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
968 {
969 	unsigned int ofs;
970 
971 	switch (sc_reg_in) {
972 	case SCR_STATUS:
973 	case SCR_CONTROL:
974 	case SCR_ERROR:
975 		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
976 		break;
977 	case SCR_ACTIVE:
978 		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
979 		break;
980 	default:
981 		ofs = 0xffffffffU;
982 		break;
983 	}
984 	return ofs;
985 }
986 
987 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
988 {
989 	unsigned int ofs = mv_scr_offset(sc_reg_in);
990 
991 	if (ofs != 0xffffffffU) {
992 		*val = readl(mv_ap_base(ap) + ofs);
993 		return 0;
994 	} else
995 		return -EINVAL;
996 }
997 
998 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
999 {
1000 	unsigned int ofs = mv_scr_offset(sc_reg_in);
1001 
1002 	if (ofs != 0xffffffffU) {
1003 		writelfl(val, mv_ap_base(ap) + ofs);
1004 		return 0;
1005 	} else
1006 		return -EINVAL;
1007 }
1008 
1009 static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
1010 			void __iomem *port_mmio)
1011 {
1012 	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
1013 
1014 	/* set up non-NCQ EDMA configuration */
1015 	cfg &= ~(1 << 9);	/* disable eQue */
1016 
1017 	if (IS_GEN_I(hpriv)) {
1018 		cfg &= ~0x1f;		/* clear queue depth */
1019 		cfg |= (1 << 8);	/* enab config burst size mask */
1020 	}
1021 
1022 	else if (IS_GEN_II(hpriv)) {
1023 		cfg &= ~0x1f;		/* clear queue depth */
1024 		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1025 		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
1026 	}
1027 
1028 	else if (IS_GEN_IIE(hpriv)) {
1029 		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
1030 		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
1031 		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
1032 		cfg |= (1 << 18);	/* enab early completion */
1033 		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
1034 		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
1035 		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
1036 	}
1037 
1038 	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1039 }
1040 
1041 /**
1042  *      mv_port_start - Port specific init/start routine.
1043  *      @ap: ATA channel to manipulate
1044  *
1045  *      Allocate and point to DMA memory, init port private memory,
1046  *      zero indices.
1047  *
1048  *      LOCKING:
1049  *      Inherited from caller.
1050  */
1051 static int mv_port_start(struct ata_port *ap)
1052 {
1053 	struct device *dev = ap->host->dev;
1054 	struct mv_host_priv *hpriv = ap->host->private_data;
1055 	struct mv_port_priv *pp;
1056 	void __iomem *port_mmio = mv_ap_base(ap);
1057 	void *mem;
1058 	dma_addr_t mem_dma;
1059 	unsigned long flags;
1060 	int rc;
1061 
1062 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1063 	if (!pp)
1064 		return -ENOMEM;
1065 
1066 	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
1067 				  GFP_KERNEL);
1068 	if (!mem)
1069 		return -ENOMEM;
1070 	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
1071 
1072 	rc = ata_pad_alloc(ap, dev);
1073 	if (rc)
1074 		return rc;
1075 
1076 	/* First item in chunk of DMA memory:
1077 	 * 32-slot command request table (CRQB), 32 bytes each in size
1078 	 */
1079 	pp->crqb = mem;
1080 	pp->crqb_dma = mem_dma;
1081 	mem += MV_CRQB_Q_SZ;
1082 	mem_dma += MV_CRQB_Q_SZ;
1083 
1084 	/* Second item:
1085 	 * 32-slot command response table (CRPB), 8 bytes each in size
1086 	 */
1087 	pp->crpb = mem;
1088 	pp->crpb_dma = mem_dma;
1089 	mem += MV_CRPB_Q_SZ;
1090 	mem_dma += MV_CRPB_Q_SZ;
1091 
1092 	/* Third item:
1093 	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
1094 	 */
1095 	pp->sg_tbl = mem;
1096 	pp->sg_tbl_dma = mem_dma;
1097 
1098 	spin_lock_irqsave(&ap->host->lock, flags);
1099 
1100 	mv_edma_cfg(ap, hpriv, port_mmio);
1101 
1102 	mv_set_edma_ptrs(port_mmio, hpriv, pp);
1103 
1104 	spin_unlock_irqrestore(&ap->host->lock, flags);
1105 
1106 	/* Don't turn on EDMA here...do it before DMA commands only.  Else
1107 	 * we'll be unable to send non-data, PIO, etc due to restricted access
1108 	 * to shadow regs.
1109 	 */
1110 	ap->private_data = pp;
1111 	return 0;
1112 }
1113 
1114 /**
1115  *      mv_port_stop - Port specific cleanup/stop routine.
1116  *      @ap: ATA channel to manipulate
1117  *
1118  *      Stop DMA, cleanup port memory.
1119  *
1120  *      LOCKING:
1121  *      This routine uses the host lock to protect the DMA stop.
1122  */
1123 static void mv_port_stop(struct ata_port *ap)
1124 {
1125 	mv_stop_dma(ap);
1126 }
1127 
1128 /**
1129  *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1130  *      @qc: queued command whose SG list to source from
1131  *
1132  *      Populate the SG list and mark the last entry.
1133  *
1134  *      LOCKING:
1135  *      Inherited from caller.
1136  */
1137 static void mv_fill_sg(struct ata_queued_cmd *qc)
1138 {
1139 	struct mv_port_priv *pp = qc->ap->private_data;
1140 	struct scatterlist *sg;
1141 	struct mv_sg *mv_sg;
1142 
1143 	mv_sg = pp->sg_tbl;
1144 	ata_for_each_sg(sg, qc) {
1145 		dma_addr_t addr = sg_dma_address(sg);
1146 		u32 sg_len = sg_dma_len(sg);
1147 
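		/* Emit ePRD entries, splitting so that no entry crosses a 64KB
		 * address boundary (the ePRD length field is only 16 bits).
		 */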
1148 		while (sg_len) {
1149 			u32 offset = addr & 0xffff;
1150 			u32 len = sg_len;
1151 
1152 			if ((offset + sg_len > 0x10000))
1153 				len = 0x10000 - offset;
1154 
1155 			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1156 			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1157 			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1158 
1159 			sg_len -= len;
1160 			addr += len;
1161 
1162 			if (!sg_len && ata_sg_is_last(sg, qc))
1163 				mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1164 
1165 			mv_sg++;
1166 		}
1167 
1168 	}
1169 }
1170 
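/* Pack one 16-bit CRQB command word: register value in the low byte, the
 * shadow-register address at CRQB_CMD_ADDR_SHIFT, the fixed CRQB_CMD_CS
 * code, and CRQB_CMD_LAST set on the final word of the request.
 */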
1171 static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1172 {
1173 	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1174 		(last ? CRQB_CMD_LAST : 0);
1175 	*cmdw = cpu_to_le16(tmp);
1176 }
1177 
1178 /**
1179  *      mv_qc_prep - Host specific command preparation.
1180  *      @qc: queued command to prepare
1181  *
1182  *      This routine simply redirects to the general purpose routine
1183  *      if command is not DMA.  Else, it handles prep of the CRQB
1184  *      (command request block), does some sanity checking, and calls
1185  *      the SG load routine.
1186  *
1187  *      LOCKING:
1188  *      Inherited from caller.
1189  */
1190 static void mv_qc_prep(struct ata_queued_cmd *qc)
1191 {
1192 	struct ata_port *ap = qc->ap;
1193 	struct mv_port_priv *pp = ap->private_data;
1194 	__le16 *cw;
1195 	struct ata_taskfile *tf;
1196 	u16 flags = 0;
1197 	unsigned in_index;
1198 
1199  	if (qc->tf.protocol != ATA_PROT_DMA)
1200 		return;
1201 
1202 	/* Fill in command request block
1203 	 */
1204 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1205 		flags |= CRQB_FLAG_READ;
1206 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1207 	flags |= qc->tag << CRQB_TAG_SHIFT;
1208 	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/
1209 
1210 	/* get current queue index from software */
1211 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1212 
1213 	pp->crqb[in_index].sg_addr =
1214 		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1215 	pp->crqb[in_index].sg_addr_hi =
1216 		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1217 	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1218 
1219 	cw = &pp->crqb[in_index].ata_cmd[0];
1220 	tf = &qc->tf;
1221 
1222 	/* Sadly, the CRQB cannot accommodate all registers--there are
1223 	 * only 11 bytes...so we must pick and choose required
1224 	 * registers based on the command.  So, we drop feature and
1225 	 * hob_feature for [RW] DMA commands, but they are needed for
1226 	 * NCQ.  NCQ will drop hob_nsect.
1227 	 */
1228 	switch (tf->command) {
1229 	case ATA_CMD_READ:
1230 	case ATA_CMD_READ_EXT:
1231 	case ATA_CMD_WRITE:
1232 	case ATA_CMD_WRITE_EXT:
1233 	case ATA_CMD_WRITE_FUA_EXT:
1234 		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1235 		break;
1236 #ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
1237 	case ATA_CMD_FPDMA_READ:
1238 	case ATA_CMD_FPDMA_WRITE:
1239 		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1240 		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1241 		break;
1242 #endif				/* FIXME: remove this line when NCQ added */
1243 	default:
1244 		/* The only other commands EDMA supports in non-queued and
1245 		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1246 		 * of which are defined/used by Linux.  If we get here, this
1247 		 * driver needs work.
1248 		 *
1249 		 * FIXME: modify libata to give qc_prep a return value and
1250 		 * return error here.
1251 		 */
1252 		BUG_ON(tf->command);
1253 		break;
1254 	}
1255 	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1256 	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1257 	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1258 	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1259 	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1260 	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1261 	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1262 	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1263 	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
1264 
1265 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1266 		return;
1267 	mv_fill_sg(qc);
1268 }
1269 
1270 /**
1271  *      mv_qc_prep_iie - Host specific command preparation.
1272  *      @qc: queued command to prepare
1273  *
1274  *      This routine simply redirects to the general purpose routine
1275  *      if command is not DMA.  Else, it handles prep of the CRQB
1276  *      (command request block), does some sanity checking, and calls
1277  *      the SG load routine.
1278  *
1279  *      LOCKING:
1280  *      Inherited from caller.
1281  */
1282 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1283 {
1284 	struct ata_port *ap = qc->ap;
1285 	struct mv_port_priv *pp = ap->private_data;
1286 	struct mv_crqb_iie *crqb;
1287 	struct ata_taskfile *tf;
1288 	unsigned in_index;
1289 	u32 flags = 0;
1290 
1291  	if (qc->tf.protocol != ATA_PROT_DMA)
1292 		return;
1293 
1294 	/* Fill in Gen IIE command request block
1295 	 */
1296 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1297 		flags |= CRQB_FLAG_READ;
1298 
1299 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1300 	flags |= qc->tag << CRQB_TAG_SHIFT;
1301 	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
1302 						   what we use as our tag */
1303 
1304 	/* get current queue index from software */
1305 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1306 
1307 	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1308 	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1309 	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1310 	crqb->flags = cpu_to_le32(flags);
1311 
1312 	tf = &qc->tf;
1313 	crqb->ata_cmd[0] = cpu_to_le32(
1314 			(tf->command << 16) |
1315 			(tf->feature << 24)
1316 		);
1317 	crqb->ata_cmd[1] = cpu_to_le32(
1318 			(tf->lbal << 0) |
1319 			(tf->lbam << 8) |
1320 			(tf->lbah << 16) |
1321 			(tf->device << 24)
1322 		);
1323 	crqb->ata_cmd[2] = cpu_to_le32(
1324 			(tf->hob_lbal << 0) |
1325 			(tf->hob_lbam << 8) |
1326 			(tf->hob_lbah << 16) |
1327 			(tf->hob_feature << 24)
1328 		);
1329 	crqb->ata_cmd[3] = cpu_to_le32(
1330 			(tf->nsect << 0) |
1331 			(tf->hob_nsect << 8)
1332 		);
1333 
1334 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1335 		return;
1336 	mv_fill_sg(qc);
1337 }
1338 
1339 /**
1340  *      mv_qc_issue - Initiate a command to the host
1341  *      @qc: queued command to start
1342  *
1343  *      This routine simply redirects to the general purpose routine
1344  *      if command is not DMA.  Else, it sanity checks our local
1345  *      caches of the request producer/consumer indices then enables
1346  *      DMA and bumps the request producer index.
1347  *
1348  *      LOCKING:
1349  *      Inherited from caller.
1350  */
1351 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1352 {
1353 	struct ata_port *ap = qc->ap;
1354 	void __iomem *port_mmio = mv_ap_base(ap);
1355 	struct mv_port_priv *pp = ap->private_data;
1356 	struct mv_host_priv *hpriv = ap->host->private_data;
1357 	u32 in_index;
1358 
1359 	if (qc->tf.protocol != ATA_PROT_DMA) {
1360 		/* We're about to send a non-EDMA capable command to the
1361 		 * port.  Turn off EDMA so there won't be problems accessing
1362 		 * shadow block, etc registers.
1363 		 */
1364 		__mv_stop_dma(ap);
1365 		return ata_qc_issue_prot(qc);
1366 	}
1367 
1368 	mv_start_dma(port_mmio, hpriv, pp);
1369 
1370 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1371 
1372 	/* until we do queuing, the queue should be empty at this point */
1373 	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1374 		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1375 
1376 	pp->req_idx++;
1377 
1378 	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1379 
1380 	/* and write the request in pointer to kick the EDMA to life */
1381 	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1382 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1383 
1384 	return 0;
1385 }
1386 
1387 /**
1388  *      mv_err_intr - Handle error interrupts on the port
1389  *      @ap: ATA channel to manipulate
1390  *      @qc: affected queued command (may be NULL)
1391  *
1392  *      In most cases, just clear the interrupt and move on.  However,
1393  *      some cases require an eDMA reset, which is done right before
1394  *      the COMRESET in mv_phy_reset().  The SERR case requires a
1395  *      clear of pending errors in the SATA SERROR register.  Finally,
1396  *      if the port disabled DMA, update our cached copy to match.
1397  *
1398  *      LOCKING:
1399  *      Inherited from caller.
1400  */
1401 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1402 {
1403 	void __iomem *port_mmio = mv_ap_base(ap);
1404 	u32 edma_err_cause, eh_freeze_mask, serr = 0;
1405 	struct mv_port_priv *pp = ap->private_data;
1406 	struct mv_host_priv *hpriv = ap->host->private_data;
1407 	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1408 	unsigned int action = 0, err_mask = 0;
1409 	struct ata_eh_info *ehi = &ap->link.eh_info;
1410 
1411 	ata_ehi_clear_desc(ehi);
1412 
1413 	if (!edma_enabled) {
1414 		/* just a guess: do we need to do this? should we
1415 		 * expand this, and do it in all cases?
1416 		 */
1417 		sata_scr_read(&ap->link, SCR_ERROR, &serr);
1418 		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1419 	}
1420 
1421 	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1422 
1423 	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1424 
1425 	/*
1426 	 * all generations share these EDMA error cause bits
1427 	 */
1428 
1429 	if (edma_err_cause & EDMA_ERR_DEV)
1430 		err_mask |= AC_ERR_DEV;
1431 	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1432 			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1433 			EDMA_ERR_INTRL_PAR)) {
1434 		err_mask |= AC_ERR_ATA_BUS;
1435 		action |= ATA_EH_HARDRESET;
1436 		ata_ehi_push_desc(ehi, "parity error");
1437 	}
1438 	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1439 		ata_ehi_hotplugged(ehi);
1440 		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1441 			"dev disconnect" : "dev connect");
1442 	}
1443 
1444 	if (IS_GEN_I(hpriv)) {
1445 		eh_freeze_mask = EDMA_EH_FREEZE_5;
1446 
1447 		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1448 			struct mv_port_priv *pp	= ap->private_data;
1449 			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1450 			ata_ehi_push_desc(ehi, "EDMA self-disable");
1451 		}
1452 	} else {
1453 		eh_freeze_mask = EDMA_EH_FREEZE;
1454 
1455 		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1456 			struct mv_port_priv *pp	= ap->private_data;
1457 			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1458 			ata_ehi_push_desc(ehi, "EDMA self-disable");
1459 		}
1460 
1461 		if (edma_err_cause & EDMA_ERR_SERR) {
1462 			sata_scr_read(&ap->link, SCR_ERROR, &serr);
1463 			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1464 			err_mask = AC_ERR_ATA_BUS;
1465 			action |= ATA_EH_HARDRESET;
1466 		}
1467 	}
1468 
1469 	/* Clear EDMA now that SERR cleanup done */
1470 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1471 
1472 	if (!err_mask) {
1473 		err_mask = AC_ERR_OTHER;
1474 		action |= ATA_EH_HARDRESET;
1475 	}
1476 
1477 	ehi->serror |= serr;
1478 	ehi->action |= action;
1479 
1480 	if (qc)
1481 		qc->err_mask |= err_mask;
1482 	else
1483 		ehi->err_mask |= err_mask;
1484 
1485 	if (edma_err_cause & eh_freeze_mask)
1486 		ata_port_freeze(ap);
1487 	else
1488 		ata_port_abort(ap);
1489 }
1490 
1491 static void mv_intr_pio(struct ata_port *ap)
1492 {
1493 	struct ata_queued_cmd *qc;
1494 	u8 ata_status;
1495 
1496 	/* ignore spurious intr if drive still BUSY */
1497 	ata_status = readb(ap->ioaddr.status_addr);
1498 	if (unlikely(ata_status & ATA_BUSY))
1499 		return;
1500 
1501 	/* get active ATA command */
1502 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
1503 	if (unlikely(!qc))			/* no active tag */
1504 		return;
1505 	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
1506 		return;
1507 
1508 	/* and finally, complete the ATA command */
1509 	qc->err_mask |= ac_err_mask(ata_status);
1510 	ata_qc_complete(qc);
1511 }
1512 
1513 static void mv_intr_edma(struct ata_port *ap)
1514 {
1515 	void __iomem *port_mmio = mv_ap_base(ap);
1516 	struct mv_host_priv *hpriv = ap->host->private_data;
1517 	struct mv_port_priv *pp = ap->private_data;
1518 	struct ata_queued_cmd *qc;
1519 	u32 out_index, in_index;
1520 	bool work_done = false;
1521 
1522 	/* get h/w response queue pointer */
1523 	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1524 			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1525 
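	/* Consume completed CRPB entries until the software out-pointer
	 * catches up with the hardware in-pointer read above.
	 */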
1526 	while (1) {
1527 		u16 status;
1528 		unsigned int tag;
1529 
1530 		/* get s/w response queue last-read pointer, and compare */
1531 		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1532 		if (in_index == out_index)
1533 			break;
1534 
1535 		/* 50xx: get active ATA command */
1536 		if (IS_GEN_I(hpriv))
1537 			tag = ap->link.active_tag;
1538 
1539 		/* Gen II/IIE: get active ATA command via tag, to enable
1540 		 * support for queueing.  This works transparently for
1541 		 * queued and non-queued modes.
1542 		 */
1543 		else if (IS_GEN_II(hpriv))
1544 			tag = (le16_to_cpu(pp->crpb[out_index].id)
1545 				>> CRPB_IOID_SHIFT_6) & 0x3f;
1546 
1547 		else /* IS_GEN_IIE */
1548 			tag = (le16_to_cpu(pp->crpb[out_index].id)
1549 				>> CRPB_IOID_SHIFT_7) & 0x3f;
1550 
1551 		qc = ata_qc_from_tag(ap, tag);
1552 
1553 		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
1554 		 * bits (WARNING: might not necessarily be associated
1555 		 * with this command), which -should- be clear
1556 		 * if all is well
1557 		 */
1558 		status = le16_to_cpu(pp->crpb[out_index].flags);
1559 		if (unlikely(status & 0xff)) {
1560 			mv_err_intr(ap, qc);
1561 			return;
1562 		}
1563 
1564 		/* and finally, complete the ATA command */
1565 		if (qc) {
1566 			qc->err_mask |=
1567 				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1568 			ata_qc_complete(qc);
1569 		}
1570 
1571 		/* advance software response queue pointer, to
1572 		 * indicate (after the loop completes) to hardware
1573 		 * that we have consumed a response queue entry.
1574 		 */
1575 		work_done = true;
1576 		pp->resp_idx++;
1577 	}
1578 
1579 	if (work_done)
1580 		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1581 			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1582 			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1583 }
1584 
1585 /**
1586  *      mv_host_intr - Handle all interrupts on the given host controller
1587  *      @host: host specific structure
1588  *      @relevant: port error bits relevant to this host controller
1589  *      @hc: which host controller we're to look at
1590  *
1591  *      Read then write-clear the HC interrupt status, then walk each
1592  *      port connected to the HC and see if it needs servicing.  Port
1593  *      success ints are reported in the HC interrupt status reg, the
1594  *      port error ints are reported in the higher level main
1595  *      interrupt status register and thus are passed in via the
1596  *      'relevant' argument.
1597  *
1598  *      LOCKING:
1599  *      Inherited from caller.
1600  */
1601 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1602 {
1603 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1604 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1605 	u32 hc_irq_cause;
1606 	int port, port0;
1607 
1608 	if (hc == 0)
1609 		port0 = 0;
1610 	else
1611 		port0 = MV_PORTS_PER_HC;
1612 
1613 	/* we'll need the HC success int register in most cases */
1614 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1615 	if (!hc_irq_cause)
1616 		return;
1617 
1618 	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1619 
1620 	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1621 		hc,relevant,hc_irq_cause);
1622 
1623 	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1624 		struct ata_port *ap = host->ports[port];
1625 		struct mv_port_priv *pp = ap->private_data;
1626 		int have_err_bits, hard_port, shift;
1627 
1628 		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1629 			continue;
1630 
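		/* Per the main-cause layout above: ports 0-3 use err/done bit
		 * pairs 0/1..6/7, bit 8 is HC0's coalescing bit, so HC1's ports
		 * (4-7) are shifted up by one into bits 9/10..15/16.
		 */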
1631 		shift = port << 1;		/* (port * 2) */
1632 		if (port >= MV_PORTS_PER_HC) {
1633 			shift++;	/* skip bit 8 in the HC Main IRQ reg */
1634 		}
1635 		have_err_bits = ((PORT0_ERR << shift) & relevant);
1636 
1637 		if (unlikely(have_err_bits)) {
1638 			struct ata_queued_cmd *qc;
1639 
1640 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
1641 			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1642 				continue;
1643 
1644 			mv_err_intr(ap, qc);
1645 			continue;
1646 		}
1647 
1648 		hard_port = mv_hardport_from_port(port); /* range 0..3 */
1649 
1650 		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1651 			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1652 				mv_intr_edma(ap);
1653 		} else {
1654 			if ((DEV_IRQ << hard_port) & hc_irq_cause)
1655 				mv_intr_pio(ap);
1656 		}
1657 	}
1658 	VPRINTK("EXIT\n");
1659 }
1660 
1661 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1662 {
1663 	struct ata_port *ap;
1664 	struct ata_queued_cmd *qc;
1665 	struct ata_eh_info *ehi;
1666 	unsigned int i, err_mask, printed = 0;
1667 	u32 err_cause;
1668 
1669 	err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);
1670 
1671 	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1672 		   err_cause);
1673 
1674 	DPRINTK("All regs @ PCI error\n");
1675 	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1676 
1677 	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1678 
1679 	for (i = 0; i < host->n_ports; i++) {
1680 		ap = host->ports[i];
1681 		if (!ata_link_offline(&ap->link)) {
1682 			ehi = &ap->link.eh_info;
1683 			ata_ehi_clear_desc(ehi);
1684 			if (!printed++)
1685 				ata_ehi_push_desc(ehi,
1686 					"PCI err cause 0x%08x", err_cause);
1687 			err_mask = AC_ERR_HOST_BUS;
1688 			ehi->action = ATA_EH_HARDRESET;
1689 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
1690 			if (qc)
1691 				qc->err_mask |= err_mask;
1692 			else
1693 				ehi->err_mask |= err_mask;
1694 
1695 			ata_port_freeze(ap);
1696 		}
1697 	}
1698 }
1699 
1700 /**
1701  *      mv_interrupt - Main interrupt event handler
1702  *      @irq: unused
1703  *      @dev_instance: private data; in this case the host structure
1704  *
1705  *      Read the read only register to determine if any host
1706  *      controllers have pending interrupts.  If so, call lower level
1707  *      routine to handle.  Also check for PCI errors which are only
1708  *      reported here.
1709  *
1710  *      LOCKING:
1711  *      This routine holds the host lock while processing pending
1712  *      interrupts.
1713  */
1714 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1715 {
1716 	struct ata_host *host = dev_instance;
1717 	unsigned int hc, handled = 0, n_hcs;
1718 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1719 	u32 irq_stat;
1720 
1721 	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1722 
1723 	/* check the cases where we either have nothing pending or have read
1724 	 * a bogus register value which can indicate HW removal or PCI fault
1725 	 */
1726 	if (!irq_stat || (0xffffffffU == irq_stat))
1727 		return IRQ_NONE;
1728 
1729 	n_hcs = mv_get_hc_count(host->ports[0]->flags);
1730 	spin_lock(&host->lock);
1731 
1732 	if (unlikely(irq_stat & PCI_ERR)) {
1733 		mv_pci_error(host, mmio);
1734 		handled = 1;
1735 		goto out_unlock;	/* skip all other HC irq handling */
1736 	}
1737 
1738 	for (hc = 0; hc < n_hcs; hc++) {
1739 		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1740 		if (relevant) {
1741 			mv_host_intr(host, relevant, hc);
1742 			handled = 1;
1743 		}
1744 	}
1745 
1746 out_unlock:
1747 	spin_unlock(&host->lock);
1748 
1749 	return IRQ_RETVAL(handled);
1750 }
1751 
1752 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1753 {
1754 	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1755 	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1756 
1757 	return hc_mmio + ofs;
1758 }
1759 
1760 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1761 {
1762 	unsigned int ofs;
1763 
1764 	switch (sc_reg_in) {
1765 	case SCR_STATUS:
1766 	case SCR_ERROR:
1767 	case SCR_CONTROL:
1768 		ofs = sc_reg_in * sizeof(u32);
1769 		break;
1770 	default:
1771 		ofs = 0xffffffffU;
1772 		break;
1773 	}
1774 	return ofs;
1775 }
1776 
1777 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1778 {
1779 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1780 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1781 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1782 
1783 	if (ofs != 0xffffffffU) {
1784 		*val = readl(addr + ofs);
1785 		return 0;
1786 	} else
1787 		return -EINVAL;
1788 }
1789 
1790 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1791 {
1792 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1793 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1794 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1795 
1796 	if (ofs != 0xffffffffU) {
1797 		writelfl(val, addr + ofs);
1798 		return 0;
1799 	} else
1800 		return -EINVAL;
1801 }
1802 
1803 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1804 {
1805 	int early_5080;
1806 
1807 	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1808 
1809 	if (!early_5080) {
1810 		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1811 		tmp |= (1 << 0);
1812 		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1813 	}
1814 
1815 	mv_reset_pci_bus(pdev, mmio);
1816 }
1817 
1818 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1819 {
1820 	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1821 }
1822 
1823 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1824 			   void __iomem *mmio)
1825 {
1826 	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1827 	u32 tmp;
1828 
1829 	tmp = readl(phy_mmio + MV5_PHY_MODE);
1830 
1831 	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
1832 	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
1833 }
1834 
1835 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1836 {
1837 	u32 tmp;
1838 
1839 	writel(0, mmio + MV_GPIO_PORT_CTL);
1840 
1841 	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1842 
1843 	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1844 	tmp |= ~(1 << 0);
1845 	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1846 }
1847 
1848 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1849 			   unsigned int port)
1850 {
1851 	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1852 	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1853 	u32 tmp;
1854 	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1855 
1856 	if (fix_apm_sq) {
1857 		tmp = readl(phy_mmio + MV5_LT_MODE);
1858 		tmp |= (1 << 19);
1859 		writel(tmp, phy_mmio + MV5_LT_MODE);
1860 
1861 		tmp = readl(phy_mmio + MV5_PHY_CTL);
1862 		tmp &= ~0x3;
1863 		tmp |= 0x1;
1864 		writel(tmp, phy_mmio + MV5_PHY_CTL);
1865 	}
1866 
1867 	tmp = readl(phy_mmio + MV5_PHY_MODE);
1868 	tmp &= ~mask;
1869 	tmp |= hpriv->signal[port].pre;
1870 	tmp |= hpriv->signal[port].amps;
1871 	writel(tmp, phy_mmio + MV5_PHY_MODE);
1872 }
1873 
1874 
1875 #undef ZERO
1876 #define ZERO(reg) writel(0, port_mmio + (reg))
1877 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1878 			     unsigned int port)
1879 {
1880 	void __iomem *port_mmio = mv_port_base(mmio, port);
1881 
1882 	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1883 
1884 	mv_channel_reset(hpriv, mmio, port);
1885 
1886 	ZERO(0x028);	/* command */
1887 	writel(0x11f, port_mmio + EDMA_CFG_OFS);
1888 	ZERO(0x004);	/* timer */
1889 	ZERO(0x008);	/* irq err cause */
1890 	ZERO(0x00c);	/* irq err mask */
1891 	ZERO(0x010);	/* rq bah */
1892 	ZERO(0x014);	/* rq inp */
1893 	ZERO(0x018);	/* rq outp */
1894 	ZERO(0x01c);	/* respq bah */
1895 	ZERO(0x024);	/* respq outp */
1896 	ZERO(0x020);	/* respq inp */
1897 	ZERO(0x02c);	/* test control */
1898 	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1899 }
1900 #undef ZERO
1901 
1902 #define ZERO(reg) writel(0, hc_mmio + (reg))
1903 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1904 			unsigned int hc)
1905 {
1906 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1907 	u32 tmp;
1908 
1909 	ZERO(0x00c);
1910 	ZERO(0x010);
1911 	ZERO(0x014);
1912 	ZERO(0x018);
1913 
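	/* tweak undocumented bits in the per-HC register at offset 0x20 */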
1914 	tmp = readl(hc_mmio + 0x20);
1915 	tmp &= 0x1c1c1c1c;
1916 	tmp |= 0x03030303;
1917 	writel(tmp, hc_mmio + 0x20);
1918 }
1919 #undef ZERO
1920 
1921 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1922 			unsigned int n_hc)
1923 {
1924 	unsigned int hc, port;
1925 
1926 	for (hc = 0; hc < n_hc; hc++) {
1927 		for (port = 0; port < MV_PORTS_PER_HC; port++)
1928 			mv5_reset_hc_port(hpriv, mmio,
1929 					  (hc * MV_PORTS_PER_HC) + port);
1930 
1931 		mv5_reset_one_hc(hpriv, mmio, hc);
1932 	}
1933 
1934 	return 0;
1935 }
1936 
1937 #undef ZERO
1938 #define ZERO(reg) writel(0, mmio + (reg))
1939 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1940 {
1941 	u32 tmp;
1942 
1943 	tmp = readl(mmio + MV_PCI_MODE);
1944 	tmp &= 0xff00ffff;
1945 	writel(tmp, mmio + MV_PCI_MODE);
1946 
1947 	ZERO(MV_PCI_DISC_TIMER);
1948 	ZERO(MV_PCI_MSI_TRIGGER);
1949 	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1950 	ZERO(HC_MAIN_IRQ_MASK_OFS);
1951 	ZERO(MV_PCI_SERR_MASK);
1952 	ZERO(PCI_IRQ_CAUSE_OFS);
1953 	ZERO(PCI_IRQ_MASK_OFS);
1954 	ZERO(MV_PCI_ERR_LOW_ADDRESS);
1955 	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1956 	ZERO(MV_PCI_ERR_ATTRIBUTE);
1957 	ZERO(MV_PCI_ERR_COMMAND);
1958 }
1959 #undef ZERO
1960 
1961 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1962 {
1963 	u32 tmp;
1964 
1965 	mv5_reset_flash(hpriv, mmio);
1966 
1967 	tmp = readl(mmio + MV_GPIO_PORT_CTL);
1968 	tmp &= 0x3;
1969 	tmp |= (1 << 5) | (1 << 6);
1970 	writel(tmp, mmio + MV_GPIO_PORT_CTL);
1971 }
1972 
1973 /**
1974  *      mv6_reset_hc - Perform the 6xxx global soft reset
1975  *      @mmio: base address of the HBA
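 *      @hpriv: host private data (unused here)
 *      @n_hc: number of host controllers (unused here)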
1976  *
1977  *      This routine only applies to 6xxx parts.
1978  *
1979  *      LOCKING:
1980  *      Inherited from caller.
1981  */
1982 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1983 			unsigned int n_hc)
1984 {
1985 	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1986 	int i, rc = 0;
1987 	u32 t;
1988 
1989 	/* Follow the procedure defined in the PCI "main command and status
1990 	 * register" table.
1991 	 */
1992 	t = readl(reg);
1993 	writel(t | STOP_PCI_MASTER, reg);
1994 
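	/* wait up to ~1ms for the PCI master to report empty */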
1995 	for (i = 0; i < 1000; i++) {
1996 		udelay(1);
1997 		t = readl(reg);
1998 		if (PCI_MASTER_EMPTY & t) {
1999 			break;
2000 		}
2001 	}
2002 	if (!(PCI_MASTER_EMPTY & t)) {
2003 		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2004 		rc = 1;
2005 		goto done;
2006 	}
2007 
2008 	/* set reset */
2009 	i = 5;
2010 	do {
2011 		writel(t | GLOB_SFT_RST, reg);
2012 		t = readl(reg);
2013 		udelay(1);
2014 	} while (!(GLOB_SFT_RST & t) && (i-- > 0));
2015 
2016 	if (!(GLOB_SFT_RST & t)) {
2017 		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2018 		rc = 1;
2019 		goto done;
2020 	}
2021 
2022 	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
2023 	i = 5;
2024 	do {
2025 		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2026 		t = readl(reg);
2027 		udelay(1);
2028 	} while ((GLOB_SFT_RST & t) && (i-- > 0));
2029 
2030 	if (GLOB_SFT_RST & t) {
2031 		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2032 		rc = 1;
2033 	}
2034 done:
2035 	return rc;
2036 }
2037 
2038 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2039 			   void __iomem *mmio)
2040 {
2041 	void __iomem *port_mmio;
2042 	u32 tmp;
2043 
2044 	tmp = readl(mmio + MV_RESET_CFG);
2045 	if ((tmp & (1 << 0)) == 0) {
2046 		hpriv->signal[idx].amps = 0x7 << 8;
2047 		hpriv->signal[idx].pre = 0x1 << 5;
2048 		return;
2049 	}
2050 
2051 	port_mmio = mv_port_base(mmio, idx);
2052 	tmp = readl(port_mmio + PHY_MODE2);
2053 
2054 	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
2055 	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
2056 }
2057 
2058 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2059 {
2060 	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2061 }
2062 
2063 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2064 			   unsigned int port)
2065 {
2066 	void __iomem *port_mmio = mv_port_base(mmio, port);
2067 
2068 	u32 hp_flags = hpriv->hp_flags;
2069 	int fix_phy_mode2 =
2070 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2071 	int fix_phy_mode4 =
2072 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2073 	u32 m2, tmp;
2074 
2075 	if (fix_phy_mode2) {
2076 		m2 = readl(port_mmio + PHY_MODE2);
2077 		m2 &= ~(1 << 16);
2078 		m2 |= (1 << 31);
2079 		writel(m2, port_mmio + PHY_MODE2);
2080 
2081 		udelay(200);
2082 
2083 		m2 = readl(port_mmio + PHY_MODE2);
2084 		m2 &= ~((1 << 16) | (1 << 31));
2085 		writel(m2, port_mmio + PHY_MODE2);
2086 
2087 		udelay(200);
2088 	}
2089 
2090 	/* who knows what this magic does */
2091 	tmp = readl(port_mmio + PHY_MODE3);
2092 	tmp &= ~0x7F800000;
2093 	tmp |= 0x2A800000;
2094 	writel(tmp, port_mmio + PHY_MODE3);
2095 
2096 	if (fix_phy_mode4) {
2097 		u32 m4;
2098 
2099 		m4 = readl(port_mmio + PHY_MODE4);
2100 
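		/* 60X1B2 errata: preserve the register at 0x310 across the
		 * PHY_MODE4 update
		 */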
2101 		if (hp_flags & MV_HP_ERRATA_60X1B2)
2102 			tmp = readl(port_mmio + 0x310);
2103 
2104 		m4 = (m4 & ~(1 << 1)) | (1 << 0);
2105 
2106 		writel(m4, port_mmio + PHY_MODE4);
2107 
2108 		if (hp_flags & MV_HP_ERRATA_60X1B2)
2109 			writel(tmp, port_mmio + 0x310);
2110 	}
2111 
2112 	/* Revert values of pre-emphasis and signal amps to the saved ones */
2113 	m2 = readl(port_mmio + PHY_MODE2);
2114 
2115 	m2 &= ~MV_M2_PREAMP_MASK;
2116 	m2 |= hpriv->signal[port].amps;
2117 	m2 |= hpriv->signal[port].pre;
2118 	m2 &= ~(1 << 16);
2119 
2120 	/* according to mvSata 3.6.1, some IIE values are fixed */
2121 	if (IS_GEN_IIE(hpriv)) {
2122 		m2 &= ~0xC30FF01F;
2123 		m2 |= 0x0000900F;
2124 	}
2125 
2126 	writel(m2, port_mmio + PHY_MODE2);
2127 }
2128 
2129 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2130 			     unsigned int port_no)
2131 {
2132 	void __iomem *port_mmio = mv_port_base(mmio, port_no);
2133 
2134 	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2135 
2136 	if (IS_GEN_II(hpriv)) {
2137 		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2138 		ifctl |= (1 << 7);		/* enable gen2i speed */
2139 		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2140 		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2141 	}
2142 
2143 	udelay(25);		/* allow reset propagation */
2144 
2145 	/* The spec never mentions clearing the bit, but Marvell's driver
2146 	 * does clear it.
2147 	 */
2148 	writelfl(0, port_mmio + EDMA_CMD_OFS);
2149 
2150 	hpriv->ops->phy_errata(hpriv, mmio, port_no);
2151 
2152 	if (IS_GEN_I(hpriv))
2153 		mdelay(1);
2154 }
2155 
2156 /**
2157  *      mv_phy_reset - Perform eDMA reset followed by COMRESET
2158  *      @ap: ATA channel to manipulate
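 *      @class: resulting device class
 *      @deadline: deadline in jiffies for the reset sequence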
2159  *
2160  *      Part of this is taken from __sata_phy_reset and modified to
2161  *      not sleep since this routine gets called from interrupt level.
2162  *
2163  *      LOCKING:
2164  *      Inherited from caller.  This is coded to be safe to call at
2165  *      interrupt level, i.e. it does not sleep.
2166  */
2167 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2168 			 unsigned long deadline)
2169 {
2170 	struct mv_port_priv *pp	= ap->private_data;
2171 	struct mv_host_priv *hpriv = ap->host->private_data;
2172 	void __iomem *port_mmio = mv_ap_base(ap);
2173 	int retry = 5;
2174 	u32 sstatus;
2175 
2176 	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2177 
2178 #ifdef DEBUG
2179 	{
2180 		u32 sstatus, serror, scontrol;
2181 
2182 		mv_scr_read(ap, SCR_STATUS, &sstatus);
2183 		mv_scr_read(ap, SCR_ERROR, &serror);
2184 		mv_scr_read(ap, SCR_CONTROL, &scontrol);
2185 		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2186 			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
2187 	}
2188 #endif
2189 
2190 	/* Issue COMRESET via SControl */
2191 comreset_retry:
2192 	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2193 	msleep(1);
2194 
2195 	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2196 	msleep(20);
2197 
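	/* poll SStatus until its low DET bits read 3 (device present,
	 * phy online) or 0 (nothing attached)
	 */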
2198 	do {
2199 		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2200 		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2201 			break;
2202 
2203 		msleep(1);
2204 	} while (time_before(jiffies, deadline));
2205 
2206 	/* work around errata */
2207 	if (IS_GEN_II(hpriv) &&
2208 	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2209 	    (retry-- > 0))
2210 		goto comreset_retry;
2211 
2212 #ifdef DEBUG
2213 	{
2214 		u32 sstatus, serror, scontrol;
2215 
2216 		mv_scr_read(ap, SCR_STATUS, &sstatus);
2217 		mv_scr_read(ap, SCR_ERROR, &serror);
2218 		mv_scr_read(ap, SCR_CONTROL, &scontrol);
2219 		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2220 			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
2221 	}
2222 #endif
2223 
2224 	if (ata_link_offline(&ap->link)) {
2225 		*class = ATA_DEV_NONE;
2226 		return;
2227 	}
2228 
2229 	/* Even after SStatus reflects that the device is ready,
2230 	 * it seems to take a while for the link to be fully
2231 	 * established (and thus Status is no longer 0x80/0x7F),
2232 	 * so we poll for that here.
2233 	 */
2234 	retry = 20;
2235 	while (1) {
2236 		u8 drv_stat = ata_check_status(ap);
2237 		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2238 			break;
2239 		msleep(500);
2240 		if (retry-- <= 0)
2241 			break;
2242 		if (time_after(jiffies, deadline))
2243 			break;
2244 	}
2245 
2246 	/* FIXME: if we passed the deadline, the following
2247 	 * code probably produces an invalid result
2248 	 */
2249 
2250 	/* finally, read device signature from TF registers */
2251 	*class = ata_dev_try_classify(ap->link.device, 1, NULL);
2252 
2253 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2254 
2255 	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2256 
2257 	VPRINTK("EXIT\n");
2258 }
2259 
2260 static int mv_prereset(struct ata_link *link, unsigned long deadline)
2261 {
2262 	struct ata_port *ap = link->ap;
2263 	struct mv_port_priv *pp	= ap->private_data;
2264 	struct ata_eh_context *ehc = &link->eh_context;
2265 	int rc;
2266 
2267 	rc = mv_stop_dma(ap);
2268 	if (rc)
2269 		ehc->i.action |= ATA_EH_HARDRESET;
2270 
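	/* force a hardreset the first time EH runs on this port */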
2271 	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2272 		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2273 		ehc->i.action |= ATA_EH_HARDRESET;
2274 	}
2275 
2276 	/* if we're about to do hardreset, nothing more to do */
2277 	if (ehc->i.action & ATA_EH_HARDRESET)
2278 		return 0;
2279 
2280 	if (ata_link_online(link))
2281 		rc = ata_wait_ready(ap, deadline);
2282 	else
2283 		rc = -ENODEV;
2284 
2285 	return rc;
2286 }
2287 
2288 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2289 			unsigned long deadline)
2290 {
2291 	struct ata_port *ap = link->ap;
2292 	struct mv_host_priv *hpriv = ap->host->private_data;
2293 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2294 
2295 	mv_stop_dma(ap);
2296 
2297 	mv_channel_reset(hpriv, mmio, ap->port_no);
2298 
2299 	mv_phy_reset(ap, class, deadline);
2300 
2301 	return 0;
2302 }
2303 
2304 static void mv_postreset(struct ata_link *link, unsigned int *classes)
2305 {
2306 	struct ata_port *ap = link->ap;
2307 	u32 serr;
2308 
2309 	/* print link status */
2310 	sata_print_link_status(link);
2311 
2312 	/* clear SError */
2313 	sata_scr_read(link, SCR_ERROR, &serr);
2314 	sata_scr_write_flush(link, SCR_ERROR, serr);
2315 
2316 	/* bail out if no device is present */
2317 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2318 		DPRINTK("EXIT, no device\n");
2319 		return;
2320 	}
2321 
2322 	/* set up device control */
2323 	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2324 }
2325 
2326 static void mv_error_handler(struct ata_port *ap)
2327 {
2328 	ata_do_eh(ap, mv_prereset, ata_std_softreset,
2329 		  mv_hardreset, mv_postreset);
2330 }
2331 
2332 static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2333 {
2334 	mv_stop_dma(qc->ap);
2335 }
2336 
2337 static void mv_eh_freeze(struct ata_port *ap)
2338 {
2339 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2340 	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2341 	u32 tmp, mask;
2342 	unsigned int shift;
2343 
2344 	/* FIXME: handle coalescing completion events properly */
2345 
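	/* each port owns an err/done bit pair in the main IRQ mask;
	 * ports on the second HC sit one extra bit up, past HC0's
	 * coalescing-done bit
	 */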
2346 	shift = ap->port_no * 2;
2347 	if (hc > 0)
2348 		shift++;
2349 
2350 	mask = 0x3 << shift;
2351 
2352 	/* disable assertion of portN err, done events */
2353 	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2354 	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2355 }
2356 
2357 static void mv_eh_thaw(struct ata_port *ap)
2358 {
2359 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2360 	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2361 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2362 	void __iomem *port_mmio = mv_ap_base(ap);
2363 	u32 tmp, mask, hc_irq_cause;
2364 	unsigned int shift, hc_port_no = ap->port_no;
2365 
2366 	/* FIXME: handle coalescing completion events properly */
2367 
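	/* same main-mask bit layout as in mv_eh_freeze(); hc_port_no
	 * becomes the port index relative to this HC
	 */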
2368 	shift = ap->port_no * 2;
2369 	if (hc > 0) {
2370 		shift++;
2371 		hc_port_no -= 4;
2372 	}
2373 
2374 	mask = 0x3 << shift;
2375 
2376 	/* clear EDMA errors on this port */
2377 	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2378 
2379 	/* clear pending irq events */
2380 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2381 	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
2382 	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2383 	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2384 
2385 	/* enable assertion of portN err, done events */
2386 	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2387 	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2388 }
2389 
2390 /**
2391  *      mv_port_init - Perform some early initialization on a single port.
2392  *      @port: libata data structure storing shadow register addresses
2393  *      @port_mmio: base address of the port
2394  *
2395  *      Initialize shadow register mmio addresses, clear outstanding
2396  *      interrupts on the port, and unmask interrupts for the future
2397  *      start of the port.
2398  *
2399  *      LOCKING:
2400  *      Inherited from caller.
2401  */
2402 static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
2403 {
2404 	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2405 	unsigned serr_ofs;
2406 
2407 	/* PIO related setup: taskfile shadow registers are exposed as
2408 	 * 32-bit-spaced slots within the port's shadow register block */
2409 	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2410 	port->error_addr =
2411 		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2412 	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2413 	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2414 	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2415 	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2416 	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2417 	port->status_addr =
2418 		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2419 	/* special case: control/altstatus doesn't have ATA_REG_ address */
2420 	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2421 
2422 	/* unused: */
2423 	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2424 
2425 	/* Clear any currently outstanding port interrupt conditions */
2426 	serr_ofs = mv_scr_offset(SCR_ERROR);
2427 	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2428 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2429 
2430 	/* unmask all EDMA error interrupts */
2431 	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2432 
2433 	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2434 		readl(port_mmio + EDMA_CFG_OFS),
2435 		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2436 		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2437 }
2438 
2439 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2440 {
2441 	struct pci_dev *pdev = to_pci_dev(host->dev);
2442 	struct mv_host_priv *hpriv = host->private_data;
2443 	u32 hp_flags = hpriv->hp_flags;
2444 
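	/* select per-generation ops and record the chip stepping so the
	 * proper errata workarounds get applied
	 */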
2445 	switch (board_idx) {
2446 	case chip_5080:
2447 		hpriv->ops = &mv5xxx_ops;
2448 		hp_flags |= MV_HP_GEN_I;
2449 
2450 		switch (pdev->revision) {
2451 		case 0x1:
2452 			hp_flags |= MV_HP_ERRATA_50XXB0;
2453 			break;
2454 		case 0x3:
2455 			hp_flags |= MV_HP_ERRATA_50XXB2;
2456 			break;
2457 		default:
2458 			dev_printk(KERN_WARNING, &pdev->dev,
2459 			   "Applying 50XXB2 workarounds to unknown rev\n");
2460 			hp_flags |= MV_HP_ERRATA_50XXB2;
2461 			break;
2462 		}
2463 		break;
2464 
2465 	case chip_504x:
2466 	case chip_508x:
2467 		hpriv->ops = &mv5xxx_ops;
2468 		hp_flags |= MV_HP_GEN_I;
2469 
2470 		switch (pdev->revision) {
2471 		case 0x0:
2472 			hp_flags |= MV_HP_ERRATA_50XXB0;
2473 			break;
2474 		case 0x3:
2475 			hp_flags |= MV_HP_ERRATA_50XXB2;
2476 			break;
2477 		default:
2478 			dev_printk(KERN_WARNING, &pdev->dev,
2479 			   "Applying B2 workarounds to unknown rev\n");
2480 			hp_flags |= MV_HP_ERRATA_50XXB2;
2481 			break;
2482 		}
2483 		break;
2484 
2485 	case chip_604x:
2486 	case chip_608x:
2487 		hpriv->ops = &mv6xxx_ops;
2488 		hp_flags |= MV_HP_GEN_II;
2489 
2490 		switch (pdev->revision) {
2491 		case 0x7:
2492 			hp_flags |= MV_HP_ERRATA_60X1B2;
2493 			break;
2494 		case 0x9:
2495 			hp_flags |= MV_HP_ERRATA_60X1C0;
2496 			break;
2497 		default:
2498 			dev_printk(KERN_WARNING, &pdev->dev,
2499 				   "Applying B2 workarounds to unknown rev\n");
2500 			hp_flags |= MV_HP_ERRATA_60X1B2;
2501 			break;
2502 		}
2503 		break;
2504 
2505 	case chip_7042:
2506 	case chip_6042:
2507 		hpriv->ops = &mv6xxx_ops;
2508 		hp_flags |= MV_HP_GEN_IIE;
2509 
2510 		switch (pdev->revision) {
2511 		case 0x0:
2512 			hp_flags |= MV_HP_ERRATA_XX42A0;
2513 			break;
2514 		case 0x1:
2515 			hp_flags |= MV_HP_ERRATA_60X1C0;
2516 			break;
2517 		default:
2518 			dev_printk(KERN_WARNING, &pdev->dev,
2519 			   "Applying 60X1C0 workarounds to unknown rev\n");
2520 			hp_flags |= MV_HP_ERRATA_60X1C0;
2521 			break;
2522 		}
2523 		break;
2524 
2525 	default:
2526 		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2527 		return 1;
2528 	}
2529 
2530 	hpriv->hp_flags = hp_flags;
2531 
2532 	return 0;
2533 }
2534 
2535 /**
2536  *      mv_init_host - Perform some early initialization of the host.
2537  *      @host: ATA host to initialize
2538  *      @board_idx: controller index
2539  *
2540  *      If possible, do an early global reset of the host.  Then do
2541  *      our port init and clear/unmask all relevant host interrupts.
2542  *
2543  *      LOCKING:
2544  *      Inherited from caller.
2545  */
2546 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2547 {
2548 	int rc = 0, n_hc, port, hc;
2549 	struct pci_dev *pdev = to_pci_dev(host->dev);
2550 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2551 	struct mv_host_priv *hpriv = host->private_data;
2552 
2553 	/* global interrupt mask */
2554 	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2555 
2556 	rc = mv_chip_id(host, board_idx);
2557 	if (rc)
2558 		goto done;
2559 
2560 	n_hc = mv_get_hc_count(host->ports[0]->flags);
2561 
2562 	for (port = 0; port < host->n_ports; port++)
2563 		hpriv->ops->read_preamp(hpriv, port, mmio);
2564 
2565 	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2566 	if (rc)
2567 		goto done;
2568 
2569 	hpriv->ops->reset_flash(hpriv, mmio);
2570 	hpriv->ops->reset_bus(pdev, mmio);
2571 	hpriv->ops->enable_leds(hpriv, mmio);
2572 
2573 	for (port = 0; port < host->n_ports; port++) {
2574 		if (IS_GEN_II(hpriv)) {
2575 			void __iomem *port_mmio = mv_port_base(mmio, port);
2576 
2577 			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2578 			ifctl |= (1 << 7);		/* enable gen2i speed */
2579 			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2580 			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2581 		}
2582 
2583 		hpriv->ops->phy_errata(hpriv, mmio, port);
2584 	}
2585 
2586 	for (port = 0; port < host->n_ports; port++) {
2587 		struct ata_port *ap = host->ports[port];
2588 		void __iomem *port_mmio = mv_port_base(mmio, port);
2589 		unsigned int offset = port_mmio - mmio;
2590 
2591 		mv_port_init(&ap->ioaddr, port_mmio);
2592 
2593 		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2594 		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2595 	}
2596 
2597 	for (hc = 0; hc < n_hc; hc++) {
2598 		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2599 
2600 		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2601 			"(before clear)=0x%08x\n", hc,
2602 			readl(hc_mmio + HC_CFG_OFS),
2603 			readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2604 
2605 		/* Clear any currently outstanding hc interrupt conditions */
2606 		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2607 	}
2608 
2609 	/* Clear any currently outstanding host interrupt conditions */
2610 	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2611 
2612 	/* and unmask interrupt generation for host regs */
2613 	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
2614 
2615 	if (IS_GEN_I(hpriv))
2616 		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2617 	else
2618 		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2619 
2620 	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2621 		"PCI int cause/mask=0x%08x/0x%08x\n",
2622 		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2623 		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2624 		readl(mmio + PCI_IRQ_CAUSE_OFS),
2625 		readl(mmio + PCI_IRQ_MASK_OFS));
2626 
2627 done:
2628 	return rc;
2629 }
2630 
2631 /**
2632  *      mv_print_info - Dump key info to kernel log for perusal.
2633  *      @host: ATA host to print info about
2634  *
2635  *      FIXME: complete this.
2636  *
2637  *      LOCKING:
2638  *      Inherited from caller.
2639  */
2640 static void mv_print_info(struct ata_host *host)
2641 {
2642 	struct pci_dev *pdev = to_pci_dev(host->dev);
2643 	struct mv_host_priv *hpriv = host->private_data;
2644 	u8 scc;
2645 	const char *scc_s, *gen;
2646 
2647 	/* Read the PCI subclass byte to report whether the controller
2648 	 * presents itself in SCSI or RAID mode.
2649 	 */
2650 	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2651 	if (scc == 0)
2652 		scc_s = "SCSI";
2653 	else if (scc == 0x01)
2654 		scc_s = "RAID";
2655 	else
2656 		scc_s = "?";
2657 
2658 	if (IS_GEN_I(hpriv))
2659 		gen = "I";
2660 	else if (IS_GEN_II(hpriv))
2661 		gen = "II";
2662 	else if (IS_GEN_IIE(hpriv))
2663 		gen = "IIE";
2664 	else
2665 		gen = "?";
2666 
2667 	dev_printk(KERN_INFO, &pdev->dev,
2668 	       "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2669 	       gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2670 	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2671 }
2672 
2673 /**
2674  *      mv_init_one - handle a positive probe of a Marvell host
2675  *      @pdev: PCI device found
2676  *      @ent: PCI device ID entry for the matched host
2677  *
2678  *      LOCKING:
2679  *      Inherited from caller.
2680  */
2681 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2682 {
2683 	static int printed_version;
2684 	unsigned int board_idx = (unsigned int)ent->driver_data;
2685 	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2686 	struct ata_host *host;
2687 	struct mv_host_priv *hpriv;
2688 	int n_ports, rc;
2689 
2690 	if (!printed_version++)
2691 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2692 
2693 	/* allocate host */
2694 	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2695 
2696 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2697 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2698 	if (!host || !hpriv)
2699 		return -ENOMEM;
2700 	host->private_data = hpriv;
2701 
2702 	/* acquire resources */
2703 	rc = pcim_enable_device(pdev);
2704 	if (rc)
2705 		return rc;
2706 
2707 	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2708 	if (rc == -EBUSY)
2709 		pcim_pin_device(pdev);
2710 	if (rc)
2711 		return rc;
2712 	host->iomap = pcim_iomap_table(pdev);
2713 
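	/* use a 64-bit DMA mask if the device supports it, else 32-bit */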
2714 	rc = pci_go_64(pdev);
2715 	if (rc)
2716 		return rc;
2717 
2718 	/* initialize adapter */
2719 	rc = mv_init_host(host, board_idx);
2720 	if (rc)
2721 		return rc;
2722 
2723 	/* Enable interrupts; fall back to legacy INTx if MSI setup fails */
2724 	if (msi && pci_enable_msi(pdev))
2725 		pci_intx(pdev, 1);
2726 
2727 	mv_dump_pci_cfg(pdev, 0x68);
2728 	mv_print_info(host);
2729 
2730 	pci_set_master(pdev);
2731 	pci_try_set_mwi(pdev);
2732 	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2733 				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
2734 }
2735 
2736 static int __init mv_init(void)
2737 {
2738 	return pci_register_driver(&mv_pci_driver);
2739 }
2740 
2741 static void __exit mv_exit(void)
2742 {
2743 	pci_unregister_driver(&mv_pci_driver);
2744 }
2745 
2746 MODULE_AUTHOR("Brett Russ");
2747 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2748 MODULE_LICENSE("GPL");
2749 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2750 MODULE_VERSION(DRV_VERSION);
2751 
2752 module_param(msi, int, 0444);
2753 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2754 
2755 module_init(mv_init);
2756 module_exit(mv_exit);
2757