xref: /linux/drivers/ata/sata_mv.c (revision cd354f1ae75e6466a7e31b727faede57a1f89ca5)
1 /*
2  * sata_mv.c - Marvell SATA support
3  *
4  * Copyright 2005: EMC Corporation, all rights reserved.
5  * Copyright 2005 Red Hat, Inc.  All rights reserved.
6  *
7  * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; version 2 of the License.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23 
24 #include <linux/kernel.h>
25 #include <linux/module.h>
26 #include <linux/pci.h>
27 #include <linux/init.h>
28 #include <linux/blkdev.h>
29 #include <linux/delay.h>
30 #include <linux/interrupt.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/device.h>
33 #include <scsi/scsi_host.h>
34 #include <scsi/scsi_cmnd.h>
35 #include <linux/libata.h>
36 
37 #define DRV_NAME	"sata_mv"
38 #define DRV_VERSION	"0.7"
39 
40 enum {
41 	/* BARs are enumerated in pci_resource_start() terms */
42 	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
43 	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
44 	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */
45 
46 	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
47 	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */
48 
49 	MV_PCI_REG_BASE		= 0,
50 	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
51 	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
52 	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
53 	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
54 	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
55 	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),
56 
57 	MV_SATAHC0_REG_BASE	= 0x20000,
58 	MV_FLASH_CTL		= 0x1046c,
59 	MV_GPIO_PORT_CTL	= 0x104f0,
60 	MV_RESET_CFG		= 0x180d8,
61 
62 	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
63 	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
64 	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
65 	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
66 
67 	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,
68 
69 	MV_MAX_Q_DEPTH		= 32,
70 	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
71 
72 	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
73 	 * CRPB needs alignment on a 256B boundary. Size == 256B
74 	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
75 	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
76 	 */
77 	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
78 	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
79 	MV_MAX_SG_CT		= 176,
80 	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
81 	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
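	/* i.e. 32*32 + 8*32 + 16*176 = 1024 + 256 + 2816 = 4096 bytes per port */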
82 
83 	MV_PORTS_PER_HC		= 4,
84 	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
85 	MV_PORT_HC_SHIFT	= 2,
86 	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
87 	MV_PORT_MASK		= 3,
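	/* e.g. port 5: HC = 5 >> 2 = 1, hard port = 5 & 3 = 1 */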
88 
89 	/* Host Flags */
90 	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
91 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
92 	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
93 				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
94 				   ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
95 	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,
96 
97 	CRQB_FLAG_READ		= (1 << 0),
98 	CRQB_TAG_SHIFT		= 1,
99 	CRQB_CMD_ADDR_SHIFT	= 8,
100 	CRQB_CMD_CS		= (0x2 << 11),
101 	CRQB_CMD_LAST		= (1 << 15),
102 
103 	CRPB_FLAG_STATUS_SHIFT	= 8,
104 
105 	EPRD_FLAG_END_OF_TBL	= (1 << 31),
106 
107 	/* PCI interface registers */
108 
109 	PCI_COMMAND_OFS		= 0xc00,
110 
111 	PCI_MAIN_CMD_STS_OFS	= 0xd30,
112 	STOP_PCI_MASTER		= (1 << 2),
113 	PCI_MASTER_EMPTY	= (1 << 3),
114 	GLOB_SFT_RST		= (1 << 4),
115 
116 	MV_PCI_MODE		= 0xd00,
117 	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
118 	MV_PCI_DISC_TIMER	= 0xd04,
119 	MV_PCI_MSI_TRIGGER	= 0xc38,
120 	MV_PCI_SERR_MASK	= 0xc28,
121 	MV_PCI_XBAR_TMOUT	= 0x1d04,
122 	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
123 	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
124 	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
125 	MV_PCI_ERR_COMMAND	= 0x1d50,
126 
127 	PCI_IRQ_CAUSE_OFS		= 0x1d58,
128 	PCI_IRQ_MASK_OFS		= 0x1d5c,
129 	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */
130 
131 	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
132 	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
133 	PORT0_ERR		= (1 << 0),	/* shift by port # */
134 	PORT0_DONE		= (1 << 1),	/* shift by port # */
135 	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
136 	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
137 	PCI_ERR			= (1 << 18),
138 	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
139 	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
140 	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
141 	GPIO_INT		= (1 << 22),
142 	SELF_INT		= (1 << 23),
143 	TWSI_INT		= (1 << 24),
144 	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
145 	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
146 				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
147 				   HC_MAIN_RSVD),
148 
149 	/* SATAHC registers */
150 	HC_CFG_OFS		= 0,
151 
152 	HC_IRQ_CAUSE_OFS	= 0x14,
153 	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
154 	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
155 	DEV_IRQ			= (1 << 8),	/* shift by port # */
156 
157 	/* Shadow block registers */
158 	SHD_BLK_OFS		= 0x100,
159 	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */
160 
161 	/* SATA registers */
162 	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
163 	SATA_ACTIVE_OFS		= 0x350,
164 	PHY_MODE3		= 0x310,
165 	PHY_MODE4		= 0x314,
166 	PHY_MODE2		= 0x330,
167 	MV5_PHY_MODE		= 0x74,
168 	MV5_LT_MODE		= 0x30,
169 	MV5_PHY_CTL		= 0x0C,
170 	SATA_INTERFACE_CTL	= 0x050,
171 
172 	MV_M2_PREAMP_MASK	= 0x7e0,
173 
174 	/* Port registers */
175 	EDMA_CFG_OFS		= 0,
176 	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
177 	EDMA_CFG_NCQ		= (1 << 5),
178 	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
179 	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
180 	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */
181 
182 	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
183 	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
184 	EDMA_ERR_D_PAR		= (1 << 0),
185 	EDMA_ERR_PRD_PAR	= (1 << 1),
186 	EDMA_ERR_DEV		= (1 << 2),
187 	EDMA_ERR_DEV_DCON	= (1 << 3),
188 	EDMA_ERR_DEV_CON	= (1 << 4),
189 	EDMA_ERR_SERR		= (1 << 5),
190 	EDMA_ERR_SELF_DIS	= (1 << 7),
191 	EDMA_ERR_BIST_ASYNC	= (1 << 8),
192 	EDMA_ERR_CRBQ_PAR	= (1 << 9),
193 	EDMA_ERR_CRPB_PAR	= (1 << 10),
194 	EDMA_ERR_INTRL_PAR	= (1 << 11),
195 	EDMA_ERR_IORDY		= (1 << 12),
196 	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
197 	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
198 	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
199 	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
200 	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
201 	EDMA_ERR_TRANS_PROTO	= (1 << 31),
202 	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
203 				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
204 				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
205 				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
206 				   EDMA_ERR_LNK_DATA_RX |
207 				   EDMA_ERR_LNK_DATA_TX |
208 				   EDMA_ERR_TRANS_PROTO),
209 
210 	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
211 	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
212 
213 	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
214 	EDMA_REQ_Q_PTR_SHIFT	= 5,
215 
216 	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
217 	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
218 	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
219 	EDMA_RSP_Q_PTR_SHIFT	= 3,
220 
221 	EDMA_CMD_OFS		= 0x28,
222 	EDMA_EN			= (1 << 0),
223 	EDMA_DS			= (1 << 1),
224 	ATA_RST			= (1 << 2),
225 
226 	EDMA_IORDY_TMOUT	= 0x34,
227 	EDMA_ARB_CFG		= 0x38,
228 
229 	/* Host private flags (hp_flags) */
230 	MV_HP_FLAG_MSI		= (1 << 0),
231 	MV_HP_ERRATA_50XXB0	= (1 << 1),
232 	MV_HP_ERRATA_50XXB2	= (1 << 2),
233 	MV_HP_ERRATA_60X1B2	= (1 << 3),
234 	MV_HP_ERRATA_60X1C0	= (1 << 4),
235 	MV_HP_ERRATA_XX42A0	= (1 << 5),
236 	MV_HP_50XX		= (1 << 6),
237 	MV_HP_GEN_IIE		= (1 << 7),
238 
239 	/* Port private flags (pp_flags) */
240 	MV_PP_FLAG_EDMA_EN	= (1 << 0),
241 	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
242 };
243 
244 #define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
245 #define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
246 #define IS_GEN_I(hpriv) IS_50XX(hpriv)
247 #define IS_GEN_II(hpriv) IS_60XX(hpriv)
248 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
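/* Gen I covers the 50xx parts and Gen II the 60xx parts; note that the Gen IIE
 * chips (the 6042/7042, which use mv_iie_ops below) are not 50xx parts, so
 * IS_GEN_II() is also true for them.
 */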
249 
250 enum {
251 	/* Our DMA boundary is determined by an ePRD being unable to handle
252 	 * anything larger than 64KB
253 	 */
254 	MV_DMA_BOUNDARY		= 0xffffU,
255 
256 	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,
257 
258 	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
259 };
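/* MV_DMA_BOUNDARY is also handed to the block layer via mv_sht.dma_boundary,
 * so SG segments should already avoid crossing a 64KB boundary; mv_fill_sg()
 * still splits any segment that does.
 */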
260 
261 enum chip_type {
262 	chip_504x,
263 	chip_508x,
264 	chip_5080,
265 	chip_604x,
266 	chip_608x,
267 	chip_6042,
268 	chip_7042,
269 };
270 
271 /* Command ReQuest Block: 32B */
272 struct mv_crqb {
273 	__le32			sg_addr;
274 	__le32			sg_addr_hi;
275 	__le16			ctrl_flags;
276 	__le16			ata_cmd[11];
277 };
278 
279 struct mv_crqb_iie {
280 	__le32			addr;
281 	__le32			addr_hi;
282 	__le32			flags;
283 	__le32			len;
284 	__le32			ata_cmd[4];
285 };
286 
287 /* Command ResPonse Block: 8B */
288 struct mv_crpb {
289 	__le16			id;
290 	__le16			flags;
291 	__le32			tmstmp;
292 };
293 
294 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
295 struct mv_sg {
296 	__le32			addr;
297 	__le32			flags_size;
298 	__le32			addr_hi;
299 	__le32			reserved;
300 };
301 
302 struct mv_port_priv {
303 	struct mv_crqb		*crqb;
304 	dma_addr_t		crqb_dma;
305 	struct mv_crpb		*crpb;
306 	dma_addr_t		crpb_dma;
307 	struct mv_sg		*sg_tbl;
308 	dma_addr_t		sg_tbl_dma;
309 	u32			pp_flags;
310 };
311 
312 struct mv_port_signal {
313 	u32			amps;
314 	u32			pre;
315 };
316 
317 struct mv_host_priv;
318 struct mv_hw_ops {
319 	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
320 			   unsigned int port);
321 	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
322 	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
323 			   void __iomem *mmio);
324 	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
325 			unsigned int n_hc);
326 	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
327 	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
328 };
329 
330 struct mv_host_priv {
331 	u32			hp_flags;
332 	struct mv_port_signal	signal[8];
333 	const struct mv_hw_ops	*ops;
334 };
335 
336 static void mv_irq_clear(struct ata_port *ap);
337 static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
338 static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
339 static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
340 static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
341 static void mv_phy_reset(struct ata_port *ap);
342 static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
343 static int mv_port_start(struct ata_port *ap);
344 static void mv_port_stop(struct ata_port *ap);
345 static void mv_qc_prep(struct ata_queued_cmd *qc);
346 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
347 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
348 static irqreturn_t mv_interrupt(int irq, void *dev_instance);
349 static void mv_eng_timeout(struct ata_port *ap);
350 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
351 
352 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
353 			   unsigned int port);
354 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
355 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
356 			   void __iomem *mmio);
357 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
358 			unsigned int n_hc);
359 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
360 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
361 
362 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
363 			   unsigned int port);
364 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
365 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
366 			   void __iomem *mmio);
367 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
368 			unsigned int n_hc);
369 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
370 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
371 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
372 			     unsigned int port_no);
373 static void mv_stop_and_reset(struct ata_port *ap);
374 
375 static struct scsi_host_template mv_sht = {
376 	.module			= THIS_MODULE,
377 	.name			= DRV_NAME,
378 	.ioctl			= ata_scsi_ioctl,
379 	.queuecommand		= ata_scsi_queuecmd,
380 	.can_queue		= MV_USE_Q_DEPTH,
381 	.this_id		= ATA_SHT_THIS_ID,
382 	.sg_tablesize		= MV_MAX_SG_CT / 2,
383 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
384 	.emulated		= ATA_SHT_EMULATED,
385 	.use_clustering		= ATA_SHT_USE_CLUSTERING,
386 	.proc_name		= DRV_NAME,
387 	.dma_boundary		= MV_DMA_BOUNDARY,
388 	.slave_configure	= ata_scsi_slave_config,
389 	.slave_destroy		= ata_scsi_slave_destroy,
390 	.bios_param		= ata_std_bios_param,
391 };
392 
393 static const struct ata_port_operations mv5_ops = {
394 	.port_disable		= ata_port_disable,
395 
396 	.tf_load		= ata_tf_load,
397 	.tf_read		= ata_tf_read,
398 	.check_status		= ata_check_status,
399 	.exec_command		= ata_exec_command,
400 	.dev_select		= ata_std_dev_select,
401 
402 	.phy_reset		= mv_phy_reset,
403 
404 	.qc_prep		= mv_qc_prep,
405 	.qc_issue		= mv_qc_issue,
406 	.data_xfer		= ata_data_xfer,
407 
408 	.eng_timeout		= mv_eng_timeout,
409 
410 	.irq_handler		= mv_interrupt,
411 	.irq_clear		= mv_irq_clear,
412 	.irq_on			= ata_irq_on,
413 	.irq_ack		= ata_irq_ack,
414 
415 	.scr_read		= mv5_scr_read,
416 	.scr_write		= mv5_scr_write,
417 
418 	.port_start		= mv_port_start,
419 	.port_stop		= mv_port_stop,
420 };
421 
422 static const struct ata_port_operations mv6_ops = {
423 	.port_disable		= ata_port_disable,
424 
425 	.tf_load		= ata_tf_load,
426 	.tf_read		= ata_tf_read,
427 	.check_status		= ata_check_status,
428 	.exec_command		= ata_exec_command,
429 	.dev_select		= ata_std_dev_select,
430 
431 	.phy_reset		= mv_phy_reset,
432 
433 	.qc_prep		= mv_qc_prep,
434 	.qc_issue		= mv_qc_issue,
435 	.data_xfer		= ata_data_xfer,
436 
437 	.eng_timeout		= mv_eng_timeout,
438 
439 	.irq_handler		= mv_interrupt,
440 	.irq_clear		= mv_irq_clear,
441 	.irq_on			= ata_irq_on,
442 	.irq_ack		= ata_irq_ack,
443 
444 	.scr_read		= mv_scr_read,
445 	.scr_write		= mv_scr_write,
446 
447 	.port_start		= mv_port_start,
448 	.port_stop		= mv_port_stop,
449 };
450 
451 static const struct ata_port_operations mv_iie_ops = {
452 	.port_disable		= ata_port_disable,
453 
454 	.tf_load		= ata_tf_load,
455 	.tf_read		= ata_tf_read,
456 	.check_status		= ata_check_status,
457 	.exec_command		= ata_exec_command,
458 	.dev_select		= ata_std_dev_select,
459 
460 	.phy_reset		= mv_phy_reset,
461 
462 	.qc_prep		= mv_qc_prep_iie,
463 	.qc_issue		= mv_qc_issue,
464 	.data_xfer		= ata_data_xfer,
465 
466 	.eng_timeout		= mv_eng_timeout,
467 
468 	.irq_handler		= mv_interrupt,
469 	.irq_clear		= mv_irq_clear,
470 	.irq_on			= ata_irq_on,
471 	.irq_ack		= ata_irq_ack,
472 
473 	.scr_read		= mv_scr_read,
474 	.scr_write		= mv_scr_write,
475 
476 	.port_start		= mv_port_start,
477 	.port_stop		= mv_port_stop,
478 };
479 
480 static const struct ata_port_info mv_port_info[] = {
481 	{  /* chip_504x */
482 		.sht		= &mv_sht,
483 		.flags		= MV_COMMON_FLAGS,
484 		.pio_mask	= 0x1f,	/* pio0-4 */
485 		.udma_mask	= 0x7f,	/* udma0-6 */
486 		.port_ops	= &mv5_ops,
487 	},
488 	{  /* chip_508x */
489 		.sht		= &mv_sht,
490 		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
491 		.pio_mask	= 0x1f,	/* pio0-4 */
492 		.udma_mask	= 0x7f,	/* udma0-6 */
493 		.port_ops	= &mv5_ops,
494 	},
495 	{  /* chip_5080 */
496 		.sht		= &mv_sht,
497 		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
498 		.pio_mask	= 0x1f,	/* pio0-4 */
499 		.udma_mask	= 0x7f,	/* udma0-6 */
500 		.port_ops	= &mv5_ops,
501 	},
502 	{  /* chip_604x */
503 		.sht		= &mv_sht,
504 		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
505 		.pio_mask	= 0x1f,	/* pio0-4 */
506 		.udma_mask	= 0x7f,	/* udma0-6 */
507 		.port_ops	= &mv6_ops,
508 	},
509 	{  /* chip_608x */
510 		.sht		= &mv_sht,
511 		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
512 				   MV_FLAG_DUAL_HC),
513 		.pio_mask	= 0x1f,	/* pio0-4 */
514 		.udma_mask	= 0x7f,	/* udma0-6 */
515 		.port_ops	= &mv6_ops,
516 	},
517 	{  /* chip_6042 */
518 		.sht		= &mv_sht,
519 		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
520 		.pio_mask	= 0x1f,	/* pio0-4 */
521 		.udma_mask	= 0x7f,	/* udma0-6 */
522 		.port_ops	= &mv_iie_ops,
523 	},
524 	{  /* chip_7042 */
525 		.sht		= &mv_sht,
526 		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
527 		.pio_mask	= 0x1f,	/* pio0-4 */
528 		.udma_mask	= 0x7f,	/* udma0-6 */
529 		.port_ops	= &mv_iie_ops,
530 	},
531 };
532 
533 static const struct pci_device_id mv_pci_tbl[] = {
534 	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
535 	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
536 	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
537 	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
538 
539 	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
540 	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
541 	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
542 	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
543 	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
544 
545 	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
546 
547 	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
548 
549 	{ }			/* terminate list */
550 };
551 
552 static struct pci_driver mv_pci_driver = {
553 	.name			= DRV_NAME,
554 	.id_table		= mv_pci_tbl,
555 	.probe			= mv_init_one,
556 	.remove			= ata_pci_remove_one,
557 };
558 
559 static const struct mv_hw_ops mv5xxx_ops = {
560 	.phy_errata		= mv5_phy_errata,
561 	.enable_leds		= mv5_enable_leds,
562 	.read_preamp		= mv5_read_preamp,
563 	.reset_hc		= mv5_reset_hc,
564 	.reset_flash		= mv5_reset_flash,
565 	.reset_bus		= mv5_reset_bus,
566 };
567 
568 static const struct mv_hw_ops mv6xxx_ops = {
569 	.phy_errata		= mv6_phy_errata,
570 	.enable_leds		= mv6_enable_leds,
571 	.read_preamp		= mv6_read_preamp,
572 	.reset_hc		= mv6_reset_hc,
573 	.reset_flash		= mv6_reset_flash,
574 	.reset_bus		= mv_reset_pci_bus,
575 };
576 
577 /*
578  * module options
579  */
580 static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
581 
582 
583 /*
584  * Functions
585  */
586 
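/* Write-then-flush helper: used wherever a register write must reach the chip
 * before we proceed, e.g. when bumping the EDMA request queue in-pointer.
 */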
587 static inline void writelfl(unsigned long data, void __iomem *addr)
588 {
589 	writel(data, addr);
590 	(void) readl(addr);	/* flush to avoid PCI posted write */
591 }
592 
593 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
594 {
595 	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
596 }
597 
598 static inline unsigned int mv_hc_from_port(unsigned int port)
599 {
600 	return port >> MV_PORT_HC_SHIFT;
601 }
602 
603 static inline unsigned int mv_hardport_from_port(unsigned int port)
604 {
605 	return port & MV_PORT_MASK;
606 }
607 
608 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
609 						 unsigned int port)
610 {
611 	return mv_hc_base(base, mv_hc_from_port(port));
612 }
613 
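/* Per-port register base, e.g. port 5: 0x20000 + 1*0x10000 (HC 1) + 0x2000
 * (arbiter) + 1*0x2000 (hard port 1) = offset 0x34000 from the main BAR.
 */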
614 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
615 {
616 	return  mv_hc_base_from_port(base, port) +
617 		MV_SATAHC_ARBTR_REG_SZ +
618 		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
619 }
620 
621 static inline void __iomem *mv_ap_base(struct ata_port *ap)
622 {
623 	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
624 }
625 
626 static inline int mv_get_hc_count(unsigned long port_flags)
627 {
628 	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
629 }
630 
631 static void mv_irq_clear(struct ata_port *ap)
632 {
633 }
634 
635 /**
636  *      mv_start_dma - Enable eDMA engine
637  *      @base: port base address
638  *      @pp: port private data
639  *
640  *      Verify the local cache of the eDMA state is accurate with a
641  *      WARN_ON.
642  *
643  *      LOCKING:
644  *      Inherited from caller.
645  */
646 static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
647 {
648 	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
649 		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
650 		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
651 	}
652 	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
653 }
654 
655 /**
656  *      mv_stop_dma - Disable eDMA engine
657  *      @ap: ATA channel to manipulate
658  *
659  *      Verify the local cache of the eDMA state is accurate with a
660  *      WARN_ON.
661  *
662  *      LOCKING:
663  *      Inherited from caller.
664  */
665 static void mv_stop_dma(struct ata_port *ap)
666 {
667 	void __iomem *port_mmio = mv_ap_base(ap);
668 	struct mv_port_priv *pp	= ap->private_data;
669 	u32 reg;
670 	int i;
671 
672 	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
673 		/* Disable EDMA if active.   The disable bit auto clears.
674 		 */
675 		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
676 		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
677 	} else {
678 		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
679 	}
680 
681 	/* now properly wait for the eDMA to stop (up to 1000 * 100us) */
682 	for (i = 1000; i > 0; i--) {
683 		reg = readl(port_mmio + EDMA_CMD_OFS);
684 		if (!(EDMA_EN & reg)) {
685 			break;
686 		}
687 		udelay(100);
688 	}
689 
690 	if (EDMA_EN & reg) {
691 		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
692 		/* FIXME: Consider doing a reset here to recover */
693 	}
694 }
695 
696 #ifdef ATA_DEBUG
697 static void mv_dump_mem(void __iomem *start, unsigned bytes)
698 {
699 	int b, w;
700 	for (b = 0; b < bytes; ) {
701 		DPRINTK("%p: ", start + b);
702 		for (w = 0; b < bytes && w < 4; w++) {
703 			printk("%08x ",readl(start + b));
704 			b += sizeof(u32);
705 		}
706 		printk("\n");
707 	}
708 }
709 #endif
710 
711 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
712 {
713 #ifdef ATA_DEBUG
714 	int b, w;
715 	u32 dw;
716 	for (b = 0; b < bytes; ) {
717 		DPRINTK("%02x: ", b);
718 		for (w = 0; b < bytes && w < 4; w++) {
719 			(void) pci_read_config_dword(pdev,b,&dw);
720 			printk("%08x ",dw);
721 			b += sizeof(u32);
722 		}
723 		printk("\n");
724 	}
725 #endif
726 }
727 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
728 			     struct pci_dev *pdev)
729 {
730 #ifdef ATA_DEBUG
731 	void __iomem *hc_base = mv_hc_base(mmio_base,
732 					   port >> MV_PORT_HC_SHIFT);
733 	void __iomem *port_base;
734 	int start_port, num_ports, p, start_hc, num_hcs, hc;
735 
736 	if (0 > port) {
737 		start_hc = start_port = 0;
738 		num_ports = 8;		/* should be benign for 4-port devices */
739 		num_hcs = 2;
740 	} else {
741 		start_hc = port >> MV_PORT_HC_SHIFT;
742 		start_port = port;
743 		num_ports = num_hcs = 1;
744 	}
745 	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
746 		num_ports > 1 ? num_ports - 1 : start_port);
747 
748 	if (NULL != pdev) {
749 		DPRINTK("PCI config space regs:\n");
750 		mv_dump_pci_cfg(pdev, 0x68);
751 	}
752 	DPRINTK("PCI regs:\n");
753 	mv_dump_mem(mmio_base+0xc00, 0x3c);
754 	mv_dump_mem(mmio_base+0xd00, 0x34);
755 	mv_dump_mem(mmio_base+0xf00, 0x4);
756 	mv_dump_mem(mmio_base+0x1d00, 0x6c);
757 	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
758 		hc_base = mv_hc_base(mmio_base, hc);
759 		DPRINTK("HC regs (HC %i):\n", hc);
760 		mv_dump_mem(hc_base, 0x1c);
761 	}
762 	for (p = start_port; p < start_port + num_ports; p++) {
763 		port_base = mv_port_base(mmio_base, p);
764 		DPRINTK("EDMA regs (port %i):\n",p);
765 		mv_dump_mem(port_base, 0x54);
766 		DPRINTK("SATA regs (port %i):\n",p);
767 		mv_dump_mem(port_base+0x300, 0x60);
768 	}
769 #endif
770 }
771 
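/* Map libata SCR_* numbers onto port register offsets; with the usual
 * numbering (SCR_STATUS=0, SCR_ERROR=1, SCR_CONTROL=2) these land at
 * 0x300, 0x304 and 0x308 respectively.
 */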
772 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
773 {
774 	unsigned int ofs;
775 
776 	switch (sc_reg_in) {
777 	case SCR_STATUS:
778 	case SCR_CONTROL:
779 	case SCR_ERROR:
780 		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
781 		break;
782 	case SCR_ACTIVE:
783 		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
784 		break;
785 	default:
786 		ofs = 0xffffffffU;
787 		break;
788 	}
789 	return ofs;
790 }
791 
792 static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
793 {
794 	unsigned int ofs = mv_scr_offset(sc_reg_in);
795 
796 	if (0xffffffffU != ofs) {
797 		return readl(mv_ap_base(ap) + ofs);
798 	} else {
799 		return (u32) ofs;
800 	}
801 }
802 
803 static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
804 {
805 	unsigned int ofs = mv_scr_offset(sc_reg_in);
806 
807 	if (0xffffffffU != ofs) {
808 		writelfl(val, mv_ap_base(ap) + ofs);
809 	}
810 }
811 
812 static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
813 {
814 	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
815 
816 	/* set up non-NCQ EDMA configuration */
817 	cfg &= ~0x1f;		/* clear queue depth */
818 	cfg &= ~EDMA_CFG_NCQ;	/* clear NCQ mode */
819 	cfg &= ~(1 << 9);	/* disable equeue */
820 
821 	if (IS_GEN_I(hpriv))
822 		cfg |= (1 << 8);	/* enab config burst size mask */
823 
824 	else if (IS_GEN_II(hpriv))
825 		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
826 
827 	else if (IS_GEN_IIE(hpriv)) {
828 		cfg |= (1 << 23);	/* dis RX PM port mask */
829 		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
830 		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
831 		cfg |= (1 << 18);	/* enab early completion */
832 		cfg |= (1 << 17);	/* enab host q cache */
833 		cfg |= (1 << 22);	/* enab cutthrough */
834 	}
835 
836 	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
837 }
838 
839 /**
840  *      mv_port_start - Port specific init/start routine.
841  *      @ap: ATA channel to manipulate
842  *
843  *      Allocate and point to DMA memory, init port private memory,
844  *      zero indices.
845  *
846  *      LOCKING:
847  *      Inherited from caller.
848  */
849 static int mv_port_start(struct ata_port *ap)
850 {
851 	struct device *dev = ap->host->dev;
852 	struct mv_host_priv *hpriv = ap->host->private_data;
853 	struct mv_port_priv *pp;
854 	void __iomem *port_mmio = mv_ap_base(ap);
855 	void *mem;
856 	dma_addr_t mem_dma;
857 	int rc;
858 
859 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
860 	if (!pp)
861 		return -ENOMEM;
862 
863 	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
864 				  GFP_KERNEL);
865 	if (!mem)
866 		return -ENOMEM;
867 	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
868 
869 	rc = ata_pad_alloc(ap, dev);
870 	if (rc)
871 		return rc;
872 
873 	/* First item in chunk of DMA memory:
874 	 * 32-slot command request table (CRQB), 32 bytes each in size
875 	 */
876 	pp->crqb = mem;
877 	pp->crqb_dma = mem_dma;
878 	mem += MV_CRQB_Q_SZ;
879 	mem_dma += MV_CRQB_Q_SZ;
880 
881 	/* Second item:
882 	 * 32-slot command response table (CRPB), 8 bytes each in size
883 	 */
884 	pp->crpb = mem;
885 	pp->crpb_dma = mem_dma;
886 	mem += MV_CRPB_Q_SZ;
887 	mem_dma += MV_CRPB_Q_SZ;
888 
889 	/* Third item:
890 	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
891 	 */
892 	pp->sg_tbl = mem;
893 	pp->sg_tbl_dma = mem_dma;
894 
895 	mv_edma_cfg(hpriv, port_mmio);
896 
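	/* Note: "(x >> 16) >> 16" yields the upper 32 bits without a shift by
	 * 32, which would be undefined when dma_addr_t is only 32 bits wide.
	 */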
897 	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
898 	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
899 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
900 
901 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
902 		writelfl(pp->crqb_dma & 0xffffffff,
903 			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
904 	else
905 		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
906 
907 	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
908 
909 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
910 		writelfl(pp->crpb_dma & 0xffffffff,
911 			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
912 	else
913 		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
914 
915 	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
916 		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
917 
918 	/* Don't turn on EDMA here...do it before DMA commands only.  Else
919 	 * we'll be unable to send non-data, PIO, etc due to restricted access
920 	 * to shadow regs.
921 	 */
922 	ap->private_data = pp;
923 	return 0;
924 }
925 
926 /**
927  *      mv_port_stop - Port specific cleanup/stop routine.
928  *      @ap: ATA channel to manipulate
929  *
930  *      Stop DMA, cleanup port memory.
931  *
932  *      LOCKING:
933  *      This routine uses the host lock to protect the DMA stop.
934  */
935 static void mv_port_stop(struct ata_port *ap)
936 {
937 	unsigned long flags;
938 
939 	spin_lock_irqsave(&ap->host->lock, flags);
940 	mv_stop_dma(ap);
941 	spin_unlock_irqrestore(&ap->host->lock, flags);
942 }
943 
944 /**
945  *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
946  *      @qc: queued command whose SG list to source from
947  *
948  *      Populate the SG list and mark the last entry.
949  *
950  *      LOCKING:
951  *      Inherited from caller.
952  */
953 static void mv_fill_sg(struct ata_queued_cmd *qc)
954 {
955 	struct mv_port_priv *pp = qc->ap->private_data;
956 	unsigned int i = 0;
957 	struct scatterlist *sg;
958 
959 	ata_for_each_sg(sg, qc) {
960 		dma_addr_t addr;
961 		u32 sg_len, len, offset;
962 
963 		addr = sg_dma_address(sg);
964 		sg_len = sg_dma_len(sg);
965 
966 		while (sg_len) {
967 			offset = addr & MV_DMA_BOUNDARY;
968 			len = sg_len;
969 			if ((offset + sg_len) > 0x10000)
970 				len = 0x10000 - offset;
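			/* e.g. addr 0x1fff0, sg_len 0x20: the first ePRD gets
			 * 0x10 bytes up to the 64KB boundary, the remainder
			 * goes into the next entry.
			 */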
971 
972 			pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
973 			pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
974 			pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);
975 
976 			sg_len -= len;
977 			addr += len;
978 
979 			if (!sg_len && ata_sg_is_last(sg, qc))
980 				pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
981 
982 			i++;
983 		}
984 	}
985 }
986 
987 static inline unsigned mv_inc_q_index(unsigned index)
988 {
989 	return (index + 1) & MV_MAX_Q_DEPTH_MASK;
990 }
991 
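/* Pack one CRQB command word: the register byte goes in bits 7:0, the register
 * address at CRQB_CMD_ADDR_SHIFT, plus the CRQB_CMD_CS select bits and, on the
 * final word of a request, CRQB_CMD_LAST.
 */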
992 static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
993 {
994 	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
995 		(last ? CRQB_CMD_LAST : 0);
996 	*cmdw = cpu_to_le16(tmp);
997 }
998 
999 /**
1000  *      mv_qc_prep - Host specific command preparation.
1001  *      @qc: queued command to prepare
1002  *
1003  *      This routine simply redirects to the general purpose routine
1004  *      if command is not DMA.  Else, it handles prep of the CRQB
1005  *      (command request block), does some sanity checking, and calls
1006  *      the SG load routine.
1007  *
1008  *      LOCKING:
1009  *      Inherited from caller.
1010  */
1011 static void mv_qc_prep(struct ata_queued_cmd *qc)
1012 {
1013 	struct ata_port *ap = qc->ap;
1014 	struct mv_port_priv *pp = ap->private_data;
1015 	__le16 *cw;
1016 	struct ata_taskfile *tf;
1017 	u16 flags = 0;
1018 	unsigned in_index;
1019 
1020 	if (ATA_PROT_DMA != qc->tf.protocol)
1021 		return;
1022 
1023 	/* Fill in command request block
1024 	 */
1025 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1026 		flags |= CRQB_FLAG_READ;
1027 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1028 	flags |= qc->tag << CRQB_TAG_SHIFT;
1029 
1030 	/* get current queue index from hardware */
1031 	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
1032 			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1033 
1034 	pp->crqb[in_index].sg_addr =
1035 		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1036 	pp->crqb[in_index].sg_addr_hi =
1037 		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1038 	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1039 
1040 	cw = &pp->crqb[in_index].ata_cmd[0];
1041 	tf = &qc->tf;
1042 
1043 	/* Sadly, the CRQB cannot accommodate all registers--there are only
1044 	 * 11 command words (one register byte each), so we must pick and
1045 	 * choose the required registers based on the command.  We drop feature and
1046 	 * hob_feature for [RW] DMA commands, but they are needed for
1047 	 * NCQ.  NCQ will drop hob_nsect.
1048 	 */
1049 	switch (tf->command) {
1050 	case ATA_CMD_READ:
1051 	case ATA_CMD_READ_EXT:
1052 	case ATA_CMD_WRITE:
1053 	case ATA_CMD_WRITE_EXT:
1054 	case ATA_CMD_WRITE_FUA_EXT:
1055 		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1056 		break;
1057 #ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
1058 	case ATA_CMD_FPDMA_READ:
1059 	case ATA_CMD_FPDMA_WRITE:
1060 		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1061 		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1062 		break;
1063 #endif				/* FIXME: remove this line when NCQ added */
1064 	default:
1065 		/* The only other commands EDMA supports in non-queued and
1066 		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1067 		 * of which are defined/used by Linux.  If we get here, this
1068 		 * driver needs work.
1069 		 *
1070 		 * FIXME: modify libata to give qc_prep a return value and
1071 		 * return error here.
1072 		 */
1073 		BUG_ON(tf->command);
1074 		break;
1075 	}
1076 	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1077 	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1078 	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1079 	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1080 	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1081 	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1082 	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1083 	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1084 	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
1085 
1086 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1087 		return;
1088 	mv_fill_sg(qc);
1089 }
1090 
1091 /**
1092  *      mv_qc_prep_iie - Host specific command preparation.
1093  *      @qc: queued command to prepare
1094  *
1095  *      This routine simply redirects to the general purpose routine
1096  *      if command is not DMA.  Else, it handles prep of the CRQB
1097  *      (command request block), does some sanity checking, and calls
1098  *      the SG load routine.
1099  *
1100  *      LOCKING:
1101  *      Inherited from caller.
1102  */
1103 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1104 {
1105 	struct ata_port *ap = qc->ap;
1106 	struct mv_port_priv *pp = ap->private_data;
1107 	struct mv_crqb_iie *crqb;
1108 	struct ata_taskfile *tf;
1109 	unsigned in_index;
1110 	u32 flags = 0;
1111 
1112 	if (ATA_PROT_DMA != qc->tf.protocol)
1113 		return;
1114 
1115 	/* Fill in Gen IIE command request block
1116 	 */
1117 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1118 		flags |= CRQB_FLAG_READ;
1119 
1120 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1121 	flags |= qc->tag << CRQB_TAG_SHIFT;
1122 
1123 	/* get current queue index from hardware */
1124 	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
1125 			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1126 
1127 	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1128 	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1129 	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1130 	crqb->flags = cpu_to_le32(flags);
1131 
1132 	tf = &qc->tf;
1133 	crqb->ata_cmd[0] = cpu_to_le32(
1134 			(tf->command << 16) |
1135 			(tf->feature << 24)
1136 		);
1137 	crqb->ata_cmd[1] = cpu_to_le32(
1138 			(tf->lbal << 0) |
1139 			(tf->lbam << 8) |
1140 			(tf->lbah << 16) |
1141 			(tf->device << 24)
1142 		);
1143 	crqb->ata_cmd[2] = cpu_to_le32(
1144 			(tf->hob_lbal << 0) |
1145 			(tf->hob_lbam << 8) |
1146 			(tf->hob_lbah << 16) |
1147 			(tf->hob_feature << 24)
1148 		);
1149 	crqb->ata_cmd[3] = cpu_to_le32(
1150 			(tf->nsect << 0) |
1151 			(tf->hob_nsect << 8)
1152 		);
1153 
1154 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1155 		return;
1156 	mv_fill_sg(qc);
1157 }
1158 
1159 /**
1160  *      mv_qc_issue - Initiate a command to the host
1161  *      @qc: queued command to start
1162  *
1163  *      This routine simply redirects to the general purpose routine
1164  *      if command is not DMA.  Else, it sanity checks our local
1165  *      caches of the request producer/consumer indices then enables
1166  *      DMA and bumps the request producer index.
1167  *
1168  *      LOCKING:
1169  *      Inherited from caller.
1170  */
1171 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1172 {
1173 	void __iomem *port_mmio = mv_ap_base(qc->ap);
1174 	struct mv_port_priv *pp = qc->ap->private_data;
1175 	unsigned in_index;
1176 	u32 in_ptr;
1177 
1178 	if (ATA_PROT_DMA != qc->tf.protocol) {
1179 		/* We're about to send a non-EDMA capable command to the
1180 		 * port.  Turn off EDMA so there won't be problems accessing
1181 		 * shadow block, etc registers.
1182 		 */
1183 		mv_stop_dma(qc->ap);
1184 		return ata_qc_issue_prot(qc);
1185 	}
1186 
1187 	in_ptr   = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1188 	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1189 
1190 	/* until we do queuing, the queue should be empty at this point */
1191 	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1192 		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1193 
1194 	in_index = mv_inc_q_index(in_index);	/* now incr producer index */
1195 
1196 	mv_start_dma(port_mmio, pp);
1197 
1198 	/* and write the request in pointer to kick the EDMA to life */
1199 	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
1200 	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
1201 	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1202 
1203 	return 0;
1204 }
1205 
1206 /**
1207  *      mv_get_crpb_status - get status from most recently completed cmd
1208  *      @ap: ATA channel to manipulate
1209  *
1210  *      This routine is for use when the port is in DMA mode, when it
1211  *      will be using the CRPB (command response block) method of
1212  *      returning command completion information.  We check indices
1213  *      are good, grab status, and bump the response consumer index to
1214  *      prove that we're up to date.
1215  *
1216  *      LOCKING:
1217  *      Inherited from caller.
1218  */
1219 static u8 mv_get_crpb_status(struct ata_port *ap)
1220 {
1221 	void __iomem *port_mmio = mv_ap_base(ap);
1222 	struct mv_port_priv *pp = ap->private_data;
1223 	unsigned out_index;
1224 	u32 out_ptr;
1225 	u8 ata_status;
1226 
1227 	out_ptr   = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1228 	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1229 
1230 	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
1231 					>> CRPB_FLAG_STATUS_SHIFT;
1232 
1233 	/* increment our consumer index... */
1234 	out_index = mv_inc_q_index(out_index);
1235 
1236 	/* and, until we do NCQ, there should only be 1 CRPB waiting */
1237 	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1238 		>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1239 
1240 	/* write out our inc'd consumer index so EDMA knows we're caught up */
1241 	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
1242 	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
1243 	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1244 
1245 	/* Return ATA status register for completed CRPB */
1246 	return ata_status;
1247 }
1248 
1249 /**
1250  *      mv_err_intr - Handle error interrupts on the port
1251  *      @ap: ATA channel to manipulate
1252  *      @reset_allowed: bool: 0 == don't trigger from reset here
1253  *
1254  *      In most cases, just clear the interrupt and move on.  However,
1255  *      some cases require an eDMA reset, which is done right before
1256  *      the COMRESET in mv_phy_reset().  The SERR case requires a
1257  *      clear of pending errors in the SATA SERROR register.  Finally,
1258  *      if the port disabled DMA, update our cached copy to match.
1259  *
1260  *      LOCKING:
1261  *      Inherited from caller.
1262  */
1263 static void mv_err_intr(struct ata_port *ap, int reset_allowed)
1264 {
1265 	void __iomem *port_mmio = mv_ap_base(ap);
1266 	u32 edma_err_cause, serr = 0;
1267 
1268 	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1269 
1270 	if (EDMA_ERR_SERR & edma_err_cause) {
1271 		sata_scr_read(ap, SCR_ERROR, &serr);
1272 		sata_scr_write_flush(ap, SCR_ERROR, serr);
1273 	}
1274 	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
1275 		struct mv_port_priv *pp	= ap->private_data;
1276 		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1277 	}
1278 	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
1279 		"SERR: 0x%08x\n", ap->id, edma_err_cause, serr);
1280 
1281 	/* Clear EDMA now that SERR cleanup done */
1282 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1283 
1284 	/* check for fatal here and recover if needed */
1285 	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
1286 		mv_stop_and_reset(ap);
1287 }
1288 
1289 /**
1290  *      mv_host_intr - Handle all interrupts on the given host controller
1291  *      @host: host specific structure
1292  *      @relevant: port error bits relevant to this host controller
1293  *      @hc: which host controller we're to look at
1294  *
1295  *      Read then write clear the HC interrupt status then walk each
1296  *      port connected to the HC and see if it needs servicing.  Port
1297  *      success ints are reported in the HC interrupt status reg, the
1298  *      port error ints are reported in the higher level main
1299  *      interrupt status register and thus are passed in via the
1300  *      'relevant' argument.
1301  *
1302  *      LOCKING:
1303  *      Inherited from caller.
1304  */
1305 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1306 {
1307 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1308 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1309 	struct ata_queued_cmd *qc;
1310 	u32 hc_irq_cause;
1311 	int shift, port, port0, hard_port, handled;
1312 	unsigned int err_mask;
1313 
1314 	if (hc == 0) {
1315 		port0 = 0;
1316 	} else {
1317 		port0 = MV_PORTS_PER_HC;
1318 	}
1319 
1320 	/* we'll need the HC success int register in most cases */
1321 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1322 	if (hc_irq_cause) {
1323 		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1324 	}
1325 
1326 	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1327 		hc,relevant,hc_irq_cause);
1328 
1329 	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1330 		u8 ata_status = 0;
1331 		struct ata_port *ap = host->ports[port];
1332 		struct mv_port_priv *pp = ap->private_data;
1333 
1334 		hard_port = mv_hardport_from_port(port); /* range 0..3 */
1335 		handled = 0;	/* ensure ata_status is set if handled++ */
1336 
1337 		/* Note that DEV_IRQ might happen spuriously during EDMA,
1338 		 * and should be ignored in such cases.
1339 		 * The cause of this is still under investigation.
1340 		 */
1341 		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1342 			/* EDMA: check for response queue interrupt */
1343 			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
1344 				ata_status = mv_get_crpb_status(ap);
1345 				handled = 1;
1346 			}
1347 		} else {
1348 			/* PIO: check for device (drive) interrupt */
1349 			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
1350 				ata_status = readb(ap->ioaddr.status_addr);
1351 				handled = 1;
1352 				/* ignore spurious intr if drive still BUSY */
1353 				if (ata_status & ATA_BUSY) {
1354 					ata_status = 0;
1355 					handled = 0;
1356 				}
1357 			}
1358 		}
1359 
1360 		if (ap && (ap->flags & ATA_FLAG_DISABLED))
1361 			continue;
1362 
1363 		err_mask = ac_err_mask(ata_status);
1364 
1365 		shift = port << 1;		/* (port * 2) */
1366 		if (port >= MV_PORTS_PER_HC) {
1367 			shift++;	/* skip bit 8 in the HC Main IRQ reg */
1368 		}
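		/* e.g. port 4 (first port of HC1) ends up with shift == 9,
		 * matching HC_SHIFT in the main IRQ cause register.
		 */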
1369 		if ((PORT0_ERR << shift) & relevant) {
1370 			mv_err_intr(ap, 1);
1371 			err_mask |= AC_ERR_OTHER;
1372 			handled = 1;
1373 		}
1374 
1375 		if (handled) {
1376 			qc = ata_qc_from_tag(ap, ap->active_tag);
1377 			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
1378 				VPRINTK("port %u IRQ found for qc, "
1379 					"ata_status 0x%x\n", port,ata_status);
1380 				/* mark qc status appropriately */
1381 				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
1382 					qc->err_mask |= err_mask;
1383 					ata_qc_complete(qc);
1384 				}
1385 			}
1386 		}
1387 	}
1388 	VPRINTK("EXIT\n");
1389 }
1390 
1391 /**
1392  *      mv_interrupt - Main interrupt handler
1393  *      @irq: unused
1394  *      @dev_instance: private data; in this case the host structure
1396  *
1397  *      Read the read only register to determine if any host
1398  *      controllers have pending interrupts.  If so, call lower level
1399  *      routine to handle.  Also check for PCI errors which are only
1400  *      reported here.
1401  *
1402  *      LOCKING:
1403  *      This routine holds the host lock while processing pending
1404  *      interrupts.
1405  */
1406 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1407 {
1408 	struct ata_host *host = dev_instance;
1409 	unsigned int hc, handled = 0, n_hcs;
1410 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1411 	struct mv_host_priv *hpriv;
1412 	u32 irq_stat;
1413 
1414 	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1415 
1416 	/* check the cases where we either have nothing pending or have read
1417 	 * a bogus register value which can indicate HW removal or PCI fault
1418 	 */
1419 	if (!irq_stat || (0xffffffffU == irq_stat)) {
1420 		return IRQ_NONE;
1421 	}
1422 
1423 	n_hcs = mv_get_hc_count(host->ports[0]->flags);
1424 	spin_lock(&host->lock);
1425 
1426 	for (hc = 0; hc < n_hcs; hc++) {
1427 		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1428 		if (relevant) {
1429 			mv_host_intr(host, relevant, hc);
1430 			handled++;
1431 		}
1432 	}
1433 
1434 	hpriv = host->private_data;
1435 	if (IS_60XX(hpriv)) {
1436 		/* deal with the interrupt coalescing bits */
1437 		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
1438 			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
1439 			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
1440 			writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
1441 		}
1442 	}
1443 
1444 	if (PCI_ERR & irq_stat) {
1445 		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
1446 		       readl(mmio + PCI_IRQ_CAUSE_OFS));
1447 
1448 		DPRINTK("All regs @ PCI error\n");
1449 		mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1450 
1451 		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1452 		handled++;
1453 	}
1454 	spin_unlock(&host->lock);
1455 
1456 	return IRQ_RETVAL(handled);
1457 }
1458 
1459 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1460 {
1461 	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1462 	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1463 
1464 	return hc_mmio + ofs;
1465 }
1466 
1467 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1468 {
1469 	unsigned int ofs;
1470 
1471 	switch (sc_reg_in) {
1472 	case SCR_STATUS:
1473 	case SCR_ERROR:
1474 	case SCR_CONTROL:
1475 		ofs = sc_reg_in * sizeof(u32);
1476 		break;
1477 	default:
1478 		ofs = 0xffffffffU;
1479 		break;
1480 	}
1481 	return ofs;
1482 }
1483 
1484 static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1485 {
1486 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1487 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1488 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1489 
1490 	if (ofs != 0xffffffffU)
1491 		return readl(addr + ofs);
1492 	else
1493 		return (u32) ofs;
1494 }
1495 
1496 static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1497 {
1498 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1499 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1500 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1501 
1502 	if (ofs != 0xffffffffU)
1503 		writelfl(val, addr + ofs);
1504 }
1505 
1506 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1507 {
1508 	u8 rev_id;
1509 	int early_5080;
1510 
1511 	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
1512 
1513 	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);
1514 
1515 	if (!early_5080) {
1516 		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1517 		tmp |= (1 << 0);
1518 		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1519 	}
1520 
1521 	mv_reset_pci_bus(pdev, mmio);
1522 }
1523 
1524 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1525 {
1526 	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1527 }
1528 
1529 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1530 			   void __iomem *mmio)
1531 {
1532 	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1533 	u32 tmp;
1534 
1535 	tmp = readl(phy_mmio + MV5_PHY_MODE);
1536 
1537 	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
1538 	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
1539 }
1540 
1541 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1542 {
1543 	u32 tmp;
1544 
1545 	writel(0, mmio + MV_GPIO_PORT_CTL);
1546 
1547 	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1548 
1549 	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1550 	tmp |= ~(1 << 0);
1551 	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1552 }
1553 
1554 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1555 			   unsigned int port)
1556 {
1557 	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1558 	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1559 	u32 tmp;
1560 	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1561 
1562 	if (fix_apm_sq) {
1563 		tmp = readl(phy_mmio + MV5_LT_MODE);
1564 		tmp |= (1 << 19);
1565 		writel(tmp, phy_mmio + MV5_LT_MODE);
1566 
1567 		tmp = readl(phy_mmio + MV5_PHY_CTL);
1568 		tmp &= ~0x3;
1569 		tmp |= 0x1;
1570 		writel(tmp, phy_mmio + MV5_PHY_CTL);
1571 	}
1572 
1573 	tmp = readl(phy_mmio + MV5_PHY_MODE);
1574 	tmp &= ~mask;
1575 	tmp |= hpriv->signal[port].pre;
1576 	tmp |= hpriv->signal[port].amps;
1577 	writel(tmp, phy_mmio + MV5_PHY_MODE);
1578 }
1579 
1580 
1581 #undef ZERO
1582 #define ZERO(reg) writel(0, port_mmio + (reg))
1583 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1584 			     unsigned int port)
1585 {
1586 	void __iomem *port_mmio = mv_port_base(mmio, port);
1587 
1588 	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1589 
1590 	mv_channel_reset(hpriv, mmio, port);
1591 
1592 	ZERO(0x028);	/* command */
1593 	writel(0x11f, port_mmio + EDMA_CFG_OFS);
1594 	ZERO(0x004);	/* timer */
1595 	ZERO(0x008);	/* irq err cause */
1596 	ZERO(0x00c);	/* irq err mask */
1597 	ZERO(0x010);	/* rq bah */
1598 	ZERO(0x014);	/* rq inp */
1599 	ZERO(0x018);	/* rq outp */
1600 	ZERO(0x01c);	/* respq bah */
1601 	ZERO(0x024);	/* respq outp */
1602 	ZERO(0x020);	/* respq inp */
1603 	ZERO(0x02c);	/* test control */
1604 	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1605 }
1606 #undef ZERO
1607 
1608 #define ZERO(reg) writel(0, hc_mmio + (reg))
1609 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1610 			unsigned int hc)
1611 {
1612 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1613 	u32 tmp;
1614 
1615 	ZERO(0x00c);
1616 	ZERO(0x010);
1617 	ZERO(0x014);
1618 	ZERO(0x018);
1619 
1620 	tmp = readl(hc_mmio + 0x20);
1621 	tmp &= 0x1c1c1c1c;
1622 	tmp |= 0x03030303;
1623 	writel(tmp, hc_mmio + 0x20);
1624 }
1625 #undef ZERO
1626 
1627 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1628 			unsigned int n_hc)
1629 {
1630 	unsigned int hc, port;
1631 
1632 	for (hc = 0; hc < n_hc; hc++) {
1633 		for (port = 0; port < MV_PORTS_PER_HC; port++)
1634 			mv5_reset_hc_port(hpriv, mmio,
1635 					  (hc * MV_PORTS_PER_HC) + port);
1636 
1637 		mv5_reset_one_hc(hpriv, mmio, hc);
1638 	}
1639 
1640 	return 0;
1641 }
1642 
1643 #undef ZERO
1644 #define ZERO(reg) writel(0, mmio + (reg))
1645 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1646 {
1647 	u32 tmp;
1648 
1649 	tmp = readl(mmio + MV_PCI_MODE);
1650 	tmp &= 0xff00ffff;
1651 	writel(tmp, mmio + MV_PCI_MODE);
1652 
1653 	ZERO(MV_PCI_DISC_TIMER);
1654 	ZERO(MV_PCI_MSI_TRIGGER);
1655 	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1656 	ZERO(HC_MAIN_IRQ_MASK_OFS);
1657 	ZERO(MV_PCI_SERR_MASK);
1658 	ZERO(PCI_IRQ_CAUSE_OFS);
1659 	ZERO(PCI_IRQ_MASK_OFS);
1660 	ZERO(MV_PCI_ERR_LOW_ADDRESS);
1661 	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1662 	ZERO(MV_PCI_ERR_ATTRIBUTE);
1663 	ZERO(MV_PCI_ERR_COMMAND);
1664 }
1665 #undef ZERO
1666 
1667 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1668 {
1669 	u32 tmp;
1670 
1671 	mv5_reset_flash(hpriv, mmio);
1672 
1673 	tmp = readl(mmio + MV_GPIO_PORT_CTL);
1674 	tmp &= 0x3;
1675 	tmp |= (1 << 5) | (1 << 6);
1676 	writel(tmp, mmio + MV_GPIO_PORT_CTL);
1677 }
1678 
1679 /**
1680  *      mv6_reset_hc - Perform the 6xxx global soft reset
1681  *      @mmio: base address of the HBA
1682  *
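 *      @hpriv: host private data (unused here)
 *      @n_hc: number of SATA HCs (unused; the soft reset is global)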
1683  *      This routine only applies to 6xxx parts.
1684  *
1685  *      LOCKING:
1686  *      Inherited from caller.
1687  */
1688 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1689 			unsigned int n_hc)
1690 {
1691 	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1692 	int i, rc = 0;
1693 	u32 t;
1694 
1695 	/* Following procedure defined in PCI "main command and status
1696 	 * register" table.
1697 	 */
1698 	t = readl(reg);
1699 	writel(t | STOP_PCI_MASTER, reg);
1700 
1701 	for (i = 0; i < 1000; i++) {
1702 		udelay(1);
1703 		t = readl(reg);
1704 		if (PCI_MASTER_EMPTY & t) {
1705 			break;
1706 		}
1707 	}
1708 	if (!(PCI_MASTER_EMPTY & t)) {
1709 		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
1710 		rc = 1;
1711 		goto done;
1712 	}
1713 
1714 	/* set reset */
1715 	i = 5;
1716 	do {
1717 		writel(t | GLOB_SFT_RST, reg);
1718 		t = readl(reg);
1719 		udelay(1);
1720 	} while (!(GLOB_SFT_RST & t) && (i-- > 0));
1721 
1722 	if (!(GLOB_SFT_RST & t)) {
1723 		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
1724 		rc = 1;
1725 		goto done;
1726 	}
1727 
1728 	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
1729 	i = 5;
1730 	do {
1731 		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
1732 		t = readl(reg);
1733 		udelay(1);
1734 	} while ((GLOB_SFT_RST & t) && (i-- > 0));
1735 
1736 	if (GLOB_SFT_RST & t) {
1737 		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
1738 		rc = 1;
1739 	}
1740 done:
1741 	return rc;
1742 }
1743 
1744 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
1745 			   void __iomem *mmio)
1746 {
1747 	void __iomem *port_mmio;
1748 	u32 tmp;
1749 
1750 	tmp = readl(mmio + MV_RESET_CFG);
1751 	if ((tmp & (1 << 0)) == 0) {
1752 		hpriv->signal[idx].amps = 0x7 << 8;
1753 		hpriv->signal[idx].pre = 0x1 << 5;
1754 		return;
1755 	}
1756 
1757 	port_mmio = mv_port_base(mmio, idx);
1758 	tmp = readl(port_mmio + PHY_MODE2);
1759 
1760 	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
1761 	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
1762 }
1763 
1764 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1765 {
1766 	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
1767 }
1768 
1769 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1770 			   unsigned int port)
1771 {
1772 	void __iomem *port_mmio = mv_port_base(mmio, port);
1773 
1774 	u32 hp_flags = hpriv->hp_flags;
1775 	int fix_phy_mode2 =
1776 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
1777 	int fix_phy_mode4 =
1778 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
1779 	u32 m2, tmp;
1780 
1781 	if (fix_phy_mode2) {
1782 		m2 = readl(port_mmio + PHY_MODE2);
1783 		m2 &= ~(1 << 16);
1784 		m2 |= (1 << 31);
1785 		writel(m2, port_mmio + PHY_MODE2);
1786 
1787 		udelay(200);
1788 
1789 		m2 = readl(port_mmio + PHY_MODE2);
1790 		m2 &= ~((1 << 16) | (1 << 31));
1791 		writel(m2, port_mmio + PHY_MODE2);
1792 
1793 		udelay(200);
1794 	}
1795 
1796 	/* who knows what this magic does */
1797 	tmp = readl(port_mmio + PHY_MODE3);
1798 	tmp &= ~0x7F800000;
1799 	tmp |= 0x2A800000;
1800 	writel(tmp, port_mmio + PHY_MODE3);
1801 
1802 	if (fix_phy_mode4) {
1803 		u32 m4;
1804 
1805 		m4 = readl(port_mmio + PHY_MODE4);
1806 
1807 		if (hp_flags & MV_HP_ERRATA_60X1B2)
1808 			tmp = readl(port_mmio + 0x310);
1809 
1810 		m4 = (m4 & ~(1 << 1)) | (1 << 0);
1811 
1812 		writel(m4, port_mmio + PHY_MODE4);
1813 
1814 		if (hp_flags & MV_HP_ERRATA_60X1B2)
1815 			writel(tmp, port_mmio + 0x310);
1816 	}
1817 
1818 	/* Revert values of pre-emphasis and signal amps to the saved ones */
1819 	m2 = readl(port_mmio + PHY_MODE2);
1820 
1821 	m2 &= ~MV_M2_PREAMP_MASK;
1822 	m2 |= hpriv->signal[port].amps;
1823 	m2 |= hpriv->signal[port].pre;
1824 	m2 &= ~(1 << 16);
1825 
1826 	/* according to mvSata 3.6.1, some IIE values are fixed */
1827 	if (IS_GEN_IIE(hpriv)) {
1828 		m2 &= ~0xC30FF01F;
1829 		m2 |= 0x0000900F;
1830 	}
1831 
1832 	writel(m2, port_mmio + PHY_MODE2);
1833 }
1834 
1835 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
1836 			     unsigned int port_no)
1837 {
1838 	void __iomem *port_mmio = mv_port_base(mmio, port_no);
1839 
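	/* Assert ATA reset via the EDMA command register */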
1840 	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
1841 
1842 	if (IS_60XX(hpriv)) {
1843 		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
1844 		ifctl |= (1 << 7);		/* enable gen2i speed */
1845 		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
1846 		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
1847 	}
1848 
1849 	udelay(25);		/* allow reset propagation */
1850 
1851 	/* Spec never mentions clearing the bit.  Marvell's driver does
1852 	 * clear the bit, however.
1853 	 */
1854 	writelfl(0, port_mmio + EDMA_CMD_OFS);
1855 
1856 	hpriv->ops->phy_errata(hpriv, mmio, port_no);
1857 
1858 	if (IS_50XX(hpriv))
1859 		mdelay(1);
1860 }
1861 
1862 static void mv_stop_and_reset(struct ata_port *ap)
1863 {
1864 	struct mv_host_priv *hpriv = ap->host->private_data;
1865 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1866 
1867 	mv_stop_dma(ap);
1868 
1869 	mv_channel_reset(hpriv, mmio, ap->port_no);
1870 
1871 	__mv_phy_reset(ap, 0);
1872 }
1873 
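/* Delay helper: sleep when the caller can, otherwise busy-wait */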
1874 static inline void __msleep(unsigned int msec, int can_sleep)
1875 {
1876 	if (can_sleep)
1877 		msleep(msec);
1878 	else
1879 		mdelay(msec);
1880 }
1881 
1882 /**
1883  *      __mv_phy_reset - Perform eDMA reset followed by COMRESET
1884  *      @ap: ATA channel to manipulate
1885  *
1886  *      Part of this is taken from __sata_phy_reset and modified to
1887  *      not sleep since this routine gets called from interrupt level.
1888  *
1889  *      LOCKING:
1890  *      Inherited from caller.  This is coded to be safe to call at
1891  *      interrupt level, i.e. it does not sleep.
1892  */
1893 static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
1894 {
1895 	struct mv_port_priv *pp	= ap->private_data;
1896 	struct mv_host_priv *hpriv = ap->host->private_data;
1897 	void __iomem *port_mmio = mv_ap_base(ap);
1898 	struct ata_taskfile tf;
1899 	struct ata_device *dev = &ap->device[0];
1900 	unsigned long timeout;
1901 	int retry = 5;
1902 	u32 sstatus;
1903 
1904 	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
1905 
1906 	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
1907 		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1908 		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1909 
1910 	/* Issue COMRESET via SControl */
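	/* 0x301 sets DET=1 (issue COMRESET) with IPM=3 (no partial/slumber
	 * transitions); 0x300 below releases DET while keeping IPM=3.
	 */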
1911 comreset_retry:
1912 	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1913 	__msleep(1, can_sleep);
1914 
1915 	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1916 	__msleep(20, can_sleep);
1917 
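	/* Poll DET in SStatus for up to 200ms: 3 = phy communication
	 * established, 0 = no device detected.
	 */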
1918 	timeout = jiffies + msecs_to_jiffies(200);
1919 	do {
1920 		sata_scr_read(ap, SCR_STATUS, &sstatus);
1921 		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
1922 			break;
1923 
1924 		__msleep(1, can_sleep);
1925 	} while (time_before(jiffies, timeout));
1926 
1927 	/* 60XX errata: retry COMRESET if SStatus is not a known-good value */
1928 	if (IS_60XX(hpriv) &&
1929 	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
1930 	    (retry-- > 0))
1931 		goto comreset_retry;
1932 
1933 	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
1934 		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1935 		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1936 
1937 	if (ata_port_online(ap)) {
1938 		ata_port_probe(ap);
1939 	} else {
1940 		sata_scr_read(ap, SCR_STATUS, &sstatus);
1941 		ata_port_printk(ap, KERN_INFO,
1942 				"no device found (phy stat %08x)\n", sstatus);
1943 		ata_port_disable(ap);
1944 		return;
1945 	}
1946 	ap->cbl = ATA_CBL_SATA;
1947 
1948 	/* even after SStatus reflects that device is ready,
1949 	 * it seems to take a while for link to be fully
1950 	 * established (and thus Status no longer 0x80/0x7F),
1951 	 * so we poll a bit for that, here.
1952 	 */
1953 	retry = 20;
1954 	while (1) {
1955 		u8 drv_stat = ata_check_status(ap);
1956 		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
1957 			break;
1958 		__msleep(500, can_sleep);
1959 		if (retry-- <= 0)
1960 			break;
1961 	}
1962 
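
	/* Read the device signature from the shadow registers and classify */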
1963 	tf.lbah = readb(ap->ioaddr.lbah_addr);
1964 	tf.lbam = readb(ap->ioaddr.lbam_addr);
1965 	tf.lbal = readb(ap->ioaddr.lbal_addr);
1966 	tf.nsect = readb(ap->ioaddr.nsect_addr);
1967 
1968 	dev->class = ata_dev_classify(&tf);
1969 	if (!ata_dev_enabled(dev)) {
1970 		VPRINTK("Port disabled post-sig: No device present.\n");
1971 		ata_port_disable(ap);
1972 	}
1973 
1974 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1975 
1976 	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1977 
1978 	VPRINTK("EXIT\n");
1979 }
1980 
1981 static void mv_phy_reset(struct ata_port *ap)
1982 {
1983 	__mv_phy_reset(ap, 1);
1984 }
1985 
1986 /**
1987  *      mv_eng_timeout - Routine called by libata when SCSI times out I/O
1988  *      @ap: ATA channel to manipulate
1989  *
1990  *      Intent is to clear all pending error conditions, reset the
1991  *      chip/bus, fail the command, and move on.
1992  *
1993  *      LOCKING:
1994  *      This routine holds the host lock while failing the command.
1995  */
1996 static void mv_eng_timeout(struct ata_port *ap)
1997 {
1998 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1999 	struct ata_queued_cmd *qc;
2000 	unsigned long flags;
2001 
2002 	ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
2003 	DPRINTK("All regs @ start of eng_timeout\n");
2004 	mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));
2005 
2006 	qc = ata_qc_from_tag(ap, ap->active_tag);
2007 	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
2008 	       mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);
2009 
2010 	spin_lock_irqsave(&ap->host->lock, flags);
2011 	mv_err_intr(ap, 0);
2012 	mv_stop_and_reset(ap);
2013 	spin_unlock_irqrestore(&ap->host->lock, flags);
2014 
2015 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
2016 	if (qc->flags & ATA_QCFLAG_ACTIVE) {
2017 		qc->err_mask |= AC_ERR_TIMEOUT;
2018 		ata_eh_qc_complete(qc);
2019 	}
2020 }
2021 
2022 /**
2023  *      mv_port_init - Perform some early initialization on a single port.
2024  *      @port: libata data structure storing shadow register addresses
2025  *      @port_mmio: base address of the port
2026  *
2027  *      Initialize shadow register mmio addresses, clear outstanding
2028  *      interrupts on the port, and unmask interrupts for the future
2029  *      start of the port.
2030  *
2031  *      LOCKING:
2032  *      Inherited from caller.
2033  */
2034 static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
2035 {
2036 	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2037 	unsigned serr_ofs;
2038 
2039 	/* PIO related setup
2040 	 */
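	/* Each shadow ATA register occupies a 32-bit slot in the SHD block,
	 * hence the sizeof(u32) scaling below.
	 */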
2041 	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2042 	port->error_addr =
2043 		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2044 	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2045 	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2046 	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2047 	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2048 	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2049 	port->status_addr =
2050 		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2051 	/* special case: control/altstatus doesn't have ATA_REG_ address */
2052 	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2053 
2054 	/* unused: */
2055 	port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;
2056 
2057 	/* Clear any currently outstanding port interrupt conditions */
2058 	serr_ofs = mv_scr_offset(SCR_ERROR);
2059 	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2060 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2061 
2062 	/* unmask all EDMA error interrupts */
2063 	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2064 
2065 	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2066 		readl(port_mmio + EDMA_CFG_OFS),
2067 		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2068 		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2069 }
2070 
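/* Select the per-generation hardware ops and errata flags for this board,
 * keyed on the board index and the chip's PCI revision ID.
 */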
2071 static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
2072 		      unsigned int board_idx)
2073 {
2074 	u8 rev_id;
2075 	u32 hp_flags = hpriv->hp_flags;
2076 
2077 	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2078 
2079 	switch(board_idx) {
2080 	case chip_5080:
2081 		hpriv->ops = &mv5xxx_ops;
2082 		hp_flags |= MV_HP_50XX;
2083 
2084 		switch (rev_id) {
2085 		case 0x1:
2086 			hp_flags |= MV_HP_ERRATA_50XXB0;
2087 			break;
2088 		case 0x3:
2089 			hp_flags |= MV_HP_ERRATA_50XXB2;
2090 			break;
2091 		default:
2092 			dev_printk(KERN_WARNING, &pdev->dev,
2093 			   "Applying 50XXB2 workarounds to unknown rev\n");
2094 			hp_flags |= MV_HP_ERRATA_50XXB2;
2095 			break;
2096 		}
2097 		break;
2098 
2099 	case chip_504x:
2100 	case chip_508x:
2101 		hpriv->ops = &mv5xxx_ops;
2102 		hp_flags |= MV_HP_50XX;
2103 
2104 		switch (rev_id) {
2105 		case 0x0:
2106 			hp_flags |= MV_HP_ERRATA_50XXB0;
2107 			break;
2108 		case 0x3:
2109 			hp_flags |= MV_HP_ERRATA_50XXB2;
2110 			break;
2111 		default:
2112 			dev_printk(KERN_WARNING, &pdev->dev,
2113 			   "Applying B2 workarounds to unknown rev\n");
2114 			hp_flags |= MV_HP_ERRATA_50XXB2;
2115 			break;
2116 		}
2117 		break;
2118 
2119 	case chip_604x:
2120 	case chip_608x:
2121 		hpriv->ops = &mv6xxx_ops;
2122 
2123 		switch (rev_id) {
2124 		case 0x7:
2125 			hp_flags |= MV_HP_ERRATA_60X1B2;
2126 			break;
2127 		case 0x9:
2128 			hp_flags |= MV_HP_ERRATA_60X1C0;
2129 			break;
2130 		default:
2131 			dev_printk(KERN_WARNING, &pdev->dev,
2132 				   "Applying B2 workarounds to unknown rev\n");
2133 			hp_flags |= MV_HP_ERRATA_60X1B2;
2134 			break;
2135 		}
2136 		break;
2137 
2138 	case chip_7042:
2139 	case chip_6042:
2140 		hpriv->ops = &mv6xxx_ops;
2141 
2142 		hp_flags |= MV_HP_GEN_IIE;
2143 
2144 		switch (rev_id) {
2145 		case 0x0:
2146 			hp_flags |= MV_HP_ERRATA_XX42A0;
2147 			break;
2148 		case 0x1:
2149 			hp_flags |= MV_HP_ERRATA_60X1C0;
2150 			break;
2151 		default:
2152 			dev_printk(KERN_WARNING, &pdev->dev,
2153 			   "Applying 60X1C0 workarounds to unknown rev\n");
2154 			hp_flags |= MV_HP_ERRATA_60X1C0;
2155 			break;
2156 		}
2157 		break;
2158 
2159 	default:
2160 		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2161 		return 1;
2162 	}
2163 
2164 	hpriv->hp_flags = hp_flags;
2165 
2166 	return 0;
2167 }
2168 
2169 /**
2170  *      mv_init_host - Perform some early initialization of the host.
2171  *	@pdev: host PCI device
2172  *      @probe_ent: early data struct representing the host
2173  *
2174  *      If possible, do an early global reset of the host.  Then do
2175  *      our port init and clear/unmask all/relevant host interrupts.
2176  *
2177  *      LOCKING:
2178  *      Inherited from caller.
2179  */
2180 static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
2181 			unsigned int board_idx)
2182 {
2183 	int rc = 0, n_hc, port, hc;
2184 	void __iomem *mmio = probe_ent->iomap[MV_PRIMARY_BAR];
2185 	struct mv_host_priv *hpriv = probe_ent->private_data;
2186 
2187 	/* mask all chip interrupts until init completes (unmasked below) */
2188 	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2189 
2190 	rc = mv_chip_id(pdev, hpriv, board_idx);
2191 	if (rc)
2192 		goto done;
2193 
2194 	n_hc = mv_get_hc_count(probe_ent->port_flags);
2195 	probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;
2196 
2197 	for (port = 0; port < probe_ent->n_ports; port++)
2198 		hpriv->ops->read_preamp(hpriv, port, mmio);
2199 
2200 	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2201 	if (rc)
2202 		goto done;
2203 
2204 	hpriv->ops->reset_flash(hpriv, mmio);
2205 	hpriv->ops->reset_bus(pdev, mmio);
2206 	hpriv->ops->enable_leds(hpriv, mmio);
2207 
2208 	for (port = 0; port < probe_ent->n_ports; port++) {
2209 		if (IS_60XX(hpriv)) {
2210 			void __iomem *port_mmio = mv_port_base(mmio, port);
2211 
2212 			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2213 			ifctl |= (1 << 7);		/* enable gen2i speed */
2214 			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2215 			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2216 		}
2217 
2218 		hpriv->ops->phy_errata(hpriv, mmio, port);
2219 	}
2220 
2221 	for (port = 0; port < probe_ent->n_ports; port++) {
2222 		void __iomem *port_mmio = mv_port_base(mmio, port);
2223 		mv_port_init(&probe_ent->port[port], port_mmio);
2224 	}
2225 
2226 	for (hc = 0; hc < n_hc; hc++) {
2227 		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2228 
2229 		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2230 			"(before clear)=0x%08x\n", hc,
2231 			readl(hc_mmio + HC_CFG_OFS),
2232 			readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2233 
2234 		/* Clear any currently outstanding hc interrupt conditions */
2235 		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2236 	}
2237 
2238 	/* Clear any currently outstanding host interrupt conditions */
2239 	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2240 
2241 	/* and unmask interrupt generation for host regs */
2242 	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
2243 	writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2244 
2245 	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2246 		"PCI int cause/mask=0x%08x/0x%08x\n",
2247 		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2248 		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2249 		readl(mmio + PCI_IRQ_CAUSE_OFS),
2250 		readl(mmio + PCI_IRQ_MASK_OFS));
2251 
2252 done:
2253 	return rc;
2254 }
2255 
2256 /**
2257  *      mv_print_info - Dump key info to kernel log for perusal.
2258  *      @probe_ent: early data struct representing the host
2259  *
2260  *      FIXME: complete this.
2261  *
2262  *      LOCKING:
2263  *      Inherited from caller.
2264  */
2265 static void mv_print_info(struct ata_probe_ent *probe_ent)
2266 {
2267 	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
2268 	struct mv_host_priv *hpriv = probe_ent->private_data;
2269 	u8 rev_id, scc;
2270 	const char *scc_s;
2271 
2272 	/* Use this to determine the HW stepping of the chip so we know
2273 	 * which errata to work around
2274 	 */
2275 	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2276 
2277 	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2278 	if (scc == 0)
2279 		scc_s = "SCSI";
2280 	else if (scc == 0x01)
2281 		scc_s = "RAID";
2282 	else
2283 		scc_s = "unknown";
2284 
2285 	dev_printk(KERN_INFO, &pdev->dev,
2286 	       "%u slots %u ports %s mode IRQ via %s\n",
2287 	       (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
2288 	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2289 }
2290 
2291 /**
2292  *      mv_init_one - handle a positive probe of a Marvell host
2293  *      @pdev: PCI device found
2294  *      @ent: PCI device ID entry for the matched host
2295  *
2296  *      LOCKING:
2297  *      Inherited from caller.
2298  */
2299 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2300 {
2301 	static int printed_version;
2302 	struct device *dev = &pdev->dev;
2303 	struct ata_probe_ent *probe_ent;
2304 	struct mv_host_priv *hpriv;
2305 	unsigned int board_idx = (unsigned int)ent->driver_data;
2306 	int rc;
2307 
2308 	if (!printed_version++)
2309 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2310 
2311 	rc = pcim_enable_device(pdev);
2312 	if (rc)
2313 		return rc;
2314 	pci_set_master(pdev);
2315 
2316 	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2317 	if (rc == -EBUSY)
2318 		pcim_pin_device(pdev);
2319 	if (rc)
2320 		return rc;
2321 
2322 	probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
2323 	if (probe_ent == NULL)
2324 		return -ENOMEM;
2325 
2326 	probe_ent->dev = pci_dev_to_dev(pdev);
2327 	INIT_LIST_HEAD(&probe_ent->node);
2328 
2329 	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
2330 	if (!hpriv)
2331 		return -ENOMEM;
2332 
2333 	probe_ent->sht = mv_port_info[board_idx].sht;
2334 	probe_ent->port_flags = mv_port_info[board_idx].flags;
2335 	probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
2336 	probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
2337 	probe_ent->port_ops = mv_port_info[board_idx].port_ops;
2338 
2339 	probe_ent->irq = pdev->irq;
2340 	probe_ent->irq_flags = IRQF_SHARED;
2341 	probe_ent->iomap = pcim_iomap_table(pdev);
2342 	probe_ent->private_data = hpriv;
2343 
2344 	/* initialize adapter */
2345 	rc = mv_init_host(pdev, probe_ent, board_idx);
2346 	if (rc)
2347 		return rc;
2348 
2349 	/* Enable interrupts: fall back to legacy INTx if MSI fails */
2350 	if (msi && pci_enable_msi(pdev))
2351 		pci_intx(pdev, 1);
2352 
2353 	mv_dump_pci_cfg(pdev, 0x68);
2354 	mv_print_info(probe_ent);
2355 
2356 	if (ata_device_add(probe_ent) == 0)
2357 		return -ENODEV;
2358 
2359 	devm_kfree(dev, probe_ent);
2360 	return 0;
2361 }
2362 
2363 static int __init mv_init(void)
2364 {
2365 	return pci_register_driver(&mv_pci_driver);
2366 }
2367 
2368 static void __exit mv_exit(void)
2369 {
2370 	pci_unregister_driver(&mv_pci_driver);
2371 }
2372 
2373 MODULE_AUTHOR("Brett Russ");
2374 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2375 MODULE_LICENSE("GPL");
2376 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2377 MODULE_VERSION(DRV_VERSION);
2378 
2379 module_param(msi, int, 0444);
2380 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2381 
2382 module_init(mv_init);
2383 module_exit(mv_exit);
2384