1 /*
2  *  ahci.c - AHCI SATA support
3  *
4  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2004-2005 Red Hat, Inc.
9  *
10  *
11  *  This program is free software; you can redistribute it and/or modify
12  *  it under the terms of the GNU General Public License as published by
13  *  the Free Software Foundation; either version 2, or (at your option)
14  *  any later version.
15  *
16  *  This program is distributed in the hope that it will be useful,
17  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
18  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  *  GNU General Public License for more details.
20  *
21  *  You should have received a copy of the GNU General Public License
22  *  along with this program; see the file COPYING.  If not, write to
23  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24  *
25  *
26  * libata documentation is available via 'make {ps|pdf}docs',
27  * as Documentation/DocBook/libata.*
28  *
29  * AHCI hardware documentation:
30  * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31  * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32  *
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/device.h>
44 #include <linux/dmi.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_cmnd.h>
47 #include <linux/libata.h>
48 
49 #define DRV_NAME	"ahci"
50 #define DRV_VERSION	"3.0"
51 
52 static int ahci_skip_host_reset;
53 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
54 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
55 
56 static int ahci_enable_alpm(struct ata_port *ap,
57 		enum link_pm policy);
58 static void ahci_disable_alpm(struct ata_port *ap);
59 static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
60 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
61 			      size_t size);
62 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
63 					ssize_t size);
64 #define MAX_SLOTS 8
65 
66 enum {
67 	AHCI_PCI_BAR		= 5,
68 	AHCI_MAX_PORTS		= 32,
69 	AHCI_MAX_SG		= 168, /* hardware max is 64K */
70 	AHCI_DMA_BOUNDARY	= 0xffffffff,
71 	AHCI_MAX_CMDS		= 32,
72 	AHCI_CMD_SZ		= 32,
73 	AHCI_CMD_SLOT_SZ	= AHCI_MAX_CMDS * AHCI_CMD_SZ,
74 	AHCI_RX_FIS_SZ		= 256,
75 	AHCI_CMD_TBL_CDB	= 0x40,
76 	AHCI_CMD_TBL_HDR_SZ	= 0x80,
77 	AHCI_CMD_TBL_SZ		= AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
78 	AHCI_CMD_TBL_AR_SZ	= AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
79 	AHCI_PORT_PRIV_DMA_SZ	= AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
80 				  AHCI_RX_FIS_SZ,
81 	AHCI_IRQ_ON_SG		= (1 << 31),
82 	AHCI_CMD_ATAPI		= (1 << 5),
83 	AHCI_CMD_WRITE		= (1 << 6),
84 	AHCI_CMD_PREFETCH	= (1 << 7),
85 	AHCI_CMD_RESET		= (1 << 8),
86 	AHCI_CMD_CLR_BUSY	= (1 << 10),
87 
88 	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
89 	RX_FIS_SDB		= 0x58, /* offset of SDB FIS data */
90 	RX_FIS_UNK		= 0x60, /* offset of Unknown FIS data */
91 
92 	board_ahci		= 0,
93 	board_ahci_vt8251	= 1,
94 	board_ahci_ign_iferr	= 2,
95 	board_ahci_sb600	= 3,
96 	board_ahci_mv		= 4,
97 	board_ahci_sb700	= 5,
98 	board_ahci_mcp65	= 6,
99 	board_ahci_nopmp	= 7,
100 
101 	/* global controller registers */
102 	HOST_CAP		= 0x00, /* host capabilities */
103 	HOST_CTL		= 0x04, /* global host control */
104 	HOST_IRQ_STAT		= 0x08, /* interrupt status */
105 	HOST_PORTS_IMPL		= 0x0c, /* bitmap of implemented ports */
106 	HOST_VERSION		= 0x10, /* AHCI spec. version compliancy */
107 	HOST_EM_LOC		= 0x1c, /* Enclosure Management location */
108 	HOST_EM_CTL		= 0x20, /* Enclosure Management Control */
109 
110 	/* HOST_CTL bits */
111 	HOST_RESET		= (1 << 0),  /* reset controller; self-clear */
112 	HOST_IRQ_EN		= (1 << 1),  /* global IRQ enable */
113 	HOST_AHCI_EN		= (1 << 31), /* AHCI enabled */
114 
115 	/* HOST_CAP bits */
116 	HOST_CAP_EMS		= (1 << 6),  /* Enclosure Management support */
117 	HOST_CAP_SSC		= (1 << 14), /* Slumber capable */
118 	HOST_CAP_PMP		= (1 << 17), /* Port Multiplier support */
119 	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
120 	HOST_CAP_ALPM		= (1 << 26), /* Aggressive Link PM support */
121 	HOST_CAP_SSS		= (1 << 27), /* Staggered Spin-up */
122 	HOST_CAP_SNTF		= (1 << 29), /* SNotification register */
123 	HOST_CAP_NCQ		= (1 << 30), /* Native Command Queueing */
124 	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */
125 
126 	/* registers for each SATA port */
127 	PORT_LST_ADDR		= 0x00, /* command list DMA addr */
128 	PORT_LST_ADDR_HI	= 0x04, /* command list DMA addr hi */
129 	PORT_FIS_ADDR		= 0x08, /* FIS rx buf addr */
130 	PORT_FIS_ADDR_HI	= 0x0c, /* FIS rx buf addr hi */
131 	PORT_IRQ_STAT		= 0x10, /* interrupt status */
132 	PORT_IRQ_MASK		= 0x14, /* interrupt enable/disable mask */
133 	PORT_CMD		= 0x18, /* port command */
134 	PORT_TFDATA		= 0x20,	/* taskfile data */
135 	PORT_SIG		= 0x24,	/* device TF signature */
136 	PORT_CMD_ISSUE		= 0x38, /* command issue */
137 	PORT_SCR_STAT		= 0x28, /* SATA phy register: SStatus */
138 	PORT_SCR_CTL		= 0x2c, /* SATA phy register: SControl */
139 	PORT_SCR_ERR		= 0x30, /* SATA phy register: SError */
140 	PORT_SCR_ACT		= 0x34, /* SATA phy register: SActive */
141 	PORT_SCR_NTF		= 0x3c, /* SATA phy register: SNotification */
142 
143 	/* PORT_IRQ_{STAT,MASK} bits */
144 	PORT_IRQ_COLD_PRES	= (1 << 31), /* cold presence detect */
145 	PORT_IRQ_TF_ERR		= (1 << 30), /* task file error */
146 	PORT_IRQ_HBUS_ERR	= (1 << 29), /* host bus fatal error */
147 	PORT_IRQ_HBUS_DATA_ERR	= (1 << 28), /* host bus data error */
148 	PORT_IRQ_IF_ERR		= (1 << 27), /* interface fatal error */
149 	PORT_IRQ_IF_NONFATAL	= (1 << 26), /* interface non-fatal error */
150 	PORT_IRQ_OVERFLOW	= (1 << 24), /* xfer exhausted available S/G */
151 	PORT_IRQ_BAD_PMP	= (1 << 23), /* incorrect port multiplier */
152 
153 	PORT_IRQ_PHYRDY		= (1 << 22), /* PhyRdy changed */
154 	PORT_IRQ_DEV_ILCK	= (1 << 7), /* device interlock */
155 	PORT_IRQ_CONNECT	= (1 << 6), /* port connect change status */
156 	PORT_IRQ_SG_DONE	= (1 << 5), /* descriptor processed */
157 	PORT_IRQ_UNK_FIS	= (1 << 4), /* unknown FIS rx'd */
158 	PORT_IRQ_SDB_FIS	= (1 << 3), /* Set Device Bits FIS rx'd */
159 	PORT_IRQ_DMAS_FIS	= (1 << 2), /* DMA Setup FIS rx'd */
160 	PORT_IRQ_PIOS_FIS	= (1 << 1), /* PIO Setup FIS rx'd */
161 	PORT_IRQ_D2H_REG_FIS	= (1 << 0), /* D2H Register FIS rx'd */
162 
163 	PORT_IRQ_FREEZE		= PORT_IRQ_HBUS_ERR |
164 				  PORT_IRQ_IF_ERR |
165 				  PORT_IRQ_CONNECT |
166 				  PORT_IRQ_PHYRDY |
167 				  PORT_IRQ_UNK_FIS |
168 				  PORT_IRQ_BAD_PMP,
169 	PORT_IRQ_ERROR		= PORT_IRQ_FREEZE |
170 				  PORT_IRQ_TF_ERR |
171 				  PORT_IRQ_HBUS_DATA_ERR,
172 	DEF_PORT_IRQ		= PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
173 				  PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
174 				  PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
175 
176 	/* PORT_CMD bits */
177 	PORT_CMD_ASP		= (1 << 27), /* Aggressive Slumber/Partial */
178 	PORT_CMD_ALPE		= (1 << 26), /* Aggressive Link PM enable */
179 	PORT_CMD_ATAPI		= (1 << 24), /* Device is ATAPI */
180 	PORT_CMD_PMP		= (1 << 17), /* PMP attached */
181 	PORT_CMD_LIST_ON	= (1 << 15), /* cmd list DMA engine running */
182 	PORT_CMD_FIS_ON		= (1 << 14), /* FIS DMA engine running */
183 	PORT_CMD_FIS_RX		= (1 << 4), /* Enable FIS receive DMA engine */
184 	PORT_CMD_CLO		= (1 << 3), /* Command list override */
185 	PORT_CMD_POWER_ON	= (1 << 2), /* Power up device */
186 	PORT_CMD_SPIN_UP	= (1 << 1), /* Spin up device */
187 	PORT_CMD_START		= (1 << 0), /* Enable port DMA engine */
188 
189 	PORT_CMD_ICC_MASK	= (0xf << 28), /* i/f ICC state mask */
190 	PORT_CMD_ICC_ACTIVE	= (0x1 << 28), /* Put i/f in active state */
191 	PORT_CMD_ICC_PARTIAL	= (0x2 << 28), /* Put i/f in partial state */
192 	PORT_CMD_ICC_SLUMBER	= (0x6 << 28), /* Put i/f in slumber state */
193 
194 	/* hpriv->flags bits */
195 	AHCI_HFLAG_NO_NCQ		= (1 << 0),
196 	AHCI_HFLAG_IGN_IRQ_IF_ERR	= (1 << 1), /* ignore IRQ_IF_ERR */
197 	AHCI_HFLAG_IGN_SERR_INTERNAL	= (1 << 2), /* ignore SERR_INTERNAL */
198 	AHCI_HFLAG_32BIT_ONLY		= (1 << 3), /* force 32bit */
199 	AHCI_HFLAG_MV_PATA		= (1 << 4), /* PATA port */
200 	AHCI_HFLAG_NO_MSI		= (1 << 5), /* no PCI MSI */
201 	AHCI_HFLAG_NO_PMP		= (1 << 6), /* no PMP */
202 	AHCI_HFLAG_NO_HOTPLUG		= (1 << 7), /* ignore PxSERR.DIAG.N */
203 	AHCI_HFLAG_SECT255		= (1 << 8), /* max 255 sectors */
204 	AHCI_HFLAG_YES_NCQ		= (1 << 9), /* force NCQ cap on */
205 
206 	/* ap->flags bits */
207 
208 	AHCI_FLAG_COMMON		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
209 					  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
210 					  ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
211 					  ATA_FLAG_IPM,
212 
213 	ICH_MAP				= 0x90, /* ICH MAP register */
214 
215 	/* em_ctl bits */
216 	EM_CTL_RST			= (1 << 9), /* Reset */
217 	EM_CTL_TM			= (1 << 8), /* Transmit Message */
218 	EM_CTL_ALHD			= (1 << 26), /* Activity LED */
219 };
220 
221 struct ahci_cmd_hdr {
222 	__le32			opts;
223 	__le32			status;
224 	__le32			tbl_addr;
225 	__le32			tbl_addr_hi;
226 	__le32			reserved[4];
227 };
228 
229 struct ahci_sg {
230 	__le32			addr;
231 	__le32			addr_hi;
232 	__le32			reserved;
233 	__le32			flags_size;
234 };
235 
236 struct ahci_em_priv {
237 	enum sw_activity blink_policy;
238 	struct timer_list timer;
239 	unsigned long saved_activity;
240 	unsigned long activity;
241 	unsigned long led_state;
242 };
243 
244 struct ahci_host_priv {
245 	unsigned int		flags;		/* AHCI_HFLAG_* */
246 	u32			cap;		/* cap to use */
247 	u32			port_map;	/* port map to use */
248 	u32			saved_cap;	/* saved initial cap */
249 	u32			saved_port_map;	/* saved initial port_map */
250 	u32 			em_loc; /* enclosure management location */
251 };
252 
253 struct ahci_port_priv {
254 	struct ata_link		*active_link;
255 	struct ahci_cmd_hdr	*cmd_slot;
256 	dma_addr_t		cmd_slot_dma;
257 	void			*cmd_tbl;
258 	dma_addr_t		cmd_tbl_dma;
259 	void			*rx_fis;
260 	dma_addr_t		rx_fis_dma;
261 	/* for NCQ spurious interrupt analysis */
262 	unsigned int		ncq_saw_d2h:1;
263 	unsigned int		ncq_saw_dmas:1;
264 	unsigned int		ncq_saw_sdb:1;
265 	u32 			intr_mask;	/* interrupts to enable */
266 	struct ahci_em_priv	em_priv[MAX_SLOTS];/* enclosure management info
267 					 	 * per PM slot */
268 };
269 
270 static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
271 static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
272 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
273 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
274 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
275 static int ahci_port_start(struct ata_port *ap);
276 static void ahci_port_stop(struct ata_port *ap);
277 static void ahci_qc_prep(struct ata_queued_cmd *qc);
278 static void ahci_freeze(struct ata_port *ap);
279 static void ahci_thaw(struct ata_port *ap);
280 static void ahci_pmp_attach(struct ata_port *ap);
281 static void ahci_pmp_detach(struct ata_port *ap);
282 static int ahci_softreset(struct ata_link *link, unsigned int *class,
283 			  unsigned long deadline);
284 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
285 			  unsigned long deadline);
286 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
287 			  unsigned long deadline);
288 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
289 				 unsigned long deadline);
290 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
291 				unsigned long deadline);
292 static void ahci_postreset(struct ata_link *link, unsigned int *class);
293 static void ahci_error_handler(struct ata_port *ap);
294 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
295 static int ahci_port_resume(struct ata_port *ap);
296 static void ahci_dev_config(struct ata_device *dev);
297 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
298 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
299 			       u32 opts);
300 #ifdef CONFIG_PM
301 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
302 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
303 static int ahci_pci_device_resume(struct pci_dev *pdev);
304 #endif
305 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
306 static ssize_t ahci_activity_store(struct ata_device *dev,
307 				   enum sw_activity val);
308 static void ahci_init_sw_activity(struct ata_link *link);
309 
310 static struct device_attribute *ahci_shost_attrs[] = {
311 	&dev_attr_link_power_management_policy,
312 	&dev_attr_em_message_type,
313 	&dev_attr_em_message,
314 	NULL
315 };
316 
317 static struct device_attribute *ahci_sdev_attrs[] = {
318 	&dev_attr_sw_activity,
319 	NULL
320 };
321 
322 static struct scsi_host_template ahci_sht = {
323 	ATA_NCQ_SHT(DRV_NAME),
324 	.can_queue		= AHCI_MAX_CMDS - 1,
325 	.sg_tablesize		= AHCI_MAX_SG,
326 	.dma_boundary		= AHCI_DMA_BOUNDARY,
327 	.shost_attrs		= ahci_shost_attrs,
328 	.sdev_attrs		= ahci_sdev_attrs,
329 };
330 
331 static struct ata_port_operations ahci_ops = {
332 	.inherits		= &sata_pmp_port_ops,
333 
334 	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
335 	.qc_prep		= ahci_qc_prep,
336 	.qc_issue		= ahci_qc_issue,
337 	.qc_fill_rtf		= ahci_qc_fill_rtf,
338 
339 	.freeze			= ahci_freeze,
340 	.thaw			= ahci_thaw,
341 	.softreset		= ahci_softreset,
342 	.hardreset		= ahci_hardreset,
343 	.postreset		= ahci_postreset,
344 	.pmp_softreset		= ahci_softreset,
345 	.error_handler		= ahci_error_handler,
346 	.post_internal_cmd	= ahci_post_internal_cmd,
347 	.dev_config		= ahci_dev_config,
348 
349 	.scr_read		= ahci_scr_read,
350 	.scr_write		= ahci_scr_write,
351 	.pmp_attach		= ahci_pmp_attach,
352 	.pmp_detach		= ahci_pmp_detach,
353 
354 	.enable_pm		= ahci_enable_alpm,
355 	.disable_pm		= ahci_disable_alpm,
356 	.em_show		= ahci_led_show,
357 	.em_store		= ahci_led_store,
358 	.sw_activity_show	= ahci_activity_show,
359 	.sw_activity_store	= ahci_activity_store,
360 #ifdef CONFIG_PM
361 	.port_suspend		= ahci_port_suspend,
362 	.port_resume		= ahci_port_resume,
363 #endif
364 	.port_start		= ahci_port_start,
365 	.port_stop		= ahci_port_stop,
366 };
367 
368 static struct ata_port_operations ahci_vt8251_ops = {
369 	.inherits		= &ahci_ops,
370 	.hardreset		= ahci_vt8251_hardreset,
371 };
372 
373 static struct ata_port_operations ahci_p5wdh_ops = {
374 	.inherits		= &ahci_ops,
375 	.hardreset		= ahci_p5wdh_hardreset,
376 };
377 
378 static struct ata_port_operations ahci_sb600_ops = {
379 	.inherits		= &ahci_ops,
380 	.softreset		= ahci_sb600_softreset,
381 	.pmp_softreset		= ahci_sb600_softreset,
382 };
383 
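/*
 * AHCI_HFLAGS() stashes AHCI_HFLAG_* bits in ata_port_info.private_data;
 * the probe code copies them into hpriv->flags for the host.
 */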
384 #define AHCI_HFLAGS(flags)	.private_data	= (void *)(flags)
385 
386 static const struct ata_port_info ahci_port_info[] = {
387 	/* board_ahci */
388 	{
389 		.flags		= AHCI_FLAG_COMMON,
390 		.pio_mask	= 0x1f, /* pio0-4 */
391 		.udma_mask	= ATA_UDMA6,
392 		.port_ops	= &ahci_ops,
393 	},
394 	/* board_ahci_vt8251 */
395 	{
396 		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
397 		.flags		= AHCI_FLAG_COMMON,
398 		.pio_mask	= 0x1f, /* pio0-4 */
399 		.udma_mask	= ATA_UDMA6,
400 		.port_ops	= &ahci_vt8251_ops,
401 	},
402 	/* board_ahci_ign_iferr */
403 	{
404 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_IRQ_IF_ERR),
405 		.flags		= AHCI_FLAG_COMMON,
406 		.pio_mask	= 0x1f, /* pio0-4 */
407 		.udma_mask	= ATA_UDMA6,
408 		.port_ops	= &ahci_ops,
409 	},
410 	/* board_ahci_sb600 */
411 	{
412 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL |
413 				 AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
414 				 AHCI_HFLAG_SECT255),
415 		.flags		= AHCI_FLAG_COMMON,
416 		.pio_mask	= 0x1f, /* pio0-4 */
417 		.udma_mask	= ATA_UDMA6,
418 		.port_ops	= &ahci_sb600_ops,
419 	},
420 	/* board_ahci_mv */
421 	{
422 		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
423 				 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
424 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
425 				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
426 		.pio_mask	= 0x1f, /* pio0-4 */
427 		.udma_mask	= ATA_UDMA6,
428 		.port_ops	= &ahci_ops,
429 	},
430 	/* board_ahci_sb700 */
431 	{
432 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL),
433 		.flags		= AHCI_FLAG_COMMON,
434 		.pio_mask	= 0x1f, /* pio0-4 */
435 		.udma_mask	= ATA_UDMA6,
436 		.port_ops	= &ahci_sb600_ops,
437 	},
438 	/* board_ahci_mcp65 */
439 	{
440 		AHCI_HFLAGS	(AHCI_HFLAG_YES_NCQ),
441 		.flags		= AHCI_FLAG_COMMON,
442 		.pio_mask	= 0x1f, /* pio0-4 */
443 		.udma_mask	= ATA_UDMA6,
444 		.port_ops	= &ahci_ops,
445 	},
446 	/* board_ahci_nopmp */
447 	{
448 		AHCI_HFLAGS	(AHCI_HFLAG_NO_PMP),
449 		.flags		= AHCI_FLAG_COMMON,
450 		.pio_mask	= 0x1f, /* pio0-4 */
451 		.udma_mask	= ATA_UDMA6,
452 		.port_ops	= &ahci_ops,
453 	},
454 };
455 
456 static const struct pci_device_id ahci_pci_tbl[] = {
457 	/* Intel */
458 	{ PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
459 	{ PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
460 	{ PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
461 	{ PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
462 	{ PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
463 	{ PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
464 	{ PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
465 	{ PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
466 	{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
467 	{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
468 	{ PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
469 	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
470 	{ PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
471 	{ PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
472 	{ PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
473 	{ PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
474 	{ PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
475 	{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
476 	{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
477 	{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
478 	{ PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
479 	{ PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
480 	{ PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
481 	{ PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
482 	{ PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
483 	{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
484 	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
485 	{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
486 	{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
487 	{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
488 	{ PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
489 	{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
490 	{ PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
491 	{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
492 	{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
493 
494 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
495 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
496 	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
497 
498 	/* ATI */
499 	{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
500 	{ PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
501 	{ PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
502 	{ PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
503 	{ PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
504 	{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
505 	{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
506 
507 	/* VIA */
508 	{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
509 	{ PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
510 
511 	/* NVIDIA */
512 	{ PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 },	/* MCP65 */
513 	{ PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 },	/* MCP65 */
514 	{ PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 },	/* MCP65 */
515 	{ PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 },	/* MCP65 */
516 	{ PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 },	/* MCP65 */
517 	{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 },	/* MCP65 */
518 	{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 },	/* MCP65 */
519 	{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 },	/* MCP65 */
520 	{ PCI_VDEVICE(NVIDIA, 0x0550), board_ahci },		/* MCP67 */
521 	{ PCI_VDEVICE(NVIDIA, 0x0551), board_ahci },		/* MCP67 */
522 	{ PCI_VDEVICE(NVIDIA, 0x0552), board_ahci },		/* MCP67 */
523 	{ PCI_VDEVICE(NVIDIA, 0x0553), board_ahci },		/* MCP67 */
524 	{ PCI_VDEVICE(NVIDIA, 0x0554), board_ahci },		/* MCP67 */
525 	{ PCI_VDEVICE(NVIDIA, 0x0555), board_ahci },		/* MCP67 */
526 	{ PCI_VDEVICE(NVIDIA, 0x0556), board_ahci },		/* MCP67 */
527 	{ PCI_VDEVICE(NVIDIA, 0x0557), board_ahci },		/* MCP67 */
528 	{ PCI_VDEVICE(NVIDIA, 0x0558), board_ahci },		/* MCP67 */
529 	{ PCI_VDEVICE(NVIDIA, 0x0559), board_ahci },		/* MCP67 */
530 	{ PCI_VDEVICE(NVIDIA, 0x055a), board_ahci },		/* MCP67 */
531 	{ PCI_VDEVICE(NVIDIA, 0x055b), board_ahci },		/* MCP67 */
532 	{ PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci },		/* MCP73 */
533 	{ PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci },		/* MCP73 */
534 	{ PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci },		/* MCP73 */
535 	{ PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci },		/* MCP73 */
536 	{ PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci },		/* MCP73 */
537 	{ PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci },		/* MCP73 */
538 	{ PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci },		/* MCP73 */
539 	{ PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci },		/* MCP73 */
540 	{ PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci },		/* MCP73 */
541 	{ PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci },		/* MCP73 */
542 	{ PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci },		/* MCP73 */
543 	{ PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci },		/* MCP73 */
544 	{ PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci },		/* MCP77 */
545 	{ PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci },		/* MCP77 */
546 	{ PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci },		/* MCP77 */
547 	{ PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci },		/* MCP77 */
548 	{ PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci },		/* MCP77 */
549 	{ PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci },		/* MCP77 */
550 	{ PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci },		/* MCP77 */
551 	{ PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci },		/* MCP77 */
552 	{ PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci },		/* MCP77 */
553 	{ PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci },		/* MCP77 */
554 	{ PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci },		/* MCP77 */
555 	{ PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci },		/* MCP77 */
556 	{ PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci },		/* MCP79 */
557 	{ PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci },		/* MCP79 */
558 	{ PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci },		/* MCP79 */
559 	{ PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci },		/* MCP79 */
560 	{ PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci },		/* MCP79 */
561 	{ PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci },		/* MCP79 */
562 	{ PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci },		/* MCP79 */
563 	{ PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci },		/* MCP79 */
564 	{ PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci },		/* MCP79 */
565 	{ PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci },		/* MCP79 */
566 	{ PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci },		/* MCP79 */
567 	{ PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci },		/* MCP79 */
568 	{ PCI_VDEVICE(NVIDIA, 0x0bc8), board_ahci },		/* MCP7B */
569 	{ PCI_VDEVICE(NVIDIA, 0x0bc9), board_ahci },		/* MCP7B */
570 	{ PCI_VDEVICE(NVIDIA, 0x0bca), board_ahci },		/* MCP7B */
571 	{ PCI_VDEVICE(NVIDIA, 0x0bcb), board_ahci },		/* MCP7B */
572 	{ PCI_VDEVICE(NVIDIA, 0x0bcc), board_ahci },		/* MCP7B */
573 	{ PCI_VDEVICE(NVIDIA, 0x0bcd), board_ahci },		/* MCP7B */
574 	{ PCI_VDEVICE(NVIDIA, 0x0bce), board_ahci },		/* MCP7B */
575 	{ PCI_VDEVICE(NVIDIA, 0x0bcf), board_ahci },		/* MCP7B */
576 	{ PCI_VDEVICE(NVIDIA, 0x0bc4), board_ahci },		/* MCP7B */
577 	{ PCI_VDEVICE(NVIDIA, 0x0bc5), board_ahci },		/* MCP7B */
578 	{ PCI_VDEVICE(NVIDIA, 0x0bc6), board_ahci },		/* MCP7B */
579 	{ PCI_VDEVICE(NVIDIA, 0x0bc7), board_ahci },		/* MCP7B */
580 
581 	/* SiS */
582 	{ PCI_VDEVICE(SI, 0x1184), board_ahci },		/* SiS 966 */
583 	{ PCI_VDEVICE(SI, 0x1185), board_ahci },		/* SiS 968 */
584 	{ PCI_VDEVICE(SI, 0x0186), board_ahci },		/* SiS 968 */
585 
586 	/* Marvell */
587 	{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv },	/* 6145 */
588 	{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv },	/* 6121 */
589 
590 	/* Generic, PCI class code for AHCI */
591 	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
592 	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
593 
594 	{ }	/* terminate list */
595 };
596 
597 
598 static struct pci_driver ahci_pci_driver = {
599 	.name			= DRV_NAME,
600 	.id_table		= ahci_pci_tbl,
601 	.probe			= ahci_init_one,
602 	.remove			= ata_pci_remove_one,
603 #ifdef CONFIG_PM
604 	.suspend		= ahci_pci_device_suspend,
605 	.resume			= ahci_pci_device_resume,
606 #endif
607 };
608 
609 static int ahci_em_messages = 1;
610 module_param(ahci_em_messages, int, 0444);
611 /* add other LED protocol types when they become supported */
612 MODULE_PARM_DESC(ahci_em_messages,
613 	"Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED)");
614 
615 #if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
616 static int marvell_enable;
617 #else
618 static int marvell_enable = 1;
619 #endif
620 module_param(marvell_enable, int, 0644);
621 MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
622 
623 
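/* CAP.NP (bits 4:0) is a zero-based count of the ports the HBA supports */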
624 static inline int ahci_nr_ports(u32 cap)
625 {
626 	return (cap & 0x1f) + 1;
627 }
628 
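/* per-port register banks start at ABAR offset 0x100 and are 0x80 bytes apart */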
629 static inline void __iomem *__ahci_port_base(struct ata_host *host,
630 					     unsigned int port_no)
631 {
632 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
633 
634 	return mmio + 0x100 + (port_no * 0x80);
635 }
636 
637 static inline void __iomem *ahci_port_base(struct ata_port *ap)
638 {
639 	return __ahci_port_base(ap->host, ap->port_no);
640 }
641 
642 static void ahci_enable_ahci(void __iomem *mmio)
643 {
644 	int i;
645 	u32 tmp;
646 
647 	/* turn on AHCI_EN */
648 	tmp = readl(mmio + HOST_CTL);
649 	if (tmp & HOST_AHCI_EN)
650 		return;
651 
652 	/* Some controllers need AHCI_EN to be written multiple times.
653 	 * Try a few times before giving up.
654 	 */
655 	for (i = 0; i < 5; i++) {
656 		tmp |= HOST_AHCI_EN;
657 		writel(tmp, mmio + HOST_CTL);
658 		tmp = readl(mmio + HOST_CTL);	/* flush && sanity check */
659 		if (tmp & HOST_AHCI_EN)
660 			return;
661 		msleep(10);
662 	}
663 
664 	WARN_ON(1);
665 }
666 
667 /**
668  *	ahci_save_initial_config - Save and fixup initial config values
669  *	@pdev: target PCI device
670  *	@hpriv: host private area to store config values
671  *
672  *	Some registers containing configuration info might be setup by
673  *	BIOS and might be cleared on reset.  This function saves the
674  *	initial values of those registers into @hpriv such that they
675  *	can be restored after controller reset.
676  *
677  *	If inconsistent, config values are fixed up by this function.
678  *
679  *	LOCKING:
680  *	None.
681  */
682 static void ahci_save_initial_config(struct pci_dev *pdev,
683 				     struct ahci_host_priv *hpriv)
684 {
685 	void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
686 	u32 cap, port_map;
687 	int i;
688 	int mv;
689 
690 	/* make sure AHCI mode is enabled before accessing CAP */
691 	ahci_enable_ahci(mmio);
692 
693 	/* Values prefixed with saved_ are written back to host after
694 	 * reset.  Values without are used for driver operation.
695 	 */
696 	hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
697 	hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
698 
699 	/* some chips have errata preventing 64bit use */
700 	if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
701 		dev_printk(KERN_INFO, &pdev->dev,
702 			   "controller can't do 64bit DMA, forcing 32bit\n");
703 		cap &= ~HOST_CAP_64;
704 	}
705 
706 	if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
707 		dev_printk(KERN_INFO, &pdev->dev,
708 			   "controller can't do NCQ, turning off CAP_NCQ\n");
709 		cap &= ~HOST_CAP_NCQ;
710 	}
711 
712 	if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
713 		dev_printk(KERN_INFO, &pdev->dev,
714 			   "controller can do NCQ, turning on CAP_NCQ\n");
715 		cap |= HOST_CAP_NCQ;
716 	}
717 
718 	if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
719 		dev_printk(KERN_INFO, &pdev->dev,
720 			   "controller can't do PMP, turning off CAP_PMP\n");
721 		cap &= ~HOST_CAP_PMP;
722 	}
723 
724 	if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
725 	    port_map != 1) {
726 		dev_printk(KERN_INFO, &pdev->dev,
727 			   "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
728 			   port_map, 1);
729 		port_map = 1;
730 	}
731 
732 	/*
733 	 * Temporary Marvell 6145 hack: PATA port presence
734 	 * is asserted through the standard AHCI port
735 	 * presence register, as bit 4 (counting from 0)
736 	 */
737 	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
738 		if (pdev->device == 0x6121)
739 			mv = 0x3;
740 		else
741 			mv = 0xf;
742 		dev_printk(KERN_ERR, &pdev->dev,
743 			   "MV_AHCI HACK: port_map %x -> %x\n",
744 			   port_map,
745 			   port_map & mv);
746 		dev_printk(KERN_ERR, &pdev->dev,
747 			  "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
748 
749 		port_map &= mv;
750 	}
751 
752 	/* cross check port_map and cap.n_ports */
753 	if (port_map) {
754 		int map_ports = 0;
755 
756 		for (i = 0; i < AHCI_MAX_PORTS; i++)
757 			if (port_map & (1 << i))
758 				map_ports++;
759 
760 		/* If PI has more ports than n_ports, whine, clear
761 		 * port_map and let it be generated from n_ports.
762 		 */
763 		if (map_ports > ahci_nr_ports(cap)) {
764 			dev_printk(KERN_WARNING, &pdev->dev,
765 				   "implemented port map (0x%x) contains more "
766 				   "ports than nr_ports (%u), using nr_ports\n",
767 				   port_map, ahci_nr_ports(cap));
768 			port_map = 0;
769 		}
770 	}
771 
772 	/* fabricate port_map from cap.nr_ports */
773 	if (!port_map) {
774 		port_map = (1 << ahci_nr_ports(cap)) - 1;
775 		dev_printk(KERN_WARNING, &pdev->dev,
776 			   "forcing PORTS_IMPL to 0x%x\n", port_map);
777 
778 		/* write the fixed up value to the PI register */
779 		hpriv->saved_port_map = port_map;
780 	}
781 
782 	/* record values to use during operation */
783 	hpriv->cap = cap;
784 	hpriv->port_map = port_map;
785 }
786 
787 /**
788  *	ahci_restore_initial_config - Restore initial config
789  *	@host: target ATA host
790  *
791  *	Restore initial config stored by ahci_save_initial_config().
792  *
793  *	LOCKING:
794  *	None.
795  */
796 static void ahci_restore_initial_config(struct ata_host *host)
797 {
798 	struct ahci_host_priv *hpriv = host->private_data;
799 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
800 
801 	writel(hpriv->saved_cap, mmio + HOST_CAP);
802 	writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
803 	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */
804 }
805 
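/*
 * Map an SCR index to its offset in the port register bank.  Returns 0 if
 * the register isn't implemented (SNotification requires CAP.SNTF).
 */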
806 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
807 {
808 	static const int offset[] = {
809 		[SCR_STATUS]		= PORT_SCR_STAT,
810 		[SCR_CONTROL]		= PORT_SCR_CTL,
811 		[SCR_ERROR]		= PORT_SCR_ERR,
812 		[SCR_ACTIVE]		= PORT_SCR_ACT,
813 		[SCR_NOTIFICATION]	= PORT_SCR_NTF,
814 	};
815 	struct ahci_host_priv *hpriv = ap->host->private_data;
816 
817 	if (sc_reg < ARRAY_SIZE(offset) &&
818 	    (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
819 		return offset[sc_reg];
820 	return 0;
821 }
822 
823 static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
824 {
825 	void __iomem *port_mmio = ahci_port_base(ap);
826 	int offset = ahci_scr_offset(ap, sc_reg);
827 
828 	if (offset) {
829 		*val = readl(port_mmio + offset);
830 		return 0;
831 	}
832 	return -EINVAL;
833 }
834 
835 static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
836 {
837 	void __iomem *port_mmio = ahci_port_base(ap);
838 	int offset = ahci_scr_offset(ap, sc_reg);
839 
840 	if (offset) {
841 		writel(val, port_mmio + offset);
842 		return 0;
843 	}
844 	return -EINVAL;
845 }
846 
847 static void ahci_start_engine(struct ata_port *ap)
848 {
849 	void __iomem *port_mmio = ahci_port_base(ap);
850 	u32 tmp;
851 
852 	/* start DMA */
853 	tmp = readl(port_mmio + PORT_CMD);
854 	tmp |= PORT_CMD_START;
855 	writel(tmp, port_mmio + PORT_CMD);
856 	readl(port_mmio + PORT_CMD); /* flush */
857 }
858 
859 static int ahci_stop_engine(struct ata_port *ap)
860 {
861 	void __iomem *port_mmio = ahci_port_base(ap);
862 	u32 tmp;
863 
864 	tmp = readl(port_mmio + PORT_CMD);
865 
866 	/* check if the HBA is idle */
867 	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
868 		return 0;
869 
870 	/* setting HBA to idle */
871 	tmp &= ~PORT_CMD_START;
872 	writel(tmp, port_mmio + PORT_CMD);
873 
874 	/* wait for engine to stop. This could be as long as 500 msec */
875 	tmp = ata_wait_register(port_mmio + PORT_CMD,
876 				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
877 	if (tmp & PORT_CMD_LIST_ON)
878 		return -EIO;
879 
880 	return 0;
881 }
882 
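/*
 * Program the command list and received-FIS buffer addresses for the port,
 * then set PxCMD.FRE to enable FIS reception.
 */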
883 static void ahci_start_fis_rx(struct ata_port *ap)
884 {
885 	void __iomem *port_mmio = ahci_port_base(ap);
886 	struct ahci_host_priv *hpriv = ap->host->private_data;
887 	struct ahci_port_priv *pp = ap->private_data;
888 	u32 tmp;
889 
890 	/* set FIS registers */
891 	if (hpriv->cap & HOST_CAP_64)
892 		writel((pp->cmd_slot_dma >> 16) >> 16,
893 		       port_mmio + PORT_LST_ADDR_HI);
894 	writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
895 
896 	if (hpriv->cap & HOST_CAP_64)
897 		writel((pp->rx_fis_dma >> 16) >> 16,
898 		       port_mmio + PORT_FIS_ADDR_HI);
899 	writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
900 
901 	/* enable FIS reception */
902 	tmp = readl(port_mmio + PORT_CMD);
903 	tmp |= PORT_CMD_FIS_RX;
904 	writel(tmp, port_mmio + PORT_CMD);
905 
906 	/* flush */
907 	readl(port_mmio + PORT_CMD);
908 }
909 
910 static int ahci_stop_fis_rx(struct ata_port *ap)
911 {
912 	void __iomem *port_mmio = ahci_port_base(ap);
913 	u32 tmp;
914 
915 	/* disable FIS reception */
916 	tmp = readl(port_mmio + PORT_CMD);
917 	tmp &= ~PORT_CMD_FIS_RX;
918 	writel(tmp, port_mmio + PORT_CMD);
919 
920 	/* wait for completion, spec says 500ms, give it 1000 */
921 	tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
922 				PORT_CMD_FIS_ON, 10, 1000);
923 	if (tmp & PORT_CMD_FIS_ON)
924 		return -EBUSY;
925 
926 	return 0;
927 }
928 
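/*
 * If staggered spin-up is supported, set PxCMD.SUD to spin up the device,
 * then request the active interface power state.
 */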
929 static void ahci_power_up(struct ata_port *ap)
930 {
931 	struct ahci_host_priv *hpriv = ap->host->private_data;
932 	void __iomem *port_mmio = ahci_port_base(ap);
933 	u32 cmd;
934 
935 	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
936 
937 	/* spin up device */
938 	if (hpriv->cap & HOST_CAP_SSS) {
939 		cmd |= PORT_CMD_SPIN_UP;
940 		writel(cmd, port_mmio + PORT_CMD);
941 	}
942 
943 	/* wake up link */
944 	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
945 }
946 
947 static void ahci_disable_alpm(struct ata_port *ap)
948 {
949 	struct ahci_host_priv *hpriv = ap->host->private_data;
950 	void __iomem *port_mmio = ahci_port_base(ap);
951 	u32 cmd;
952 	struct ahci_port_priv *pp = ap->private_data;
953 
954 	/* IPM bits should be disabled by libata-core */
955 	/* get the existing command bits */
956 	cmd = readl(port_mmio + PORT_CMD);
957 
958 	/* disable ALPM and ASP */
959 	cmd &= ~PORT_CMD_ASP;
960 	cmd &= ~PORT_CMD_ALPE;
961 
962 	/* force the interface back to active */
963 	cmd |= PORT_CMD_ICC_ACTIVE;
964 
965 	/* write out new cmd value */
966 	writel(cmd, port_mmio + PORT_CMD);
967 	cmd = readl(port_mmio + PORT_CMD);
968 
969 	/* wait 10ms to be sure we've come out of any low power state */
970 	msleep(10);
971 
972 	/* clear out any PhyRdy stuff from interrupt status */
973 	writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
974 
975 	/* go ahead and clean out PhyRdy Change from Serror too */
976 	ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
977 
978 	/*
979  	 * Clear flag to indicate that we should ignore all PhyRdy
980  	 * state changes
981  	 */
982 	hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
983 
984 	/*
985  	 * Enable interrupts on Phy Ready.
986  	 */
987 	pp->intr_mask |= PORT_IRQ_PHYRDY;
988 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
989 
990 	/*
991  	 * don't change the link pm policy - we can be called
992 	 * just to turn off link pm temporarily
993  	 */
994 }
995 
996 static int ahci_enable_alpm(struct ata_port *ap,
997 	enum link_pm policy)
998 {
999 	struct ahci_host_priv *hpriv = ap->host->private_data;
1000 	void __iomem *port_mmio = ahci_port_base(ap);
1001 	u32 cmd;
1002 	struct ahci_port_priv *pp = ap->private_data;
1003 	u32 asp;
1004 
1005 	/* Make sure the host is capable of link power management */
1006 	if (!(hpriv->cap & HOST_CAP_ALPM))
1007 		return -EINVAL;
1008 
1009 	switch (policy) {
1010 	case MAX_PERFORMANCE:
1011 	case NOT_AVAILABLE:
1012 		/*
1013  		 * if we came here with NOT_AVAILABLE,
1014  		 * it just means this is the first time we
1015  		 * have tried to enable - default to max performance,
1016  		 * and let the user go to lower power modes on request.
1017  		 */
1018 		ahci_disable_alpm(ap);
1019 		return 0;
1020 	case MIN_POWER:
1021 		/* configure HBA to enter SLUMBER */
1022 		asp = PORT_CMD_ASP;
1023 		break;
1024 	case MEDIUM_POWER:
1025 		/* configure HBA to enter PARTIAL */
1026 		asp = 0;
1027 		break;
1028 	default:
1029 		return -EINVAL;
1030 	}
1031 
1032 	/*
1033  	 * Disable interrupts on Phy Ready. This keeps us from
1034  	 * getting woken up due to spurious phy ready interrupts
1035 	 * TBD - Hot plug should be done via polling now, is
1036 	 * that even supported?
1037  	 */
1038 	pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1039 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1040 
1041 	/*
1042  	 * Set a flag to indicate that we should ignore all PhyRdy
1043  	 * state changes since these can happen now whenever we
1044  	 * change link state
1045  	 */
1046 	hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1047 
1048 	/* get the existing command bits */
1049 	cmd = readl(port_mmio + PORT_CMD);
1050 
1051 	/*
1052  	 * Set ASP based on Policy
1053  	 */
1054 	cmd |= asp;
1055 
1056 	/*
1057  	 * Setting this bit will instruct the HBA to aggressively
1058  	 * enter a lower power link state when it's appropriate and
1059  	 * based on the value set above for ASP
1060  	 */
1061 	cmd |= PORT_CMD_ALPE;
1062 
1063 	/* write out new cmd value */
1064 	writel(cmd, port_mmio + PORT_CMD);
1065 	cmd = readl(port_mmio + PORT_CMD);
1066 
1067 	/* IPM bits should be set by libata-core */
1068 	return 0;
1069 }
1070 
1071 #ifdef CONFIG_PM
1072 static void ahci_power_down(struct ata_port *ap)
1073 {
1074 	struct ahci_host_priv *hpriv = ap->host->private_data;
1075 	void __iomem *port_mmio = ahci_port_base(ap);
1076 	u32 cmd, scontrol;
1077 
1078 	if (!(hpriv->cap & HOST_CAP_SSS))
1079 		return;
1080 
1081 	/* put device into listen mode, first set PxSCTL.DET to 0 */
1082 	scontrol = readl(port_mmio + PORT_SCR_CTL);
1083 	scontrol &= ~0xf;
1084 	writel(scontrol, port_mmio + PORT_SCR_CTL);
1085 
1086 	/* then set PxCMD.SUD to 0 */
1087 	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1088 	cmd &= ~PORT_CMD_SPIN_UP;
1089 	writel(cmd, port_mmio + PORT_CMD);
1090 }
1091 #endif
1092 
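/*
 * Bring the port into the running state: FIS reception on, command DMA
 * engine started, and enclosure-management/sw-activity LED state restored.
 */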
1093 static void ahci_start_port(struct ata_port *ap)
1094 {
1095 	struct ahci_port_priv *pp = ap->private_data;
1096 	struct ata_link *link;
1097 	struct ahci_em_priv *emp;
1098 
1099 	/* enable FIS reception */
1100 	ahci_start_fis_rx(ap);
1101 
1102 	/* enable DMA */
1103 	ahci_start_engine(ap);
1104 
1105 	/* turn on LEDs */
1106 	if (ap->flags & ATA_FLAG_EM) {
1107 		ata_port_for_each_link(link, ap) {
1108 			emp = &pp->em_priv[link->pmp];
1109 			ahci_transmit_led_message(ap, emp->led_state, 4);
1110 		}
1111 	}
1112 
1113 	if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1114 		ata_port_for_each_link(link, ap)
1115 			ahci_init_sw_activity(link);
1116 
1117 }
1118 
1119 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1120 {
1121 	int rc;
1122 
1123 	/* disable DMA */
1124 	rc = ahci_stop_engine(ap);
1125 	if (rc) {
1126 		*emsg = "failed to stop engine";
1127 		return rc;
1128 	}
1129 
1130 	/* disable FIS reception */
1131 	rc = ahci_stop_fis_rx(ap);
1132 	if (rc) {
1133 		*emsg = "failed to stop FIS RX";
1134 		return rc;
1135 	}
1136 
1137 	return 0;
1138 }
1139 
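/*
 * Reset the whole HBA via HOST_CTL.HR (unless the skip_host_reset parameter
 * is set), re-enter AHCI mode and restore the saved CAP/PI values.  Intel
 * parts additionally need the PCS register to enable the implemented ports.
 */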
1140 static int ahci_reset_controller(struct ata_host *host)
1141 {
1142 	struct pci_dev *pdev = to_pci_dev(host->dev);
1143 	struct ahci_host_priv *hpriv = host->private_data;
1144 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1145 	u32 tmp;
1146 
1147 	/* we must be in AHCI mode, before using anything
1148 	 * AHCI-specific, such as HOST_RESET.
1149 	 */
1150 	ahci_enable_ahci(mmio);
1151 
1152 	/* global controller reset */
1153 	if (!ahci_skip_host_reset) {
1154 		tmp = readl(mmio + HOST_CTL);
1155 		if ((tmp & HOST_RESET) == 0) {
1156 			writel(tmp | HOST_RESET, mmio + HOST_CTL);
1157 			readl(mmio + HOST_CTL); /* flush */
1158 		}
1159 
1160 		/*
1161 		 * to perform host reset, OS should set HOST_RESET
1162 		 * and poll until this bit is read to be "0".
1163 		 * reset must complete within 1 second, or
1164 		 * the hardware should be considered fried.
1165 		 */
1166 		tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1167 					HOST_RESET, 10, 1000);
1168 
1169 		if (tmp & HOST_RESET) {
1170 			dev_printk(KERN_ERR, host->dev,
1171 				   "controller reset failed (0x%x)\n", tmp);
1172 			return -EIO;
1173 		}
1174 
1175 		/* turn on AHCI mode */
1176 		ahci_enable_ahci(mmio);
1177 
1178 		/* Some registers might be cleared on reset.  Restore
1179 		 * initial values.
1180 		 */
1181 		ahci_restore_initial_config(host);
1182 	} else
1183 		dev_printk(KERN_INFO, host->dev,
1184 			   "skipping global host reset\n");
1185 
1186 	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1187 		u16 tmp16;
1188 
1189 		/* configure PCS */
1190 		pci_read_config_word(pdev, 0x92, &tmp16);
1191 		if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1192 			tmp16 |= hpriv->port_map;
1193 			pci_write_config_word(pdev, 0x92, tmp16);
1194 		}
1195 	}
1196 
1197 	return 0;
1198 }
1199 
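/*
 * Count activity on the link and arm the blink timer if software-activity
 * LEDs are enabled for it.
 */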
1200 static void ahci_sw_activity(struct ata_link *link)
1201 {
1202 	struct ata_port *ap = link->ap;
1203 	struct ahci_port_priv *pp = ap->private_data;
1204 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1205 
1206 	if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1207 		return;
1208 
1209 	emp->activity++;
1210 	if (!timer_pending(&emp->timer))
1211 		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
1212 }
1213 
1214 static void ahci_sw_activity_blink(unsigned long arg)
1215 {
1216 	struct ata_link *link = (struct ata_link *)arg;
1217 	struct ata_port *ap = link->ap;
1218 	struct ahci_port_priv *pp = ap->private_data;
1219 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1220 	unsigned long led_message = emp->led_state;
1221 	u32 activity_led_state;
1222 
1223 	led_message &= 0xffff0000;
1224 	led_message |= ap->port_no | (link->pmp << 8);
1225 
1226 	/* check to see if we've had activity.  If so,
1227 	 * toggle state of LED and reset timer.  If not,
1228 	 * turn LED to desired idle state.
1229 	 */
1230 	if (emp->saved_activity != emp->activity) {
1231 		emp->saved_activity = emp->activity;
1232 		/* get the current LED state */
1233 		activity_led_state = led_message & 0x00010000;
1234 
1235 		if (activity_led_state)
1236 			activity_led_state = 0;
1237 		else
1238 			activity_led_state = 1;
1239 
1240 		/* clear old state */
1241 		led_message &= 0xfff8ffff;
1242 
1243 		/* toggle state */
1244 		led_message |= (activity_led_state << 16);
1245 		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1246 	} else {
1247 		/* switch to idle */
1248 		led_message &= 0xfff8ffff;
1249 		if (emp->blink_policy == BLINK_OFF)
1250 			led_message |= (1 << 16);
1251 	}
1252 	ahci_transmit_led_message(ap, led_message, 4);
1253 }
1254 
1255 static void ahci_init_sw_activity(struct ata_link *link)
1256 {
1257 	struct ata_port *ap = link->ap;
1258 	struct ahci_port_priv *pp = ap->private_data;
1259 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1260 
1261 	/* init activity stats, setup timer */
1262 	emp->saved_activity = emp->activity = 0;
1263 	setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1264 
1265 	/* check our blink policy and set flag for link if it's enabled */
1266 	if (emp->blink_policy)
1267 		link->flags |= ATA_LFLAG_SW_ACTIVITY;
1268 }
1269 
1270 static int ahci_reset_em(struct ata_host *host)
1271 {
1272 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1273 	u32 em_ctl;
1274 
1275 	em_ctl = readl(mmio + HOST_EM_CTL);
1276 	if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1277 		return -EINVAL;
1278 
1279 	writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1280 	return 0;
1281 }
1282 
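/*
 * Write a two-dword LED message to the EM transmit buffer at hpriv->em_loc
 * and set EM_CTL.TM to start transmission.  Fails with -EINVAL if a
 * previous message is still being transmitted.
 */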
1283 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1284 					ssize_t size)
1285 {
1286 	struct ahci_host_priv *hpriv = ap->host->private_data;
1287 	struct ahci_port_priv *pp = ap->private_data;
1288 	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1289 	u32 em_ctl;
1290 	u32 message[] = {0, 0};
1291 	unsigned long flags;
1292 	int pmp;
1293 	struct ahci_em_priv *emp;
1294 
1295 	/* get the slot number from the message */
1296 	pmp = (state & 0x0000ff00) >> 8;
1297 	if (pmp < MAX_SLOTS)
1298 		emp = &pp->em_priv[pmp];
1299 	else
1300 		return -EINVAL;
1301 
1302 	spin_lock_irqsave(ap->lock, flags);
1303 
1304 	/*
1305 	 * if we are still busy transmitting a previous message,
1306 	 * do not allow another message to be queued
1307 	 */
1308 	em_ctl = readl(mmio + HOST_EM_CTL);
1309 	if (em_ctl & EM_CTL_TM) {
1310 		spin_unlock_irqrestore(ap->lock, flags);
1311 		return -EINVAL;
1312 	}
1313 
1314 	/*
1315 	 * create message header - this is all zero except for
1316 	 * the message size, which is 4 bytes.
1317 	 */
1318 	message[0] |= (4 << 8);
1319 
1320 	/* ignore the low nibble of byte zero, fill in the port number ourselves */
1321 	message[1] = ((state & 0xfffffff0) | ap->port_no);
1322 
1323 	/* write message to EM_LOC */
1324 	writel(message[0], mmio + hpriv->em_loc);
1325 	writel(message[1], mmio + hpriv->em_loc+4);
1326 
1327 	/* save off new led state for port/slot */
1328 	emp->led_state = message[1];
1329 
1330 	/*
1331 	 * tell hardware to transmit the message
1332 	 */
1333 	writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1334 
1335 	spin_unlock_irqrestore(ap->lock, flags);
1336 	return size;
1337 }
1338 
1339 static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1340 {
1341 	struct ahci_port_priv *pp = ap->private_data;
1342 	struct ata_link *link;
1343 	struct ahci_em_priv *emp;
1344 	int rc = 0;
1345 
1346 	ata_port_for_each_link(link, ap) {
1347 		emp = &pp->em_priv[link->pmp];
1348 		rc += sprintf(buf + rc, "%lx\n", emp->led_state);
1349 	}
1350 	return rc;
1351 }
1352 
1353 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1354 				size_t size)
1355 {
1356 	int state;
1357 	int pmp;
1358 	struct ahci_port_priv *pp = ap->private_data;
1359 	struct ahci_em_priv *emp;
1360 
1361 	state = simple_strtoul(buf, NULL, 0);
1362 
1363 	/* get the slot number from the message */
1364 	pmp = (state & 0x0000ff00) >> 8;
1365 	if (pmp < MAX_SLOTS)
1366 		emp = &pp->em_priv[pmp];
1367 	else
1368 		return -EINVAL;
1369 
1370 	/* mask off the activity bits if we are in sw_activity
1371 	 * mode; the user should turn off sw_activity before setting
1372 	 * the activity LED through em_message
1373 	 */
1374 	if (emp->blink_policy)
1375 		state &= 0xfff8ffff;
1376 
1377 	return ahci_transmit_led_message(ap, state, size);
1378 }
1379 
1380 static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1381 {
1382 	struct ata_link *link = dev->link;
1383 	struct ata_port *ap = link->ap;
1384 	struct ahci_port_priv *pp = ap->private_data;
1385 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1386 	u32 port_led_state = emp->led_state;
1387 
1388 	/* save the desired Activity LED behavior */
1389 	if (val == OFF) {
1390 		/* clear LFLAG */
1391 		link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1392 
1393 		/* set the LED to OFF */
1394 		port_led_state &= 0xfff80000;
1395 		port_led_state |= (ap->port_no | (link->pmp << 8));
1396 		ahci_transmit_led_message(ap, port_led_state, 4);
1397 	} else {
1398 		link->flags |= ATA_LFLAG_SW_ACTIVITY;
1399 		if (val == BLINK_OFF) {
1400 			/* set LED to ON for idle */
1401 			port_led_state &= 0xfff80000;
1402 			port_led_state |= (ap->port_no | (link->pmp << 8));
1403 			port_led_state |= 0x00010000; /* check this */
1404 			ahci_transmit_led_message(ap, port_led_state, 4);
1405 		}
1406 	}
1407 	emp->blink_policy = val;
1408 	return 0;
1409 }
1410 
1411 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1412 {
1413 	struct ata_link *link = dev->link;
1414 	struct ata_port *ap = link->ap;
1415 	struct ahci_port_priv *pp = ap->private_data;
1416 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1417 
1418 	/* display the saved value of activity behavior for this
1419 	 * disk.
1420 	 */
1421 	return sprintf(buf, "%d\n", emp->blink_policy);
1422 }
1423 
1424 static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1425 			   int port_no, void __iomem *mmio,
1426 			   void __iomem *port_mmio)
1427 {
1428 	const char *emsg = NULL;
1429 	int rc;
1430 	u32 tmp;
1431 
1432 	/* make sure port is not active */
1433 	rc = ahci_deinit_port(ap, &emsg);
1434 	if (rc)
1435 		dev_printk(KERN_WARNING, &pdev->dev,
1436 			   "%s (%d)\n", emsg, rc);
1437 
1438 	/* clear SError */
1439 	tmp = readl(port_mmio + PORT_SCR_ERR);
1440 	VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1441 	writel(tmp, port_mmio + PORT_SCR_ERR);
1442 
1443 	/* clear port IRQ */
1444 	tmp = readl(port_mmio + PORT_IRQ_STAT);
1445 	VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1446 	if (tmp)
1447 		writel(tmp, port_mmio + PORT_IRQ_STAT);
1448 
1449 	writel(1 << port_no, mmio + HOST_IRQ_STAT);
1450 }
1451 
1452 static void ahci_init_controller(struct ata_host *host)
1453 {
1454 	struct ahci_host_priv *hpriv = host->private_data;
1455 	struct pci_dev *pdev = to_pci_dev(host->dev);
1456 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1457 	int i;
1458 	void __iomem *port_mmio;
1459 	u32 tmp;
1460 	int mv;
1461 
1462 	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
1463 		if (pdev->device == 0x6121)
1464 			mv = 2;
1465 		else
1466 			mv = 4;
1467 		port_mmio = __ahci_port_base(host, mv);
1468 
1469 		writel(0, port_mmio + PORT_IRQ_MASK);
1470 
1471 		/* clear port IRQ */
1472 		tmp = readl(port_mmio + PORT_IRQ_STAT);
1473 		VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1474 		if (tmp)
1475 			writel(tmp, port_mmio + PORT_IRQ_STAT);
1476 	}
1477 
1478 	for (i = 0; i < host->n_ports; i++) {
1479 		struct ata_port *ap = host->ports[i];
1480 
1481 		port_mmio = ahci_port_base(ap);
1482 		if (ata_port_is_dummy(ap))
1483 			continue;
1484 
1485 		ahci_port_init(pdev, ap, i, mmio, port_mmio);
1486 	}
1487 
1488 	tmp = readl(mmio + HOST_CTL);
1489 	VPRINTK("HOST_CTL 0x%x\n", tmp);
1490 	writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1491 	tmp = readl(mmio + HOST_CTL);
1492 	VPRINTK("HOST_CTL 0x%x\n", tmp);
1493 }
1494 
1495 static void ahci_dev_config(struct ata_device *dev)
1496 {
1497 	struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1498 
1499 	if (hpriv->flags & AHCI_HFLAG_SECT255) {
1500 		dev->max_sectors = 255;
1501 		ata_dev_printk(dev, KERN_INFO,
1502 			       "SB600 AHCI: limiting to 255 sectors per cmd\n");
1503 	}
1504 }
1505 
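/* rebuild the signature taskfile from PxSIG and let libata classify the device */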
1506 static unsigned int ahci_dev_classify(struct ata_port *ap)
1507 {
1508 	void __iomem *port_mmio = ahci_port_base(ap);
1509 	struct ata_taskfile tf;
1510 	u32 tmp;
1511 
1512 	tmp = readl(port_mmio + PORT_SIG);
1513 	tf.lbah		= (tmp >> 24)	& 0xff;
1514 	tf.lbam		= (tmp >> 16)	& 0xff;
1515 	tf.lbal		= (tmp >> 8)	& 0xff;
1516 	tf.nsect	= (tmp)		& 0xff;
1517 
1518 	return ata_dev_classify(&tf);
1519 }
1520 
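/*
 * Fill one command-list header for @tag: option bits (FIS length, flags,
 * PMP port) and the physical address of its command table.
 */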
1521 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1522 			       u32 opts)
1523 {
1524 	dma_addr_t cmd_tbl_dma;
1525 
1526 	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1527 
1528 	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1529 	pp->cmd_slot[tag].status = 0;
1530 	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1531 	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1532 }
1533 
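/*
 * Stop the command engine and, if the device is still busy, issue a command
 * list override (PxCMD.CLO) to clear BSY/DRQ before restarting the engine.
 */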
1534 static int ahci_kick_engine(struct ata_port *ap, int force_restart)
1535 {
1536 	void __iomem *port_mmio = ahci_port_base(ap);
1537 	struct ahci_host_priv *hpriv = ap->host->private_data;
1538 	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1539 	u32 tmp;
1540 	int busy, rc;
1541 
1542 	/* do we need to kick the port? */
1543 	busy = status & (ATA_BUSY | ATA_DRQ);
1544 	if (!busy && !force_restart)
1545 		return 0;
1546 
1547 	/* stop engine */
1548 	rc = ahci_stop_engine(ap);
1549 	if (rc)
1550 		goto out_restart;
1551 
1552 	/* need to do CLO? */
1553 	if (!busy) {
1554 		rc = 0;
1555 		goto out_restart;
1556 	}
1557 
1558 	if (!(hpriv->cap & HOST_CAP_CLO)) {
1559 		rc = -EOPNOTSUPP;
1560 		goto out_restart;
1561 	}
1562 
1563 	/* perform CLO */
1564 	tmp = readl(port_mmio + PORT_CMD);
1565 	tmp |= PORT_CMD_CLO;
1566 	writel(tmp, port_mmio + PORT_CMD);
1567 
1568 	rc = 0;
1569 	tmp = ata_wait_register(port_mmio + PORT_CMD,
1570 				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1571 	if (tmp & PORT_CMD_CLO)
1572 		rc = -EIO;
1573 
1574 	/* restart engine */
1575  out_restart:
1576 	ahci_start_engine(ap);
1577 	return rc;
1578 }
1579 
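/*
 * Build a register FIS in command slot 0, issue it and, if a timeout is
 * given, poll PxCI until the slot clears.
 */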
1580 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1581 				struct ata_taskfile *tf, int is_cmd, u16 flags,
1582 				unsigned long timeout_msec)
1583 {
1584 	const u32 cmd_fis_len = 5; /* five dwords */
1585 	struct ahci_port_priv *pp = ap->private_data;
1586 	void __iomem *port_mmio = ahci_port_base(ap);
1587 	u8 *fis = pp->cmd_tbl;
1588 	u32 tmp;
1589 
1590 	/* prep the command */
1591 	ata_tf_to_fis(tf, pmp, is_cmd, fis);
1592 	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1593 
1594 	/* issue & wait */
1595 	writel(1, port_mmio + PORT_CMD_ISSUE);
1596 
1597 	if (timeout_msec) {
1598 		tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1599 					1, timeout_msec);
1600 		if (tmp & 0x1) {
1601 			ahci_kick_engine(ap, 1);
1602 			return -EBUSY;
1603 		}
1604 	} else
1605 		readl(port_mmio + PORT_CMD_ISSUE);	/* flush */
1606 
1607 	return 0;
1608 }
1609 
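/*
 * Software reset per AHCI 1.1 section 10.4.1: send an H2D register FIS with
 * SRST set, wait briefly, send a second one with SRST cleared, then wait for
 * the link to become ready and classify the device from its signature.
 */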
1610 static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1611 			     int pmp, unsigned long deadline,
1612 			     int (*check_ready)(struct ata_link *link))
1613 {
1614 	struct ata_port *ap = link->ap;
1615 	const char *reason = NULL;
1616 	unsigned long now, msecs;
1617 	struct ata_taskfile tf;
1618 	int rc;
1619 
1620 	DPRINTK("ENTER\n");
1621 
1622 	/* prepare for SRST (AHCI-1.1 10.4.1) */
1623 	rc = ahci_kick_engine(ap, 1);
1624 	if (rc && rc != -EOPNOTSUPP)
1625 		ata_link_printk(link, KERN_WARNING,
1626 				"failed to reset engine (errno=%d)\n", rc);
1627 
1628 	ata_tf_init(link->device, &tf);
1629 
1630 	/* issue the first H2D Register FIS */
1631 	msecs = 0;
1632 	now = jiffies;
1633 	if (time_after(deadline, now))
1634 		msecs = jiffies_to_msecs(deadline - now);
1635 
1636 	tf.ctl |= ATA_SRST;
1637 	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1638 				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1639 		rc = -EIO;
1640 		reason = "1st FIS failed";
1641 		goto fail;
1642 	}
1643 
1644 	/* spec says at least 5us, but be generous and sleep for 1ms */
1645 	msleep(1);
1646 
1647 	/* issue the second H2D Register FIS */
1648 	tf.ctl &= ~ATA_SRST;
1649 	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1650 
1651 	/* wait for link to become ready */
1652 	rc = ata_wait_after_reset(link, deadline, check_ready);
1653 	/* link occupied, -ENODEV too is an error */
1654 	if (rc) {
1655 		reason = "device not ready";
1656 		goto fail;
1657 	}
1658 	*class = ahci_dev_classify(ap);
1659 
1660 	DPRINTK("EXIT, class=%u\n", *class);
1661 	return 0;
1662 
1663  fail:
1664 	ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1665 	return rc;
1666 }
1667 
1668 static int ahci_check_ready(struct ata_link *link)
1669 {
1670 	void __iomem *port_mmio = ahci_port_base(link->ap);
1671 	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1672 
1673 	return ata_check_ready(status);
1674 }
1675 
1676 static int ahci_softreset(struct ata_link *link, unsigned int *class,
1677 			  unsigned long deadline)
1678 {
1679 	int pmp = sata_srst_pmp(link);
1680 
1681 	DPRINTK("ENTER\n");
1682 
1683 	return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1684 }
1685 
1686 static int ahci_sb600_check_ready(struct ata_link *link)
1687 {
1688 	void __iomem *port_mmio = ahci_port_base(link->ap);
1689 	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1690 	u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
1691 
1692 	/*
1693 	 * Due to a HW bug there is no need to check TFDATA if BAD PMP is
1694 	 * found; bailing out early saves the timeout delay.
1695 	 */
1696 	if (irq_status & PORT_IRQ_BAD_PMP)
1697 		return -EIO;
1698 
1699 	return ata_check_ready(status);
1700 }
1701 
1702 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1703 				unsigned long deadline)
1704 {
1705 	struct ata_port *ap = link->ap;
1706 	void __iomem *port_mmio = ahci_port_base(ap);
1707 	int pmp = sata_srst_pmp(link);
1708 	int rc;
1709 	u32 irq_sts;
1710 
1711 	DPRINTK("ENTER\n");
1712 
1713 	rc = ahci_do_softreset(link, class, pmp, deadline,
1714 			       ahci_sb600_check_ready);
1715 
1716 	/*
1717 	 * Soft reset fails on some ATI chips with IPMS set when PMP
1718 	 * is enabled but a SATA HDD/ODD is connected to a SATA port;
1719 	 * in that case retry the soft reset with pmp == 0.
1720 	 */
1721 	if (rc == -EIO) {
1722 		irq_sts = readl(port_mmio + PORT_IRQ_STAT);
1723 		if (irq_sts & PORT_IRQ_BAD_PMP) {
1724 			ata_link_printk(link, KERN_WARNING,
1725 					"failed due to HW bug, retry pmp=0\n");
1726 			rc = ahci_do_softreset(link, class, 0, deadline,
1727 					       ahci_check_ready);
1728 		}
1729 	}
1730 
1731 	return rc;
1732 }
1733 
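/*
 * Standard AHCI hardreset: stop the DMA engine, reset the D2H RX area
 * to a BSY state so a stale FIS is not mistaken for a fresh one,
 * perform COMRESET via sata_link_hardreset() and restart the engine.
 * If the link comes up online, classify the attached device.
 */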
1734 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1735 			  unsigned long deadline)
1736 {
1737 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1738 	struct ata_port *ap = link->ap;
1739 	struct ahci_port_priv *pp = ap->private_data;
1740 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1741 	struct ata_taskfile tf;
1742 	bool online;
1743 	int rc;
1744 
1745 	DPRINTK("ENTER\n");
1746 
1747 	ahci_stop_engine(ap);
1748 
1749 	/* clear D2H reception area to properly wait for D2H FIS */
1750 	ata_tf_init(link->device, &tf);
1751 	tf.command = 0x80;	/* ATA_BUSY */
1752 	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1753 
1754 	rc = sata_link_hardreset(link, timing, deadline, &online,
1755 				 ahci_check_ready);
1756 
1757 	ahci_start_engine(ap);
1758 
1759 	if (online)
1760 		*class = ahci_dev_classify(ap);
1761 
1762 	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1763 	return rc;
1764 }
1765 
1766 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1767 				 unsigned long deadline)
1768 {
1769 	struct ata_port *ap = link->ap;
1770 	bool online;
1771 	int rc;
1772 
1773 	DPRINTK("ENTER\n");
1774 
1775 	ahci_stop_engine(ap);
1776 
1777 	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1778 				 deadline, &online, NULL);
1779 
1780 	ahci_start_engine(ap);
1781 
1782 	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1783 
1784 	/* vt8251 doesn't clear BSY on signature FIS reception,
1785 	 * so request a follow-up softreset.
1786 	 */
1787 	return online ? -EAGAIN : rc;
1788 }
1789 
1790 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
1791 				unsigned long deadline)
1792 {
1793 	struct ata_port *ap = link->ap;
1794 	struct ahci_port_priv *pp = ap->private_data;
1795 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1796 	struct ata_taskfile tf;
1797 	bool online;
1798 	int rc;
1799 
1800 	ahci_stop_engine(ap);
1801 
1802 	/* clear D2H reception area to properly wait for D2H FIS */
1803 	ata_tf_init(link->device, &tf);
1804 	tf.command = 0x80;	/* ATA_BUSY */
1805 	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1806 
1807 	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1808 				 deadline, &online, NULL);
1809 
1810 	ahci_start_engine(ap);
1811 
1812 	/* The pseudo configuration device on SIMG4726 attached to
1813 	 * ASUS P5W-DH Deluxe doesn't send signature FIS after
1814 	 * hardreset if no device is attached to the first downstream
1815 	 * port && the pseudo device locks up on SRST w/ PMP==0.  To
1816 	 * work around this, wait for !BSY only briefly.  If BSY isn't
1817 	 * cleared, perform CLO and proceed to IDENTIFY (achieved by
1818 	 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
1819 	 *
1820 	 * Wait for two seconds.  Devices attached to downstream port
1821 	 * which can't process the following IDENTIFY after this will
1822 	 * have to be reset again.  For most cases, this should
1823 	 * suffice while making probing snappish enough.
1824 	 */
1825 	if (online) {
1826 		rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
1827 					  ahci_check_ready);
1828 		if (rc)
1829 			ahci_kick_engine(ap, 0);
1830 	}
1831 	return rc;
1832 }
1833 
1834 static void ahci_postreset(struct ata_link *link, unsigned int *class)
1835 {
1836 	struct ata_port *ap = link->ap;
1837 	void __iomem *port_mmio = ahci_port_base(ap);
1838 	u32 new_tmp, tmp;
1839 
1840 	ata_std_postreset(link, class);
1841 
1842 	/* Make sure port's ATAPI bit is set appropriately */
1843 	new_tmp = tmp = readl(port_mmio + PORT_CMD);
1844 	if (*class == ATA_DEV_ATAPI)
1845 		new_tmp |= PORT_CMD_ATAPI;
1846 	else
1847 		new_tmp &= ~PORT_CMD_ATAPI;
1848 	if (new_tmp != tmp) {
1849 		writel(new_tmp, port_mmio + PORT_CMD);
1850 		readl(port_mmio + PORT_CMD); /* flush */
1851 	}
1852 }
1853 
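/*
 * Build the PRD table that follows the command FIS in the command
 * table (offset AHCI_CMD_TBL_HDR_SZ).  Each entry carries the DMA
 * address split into low/high dwords and the byte count minus one in
 * flags_size.  The (addr >> 16) >> 16 split keeps the shift valid
 * when dma_addr_t is only 32 bits wide.
 */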
1854 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
1855 {
1856 	struct scatterlist *sg;
1857 	struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
1858 	unsigned int si;
1859 
1860 	VPRINTK("ENTER\n");
1861 
1862 	/*
1863 	 * Next, the S/G list.
1864 	 */
1865 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1866 		dma_addr_t addr = sg_dma_address(sg);
1867 		u32 sg_len = sg_dma_len(sg);
1868 
1869 		ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
1870 		ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
1871 		ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
1872 	}
1873 
1874 	return si;
1875 }
1876 
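/*
 * Per-command setup: each tag owns an AHCI_CMD_TBL_SZ slice of the
 * command table area.  The H2D command FIS goes at offset 0, an ATAPI
 * CDB (if any) at AHCI_CMD_TBL_CDB, and the scatter/gather PRD table
 * at AHCI_CMD_TBL_HDR_SZ.  The matching command-list slot is then
 * filled with the FIS length, PRD count, PMP number and flags.
 */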
1877 static void ahci_qc_prep(struct ata_queued_cmd *qc)
1878 {
1879 	struct ata_port *ap = qc->ap;
1880 	struct ahci_port_priv *pp = ap->private_data;
1881 	int is_atapi = ata_is_atapi(qc->tf.protocol);
1882 	void *cmd_tbl;
1883 	u32 opts;
1884 	const u32 cmd_fis_len = 5; /* five dwords */
1885 	unsigned int n_elem;
1886 
1887 	/*
1888 	 * Fill in command table information.  First, the header,
1889 	 * a SATA Register - Host to Device command FIS.
1890 	 */
1891 	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
1892 
1893 	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
1894 	if (is_atapi) {
1895 		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
1896 		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
1897 	}
1898 
1899 	n_elem = 0;
1900 	if (qc->flags & ATA_QCFLAG_DMAMAP)
1901 		n_elem = ahci_fill_sg(qc, cmd_tbl);
1902 
1903 	/*
1904 	 * Fill in command slot information.
1905 	 */
1906 	opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
1907 	if (qc->tf.flags & ATA_TFLAG_WRITE)
1908 		opts |= AHCI_CMD_WRITE;
1909 	if (is_atapi)
1910 		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
1911 
1912 	ahci_fill_cmd_slot(pp, qc->tag, opts);
1913 }
1914 
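/*
 * Decode a port error interrupt: snapshot and clear SError, translate
 * the PORT_IRQ_* error bits into libata error masks and EH actions on
 * the active link, then either freeze the port (fatal errors) or abort
 * outstanding commands and let EH take over.
 */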
1915 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
1916 {
1917 	struct ahci_host_priv *hpriv = ap->host->private_data;
1918 	struct ahci_port_priv *pp = ap->private_data;
1919 	struct ata_eh_info *host_ehi = &ap->link.eh_info;
1920 	struct ata_link *link = NULL;
1921 	struct ata_queued_cmd *active_qc;
1922 	struct ata_eh_info *active_ehi;
1923 	u32 serror;
1924 
1925 	/* determine active link */
1926 	ata_port_for_each_link(link, ap)
1927 		if (ata_link_active(link))
1928 			break;
1929 	if (!link)
1930 		link = &ap->link;
1931 
1932 	active_qc = ata_qc_from_tag(ap, link->active_tag);
1933 	active_ehi = &link->eh_info;
1934 
1935 	/* record irq stat */
1936 	ata_ehi_clear_desc(host_ehi);
1937 	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
1938 
1939 	/* AHCI needs SError cleared; otherwise, it might lock up */
1940 	ahci_scr_read(ap, SCR_ERROR, &serror);
1941 	ahci_scr_write(ap, SCR_ERROR, serror);
1942 	host_ehi->serror |= serror;
1943 
1944 	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
1945 	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
1946 		irq_stat &= ~PORT_IRQ_IF_ERR;
1947 
1948 	if (irq_stat & PORT_IRQ_TF_ERR) {
1949 		/* If qc is active, charge it; otherwise, the active
1950 		 * link.  There's no active qc on NCQ errors.  It will
1951 		 * be determined by EH by reading log page 10h.
1952 		 */
1953 		if (active_qc)
1954 			active_qc->err_mask |= AC_ERR_DEV;
1955 		else
1956 			active_ehi->err_mask |= AC_ERR_DEV;
1957 
1958 		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
1959 			host_ehi->serror &= ~SERR_INTERNAL;
1960 	}
1961 
1962 	if (irq_stat & PORT_IRQ_UNK_FIS) {
1963 		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
1964 
1965 		active_ehi->err_mask |= AC_ERR_HSM;
1966 		active_ehi->action |= ATA_EH_RESET;
1967 		ata_ehi_push_desc(active_ehi,
1968 				  "unknown FIS %08x %08x %08x %08x" ,
1969 				  unk[0], unk[1], unk[2], unk[3]);
1970 	}
1971 
1972 	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
1973 		active_ehi->err_mask |= AC_ERR_HSM;
1974 		active_ehi->action |= ATA_EH_RESET;
1975 		ata_ehi_push_desc(active_ehi, "incorrect PMP");
1976 	}
1977 
1978 	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
1979 		host_ehi->err_mask |= AC_ERR_HOST_BUS;
1980 		host_ehi->action |= ATA_EH_RESET;
1981 		ata_ehi_push_desc(host_ehi, "host bus error");
1982 	}
1983 
1984 	if (irq_stat & PORT_IRQ_IF_ERR) {
1985 		host_ehi->err_mask |= AC_ERR_ATA_BUS;
1986 		host_ehi->action |= ATA_EH_RESET;
1987 		ata_ehi_push_desc(host_ehi, "interface fatal error");
1988 	}
1989 
1990 	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
1991 		ata_ehi_hotplugged(host_ehi);
1992 		ata_ehi_push_desc(host_ehi, "%s",
1993 			irq_stat & PORT_IRQ_CONNECT ?
1994 			"connection status changed" : "PHY RDY changed");
1995 	}
1996 
1997 	/* okay, let's hand over to EH */
1998 
1999 	if (irq_stat & PORT_IRQ_FREEZE)
2000 		ata_port_freeze(ap);
2001 	else
2002 		ata_port_abort(ap);
2003 }
2004 
2005 static void ahci_port_intr(struct ata_port *ap)
2006 {
2007 	void __iomem *port_mmio = ahci_port_base(ap);
2008 	struct ata_eh_info *ehi = &ap->link.eh_info;
2009 	struct ahci_port_priv *pp = ap->private_data;
2010 	struct ahci_host_priv *hpriv = ap->host->private_data;
2011 	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
2012 	u32 status, qc_active;
2013 	int rc;
2014 
2015 	status = readl(port_mmio + PORT_IRQ_STAT);
2016 	writel(status, port_mmio + PORT_IRQ_STAT);
2017 
2018 	/* ignore BAD_PMP while resetting */
2019 	if (unlikely(resetting))
2020 		status &= ~PORT_IRQ_BAD_PMP;
2021 
2022 	/* If we are getting PhyRdy, this is
2023 	 * just a power state change; clear the
2024 	 * PhyRdy interrupt and the PhyRdy/Comm
2025 	 * Wake bits from SError.
2026 	 */
2027 	if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
2028 		(status & PORT_IRQ_PHYRDY)) {
2029 		status &= ~PORT_IRQ_PHYRDY;
2030 		ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
2031 	}
2032 
2033 	if (unlikely(status & PORT_IRQ_ERROR)) {
2034 		ahci_error_intr(ap, status);
2035 		return;
2036 	}
2037 
2038 	if (status & PORT_IRQ_SDB_FIS) {
2039 		/* If SNotification is available, leave notification
2040 		 * handling to sata_async_notification().  If not,
2041 		 * emulate it by snooping SDB FIS RX area.
2042 		 *
2043 		 * Snooping the FIS RX area is probably cheaper than
2044 		 * poking SNotification, but some controllers which
2045 		 * implement SNotification, ICH9 for example, don't
2046 		 * store the AN SDB FIS into the receive area.
2047 		 */
2048 		if (hpriv->cap & HOST_CAP_SNTF)
2049 			sata_async_notification(ap);
2050 		else {
2051 			/* If the 'N' bit in word 0 of the FIS is set,
2052 			 * we just received asynchronous notification.
2053 			 * Tell libata about it.
2054 			 */
2055 			const __le32 *f = pp->rx_fis + RX_FIS_SDB;
2056 			u32 f0 = le32_to_cpu(f[0]);
2057 
2058 			if (f0 & (1 << 15))
2059 				sata_async_notification(ap);
2060 		}
2061 	}
2062 
2063 	/* pp->active_link is valid iff any command is in flight; NCQ completions show up in SActive, others in CI */
2064 	if (ap->qc_active && pp->active_link->sactive)
2065 		qc_active = readl(port_mmio + PORT_SCR_ACT);
2066 	else
2067 		qc_active = readl(port_mmio + PORT_CMD_ISSUE);
2068 
2069 	rc = ata_qc_complete_multiple(ap, qc_active);
2070 
2071 	/* while resetting, invalid completions are expected */
2072 	if (unlikely(rc < 0 && !resetting)) {
2073 		ehi->err_mask |= AC_ERR_HSM;
2074 		ehi->action |= ATA_EH_RESET;
2075 		ata_port_freeze(ap);
2076 	}
2077 }
2078 
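/*
 * Host-level interrupt handler: read HOST_IRQ_STAT, service every
 * implemented port that has its bit set, and only then write the
 * status back to acknowledge it (see the comment at the bottom for
 * why the ordering matters).
 */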
2079 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
2080 {
2081 	struct ata_host *host = dev_instance;
2082 	struct ahci_host_priv *hpriv;
2083 	unsigned int i, handled = 0;
2084 	void __iomem *mmio;
2085 	u32 irq_stat, irq_masked;
2086 
2087 	VPRINTK("ENTER\n");
2088 
2089 	hpriv = host->private_data;
2090 	mmio = host->iomap[AHCI_PCI_BAR];
2091 
2092 	/* sigh.  0xffffffff is a valid return from h/w */
2093 	irq_stat = readl(mmio + HOST_IRQ_STAT);
2094 	if (!irq_stat)
2095 		return IRQ_NONE;
2096 
2097 	irq_masked = irq_stat & hpriv->port_map;
2098 
2099 	spin_lock(&host->lock);
2100 
2101 	for (i = 0; i < host->n_ports; i++) {
2102 		struct ata_port *ap;
2103 
2104 		if (!(irq_masked & (1 << i)))
2105 			continue;
2106 
2107 		ap = host->ports[i];
2108 		if (ap) {
2109 			ahci_port_intr(ap);
2110 			VPRINTK("port %u\n", i);
2111 		} else {
2112 			VPRINTK("port %u (no irq)\n", i);
2113 			if (ata_ratelimit())
2114 				dev_printk(KERN_WARNING, host->dev,
2115 					"interrupt on disabled port %u\n", i);
2116 		}
2117 
2118 		handled = 1;
2119 	}
2120 
2121 	/* HOST_IRQ_STAT behaves as a level-triggered latch, meaning that
2122 	 * it should be cleared after all the port events are cleared;
2123 	 * otherwise, it will raise a spurious interrupt after each
2124 	 * valid one.  Please read section 10.6.2 of AHCI 1.1 for more
2125 	 * information.
2126 	 *
2127 	 * Also, use the unmasked value to clear interrupt as spurious
2128 	 * pending event on a dummy port might cause screaming IRQ.
2129 	 */
2130 	writel(irq_stat, mmio + HOST_IRQ_STAT);
2131 
2132 	spin_unlock(&host->lock);
2133 
2134 	VPRINTK("EXIT\n");
2135 
2136 	return IRQ_RETVAL(handled);
2137 }
2138 
2139 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
2140 {
2141 	struct ata_port *ap = qc->ap;
2142 	void __iomem *port_mmio = ahci_port_base(ap);
2143 	struct ahci_port_priv *pp = ap->private_data;
2144 
2145 	/* Keep track of the currently active link.  It will be used
2146 	 * in completion path to determine whether NCQ phase is in
2147 	 * progress.
2148 	 */
2149 	pp->active_link = qc->dev->link;
2150 
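	/* For an NCQ command the corresponding bit in PxSACT must be set
	 * before the command is issued through PxCI, which is what the
	 * ordering of the two writes below guarantees.
	 */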
2151 	if (qc->tf.protocol == ATA_PROT_NCQ)
2152 		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
2153 	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
2154 
2155 	ahci_sw_activity(qc->dev->link);
2156 
2157 	return 0;
2158 }
2159 
2160 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2161 {
2162 	struct ahci_port_priv *pp = qc->ap->private_data;
2163 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2164 
2165 	ata_tf_from_fis(d2h_fis, &qc->result_tf);
2166 	return true;
2167 }
2168 
2169 static void ahci_freeze(struct ata_port *ap)
2170 {
2171 	void __iomem *port_mmio = ahci_port_base(ap);
2172 
2173 	/* turn IRQ off */
2174 	writel(0, port_mmio + PORT_IRQ_MASK);
2175 }
2176 
2177 static void ahci_thaw(struct ata_port *ap)
2178 {
2179 	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
2180 	void __iomem *port_mmio = ahci_port_base(ap);
2181 	u32 tmp;
2182 	struct ahci_port_priv *pp = ap->private_data;
2183 
2184 	/* clear IRQ */
2185 	tmp = readl(port_mmio + PORT_IRQ_STAT);
2186 	writel(tmp, port_mmio + PORT_IRQ_STAT);
2187 	writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
2188 
2189 	/* turn IRQ back on */
2190 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2191 }
2192 
2193 static void ahci_error_handler(struct ata_port *ap)
2194 {
2195 	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2196 		/* restart engine */
2197 		ahci_stop_engine(ap);
2198 		ahci_start_engine(ap);
2199 	}
2200 
2201 	sata_pmp_error_handler(ap);
2202 }
2203 
2204 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2205 {
2206 	struct ata_port *ap = qc->ap;
2207 
2208 	/* make DMA engine forget about the failed command */
2209 	if (qc->flags & ATA_QCFLAG_FAILED)
2210 		ahci_kick_engine(ap, 1);
2211 }
2212 
2213 static void ahci_pmp_attach(struct ata_port *ap)
2214 {
2215 	void __iomem *port_mmio = ahci_port_base(ap);
2216 	struct ahci_port_priv *pp = ap->private_data;
2217 	u32 cmd;
2218 
2219 	cmd = readl(port_mmio + PORT_CMD);
2220 	cmd |= PORT_CMD_PMP;
2221 	writel(cmd, port_mmio + PORT_CMD);
2222 
2223 	pp->intr_mask |= PORT_IRQ_BAD_PMP;
2224 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2225 }
2226 
2227 static void ahci_pmp_detach(struct ata_port *ap)
2228 {
2229 	void __iomem *port_mmio = ahci_port_base(ap);
2230 	struct ahci_port_priv *pp = ap->private_data;
2231 	u32 cmd;
2232 
2233 	cmd = readl(port_mmio + PORT_CMD);
2234 	cmd &= ~PORT_CMD_PMP;
2235 	writel(cmd, port_mmio + PORT_CMD);
2236 
2237 	pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2238 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2239 }
2240 
2241 static int ahci_port_resume(struct ata_port *ap)
2242 {
2243 	ahci_power_up(ap);
2244 	ahci_start_port(ap);
2245 
2246 	if (sata_pmp_attached(ap))
2247 		ahci_pmp_attach(ap);
2248 	else
2249 		ahci_pmp_detach(ap);
2250 
2251 	return 0;
2252 }
2253 
2254 #ifdef CONFIG_PM
2255 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2256 {
2257 	const char *emsg = NULL;
2258 	int rc;
2259 
2260 	rc = ahci_deinit_port(ap, &emsg);
2261 	if (rc == 0)
2262 		ahci_power_down(ap);
2263 	else {
2264 		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2265 		ahci_start_port(ap);
2266 	}
2267 
2268 	return rc;
2269 }
2270 
2271 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2272 {
2273 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
2274 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2275 	u32 ctl;
2276 
2277 	if (mesg.event & PM_EVENT_SLEEP) {
2278 		/* AHCI spec rev1.1 section 8.3.3:
2279 		 * Software must disable interrupts prior to requesting a
2280 		 * transition of the HBA to D3 state.
2281 		 */
2282 		ctl = readl(mmio + HOST_CTL);
2283 		ctl &= ~HOST_IRQ_EN;
2284 		writel(ctl, mmio + HOST_CTL);
2285 		readl(mmio + HOST_CTL); /* flush */
2286 	}
2287 
2288 	return ata_pci_device_suspend(pdev, mesg);
2289 }
2290 
2291 static int ahci_pci_device_resume(struct pci_dev *pdev)
2292 {
2293 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
2294 	int rc;
2295 
2296 	rc = ata_pci_device_do_resume(pdev);
2297 	if (rc)
2298 		return rc;
2299 
2300 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2301 		rc = ahci_reset_controller(host);
2302 		if (rc)
2303 			return rc;
2304 
2305 		ahci_init_controller(host);
2306 	}
2307 
2308 	ata_host_resume(host);
2309 
2310 	return 0;
2311 }
2312 #endif
2313 
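/*
 * Allocate and lay out the per-port DMA memory in one coherent chunk:
 * the 32-entry command list (AHCI_CMD_SLOT_SZ), the received-FIS area
 * (AHCI_RX_FIS_SZ) and the per-tag command tables (AHCI_CMD_TBL_AR_SZ),
 * in that order, then bring the port up via ahci_port_resume().
 */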
2314 static int ahci_port_start(struct ata_port *ap)
2315 {
2316 	struct device *dev = ap->host->dev;
2317 	struct ahci_port_priv *pp;
2318 	void *mem;
2319 	dma_addr_t mem_dma;
2320 
2321 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2322 	if (!pp)
2323 		return -ENOMEM;
2324 
2325 	mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
2326 				  GFP_KERNEL);
2327 	if (!mem)
2328 		return -ENOMEM;
2329 	memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
2330 
2331 	/*
2332 	 * First item in chunk of DMA memory: 32-slot command table,
2333 	 * 32 bytes each in size
2334 	 */
2335 	pp->cmd_slot = mem;
2336 	pp->cmd_slot_dma = mem_dma;
2337 
2338 	mem += AHCI_CMD_SLOT_SZ;
2339 	mem_dma += AHCI_CMD_SLOT_SZ;
2340 
2341 	/*
2342 	 * Second item: Received-FIS area
2343 	 */
2344 	pp->rx_fis = mem;
2345 	pp->rx_fis_dma = mem_dma;
2346 
2347 	mem += AHCI_RX_FIS_SZ;
2348 	mem_dma += AHCI_RX_FIS_SZ;
2349 
2350 	/*
2351 	 * Third item: data area for storing a single command
2352 	 * and its scatter-gather table
2353 	 */
2354 	pp->cmd_tbl = mem;
2355 	pp->cmd_tbl_dma = mem_dma;
2356 
2357 	/*
2358 	 * Save off initial list of interrupts to be enabled.
2359 	 * This could be changed later.
2360 	 */
2361 	pp->intr_mask = DEF_PORT_IRQ;
2362 
2363 	ap->private_data = pp;
2364 
2365 	/* engage engines, captain */
2366 	return ahci_port_resume(ap);
2367 }
2368 
2369 static void ahci_port_stop(struct ata_port *ap)
2370 {
2371 	const char *emsg = NULL;
2372 	int rc;
2373 
2374 	/* de-initialize port */
2375 	rc = ahci_deinit_port(ap, &emsg);
2376 	if (rc)
2377 		ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2378 }
2379 
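/*
 * Pick DMA masks: try 64-bit streaming/consistent masks when the HBA
 * advertises 64-bit addressing (HOST_CAP_64), falling back to 32-bit
 * consistent DMA if needed; otherwise use 32-bit masks throughout.
 */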
2380 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2381 {
2382 	int rc;
2383 
2384 	if (using_dac &&
2385 	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2386 		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2387 		if (rc) {
2388 			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2389 			if (rc) {
2390 				dev_printk(KERN_ERR, &pdev->dev,
2391 					   "64-bit DMA enable failed\n");
2392 				return rc;
2393 			}
2394 		}
2395 	} else {
2396 		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2397 		if (rc) {
2398 			dev_printk(KERN_ERR, &pdev->dev,
2399 				   "32-bit DMA enable failed\n");
2400 			return rc;
2401 		}
2402 		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2403 		if (rc) {
2404 			dev_printk(KERN_ERR, &pdev->dev,
2405 				   "32-bit consistent DMA enable failed\n");
2406 			return rc;
2407 		}
2408 	}
2409 	return 0;
2410 }
2411 
2412 static void ahci_print_info(struct ata_host *host)
2413 {
2414 	struct ahci_host_priv *hpriv = host->private_data;
2415 	struct pci_dev *pdev = to_pci_dev(host->dev);
2416 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2417 	u32 vers, cap, impl, speed;
2418 	const char *speed_s;
2419 	u16 cc;
2420 	const char *scc_s;
2421 
2422 	vers = readl(mmio + HOST_VERSION);
2423 	cap = hpriv->cap;
2424 	impl = hpriv->port_map;
2425 
2426 	speed = (cap >> 20) & 0xf;
2427 	if (speed == 1)
2428 		speed_s = "1.5";
2429 	else if (speed == 2)
2430 		speed_s = "3";
2431 	else
2432 		speed_s = "?";
2433 
2434 	pci_read_config_word(pdev, 0x0a, &cc);
2435 	if (cc == PCI_CLASS_STORAGE_IDE)
2436 		scc_s = "IDE";
2437 	else if (cc == PCI_CLASS_STORAGE_SATA)
2438 		scc_s = "SATA";
2439 	else if (cc == PCI_CLASS_STORAGE_RAID)
2440 		scc_s = "RAID";
2441 	else
2442 		scc_s = "unknown";
2443 
2444 	dev_printk(KERN_INFO, &pdev->dev,
2445 		"AHCI %02x%02x.%02x%02x "
2446 		"%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2447 		,
2448 
2449 		(vers >> 24) & 0xff,
2450 		(vers >> 16) & 0xff,
2451 		(vers >> 8) & 0xff,
2452 		vers & 0xff,
2453 
2454 		((cap >> 8) & 0x1f) + 1,
2455 		(cap & 0x1f) + 1,
2456 		speed_s,
2457 		impl,
2458 		scc_s);
2459 
2460 	dev_printk(KERN_INFO, &pdev->dev,
2461 		"flags: "
2462 		"%s%s%s%s%s%s%s"
2463 		"%s%s%s%s%s%s%s"
2464 		"%s\n"
2465 		,
2466 
2467 		cap & (1 << 31) ? "64bit " : "",
2468 		cap & (1 << 30) ? "ncq " : "",
2469 		cap & (1 << 29) ? "sntf " : "",
2470 		cap & (1 << 28) ? "ilck " : "",
2471 		cap & (1 << 27) ? "stag " : "",
2472 		cap & (1 << 26) ? "pm " : "",
2473 		cap & (1 << 25) ? "led " : "",
2474 
2475 		cap & (1 << 24) ? "clo " : "",
2476 		cap & (1 << 19) ? "nz " : "",
2477 		cap & (1 << 18) ? "only " : "",
2478 		cap & (1 << 17) ? "pmp " : "",
2479 		cap & (1 << 15) ? "pio " : "",
2480 		cap & (1 << 14) ? "slum " : "",
2481 		cap & (1 << 13) ? "part " : "",
2482 		cap & (1 << 6) ? "ems " : ""
2483 		);
2484 }
2485 
2486 /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
2487  * hardwired to the on-board SIMG 4726.  The chipset is ICH8, which
2488  * doesn't support PMP, and the 4726 either directly exports the device
2489  * attached to the first downstream port or acts as a hardware storage
2490  * controller and emulates a single ATA device (can be RAID 0/1 or some
2491  * other configuration).
2492  *
2493  * When there's no device attached to the first downstream port of the
2494  * 4726, "Config Disk" appears, which is a pseudo ATA device to
2495  * configure the 4726.  However, ATA emulation of the device is very
2496  * lame.  It doesn't send signature D2H Reg FIS after the initial
2497  * hardreset, pukes on SRST w/ PMP==0 and has bunch of other issues.
2498  * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
2499  * The following function works around the problem by always using
2500  * hardreset on the port and not depending on receiving signature FIS
2501  * afterward.  If signature FIS isn't received soon, ATA class is
2502  * assumed without follow-up softreset.
2503  */
2504 static void ahci_p5wdh_workaround(struct ata_host *host)
2505 {
2506 	static struct dmi_system_id sysids[] = {
2507 		{
2508 			.ident = "P5W DH Deluxe",
2509 			.matches = {
2510 				DMI_MATCH(DMI_SYS_VENDOR,
2511 					  "ASUSTEK COMPUTER INC"),
2512 				DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
2513 			},
2514 		},
2515 		{ }
2516 	};
2517 	struct pci_dev *pdev = to_pci_dev(host->dev);
2518 
2519 	if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
2520 	    dmi_check_system(sysids)) {
2521 		struct ata_port *ap = host->ports[1];
2522 
2523 		dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
2524 			   "Deluxe on-board SIMG4726 workaround\n");
2525 
2526 		ap->ops = &ahci_p5wdh_ops;
2527 		ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
2528 	}
2529 }
2530 
2531 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2532 {
2533 	static int printed_version;
2534 	unsigned int board_id = ent->driver_data;
2535 	struct ata_port_info pi = ahci_port_info[board_id];
2536 	const struct ata_port_info *ppi[] = { &pi, NULL };
2537 	struct device *dev = &pdev->dev;
2538 	struct ahci_host_priv *hpriv;
2539 	struct ata_host *host;
2540 	int n_ports, i, rc;
2541 
2542 	VPRINTK("ENTER\n");
2543 
2544 	WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
2545 
2546 	if (!printed_version++)
2547 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2548 
2549 	/* The AHCI driver can only drive the SATA ports; the PATA driver
2550 	   can drive them all, so if both drivers are selected make sure
2551 	   AHCI stays out of the way */
2552 	if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
2553 		return -ENODEV;
2554 
2555 	/* acquire resources */
2556 	rc = pcim_enable_device(pdev);
2557 	if (rc)
2558 		return rc;
2559 
2560 	/* AHCI controllers often implement SFF compatible interface.
2561 	 * Grab all PCI BARs just in case.
2562 	 */
2563 	rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
2564 	if (rc == -EBUSY)
2565 		pcim_pin_device(pdev);
2566 	if (rc)
2567 		return rc;
2568 
2569 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
2570 	    (pdev->device == 0x2652 || pdev->device == 0x2653)) {
2571 		u8 map;
2572 
2573 		/* ICH6s share the same PCI ID for both piix and ahci
2574 		 * modes.  Enabling ahci mode while MAP indicates
2575 		 * combined mode is a bad idea.  Yield to ata_piix.
2576 		 */
2577 		pci_read_config_byte(pdev, ICH_MAP, &map);
2578 		if (map & 0x3) {
2579 			dev_printk(KERN_INFO, &pdev->dev, "controller is in "
2580 				   "combined mode, can't enable AHCI mode\n");
2581 			return -ENODEV;
2582 		}
2583 	}
2584 
2585 	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
2586 	if (!hpriv)
2587 		return -ENOMEM;
2588 	hpriv->flags |= (unsigned long)pi.private_data;
2589 
2590 	/* MCP65 revision A1 and A2 can't do MSI */
2591 	if (board_id == board_ahci_mcp65 &&
2592 	    (pdev->revision == 0xa1 || pdev->revision == 0xa2))
2593 		hpriv->flags |= AHCI_HFLAG_NO_MSI;
2594 
2595 	if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
2596 		pci_intx(pdev, 1);
2597 
2598 	/* save initial config */
2599 	ahci_save_initial_config(pdev, hpriv);
2600 
2601 	/* prepare host */
2602 	if (hpriv->cap & HOST_CAP_NCQ)
2603 		pi.flags |= ATA_FLAG_NCQ;
2604 
2605 	if (hpriv->cap & HOST_CAP_PMP)
2606 		pi.flags |= ATA_FLAG_PMP;
2607 
2608 	if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
2609 		u8 messages;
2610 		void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
2611 		u32 em_loc = readl(mmio + HOST_EM_LOC);
2612 		u32 em_ctl = readl(mmio + HOST_EM_CTL);
2613 
2614 		messages = (em_ctl & 0x000f0000) >> 16;
2615 
2616 		/* we only support LED message type right now */
2617 		if ((messages & 0x01) && (ahci_em_messages == 1)) {
2618 			/* store em_loc (offset field is in dwords, convert to bytes) */
2619 			hpriv->em_loc = ((em_loc >> 16) * 4);
2620 			pi.flags |= ATA_FLAG_EM;
2621 			if (!(em_ctl & EM_CTL_ALHD))
2622 				pi.flags |= ATA_FLAG_SW_ACTIVITY;
2623 		}
2624 	}
2625 
2626 	/* CAP.NP sometimes indicates the index of the last enabled
2627 	 * port and at other times that of the last possible port, so
2628 	 * determining the maximum port number requires looking at
2629 	 * both CAP.NP and port_map.
2630 	 */
2631 	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
2632 
2633 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2634 	if (!host)
2635 		return -ENOMEM;
2636 	host->iomap = pcim_iomap_table(pdev);
2637 	host->private_data = hpriv;
2638 
2639 	if (pi.flags & ATA_FLAG_EM)
2640 		ahci_reset_em(host);
2641 
2642 	for (i = 0; i < host->n_ports; i++) {
2643 		struct ata_port *ap = host->ports[i];
2644 
2645 		ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
2646 		ata_port_pbar_desc(ap, AHCI_PCI_BAR,
2647 				   0x100 + ap->port_no * 0x80, "port");
2648 
2649 		/* set initial link pm policy */
2650 		ap->pm_policy = NOT_AVAILABLE;
2651 
2652 		/* set enclosure management message type */
2653 		if (ap->flags & ATA_FLAG_EM)
2654 			ap->em_message_type = ahci_em_messages;
2655 
2656 
2657 		/* disabled/not-implemented port */
2658 		if (!(hpriv->port_map & (1 << i)))
2659 			ap->ops = &ata_dummy_port_ops;
2660 	}
2661 
2662 	/* apply workaround for ASUS P5W DH Deluxe mainboard */
2663 	ahci_p5wdh_workaround(host);
2664 
2665 	/* initialize adapter */
2666 	rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
2667 	if (rc)
2668 		return rc;
2669 
2670 	rc = ahci_reset_controller(host);
2671 	if (rc)
2672 		return rc;
2673 
2674 	ahci_init_controller(host);
2675 	ahci_print_info(host);
2676 
2677 	pci_set_master(pdev);
2678 	return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
2679 				 &ahci_sht);
2680 }
2681 
2682 static int __init ahci_init(void)
2683 {
2684 	return pci_register_driver(&ahci_pci_driver);
2685 }
2686 
2687 static void __exit ahci_exit(void)
2688 {
2689 	pci_unregister_driver(&ahci_pci_driver);
2690 }
2691 
2692 
2693 MODULE_AUTHOR("Jeff Garzik");
2694 MODULE_DESCRIPTION("AHCI SATA low-level driver");
2695 MODULE_LICENSE("GPL");
2696 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
2697 MODULE_VERSION(DRV_VERSION);
2698 
2699 module_init(ahci_init);
2700 module_exit(ahci_exit);
2701