xref: /linux/drivers/ata/ahci.c (revision b8bb76713ec50df2f11efee386e16f93d51e1076)
1 /*
2  *  ahci.c - AHCI SATA support
3  *
4  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2004-2005 Red Hat, Inc.
9  *
10  *
11  *  This program is free software; you can redistribute it and/or modify
12  *  it under the terms of the GNU General Public License as published by
13  *  the Free Software Foundation; either version 2, or (at your option)
14  *  any later version.
15  *
16  *  This program is distributed in the hope that it will be useful,
17  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
18  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  *  GNU General Public License for more details.
20  *
21  *  You should have received a copy of the GNU General Public License
22  *  along with this program; see the file COPYING.  If not, write to
23  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24  *
25  *
26  * libata documentation is available via 'make {ps|pdf}docs',
27  * as Documentation/DocBook/libata.*
28  *
29  * AHCI hardware documentation:
30  * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31  * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32  *
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/device.h>
44 #include <linux/dmi.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_cmnd.h>
47 #include <linux/libata.h>
48 
49 #define DRV_NAME	"ahci"
50 #define DRV_VERSION	"3.0"
51 
52 /* Enclosure Management Control */
53 #define EM_CTRL_MSG_TYPE              0x000f0000
54 
55 /* Enclosure Management LED Message Type */
56 #define EM_MSG_LED_HBA_PORT           0x0000000f
57 #define EM_MSG_LED_PMP_SLOT           0x0000ff00
58 #define EM_MSG_LED_VALUE              0xffff0000
59 #define EM_MSG_LED_VALUE_ACTIVITY     0x00070000
60 #define EM_MSG_LED_VALUE_OFF          0xfff80000
61 #define EM_MSG_LED_VALUE_ON           0x00010000
62 
63 static int ahci_skip_host_reset;
64 static int ahci_ignore_sss;
65 
66 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
67 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
68 
69 module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
70 MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
71 
72 static int ahci_enable_alpm(struct ata_port *ap,
73 		enum link_pm policy);
74 static void ahci_disable_alpm(struct ata_port *ap);
75 static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
76 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
77 			      size_t size);
78 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
79 					ssize_t size);
80 #define MAX_SLOTS 8
81 
82 enum {
83 	AHCI_PCI_BAR		= 5,
84 	AHCI_MAX_PORTS		= 32,
85 	AHCI_MAX_SG		= 168, /* hardware max is 64K */
86 	AHCI_DMA_BOUNDARY	= 0xffffffff,
87 	AHCI_MAX_CMDS		= 32,
88 	AHCI_CMD_SZ		= 32,
89 	AHCI_CMD_SLOT_SZ	= AHCI_MAX_CMDS * AHCI_CMD_SZ,
90 	AHCI_RX_FIS_SZ		= 256,
91 	AHCI_CMD_TBL_CDB	= 0x40,
92 	AHCI_CMD_TBL_HDR_SZ	= 0x80,
93 	AHCI_CMD_TBL_SZ		= AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
94 	AHCI_CMD_TBL_AR_SZ	= AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
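	/*
	 * One DMA buffer per port: the 32-entry command list, the command
	 * tables for every slot and the received-FIS area.
	 */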
95 	AHCI_PORT_PRIV_DMA_SZ	= AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
96 				  AHCI_RX_FIS_SZ,
97 	AHCI_IRQ_ON_SG		= (1 << 31),
98 	AHCI_CMD_ATAPI		= (1 << 5),
99 	AHCI_CMD_WRITE		= (1 << 6),
100 	AHCI_CMD_PREFETCH	= (1 << 7),
101 	AHCI_CMD_RESET		= (1 << 8),
102 	AHCI_CMD_CLR_BUSY	= (1 << 10),
103 
104 	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
105 	RX_FIS_SDB		= 0x58, /* offset of SDB FIS data */
106 	RX_FIS_UNK		= 0x60, /* offset of Unknown FIS data */
107 
108 	board_ahci		= 0,
109 	board_ahci_vt8251	= 1,
110 	board_ahci_ign_iferr	= 2,
111 	board_ahci_sb600	= 3,
112 	board_ahci_mv		= 4,
113 	board_ahci_sb700	= 5, /* for SB700 and SB800 */
114 	board_ahci_mcp65	= 6,
115 	board_ahci_nopmp	= 7,
116 
117 	/* global controller registers */
118 	HOST_CAP		= 0x00, /* host capabilities */
119 	HOST_CTL		= 0x04, /* global host control */
120 	HOST_IRQ_STAT		= 0x08, /* interrupt status */
121 	HOST_PORTS_IMPL		= 0x0c, /* bitmap of implemented ports */
122 	HOST_VERSION		= 0x10, /* AHCI spec. version compliancy */
123 	HOST_EM_LOC		= 0x1c, /* Enclosure Management location */
124 	HOST_EM_CTL		= 0x20, /* Enclosure Management Control */
125 
126 	/* HOST_CTL bits */
127 	HOST_RESET		= (1 << 0),  /* reset controller; self-clear */
128 	HOST_IRQ_EN		= (1 << 1),  /* global IRQ enable */
129 	HOST_AHCI_EN		= (1 << 31), /* AHCI enabled */
130 
131 	/* HOST_CAP bits */
132 	HOST_CAP_EMS		= (1 << 6),  /* Enclosure Management support */
133 	HOST_CAP_SSC		= (1 << 14), /* Slumber capable */
134 	HOST_CAP_PMP		= (1 << 17), /* Port Multiplier support */
135 	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
136 	HOST_CAP_ALPM		= (1 << 26), /* Aggressive Link PM support */
137 	HOST_CAP_SSS		= (1 << 27), /* Staggered Spin-up */
138 	HOST_CAP_SNTF		= (1 << 29), /* SNotification register */
139 	HOST_CAP_NCQ		= (1 << 30), /* Native Command Queueing */
140 	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */
141 
142 	/* registers for each SATA port */
143 	PORT_LST_ADDR		= 0x00, /* command list DMA addr */
144 	PORT_LST_ADDR_HI	= 0x04, /* command list DMA addr hi */
145 	PORT_FIS_ADDR		= 0x08, /* FIS rx buf addr */
146 	PORT_FIS_ADDR_HI	= 0x0c, /* FIS rx buf addr hi */
147 	PORT_IRQ_STAT		= 0x10, /* interrupt status */
148 	PORT_IRQ_MASK		= 0x14, /* interrupt enable/disable mask */
149 	PORT_CMD		= 0x18, /* port command */
150 	PORT_TFDATA		= 0x20,	/* taskfile data */
151 	PORT_SIG		= 0x24,	/* device TF signature */
152 	PORT_CMD_ISSUE		= 0x38, /* command issue */
153 	PORT_SCR_STAT		= 0x28, /* SATA phy register: SStatus */
154 	PORT_SCR_CTL		= 0x2c, /* SATA phy register: SControl */
155 	PORT_SCR_ERR		= 0x30, /* SATA phy register: SError */
156 	PORT_SCR_ACT		= 0x34, /* SATA phy register: SActive */
157 	PORT_SCR_NTF		= 0x3c, /* SATA phy register: SNotification */
158 
159 	/* PORT_IRQ_{STAT,MASK} bits */
160 	PORT_IRQ_COLD_PRES	= (1 << 31), /* cold presence detect */
161 	PORT_IRQ_TF_ERR		= (1 << 30), /* task file error */
162 	PORT_IRQ_HBUS_ERR	= (1 << 29), /* host bus fatal error */
163 	PORT_IRQ_HBUS_DATA_ERR	= (1 << 28), /* host bus data error */
164 	PORT_IRQ_IF_ERR		= (1 << 27), /* interface fatal error */
165 	PORT_IRQ_IF_NONFATAL	= (1 << 26), /* interface non-fatal error */
166 	PORT_IRQ_OVERFLOW	= (1 << 24), /* xfer exhausted available S/G */
167 	PORT_IRQ_BAD_PMP	= (1 << 23), /* incorrect port multiplier */
168 
169 	PORT_IRQ_PHYRDY		= (1 << 22), /* PhyRdy changed */
170 	PORT_IRQ_DEV_ILCK	= (1 << 7), /* device interlock */
171 	PORT_IRQ_CONNECT	= (1 << 6), /* port connect change status */
172 	PORT_IRQ_SG_DONE	= (1 << 5), /* descriptor processed */
173 	PORT_IRQ_UNK_FIS	= (1 << 4), /* unknown FIS rx'd */
174 	PORT_IRQ_SDB_FIS	= (1 << 3), /* Set Device Bits FIS rx'd */
175 	PORT_IRQ_DMAS_FIS	= (1 << 2), /* DMA Setup FIS rx'd */
176 	PORT_IRQ_PIOS_FIS	= (1 << 1), /* PIO Setup FIS rx'd */
177 	PORT_IRQ_D2H_REG_FIS	= (1 << 0), /* D2H Register FIS rx'd */
178 
179 	PORT_IRQ_FREEZE		= PORT_IRQ_HBUS_ERR |
180 				  PORT_IRQ_IF_ERR |
181 				  PORT_IRQ_CONNECT |
182 				  PORT_IRQ_PHYRDY |
183 				  PORT_IRQ_UNK_FIS |
184 				  PORT_IRQ_BAD_PMP,
185 	PORT_IRQ_ERROR		= PORT_IRQ_FREEZE |
186 				  PORT_IRQ_TF_ERR |
187 				  PORT_IRQ_HBUS_DATA_ERR,
188 	DEF_PORT_IRQ		= PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
189 				  PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
190 				  PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
191 
192 	/* PORT_CMD bits */
193 	PORT_CMD_ASP		= (1 << 27), /* Aggressive Slumber/Partial */
194 	PORT_CMD_ALPE		= (1 << 26), /* Aggressive Link PM enable */
195 	PORT_CMD_ATAPI		= (1 << 24), /* Device is ATAPI */
196 	PORT_CMD_PMP		= (1 << 17), /* PMP attached */
197 	PORT_CMD_LIST_ON	= (1 << 15), /* cmd list DMA engine running */
198 	PORT_CMD_FIS_ON		= (1 << 14), /* FIS DMA engine running */
199 	PORT_CMD_FIS_RX		= (1 << 4), /* Enable FIS receive DMA engine */
200 	PORT_CMD_CLO		= (1 << 3), /* Command list override */
201 	PORT_CMD_POWER_ON	= (1 << 2), /* Power up device */
202 	PORT_CMD_SPIN_UP	= (1 << 1), /* Spin up device */
203 	PORT_CMD_START		= (1 << 0), /* Enable port DMA engine */
204 
205 	PORT_CMD_ICC_MASK	= (0xf << 28), /* i/f ICC state mask */
206 	PORT_CMD_ICC_ACTIVE	= (0x1 << 28), /* Put i/f in active state */
207 	PORT_CMD_ICC_PARTIAL	= (0x2 << 28), /* Put i/f in partial state */
208 	PORT_CMD_ICC_SLUMBER	= (0x6 << 28), /* Put i/f in slumber state */
209 
210 	/* hpriv->flags bits */
211 	AHCI_HFLAG_NO_NCQ		= (1 << 0),
212 	AHCI_HFLAG_IGN_IRQ_IF_ERR	= (1 << 1), /* ignore IRQ_IF_ERR */
213 	AHCI_HFLAG_IGN_SERR_INTERNAL	= (1 << 2), /* ignore SERR_INTERNAL */
214 	AHCI_HFLAG_32BIT_ONLY		= (1 << 3), /* force 32bit */
215 	AHCI_HFLAG_MV_PATA		= (1 << 4), /* PATA port */
216 	AHCI_HFLAG_NO_MSI		= (1 << 5), /* no PCI MSI */
217 	AHCI_HFLAG_NO_PMP		= (1 << 6), /* no PMP */
218 	AHCI_HFLAG_NO_HOTPLUG		= (1 << 7), /* ignore PxSERR.DIAG.N */
219 	AHCI_HFLAG_SECT255		= (1 << 8), /* max 255 sectors */
220 	AHCI_HFLAG_YES_NCQ		= (1 << 9), /* force NCQ cap on */
221 
222 	/* ap->flags bits */
223 
224 	AHCI_FLAG_COMMON		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
225 					  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
226 					  ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
227 					  ATA_FLAG_IPM,
228 
229 	ICH_MAP				= 0x90, /* ICH MAP register */
230 
231 	/* em_ctl bits */
232 	EM_CTL_RST			= (1 << 9), /* Reset */
233 	EM_CTL_TM			= (1 << 8), /* Transmit Message */
234 	EM_CTL_ALHD			= (1 << 26), /* Activity LED */
235 };
236 
237 struct ahci_cmd_hdr {
238 	__le32			opts;
239 	__le32			status;
240 	__le32			tbl_addr;
241 	__le32			tbl_addr_hi;
242 	__le32			reserved[4];
243 };
244 
245 struct ahci_sg {
246 	__le32			addr;
247 	__le32			addr_hi;
248 	__le32			reserved;
249 	__le32			flags_size;
250 };
251 
252 struct ahci_em_priv {
253 	enum sw_activity blink_policy;
254 	struct timer_list timer;
255 	unsigned long saved_activity;
256 	unsigned long activity;
257 	unsigned long led_state;
258 };
259 
260 struct ahci_host_priv {
261 	unsigned int		flags;		/* AHCI_HFLAG_* */
262 	u32			cap;		/* cap to use */
263 	u32			port_map;	/* port map to use */
264 	u32			saved_cap;	/* saved initial cap */
265 	u32			saved_port_map;	/* saved initial port_map */
266 	u32 			em_loc; /* enclosure management location */
267 };
268 
269 struct ahci_port_priv {
270 	struct ata_link		*active_link;
271 	struct ahci_cmd_hdr	*cmd_slot;
272 	dma_addr_t		cmd_slot_dma;
273 	void			*cmd_tbl;
274 	dma_addr_t		cmd_tbl_dma;
275 	void			*rx_fis;
276 	dma_addr_t		rx_fis_dma;
277 	/* for NCQ spurious interrupt analysis */
278 	unsigned int		ncq_saw_d2h:1;
279 	unsigned int		ncq_saw_dmas:1;
280 	unsigned int		ncq_saw_sdb:1;
281 	u32 			intr_mask;	/* interrupts to enable */
282 	struct ahci_em_priv	em_priv[MAX_SLOTS];/* enclosure management info
283 					 	 * per PM slot */
284 };
285 
286 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
287 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
288 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
289 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
290 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
291 static int ahci_port_start(struct ata_port *ap);
292 static void ahci_port_stop(struct ata_port *ap);
293 static void ahci_qc_prep(struct ata_queued_cmd *qc);
294 static void ahci_freeze(struct ata_port *ap);
295 static void ahci_thaw(struct ata_port *ap);
296 static void ahci_pmp_attach(struct ata_port *ap);
297 static void ahci_pmp_detach(struct ata_port *ap);
298 static int ahci_softreset(struct ata_link *link, unsigned int *class,
299 			  unsigned long deadline);
300 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
301 			  unsigned long deadline);
302 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
303 			  unsigned long deadline);
304 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
305 				 unsigned long deadline);
306 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
307 				unsigned long deadline);
308 static void ahci_postreset(struct ata_link *link, unsigned int *class);
309 static void ahci_error_handler(struct ata_port *ap);
310 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
311 static int ahci_port_resume(struct ata_port *ap);
312 static void ahci_dev_config(struct ata_device *dev);
313 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
314 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
315 			       u32 opts);
316 #ifdef CONFIG_PM
317 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
318 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
319 static int ahci_pci_device_resume(struct pci_dev *pdev);
320 #endif
321 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
322 static ssize_t ahci_activity_store(struct ata_device *dev,
323 				   enum sw_activity val);
324 static void ahci_init_sw_activity(struct ata_link *link);
325 
326 static struct device_attribute *ahci_shost_attrs[] = {
327 	&dev_attr_link_power_management_policy,
328 	&dev_attr_em_message_type,
329 	&dev_attr_em_message,
330 	NULL
331 };
332 
333 static struct device_attribute *ahci_sdev_attrs[] = {
334 	&dev_attr_sw_activity,
335 	&dev_attr_unload_heads,
336 	NULL
337 };
338 
339 static struct scsi_host_template ahci_sht = {
340 	ATA_NCQ_SHT(DRV_NAME),
341 	.can_queue		= AHCI_MAX_CMDS - 1,
342 	.sg_tablesize		= AHCI_MAX_SG,
343 	.dma_boundary		= AHCI_DMA_BOUNDARY,
344 	.shost_attrs		= ahci_shost_attrs,
345 	.sdev_attrs		= ahci_sdev_attrs,
346 };
347 
348 static struct ata_port_operations ahci_ops = {
349 	.inherits		= &sata_pmp_port_ops,
350 
351 	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
352 	.qc_prep		= ahci_qc_prep,
353 	.qc_issue		= ahci_qc_issue,
354 	.qc_fill_rtf		= ahci_qc_fill_rtf,
355 
356 	.freeze			= ahci_freeze,
357 	.thaw			= ahci_thaw,
358 	.softreset		= ahci_softreset,
359 	.hardreset		= ahci_hardreset,
360 	.postreset		= ahci_postreset,
361 	.pmp_softreset		= ahci_softreset,
362 	.error_handler		= ahci_error_handler,
363 	.post_internal_cmd	= ahci_post_internal_cmd,
364 	.dev_config		= ahci_dev_config,
365 
366 	.scr_read		= ahci_scr_read,
367 	.scr_write		= ahci_scr_write,
368 	.pmp_attach		= ahci_pmp_attach,
369 	.pmp_detach		= ahci_pmp_detach,
370 
371 	.enable_pm		= ahci_enable_alpm,
372 	.disable_pm		= ahci_disable_alpm,
373 	.em_show		= ahci_led_show,
374 	.em_store		= ahci_led_store,
375 	.sw_activity_show	= ahci_activity_show,
376 	.sw_activity_store	= ahci_activity_store,
377 #ifdef CONFIG_PM
378 	.port_suspend		= ahci_port_suspend,
379 	.port_resume		= ahci_port_resume,
380 #endif
381 	.port_start		= ahci_port_start,
382 	.port_stop		= ahci_port_stop,
383 };
384 
385 static struct ata_port_operations ahci_vt8251_ops = {
386 	.inherits		= &ahci_ops,
387 	.hardreset		= ahci_vt8251_hardreset,
388 };
389 
390 static struct ata_port_operations ahci_p5wdh_ops = {
391 	.inherits		= &ahci_ops,
392 	.hardreset		= ahci_p5wdh_hardreset,
393 };
394 
395 static struct ata_port_operations ahci_sb600_ops = {
396 	.inherits		= &ahci_ops,
397 	.softreset		= ahci_sb600_softreset,
398 	.pmp_softreset		= ahci_sb600_softreset,
399 };
400 
401 #define AHCI_HFLAGS(flags)	.private_data	= (void *)(flags)
402 
403 static const struct ata_port_info ahci_port_info[] = {
404 	/* board_ahci */
405 	{
406 		.flags		= AHCI_FLAG_COMMON,
407 		.pio_mask	= ATA_PIO4,
408 		.udma_mask	= ATA_UDMA6,
409 		.port_ops	= &ahci_ops,
410 	},
411 	/* board_ahci_vt8251 */
412 	{
413 		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
414 		.flags		= AHCI_FLAG_COMMON,
415 		.pio_mask	= ATA_PIO4,
416 		.udma_mask	= ATA_UDMA6,
417 		.port_ops	= &ahci_vt8251_ops,
418 	},
419 	/* board_ahci_ign_iferr */
420 	{
421 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_IRQ_IF_ERR),
422 		.flags		= AHCI_FLAG_COMMON,
423 		.pio_mask	= ATA_PIO4,
424 		.udma_mask	= ATA_UDMA6,
425 		.port_ops	= &ahci_ops,
426 	},
427 	/* board_ahci_sb600 */
428 	{
429 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL |
430 				 AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
431 				 AHCI_HFLAG_SECT255),
432 		.flags		= AHCI_FLAG_COMMON,
433 		.pio_mask	= ATA_PIO4,
434 		.udma_mask	= ATA_UDMA6,
435 		.port_ops	= &ahci_sb600_ops,
436 	},
437 	/* board_ahci_mv */
438 	{
439 		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
440 				 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
441 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
442 				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
443 		.pio_mask	= ATA_PIO4,
444 		.udma_mask	= ATA_UDMA6,
445 		.port_ops	= &ahci_ops,
446 	},
447 	/* board_ahci_sb700, for SB700 and SB800 */
448 	{
449 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL),
450 		.flags		= AHCI_FLAG_COMMON,
451 		.pio_mask	= ATA_PIO4,
452 		.udma_mask	= ATA_UDMA6,
453 		.port_ops	= &ahci_sb600_ops,
454 	},
455 	/* board_ahci_mcp65 */
456 	{
457 		AHCI_HFLAGS	(AHCI_HFLAG_YES_NCQ),
458 		.flags		= AHCI_FLAG_COMMON,
459 		.pio_mask	= ATA_PIO4,
460 		.udma_mask	= ATA_UDMA6,
461 		.port_ops	= &ahci_ops,
462 	},
463 	/* board_ahci_nopmp */
464 	{
465 		AHCI_HFLAGS	(AHCI_HFLAG_NO_PMP),
466 		.flags		= AHCI_FLAG_COMMON,
467 		.pio_mask	= ATA_PIO4,
468 		.udma_mask	= ATA_UDMA6,
469 		.port_ops	= &ahci_ops,
470 	},
471 };
472 
473 static const struct pci_device_id ahci_pci_tbl[] = {
474 	/* Intel */
475 	{ PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
476 	{ PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
477 	{ PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
478 	{ PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
479 	{ PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
480 	{ PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
481 	{ PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
482 	{ PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
483 	{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
484 	{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
485 	{ PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
486 	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
487 	{ PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
488 	{ PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
489 	{ PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
490 	{ PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
491 	{ PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
492 	{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
493 	{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
494 	{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
495 	{ PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
496 	{ PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
497 	{ PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
498 	{ PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
499 	{ PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
500 	{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
501 	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
502 	{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
503 	{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
504 	{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
505 	{ PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
506 	{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
507 	{ PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
508 	{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
509 	{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
510 
511 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
512 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
513 	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
514 
515 	/* ATI */
516 	{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
517 	{ PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
518 	{ PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
519 	{ PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
520 	{ PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
521 	{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
522 	{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
523 
524 	/* VIA */
525 	{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
526 	{ PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
527 
528 	/* NVIDIA */
529 	{ PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 },	/* MCP65 */
530 	{ PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 },	/* MCP65 */
531 	{ PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 },	/* MCP65 */
532 	{ PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 },	/* MCP65 */
533 	{ PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 },	/* MCP65 */
534 	{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 },	/* MCP65 */
535 	{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 },	/* MCP65 */
536 	{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 },	/* MCP65 */
537 	{ PCI_VDEVICE(NVIDIA, 0x0550), board_ahci },		/* MCP67 */
538 	{ PCI_VDEVICE(NVIDIA, 0x0551), board_ahci },		/* MCP67 */
539 	{ PCI_VDEVICE(NVIDIA, 0x0552), board_ahci },		/* MCP67 */
540 	{ PCI_VDEVICE(NVIDIA, 0x0553), board_ahci },		/* MCP67 */
541 	{ PCI_VDEVICE(NVIDIA, 0x0554), board_ahci },		/* MCP67 */
542 	{ PCI_VDEVICE(NVIDIA, 0x0555), board_ahci },		/* MCP67 */
543 	{ PCI_VDEVICE(NVIDIA, 0x0556), board_ahci },		/* MCP67 */
544 	{ PCI_VDEVICE(NVIDIA, 0x0557), board_ahci },		/* MCP67 */
545 	{ PCI_VDEVICE(NVIDIA, 0x0558), board_ahci },		/* MCP67 */
546 	{ PCI_VDEVICE(NVIDIA, 0x0559), board_ahci },		/* MCP67 */
547 	{ PCI_VDEVICE(NVIDIA, 0x055a), board_ahci },		/* MCP67 */
548 	{ PCI_VDEVICE(NVIDIA, 0x055b), board_ahci },		/* MCP67 */
549 	{ PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci },		/* MCP73 */
550 	{ PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci },		/* MCP73 */
551 	{ PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci },		/* MCP73 */
552 	{ PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci },		/* MCP73 */
553 	{ PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci },		/* MCP73 */
554 	{ PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci },		/* MCP73 */
555 	{ PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci },		/* MCP73 */
556 	{ PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci },		/* MCP73 */
557 	{ PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci },		/* MCP73 */
558 	{ PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci },		/* MCP73 */
559 	{ PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci },		/* MCP73 */
560 	{ PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci },		/* MCP73 */
561 	{ PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci },		/* MCP77 */
562 	{ PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci },		/* MCP77 */
563 	{ PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci },		/* MCP77 */
564 	{ PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci },		/* MCP77 */
565 	{ PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci },		/* MCP77 */
566 	{ PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci },		/* MCP77 */
567 	{ PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci },		/* MCP77 */
568 	{ PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci },		/* MCP77 */
569 	{ PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci },		/* MCP77 */
570 	{ PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci },		/* MCP77 */
571 	{ PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci },		/* MCP77 */
572 	{ PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci },		/* MCP77 */
573 	{ PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci },		/* MCP79 */
574 	{ PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci },		/* MCP79 */
575 	{ PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci },		/* MCP79 */
576 	{ PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci },		/* MCP79 */
577 	{ PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci },		/* MCP79 */
578 	{ PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci },		/* MCP79 */
579 	{ PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci },		/* MCP79 */
580 	{ PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci },		/* MCP79 */
581 	{ PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci },		/* MCP79 */
582 	{ PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci },		/* MCP79 */
583 	{ PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci },		/* MCP79 */
584 	{ PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci },		/* MCP79 */
585 	{ PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci },		/* MCP89 */
586 	{ PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci },		/* MCP89 */
587 	{ PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci },		/* MCP89 */
588 	{ PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci },		/* MCP89 */
589 	{ PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci },		/* MCP89 */
590 	{ PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci },		/* MCP89 */
591 	{ PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci },		/* MCP89 */
592 	{ PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci },		/* MCP89 */
593 	{ PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci },		/* MCP89 */
594 	{ PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci },		/* MCP89 */
595 	{ PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci },		/* MCP89 */
596 	{ PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci },		/* MCP89 */
597 
598 	/* SiS */
599 	{ PCI_VDEVICE(SI, 0x1184), board_ahci },		/* SiS 966 */
600 	{ PCI_VDEVICE(SI, 0x1185), board_ahci },		/* SiS 968 */
601 	{ PCI_VDEVICE(SI, 0x0186), board_ahci },		/* SiS 968 */
602 
603 	/* Marvell */
604 	{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv },	/* 6145 */
605 	{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv },	/* 6121 */
606 
607 	/* Promise */
608 	{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },	/* PDC42819 */
609 
610 	/* Generic, PCI class code for AHCI */
611 	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
612 	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
613 
614 	{ }	/* terminate list */
615 };
616 
617 
618 static struct pci_driver ahci_pci_driver = {
619 	.name			= DRV_NAME,
620 	.id_table		= ahci_pci_tbl,
621 	.probe			= ahci_init_one,
622 	.remove			= ata_pci_remove_one,
623 #ifdef CONFIG_PM
624 	.suspend		= ahci_pci_device_suspend,
625 	.resume			= ahci_pci_device_resume,
626 #endif
627 };
628 
629 static int ahci_em_messages = 1;
630 module_param(ahci_em_messages, int, 0444);
631 /* add other LED protocol types when they become supported */
632 MODULE_PARM_DESC(ahci_em_messages,
633 	"Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED)");
634 
635 #if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
636 static int marvell_enable;
637 #else
638 static int marvell_enable = 1;
639 #endif
640 module_param(marvell_enable, int, 0644);
641 MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
642 
643 
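/* CAP.NP (bits 4:0) encodes the number of ports as a zero-based value */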
644 static inline int ahci_nr_ports(u32 cap)
645 {
646 	return (cap & 0x1f) + 1;
647 }
648 
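/* port registers start at ABAR offset 0x100; each port gets an 0x80-byte block */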
649 static inline void __iomem *__ahci_port_base(struct ata_host *host,
650 					     unsigned int port_no)
651 {
652 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
653 
654 	return mmio + 0x100 + (port_no * 0x80);
655 }
656 
657 static inline void __iomem *ahci_port_base(struct ata_port *ap)
658 {
659 	return __ahci_port_base(ap->host, ap->port_no);
660 }
661 
662 static void ahci_enable_ahci(void __iomem *mmio)
663 {
664 	int i;
665 	u32 tmp;
666 
667 	/* turn on AHCI_EN */
668 	tmp = readl(mmio + HOST_CTL);
669 	if (tmp & HOST_AHCI_EN)
670 		return;
671 
672 	/* Some controllers need AHCI_EN to be written multiple times.
673 	 * Try a few times before giving up.
674 	 */
675 	for (i = 0; i < 5; i++) {
676 		tmp |= HOST_AHCI_EN;
677 		writel(tmp, mmio + HOST_CTL);
678 		tmp = readl(mmio + HOST_CTL);	/* flush && sanity check */
679 		if (tmp & HOST_AHCI_EN)
680 			return;
681 		msleep(10);
682 	}
683 
684 	WARN_ON(1);
685 }
686 
687 /**
688  *	ahci_save_initial_config - Save and fixup initial config values
689  *	@pdev: target PCI device
690  *	@hpriv: host private area to store config values
691  *
692  *	Some registers containing configuration info might be set up by
693  *	BIOS and might be cleared on reset.  This function saves the
694  *	initial values of those registers into @hpriv such that they
695  *	can be restored after controller reset.
696  *
697  *	If inconsistent, config values are fixed up by this function.
698  *
699  *	LOCKING:
700  *	None.
701  */
702 static void ahci_save_initial_config(struct pci_dev *pdev,
703 				     struct ahci_host_priv *hpriv)
704 {
705 	void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
706 	u32 cap, port_map;
707 	int i;
708 	int mv;
709 
710 	/* make sure AHCI mode is enabled before accessing CAP */
711 	ahci_enable_ahci(mmio);
712 
713 	/* Values prefixed with saved_ are written back to host after
714 	 * reset.  Values without are used for driver operation.
715 	 */
716 	hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
717 	hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
718 
719 	/* some chips have errata preventing 64bit use */
720 	if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
721 		dev_printk(KERN_INFO, &pdev->dev,
722 			   "controller can't do 64bit DMA, forcing 32bit\n");
723 		cap &= ~HOST_CAP_64;
724 	}
725 
726 	if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
727 		dev_printk(KERN_INFO, &pdev->dev,
728 			   "controller can't do NCQ, turning off CAP_NCQ\n");
729 		cap &= ~HOST_CAP_NCQ;
730 	}
731 
732 	if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
733 		dev_printk(KERN_INFO, &pdev->dev,
734 			   "controller can do NCQ, turning on CAP_NCQ\n");
735 		cap |= HOST_CAP_NCQ;
736 	}
737 
738 	if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
739 		dev_printk(KERN_INFO, &pdev->dev,
740 			   "controller can't do PMP, turning off CAP_PMP\n");
741 		cap &= ~HOST_CAP_PMP;
742 	}
743 
744 	if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
745 	    port_map != 1) {
746 		dev_printk(KERN_INFO, &pdev->dev,
747 			   "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
748 			   port_map, 1);
749 		port_map = 1;
750 	}
751 
752 	/*
753 	 * Temporary Marvell 6145 hack: PATA port presence
754 	 * is asserted through the standard AHCI port
755 	 * presence register, as bit 4 (counting from 0)
756 	 */
757 	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
758 		if (pdev->device == 0x6121)
759 			mv = 0x3;
760 		else
761 			mv = 0xf;
762 		dev_printk(KERN_ERR, &pdev->dev,
763 			   "MV_AHCI HACK: port_map %x -> %x\n",
764 			   port_map,
765 			   port_map & mv);
766 		dev_printk(KERN_ERR, &pdev->dev,
767 			  "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
768 
769 		port_map &= mv;
770 	}
771 
772 	/* cross check port_map and cap.n_ports */
773 	if (port_map) {
774 		int map_ports = 0;
775 
776 		for (i = 0; i < AHCI_MAX_PORTS; i++)
777 			if (port_map & (1 << i))
778 				map_ports++;
779 
780 		/* If PI has more ports than n_ports, whine, clear
781 		 * port_map and let it be generated from n_ports.
782 		 */
783 		if (map_ports > ahci_nr_ports(cap)) {
784 			dev_printk(KERN_WARNING, &pdev->dev,
785 				   "implemented port map (0x%x) contains more "
786 				   "ports than nr_ports (%u), using nr_ports\n",
787 				   port_map, ahci_nr_ports(cap));
788 			port_map = 0;
789 		}
790 	}
791 
792 	/* fabricate port_map from cap.nr_ports */
793 	if (!port_map) {
794 		port_map = (1 << ahci_nr_ports(cap)) - 1;
795 		dev_printk(KERN_WARNING, &pdev->dev,
796 			   "forcing PORTS_IMPL to 0x%x\n", port_map);
797 
798 		/* write the fixed up value to the PI register */
799 		hpriv->saved_port_map = port_map;
800 	}
801 
802 	/* record values to use during operation */
803 	hpriv->cap = cap;
804 	hpriv->port_map = port_map;
805 }
806 
807 /**
808  *	ahci_restore_initial_config - Restore initial config
809  *	@host: target ATA host
810  *
811  *	Restore initial config stored by ahci_save_initial_config().
812  *
813  *	LOCKING:
814  *	None.
815  */
816 static void ahci_restore_initial_config(struct ata_host *host)
817 {
818 	struct ahci_host_priv *hpriv = host->private_data;
819 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
820 
821 	writel(hpriv->saved_cap, mmio + HOST_CAP);
822 	writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
823 	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */
824 }
825 
826 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
827 {
828 	static const int offset[] = {
829 		[SCR_STATUS]		= PORT_SCR_STAT,
830 		[SCR_CONTROL]		= PORT_SCR_CTL,
831 		[SCR_ERROR]		= PORT_SCR_ERR,
832 		[SCR_ACTIVE]		= PORT_SCR_ACT,
833 		[SCR_NOTIFICATION]	= PORT_SCR_NTF,
834 	};
835 	struct ahci_host_priv *hpriv = ap->host->private_data;
836 
837 	if (sc_reg < ARRAY_SIZE(offset) &&
838 	    (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
839 		return offset[sc_reg];
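	/* an offset of 0 tells the callers the register isn't implemented */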
840 	return 0;
841 }
842 
843 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
844 {
845 	void __iomem *port_mmio = ahci_port_base(link->ap);
846 	int offset = ahci_scr_offset(link->ap, sc_reg);
847 
848 	if (offset) {
849 		*val = readl(port_mmio + offset);
850 		return 0;
851 	}
852 	return -EINVAL;
853 }
854 
855 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
856 {
857 	void __iomem *port_mmio = ahci_port_base(link->ap);
858 	int offset = ahci_scr_offset(link->ap, sc_reg);
859 
860 	if (offset) {
861 		writel(val, port_mmio + offset);
862 		return 0;
863 	}
864 	return -EINVAL;
865 }
866 
867 static void ahci_start_engine(struct ata_port *ap)
868 {
869 	void __iomem *port_mmio = ahci_port_base(ap);
870 	u32 tmp;
871 
872 	/* start DMA */
873 	tmp = readl(port_mmio + PORT_CMD);
874 	tmp |= PORT_CMD_START;
875 	writel(tmp, port_mmio + PORT_CMD);
876 	readl(port_mmio + PORT_CMD); /* flush */
877 }
878 
879 static int ahci_stop_engine(struct ata_port *ap)
880 {
881 	void __iomem *port_mmio = ahci_port_base(ap);
882 	u32 tmp;
883 
884 	tmp = readl(port_mmio + PORT_CMD);
885 
886 	/* check if the HBA is idle */
887 	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
888 		return 0;
889 
890 	/* setting HBA to idle */
891 	tmp &= ~PORT_CMD_START;
892 	writel(tmp, port_mmio + PORT_CMD);
893 
894 	/* wait for engine to stop. This could be as long as 500 msec */
895 	tmp = ata_wait_register(port_mmio + PORT_CMD,
896 				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
897 	if (tmp & PORT_CMD_LIST_ON)
898 		return -EIO;
899 
900 	return 0;
901 }
902 
903 static void ahci_start_fis_rx(struct ata_port *ap)
904 {
905 	void __iomem *port_mmio = ahci_port_base(ap);
906 	struct ahci_host_priv *hpriv = ap->host->private_data;
907 	struct ahci_port_priv *pp = ap->private_data;
908 	u32 tmp;
909 
910 	/* set FIS registers */
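	/*
	 * The high half is written as two 16-bit shifts so the code also
	 * builds cleanly when dma_addr_t is only 32 bits wide.
	 */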
911 	if (hpriv->cap & HOST_CAP_64)
912 		writel((pp->cmd_slot_dma >> 16) >> 16,
913 		       port_mmio + PORT_LST_ADDR_HI);
914 	writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
915 
916 	if (hpriv->cap & HOST_CAP_64)
917 		writel((pp->rx_fis_dma >> 16) >> 16,
918 		       port_mmio + PORT_FIS_ADDR_HI);
919 	writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
920 
921 	/* enable FIS reception */
922 	tmp = readl(port_mmio + PORT_CMD);
923 	tmp |= PORT_CMD_FIS_RX;
924 	writel(tmp, port_mmio + PORT_CMD);
925 
926 	/* flush */
927 	readl(port_mmio + PORT_CMD);
928 }
929 
930 static int ahci_stop_fis_rx(struct ata_port *ap)
931 {
932 	void __iomem *port_mmio = ahci_port_base(ap);
933 	u32 tmp;
934 
935 	/* disable FIS reception */
936 	tmp = readl(port_mmio + PORT_CMD);
937 	tmp &= ~PORT_CMD_FIS_RX;
938 	writel(tmp, port_mmio + PORT_CMD);
939 
940 	/* wait for completion, spec says 500ms, give it 1000 */
941 	tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
942 				PORT_CMD_FIS_ON, 10, 1000);
943 	if (tmp & PORT_CMD_FIS_ON)
944 		return -EBUSY;
945 
946 	return 0;
947 }
948 
949 static void ahci_power_up(struct ata_port *ap)
950 {
951 	struct ahci_host_priv *hpriv = ap->host->private_data;
952 	void __iomem *port_mmio = ahci_port_base(ap);
953 	u32 cmd;
954 
955 	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
956 
957 	/* spin up device */
958 	if (hpriv->cap & HOST_CAP_SSS) {
959 		cmd |= PORT_CMD_SPIN_UP;
960 		writel(cmd, port_mmio + PORT_CMD);
961 	}
962 
963 	/* wake up link */
964 	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
965 }
966 
967 static void ahci_disable_alpm(struct ata_port *ap)
968 {
969 	struct ahci_host_priv *hpriv = ap->host->private_data;
970 	void __iomem *port_mmio = ahci_port_base(ap);
971 	u32 cmd;
972 	struct ahci_port_priv *pp = ap->private_data;
973 
974 	/* IPM bits should be disabled by libata-core */
975 	/* get the existing command bits */
976 	cmd = readl(port_mmio + PORT_CMD);
977 
978 	/* disable ALPM and ASP */
979 	cmd &= ~PORT_CMD_ASP;
980 	cmd &= ~PORT_CMD_ALPE;
981 
982 	/* force the interface back to active */
983 	cmd |= PORT_CMD_ICC_ACTIVE;
984 
985 	/* write out new cmd value */
986 	writel(cmd, port_mmio + PORT_CMD);
987 	cmd = readl(port_mmio + PORT_CMD);
988 
989 	/* wait 10ms to be sure we've come out of any low power state */
990 	msleep(10);
991 
992 	/* clear out any PhyRdy stuff from interrupt status */
993 	writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
994 
995 	/* go ahead and clean out PhyRdy Change from SError too */
996 	ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
997 
998 	/*
999 	 * Clear the flag that tells us to ignore PhyRdy state changes,
1000 	 * so hotplug events are reported again.
1001 	 */
1002 	hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
1003 
1004 	/*
1005  	 * Enable interrupts on Phy Ready.
1006  	 */
1007 	pp->intr_mask |= PORT_IRQ_PHYRDY;
1008 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1009 
1010 	/*
1011 	 * don't change the link pm policy - we can be called
1012 	 * just to turn off link pm temporarily
1013 	 */
1014 }
1015 
1016 static int ahci_enable_alpm(struct ata_port *ap,
1017 	enum link_pm policy)
1018 {
1019 	struct ahci_host_priv *hpriv = ap->host->private_data;
1020 	void __iomem *port_mmio = ahci_port_base(ap);
1021 	u32 cmd;
1022 	struct ahci_port_priv *pp = ap->private_data;
1023 	u32 asp;
1024 
1025 	/* Make sure the host is capable of link power management */
1026 	if (!(hpriv->cap & HOST_CAP_ALPM))
1027 		return -EINVAL;
1028 
1029 	switch (policy) {
1030 	case MAX_PERFORMANCE:
1031 	case NOT_AVAILABLE:
1032 		/*
1033  		 * if we came here with NOT_AVAILABLE,
1034  		 * it just means this is the first time we
1035  		 * have tried to enable - default to max performance,
1036  		 * and let the user go to lower power modes on request.
1037  		 */
1038 		ahci_disable_alpm(ap);
1039 		return 0;
1040 	case MIN_POWER:
1041 		/* configure HBA to enter SLUMBER */
1042 		asp = PORT_CMD_ASP;
1043 		break;
1044 	case MEDIUM_POWER:
1045 		/* configure HBA to enter PARTIAL */
1046 		asp = 0;
1047 		break;
1048 	default:
1049 		return -EINVAL;
1050 	}
1051 
1052 	/*
1053 	 * Disable interrupts on Phy Ready.  This keeps us from
1054 	 * getting woken up due to spurious phy ready interrupts.
1055 	 * TBD - hot plug should be done via polling now; is
1056 	 * that even supported?
1057 	 */
1058 	pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1059 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1060 
1061 	/*
1062  	 * Set a flag to indicate that we should ignore all PhyRdy
1063  	 * state changes since these can happen now whenever we
1064  	 * change link state
1065  	 */
1066 	hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1067 
1068 	/* get the existing command bits */
1069 	cmd = readl(port_mmio + PORT_CMD);
1070 
1071 	/*
1072  	 * Set ASP based on Policy
1073  	 */
1074 	cmd |= asp;
1075 
1076 	/*
1077  	 * Setting this bit will instruct the HBA to aggressively
1078  	 * enter a lower power link state when it's appropriate and
1079  	 * based on the value set above for ASP
1080  	 */
1081 	cmd |= PORT_CMD_ALPE;
1082 
1083 	/* write out new cmd value */
1084 	writel(cmd, port_mmio + PORT_CMD);
1085 	cmd = readl(port_mmio + PORT_CMD);
1086 
1087 	/* IPM bits should be set by libata-core */
1088 	return 0;
1089 }
1090 
1091 #ifdef CONFIG_PM
1092 static void ahci_power_down(struct ata_port *ap)
1093 {
1094 	struct ahci_host_priv *hpriv = ap->host->private_data;
1095 	void __iomem *port_mmio = ahci_port_base(ap);
1096 	u32 cmd, scontrol;
1097 
1098 	if (!(hpriv->cap & HOST_CAP_SSS))
1099 		return;
1100 
1101 	/* put device into listen mode, first set PxSCTL.DET to 0 */
1102 	scontrol = readl(port_mmio + PORT_SCR_CTL);
1103 	scontrol &= ~0xf;
1104 	writel(scontrol, port_mmio + PORT_SCR_CTL);
1105 
1106 	/* then set PxCMD.SUD to 0 */
1107 	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1108 	cmd &= ~PORT_CMD_SPIN_UP;
1109 	writel(cmd, port_mmio + PORT_CMD);
1110 }
1111 #endif
1112 
1113 static void ahci_start_port(struct ata_port *ap)
1114 {
1115 	struct ahci_port_priv *pp = ap->private_data;
1116 	struct ata_link *link;
1117 	struct ahci_em_priv *emp;
1118 
1119 	/* enable FIS reception */
1120 	ahci_start_fis_rx(ap);
1121 
1122 	/* enable DMA */
1123 	ahci_start_engine(ap);
1124 
1125 	/* turn on LEDs */
1126 	if (ap->flags & ATA_FLAG_EM) {
1127 		ata_for_each_link(link, ap, EDGE) {
1128 			emp = &pp->em_priv[link->pmp];
1129 			ahci_transmit_led_message(ap, emp->led_state, 4);
1130 		}
1131 	}
1132 
1133 	if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1134 		ata_for_each_link(link, ap, EDGE)
1135 			ahci_init_sw_activity(link);
1136 
1137 }
1138 
1139 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1140 {
1141 	int rc;
1142 
1143 	/* disable DMA */
1144 	rc = ahci_stop_engine(ap);
1145 	if (rc) {
1146 		*emsg = "failed to stop engine";
1147 		return rc;
1148 	}
1149 
1150 	/* disable FIS reception */
1151 	rc = ahci_stop_fis_rx(ap);
1152 	if (rc) {
1153 		*emsg = "failed to stop FIS RX";
1154 		return rc;
1155 	}
1156 
1157 	return 0;
1158 }
1159 
1160 static int ahci_reset_controller(struct ata_host *host)
1161 {
1162 	struct pci_dev *pdev = to_pci_dev(host->dev);
1163 	struct ahci_host_priv *hpriv = host->private_data;
1164 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1165 	u32 tmp;
1166 
1167 	/* we must be in AHCI mode, before using anything
1168 	 * AHCI-specific, such as HOST_RESET.
1169 	 */
1170 	ahci_enable_ahci(mmio);
1171 
1172 	/* global controller reset */
1173 	if (!ahci_skip_host_reset) {
1174 		tmp = readl(mmio + HOST_CTL);
1175 		if ((tmp & HOST_RESET) == 0) {
1176 			writel(tmp | HOST_RESET, mmio + HOST_CTL);
1177 			readl(mmio + HOST_CTL); /* flush */
1178 		}
1179 
1180 		/*
1181 		 * To perform a host reset, the OS sets HOST_RESET
1182 		 * and polls until the bit reads back as 0.  The
1183 		 * reset must complete within 1 second, or the
1184 		 * hardware should be considered fried.
1185 		 */
1186 		tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1187 					HOST_RESET, 10, 1000);
1188 
1189 		if (tmp & HOST_RESET) {
1190 			dev_printk(KERN_ERR, host->dev,
1191 				   "controller reset failed (0x%x)\n", tmp);
1192 			return -EIO;
1193 		}
1194 
1195 		/* turn on AHCI mode */
1196 		ahci_enable_ahci(mmio);
1197 
1198 		/* Some registers might be cleared on reset.  Restore
1199 		 * initial values.
1200 		 */
1201 		ahci_restore_initial_config(host);
1202 	} else
1203 		dev_printk(KERN_INFO, host->dev,
1204 			   "skipping global host reset\n");
1205 
1206 	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1207 		u16 tmp16;
1208 
1209 		/* configure PCS */
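		/*
		 * The low bits of the ICH PCS register enable individual
		 * ports; make sure every port in port_map is enabled.
		 */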
1210 		pci_read_config_word(pdev, 0x92, &tmp16);
1211 		if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1212 			tmp16 |= hpriv->port_map;
1213 			pci_write_config_word(pdev, 0x92, tmp16);
1214 		}
1215 	}
1216 
1217 	return 0;
1218 }
1219 
1220 static void ahci_sw_activity(struct ata_link *link)
1221 {
1222 	struct ata_port *ap = link->ap;
1223 	struct ahci_port_priv *pp = ap->private_data;
1224 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1225 
1226 	if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1227 		return;
1228 
1229 	emp->activity++;
1230 	if (!timer_pending(&emp->timer))
1231 		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
1232 }
1233 
1234 static void ahci_sw_activity_blink(unsigned long arg)
1235 {
1236 	struct ata_link *link = (struct ata_link *)arg;
1237 	struct ata_port *ap = link->ap;
1238 	struct ahci_port_priv *pp = ap->private_data;
1239 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1240 	unsigned long led_message = emp->led_state;
1241 	u32 activity_led_state;
1242 	unsigned long flags;
1243 
1244 	led_message &= EM_MSG_LED_VALUE;
1245 	led_message |= ap->port_no | (link->pmp << 8);
1246 
1247 	/* check to see if we've had activity.  If so,
1248 	 * toggle state of LED and reset timer.  If not,
1249 	 * turn LED to desired idle state.
1250 	 */
1251 	spin_lock_irqsave(ap->lock, flags);
1252 	if (emp->saved_activity != emp->activity) {
1253 		emp->saved_activity = emp->activity;
1254 		/* get the current LED state */
1255 		activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
1256 
1257 		if (activity_led_state)
1258 			activity_led_state = 0;
1259 		else
1260 			activity_led_state = 1;
1261 
1262 		/* clear old state */
1263 		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1264 
1265 		/* toggle state */
1266 		led_message |= (activity_led_state << 16);
1267 		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1268 	} else {
1269 		/* switch to idle */
1270 		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1271 		if (emp->blink_policy == BLINK_OFF)
1272 			led_message |= (1 << 16);
1273 	}
1274 	spin_unlock_irqrestore(ap->lock, flags);
1275 	ahci_transmit_led_message(ap, led_message, 4);
1276 }
1277 
1278 static void ahci_init_sw_activity(struct ata_link *link)
1279 {
1280 	struct ata_port *ap = link->ap;
1281 	struct ahci_port_priv *pp = ap->private_data;
1282 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1283 
1284 	/* init activity stats, setup timer */
1285 	emp->saved_activity = emp->activity = 0;
1286 	setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1287 
1288 	/* check our blink policy and set flag for link if it's enabled */
1289 	if (emp->blink_policy)
1290 		link->flags |= ATA_LFLAG_SW_ACTIVITY;
1291 }
1292 
1293 static int ahci_reset_em(struct ata_host *host)
1294 {
1295 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1296 	u32 em_ctl;
1297 
1298 	em_ctl = readl(mmio + HOST_EM_CTL);
1299 	if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1300 		return -EINVAL;
1301 
1302 	writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1303 	return 0;
1304 }
1305 
1306 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1307 					ssize_t size)
1308 {
1309 	struct ahci_host_priv *hpriv = ap->host->private_data;
1310 	struct ahci_port_priv *pp = ap->private_data;
1311 	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1312 	u32 em_ctl;
1313 	u32 message[] = {0, 0};
1314 	unsigned long flags;
1315 	int pmp;
1316 	struct ahci_em_priv *emp;
1317 
1318 	/* get the slot number from the message */
1319 	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1320 	if (pmp < MAX_SLOTS)
1321 		emp = &pp->em_priv[pmp];
1322 	else
1323 		return -EINVAL;
1324 
1325 	spin_lock_irqsave(ap->lock, flags);
1326 
1327 	/*
1328 	 * if we are still busy transmitting a previous message,
1329 	 * do not allow a new one to be sent
1330 	 */
1331 	em_ctl = readl(mmio + HOST_EM_CTL);
1332 	if (em_ctl & EM_CTL_TM) {
1333 		spin_unlock_irqrestore(ap->lock, flags);
1334 		return -EINVAL;
1335 	}
1336 
1337 	/*
1338 	 * create message header - this is all zero except for
1339 	 * the message size, which is 4 bytes.
1340 	 */
1341 	message[0] |= (4 << 8);
1342 
1343 	/* ignore bits 3:0 of byte zero, fill in the port number ourselves */
1344 	message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
1345 
1346 	/* write message to EM_LOC */
1347 	writel(message[0], mmio + hpriv->em_loc);
1348 	writel(message[1], mmio + hpriv->em_loc+4);
1349 
1350 	/* save off new led state for port/slot */
1351 	emp->led_state = state;
1352 
1353 	/*
1354 	 * tell hardware to transmit the message
1355 	 */
1356 	writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1357 
1358 	spin_unlock_irqrestore(ap->lock, flags);
1359 	return size;
1360 }
1361 
1362 static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1363 {
1364 	struct ahci_port_priv *pp = ap->private_data;
1365 	struct ata_link *link;
1366 	struct ahci_em_priv *emp;
1367 	int rc = 0;
1368 
1369 	ata_for_each_link(link, ap, EDGE) {
1370 		emp = &pp->em_priv[link->pmp];
1371 		rc += sprintf(buf, "%lx\n", emp->led_state);
1372 	}
1373 	return rc;
1374 }
1375 
1376 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1377 				size_t size)
1378 {
1379 	int state;
1380 	int pmp;
1381 	struct ahci_port_priv *pp = ap->private_data;
1382 	struct ahci_em_priv *emp;
1383 
1384 	state = simple_strtoul(buf, NULL, 0);
1385 
1386 	/* get the slot number from the message */
1387 	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1388 	if (pmp < MAX_SLOTS)
1389 		emp = &pp->em_priv[pmp];
1390 	else
1391 		return -EINVAL;
1392 
1393 	/* mask off the activity bits if we are in sw_activity
1394 	 * mode; the user should turn off sw_activity before setting
1395 	 * the activity LED through em_message
1396 	 */
1397 	if (emp->blink_policy)
1398 		state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1399 
1400 	return ahci_transmit_led_message(ap, state, size);
1401 }
1402 
1403 static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1404 {
1405 	struct ata_link *link = dev->link;
1406 	struct ata_port *ap = link->ap;
1407 	struct ahci_port_priv *pp = ap->private_data;
1408 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1409 	u32 port_led_state = emp->led_state;
1410 
1411 	/* save the desired Activity LED behavior */
1412 	if (val == OFF) {
1413 		/* clear LFLAG */
1414 		link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1415 
1416 		/* set the LED to OFF */
1417 		port_led_state &= EM_MSG_LED_VALUE_OFF;
1418 		port_led_state |= (ap->port_no | (link->pmp << 8));
1419 		ahci_transmit_led_message(ap, port_led_state, 4);
1420 	} else {
1421 		link->flags |= ATA_LFLAG_SW_ACTIVITY;
1422 		if (val == BLINK_OFF) {
1423 			/* set LED to ON for idle */
1424 			port_led_state &= EM_MSG_LED_VALUE_OFF;
1425 			port_led_state |= (ap->port_no | (link->pmp << 8));
1426 			port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1427 			ahci_transmit_led_message(ap, port_led_state, 4);
1428 		}
1429 	}
1430 	emp->blink_policy = val;
1431 	return 0;
1432 }
1433 
1434 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1435 {
1436 	struct ata_link *link = dev->link;
1437 	struct ata_port *ap = link->ap;
1438 	struct ahci_port_priv *pp = ap->private_data;
1439 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1440 
1441 	/* display the saved value of activity behavior for this
1442 	 * disk.
1443 	 */
1444 	return sprintf(buf, "%d\n", emp->blink_policy);
1445 }
1446 
1447 static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1448 			   int port_no, void __iomem *mmio,
1449 			   void __iomem *port_mmio)
1450 {
1451 	const char *emsg = NULL;
1452 	int rc;
1453 	u32 tmp;
1454 
1455 	/* make sure port is not active */
1456 	rc = ahci_deinit_port(ap, &emsg);
1457 	if (rc)
1458 		dev_printk(KERN_WARNING, &pdev->dev,
1459 			   "%s (%d)\n", emsg, rc);
1460 
1461 	/* clear SError */
1462 	tmp = readl(port_mmio + PORT_SCR_ERR);
1463 	VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1464 	writel(tmp, port_mmio + PORT_SCR_ERR);
1465 
1466 	/* clear port IRQ */
1467 	tmp = readl(port_mmio + PORT_IRQ_STAT);
1468 	VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1469 	if (tmp)
1470 		writel(tmp, port_mmio + PORT_IRQ_STAT);
1471 
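	/* HOST_IRQ_STAT is write-1-to-clear; ack any stale interrupt for this port */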
1472 	writel(1 << port_no, mmio + HOST_IRQ_STAT);
1473 }
1474 
1475 static void ahci_init_controller(struct ata_host *host)
1476 {
1477 	struct ahci_host_priv *hpriv = host->private_data;
1478 	struct pci_dev *pdev = to_pci_dev(host->dev);
1479 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1480 	int i;
1481 	void __iomem *port_mmio;
1482 	u32 tmp;
1483 	int mv;
1484 
1485 	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
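		/*
		 * The hidden PATA function sits behind AHCI port 2 on the
		 * 6121 and port 4 on the 6145; silence that port's IRQs.
		 */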
1486 		if (pdev->device == 0x6121)
1487 			mv = 2;
1488 		else
1489 			mv = 4;
1490 		port_mmio = __ahci_port_base(host, mv);
1491 
1492 		writel(0, port_mmio + PORT_IRQ_MASK);
1493 
1494 		/* clear port IRQ */
1495 		tmp = readl(port_mmio + PORT_IRQ_STAT);
1496 		VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1497 		if (tmp)
1498 			writel(tmp, port_mmio + PORT_IRQ_STAT);
1499 	}
1500 
1501 	for (i = 0; i < host->n_ports; i++) {
1502 		struct ata_port *ap = host->ports[i];
1503 
1504 		port_mmio = ahci_port_base(ap);
1505 		if (ata_port_is_dummy(ap))
1506 			continue;
1507 
1508 		ahci_port_init(pdev, ap, i, mmio, port_mmio);
1509 	}
1510 
1511 	tmp = readl(mmio + HOST_CTL);
1512 	VPRINTK("HOST_CTL 0x%x\n", tmp);
1513 	writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1514 	tmp = readl(mmio + HOST_CTL);
1515 	VPRINTK("HOST_CTL 0x%x\n", tmp);
1516 }
1517 
1518 static void ahci_dev_config(struct ata_device *dev)
1519 {
1520 	struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1521 
1522 	if (hpriv->flags & AHCI_HFLAG_SECT255) {
1523 		dev->max_sectors = 255;
1524 		ata_dev_printk(dev, KERN_INFO,
1525 			       "SB600 AHCI: limiting to 255 sectors per cmd\n");
1526 	}
1527 }
1528 
1529 static unsigned int ahci_dev_classify(struct ata_port *ap)
1530 {
1531 	void __iomem *port_mmio = ahci_port_base(ap);
1532 	struct ata_taskfile tf;
1533 	u32 tmp;
1534 
1535 	tmp = readl(port_mmio + PORT_SIG);
1536 	tf.lbah		= (tmp >> 24)	& 0xff;
1537 	tf.lbam		= (tmp >> 16)	& 0xff;
1538 	tf.lbal		= (tmp >> 8)	& 0xff;
1539 	tf.nsect	= (tmp)		& 0xff;
1540 
1541 	return ata_dev_classify(&tf);
1542 }
1543 
1544 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1545 			       u32 opts)
1546 {
1547 	dma_addr_t cmd_tbl_dma;
1548 
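	/* every tag owns its own command table inside the per-port DMA area */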
1549 	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1550 
1551 	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1552 	pp->cmd_slot[tag].status = 0;
1553 	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1554 	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1555 }
1556 
1557 static int ahci_kick_engine(struct ata_port *ap, int force_restart)
1558 {
1559 	void __iomem *port_mmio = ahci_port_base(ap);
1560 	struct ahci_host_priv *hpriv = ap->host->private_data;
1561 	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1562 	u32 tmp;
1563 	int busy, rc;
1564 
1565 	/* do we need to kick the port? */
1566 	busy = status & (ATA_BUSY | ATA_DRQ);
1567 	if (!busy && !force_restart)
1568 		return 0;
1569 
1570 	/* stop engine */
1571 	rc = ahci_stop_engine(ap);
1572 	if (rc)
1573 		goto out_restart;
1574 
1575 	/* need to do CLO? */
1576 	if (!busy) {
1577 		rc = 0;
1578 		goto out_restart;
1579 	}
1580 
1581 	if (!(hpriv->cap & HOST_CAP_CLO)) {
1582 		rc = -EOPNOTSUPP;
1583 		goto out_restart;
1584 	}
1585 
1586 	/* perform CLO */
1587 	tmp = readl(port_mmio + PORT_CMD);
1588 	tmp |= PORT_CMD_CLO;
1589 	writel(tmp, port_mmio + PORT_CMD);
1590 
1591 	rc = 0;
1592 	tmp = ata_wait_register(port_mmio + PORT_CMD,
1593 				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1594 	if (tmp & PORT_CMD_CLO)
1595 		rc = -EIO;
1596 
1597 	/* restart engine */
1598  out_restart:
1599 	ahci_start_engine(ap);
1600 	return rc;
1601 }
1602 
1603 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1604 				struct ata_taskfile *tf, int is_cmd, u16 flags,
1605 				unsigned long timeout_msec)
1606 {
1607 	const u32 cmd_fis_len = 5; /* five dwords */
1608 	struct ahci_port_priv *pp = ap->private_data;
1609 	void __iomem *port_mmio = ahci_port_base(ap);
1610 	u8 *fis = pp->cmd_tbl;
1611 	u32 tmp;
1612 
1613 	/* prep the command */
1614 	ata_tf_to_fis(tf, pmp, is_cmd, fis);
1615 	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1616 
1617 	/* issue & wait */
1618 	writel(1, port_mmio + PORT_CMD_ISSUE);
1619 
1620 	if (timeout_msec) {
1621 		tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1622 					1, timeout_msec);
1623 		if (tmp & 0x1) {
1624 			ahci_kick_engine(ap, 1);
1625 			return -EBUSY;
1626 		}
1627 	} else
1628 		readl(port_mmio + PORT_CMD_ISSUE);	/* flush */
1629 
1630 	return 0;
1631 }
1632 
1633 static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1634 			     int pmp, unsigned long deadline,
1635 			     int (*check_ready)(struct ata_link *link))
1636 {
1637 	struct ata_port *ap = link->ap;
1638 	const char *reason = NULL;
1639 	unsigned long now, msecs;
1640 	struct ata_taskfile tf;
1641 	int rc;
1642 
1643 	DPRINTK("ENTER\n");
1644 
1645 	/* prepare for SRST (AHCI-1.1 10.4.1) */
1646 	rc = ahci_kick_engine(ap, 1);
1647 	if (rc && rc != -EOPNOTSUPP)
1648 		ata_link_printk(link, KERN_WARNING,
1649 				"failed to reset engine (errno=%d)\n", rc);
1650 
1651 	ata_tf_init(link->device, &tf);
1652 
1653 	/* issue the first H2D Register FIS */
1654 	msecs = 0;
1655 	now = jiffies;
1656 	if (time_after(deadline, now))
1657 		msecs = jiffies_to_msecs(deadline - now);
1658 
1659 	tf.ctl |= ATA_SRST;
1660 	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1661 				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1662 		rc = -EIO;
1663 		reason = "1st FIS failed";
1664 		goto fail;
1665 	}
1666 
1667 	/* spec says at least 5us, but be generous and sleep for 1ms */
1668 	msleep(1);
1669 
1670 	/* issue the second H2D Register FIS */
1671 	tf.ctl &= ~ATA_SRST;
1672 	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1673 
1674 	/* wait for link to become ready */
1675 	rc = ata_wait_after_reset(link, deadline, check_ready);
1676 	/* link occupied, -ENODEV too is an error */
1677 	if (rc) {
1678 		reason = "device not ready";
1679 		goto fail;
1680 	}
1681 	*class = ahci_dev_classify(ap);
1682 
1683 	DPRINTK("EXIT, class=%u\n", *class);
1684 	return 0;
1685 
1686  fail:
1687 	ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1688 	return rc;
1689 }
1690 
1691 static int ahci_check_ready(struct ata_link *link)
1692 {
1693 	void __iomem *port_mmio = ahci_port_base(link->ap);
1694 	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1695 
1696 	return ata_check_ready(status);
1697 }
1698 
1699 static int ahci_softreset(struct ata_link *link, unsigned int *class,
1700 			  unsigned long deadline)
1701 {
1702 	int pmp = sata_srst_pmp(link);
1703 
1704 	DPRINTK("ENTER\n");
1705 
1706 	return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1707 }
1708 
1709 static int ahci_sb600_check_ready(struct ata_link *link)
1710 {
1711 	void __iomem *port_mmio = ahci_port_base(link->ap);
1712 	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1713 	u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
1714 
1715 	/*
1716 	 * If BAD PMP is reported (a HW bug on these chips), there is no
1717 	 * need to check TFDATA; bailing out early saves the timeout delay.
1718 	 */
1719 	if (irq_status & PORT_IRQ_BAD_PMP)
1720 		return -EIO;
1721 
1722 	return ata_check_ready(status);
1723 }
1724 
1725 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1726 				unsigned long deadline)
1727 {
1728 	struct ata_port *ap = link->ap;
1729 	void __iomem *port_mmio = ahci_port_base(ap);
1730 	int pmp = sata_srst_pmp(link);
1731 	int rc;
1732 	u32 irq_sts;
1733 
1734 	DPRINTK("ENTER\n");
1735 
1736 	rc = ahci_do_softreset(link, class, pmp, deadline,
1737 			       ahci_sb600_check_ready);
1738 
1739 	/*
1740 	 * Soft reset fails on some ATI chips with IPMS set when PMP
1741 	 * is enabled but a SATA HDD/ODD is connected to the SATA port;
1742 	 * retry the soft reset with pmp 0.
1743 	 */
1744 	if (rc == -EIO) {
1745 		irq_sts = readl(port_mmio + PORT_IRQ_STAT);
1746 		if (irq_sts & PORT_IRQ_BAD_PMP) {
1747 			ata_link_printk(link, KERN_WARNING,
1748 					"failed due to HW bug, retry pmp=0\n");
1749 			rc = ahci_do_softreset(link, class, 0, deadline,
1750 					       ahci_check_ready);
1751 		}
1752 	}
1753 
1754 	return rc;
1755 }
1756 
1757 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1758 			  unsigned long deadline)
1759 {
1760 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1761 	struct ata_port *ap = link->ap;
1762 	struct ahci_port_priv *pp = ap->private_data;
1763 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1764 	struct ata_taskfile tf;
1765 	bool online;
1766 	int rc;
1767 
1768 	DPRINTK("ENTER\n");
1769 
1770 	ahci_stop_engine(ap);
1771 
1772 	/* clear D2H reception area to properly wait for D2H FIS */
1773 	ata_tf_init(link->device, &tf);
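	/* tf.command lands in the D2H status byte; 0x80 == ATA_BUSY */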
1774 	tf.command = 0x80;
1775 	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1776 
1777 	rc = sata_link_hardreset(link, timing, deadline, &online,
1778 				 ahci_check_ready);
1779 
1780 	ahci_start_engine(ap);
1781 
1782 	if (online)
1783 		*class = ahci_dev_classify(ap);
1784 
1785 	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1786 	return rc;
1787 }
1788 
1789 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1790 				 unsigned long deadline)
1791 {
1792 	struct ata_port *ap = link->ap;
1793 	bool online;
1794 	int rc;
1795 
1796 	DPRINTK("ENTER\n");
1797 
1798 	ahci_stop_engine(ap);
1799 
1800 	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1801 				 deadline, &online, NULL);
1802 
1803 	ahci_start_engine(ap);
1804 
1805 	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1806 
1807 	/* vt8251 doesn't clear BSY on signature FIS reception,
1808 	 * request follow-up softreset.
1809 	 */
1810 	return online ? -EAGAIN : rc;
1811 }
1812 
1813 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
1814 				unsigned long deadline)
1815 {
1816 	struct ata_port *ap = link->ap;
1817 	struct ahci_port_priv *pp = ap->private_data;
1818 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1819 	struct ata_taskfile tf;
1820 	bool online;
1821 	int rc;
1822 
1823 	ahci_stop_engine(ap);
1824 
1825 	/* clear D2H reception area to properly wait for D2H FIS */
1826 	ata_tf_init(link->device, &tf);
1827 	tf.command = 0x80;
1828 	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1829 
1830 	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1831 				 deadline, &online, NULL);
1832 
1833 	ahci_start_engine(ap);
1834 
1835 	/* The pseudo configuration device on SIMG4726 attached to
1836 	 * ASUS P5W-DH Deluxe doesn't send signature FIS after
1837 	 * hardreset if no device is attached to the first downstream
1838 	 * port, and the pseudo device locks up on SRST w/ PMP==0.  To
1839 	 * work around this, wait for !BSY only briefly.  If BSY isn't
1840 	 * cleared, perform CLO and proceed to IDENTIFY (achieved by
1841 	 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
1842 	 *
1843 	 * Wait for two seconds.  Devices attached to downstream port
1844 	 * which can't process the following IDENTIFY after this will
1845 	 * have to be reset again.  For most cases, this should
1846 	 * suffice while keeping probing snappy enough.
1847 	 */
1848 	if (online) {
1849 		rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
1850 					  ahci_check_ready);
1851 		if (rc)
1852 			ahci_kick_engine(ap, 0);
1853 	}
1854 	return rc;
1855 }
1856 
1857 static void ahci_postreset(struct ata_link *link, unsigned int *class)
1858 {
1859 	struct ata_port *ap = link->ap;
1860 	void __iomem *port_mmio = ahci_port_base(ap);
1861 	u32 new_tmp, tmp;
1862 
1863 	ata_std_postreset(link, class);
1864 
1865 	/* Make sure port's ATAPI bit is set appropriately */
1866 	new_tmp = tmp = readl(port_mmio + PORT_CMD);
1867 	if (*class == ATA_DEV_ATAPI)
1868 		new_tmp |= PORT_CMD_ATAPI;
1869 	else
1870 		new_tmp &= ~PORT_CMD_ATAPI;
1871 	if (new_tmp != tmp) {
1872 		writel(new_tmp, port_mmio + PORT_CMD);
1873 		readl(port_mmio + PORT_CMD); /* flush */
1874 	}
1875 }
1876 
1877 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
1878 {
1879 	struct scatterlist *sg;
1880 	struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
1881 	unsigned int si;
1882 
1883 	VPRINTK("ENTER\n");
1884 
1885 	/*
1886 	 * Next, the S/G list.
1887 	 */
1888 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1889 		dma_addr_t addr = sg_dma_address(sg);
1890 		u32 sg_len = sg_dma_len(sg);
1891 
1892 		ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
1893 		ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
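		/* the PRD byte count field is zero-based: store length - 1 */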
1894 		ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
1895 	}
1896 
1897 	return si;
1898 }
1899 
1900 static void ahci_qc_prep(struct ata_queued_cmd *qc)
1901 {
1902 	struct ata_port *ap = qc->ap;
1903 	struct ahci_port_priv *pp = ap->private_data;
1904 	int is_atapi = ata_is_atapi(qc->tf.protocol);
1905 	void *cmd_tbl;
1906 	u32 opts;
1907 	const u32 cmd_fis_len = 5; /* five dwords */
1908 	unsigned int n_elem;
1909 
1910 	/*
1911 	 * Fill in command table information.  First, the header,
1912 	 * a SATA Register - Host to Device command FIS.
1913 	 */
1914 	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
1915 
1916 	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
1917 	if (is_atapi) {
1918 		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
1919 		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
1920 	}
1921 
1922 	n_elem = 0;
1923 	if (qc->flags & ATA_QCFLAG_DMAMAP)
1924 		n_elem = ahci_fill_sg(qc, cmd_tbl);
1925 
1926 	/*
1927 	 * Fill in command slot information.
1928 	 */
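	/*
	 * Command header DW0 layout: bits 4:0 = CFL (FIS length in
	 * dwords), bit 5 = ATAPI, bit 6 = Write, bits 15:12 = PMP port,
	 * bits 31:16 = PRDT length.  For example, a 5-dword FIS with
	 * two PRD entries for PMP port 3 encodes as 0x00023005.
	 */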
1929 	opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
1930 	if (qc->tf.flags & ATA_TFLAG_WRITE)
1931 		opts |= AHCI_CMD_WRITE;
1932 	if (is_atapi)
1933 		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
1934 
1935 	ahci_fill_cmd_slot(pp, qc->tag, opts);
1936 }
1937 
1938 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
1939 {
1940 	struct ahci_host_priv *hpriv = ap->host->private_data;
1941 	struct ahci_port_priv *pp = ap->private_data;
1942 	struct ata_eh_info *host_ehi = &ap->link.eh_info;
1943 	struct ata_link *link = NULL;
1944 	struct ata_queued_cmd *active_qc;
1945 	struct ata_eh_info *active_ehi;
1946 	u32 serror;
1947 
1948 	/* determine active link */
1949 	ata_for_each_link(link, ap, EDGE)
1950 		if (ata_link_active(link))
1951 			break;
1952 	if (!link)
1953 		link = &ap->link;
1954 
1955 	active_qc = ata_qc_from_tag(ap, link->active_tag);
1956 	active_ehi = &link->eh_info;
1957 
1958 	/* record irq stat */
1959 	ata_ehi_clear_desc(host_ehi);
1960 	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
1961 
1962 	/* AHCI needs SError cleared; otherwise, it might lock up */
1963 	ahci_scr_read(&ap->link, SCR_ERROR, &serror);
1964 	ahci_scr_write(&ap->link, SCR_ERROR, serror);
1965 	host_ehi->serror |= serror;
1966 
1967 	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
1968 	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
1969 		irq_stat &= ~PORT_IRQ_IF_ERR;
1970 
1971 	if (irq_stat & PORT_IRQ_TF_ERR) {
1972 		/* If qc is active, charge it; otherwise, the active
1973 		 * link.  There's no active qc on NCQ errors.  It will
1974 		 * be determined by EH by reading log page 10h.
1975 		 */
1976 		if (active_qc)
1977 			active_qc->err_mask |= AC_ERR_DEV;
1978 		else
1979 			active_ehi->err_mask |= AC_ERR_DEV;
1980 
1981 		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
1982 			host_ehi->serror &= ~SERR_INTERNAL;
1983 	}
1984 
1985 	if (irq_stat & PORT_IRQ_UNK_FIS) {
1986 		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
1987 
1988 		active_ehi->err_mask |= AC_ERR_HSM;
1989 		active_ehi->action |= ATA_EH_RESET;
1990 		ata_ehi_push_desc(active_ehi,
1991 				  "unknown FIS %08x %08x %08x %08x" ,
1992 				  unk[0], unk[1], unk[2], unk[3]);
1993 	}
1994 
1995 	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
1996 		active_ehi->err_mask |= AC_ERR_HSM;
1997 		active_ehi->action |= ATA_EH_RESET;
1998 		ata_ehi_push_desc(active_ehi, "incorrect PMP");
1999 	}
2000 
2001 	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
2002 		host_ehi->err_mask |= AC_ERR_HOST_BUS;
2003 		host_ehi->action |= ATA_EH_RESET;
2004 		ata_ehi_push_desc(host_ehi, "host bus error");
2005 	}
2006 
2007 	if (irq_stat & PORT_IRQ_IF_ERR) {
2008 		host_ehi->err_mask |= AC_ERR_ATA_BUS;
2009 		host_ehi->action |= ATA_EH_RESET;
2010 		ata_ehi_push_desc(host_ehi, "interface fatal error");
2011 	}
2012 
2013 	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
2014 		ata_ehi_hotplugged(host_ehi);
2015 		ata_ehi_push_desc(host_ehi, "%s",
2016 			irq_stat & PORT_IRQ_CONNECT ?
2017 			"connection status changed" : "PHY RDY changed");
2018 	}
2019 
2020 	/* okay, let's hand over to EH */
2021 
2022 	if (irq_stat & PORT_IRQ_FREEZE)
2023 		ata_port_freeze(ap);
2024 	else
2025 		ata_port_abort(ap);
2026 }
2027 
2028 static void ahci_port_intr(struct ata_port *ap)
2029 {
2030 	void __iomem *port_mmio = ahci_port_base(ap);
2031 	struct ata_eh_info *ehi = &ap->link.eh_info;
2032 	struct ahci_port_priv *pp = ap->private_data;
2033 	struct ahci_host_priv *hpriv = ap->host->private_data;
2034 	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
2035 	u32 status, qc_active;
2036 	int rc;
2037 
2038 	status = readl(port_mmio + PORT_IRQ_STAT);
2039 	writel(status, port_mmio + PORT_IRQ_STAT);
2040 
2041 	/* ignore BAD_PMP while resetting */
2042 	if (unlikely(resetting))
2043 		status &= ~PORT_IRQ_BAD_PMP;
2044 
2045 	/* If we are getting PhyRdy, this is just a power
2046 	 * state change; clear the PhyRdy interrupt status,
2047 	 * along with the PhyRdy change and Comm Wake bits
2048 	 * in SError.
2049 	 */
2050 	if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
2051 		(status & PORT_IRQ_PHYRDY)) {
2052 		status &= ~PORT_IRQ_PHYRDY;
2053 		ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
2054 	}
2055 
2056 	if (unlikely(status & PORT_IRQ_ERROR)) {
2057 		ahci_error_intr(ap, status);
2058 		return;
2059 	}
2060 
2061 	if (status & PORT_IRQ_SDB_FIS) {
2062 		/* If SNotification is available, leave notification
2063 		 * handling to sata_async_notification().  If not,
2064 		 * emulate it by snooping SDB FIS RX area.
2065 		 *
2066 		 * Snooping FIS RX area is probably cheaper than
2067 		 * poking SNotification, but some controllers which
2068 		 * implement SNotification, ICH9 for example, don't
2069 		 * store the AN SDB FIS in the receive area.
2070 		 */
2071 		if (hpriv->cap & HOST_CAP_SNTF)
2072 			sata_async_notification(ap);
2073 		else {
2074 			/* If the 'N' bit in word 0 of the FIS is set,
2075 			 * we just received asynchronous notification.
2076 			 * Tell libata about it.
2077 			 */
2078 			const __le32 *f = pp->rx_fis + RX_FIS_SDB;
2079 			u32 f0 = le32_to_cpu(f[0]);
2080 
2081 			if (f0 & (1 << 15))
2082 				sata_async_notification(ap);
2083 		}
2084 	}
2085 
2086 	/* pp->active_link is valid iff any command is in flight */
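	/* NCQ completions are tracked in PxSACT, other commands in PxCI */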
2087 	if (ap->qc_active && pp->active_link->sactive)
2088 		qc_active = readl(port_mmio + PORT_SCR_ACT);
2089 	else
2090 		qc_active = readl(port_mmio + PORT_CMD_ISSUE);
2091 
2092 	rc = ata_qc_complete_multiple(ap, qc_active);
2093 
2094 	/* while resetting, invalid completions are expected */
2095 	if (unlikely(rc < 0 && !resetting)) {
2096 		ehi->err_mask |= AC_ERR_HSM;
2097 		ehi->action |= ATA_EH_RESET;
2098 		ata_port_freeze(ap);
2099 	}
2100 }
2101 
2102 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
2103 {
2104 	struct ata_host *host = dev_instance;
2105 	struct ahci_host_priv *hpriv;
2106 	unsigned int i, handled = 0;
2107 	void __iomem *mmio;
2108 	u32 irq_stat, irq_masked;
2109 
2110 	VPRINTK("ENTER\n");
2111 
2112 	hpriv = host->private_data;
2113 	mmio = host->iomap[AHCI_PCI_BAR];
2114 
2115 	/* sigh.  0xffffffff is a valid return from h/w */
2116 	irq_stat = readl(mmio + HOST_IRQ_STAT);
2117 	if (!irq_stat)
2118 		return IRQ_NONE;
2119 
2120 	irq_masked = irq_stat & hpriv->port_map;
2121 
2122 	spin_lock(&host->lock);
2123 
2124 	for (i = 0; i < host->n_ports; i++) {
2125 		struct ata_port *ap;
2126 
2127 		if (!(irq_masked & (1 << i)))
2128 			continue;
2129 
2130 		ap = host->ports[i];
2131 		if (ap) {
2132 			ahci_port_intr(ap);
2133 			VPRINTK("port %u\n", i);
2134 		} else {
2135 			VPRINTK("port %u (no irq)\n", i);
2136 			if (ata_ratelimit())
2137 				dev_printk(KERN_WARNING, host->dev,
2138 					"interrupt on disabled port %u\n", i);
2139 		}
2140 
2141 		handled = 1;
2142 	}
2143 
2144 	/* HOST_IRQ_STAT behaves as a level-triggered latch, meaning
2145 	 * that it should be cleared only after all the port events
2146 	 * have been cleared; otherwise, it will raise a spurious
2147 	 * interrupt after each valid one.  Please read section 10.6.2
2148 	 * of the AHCI 1.1 spec for more information.
2149 	 *
2150 	 * Also, use the unmasked value to clear interrupt as spurious
2151 	 * pending event on a dummy port might cause screaming IRQ.
2152 	 */
2153 	writel(irq_stat, mmio + HOST_IRQ_STAT);
2154 
2155 	spin_unlock(&host->lock);
2156 
2157 	VPRINTK("EXIT\n");
2158 
2159 	return IRQ_RETVAL(handled);
2160 }
2161 
2162 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
2163 {
2164 	struct ata_port *ap = qc->ap;
2165 	void __iomem *port_mmio = ahci_port_base(ap);
2166 	struct ahci_port_priv *pp = ap->private_data;
2167 
2168 	/* Keep track of the currently active link.  It will be used
2169 	 * in completion path to determine whether NCQ phase is in
2170 	 * progress.
2171 	 */
2172 	pp->active_link = qc->dev->link;
2173 
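	/* for NCQ, the tag must be set in PxSACT before it is set in PxCI */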
2174 	if (qc->tf.protocol == ATA_PROT_NCQ)
2175 		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
2176 	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
2177 
2178 	ahci_sw_activity(qc->dev->link);
2179 
2180 	return 0;
2181 }
2182 
2183 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2184 {
2185 	struct ahci_port_priv *pp = qc->ap->private_data;
2186 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2187 
2188 	ata_tf_from_fis(d2h_fis, &qc->result_tf);
2189 	return true;
2190 }
2191 
2192 static void ahci_freeze(struct ata_port *ap)
2193 {
2194 	void __iomem *port_mmio = ahci_port_base(ap);
2195 
2196 	/* turn IRQ off */
2197 	writel(0, port_mmio + PORT_IRQ_MASK);
2198 }
2199 
2200 static void ahci_thaw(struct ata_port *ap)
2201 {
2202 	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
2203 	void __iomem *port_mmio = ahci_port_base(ap);
2204 	u32 tmp;
2205 	struct ahci_port_priv *pp = ap->private_data;
2206 
2207 	/* clear IRQ */
2208 	tmp = readl(port_mmio + PORT_IRQ_STAT);
2209 	writel(tmp, port_mmio + PORT_IRQ_STAT);
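	/* also ack this port's bit in the global HOST_IRQ_STAT register */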
2210 	writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
2211 
2212 	/* turn IRQ back on */
2213 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2214 }
2215 
2216 static void ahci_error_handler(struct ata_port *ap)
2217 {
2218 	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2219 		/* restart engine */
2220 		ahci_stop_engine(ap);
2221 		ahci_start_engine(ap);
2222 	}
2223 
2224 	sata_pmp_error_handler(ap);
2225 }
2226 
2227 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2228 {
2229 	struct ata_port *ap = qc->ap;
2230 
2231 	/* make DMA engine forget about the failed command */
2232 	if (qc->flags & ATA_QCFLAG_FAILED)
2233 		ahci_kick_engine(ap, 1);
2234 }
2235 
2236 static void ahci_pmp_attach(struct ata_port *ap)
2237 {
2238 	void __iomem *port_mmio = ahci_port_base(ap);
2239 	struct ahci_port_priv *pp = ap->private_data;
2240 	u32 cmd;
2241 
2242 	cmd = readl(port_mmio + PORT_CMD);
2243 	cmd |= PORT_CMD_PMP;
2244 	writel(cmd, port_mmio + PORT_CMD);
2245 
2246 	pp->intr_mask |= PORT_IRQ_BAD_PMP;
2247 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2248 }
2249 
2250 static void ahci_pmp_detach(struct ata_port *ap)
2251 {
2252 	void __iomem *port_mmio = ahci_port_base(ap);
2253 	struct ahci_port_priv *pp = ap->private_data;
2254 	u32 cmd;
2255 
2256 	cmd = readl(port_mmio + PORT_CMD);
2257 	cmd &= ~PORT_CMD_PMP;
2258 	writel(cmd, port_mmio + PORT_CMD);
2259 
2260 	pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2261 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2262 }
2263 
2264 static int ahci_port_resume(struct ata_port *ap)
2265 {
2266 	ahci_power_up(ap);
2267 	ahci_start_port(ap);
2268 
2269 	if (sata_pmp_attached(ap))
2270 		ahci_pmp_attach(ap);
2271 	else
2272 		ahci_pmp_detach(ap);
2273 
2274 	return 0;
2275 }
2276 
2277 #ifdef CONFIG_PM
2278 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2279 {
2280 	const char *emsg = NULL;
2281 	int rc;
2282 
2283 	rc = ahci_deinit_port(ap, &emsg);
2284 	if (rc == 0)
2285 		ahci_power_down(ap);
2286 	else {
2287 		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2288 		ahci_start_port(ap);
2289 	}
2290 
2291 	return rc;
2292 }
2293 
2294 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2295 {
2296 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
2297 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2298 	u32 ctl;
2299 
2300 	if (mesg.event & PM_EVENT_SLEEP) {
2301 		/* AHCI spec rev1.1 section 8.3.3:
2302 		 * Software must disable interrupts prior to requesting a
2303 		 * transition of the HBA to D3 state.
2304 		 */
2305 		ctl = readl(mmio + HOST_CTL);
2306 		ctl &= ~HOST_IRQ_EN;
2307 		writel(ctl, mmio + HOST_CTL);
2308 		readl(mmio + HOST_CTL); /* flush */
2309 	}
2310 
2311 	return ata_pci_device_suspend(pdev, mesg);
2312 }
2313 
2314 static int ahci_pci_device_resume(struct pci_dev *pdev)
2315 {
2316 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
2317 	int rc;
2318 
2319 	rc = ata_pci_device_do_resume(pdev);
2320 	if (rc)
2321 		return rc;
2322 
2323 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2324 		rc = ahci_reset_controller(host);
2325 		if (rc)
2326 			return rc;
2327 
2328 		ahci_init_controller(host);
2329 	}
2330 
2331 	ata_host_resume(host);
2332 
2333 	return 0;
2334 }
2335 #endif
2336 
2337 static int ahci_port_start(struct ata_port *ap)
2338 {
2339 	struct device *dev = ap->host->dev;
2340 	struct ahci_port_priv *pp;
2341 	void *mem;
2342 	dma_addr_t mem_dma;
2343 
2344 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2345 	if (!pp)
2346 		return -ENOMEM;
2347 
2348 	mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
2349 				  GFP_KERNEL);
2350 	if (!mem)
2351 		return -ENOMEM;
2352 	memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
2353 
2354 	/*
2355 	 * First item in chunk of DMA memory: 32-slot command table,
2356 	 * 32 bytes each in size
2357 	 */
2358 	pp->cmd_slot = mem;
2359 	pp->cmd_slot_dma = mem_dma;
2360 
2361 	mem += AHCI_CMD_SLOT_SZ;
2362 	mem_dma += AHCI_CMD_SLOT_SZ;
2363 
2364 	/*
2365 	 * Second item: Received-FIS area
2366 	 */
2367 	pp->rx_fis = mem;
2368 	pp->rx_fis_dma = mem_dma;
2369 
2370 	mem += AHCI_RX_FIS_SZ;
2371 	mem_dma += AHCI_RX_FIS_SZ;
2372 
2373 	/*
2374 	 * Third item: data area for the per-slot command tables, each
2375 	 * holding a command FIS, ATAPI CDB and scatter-gather table
2376 	 */
2377 	pp->cmd_tbl = mem;
2378 	pp->cmd_tbl_dma = mem_dma;
2379 
2380 	/*
2381 	 * Save off initial list of interrupts to be enabled.
2382 	 * This could be changed later
2383 	 */
2384 	pp->intr_mask = DEF_PORT_IRQ;
2385 
2386 	ap->private_data = pp;
2387 
2388 	/* engage engines, captain */
2389 	return ahci_port_resume(ap);
2390 }
2391 
2392 static void ahci_port_stop(struct ata_port *ap)
2393 {
2394 	const char *emsg = NULL;
2395 	int rc;
2396 
2397 	/* de-initialize port */
2398 	rc = ahci_deinit_port(ap, &emsg);
2399 	if (rc)
2400 		ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2401 }
2402 
2403 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2404 {
2405 	int rc;
2406 
2407 	if (using_dac &&
2408 	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2409 		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
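		/* fall back to a 32-bit consistent mask if 64-bit fails */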
2410 		if (rc) {
2411 			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2412 			if (rc) {
2413 				dev_printk(KERN_ERR, &pdev->dev,
2414 					   "64-bit DMA enable failed\n");
2415 				return rc;
2416 			}
2417 		}
2418 	} else {
2419 		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2420 		if (rc) {
2421 			dev_printk(KERN_ERR, &pdev->dev,
2422 				   "32-bit DMA enable failed\n");
2423 			return rc;
2424 		}
2425 		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2426 		if (rc) {
2427 			dev_printk(KERN_ERR, &pdev->dev,
2428 				   "32-bit consistent DMA enable failed\n");
2429 			return rc;
2430 		}
2431 	}
2432 	return 0;
2433 }
2434 
2435 static void ahci_print_info(struct ata_host *host)
2436 {
2437 	struct ahci_host_priv *hpriv = host->private_data;
2438 	struct pci_dev *pdev = to_pci_dev(host->dev);
2439 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2440 	u32 vers, cap, impl, speed;
2441 	const char *speed_s;
2442 	u16 cc;
2443 	const char *scc_s;
2444 
2445 	vers = readl(mmio + HOST_VERSION);
2446 	cap = hpriv->cap;
2447 	impl = hpriv->port_map;
2448 
2449 	speed = (cap >> 20) & 0xf;
2450 	if (speed == 1)
2451 		speed_s = "1.5";
2452 	else if (speed == 2)
2453 		speed_s = "3";
2454 	else if (speed == 3)
2455 		speed_s = "6";
2456 	else
2457 		speed_s = "?";
2458 
2459 	pci_read_config_word(pdev, 0x0a, &cc);
2460 	if (cc == PCI_CLASS_STORAGE_IDE)
2461 		scc_s = "IDE";
2462 	else if (cc == PCI_CLASS_STORAGE_SATA)
2463 		scc_s = "SATA";
2464 	else if (cc == PCI_CLASS_STORAGE_RAID)
2465 		scc_s = "RAID";
2466 	else
2467 		scc_s = "unknown";
2468 
2469 	dev_printk(KERN_INFO, &pdev->dev,
2470 		"AHCI %02x%02x.%02x%02x "
2471 		"%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2472 		,
2473 
2474 		(vers >> 24) & 0xff,
2475 		(vers >> 16) & 0xff,
2476 		(vers >> 8) & 0xff,
2477 		vers & 0xff,
2478 
2479 		((cap >> 8) & 0x1f) + 1,
2480 		(cap & 0x1f) + 1,
2481 		speed_s,
2482 		impl,
2483 		scc_s);
2484 
2485 	dev_printk(KERN_INFO, &pdev->dev,
2486 		"flags: "
2487 		"%s%s%s%s%s%s%s"
2488 		"%s%s%s%s%s%s%s"
2489 		"%s\n"
2490 		,
2491 
2492 		cap & (1 << 31) ? "64bit " : "",
2493 		cap & (1 << 30) ? "ncq " : "",
2494 		cap & (1 << 29) ? "sntf " : "",
2495 		cap & (1 << 28) ? "ilck " : "",
2496 		cap & (1 << 27) ? "stag " : "",
2497 		cap & (1 << 26) ? "pm " : "",
2498 		cap & (1 << 25) ? "led " : "",
2499 
2500 		cap & (1 << 24) ? "clo " : "",
2501 		cap & (1 << 19) ? "nz " : "",
2502 		cap & (1 << 18) ? "only " : "",
2503 		cap & (1 << 17) ? "pmp " : "",
2504 		cap & (1 << 15) ? "pio " : "",
2505 		cap & (1 << 14) ? "slum " : "",
2506 		cap & (1 << 13) ? "part " : "",
2507 		cap & (1 << 6) ? "ems ": ""
2508 		);
2509 }
2510 
2511 /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
2512  * hardwired to on-board SIMG 4726.  The chipset is ICH8 and doesn't
2513  * support PMP, and the 4726 either directly exports the device
2514  * attached to the first downstream port or acts as a hardware storage
2515  * controller and emulates a single ATA device (can be RAID 0/1 or some
2516  * other configuration).
2517  *
2518  * When there's no device attached to the first downstream port of the
2519  * 4726, "Config Disk" appears, which is a pseudo ATA device to
2520  * configure the 4726.  However, ATA emulation of the device is very
2521  * lame.  It doesn't send signature D2H Reg FIS after the initial
2522  * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
2523  *
2524  * The following function works around the problem by always using
2525  * hardreset on the port and not depending on receiving signature FIS
2526  * afterward.  If signature FIS isn't received soon, ATA class is
2527  * assumed without follow-up softreset.
2528  */
2529 static void ahci_p5wdh_workaround(struct ata_host *host)
2530 {
2531 	static struct dmi_system_id sysids[] = {
2532 		{
2533 			.ident = "P5W DH Deluxe",
2534 			.matches = {
2535 				DMI_MATCH(DMI_SYS_VENDOR,
2536 					  "ASUSTEK COMPUTER INC"),
2537 				DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
2538 			},
2539 		},
2540 		{ }
2541 	};
2542 	struct pci_dev *pdev = to_pci_dev(host->dev);
2543 
2544 	if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
2545 	    dmi_check_system(sysids)) {
2546 		struct ata_port *ap = host->ports[1];
2547 
2548 		dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
2549 			   "Deluxe on-board SIMG4726 workaround\n");
2550 
2551 		ap->ops = &ahci_p5wdh_ops;
2552 		ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
2553 	}
2554 }
2555 
2556 static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
2557 {
2558 	static const struct dmi_system_id broken_systems[] = {
2559 		{
2560 			.ident = "HP Compaq nx6310",
2561 			.matches = {
2562 				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2563 				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
2564 			},
2565 			/* PCI slot number of the controller */
2566 			.driver_data = (void *)0x1FUL,
2567 		},
2568 		{
2569 			.ident = "HP Compaq 6720s",
2570 			.matches = {
2571 				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2572 				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"),
2573 			},
2574 			/* PCI slot number of the controller */
2575 			.driver_data = (void *)0x1FUL,
2576 		},
2577 
2578 		{ }	/* terminate list */
2579 	};
2580 	const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
2581 
2582 	if (dmi) {
2583 		unsigned long slot = (unsigned long)dmi->driver_data;
2584 		/* apply the quirk only to on-board controllers */
2585 		return slot == PCI_SLOT(pdev->devfn);
2586 	}
2587 
2588 	return false;
2589 }
2590 
2591 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2592 {
2593 	static int printed_version;
2594 	unsigned int board_id = ent->driver_data;
2595 	struct ata_port_info pi = ahci_port_info[board_id];
2596 	const struct ata_port_info *ppi[] = { &pi, NULL };
2597 	struct device *dev = &pdev->dev;
2598 	struct ahci_host_priv *hpriv;
2599 	struct ata_host *host;
2600 	int n_ports, i, rc;
2601 
2602 	VPRINTK("ENTER\n");
2603 
2604 	WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
2605 
2606 	if (!printed_version++)
2607 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2608 
2609 	/* The AHCI driver can only drive the SATA ports; the PATA driver
2610 	   can drive them all, so if both drivers are selected make sure
2611 	   AHCI stays out of the way */
2612 	if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
2613 		return -ENODEV;
2614 
2615 	/* acquire resources */
2616 	rc = pcim_enable_device(pdev);
2617 	if (rc)
2618 		return rc;
2619 
2620 	/* AHCI controllers often implement an SFF-compatible interface.
2621 	 * Grab all PCI BARs just in case.
2622 	 */
2623 	rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
2624 	if (rc == -EBUSY)
2625 		pcim_pin_device(pdev);
2626 	if (rc)
2627 		return rc;
2628 
2629 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
2630 	    (pdev->device == 0x2652 || pdev->device == 0x2653)) {
2631 		u8 map;
2632 
2633 		/* ICH6s share the same PCI ID for both piix and ahci
2634 		 * modes.  Enabling ahci mode while MAP indicates
2635 		 * combined mode is a bad idea.  Yield to ata_piix.
2636 		 */
2637 		pci_read_config_byte(pdev, ICH_MAP, &map);
2638 		if (map & 0x3) {
2639 			dev_printk(KERN_INFO, &pdev->dev, "controller is in "
2640 				   "combined mode, can't enable AHCI mode\n");
2641 			return -ENODEV;
2642 		}
2643 	}
2644 
2645 	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
2646 	if (!hpriv)
2647 		return -ENOMEM;
2648 	hpriv->flags |= (unsigned long)pi.private_data;
2649 
2650 	/* MCP65 revision A1 and A2 can't do MSI */
2651 	if (board_id == board_ahci_mcp65 &&
2652 	    (pdev->revision == 0xa1 || pdev->revision == 0xa2))
2653 		hpriv->flags |= AHCI_HFLAG_NO_MSI;
2654 
2655 	/* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
2656 	if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
2657 		hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
2658 
2659 	if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
2660 		pci_enable_msi(pdev);
2661 
2662 	/* save initial config */
2663 	ahci_save_initial_config(pdev, hpriv);
2664 
2665 	/* prepare host */
2666 	if (hpriv->cap & HOST_CAP_NCQ)
2667 		pi.flags |= ATA_FLAG_NCQ;
2668 
2669 	if (hpriv->cap & HOST_CAP_PMP)
2670 		pi.flags |= ATA_FLAG_PMP;
2671 
2672 	if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
2673 		u8 messages;
2674 		void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
2675 		u32 em_loc = readl(mmio + HOST_EM_LOC);
2676 		u32 em_ctl = readl(mmio + HOST_EM_CTL);
2677 
2678 		messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
2679 
2680 		/* we only support LED message type right now */
2681 		if ((messages & 0x01) && (ahci_em_messages == 1)) {
2682 			/* store em_loc */
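			/* bits 31:16 of EM_LOC give the buffer offset in dwords */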
2683 			hpriv->em_loc = ((em_loc >> 16) * 4);
2684 			pi.flags |= ATA_FLAG_EM;
2685 			if (!(em_ctl & EM_CTL_ALHD))
2686 				pi.flags |= ATA_FLAG_SW_ACTIVITY;
2687 		}
2688 	}
2689 
2690 	if (ahci_broken_system_poweroff(pdev)) {
2691 		pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
2692 		dev_info(&pdev->dev,
2693 			"quirky BIOS, skipping spindown on poweroff\n");
2694 	}
2695 
2696 	/* CAP.NP sometimes indicates the index of the last enabled
2697 	 * port and at other times that of the last possible port, so
2698 	 * determining the maximum port number requires looking at
2699 	 * both CAP.NP and port_map.
2700 	 */
2701 	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
2702 
2703 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2704 	if (!host)
2705 		return -ENOMEM;
2706 	host->iomap = pcim_iomap_table(pdev);
2707 	host->private_data = hpriv;
2708 
2709 	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
2710 		host->flags |= ATA_HOST_PARALLEL_SCAN;
2711 	else
2712 		printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
2713 
2714 	if (pi.flags & ATA_FLAG_EM)
2715 		ahci_reset_em(host);
2716 
2717 	for (i = 0; i < host->n_ports; i++) {
2718 		struct ata_port *ap = host->ports[i];
2719 
2720 		ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
2721 		ata_port_pbar_desc(ap, AHCI_PCI_BAR,
2722 				   0x100 + ap->port_no * 0x80, "port");
2723 
2724 		/* set initial link pm policy */
2725 		ap->pm_policy = NOT_AVAILABLE;
2726 
2727 		/* set enclosure management message type */
2728 		if (ap->flags & ATA_FLAG_EM)
2729 			ap->em_message_type = ahci_em_messages;
2730 
2732 		/* disabled/not-implemented port */
2733 		if (!(hpriv->port_map & (1 << i)))
2734 			ap->ops = &ata_dummy_port_ops;
2735 	}
2736 
2737 	/* apply workaround for ASUS P5W DH Deluxe mainboard */
2738 	ahci_p5wdh_workaround(host);
2739 
2740 	/* initialize adapter */
2741 	rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
2742 	if (rc)
2743 		return rc;
2744 
2745 	rc = ahci_reset_controller(host);
2746 	if (rc)
2747 		return rc;
2748 
2749 	ahci_init_controller(host);
2750 	ahci_print_info(host);
2751 
2752 	pci_set_master(pdev);
2753 	return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
2754 				 &ahci_sht);
2755 }
2756 
2757 static int __init ahci_init(void)
2758 {
2759 	return pci_register_driver(&ahci_pci_driver);
2760 }
2761 
2762 static void __exit ahci_exit(void)
2763 {
2764 	pci_unregister_driver(&ahci_pci_driver);
2765 }
2766 
2767 
2768 MODULE_AUTHOR("Jeff Garzik");
2769 MODULE_DESCRIPTION("AHCI SATA low-level driver");
2770 MODULE_LICENSE("GPL");
2771 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
2772 MODULE_VERSION(DRV_VERSION);
2773 
2774 module_init(ahci_init);
2775 module_exit(ahci_exit);
2776