xref: /linux/drivers/ata/ahci.c (revision 8b1935e6a36b0967efc593d67ed3aebbfbc1f5b1)
1 /*
2  *  ahci.c - AHCI SATA support
3  *
4  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2004-2005 Red Hat, Inc.
9  *
10  *
11  *  This program is free software; you can redistribute it and/or modify
12  *  it under the terms of the GNU General Public License as published by
13  *  the Free Software Foundation; either version 2, or (at your option)
14  *  any later version.
15  *
16  *  This program is distributed in the hope that it will be useful,
17  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
18  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  *  GNU General Public License for more details.
20  *
21  *  You should have received a copy of the GNU General Public License
22  *  along with this program; see the file COPYING.  If not, write to
23  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24  *
25  *
26  * libata documentation is available via 'make {ps|pdf}docs',
27  * as Documentation/DocBook/libata.*
28  *
29  * AHCI hardware documentation:
30  * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31  * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32  *
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/device.h>
44 #include <linux/dmi.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_cmnd.h>
47 #include <linux/libata.h>
48 
49 #define DRV_NAME	"ahci"
50 #define DRV_VERSION	"3.0"
51 
52 /* Enclosure Management Control */
53 #define EM_CTRL_MSG_TYPE              0x000f0000
54 
55 /* Enclosure Management LED Message Type */
56 #define EM_MSG_LED_HBA_PORT           0x0000000f
57 #define EM_MSG_LED_PMP_SLOT           0x0000ff00
58 #define EM_MSG_LED_VALUE              0xffff0000
59 #define EM_MSG_LED_VALUE_ACTIVITY     0x00070000
60 #define EM_MSG_LED_VALUE_OFF          0xfff80000
61 #define EM_MSG_LED_VALUE_ON           0x00010000
62 
63 static int ahci_skip_host_reset;
64 static int ahci_ignore_sss;
65 
66 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
67 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
68 
69 module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
70 MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
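/*
 * Both parameters above can be set on the kernel command line when the driver
 * is built in (e.g. "ahci.skip_host_reset=1") or passed at module load time
 * (e.g. "modprobe ahci ignore_sss=1").
 */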
71 
72 static int ahci_enable_alpm(struct ata_port *ap,
73 		enum link_pm policy);
74 static void ahci_disable_alpm(struct ata_port *ap);
75 static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
76 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
77 			      size_t size);
78 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
79 					ssize_t size);
80 
81 enum {
82 	AHCI_PCI_BAR		= 5,
83 	AHCI_MAX_PORTS		= 32,
84 	AHCI_MAX_SG		= 168, /* hardware max is 64K */
85 	AHCI_DMA_BOUNDARY	= 0xffffffff,
86 	AHCI_MAX_CMDS		= 32,
87 	AHCI_CMD_SZ		= 32,
88 	AHCI_CMD_SLOT_SZ	= AHCI_MAX_CMDS * AHCI_CMD_SZ,
89 	AHCI_RX_FIS_SZ		= 256,
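	/* Per-command table layout (see AHCI spec): command FIS at 0x00, ATAPI
	 * CDB at 0x40, 0x80 bytes of header in total, then one 16-byte PRD
	 * entry per scatter/gather element (struct ahci_sg below).
	 */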
90 	AHCI_CMD_TBL_CDB	= 0x40,
91 	AHCI_CMD_TBL_HDR_SZ	= 0x80,
92 	AHCI_CMD_TBL_SZ		= AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
93 	AHCI_CMD_TBL_AR_SZ	= AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
94 	AHCI_PORT_PRIV_DMA_SZ	= AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
95 				  AHCI_RX_FIS_SZ,
96 	AHCI_PORT_PRIV_FBS_DMA_SZ	= AHCI_CMD_SLOT_SZ +
97 					  AHCI_CMD_TBL_AR_SZ +
98 					  (AHCI_RX_FIS_SZ * 16),
99 	AHCI_IRQ_ON_SG		= (1 << 31),
100 	AHCI_CMD_ATAPI		= (1 << 5),
101 	AHCI_CMD_WRITE		= (1 << 6),
102 	AHCI_CMD_PREFETCH	= (1 << 7),
103 	AHCI_CMD_RESET		= (1 << 8),
104 	AHCI_CMD_CLR_BUSY	= (1 << 10),
105 
106 	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
107 	RX_FIS_SDB		= 0x58, /* offset of SDB FIS data */
108 	RX_FIS_UNK		= 0x60, /* offset of Unknown FIS data */
109 
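	/* board IDs; used as driver_data in ahci_pci_tbl and as indices
	 * into ahci_port_info[] below
	 */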
110 	board_ahci		= 0,
111 	board_ahci_vt8251	= 1,
112 	board_ahci_ign_iferr	= 2,
113 	board_ahci_sb600	= 3,
114 	board_ahci_mv		= 4,
115 	board_ahci_sb700	= 5, /* for SB700 and SB800 */
116 	board_ahci_mcp65	= 6,
117 	board_ahci_nopmp	= 7,
118 	board_ahci_yesncq	= 8,
119 	board_ahci_nosntf	= 9,
120 
121 	/* global controller registers */
122 	HOST_CAP		= 0x00, /* host capabilities */
123 	HOST_CTL		= 0x04, /* global host control */
124 	HOST_IRQ_STAT		= 0x08, /* interrupt status */
125 	HOST_PORTS_IMPL		= 0x0c, /* bitmap of implemented ports */
126 	HOST_VERSION		= 0x10, /* AHCI spec. version compliancy */
127 	HOST_EM_LOC		= 0x1c, /* Enclosure Management location */
128 	HOST_EM_CTL		= 0x20, /* Enclosure Management Control */
129 	HOST_CAP2		= 0x24, /* host capabilities, extended */
130 
131 	/* HOST_CTL bits */
132 	HOST_RESET		= (1 << 0),  /* reset controller; self-clear */
133 	HOST_IRQ_EN		= (1 << 1),  /* global IRQ enable */
134 	HOST_AHCI_EN		= (1 << 31), /* AHCI enabled */
135 
136 	/* HOST_CAP bits */
137 	HOST_CAP_SXS		= (1 << 5),  /* Supports External SATA */
138 	HOST_CAP_EMS		= (1 << 6),  /* Enclosure Management support */
139 	HOST_CAP_CCC		= (1 << 7),  /* Command Completion Coalescing */
140 	HOST_CAP_PART		= (1 << 13), /* Partial state capable */
141 	HOST_CAP_SSC		= (1 << 14), /* Slumber state capable */
142 	HOST_CAP_PIO_MULTI	= (1 << 15), /* PIO multiple DRQ support */
143 	HOST_CAP_FBS		= (1 << 16), /* FIS-based switching support */
144 	HOST_CAP_PMP		= (1 << 17), /* Port Multiplier support */
145 	HOST_CAP_ONLY		= (1 << 18), /* Supports AHCI mode only */
146 	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
147 	HOST_CAP_LED		= (1 << 25), /* Supports activity LED */
148 	HOST_CAP_ALPM		= (1 << 26), /* Aggressive Link PM support */
149 	HOST_CAP_SSS		= (1 << 27), /* Staggered Spin-up */
150 	HOST_CAP_MPS		= (1 << 28), /* Mechanical presence switch */
151 	HOST_CAP_SNTF		= (1 << 29), /* SNotification register */
152 	HOST_CAP_NCQ		= (1 << 30), /* Native Command Queueing */
153 	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */
154 
155 	/* HOST_CAP2 bits */
156 	HOST_CAP2_BOH		= (1 << 0),  /* BIOS/OS handoff supported */
157 	HOST_CAP2_NVMHCI	= (1 << 1),  /* NVMHCI supported */
158 	HOST_CAP2_APST		= (1 << 2),  /* Automatic partial to slumber */
159 
160 	/* registers for each SATA port */
161 	PORT_LST_ADDR		= 0x00, /* command list DMA addr */
162 	PORT_LST_ADDR_HI	= 0x04, /* command list DMA addr hi */
163 	PORT_FIS_ADDR		= 0x08, /* FIS rx buf addr */
164 	PORT_FIS_ADDR_HI	= 0x0c, /* FIS rx buf addr hi */
165 	PORT_IRQ_STAT		= 0x10, /* interrupt status */
166 	PORT_IRQ_MASK		= 0x14, /* interrupt enable/disable mask */
167 	PORT_CMD		= 0x18, /* port command */
168 	PORT_TFDATA		= 0x20,	/* taskfile data */
169 	PORT_SIG		= 0x24,	/* device TF signature */
170 	PORT_CMD_ISSUE		= 0x38, /* command issue */
171 	PORT_SCR_STAT		= 0x28, /* SATA phy register: SStatus */
172 	PORT_SCR_CTL		= 0x2c, /* SATA phy register: SControl */
173 	PORT_SCR_ERR		= 0x30, /* SATA phy register: SError */
174 	PORT_SCR_ACT		= 0x34, /* SATA phy register: SActive */
175 	PORT_SCR_NTF		= 0x3c, /* SATA phy register: SNotification */
176 	PORT_FBS		= 0x40, /* FIS-based Switching */
177 
178 	/* PORT_IRQ_{STAT,MASK} bits */
179 	PORT_IRQ_COLD_PRES	= (1 << 31), /* cold presence detect */
180 	PORT_IRQ_TF_ERR		= (1 << 30), /* task file error */
181 	PORT_IRQ_HBUS_ERR	= (1 << 29), /* host bus fatal error */
182 	PORT_IRQ_HBUS_DATA_ERR	= (1 << 28), /* host bus data error */
183 	PORT_IRQ_IF_ERR		= (1 << 27), /* interface fatal error */
184 	PORT_IRQ_IF_NONFATAL	= (1 << 26), /* interface non-fatal error */
185 	PORT_IRQ_OVERFLOW	= (1 << 24), /* xfer exhausted available S/G */
186 	PORT_IRQ_BAD_PMP	= (1 << 23), /* incorrect port multiplier */
187 
188 	PORT_IRQ_PHYRDY		= (1 << 22), /* PhyRdy changed */
189 	PORT_IRQ_DEV_ILCK	= (1 << 7), /* device interlock */
190 	PORT_IRQ_CONNECT	= (1 << 6), /* port connect change status */
191 	PORT_IRQ_SG_DONE	= (1 << 5), /* descriptor processed */
192 	PORT_IRQ_UNK_FIS	= (1 << 4), /* unknown FIS rx'd */
193 	PORT_IRQ_SDB_FIS	= (1 << 3), /* Set Device Bits FIS rx'd */
194 	PORT_IRQ_DMAS_FIS	= (1 << 2), /* DMA Setup FIS rx'd */
195 	PORT_IRQ_PIOS_FIS	= (1 << 1), /* PIO Setup FIS rx'd */
196 	PORT_IRQ_D2H_REG_FIS	= (1 << 0), /* D2H Register FIS rx'd */
197 
198 	PORT_IRQ_FREEZE		= PORT_IRQ_HBUS_ERR |
199 				  PORT_IRQ_IF_ERR |
200 				  PORT_IRQ_CONNECT |
201 				  PORT_IRQ_PHYRDY |
202 				  PORT_IRQ_UNK_FIS |
203 				  PORT_IRQ_BAD_PMP,
204 	PORT_IRQ_ERROR		= PORT_IRQ_FREEZE |
205 				  PORT_IRQ_TF_ERR |
206 				  PORT_IRQ_HBUS_DATA_ERR,
207 	DEF_PORT_IRQ		= PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
208 				  PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
209 				  PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
210 
211 	/* PORT_CMD bits */
212 	PORT_CMD_ASP		= (1 << 27), /* Aggressive Slumber/Partial */
213 	PORT_CMD_ALPE		= (1 << 26), /* Aggressive Link PM enable */
214 	PORT_CMD_ATAPI		= (1 << 24), /* Device is ATAPI */
215 	PORT_CMD_FBSCP		= (1 << 22), /* FBS Capable Port */
216 	PORT_CMD_PMP		= (1 << 17), /* PMP attached */
217 	PORT_CMD_LIST_ON	= (1 << 15), /* cmd list DMA engine running */
218 	PORT_CMD_FIS_ON		= (1 << 14), /* FIS DMA engine running */
219 	PORT_CMD_FIS_RX		= (1 << 4), /* Enable FIS receive DMA engine */
220 	PORT_CMD_CLO		= (1 << 3), /* Command list override */
221 	PORT_CMD_POWER_ON	= (1 << 2), /* Power up device */
222 	PORT_CMD_SPIN_UP	= (1 << 1), /* Spin up device */
223 	PORT_CMD_START		= (1 << 0), /* Enable port DMA engine */
224 
225 	PORT_CMD_ICC_MASK	= (0xf << 28), /* i/f ICC state mask */
226 	PORT_CMD_ICC_ACTIVE	= (0x1 << 28), /* Put i/f in active state */
227 	PORT_CMD_ICC_PARTIAL	= (0x2 << 28), /* Put i/f in partial state */
228 	PORT_CMD_ICC_SLUMBER	= (0x6 << 28), /* Put i/f in slumber state */
229 
230 	PORT_FBS_DWE_OFFSET	= 16, /* FBS device with error offset */
231 	PORT_FBS_ADO_OFFSET	= 12, /* FBS active dev optimization offset */
232 	PORT_FBS_DEV_OFFSET	= 8,  /* FBS device to issue offset */
233 	PORT_FBS_DEV_MASK	= (0xf << PORT_FBS_DEV_OFFSET),  /* FBS.DEV */
234 	PORT_FBS_SDE		= (1 << 2), /* FBS single device error */
235 	PORT_FBS_DEC		= (1 << 1), /* FBS device error clear */
236 	PORT_FBS_EN		= (1 << 0), /* Enable FBS */
237 
238 	/* hpriv->flags bits */
239 	AHCI_HFLAG_NO_NCQ		= (1 << 0),
240 	AHCI_HFLAG_IGN_IRQ_IF_ERR	= (1 << 1), /* ignore IRQ_IF_ERR */
241 	AHCI_HFLAG_IGN_SERR_INTERNAL	= (1 << 2), /* ignore SERR_INTERNAL */
242 	AHCI_HFLAG_32BIT_ONLY		= (1 << 3), /* force 32bit */
243 	AHCI_HFLAG_MV_PATA		= (1 << 4), /* PATA port */
244 	AHCI_HFLAG_NO_MSI		= (1 << 5), /* no PCI MSI */
245 	AHCI_HFLAG_NO_PMP		= (1 << 6), /* no PMP */
246 	AHCI_HFLAG_NO_HOTPLUG		= (1 << 7), /* ignore PxSERR.DIAG.N */
247 	AHCI_HFLAG_SECT255		= (1 << 8), /* max 255 sectors */
248 	AHCI_HFLAG_YES_NCQ		= (1 << 9), /* force NCQ cap on */
249 	AHCI_HFLAG_NO_SUSPEND		= (1 << 10), /* don't suspend */
250 	AHCI_HFLAG_SRST_TOUT_IS_OFFLINE	= (1 << 11), /* treat SRST timeout as
251 							link offline */
252 	AHCI_HFLAG_NO_SNTF		= (1 << 12), /* no sntf */
253 
254 	/* ap->flags bits */
255 
256 	AHCI_FLAG_COMMON		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
257 					  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
258 					  ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
259 					  ATA_FLAG_IPM,
260 
261 	ICH_MAP				= 0x90, /* ICH MAP register */
262 
263 	/* em constants */
264 	EM_MAX_SLOTS			= 8,
265 	EM_MAX_RETRY			= 5,
266 
267 	/* em_ctl bits */
268 	EM_CTL_RST			= (1 << 9), /* Reset */
269 	EM_CTL_TM			= (1 << 8), /* Transmit Message */
270 	EM_CTL_ALHD			= (1 << 26), /* Activity LED */
271 };
272 
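/* Command list entry (command header): one 32-byte slot per command,
 * pointing at the command table that holds the FIS and PRD entries.
 */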
273 struct ahci_cmd_hdr {
274 	__le32			opts;
275 	__le32			status;
276 	__le32			tbl_addr;
277 	__le32			tbl_addr_hi;
278 	__le32			reserved[4];
279 };
280 
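/* PRD table entry (scatter/gather element): 64-bit buffer address plus
 * flags and byte count packed into flags_size.
 */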
281 struct ahci_sg {
282 	__le32			addr;
283 	__le32			addr_hi;
284 	__le32			reserved;
285 	__le32			flags_size;
286 };
287 
288 struct ahci_em_priv {
289 	enum sw_activity blink_policy;
290 	struct timer_list timer;
291 	unsigned long saved_activity;
292 	unsigned long activity;
293 	unsigned long led_state;
294 };
295 
296 struct ahci_host_priv {
297 	unsigned int		flags;		/* AHCI_HFLAG_* */
298 	u32			cap;		/* cap to use */
299 	u32			cap2;		/* cap2 to use */
300 	u32			port_map;	/* port map to use */
301 	u32			saved_cap;	/* saved initial cap */
302 	u32			saved_cap2;	/* saved initial cap2 */
303 	u32			saved_port_map;	/* saved initial port_map */
304 	u32 			em_loc; /* enclosure management location */
305 };
306 
307 struct ahci_port_priv {
308 	struct ata_link		*active_link;
309 	struct ahci_cmd_hdr	*cmd_slot;
310 	dma_addr_t		cmd_slot_dma;
311 	void			*cmd_tbl;
312 	dma_addr_t		cmd_tbl_dma;
313 	void			*rx_fis;
314 	dma_addr_t		rx_fis_dma;
315 	/* for NCQ spurious interrupt analysis */
316 	unsigned int		ncq_saw_d2h:1;
317 	unsigned int		ncq_saw_dmas:1;
318 	unsigned int		ncq_saw_sdb:1;
319 	u32 			intr_mask;	/* interrupts to enable */
320 	bool			fbs_supported;	/* set iff FBS is supported */
321 	bool			fbs_enabled;	/* set iff FBS is enabled */
322 	int			fbs_last_dev;	/* save FBS.DEV of last FIS */
323 	/* enclosure management info per PM slot */
324 	struct ahci_em_priv	em_priv[EM_MAX_SLOTS];
325 };
326 
327 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
328 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
329 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
330 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
331 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
332 static int ahci_port_start(struct ata_port *ap);
333 static void ahci_port_stop(struct ata_port *ap);
334 static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
335 static void ahci_qc_prep(struct ata_queued_cmd *qc);
336 static void ahci_freeze(struct ata_port *ap);
337 static void ahci_thaw(struct ata_port *ap);
338 static void ahci_enable_fbs(struct ata_port *ap);
339 static void ahci_disable_fbs(struct ata_port *ap);
340 static void ahci_pmp_attach(struct ata_port *ap);
341 static void ahci_pmp_detach(struct ata_port *ap);
342 static int ahci_softreset(struct ata_link *link, unsigned int *class,
343 			  unsigned long deadline);
344 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
345 			  unsigned long deadline);
346 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
347 			  unsigned long deadline);
348 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
349 				 unsigned long deadline);
350 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
351 				unsigned long deadline);
352 static void ahci_postreset(struct ata_link *link, unsigned int *class);
353 static void ahci_error_handler(struct ata_port *ap);
354 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
355 static int ahci_port_resume(struct ata_port *ap);
356 static void ahci_dev_config(struct ata_device *dev);
357 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
358 			       u32 opts);
359 #ifdef CONFIG_PM
360 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
361 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
362 static int ahci_pci_device_resume(struct pci_dev *pdev);
363 #endif
364 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
365 static ssize_t ahci_activity_store(struct ata_device *dev,
366 				   enum sw_activity val);
367 static void ahci_init_sw_activity(struct ata_link *link);
368 
369 static ssize_t ahci_show_host_caps(struct device *dev,
370 				   struct device_attribute *attr, char *buf);
371 static ssize_t ahci_show_host_cap2(struct device *dev,
372 				   struct device_attribute *attr, char *buf);
373 static ssize_t ahci_show_host_version(struct device *dev,
374 				      struct device_attribute *attr, char *buf);
375 static ssize_t ahci_show_port_cmd(struct device *dev,
376 				  struct device_attribute *attr, char *buf);
377 
378 static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
379 static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
380 static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
381 static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
382 
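/* sysfs attributes exposed on the SCSI host and on each SCSI device */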
383 static struct device_attribute *ahci_shost_attrs[] = {
384 	&dev_attr_link_power_management_policy,
385 	&dev_attr_em_message_type,
386 	&dev_attr_em_message,
387 	&dev_attr_ahci_host_caps,
388 	&dev_attr_ahci_host_cap2,
389 	&dev_attr_ahci_host_version,
390 	&dev_attr_ahci_port_cmd,
391 	NULL
392 };
393 
394 static struct device_attribute *ahci_sdev_attrs[] = {
395 	&dev_attr_sw_activity,
396 	&dev_attr_unload_heads,
397 	NULL
398 };
399 
400 static struct scsi_host_template ahci_sht = {
401 	ATA_NCQ_SHT(DRV_NAME),
402 	.can_queue		= AHCI_MAX_CMDS - 1,
403 	.sg_tablesize		= AHCI_MAX_SG,
404 	.dma_boundary		= AHCI_DMA_BOUNDARY,
405 	.shost_attrs		= ahci_shost_attrs,
406 	.sdev_attrs		= ahci_sdev_attrs,
407 };
408 
409 static struct ata_port_operations ahci_ops = {
410 	.inherits		= &sata_pmp_port_ops,
411 
412 	.qc_defer		= ahci_pmp_qc_defer,
413 	.qc_prep		= ahci_qc_prep,
414 	.qc_issue		= ahci_qc_issue,
415 	.qc_fill_rtf		= ahci_qc_fill_rtf,
416 
417 	.freeze			= ahci_freeze,
418 	.thaw			= ahci_thaw,
419 	.softreset		= ahci_softreset,
420 	.hardreset		= ahci_hardreset,
421 	.postreset		= ahci_postreset,
422 	.pmp_softreset		= ahci_softreset,
423 	.error_handler		= ahci_error_handler,
424 	.post_internal_cmd	= ahci_post_internal_cmd,
425 	.dev_config		= ahci_dev_config,
426 
427 	.scr_read		= ahci_scr_read,
428 	.scr_write		= ahci_scr_write,
429 	.pmp_attach		= ahci_pmp_attach,
430 	.pmp_detach		= ahci_pmp_detach,
431 
432 	.enable_pm		= ahci_enable_alpm,
433 	.disable_pm		= ahci_disable_alpm,
434 	.em_show		= ahci_led_show,
435 	.em_store		= ahci_led_store,
436 	.sw_activity_show	= ahci_activity_show,
437 	.sw_activity_store	= ahci_activity_store,
438 #ifdef CONFIG_PM
439 	.port_suspend		= ahci_port_suspend,
440 	.port_resume		= ahci_port_resume,
441 #endif
442 	.port_start		= ahci_port_start,
443 	.port_stop		= ahci_port_stop,
444 };
445 
446 static struct ata_port_operations ahci_vt8251_ops = {
447 	.inherits		= &ahci_ops,
448 	.hardreset		= ahci_vt8251_hardreset,
449 };
450 
451 static struct ata_port_operations ahci_p5wdh_ops = {
452 	.inherits		= &ahci_ops,
453 	.hardreset		= ahci_p5wdh_hardreset,
454 };
455 
456 static struct ata_port_operations ahci_sb600_ops = {
457 	.inherits		= &ahci_ops,
458 	.softreset		= ahci_sb600_softreset,
459 	.pmp_softreset		= ahci_sb600_softreset,
460 };
461 
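/* stash AHCI_HFLAG_* bits in ata_port_info.private_data for the board table */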
462 #define AHCI_HFLAGS(flags)	.private_data	= (void *)(flags)
463 
464 static const struct ata_port_info ahci_port_info[] = {
465 	[board_ahci] =
466 	{
467 		.flags		= AHCI_FLAG_COMMON,
468 		.pio_mask	= ATA_PIO4,
469 		.udma_mask	= ATA_UDMA6,
470 		.port_ops	= &ahci_ops,
471 	},
472 	[board_ahci_vt8251] =
473 	{
474 		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
475 		.flags		= AHCI_FLAG_COMMON,
476 		.pio_mask	= ATA_PIO4,
477 		.udma_mask	= ATA_UDMA6,
478 		.port_ops	= &ahci_vt8251_ops,
479 	},
480 	[board_ahci_ign_iferr] =
481 	{
482 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_IRQ_IF_ERR),
483 		.flags		= AHCI_FLAG_COMMON,
484 		.pio_mask	= ATA_PIO4,
485 		.udma_mask	= ATA_UDMA6,
486 		.port_ops	= &ahci_ops,
487 	},
488 	[board_ahci_sb600] =
489 	{
490 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL |
491 				 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
492 				 AHCI_HFLAG_32BIT_ONLY),
493 		.flags		= AHCI_FLAG_COMMON,
494 		.pio_mask	= ATA_PIO4,
495 		.udma_mask	= ATA_UDMA6,
496 		.port_ops	= &ahci_sb600_ops,
497 	},
498 	[board_ahci_mv] =
499 	{
500 		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
501 				 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
502 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
503 				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
504 		.pio_mask	= ATA_PIO4,
505 		.udma_mask	= ATA_UDMA6,
506 		.port_ops	= &ahci_ops,
507 	},
508 	[board_ahci_sb700] =	/* for SB700 and SB800 */
509 	{
510 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL),
511 		.flags		= AHCI_FLAG_COMMON,
512 		.pio_mask	= ATA_PIO4,
513 		.udma_mask	= ATA_UDMA6,
514 		.port_ops	= &ahci_sb600_ops,
515 	},
516 	[board_ahci_mcp65] =
517 	{
518 		AHCI_HFLAGS	(AHCI_HFLAG_YES_NCQ),
519 		.flags		= AHCI_FLAG_COMMON,
520 		.pio_mask	= ATA_PIO4,
521 		.udma_mask	= ATA_UDMA6,
522 		.port_ops	= &ahci_ops,
523 	},
524 	[board_ahci_nopmp] =
525 	{
526 		AHCI_HFLAGS	(AHCI_HFLAG_NO_PMP),
527 		.flags		= AHCI_FLAG_COMMON,
528 		.pio_mask	= ATA_PIO4,
529 		.udma_mask	= ATA_UDMA6,
530 		.port_ops	= &ahci_ops,
531 	},
532 	[board_ahci_yesncq] =
533 	{
534 		AHCI_HFLAGS	(AHCI_HFLAG_YES_NCQ),
535 		.flags		= AHCI_FLAG_COMMON,
536 		.pio_mask	= ATA_PIO4,
537 		.udma_mask	= ATA_UDMA6,
538 		.port_ops	= &ahci_ops,
539 	},
540 	[board_ahci_nosntf] =
541 	{
542 		AHCI_HFLAGS	(AHCI_HFLAG_NO_SNTF),
543 		.flags		= AHCI_FLAG_COMMON,
544 		.pio_mask	= ATA_PIO4,
545 		.udma_mask	= ATA_UDMA6,
546 		.port_ops	= &ahci_ops,
547 	},
548 };
549 
550 static const struct pci_device_id ahci_pci_tbl[] = {
551 	/* Intel */
552 	{ PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
553 	{ PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
554 	{ PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
555 	{ PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
556 	{ PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
557 	{ PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
558 	{ PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
559 	{ PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
560 	{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
561 	{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
562 	{ PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
563 	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8 */
564 	{ PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
565 	{ PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
566 	{ PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
567 	{ PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
568 	{ PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
569 	{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
570 	{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
571 	{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
572 	{ PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
573 	{ PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
574 	{ PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
575 	{ PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
576 	{ PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
577 	{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
578 	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
579 	{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
580 	{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
581 	{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
582 	{ PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
583 	{ PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
584 	{ PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
585 	{ PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
586 	{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
587 	{ PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
588 	{ PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
589 	{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
590 	{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
591 	{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
592 	{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
593 	{ PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
594 	{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
595 	{ PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
596 	{ PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
597 	{ PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
598 
599 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
600 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
601 	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
602 
603 	/* ATI */
604 	{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
605 	{ PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
606 	{ PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
607 	{ PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
608 	{ PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
609 	{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
610 	{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
611 
612 	/* AMD */
613 	{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
614 	/* AMD is using RAID class only for ahci controllers */
615 	{ PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
616 	  PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
617 
618 	/* VIA */
619 	{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
620 	{ PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
621 
622 	/* NVIDIA */
623 	{ PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 },	/* MCP65 */
624 	{ PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 },	/* MCP65 */
625 	{ PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 },	/* MCP65 */
626 	{ PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 },	/* MCP65 */
627 	{ PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 },	/* MCP65 */
628 	{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 },	/* MCP65 */
629 	{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 },	/* MCP65 */
630 	{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 },	/* MCP65 */
631 	{ PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq },	/* MCP67 */
632 	{ PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq },	/* MCP67 */
633 	{ PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq },	/* MCP67 */
634 	{ PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq },	/* MCP67 */
635 	{ PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq },	/* MCP67 */
636 	{ PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq },	/* MCP67 */
637 	{ PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq },	/* MCP67 */
638 	{ PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq },	/* MCP67 */
639 	{ PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq },	/* MCP67 */
640 	{ PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq },	/* MCP67 */
641 	{ PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq },	/* MCP67 */
642 	{ PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq },	/* MCP67 */
643 	{ PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq },	/* Linux ID */
644 	{ PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq },	/* MCP73 */
645 	{ PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq },	/* MCP73 */
646 	{ PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq },	/* MCP73 */
647 	{ PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq },	/* MCP73 */
648 	{ PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq },	/* MCP73 */
649 	{ PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq },	/* MCP73 */
650 	{ PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq },	/* MCP73 */
651 	{ PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq },	/* MCP73 */
652 	{ PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq },	/* MCP73 */
653 	{ PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq },	/* MCP73 */
654 	{ PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq },	/* MCP73 */
655 	{ PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq },	/* MCP73 */
656 	{ PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci },		/* MCP77 */
657 	{ PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci },		/* MCP77 */
658 	{ PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci },		/* MCP77 */
659 	{ PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci },		/* MCP77 */
660 	{ PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci },		/* MCP77 */
661 	{ PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci },		/* MCP77 */
662 	{ PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci },		/* MCP77 */
663 	{ PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci },		/* MCP77 */
664 	{ PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci },		/* MCP77 */
665 	{ PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci },		/* MCP77 */
666 	{ PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci },		/* MCP77 */
667 	{ PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci },		/* MCP77 */
668 	{ PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci },		/* MCP79 */
669 	{ PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci },		/* MCP79 */
670 	{ PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci },		/* MCP79 */
671 	{ PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci },		/* MCP79 */
672 	{ PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci },		/* MCP79 */
673 	{ PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci },		/* MCP79 */
674 	{ PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci },		/* MCP79 */
675 	{ PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci },		/* MCP79 */
676 	{ PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci },		/* MCP79 */
677 	{ PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci },		/* MCP79 */
678 	{ PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci },		/* MCP79 */
679 	{ PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci },		/* MCP79 */
680 	{ PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci },		/* MCP89 */
681 	{ PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci },		/* MCP89 */
682 	{ PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci },		/* MCP89 */
683 	{ PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci },		/* MCP89 */
684 	{ PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci },		/* MCP89 */
685 	{ PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci },		/* MCP89 */
686 	{ PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci },		/* MCP89 */
687 	{ PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci },		/* MCP89 */
688 	{ PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci },		/* MCP89 */
689 	{ PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci },		/* MCP89 */
690 	{ PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci },		/* MCP89 */
691 	{ PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci },		/* MCP89 */
692 
693 	/* SiS */
694 	{ PCI_VDEVICE(SI, 0x1184), board_ahci },		/* SiS 966 */
695 	{ PCI_VDEVICE(SI, 0x1185), board_ahci },		/* SiS 968 */
696 	{ PCI_VDEVICE(SI, 0x0186), board_ahci },		/* SiS 968 */
697 
698 	/* Marvell */
699 	{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv },	/* 6145 */
700 	{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv },	/* 6121 */
701 
702 	/* Promise */
703 	{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },	/* PDC42819 */
704 
705 	/* Generic, PCI class code for AHCI */
706 	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
707 	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
708 
709 	{ }	/* terminate list */
710 };
711 
712 
713 static struct pci_driver ahci_pci_driver = {
714 	.name			= DRV_NAME,
715 	.id_table		= ahci_pci_tbl,
716 	.probe			= ahci_init_one,
717 	.remove			= ata_pci_remove_one,
718 #ifdef CONFIG_PM
719 	.suspend		= ahci_pci_device_suspend,
720 	.resume			= ahci_pci_device_resume,
721 #endif
722 };
723 
724 static int ahci_em_messages = 1;
725 module_param(ahci_em_messages, int, 0444);
726 /* add other LED protocol types when they become supported */
727 MODULE_PARM_DESC(ahci_em_messages,
728 	"Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED)");
729 
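/* Default to off when the pata_marvell driver is configured so that it can
 * drive the chip; marvell_enable=1 makes this driver handle Marvell SATA
 * via AHCI instead.
 */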
730 #if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
731 static int marvell_enable;
732 #else
733 static int marvell_enable = 1;
734 #endif
735 module_param(marvell_enable, int, 0644);
736 MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
737 
738 
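/* CAP.NP (bits 4:0) holds the number of implemented ports minus one */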
739 static inline int ahci_nr_ports(u32 cap)
740 {
741 	return (cap & 0x1f) + 1;
742 }
743 
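/* per-port register banks start at ABAR offset 0x100, spaced 0x80 bytes apart */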
744 static inline void __iomem *__ahci_port_base(struct ata_host *host,
745 					     unsigned int port_no)
746 {
747 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
748 
749 	return mmio + 0x100 + (port_no * 0x80);
750 }
751 
752 static inline void __iomem *ahci_port_base(struct ata_port *ap)
753 {
754 	return __ahci_port_base(ap->host, ap->port_no);
755 }
756 
757 static void ahci_enable_ahci(void __iomem *mmio)
758 {
759 	int i;
760 	u32 tmp;
761 
762 	/* turn on AHCI_EN */
763 	tmp = readl(mmio + HOST_CTL);
764 	if (tmp & HOST_AHCI_EN)
765 		return;
766 
767 	/* Some controllers need AHCI_EN to be written multiple times.
768 	 * Try a few times before giving up.
769 	 */
770 	for (i = 0; i < 5; i++) {
771 		tmp |= HOST_AHCI_EN;
772 		writel(tmp, mmio + HOST_CTL);
773 		tmp = readl(mmio + HOST_CTL);	/* flush && sanity check */
774 		if (tmp & HOST_AHCI_EN)
775 			return;
776 		msleep(10);
777 	}
778 
779 	WARN_ON(1);
780 }
781 
782 static ssize_t ahci_show_host_caps(struct device *dev,
783 				   struct device_attribute *attr, char *buf)
784 {
785 	struct Scsi_Host *shost = class_to_shost(dev);
786 	struct ata_port *ap = ata_shost_to_port(shost);
787 	struct ahci_host_priv *hpriv = ap->host->private_data;
788 
789 	return sprintf(buf, "%x\n", hpriv->cap);
790 }
791 
792 static ssize_t ahci_show_host_cap2(struct device *dev,
793 				   struct device_attribute *attr, char *buf)
794 {
795 	struct Scsi_Host *shost = class_to_shost(dev);
796 	struct ata_port *ap = ata_shost_to_port(shost);
797 	struct ahci_host_priv *hpriv = ap->host->private_data;
798 
799 	return sprintf(buf, "%x\n", hpriv->cap2);
800 }
801 
802 static ssize_t ahci_show_host_version(struct device *dev,
803 				   struct device_attribute *attr, char *buf)
804 {
805 	struct Scsi_Host *shost = class_to_shost(dev);
806 	struct ata_port *ap = ata_shost_to_port(shost);
807 	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
808 
809 	return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
810 }
811 
812 static ssize_t ahci_show_port_cmd(struct device *dev,
813 				  struct device_attribute *attr, char *buf)
814 {
815 	struct Scsi_Host *shost = class_to_shost(dev);
816 	struct ata_port *ap = ata_shost_to_port(shost);
817 	void __iomem *port_mmio = ahci_port_base(ap);
818 
819 	return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
820 }
821 
822 /**
823  *	ahci_save_initial_config - Save and fixup initial config values
824  *	@pdev: target PCI device
825  *	@hpriv: host private area to store config values
826  *
827  *	Some registers containing configuration info might be set up by
828  *	BIOS and might be cleared on reset.  This function saves the
829  *	initial values of those registers into @hpriv such that they
830  *	can be restored after controller reset.
831  *
832  *	If inconsistent, config values are fixed up by this function.
833  *
834  *	LOCKING:
835  *	None.
836  */
837 static void ahci_save_initial_config(struct pci_dev *pdev,
838 				     struct ahci_host_priv *hpriv)
839 {
840 	void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
841 	u32 cap, cap2, vers, port_map;
842 	int i;
843 	int mv;
844 
845 	/* make sure AHCI mode is enabled before accessing CAP */
846 	ahci_enable_ahci(mmio);
847 
848 	/* Values prefixed with saved_ are written back to host after
849 	 * reset.  Values without are used for driver operation.
850 	 */
851 	hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
852 	hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
853 
854 	/* CAP2 register is only defined for AHCI 1.2 and later */
855 	vers = readl(mmio + HOST_VERSION);
856 	if ((vers >> 16) > 1 ||
857 	   ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
858 		hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
859 	else
860 		hpriv->saved_cap2 = cap2 = 0;
861 
862 	/* some chips have errata preventing 64bit use */
863 	if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
864 		dev_printk(KERN_INFO, &pdev->dev,
865 			   "controller can't do 64bit DMA, forcing 32bit\n");
866 		cap &= ~HOST_CAP_64;
867 	}
868 
869 	if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
870 		dev_printk(KERN_INFO, &pdev->dev,
871 			   "controller can't do NCQ, turning off CAP_NCQ\n");
872 		cap &= ~HOST_CAP_NCQ;
873 	}
874 
875 	if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
876 		dev_printk(KERN_INFO, &pdev->dev,
877 			   "controller can do NCQ, turning on CAP_NCQ\n");
878 		cap |= HOST_CAP_NCQ;
879 	}
880 
881 	if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
882 		dev_printk(KERN_INFO, &pdev->dev,
883 			   "controller can't do PMP, turning off CAP_PMP\n");
884 		cap &= ~HOST_CAP_PMP;
885 	}
886 
887 	if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
888 		dev_printk(KERN_INFO, &pdev->dev,
889 			   "controller can't do SNTF, turning off CAP_SNTF\n");
890 		cap &= ~HOST_CAP_SNTF;
891 	}
892 
893 	if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
894 	    port_map != 1) {
895 		dev_printk(KERN_INFO, &pdev->dev,
896 			   "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
897 			   port_map, 1);
898 		port_map = 1;
899 	}
900 
901 	/*
902 	 * Temporary Marvell 6145 hack: PATA port presence
903 	 * is asserted through the standard AHCI port
904 	 * presence register, as bit 4 (counting from 0)
905 	 */
906 	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
907 		if (pdev->device == 0x6121)
908 			mv = 0x3;
909 		else
910 			mv = 0xf;
911 		dev_printk(KERN_ERR, &pdev->dev,
912 			   "MV_AHCI HACK: port_map %x -> %x\n",
913 			   port_map,
914 			   port_map & mv);
915 		dev_printk(KERN_ERR, &pdev->dev,
916 			  "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
917 
918 		port_map &= mv;
919 	}
920 
921 	/* cross check port_map and cap.n_ports */
922 	if (port_map) {
923 		int map_ports = 0;
924 
925 		for (i = 0; i < AHCI_MAX_PORTS; i++)
926 			if (port_map & (1 << i))
927 				map_ports++;
928 
929 		/* If PI has more ports than n_ports, whine, clear
930 		 * port_map and let it be generated from n_ports.
931 		 */
932 		if (map_ports > ahci_nr_ports(cap)) {
933 			dev_printk(KERN_WARNING, &pdev->dev,
934 				   "implemented port map (0x%x) contains more "
935 				   "ports than nr_ports (%u), using nr_ports\n",
936 				   port_map, ahci_nr_ports(cap));
937 			port_map = 0;
938 		}
939 	}
940 
941 	/* fabricate port_map from cap.nr_ports */
942 	if (!port_map) {
943 		port_map = (1 << ahci_nr_ports(cap)) - 1;
944 		dev_printk(KERN_WARNING, &pdev->dev,
945 			   "forcing PORTS_IMPL to 0x%x\n", port_map);
946 
947 		/* write the fixed up value to the PI register */
948 		hpriv->saved_port_map = port_map;
949 	}
950 
951 	/* record values to use during operation */
952 	hpriv->cap = cap;
953 	hpriv->cap2 = cap2;
954 	hpriv->port_map = port_map;
955 }
956 
957 /**
958  *	ahci_restore_initial_config - Restore initial config
959  *	@host: target ATA host
960  *
961  *	Restore initial config stored by ahci_save_initial_config().
962  *
963  *	LOCKING:
964  *	None.
965  */
966 static void ahci_restore_initial_config(struct ata_host *host)
967 {
968 	struct ahci_host_priv *hpriv = host->private_data;
969 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
970 
971 	writel(hpriv->saved_cap, mmio + HOST_CAP);
972 	if (hpriv->saved_cap2)
973 		writel(hpriv->saved_cap2, mmio + HOST_CAP2);
974 	writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
975 	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */
976 }
977 
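/* map SCR registers to their fixed port MMIO offsets; SNotification is only
 * exposed when the controller advertises CAP.SNTF
 */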
978 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
979 {
980 	static const int offset[] = {
981 		[SCR_STATUS]		= PORT_SCR_STAT,
982 		[SCR_CONTROL]		= PORT_SCR_CTL,
983 		[SCR_ERROR]		= PORT_SCR_ERR,
984 		[SCR_ACTIVE]		= PORT_SCR_ACT,
985 		[SCR_NOTIFICATION]	= PORT_SCR_NTF,
986 	};
987 	struct ahci_host_priv *hpriv = ap->host->private_data;
988 
989 	if (sc_reg < ARRAY_SIZE(offset) &&
990 	    (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
991 		return offset[sc_reg];
992 	return 0;
993 }
994 
995 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
996 {
997 	void __iomem *port_mmio = ahci_port_base(link->ap);
998 	int offset = ahci_scr_offset(link->ap, sc_reg);
999 
1000 	if (offset) {
1001 		*val = readl(port_mmio + offset);
1002 		return 0;
1003 	}
1004 	return -EINVAL;
1005 }
1006 
1007 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1008 {
1009 	void __iomem *port_mmio = ahci_port_base(link->ap);
1010 	int offset = ahci_scr_offset(link->ap, sc_reg);
1011 
1012 	if (offset) {
1013 		writel(val, port_mmio + offset);
1014 		return 0;
1015 	}
1016 	return -EINVAL;
1017 }
1018 
1019 static void ahci_start_engine(struct ata_port *ap)
1020 {
1021 	void __iomem *port_mmio = ahci_port_base(ap);
1022 	u32 tmp;
1023 
1024 	/* start DMA */
1025 	tmp = readl(port_mmio + PORT_CMD);
1026 	tmp |= PORT_CMD_START;
1027 	writel(tmp, port_mmio + PORT_CMD);
1028 	readl(port_mmio + PORT_CMD); /* flush */
1029 }
1030 
1031 static int ahci_stop_engine(struct ata_port *ap)
1032 {
1033 	void __iomem *port_mmio = ahci_port_base(ap);
1034 	u32 tmp;
1035 
1036 	tmp = readl(port_mmio + PORT_CMD);
1037 
1038 	/* check if the HBA is idle */
1039 	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
1040 		return 0;
1041 
1042 	/* setting HBA to idle */
1043 	tmp &= ~PORT_CMD_START;
1044 	writel(tmp, port_mmio + PORT_CMD);
1045 
1046 	/* wait for engine to stop. This could be as long as 500 msec */
1047 	tmp = ata_wait_register(port_mmio + PORT_CMD,
1048 				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
1049 	if (tmp & PORT_CMD_LIST_ON)
1050 		return -EIO;
1051 
1052 	return 0;
1053 }
1054 
1055 static void ahci_start_fis_rx(struct ata_port *ap)
1056 {
1057 	void __iomem *port_mmio = ahci_port_base(ap);
1058 	struct ahci_host_priv *hpriv = ap->host->private_data;
1059 	struct ahci_port_priv *pp = ap->private_data;
1060 	u32 tmp;
1061 
1062 	/* set FIS registers */
1063 	if (hpriv->cap & HOST_CAP_64)
1064 		writel((pp->cmd_slot_dma >> 16) >> 16,
1065 		       port_mmio + PORT_LST_ADDR_HI);
1066 	writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
1067 
1068 	if (hpriv->cap & HOST_CAP_64)
1069 		writel((pp->rx_fis_dma >> 16) >> 16,
1070 		       port_mmio + PORT_FIS_ADDR_HI);
1071 	writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
1072 
1073 	/* enable FIS reception */
1074 	tmp = readl(port_mmio + PORT_CMD);
1075 	tmp |= PORT_CMD_FIS_RX;
1076 	writel(tmp, port_mmio + PORT_CMD);
1077 
1078 	/* flush */
1079 	readl(port_mmio + PORT_CMD);
1080 }
1081 
1082 static int ahci_stop_fis_rx(struct ata_port *ap)
1083 {
1084 	void __iomem *port_mmio = ahci_port_base(ap);
1085 	u32 tmp;
1086 
1087 	/* disable FIS reception */
1088 	tmp = readl(port_mmio + PORT_CMD);
1089 	tmp &= ~PORT_CMD_FIS_RX;
1090 	writel(tmp, port_mmio + PORT_CMD);
1091 
1092 	/* wait for completion, spec says 500ms, give it 1000 */
1093 	tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
1094 				PORT_CMD_FIS_ON, 10, 1000);
1095 	if (tmp & PORT_CMD_FIS_ON)
1096 		return -EBUSY;
1097 
1098 	return 0;
1099 }
1100 
1101 static void ahci_power_up(struct ata_port *ap)
1102 {
1103 	struct ahci_host_priv *hpriv = ap->host->private_data;
1104 	void __iomem *port_mmio = ahci_port_base(ap);
1105 	u32 cmd;
1106 
1107 	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1108 
1109 	/* spin up device */
1110 	if (hpriv->cap & HOST_CAP_SSS) {
1111 		cmd |= PORT_CMD_SPIN_UP;
1112 		writel(cmd, port_mmio + PORT_CMD);
1113 	}
1114 
1115 	/* wake up link */
1116 	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
1117 }
1118 
1119 static void ahci_disable_alpm(struct ata_port *ap)
1120 {
1121 	struct ahci_host_priv *hpriv = ap->host->private_data;
1122 	void __iomem *port_mmio = ahci_port_base(ap);
1123 	u32 cmd;
1124 	struct ahci_port_priv *pp = ap->private_data;
1125 
1126 	/* IPM bits should be disabled by libata-core */
1127 	/* get the existing command bits */
1128 	cmd = readl(port_mmio + PORT_CMD);
1129 
1130 	/* disable ALPM and ASP */
1131 	cmd &= ~PORT_CMD_ASP;
1132 	cmd &= ~PORT_CMD_ALPE;
1133 
1134 	/* force the interface back to active */
1135 	cmd |= PORT_CMD_ICC_ACTIVE;
1136 
1137 	/* write out new cmd value */
1138 	writel(cmd, port_mmio + PORT_CMD);
1139 	cmd = readl(port_mmio + PORT_CMD);
1140 
1141 	/* wait 10ms to be sure we've come out of any low power state */
1142 	msleep(10);
1143 
1144 	/* clear out any PhyRdy stuff from interrupt status */
1145 	writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
1146 
1147 	/* go ahead and clean out PhyRdy Change from Serror too */
1148 	ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
1149 
1150 	/*
1151  	 * Clear the flag so that PhyRdy state changes are no longer
1152  	 * ignored
1153  	 */
1154 	hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
1155 
1156 	/*
1157  	 * Enable interrupts on Phy Ready.
1158  	 */
1159 	pp->intr_mask |= PORT_IRQ_PHYRDY;
1160 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1161 
1162 	/*
1163  	 * don't change the link pm policy - we can be called
1164  	 * just to turn off link pm temporarily
1165  	 */
1166 }
1167 
1168 static int ahci_enable_alpm(struct ata_port *ap,
1169 	enum link_pm policy)
1170 {
1171 	struct ahci_host_priv *hpriv = ap->host->private_data;
1172 	void __iomem *port_mmio = ahci_port_base(ap);
1173 	u32 cmd;
1174 	struct ahci_port_priv *pp = ap->private_data;
1175 	u32 asp;
1176 
1177 	/* Make sure the host is capable of link power management */
1178 	if (!(hpriv->cap & HOST_CAP_ALPM))
1179 		return -EINVAL;
1180 
1181 	switch (policy) {
1182 	case MAX_PERFORMANCE:
1183 	case NOT_AVAILABLE:
1184 		/*
1185  		 * if we came here with NOT_AVAILABLE,
1186  		 * it just means this is the first time we
1187  		 * have tried to enable - default to max performance,
1188  		 * and let the user go to lower power modes on request.
1189  		 */
1190 		ahci_disable_alpm(ap);
1191 		return 0;
1192 	case MIN_POWER:
1193 		/* configure HBA to enter SLUMBER */
1194 		asp = PORT_CMD_ASP;
1195 		break;
1196 	case MEDIUM_POWER:
1197 		/* configure HBA to enter PARTIAL */
1198 		asp = 0;
1199 		break;
1200 	default:
1201 		return -EINVAL;
1202 	}
1203 
1204 	/*
1205  	 * Disable interrupts on Phy Ready. This keeps us from
1206  	 * getting woken up due to spurious phy ready interrupts
1207 	 * TBD - Hot plug should be done via polling now, is
1208 	 * that even supported?
1209  	 */
1210 	pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1211 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1212 
1213 	/*
1214  	 * Set a flag to indicate that we should ignore all PhyRdy
1215  	 * state changes since these can happen now whenever we
1216  	 * change link state
1217  	 */
1218 	hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1219 
1220 	/* get the existing command bits */
1221 	cmd = readl(port_mmio + PORT_CMD);
1222 
1223 	/*
1224  	 * Set ASP based on Policy
1225  	 */
1226 	cmd |= asp;
1227 
1228 	/*
1229  	 * Setting this bit will instruct the HBA to aggressively
1230  	 * enter a lower power link state when it's appropriate and
1231  	 * based on the value set above for ASP
1232  	 */
1233 	cmd |= PORT_CMD_ALPE;
1234 
1235 	/* write out new cmd value */
1236 	writel(cmd, port_mmio + PORT_CMD);
1237 	cmd = readl(port_mmio + PORT_CMD);
1238 
1239 	/* IPM bits should be set by libata-core */
1240 	return 0;
1241 }
1242 
1243 #ifdef CONFIG_PM
1244 static void ahci_power_down(struct ata_port *ap)
1245 {
1246 	struct ahci_host_priv *hpriv = ap->host->private_data;
1247 	void __iomem *port_mmio = ahci_port_base(ap);
1248 	u32 cmd, scontrol;
1249 
1250 	if (!(hpriv->cap & HOST_CAP_SSS))
1251 		return;
1252 
1253 	/* put device into listen mode, first set PxSCTL.DET to 0 */
1254 	scontrol = readl(port_mmio + PORT_SCR_CTL);
1255 	scontrol &= ~0xf;
1256 	writel(scontrol, port_mmio + PORT_SCR_CTL);
1257 
1258 	/* then set PxCMD.SUD to 0 */
1259 	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1260 	cmd &= ~PORT_CMD_SPIN_UP;
1261 	writel(cmd, port_mmio + PORT_CMD);
1262 }
1263 #endif
1264 
1265 static void ahci_start_port(struct ata_port *ap)
1266 {
1267 	struct ahci_port_priv *pp = ap->private_data;
1268 	struct ata_link *link;
1269 	struct ahci_em_priv *emp;
1270 	ssize_t rc;
1271 	int i;
1272 
1273 	/* enable FIS reception */
1274 	ahci_start_fis_rx(ap);
1275 
1276 	/* enable DMA */
1277 	ahci_start_engine(ap);
1278 
1279 	/* turn on LEDs */
1280 	if (ap->flags & ATA_FLAG_EM) {
1281 		ata_for_each_link(link, ap, EDGE) {
1282 			emp = &pp->em_priv[link->pmp];
1283 
1284 			/* EM Transmit bit may be busy during init */
1285 			for (i = 0; i < EM_MAX_RETRY; i++) {
1286 				rc = ahci_transmit_led_message(ap,
1287 							       emp->led_state,
1288 							       4);
1289 				if (rc == -EBUSY)
1290 					msleep(1);
1291 				else
1292 					break;
1293 			}
1294 		}
1295 	}
1296 
1297 	if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1298 		ata_for_each_link(link, ap, EDGE)
1299 			ahci_init_sw_activity(link);
1300 
1301 }
1302 
1303 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1304 {
1305 	int rc;
1306 
1307 	/* disable DMA */
1308 	rc = ahci_stop_engine(ap);
1309 	if (rc) {
1310 		*emsg = "failed to stop engine";
1311 		return rc;
1312 	}
1313 
1314 	/* disable FIS reception */
1315 	rc = ahci_stop_fis_rx(ap);
1316 	if (rc) {
1317 		*emsg = "failed to stop FIS RX";
1318 		return rc;
1319 	}
1320 
1321 	return 0;
1322 }
1323 
1324 static int ahci_reset_controller(struct ata_host *host)
1325 {
1326 	struct pci_dev *pdev = to_pci_dev(host->dev);
1327 	struct ahci_host_priv *hpriv = host->private_data;
1328 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1329 	u32 tmp;
1330 
1331 	/* we must be in AHCI mode before using anything
1332 	 * AHCI-specific, such as HOST_RESET.
1333 	 */
1334 	ahci_enable_ahci(mmio);
1335 
1336 	/* global controller reset */
1337 	if (!ahci_skip_host_reset) {
1338 		tmp = readl(mmio + HOST_CTL);
1339 		if ((tmp & HOST_RESET) == 0) {
1340 			writel(tmp | HOST_RESET, mmio + HOST_CTL);
1341 			readl(mmio + HOST_CTL); /* flush */
1342 		}
1343 
1344 		/*
1345 		 * to perform host reset, OS should set HOST_RESET
1346 		 * and poll until this bit is read to be "0".
1347 		 * reset must complete within 1 second, or
1348 		 * the hardware should be considered fried.
1349 		 */
1350 		tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1351 					HOST_RESET, 10, 1000);
1352 
1353 		if (tmp & HOST_RESET) {
1354 			dev_printk(KERN_ERR, host->dev,
1355 				   "controller reset failed (0x%x)\n", tmp);
1356 			return -EIO;
1357 		}
1358 
1359 		/* turn on AHCI mode */
1360 		ahci_enable_ahci(mmio);
1361 
1362 		/* Some registers might be cleared on reset.  Restore
1363 		 * initial values.
1364 		 */
1365 		ahci_restore_initial_config(host);
1366 	} else
1367 		dev_printk(KERN_INFO, host->dev,
1368 			   "skipping global host reset\n");
1369 
1370 	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1371 		u16 tmp16;
1372 
1373 		/* configure PCS so that all ports in port_map are enabled */
1374 		pci_read_config_word(pdev, 0x92, &tmp16);
1375 		if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1376 			tmp16 |= hpriv->port_map;
1377 			pci_write_config_word(pdev, 0x92, tmp16);
1378 		}
1379 	}
1380 
1381 	return 0;
1382 }
1383 
1384 static void ahci_sw_activity(struct ata_link *link)
1385 {
1386 	struct ata_port *ap = link->ap;
1387 	struct ahci_port_priv *pp = ap->private_data;
1388 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1389 
1390 	if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1391 		return;
1392 
1393 	emp->activity++;
1394 	if (!timer_pending(&emp->timer))
1395 		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
1396 }
1397 
1398 static void ahci_sw_activity_blink(unsigned long arg)
1399 {
1400 	struct ata_link *link = (struct ata_link *)arg;
1401 	struct ata_port *ap = link->ap;
1402 	struct ahci_port_priv *pp = ap->private_data;
1403 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1404 	unsigned long led_message = emp->led_state;
1405 	u32 activity_led_state;
1406 	unsigned long flags;
1407 
1408 	led_message &= EM_MSG_LED_VALUE;
1409 	led_message |= ap->port_no | (link->pmp << 8);
1410 
1411 	/* check to see if we've had activity.  If so,
1412 	 * toggle state of LED and reset timer.  If not,
1413 	 * turn LED to desired idle state.
1414 	 */
1415 	spin_lock_irqsave(ap->lock, flags);
1416 	if (emp->saved_activity != emp->activity) {
1417 		emp->saved_activity = emp->activity;
1418 		/* get the current LED state */
1419 		activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
1420 
1421 		if (activity_led_state)
1422 			activity_led_state = 0;
1423 		else
1424 			activity_led_state = 1;
1425 
1426 		/* clear old state */
1427 		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1428 
1429 		/* toggle state */
1430 		led_message |= (activity_led_state << 16);
1431 		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1432 	} else {
1433 		/* switch to idle */
1434 		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1435 		if (emp->blink_policy == BLINK_OFF)
1436 			led_message |= (1 << 16);
1437 	}
1438 	spin_unlock_irqrestore(ap->lock, flags);
1439 	ahci_transmit_led_message(ap, led_message, 4);
1440 }
1441 
1442 static void ahci_init_sw_activity(struct ata_link *link)
1443 {
1444 	struct ata_port *ap = link->ap;
1445 	struct ahci_port_priv *pp = ap->private_data;
1446 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1447 
1448 	/* init activity stats, setup timer */
1449 	emp->saved_activity = emp->activity = 0;
1450 	setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1451 
1452 	/* check our blink policy and set flag for link if it's enabled */
1453 	if (emp->blink_policy)
1454 		link->flags |= ATA_LFLAG_SW_ACTIVITY;
1455 }
1456 
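/* reset the enclosure management message logic; fails if a transmission or
 * reset is already in progress
 */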
1457 static int ahci_reset_em(struct ata_host *host)
1458 {
1459 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1460 	u32 em_ctl;
1461 
1462 	em_ctl = readl(mmio + HOST_EM_CTL);
1463 	if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1464 		return -EINVAL;
1465 
1466 	writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1467 	return 0;
1468 }
1469 
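/* LED message state format (see EM_MSG_LED_*): bits 3:0 hold the HBA port,
 * bits 15:8 the PMP slot and bits 31:16 the LED values to set.
 */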
1470 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1471 					ssize_t size)
1472 {
1473 	struct ahci_host_priv *hpriv = ap->host->private_data;
1474 	struct ahci_port_priv *pp = ap->private_data;
1475 	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1476 	u32 em_ctl;
1477 	u32 message[] = {0, 0};
1478 	unsigned long flags;
1479 	int pmp;
1480 	struct ahci_em_priv *emp;
1481 
1482 	/* get the slot number from the message */
1483 	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1484 	if (pmp < EM_MAX_SLOTS)
1485 		emp = &pp->em_priv[pmp];
1486 	else
1487 		return -EINVAL;
1488 
1489 	spin_lock_irqsave(ap->lock, flags);
1490 
1491 	/*
1492 	 * if we are still busy transmitting a previous message,
1493 	 * do not allow a new one to be sent
1494 	 */
1495 	em_ctl = readl(mmio + HOST_EM_CTL);
1496 	if (em_ctl & EM_CTL_TM) {
1497 		spin_unlock_irqrestore(ap->lock, flags);
1498 		return -EBUSY;
1499 	}
1500 
1501 	/*
1502 	 * create message header - this is all zero except for
1503 	 * the message size, which is 4 bytes.
1504 	 */
1505 	message[0] |= (4 << 8);
1506 
1507 	/* ignore bits 3:0 of byte zero, fill in the port number ourselves */
1508 	message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
1509 
1510 	/* write message to EM_LOC */
1511 	writel(message[0], mmio + hpriv->em_loc);
1512 	writel(message[1], mmio + hpriv->em_loc+4);
1513 
1514 	/* save off new led state for port/slot */
1515 	emp->led_state = state;
1516 
1517 	/*
1518 	 * tell hardware to transmit the message
1519 	 */
1520 	writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1521 
1522 	spin_unlock_irqrestore(ap->lock, flags);
1523 	return size;
1524 }
1525 
1526 static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1527 {
1528 	struct ahci_port_priv *pp = ap->private_data;
1529 	struct ata_link *link;
1530 	struct ahci_em_priv *emp;
1531 	int rc = 0;
1532 
1533 	ata_for_each_link(link, ap, EDGE) {
1534 		emp = &pp->em_priv[link->pmp];
1535 		rc += sprintf(buf + rc, "%lx\n", emp->led_state);
1536 	}
1537 	return rc;
1538 }
1539 
1540 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1541 				size_t size)
1542 {
1543 	int state;
1544 	int pmp;
1545 	struct ahci_port_priv *pp = ap->private_data;
1546 	struct ahci_em_priv *emp;
1547 
1548 	state = simple_strtoul(buf, NULL, 0);
1549 
1550 	/* get the slot number from the message */
1551 	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1552 	if (pmp < EM_MAX_SLOTS)
1553 		emp = &pp->em_priv[pmp];
1554 	else
1555 		return -EINVAL;
1556 
1557 	/* mask off the activity bits if we are in sw_activity
1558 	 * mode; the user should turn off sw_activity before setting
1559 	 * the activity LED through em_message
1560 	 */
1561 	if (emp->blink_policy)
1562 		state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1563 
1564 	return ahci_transmit_led_message(ap, state, size);
1565 }
1566 
1567 static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1568 {
1569 	struct ata_link *link = dev->link;
1570 	struct ata_port *ap = link->ap;
1571 	struct ahci_port_priv *pp = ap->private_data;
1572 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1573 	u32 port_led_state = emp->led_state;
1574 
1575 	/* save the desired Activity LED behavior */
1576 	if (val == OFF) {
1577 		/* clear LFLAG */
1578 		link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1579 
1580 		/* set the LED to OFF */
1581 		port_led_state &= EM_MSG_LED_VALUE_OFF;
1582 		port_led_state |= (ap->port_no | (link->pmp << 8));
1583 		ahci_transmit_led_message(ap, port_led_state, 4);
1584 	} else {
1585 		link->flags |= ATA_LFLAG_SW_ACTIVITY;
1586 		if (val == BLINK_OFF) {
1587 			/* set LED to ON for idle */
1588 			port_led_state &= EM_MSG_LED_VALUE_OFF;
1589 			port_led_state |= (ap->port_no | (link->pmp << 8));
1590 			port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1591 			ahci_transmit_led_message(ap, port_led_state, 4);
1592 		}
1593 	}
1594 	emp->blink_policy = val;
1595 	return 0;
1596 }
1597 
1598 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1599 {
1600 	struct ata_link *link = dev->link;
1601 	struct ata_port *ap = link->ap;
1602 	struct ahci_port_priv *pp = ap->private_data;
1603 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1604 
1605 	/* display the saved value of activity behavior for this
1606 	 * disk.
1607 	 */
1608 	return sprintf(buf, "%d\n", emp->blink_policy);
1609 }
1610 
1611 static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1612 			   int port_no, void __iomem *mmio,
1613 			   void __iomem *port_mmio)
1614 {
1615 	const char *emsg = NULL;
1616 	int rc;
1617 	u32 tmp;
1618 
1619 	/* make sure port is not active */
1620 	rc = ahci_deinit_port(ap, &emsg);
1621 	if (rc)
1622 		dev_printk(KERN_WARNING, &pdev->dev,
1623 			   "%s (%d)\n", emsg, rc);
1624 
1625 	/* clear SError */
1626 	tmp = readl(port_mmio + PORT_SCR_ERR);
1627 	VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1628 	writel(tmp, port_mmio + PORT_SCR_ERR);
1629 
1630 	/* clear port IRQ */
1631 	tmp = readl(port_mmio + PORT_IRQ_STAT);
1632 	VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1633 	if (tmp)
1634 		writel(tmp, port_mmio + PORT_IRQ_STAT);
1635 
1636 	writel(1 << port_no, mmio + HOST_IRQ_STAT);
1637 }
1638 
1639 static void ahci_init_controller(struct ata_host *host)
1640 {
1641 	struct ahci_host_priv *hpriv = host->private_data;
1642 	struct pci_dev *pdev = to_pci_dev(host->dev);
1643 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1644 	int i;
1645 	void __iomem *port_mmio;
1646 	u32 tmp;
1647 	int mv;
1648 
1649 	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
1650 		if (pdev->device == 0x6121)
1651 			mv = 2;
1652 		else
1653 			mv = 4;
1654 		port_mmio = __ahci_port_base(host, mv);
1655 
1656 		writel(0, port_mmio + PORT_IRQ_MASK);
1657 
1658 		/* clear port IRQ */
1659 		tmp = readl(port_mmio + PORT_IRQ_STAT);
1660 		VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1661 		if (tmp)
1662 			writel(tmp, port_mmio + PORT_IRQ_STAT);
1663 	}
1664 
1665 	for (i = 0; i < host->n_ports; i++) {
1666 		struct ata_port *ap = host->ports[i];
1667 
1668 		port_mmio = ahci_port_base(ap);
1669 		if (ata_port_is_dummy(ap))
1670 			continue;
1671 
1672 		ahci_port_init(pdev, ap, i, mmio, port_mmio);
1673 	}
1674 
1675 	tmp = readl(mmio + HOST_CTL);
1676 	VPRINTK("HOST_CTL 0x%x\n", tmp);
1677 	writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1678 	tmp = readl(mmio + HOST_CTL);
1679 	VPRINTK("HOST_CTL 0x%x\n", tmp);
1680 }
1681 
1682 static void ahci_dev_config(struct ata_device *dev)
1683 {
1684 	struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1685 
1686 	if (hpriv->flags & AHCI_HFLAG_SECT255) {
1687 		dev->max_sectors = 255;
1688 		ata_dev_printk(dev, KERN_INFO,
1689 			       "SB600 AHCI: limiting to 255 sectors per cmd\n");
1690 	}
1691 }
1692 
1693 static unsigned int ahci_dev_classify(struct ata_port *ap)
1694 {
1695 	void __iomem *port_mmio = ahci_port_base(ap);
1696 	struct ata_taskfile tf;
1697 	u32 tmp;
1698 
1699 	tmp = readl(port_mmio + PORT_SIG);
1700 	tf.lbah		= (tmp >> 24)	& 0xff;
1701 	tf.lbam		= (tmp >> 16)	& 0xff;
1702 	tf.lbal		= (tmp >> 8)	& 0xff;
1703 	tf.nsect	= (tmp)		& 0xff;
1704 
1705 	return ata_dev_classify(&tf);
1706 }
1707 
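/* Fill the command list slot @tag with the option dword @opts and the
 * bus address of the corresponding command table.
 */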
1708 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1709 			       u32 opts)
1710 {
1711 	dma_addr_t cmd_tbl_dma;
1712 
1713 	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1714 
1715 	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1716 	pp->cmd_slot[tag].status = 0;
1717 	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1718 	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1719 }
1720 
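/* Stop the port's DMA engine and, if the device is busy or a PMP is
 * attached, issue a Command List Override (CLO) to clear BSY/DRQ before
 * restarting the engine.  Used to recover the port after failed or
 * internal commands.
 */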
1721 static int ahci_kick_engine(struct ata_port *ap)
1722 {
1723 	void __iomem *port_mmio = ahci_port_base(ap);
1724 	struct ahci_host_priv *hpriv = ap->host->private_data;
1725 	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1726 	u32 tmp;
1727 	int busy, rc;
1728 
1729 	/* stop engine */
1730 	rc = ahci_stop_engine(ap);
1731 	if (rc)
1732 		goto out_restart;
1733 
1734 	/* need to do CLO?
1735 	 * always do CLO if PMP is attached (AHCI-1.3 9.2)
1736 	 */
1737 	busy = status & (ATA_BUSY | ATA_DRQ);
1738 	if (!busy && !sata_pmp_attached(ap)) {
1739 		rc = 0;
1740 		goto out_restart;
1741 	}
1742 
1743 	if (!(hpriv->cap & HOST_CAP_CLO)) {
1744 		rc = -EOPNOTSUPP;
1745 		goto out_restart;
1746 	}
1747 
1748 	/* perform CLO */
1749 	tmp = readl(port_mmio + PORT_CMD);
1750 	tmp |= PORT_CMD_CLO;
1751 	writel(tmp, port_mmio + PORT_CMD);
1752 
1753 	rc = 0;
1754 	tmp = ata_wait_register(port_mmio + PORT_CMD,
1755 				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1756 	if (tmp & PORT_CMD_CLO)
1757 		rc = -EIO;
1758 
1759 	/* restart engine */
1760  out_restart:
1761 	ahci_start_engine(ap);
1762 	return rc;
1763 }
1764 
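/* Issue a single command through slot 0 by building its FIS directly in
 * the command table and, if @timeout_msec is non-zero, polling
 * PORT_CMD_ISSUE until the slot clears.  The engine is kicked and
 * -EBUSY returned if the command does not complete in time.
 */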
1765 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1766 				struct ata_taskfile *tf, int is_cmd, u16 flags,
1767 				unsigned long timeout_msec)
1768 {
1769 	const u32 cmd_fis_len = 5; /* five dwords */
1770 	struct ahci_port_priv *pp = ap->private_data;
1771 	void __iomem *port_mmio = ahci_port_base(ap);
1772 	u8 *fis = pp->cmd_tbl;
1773 	u32 tmp;
1774 
1775 	/* prep the command */
1776 	ata_tf_to_fis(tf, pmp, is_cmd, fis);
1777 	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1778 
1779 	/* issue & wait */
1780 	writel(1, port_mmio + PORT_CMD_ISSUE);
1781 
1782 	if (timeout_msec) {
1783 		tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1784 					1, timeout_msec);
1785 		if (tmp & 0x1) {
1786 			ahci_kick_engine(ap);
1787 			return -EBUSY;
1788 		}
1789 	} else
1790 		readl(port_mmio + PORT_CMD_ISSUE);	/* flush */
1791 
1792 	return 0;
1793 }
1794 
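/* Common AHCI softreset: kick the engine to make sure it is idle, issue
 * the two SRST H2D Register FISes (SRST set, then cleared), wait for the
 * link to become ready via @check_ready and classify the device from
 * the received signature.
 */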
1795 static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1796 			     int pmp, unsigned long deadline,
1797 			     int (*check_ready)(struct ata_link *link))
1798 {
1799 	struct ata_port *ap = link->ap;
1800 	struct ahci_host_priv *hpriv = ap->host->private_data;
1801 	const char *reason = NULL;
1802 	unsigned long now, msecs;
1803 	struct ata_taskfile tf;
1804 	int rc;
1805 
1806 	DPRINTK("ENTER\n");
1807 
1808 	/* prepare for SRST (AHCI-1.1 10.4.1) */
1809 	rc = ahci_kick_engine(ap);
1810 	if (rc && rc != -EOPNOTSUPP)
1811 		ata_link_printk(link, KERN_WARNING,
1812 				"failed to reset engine (errno=%d)\n", rc);
1813 
1814 	ata_tf_init(link->device, &tf);
1815 
1816 	/* issue the first H2D Register FIS */
1817 	msecs = 0;
1818 	now = jiffies;
1819 	if (time_after(deadline, now))
1820 		msecs = jiffies_to_msecs(deadline - now);
1821 
1822 	tf.ctl |= ATA_SRST;
1823 	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1824 				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1825 		rc = -EIO;
1826 		reason = "1st FIS failed";
1827 		goto fail;
1828 	}
1829 
1830 	/* spec says at least 5us, but be generous and sleep for 1ms */
1831 	msleep(1);
1832 
1833 	/* issue the second H2D Register FIS */
1834 	tf.ctl &= ~ATA_SRST;
1835 	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1836 
1837 	/* wait for link to become ready */
1838 	rc = ata_wait_after_reset(link, deadline, check_ready);
1839 	if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
1840 		/*
1841 		 * Workaround for cases where link online status can't
1842 		 * be trusted.  Treat device readiness timeout as link
1843 		 * offline.
1844 		 */
1845 		ata_link_printk(link, KERN_INFO,
1846 				"device not ready, treating as offline\n");
1847 		*class = ATA_DEV_NONE;
1848 	} else if (rc) {
1849 		/* link occupied, -ENODEV too is an error */
1850 		reason = "device not ready";
1851 		goto fail;
1852 	} else
1853 		*class = ahci_dev_classify(ap);
1854 
1855 	DPRINTK("EXIT, class=%u\n", *class);
1856 	return 0;
1857 
1858  fail:
1859 	ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1860 	return rc;
1861 }
1862 
1863 static int ahci_check_ready(struct ata_link *link)
1864 {
1865 	void __iomem *port_mmio = ahci_port_base(link->ap);
1866 	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1867 
1868 	return ata_check_ready(status);
1869 }
1870 
1871 static int ahci_softreset(struct ata_link *link, unsigned int *class,
1872 			  unsigned long deadline)
1873 {
1874 	int pmp = sata_srst_pmp(link);
1875 
1876 	DPRINTK("ENTER\n");
1877 
1878 	return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1879 }
1880 
1881 static int ahci_sb600_check_ready(struct ata_link *link)
1882 {
1883 	void __iomem *port_mmio = ahci_port_base(link->ap);
1884 	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1885 	u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
1886 
1887 	/*
1888 	 * There is no need to check TFDATA if BAD PMP is found due to HW bug,
1889 	 * If BAD PMP is flagged (due to a HW bug), there is no need to check
1890 	 * TFDATA; bailing out early saves the timeout delay.
1891 	if (irq_status & PORT_IRQ_BAD_PMP)
1892 		return -EIO;
1893 
1894 	return ata_check_ready(status);
1895 }
1896 
1897 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1898 				unsigned long deadline)
1899 {
1900 	struct ata_port *ap = link->ap;
1901 	void __iomem *port_mmio = ahci_port_base(ap);
1902 	int pmp = sata_srst_pmp(link);
1903 	int rc;
1904 	u32 irq_sts;
1905 
1906 	DPRINTK("ENTER\n");
1907 
1908 	rc = ahci_do_softreset(link, class, pmp, deadline,
1909 			       ahci_sb600_check_ready);
1910 
1911 	/*
1912 	 * Soft reset fails on some ATI chips with IPMS set when PMP
1913 	 * is enabled but a SATA HDD/ODD is connected to the SATA port;
1914 	 * in that case, do the soft reset again against PMP port 0.
1915 	 */
1916 	if (rc == -EIO) {
1917 		irq_sts = readl(port_mmio + PORT_IRQ_STAT);
1918 		if (irq_sts & PORT_IRQ_BAD_PMP) {
1919 			ata_link_printk(link, KERN_WARNING,
1920 					"applying SB600 PMP SRST workaround "
1921 					"and retrying\n");
1922 			rc = ahci_do_softreset(link, class, 0, deadline,
1923 					       ahci_check_ready);
1924 		}
1925 	}
1926 
1927 	return rc;
1928 }
1929 
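/* Standard AHCI hardreset.  The D2H receive area is pre-filled with a
 * BSY status so that readiness is only reported once the real signature
 * FIS arrives after the PHY reset.
 */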
1930 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1931 			  unsigned long deadline)
1932 {
1933 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1934 	struct ata_port *ap = link->ap;
1935 	struct ahci_port_priv *pp = ap->private_data;
1936 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1937 	struct ata_taskfile tf;
1938 	bool online;
1939 	int rc;
1940 
1941 	DPRINTK("ENTER\n");
1942 
1943 	ahci_stop_engine(ap);
1944 
1945 	/* clear D2H reception area to properly wait for D2H FIS */
1946 	ata_tf_init(link->device, &tf);
1947 	tf.command = 0x80;
1948 	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1949 
1950 	rc = sata_link_hardreset(link, timing, deadline, &online,
1951 				 ahci_check_ready);
1952 
1953 	ahci_start_engine(ap);
1954 
1955 	if (online)
1956 		*class = ahci_dev_classify(ap);
1957 
1958 	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1959 	return rc;
1960 }
1961 
1962 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1963 				 unsigned long deadline)
1964 {
1965 	struct ata_port *ap = link->ap;
1966 	bool online;
1967 	int rc;
1968 
1969 	DPRINTK("ENTER\n");
1970 
1971 	ahci_stop_engine(ap);
1972 
1973 	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1974 				 deadline, &online, NULL);
1975 
1976 	ahci_start_engine(ap);
1977 
1978 	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1979 
1980 	/* vt8251 doesn't clear BSY on signature FIS reception,
1981 	 * request follow-up softreset.
1982 	 */
1983 	return online ? -EAGAIN : rc;
1984 }
1985 
1986 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
1987 				unsigned long deadline)
1988 {
1989 	struct ata_port *ap = link->ap;
1990 	struct ahci_port_priv *pp = ap->private_data;
1991 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1992 	struct ata_taskfile tf;
1993 	bool online;
1994 	int rc;
1995 
1996 	ahci_stop_engine(ap);
1997 
1998 	/* clear D2H reception area to properly wait for D2H FIS */
1999 	ata_tf_init(link->device, &tf);
2000 	tf.command = 0x80;
2001 	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
2002 
2003 	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
2004 				 deadline, &online, NULL);
2005 
2006 	ahci_start_engine(ap);
2007 
2008 	/* The pseudo configuration device on SIMG4726 attached to
2009 	 * ASUS P5W-DH Deluxe doesn't send signature FIS after
2010 	 * hardreset if no device is attached to the first downstream
2011 	 * port && the pseudo device locks up on SRST w/ PMP==0.  To
2012 	 * work around this, wait for !BSY only briefly.  If BSY isn't
2013 	 * cleared, perform CLO and proceed to IDENTIFY (achieved by
2014 	 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
2015 	 *
2016 	 * Wait for two seconds.  Devices attached to the downstream port
2017 	 * which can't process the following IDENTIFY after this will
2018 	 * have to be reset again.  For most cases, this should
2019 	 * suffice while keeping probing reasonably snappy.
2020 	 */
2021 	if (online) {
2022 		rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
2023 					  ahci_check_ready);
2024 		if (rc)
2025 			ahci_kick_engine(ap);
2026 	}
2027 	return rc;
2028 }
2029 
2030 static void ahci_postreset(struct ata_link *link, unsigned int *class)
2031 {
2032 	struct ata_port *ap = link->ap;
2033 	void __iomem *port_mmio = ahci_port_base(ap);
2034 	u32 new_tmp, tmp;
2035 
2036 	ata_std_postreset(link, class);
2037 
2038 	/* Make sure port's ATAPI bit is set appropriately */
2039 	new_tmp = tmp = readl(port_mmio + PORT_CMD);
2040 	if (*class == ATA_DEV_ATAPI)
2041 		new_tmp |= PORT_CMD_ATAPI;
2042 	else
2043 		new_tmp &= ~PORT_CMD_ATAPI;
2044 	if (new_tmp != tmp) {
2045 		writel(new_tmp, port_mmio + PORT_CMD);
2046 		readl(port_mmio + PORT_CMD); /* flush */
2047 	}
2048 }
2049 
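/* Convert the qc's scatter/gather list into PRDT entries following the
 * command table header.  Each entry carries the DMA address split into
 * two dwords and the byte count minus one.  Returns the number of
 * entries filled.
 */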
2050 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
2051 {
2052 	struct scatterlist *sg;
2053 	struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
2054 	unsigned int si;
2055 
2056 	VPRINTK("ENTER\n");
2057 
2058 	/*
2059 	 * Next, the S/G list.
2060 	 */
2061 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
2062 		dma_addr_t addr = sg_dma_address(sg);
2063 		u32 sg_len = sg_dma_len(sg);
2064 
2065 		ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
2066 		ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
2067 		ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
2068 	}
2069 
2070 	return si;
2071 }
2072 
2073 static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
2074 {
2075 	struct ata_port *ap = qc->ap;
2076 	struct ahci_port_priv *pp = ap->private_data;
2077 
2078 	if (!sata_pmp_attached(ap) || pp->fbs_enabled)
2079 		return ata_std_qc_defer(qc);
2080 	else
2081 		return sata_pmp_qc_defer_cmd_switch(qc);
2082 }
2083 
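/* Build the command table (command FIS, optional ATAPI CDB and PRDT)
 * and the matching command list slot for the queued command.
 */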
2084 static void ahci_qc_prep(struct ata_queued_cmd *qc)
2085 {
2086 	struct ata_port *ap = qc->ap;
2087 	struct ahci_port_priv *pp = ap->private_data;
2088 	int is_atapi = ata_is_atapi(qc->tf.protocol);
2089 	void *cmd_tbl;
2090 	u32 opts;
2091 	const u32 cmd_fis_len = 5; /* five dwords */
2092 	unsigned int n_elem;
2093 
2094 	/*
2095 	 * Fill in command table information.  First, the header,
2096 	 * a SATA Register - Host to Device command FIS.
2097 	 */
2098 	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
2099 
2100 	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
2101 	if (is_atapi) {
2102 		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
2103 		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
2104 	}
2105 
2106 	n_elem = 0;
2107 	if (qc->flags & ATA_QCFLAG_DMAMAP)
2108 		n_elem = ahci_fill_sg(qc, cmd_tbl);
2109 
2110 	/*
2111 	 * Fill in command slot information.
2112 	 */
2113 	opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
2114 	if (qc->tf.flags & ATA_TFLAG_WRITE)
2115 		opts |= AHCI_CMD_WRITE;
2116 	if (is_atapi)
2117 		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
2118 
2119 	ahci_fill_cmd_slot(pp, qc->tag, opts);
2120 }
2121 
2122 static void ahci_fbs_dec_intr(struct ata_port *ap)
2123 {
2124 	struct ahci_port_priv *pp = ap->private_data;
2125 	void __iomem *port_mmio = ahci_port_base(ap);
2126 	u32 fbs = readl(port_mmio + PORT_FBS);
2127 	int retries = 3;
2128 
2129 	DPRINTK("ENTER\n");
2130 	BUG_ON(!pp->fbs_enabled);
2131 
2132 	/* time to wait for DEC is not specified by AHCI spec,
2133 	 * add a retry loop for safety.
2134 	 */
2135 	writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
2136 	fbs = readl(port_mmio + PORT_FBS);
2137 	while ((fbs & PORT_FBS_DEC) && retries--) {
2138 		udelay(1);
2139 		fbs = readl(port_mmio + PORT_FBS);
2140 	}
2141 
2142 	if (fbs & PORT_FBS_DEC)
2143 		dev_printk(KERN_ERR, ap->host->dev,
2144 			   "failed to clear device error\n");
2145 }
2146 
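/* Decode a port error interrupt: determine the failed link (using
 * PORT_FBS.DWE when FBS is enabled), clear SError, record EH
 * descriptors and hand the port over to EH by freezing or aborting.
 */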
2147 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
2148 {
2149 	struct ahci_host_priv *hpriv = ap->host->private_data;
2150 	struct ahci_port_priv *pp = ap->private_data;
2151 	struct ata_eh_info *host_ehi = &ap->link.eh_info;
2152 	struct ata_link *link = NULL;
2153 	struct ata_queued_cmd *active_qc;
2154 	struct ata_eh_info *active_ehi;
2155 	bool fbs_need_dec = false;
2156 	u32 serror;
2157 
2158 	/* determine active link with error */
2159 	if (pp->fbs_enabled) {
2160 		void __iomem *port_mmio = ahci_port_base(ap);
2161 		u32 fbs = readl(port_mmio + PORT_FBS);
2162 		int pmp = fbs >> PORT_FBS_DWE_OFFSET;
2163 
2164 		if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
2165 		    ata_link_online(&ap->pmp_link[pmp])) {
2166 			link = &ap->pmp_link[pmp];
2167 			fbs_need_dec = true;
2168 		}
2169 
2170 	} else
2171 		ata_for_each_link(link, ap, EDGE)
2172 			if (ata_link_active(link))
2173 				break;
2174 
2175 	if (!link)
2176 		link = &ap->link;
2177 
2178 	active_qc = ata_qc_from_tag(ap, link->active_tag);
2179 	active_ehi = &link->eh_info;
2180 
2181 	/* record irq stat */
2182 	ata_ehi_clear_desc(host_ehi);
2183 	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
2184 
2185 	/* AHCI needs SError cleared; otherwise, it might lock up */
2186 	ahci_scr_read(&ap->link, SCR_ERROR, &serror);
2187 	ahci_scr_write(&ap->link, SCR_ERROR, serror);
2188 	host_ehi->serror |= serror;
2189 
2190 	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
2191 	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
2192 		irq_stat &= ~PORT_IRQ_IF_ERR;
2193 
2194 	if (irq_stat & PORT_IRQ_TF_ERR) {
2195 		/* If qc is active, charge it; otherwise, the active
2196 		 * link.  There's no active qc on NCQ errors.  It will
2197 		 * be determined by EH by reading log page 10h.
2198 		 */
2199 		if (active_qc)
2200 			active_qc->err_mask |= AC_ERR_DEV;
2201 		else
2202 			active_ehi->err_mask |= AC_ERR_DEV;
2203 
2204 		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
2205 			host_ehi->serror &= ~SERR_INTERNAL;
2206 	}
2207 
2208 	if (irq_stat & PORT_IRQ_UNK_FIS) {
2209 		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
2210 
2211 		active_ehi->err_mask |= AC_ERR_HSM;
2212 		active_ehi->action |= ATA_EH_RESET;
2213 		ata_ehi_push_desc(active_ehi,
2214 				  "unknown FIS %08x %08x %08x %08x" ,
2215 				  unk[0], unk[1], unk[2], unk[3]);
2216 	}
2217 
2218 	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
2219 		active_ehi->err_mask |= AC_ERR_HSM;
2220 		active_ehi->action |= ATA_EH_RESET;
2221 		ata_ehi_push_desc(active_ehi, "incorrect PMP");
2222 	}
2223 
2224 	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
2225 		host_ehi->err_mask |= AC_ERR_HOST_BUS;
2226 		host_ehi->action |= ATA_EH_RESET;
2227 		ata_ehi_push_desc(host_ehi, "host bus error");
2228 	}
2229 
2230 	if (irq_stat & PORT_IRQ_IF_ERR) {
2231 		if (fbs_need_dec)
2232 			active_ehi->err_mask |= AC_ERR_DEV;
2233 		else {
2234 			host_ehi->err_mask |= AC_ERR_ATA_BUS;
2235 			host_ehi->action |= ATA_EH_RESET;
2236 		}
2237 
2238 		ata_ehi_push_desc(host_ehi, "interface fatal error");
2239 	}
2240 
2241 	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
2242 		ata_ehi_hotplugged(host_ehi);
2243 		ata_ehi_push_desc(host_ehi, "%s",
2244 			irq_stat & PORT_IRQ_CONNECT ?
2245 			"connection status changed" : "PHY RDY changed");
2246 	}
2247 
2248 	/* okay, let's hand over to EH */
2249 
2250 	if (irq_stat & PORT_IRQ_FREEZE)
2251 		ata_port_freeze(ap);
2252 	else if (fbs_need_dec) {
2253 		ata_link_abort(link);
2254 		ahci_fbs_dec_intr(ap);
2255 	} else
2256 		ata_port_abort(ap);
2257 }
2258 
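/* Per-port interrupt handler: acknowledge PORT_IRQ_STAT, route error
 * interrupts to ahci_error_intr(), handle SDB FIS / asynchronous
 * notification and complete finished commands.
 */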
2259 static void ahci_port_intr(struct ata_port *ap)
2260 {
2261 	void __iomem *port_mmio = ahci_port_base(ap);
2262 	struct ata_eh_info *ehi = &ap->link.eh_info;
2263 	struct ahci_port_priv *pp = ap->private_data;
2264 	struct ahci_host_priv *hpriv = ap->host->private_data;
2265 	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
2266 	u32 status, qc_active;
2267 	int rc;
2268 
2269 	status = readl(port_mmio + PORT_IRQ_STAT);
2270 	writel(status, port_mmio + PORT_IRQ_STAT);
2271 
2272 	/* ignore BAD_PMP while resetting */
2273 	if (unlikely(resetting))
2274 		status &= ~PORT_IRQ_BAD_PMP;
2275 
2276 	/* If we are getting PhyRdy, this is
2277 	 * just a power state change; we should
2278 	 * clear it out, plus the PhyRdy/Comm
2279 	 * Wake bits from SError.
2280 	 */
2281 	if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
2282 		(status & PORT_IRQ_PHYRDY)) {
2283 		status &= ~PORT_IRQ_PHYRDY;
2284 		ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
2285 	}
2286 
2287 	if (unlikely(status & PORT_IRQ_ERROR)) {
2288 		ahci_error_intr(ap, status);
2289 		return;
2290 	}
2291 
2292 	if (status & PORT_IRQ_SDB_FIS) {
2293 		/* If SNotification is available, leave notification
2294 		 * handling to sata_async_notification().  If not,
2295 		 * emulate it by snooping SDB FIS RX area.
2296 		 *
2297 		 * Snooping FIS RX area is probably cheaper than
2298 		 * poking SNotification, but some controllers which
2299 		 * implement SNotification, ICH9 for example, don't
2300 		 * store the AN SDB FIS into the receive area.
2301 		 */
2302 		if (hpriv->cap & HOST_CAP_SNTF)
2303 			sata_async_notification(ap);
2304 		else {
2305 			/* If the 'N' bit in word 0 of the FIS is set,
2306 			 * we just received asynchronous notification.
2307 			 * Tell libata about it.
2308 			 *
2309 			 * Lack of SNotification should not appear in
2310 			 * ahci 1.2, so the workaround is unnecessary
2311 			 * when FBS is enabled.
2312 			 */
2313 			if (pp->fbs_enabled)
2314 				WARN_ON_ONCE(1);
2315 			else {
2316 				const __le32 *f = pp->rx_fis + RX_FIS_SDB;
2317 				u32 f0 = le32_to_cpu(f[0]);
2318 				if (f0 & (1 << 15))
2319 					sata_async_notification(ap);
2320 			}
2321 		}
2322 	}
2323 
2324 	/* pp->active_link is valid iff any command is in flight */
2325 	if (ap->qc_active && pp->active_link->sactive)
2326 		qc_active = readl(port_mmio + PORT_SCR_ACT);
2327 	else
2328 		qc_active = readl(port_mmio + PORT_CMD_ISSUE);
2329 
2330 	rc = ata_qc_complete_multiple(ap, qc_active);
2331 
2332 	/* while resetting, invalid completions are expected */
2333 	if (unlikely(rc < 0 && !resetting)) {
2334 		ehi->err_mask |= AC_ERR_HSM;
2335 		ehi->action |= ATA_EH_RESET;
2336 		ata_port_freeze(ap);
2337 	}
2338 }
2339 
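/* Top-level shared interrupt handler: dispatch each pending port to
 * ahci_port_intr() and clear HOST_IRQ_STAT only after all port events
 * have been handled.
 */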
2340 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
2341 {
2342 	struct ata_host *host = dev_instance;
2343 	struct ahci_host_priv *hpriv;
2344 	unsigned int i, handled = 0;
2345 	void __iomem *mmio;
2346 	u32 irq_stat, irq_masked;
2347 
2348 	VPRINTK("ENTER\n");
2349 
2350 	hpriv = host->private_data;
2351 	mmio = host->iomap[AHCI_PCI_BAR];
2352 
2353 	/* sigh.  0xffffffff is a valid return from h/w */
2354 	irq_stat = readl(mmio + HOST_IRQ_STAT);
2355 	if (!irq_stat)
2356 		return IRQ_NONE;
2357 
2358 	irq_masked = irq_stat & hpriv->port_map;
2359 
2360 	spin_lock(&host->lock);
2361 
2362 	for (i = 0; i < host->n_ports; i++) {
2363 		struct ata_port *ap;
2364 
2365 		if (!(irq_masked & (1 << i)))
2366 			continue;
2367 
2368 		ap = host->ports[i];
2369 		if (ap) {
2370 			ahci_port_intr(ap);
2371 			VPRINTK("port %u\n", i);
2372 		} else {
2373 			VPRINTK("port %u (no irq)\n", i);
2374 			if (ata_ratelimit())
2375 				dev_printk(KERN_WARNING, host->dev,
2376 					"interrupt on disabled port %u\n", i);
2377 		}
2378 
2379 		handled = 1;
2380 	}
2381 
2382 	/* HOST_IRQ_STAT behaves as a level-triggered latch, meaning that
2383 	 * it should be cleared after all the port events are cleared;
2384 	 * otherwise, it will raise a spurious interrupt after each
2385 	 * valid one.  Please read section 10.6.2 of ahci 1.1 for more
2386 	 * information.
2387 	 *
2388 	 * Also, use the unmasked value to clear interrupt as spurious
2389 	 * pending event on a dummy port might cause screaming IRQ.
2390 	 */
2391 	writel(irq_stat, mmio + HOST_IRQ_STAT);
2392 
2393 	spin_unlock(&host->lock);
2394 
2395 	VPRINTK("EXIT\n");
2396 
2397 	return IRQ_RETVAL(handled);
2398 }
2399 
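/* Issue a prepared command: remember the active link, set PORT_SCR_ACT
 * for NCQ tags, reprogram the FBS device field when switching PMP
 * devices and finally ring PORT_CMD_ISSUE.
 */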
2400 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
2401 {
2402 	struct ata_port *ap = qc->ap;
2403 	void __iomem *port_mmio = ahci_port_base(ap);
2404 	struct ahci_port_priv *pp = ap->private_data;
2405 
2406 	/* Keep track of the currently active link.  It will be used
2407 	 * in completion path to determine whether NCQ phase is in
2408 	 * progress.
2409 	 */
2410 	pp->active_link = qc->dev->link;
2411 
2412 	if (qc->tf.protocol == ATA_PROT_NCQ)
2413 		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
2414 
2415 	if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
2416 		u32 fbs = readl(port_mmio + PORT_FBS);
2417 		fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
2418 		fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
2419 		writel(fbs, port_mmio + PORT_FBS);
2420 		pp->fbs_last_dev = qc->dev->link->pmp;
2421 	}
2422 
2423 	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
2424 
2425 	ahci_sw_activity(qc->dev->link);
2426 
2427 	return 0;
2428 }
2429 
2430 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2431 {
2432 	struct ahci_port_priv *pp = qc->ap->private_data;
2433 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2434 
2435 	if (pp->fbs_enabled)
2436 		d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
2437 
2438 	ata_tf_from_fis(d2h_fis, &qc->result_tf);
2439 	return true;
2440 }
2441 
2442 static void ahci_freeze(struct ata_port *ap)
2443 {
2444 	void __iomem *port_mmio = ahci_port_base(ap);
2445 
2446 	/* turn IRQ off */
2447 	writel(0, port_mmio + PORT_IRQ_MASK);
2448 }
2449 
2450 static void ahci_thaw(struct ata_port *ap)
2451 {
2452 	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
2453 	void __iomem *port_mmio = ahci_port_base(ap);
2454 	u32 tmp;
2455 	struct ahci_port_priv *pp = ap->private_data;
2456 
2457 	/* clear IRQ */
2458 	tmp = readl(port_mmio + PORT_IRQ_STAT);
2459 	writel(tmp, port_mmio + PORT_IRQ_STAT);
2460 	writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
2461 
2462 	/* turn IRQ back on */
2463 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2464 }
2465 
2466 static void ahci_error_handler(struct ata_port *ap)
2467 {
2468 	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2469 		/* restart engine */
2470 		ahci_stop_engine(ap);
2471 		ahci_start_engine(ap);
2472 	}
2473 
2474 	sata_pmp_error_handler(ap);
2475 }
2476 
2477 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2478 {
2479 	struct ata_port *ap = qc->ap;
2480 
2481 	/* make DMA engine forget about the failed command */
2482 	if (qc->flags & ATA_QCFLAG_FAILED)
2483 		ahci_kick_engine(ap);
2484 }
2485 
2486 static void ahci_enable_fbs(struct ata_port *ap)
2487 {
2488 	struct ahci_port_priv *pp = ap->private_data;
2489 	void __iomem *port_mmio = ahci_port_base(ap);
2490 	u32 fbs;
2491 	int rc;
2492 
2493 	if (!pp->fbs_supported)
2494 		return;
2495 
2496 	fbs = readl(port_mmio + PORT_FBS);
2497 	if (fbs & PORT_FBS_EN) {
2498 		pp->fbs_enabled = true;
2499 		pp->fbs_last_dev = -1; /* initialization */
2500 		return;
2501 	}
2502 
2503 	rc = ahci_stop_engine(ap);
2504 	if (rc)
2505 		return;
2506 
2507 	writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
2508 	fbs = readl(port_mmio + PORT_FBS);
2509 	if (fbs & PORT_FBS_EN) {
2510 		dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
2511 		pp->fbs_enabled = true;
2512 		pp->fbs_last_dev = -1; /* initialization */
2513 	} else
2514 		dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");
2515 
2516 	ahci_start_engine(ap);
2517 }
2518 
2519 static void ahci_disable_fbs(struct ata_port *ap)
2520 {
2521 	struct ahci_port_priv *pp = ap->private_data;
2522 	void __iomem *port_mmio = ahci_port_base(ap);
2523 	u32 fbs;
2524 	int rc;
2525 
2526 	if (!pp->fbs_supported)
2527 		return;
2528 
2529 	fbs = readl(port_mmio + PORT_FBS);
2530 	if ((fbs & PORT_FBS_EN) == 0) {
2531 		pp->fbs_enabled = false;
2532 		return;
2533 	}
2534 
2535 	rc = ahci_stop_engine(ap);
2536 	if (rc)
2537 		return;
2538 
2539 	writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
2540 	fbs = readl(port_mmio + PORT_FBS);
2541 	if (fbs & PORT_FBS_EN)
2542 		dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
2543 	else {
2544 		dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
2545 		pp->fbs_enabled = false;
2546 	}
2547 
2548 	ahci_start_engine(ap);
2549 }
2550 
2551 static void ahci_pmp_attach(struct ata_port *ap)
2552 {
2553 	void __iomem *port_mmio = ahci_port_base(ap);
2554 	struct ahci_port_priv *pp = ap->private_data;
2555 	u32 cmd;
2556 
2557 	cmd = readl(port_mmio + PORT_CMD);
2558 	cmd |= PORT_CMD_PMP;
2559 	writel(cmd, port_mmio + PORT_CMD);
2560 
2561 	ahci_enable_fbs(ap);
2562 
2563 	pp->intr_mask |= PORT_IRQ_BAD_PMP;
2564 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2565 }
2566 
2567 static void ahci_pmp_detach(struct ata_port *ap)
2568 {
2569 	void __iomem *port_mmio = ahci_port_base(ap);
2570 	struct ahci_port_priv *pp = ap->private_data;
2571 	u32 cmd;
2572 
2573 	ahci_disable_fbs(ap);
2574 
2575 	cmd = readl(port_mmio + PORT_CMD);
2576 	cmd &= ~PORT_CMD_PMP;
2577 	writel(cmd, port_mmio + PORT_CMD);
2578 
2579 	pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2580 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2581 }
2582 
2583 static int ahci_port_resume(struct ata_port *ap)
2584 {
2585 	ahci_power_up(ap);
2586 	ahci_start_port(ap);
2587 
2588 	if (sata_pmp_attached(ap))
2589 		ahci_pmp_attach(ap);
2590 	else
2591 		ahci_pmp_detach(ap);
2592 
2593 	return 0;
2594 }
2595 
2596 #ifdef CONFIG_PM
2597 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2598 {
2599 	const char *emsg = NULL;
2600 	int rc;
2601 
2602 	rc = ahci_deinit_port(ap, &emsg);
2603 	if (rc == 0)
2604 		ahci_power_down(ap);
2605 	else {
2606 		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2607 		ahci_start_port(ap);
2608 	}
2609 
2610 	return rc;
2611 }
2612 
2613 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2614 {
2615 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
2616 	struct ahci_host_priv *hpriv = host->private_data;
2617 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2618 	u32 ctl;
2619 
2620 	if (mesg.event & PM_EVENT_SUSPEND &&
2621 	    hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
2622 		dev_printk(KERN_ERR, &pdev->dev,
2623 			   "BIOS update required for suspend/resume\n");
2624 		return -EIO;
2625 	}
2626 
2627 	if (mesg.event & PM_EVENT_SLEEP) {
2628 		/* AHCI spec rev1.1 section 8.3.3:
2629 		 * Software must disable interrupts prior to requesting a
2630 		 * transition of the HBA to D3 state.
2631 		 */
2632 		ctl = readl(mmio + HOST_CTL);
2633 		ctl &= ~HOST_IRQ_EN;
2634 		writel(ctl, mmio + HOST_CTL);
2635 		readl(mmio + HOST_CTL); /* flush */
2636 	}
2637 
2638 	return ata_pci_device_suspend(pdev, mesg);
2639 }
2640 
2641 static int ahci_pci_device_resume(struct pci_dev *pdev)
2642 {
2643 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
2644 	int rc;
2645 
2646 	rc = ata_pci_device_do_resume(pdev);
2647 	if (rc)
2648 		return rc;
2649 
2650 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2651 		rc = ahci_reset_controller(host);
2652 		if (rc)
2653 			return rc;
2654 
2655 		ahci_init_controller(host);
2656 	}
2657 
2658 	ata_host_resume(host);
2659 
2660 	return 0;
2661 }
2662 #endif
2663 
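/* Allocate the per-port private data and the DMA area holding the
 * command list, received-FIS area and command tables, then bring the
 * port up via ahci_port_resume().
 */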
2664 static int ahci_port_start(struct ata_port *ap)
2665 {
2666 	struct ahci_host_priv *hpriv = ap->host->private_data;
2667 	struct device *dev = ap->host->dev;
2668 	struct ahci_port_priv *pp;
2669 	void *mem;
2670 	dma_addr_t mem_dma;
2671 	size_t dma_sz, rx_fis_sz;
2672 
2673 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2674 	if (!pp)
2675 		return -ENOMEM;
2676 
2677 	/* check FBS capability */
2678 	if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
2679 		void __iomem *port_mmio = ahci_port_base(ap);
2680 		u32 cmd = readl(port_mmio + PORT_CMD);
2681 		if (cmd & PORT_CMD_FBSCP)
2682 			pp->fbs_supported = true;
2683 		else
2684 			dev_printk(KERN_WARNING, dev,
2685 				   "The port is not capable of FBS\n");
2686 	}
2687 
2688 	if (pp->fbs_supported) {
2689 		dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
2690 		rx_fis_sz = AHCI_RX_FIS_SZ * 16;
2691 	} else {
2692 		dma_sz = AHCI_PORT_PRIV_DMA_SZ;
2693 		rx_fis_sz = AHCI_RX_FIS_SZ;
2694 	}
2695 
2696 	mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
2697 	if (!mem)
2698 		return -ENOMEM;
2699 	memset(mem, 0, dma_sz);
2700 
2701 	/*
2702 	 * First item in the chunk of DMA memory: the 32-slot command list,
2703 	 * each slot 32 bytes in size
2704 	 */
2705 	pp->cmd_slot = mem;
2706 	pp->cmd_slot_dma = mem_dma;
2707 
2708 	mem += AHCI_CMD_SLOT_SZ;
2709 	mem_dma += AHCI_CMD_SLOT_SZ;
2710 
2711 	/*
2712 	 * Second item: Received-FIS area
2713 	 */
2714 	pp->rx_fis = mem;
2715 	pp->rx_fis_dma = mem_dma;
2716 
2717 	mem += rx_fis_sz;
2718 	mem_dma += rx_fis_sz;
2719 
2720 	/*
2721 	 * Third item: data area for the command tables (one per command
2722 	 * slot), each holding a command FIS and its scatter-gather table
2723 	 */
2724 	pp->cmd_tbl = mem;
2725 	pp->cmd_tbl_dma = mem_dma;
2726 
2727 	/*
2728 	 * Save off initial list of interrupts to be enabled.
2729 	 * This could be changed later
2730 	 */
2731 	pp->intr_mask = DEF_PORT_IRQ;
2732 
2733 	ap->private_data = pp;
2734 
2735 	/* engage engines, captain */
2736 	return ahci_port_resume(ap);
2737 }
2738 
2739 static void ahci_port_stop(struct ata_port *ap)
2740 {
2741 	const char *emsg = NULL;
2742 	int rc;
2743 
2744 	/* de-initialize port */
2745 	rc = ahci_deinit_port(ap, &emsg);
2746 	if (rc)
2747 		ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2748 }
2749 
2750 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2751 {
2752 	int rc;
2753 
2754 	if (using_dac &&
2755 	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
2756 		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2757 		if (rc) {
2758 			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2759 			if (rc) {
2760 				dev_printk(KERN_ERR, &pdev->dev,
2761 					   "64-bit DMA enable failed\n");
2762 				return rc;
2763 			}
2764 		}
2765 	} else {
2766 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2767 		if (rc) {
2768 			dev_printk(KERN_ERR, &pdev->dev,
2769 				   "32-bit DMA enable failed\n");
2770 			return rc;
2771 		}
2772 		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2773 		if (rc) {
2774 			dev_printk(KERN_ERR, &pdev->dev,
2775 				   "32-bit consistent DMA enable failed\n");
2776 			return rc;
2777 		}
2778 	}
2779 	return 0;
2780 }
2781 
2782 static void ahci_print_info(struct ata_host *host)
2783 {
2784 	struct ahci_host_priv *hpriv = host->private_data;
2785 	struct pci_dev *pdev = to_pci_dev(host->dev);
2786 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2787 	u32 vers, cap, cap2, impl, speed;
2788 	const char *speed_s;
2789 	u16 cc;
2790 	const char *scc_s;
2791 
2792 	vers = readl(mmio + HOST_VERSION);
2793 	cap = hpriv->cap;
2794 	cap2 = hpriv->cap2;
2795 	impl = hpriv->port_map;
2796 
2797 	speed = (cap >> 20) & 0xf;
2798 	if (speed == 1)
2799 		speed_s = "1.5";
2800 	else if (speed == 2)
2801 		speed_s = "3";
2802 	else if (speed == 3)
2803 		speed_s = "6";
2804 	else
2805 		speed_s = "?";
2806 
2807 	pci_read_config_word(pdev, 0x0a, &cc);
2808 	if (cc == PCI_CLASS_STORAGE_IDE)
2809 		scc_s = "IDE";
2810 	else if (cc == PCI_CLASS_STORAGE_SATA)
2811 		scc_s = "SATA";
2812 	else if (cc == PCI_CLASS_STORAGE_RAID)
2813 		scc_s = "RAID";
2814 	else
2815 		scc_s = "unknown";
2816 
2817 	dev_printk(KERN_INFO, &pdev->dev,
2818 		"AHCI %02x%02x.%02x%02x "
2819 		"%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2820 		,
2821 
2822 		(vers >> 24) & 0xff,
2823 		(vers >> 16) & 0xff,
2824 		(vers >> 8) & 0xff,
2825 		vers & 0xff,
2826 
2827 		((cap >> 8) & 0x1f) + 1,
2828 		(cap & 0x1f) + 1,
2829 		speed_s,
2830 		impl,
2831 		scc_s);
2832 
2833 	dev_printk(KERN_INFO, &pdev->dev,
2834 		"flags: "
2835 		"%s%s%s%s%s%s%s"
2836 		"%s%s%s%s%s%s%s"
2837 		"%s%s%s%s%s%s\n"
2838 		,
2839 
2840 		cap & HOST_CAP_64 ? "64bit " : "",
2841 		cap & HOST_CAP_NCQ ? "ncq " : "",
2842 		cap & HOST_CAP_SNTF ? "sntf " : "",
2843 		cap & HOST_CAP_MPS ? "ilck " : "",
2844 		cap & HOST_CAP_SSS ? "stag " : "",
2845 		cap & HOST_CAP_ALPM ? "pm " : "",
2846 		cap & HOST_CAP_LED ? "led " : "",
2847 		cap & HOST_CAP_CLO ? "clo " : "",
2848 		cap & HOST_CAP_ONLY ? "only " : "",
2849 		cap & HOST_CAP_PMP ? "pmp " : "",
2850 		cap & HOST_CAP_FBS ? "fbs " : "",
2851 		cap & HOST_CAP_PIO_MULTI ? "pio " : "",
2852 		cap & HOST_CAP_SSC ? "slum " : "",
2853 		cap & HOST_CAP_PART ? "part " : "",
2854 		cap & HOST_CAP_CCC ? "ccc " : "",
2855 		cap & HOST_CAP_EMS ? "ems " : "",
2856 		cap & HOST_CAP_SXS ? "sxs " : "",
2857 		cap2 & HOST_CAP2_APST ? "apst " : "",
2858 		cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
2859 		cap2 & HOST_CAP2_BOH ? "boh " : ""
2860 		);
2861 }
2862 
2863 /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
2864  * hardwired to on-board SIMG 4726.  The chipset is ICH8 and doesn't
2865  * support PMP, and the 4726 either directly exports the device
2866  * attached to the first downstream port or acts as a hardware storage
2867  * controller and emulates a single ATA device (can be RAID 0/1 or some
2868  * other configuration).
2869  *
2870  * When there's no device attached to the first downstream port of the
2871  * 4726, "Config Disk" appears, which is a pseudo ATA device to
2872  * configure the 4726.  However, ATA emulation of the device is very
2873  * lame.  It doesn't send signature D2H Reg FIS after the initial
2874  * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
2875  *
2876  * The following function works around the problem by always using
2877  * hardreset on the port and not depending on receiving signature FIS
2878  * afterward.  If signature FIS isn't received soon, ATA class is
2879  * assumed without follow-up softreset.
2880  */
2881 static void ahci_p5wdh_workaround(struct ata_host *host)
2882 {
2883 	static struct dmi_system_id sysids[] = {
2884 		{
2885 			.ident = "P5W DH Deluxe",
2886 			.matches = {
2887 				DMI_MATCH(DMI_SYS_VENDOR,
2888 					  "ASUSTEK COMPUTER INC"),
2889 				DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
2890 			},
2891 		},
2892 		{ }
2893 	};
2894 	struct pci_dev *pdev = to_pci_dev(host->dev);
2895 
2896 	if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
2897 	    dmi_check_system(sysids)) {
2898 		struct ata_port *ap = host->ports[1];
2899 
2900 		dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
2901 			   "Deluxe on-board SIMG4726 workaround\n");
2902 
2903 		ap->ops = &ahci_p5wdh_ops;
2904 		ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
2905 	}
2906 }
2907 
2908 /* only some SB600 ahci controllers can do 64bit DMA */
2909 static bool ahci_sb600_enable_64bit(struct pci_dev *pdev)
2910 {
2911 	static const struct dmi_system_id sysids[] = {
2912 		/*
2913 		 * The oldest version known to be broken is 0901 and the
2914 		 * oldest known to work is 1501, which was released on 2007-10-26.
2915 		 * Enable 64bit DMA on 1501 and anything newer.
2916 		 *
2917 		 * Please read bko#9412 for more info.
2918 		 */
2919 		{
2920 			.ident = "ASUS M2A-VM",
2921 			.matches = {
2922 				DMI_MATCH(DMI_BOARD_VENDOR,
2923 					  "ASUSTeK Computer INC."),
2924 				DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
2925 			},
2926 			.driver_data = "20071026",	/* yyyymmdd */
2927 		},
2928 		/*
2929 		 * All BIOS versions for the MSI K9A2 Platinum (MS-7376)
2930 		 * support 64bit DMA.
2931 		 *
2932 		 * BIOS versions earlier than 1.5 had the Manufacturer DMI
2933 		 * field as "MICRO-STAR INTERANTIONAL CO.,LTD".
2934 		 * This spelling mistake was fixed in BIOS version 1.5, so
2935 		 * 1.5 and later have the Manufacturer as
2936 		 * "MICRO-STAR INTERNATIONAL CO.,LTD".
2937 		 * So try to match on DMI_BOARD_VENDOR of "MICRO-STAR INTER".
2938 		 *
2939 		 * BIOS versions earlier than 1.9 had a Board Product Name
2940 		 * DMI field of "MS-7376". This was changed to be
2941 		 * "K9A2 Platinum (MS-7376)" in version 1.9, but we can still
2942 		 * match on DMI_BOARD_NAME of "MS-7376".
2943 		 */
2944 		{
2945 			.ident = "MSI K9A2 Platinum",
2946 			.matches = {
2947 				DMI_MATCH(DMI_BOARD_VENDOR,
2948 					  "MICRO-STAR INTER"),
2949 				DMI_MATCH(DMI_BOARD_NAME, "MS-7376"),
2950 			},
2951 		},
2952 		{ }
2953 	};
2954 	const struct dmi_system_id *match;
2955 	int year, month, date;
2956 	char buf[9];
2957 
2958 	match = dmi_first_match(sysids);
2959 	if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
2960 	    !match)
2961 		return false;
2962 
2963 	if (!match->driver_data)
2964 		goto enable_64bit;
2965 
2966 	dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
2967 	snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
2968 
2969 	if (strcmp(buf, match->driver_data) >= 0)
2970 		goto enable_64bit;
2971 	else {
2972 		dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, "
2973 			   "forcing 32bit DMA, update BIOS\n", match->ident);
2974 		return false;
2975 	}
2976 
2977 enable_64bit:
2978 	dev_printk(KERN_WARNING, &pdev->dev, "%s: enabling 64bit DMA\n",
2979 		   match->ident);
2980 	return true;
2981 }
2982 
2983 static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
2984 {
2985 	static const struct dmi_system_id broken_systems[] = {
2986 		{
2987 			.ident = "HP Compaq nx6310",
2988 			.matches = {
2989 				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2990 				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
2991 			},
2992 			/* PCI slot number of the controller */
2993 			.driver_data = (void *)0x1FUL,
2994 		},
2995 		{
2996 			.ident = "HP Compaq 6720s",
2997 			.matches = {
2998 				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2999 				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"),
3000 			},
3001 			/* PCI slot number of the controller */
3002 			.driver_data = (void *)0x1FUL,
3003 		},
3004 
3005 		{ }	/* terminate list */
3006 	};
3007 	const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
3008 
3009 	if (dmi) {
3010 		unsigned long slot = (unsigned long)dmi->driver_data;
3011 		/* apply the quirk only to on-board controllers */
3012 		return slot == PCI_SLOT(pdev->devfn);
3013 	}
3014 
3015 	return false;
3016 }
3017 
3018 static bool ahci_broken_suspend(struct pci_dev *pdev)
3019 {
3020 	static const struct dmi_system_id sysids[] = {
3021 		/*
3022 		 * On HP dv[4-6] and HDX18 with earlier BIOSen, the link
3023 		 * to the hard disk doesn't come online after
3024 		 * resuming from STR.  Warn and fail suspend.
3025 		 */
3026 		{
3027 			.ident = "dv4",
3028 			.matches = {
3029 				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3030 				DMI_MATCH(DMI_PRODUCT_NAME,
3031 					  "HP Pavilion dv4 Notebook PC"),
3032 			},
3033 			.driver_data = "F.30", /* cutoff BIOS version */
3034 		},
3035 		{
3036 			.ident = "dv5",
3037 			.matches = {
3038 				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3039 				DMI_MATCH(DMI_PRODUCT_NAME,
3040 					  "HP Pavilion dv5 Notebook PC"),
3041 			},
3042 			.driver_data = "F.16", /* cutoff BIOS version */
3043 		},
3044 		{
3045 			.ident = "dv6",
3046 			.matches = {
3047 				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3048 				DMI_MATCH(DMI_PRODUCT_NAME,
3049 					  "HP Pavilion dv6 Notebook PC"),
3050 			},
3051 			.driver_data = "F.21",	/* cutoff BIOS version */
3052 		},
3053 		{
3054 			.ident = "HDX18",
3055 			.matches = {
3056 				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3057 				DMI_MATCH(DMI_PRODUCT_NAME,
3058 					  "HP HDX18 Notebook PC"),
3059 			},
3060 			.driver_data = "F.23",	/* cutoff BIOS version */
3061 		},
3062 		/*
3063 		 * Acer eMachines G725 has the same problem.  BIOS
3064 		 * V1.03 is known to be broken.  V3.04 is known to
3065 		 * work.  In between, there are V1.06, V2.06 and V3.03
3066 		 * that we don't have much idea about.  For now,
3067 		 * blacklist anything older than V3.04.
3068 		 */
3069 		{
3070 			.ident = "G725",
3071 			.matches = {
3072 				DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
3073 				DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
3074 			},
3075 			.driver_data = "V3.04",	/* cutoff BIOS version */
3076 		},
3077 		{ }	/* terminate list */
3078 	};
3079 	const struct dmi_system_id *dmi = dmi_first_match(sysids);
3080 	const char *ver;
3081 
3082 	if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
3083 		return false;
3084 
3085 	ver = dmi_get_system_info(DMI_BIOS_VERSION);
3086 
3087 	return !ver || strcmp(ver, dmi->driver_data) < 0;
3088 }
3089 
3090 static bool ahci_broken_online(struct pci_dev *pdev)
3091 {
3092 #define ENCODE_BUSDEVFN(bus, slot, func)			\
3093 	(void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func)))
3094 	static const struct dmi_system_id sysids[] = {
3095 		/*
3096 		 * There are several gigabyte boards which use
3097 		 * SIMG5723s configured as hardware RAID.  Certain
3098 		 * 5723 firmware revisions shipped there keep the link
3099 		 * online but fail to answer properly to SRST or
3100 		 * IDENTIFY when no device is attached downstream,
3101 		 * causing libata to retry quite a few times, leading
3102 		 * to excessive detection delay.
3103 		 *
3104 		 * As these firmwares respond to the second reset attempt
3105 		 * with an invalid device signature, treating an unknown
3106 		 * sig as offline works around the problem acceptably.
3107 		 */
3108 		{
3109 			.ident = "EP45-DQ6",
3110 			.matches = {
3111 				DMI_MATCH(DMI_BOARD_VENDOR,
3112 					  "Gigabyte Technology Co., Ltd."),
3113 				DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"),
3114 			},
3115 			.driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0),
3116 		},
3117 		{
3118 			.ident = "EP45-DS5",
3119 			.matches = {
3120 				DMI_MATCH(DMI_BOARD_VENDOR,
3121 					  "Gigabyte Technology Co., Ltd."),
3122 				DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"),
3123 			},
3124 			.driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0),
3125 		},
3126 		{ }	/* terminate list */
3127 	};
3128 #undef ENCODE_BUSDEVFN
3129 	const struct dmi_system_id *dmi = dmi_first_match(sysids);
3130 	unsigned int val;
3131 
3132 	if (!dmi)
3133 		return false;
3134 
3135 	val = (unsigned long)dmi->driver_data;
3136 
3137 	return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
3138 }
3139 
3140 #ifdef CONFIG_ATA_ACPI
3141 static void ahci_gtf_filter_workaround(struct ata_host *host)
3142 {
3143 	static const struct dmi_system_id sysids[] = {
3144 		/*
3145 		 * Aspire 3810T issues a bunch of SATA enable commands
3146 		 * via _GTF including an invalid one and one which is
3147 		 * rejected by the device.  Among the successful ones
3148 		 * is FPDMA non-zero offset enable which when enabled
3149 		 * only on the drive side leads to NCQ command
3150 		 * failures.  Filter it out.
3151 		 */
3152 		{
3153 			.ident = "Aspire 3810T",
3154 			.matches = {
3155 				DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
3156 				DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3810T"),
3157 			},
3158 			.driver_data = (void *)ATA_ACPI_FILTER_FPDMA_OFFSET,
3159 		},
3160 		{ }
3161 	};
3162 	const struct dmi_system_id *dmi = dmi_first_match(sysids);
3163 	unsigned int filter;
3164 	int i;
3165 
3166 	if (!dmi)
3167 		return;
3168 
3169 	filter = (unsigned long)dmi->driver_data;
3170 	dev_printk(KERN_INFO, host->dev,
3171 		   "applying extra ACPI _GTF filter 0x%x for %s\n",
3172 		   filter, dmi->ident);
3173 
3174 	for (i = 0; i < host->n_ports; i++) {
3175 		struct ata_port *ap = host->ports[i];
3176 		struct ata_link *link;
3177 		struct ata_device *dev;
3178 
3179 		ata_for_each_link(link, ap, EDGE)
3180 			ata_for_each_dev(dev, link, ALL)
3181 				dev->gtf_filter |= filter;
3182 	}
3183 }
3184 #else
3185 static inline void ahci_gtf_filter_workaround(struct ata_host *host)
3186 {}
3187 #endif
3188 
3189 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3190 {
3191 	static int printed_version;
3192 	unsigned int board_id = ent->driver_data;
3193 	struct ata_port_info pi = ahci_port_info[board_id];
3194 	const struct ata_port_info *ppi[] = { &pi, NULL };
3195 	struct device *dev = &pdev->dev;
3196 	struct ahci_host_priv *hpriv;
3197 	struct ata_host *host;
3198 	int n_ports, i, rc;
3199 
3200 	VPRINTK("ENTER\n");
3201 
3202 	WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
3203 
3204 	if (!printed_version++)
3205 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
3206 
3207 	/* The AHCI driver can only drive the SATA ports; the PATA driver
3208 	   can drive them all, so if both drivers are selected make sure
3209 	   AHCI stays out of the way */
3210 	if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
3211 		return -ENODEV;
3212 
3213 	/* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode.
3214 	 * At the moment, we can only use the AHCI mode. Let the users know
3215 	 * that for SAS drives they're out of luck.
3216 	 */
3217 	if (pdev->vendor == PCI_VENDOR_ID_PROMISE)
3218 		dev_printk(KERN_INFO, &pdev->dev, "PDC42819 "
3219 			   "can only drive SATA devices with this driver\n");
3220 
3221 	/* acquire resources */
3222 	rc = pcim_enable_device(pdev);
3223 	if (rc)
3224 		return rc;
3225 
3226 	/* AHCI controllers often implement SFF compatible interface.
3227 	 * Grab all PCI BARs just in case.
3228 	 */
3229 	rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
3230 	if (rc == -EBUSY)
3231 		pcim_pin_device(pdev);
3232 	if (rc)
3233 		return rc;
3234 
3235 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
3236 	    (pdev->device == 0x2652 || pdev->device == 0x2653)) {
3237 		u8 map;
3238 
3239 		/* ICH6s share the same PCI ID for both piix and ahci
3240 		 * modes.  Enabling ahci mode while MAP indicates
3241 		 * combined mode is a bad idea.  Yield to ata_piix.
3242 		 */
3243 		pci_read_config_byte(pdev, ICH_MAP, &map);
3244 		if (map & 0x3) {
3245 			dev_printk(KERN_INFO, &pdev->dev, "controller is in "
3246 				   "combined mode, can't enable AHCI mode\n");
3247 			return -ENODEV;
3248 		}
3249 	}
3250 
3251 	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
3252 	if (!hpriv)
3253 		return -ENOMEM;
3254 	hpriv->flags |= (unsigned long)pi.private_data;
3255 
3256 	/* MCP65 revision A1 and A2 can't do MSI */
3257 	if (board_id == board_ahci_mcp65 &&
3258 	    (pdev->revision == 0xa1 || pdev->revision == 0xa2))
3259 		hpriv->flags |= AHCI_HFLAG_NO_MSI;
3260 
3261 	/* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
3262 	if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
3263 		hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
3264 
3265 	/* only some SB600s can do 64bit DMA */
3266 	if (ahci_sb600_enable_64bit(pdev))
3267 		hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
3268 
3269 	if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
3270 		pci_intx(pdev, 1);
3271 
3272 	/* save initial config */
3273 	ahci_save_initial_config(pdev, hpriv);
3274 
3275 	/* prepare host */
3276 	if (hpriv->cap & HOST_CAP_NCQ) {
3277 		pi.flags |= ATA_FLAG_NCQ;
3278 		/* Auto-activate optimization is supposed to be supported on
3279 		   all AHCI controllers indicating NCQ support, but it seems
3280 		   to be broken at least on some NVIDIA MCP79 chipsets.
3281 		   Until we get info on which NVIDIA chipsets don't have this
3282 		   issue, if any, disable AA on all NVIDIA AHCIs. */
3283 		if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
3284 			pi.flags |= ATA_FLAG_FPDMA_AA;
3285 	}
3286 
3287 	if (hpriv->cap & HOST_CAP_PMP)
3288 		pi.flags |= ATA_FLAG_PMP;
3289 
3290 	if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
3291 		u8 messages;
3292 		void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
3293 		u32 em_loc = readl(mmio + HOST_EM_LOC);
3294 		u32 em_ctl = readl(mmio + HOST_EM_CTL);
3295 
3296 		messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
3297 
3298 		/* we only support LED message type right now */
3299 		if ((messages & 0x01) && (ahci_em_messages == 1)) {
3300 			/* store em_loc */
3301 			hpriv->em_loc = ((em_loc >> 16) * 4);
3302 			pi.flags |= ATA_FLAG_EM;
3303 			if (!(em_ctl & EM_CTL_ALHD))
3304 				pi.flags |= ATA_FLAG_SW_ACTIVITY;
3305 		}
3306 	}
3307 
3308 	if (ahci_broken_system_poweroff(pdev)) {
3309 		pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
3310 		dev_info(&pdev->dev,
3311 			"quirky BIOS, skipping spindown on poweroff\n");
3312 	}
3313 
3314 	if (ahci_broken_suspend(pdev)) {
3315 		hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
3316 		dev_printk(KERN_WARNING, &pdev->dev,
3317 			   "BIOS update required for suspend/resume\n");
3318 	}
3319 
3320 	if (ahci_broken_online(pdev)) {
3321 		hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE;
3322 		dev_info(&pdev->dev,
3323 			 "online status unreliable, applying workaround\n");
3324 	}
3325 
3326 	/* CAP.NP sometimes indicates the index of the last enabled
3327 	 * port and at other times that of the last possible port, so
3328 	 * determining the maximum port number requires looking at
3329 	 * both CAP.NP and port_map.
3330 	 */
3331 	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
3332 
3333 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3334 	if (!host)
3335 		return -ENOMEM;
3336 	host->iomap = pcim_iomap_table(pdev);
3337 	host->private_data = hpriv;
3338 
3339 	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
3340 		host->flags |= ATA_HOST_PARALLEL_SCAN;
3341 	else
3342 		printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
3343 
3344 	if (pi.flags & ATA_FLAG_EM)
3345 		ahci_reset_em(host);
3346 
3347 	for (i = 0; i < host->n_ports; i++) {
3348 		struct ata_port *ap = host->ports[i];
3349 
3350 		ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
3351 		ata_port_pbar_desc(ap, AHCI_PCI_BAR,
3352 				   0x100 + ap->port_no * 0x80, "port");
3353 
3354 		/* set initial link pm policy */
3355 		ap->pm_policy = NOT_AVAILABLE;
3356 
3357 		/* set enclosure management message type */
3358 		if (ap->flags & ATA_FLAG_EM)
3359 			ap->em_message_type = ahci_em_messages;
3360 
3361 
3362 		/* disabled/not-implemented port */
3363 		if (!(hpriv->port_map & (1 << i)))
3364 			ap->ops = &ata_dummy_port_ops;
3365 	}
3366 
3367 	/* apply workaround for ASUS P5W DH Deluxe mainboard */
3368 	ahci_p5wdh_workaround(host);
3369 
3370 	/* apply gtf filter quirk */
3371 	ahci_gtf_filter_workaround(host);
3372 
3373 	/* initialize adapter */
3374 	rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
3375 	if (rc)
3376 		return rc;
3377 
3378 	rc = ahci_reset_controller(host);
3379 	if (rc)
3380 		return rc;
3381 
3382 	ahci_init_controller(host);
3383 	ahci_print_info(host);
3384 
3385 	pci_set_master(pdev);
3386 	return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
3387 				 &ahci_sht);
3388 }
3389 
3390 static int __init ahci_init(void)
3391 {
3392 	return pci_register_driver(&ahci_pci_driver);
3393 }
3394 
3395 static void __exit ahci_exit(void)
3396 {
3397 	pci_unregister_driver(&ahci_pci_driver);
3398 }
3399 
3400 
3401 MODULE_AUTHOR("Jeff Garzik");
3402 MODULE_DESCRIPTION("AHCI SATA low-level driver");
3403 MODULE_LICENSE("GPL");
3404 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
3405 MODULE_VERSION(DRV_VERSION);
3406 
3407 module_init(ahci_init);
3408 module_exit(ahci_exit);
3409