// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AppliedMicro X-Gene SoC SATA Host Controller Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Author: Loc Ho <lho@apm.com>
 *         Tuan Phan <tphan@apm.com>
 *         Suman Tripathi <stripathi@apm.com>
 *
 * NOTE: PM support is not currently available.
 */
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/ahci_platform.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include "ahci.h"

#define DRV_NAME "xgene-ahci"

/* Max # of disks per controller */
#define MAX_AHCI_CHN_PERCTR		2

/* MUX CSR */
#define SATA_ENET_CONFIG_REG		0x00000000
#define  CFG_SATA_ENET_SELECT_MASK	0x00000001

/* SATA core host controller CSR */
#define SLVRDERRATTRIBUTES		0x00000000
#define SLVWRERRATTRIBUTES		0x00000004
#define MSTRDERRATTRIBUTES		0x00000008
#define MSTWRERRATTRIBUTES		0x0000000c
#define BUSCTLREG			0x00000014
#define IOFMSTRWAUX			0x00000018
#define INTSTATUSMASK			0x0000002c
#define ERRINTSTATUS			0x00000030
#define ERRINTSTATUSMASK		0x00000034

/* SATA host AHCI CSR */
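/* The *_SET(dst, src) helpers below insert 'src' into the corresponding bitfield of 'dst' */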
#define PORTCFG				0x000000a4
#define  PORTADDR_SET(dst, src) \
		(((dst) & ~0x0000003f) | (((u32)(src)) & 0x0000003f))
#define PORTPHY1CFG		0x000000a8
#define PORTPHY1CFG_FRCPHYRDY_SET(dst, src) \
		(((dst) & ~0x00100000) | (((u32)(src) << 0x14) & 0x00100000))
#define PORTPHY2CFG			0x000000ac
#define PORTPHY3CFG			0x000000b0
#define PORTPHY4CFG			0x000000b4
#define PORTPHY5CFG			0x000000b8
#define SCTL0				0x0000012C
#define PORTPHY5CFG_RTCHG_SET(dst, src) \
		(((dst) & ~0xfff00000) | (((u32)(src) << 0x14) & 0xfff00000))
#define PORTAXICFG_EN_CONTEXT_SET(dst, src) \
		(((dst) & ~0x01000000) | (((u32)(src) << 0x18) & 0x01000000))
#define PORTAXICFG			0x000000bc
#define PORTAXICFG_OUTTRANS_SET(dst, src) \
		(((dst) & ~0x00f00000) | (((u32)(src) << 0x14) & 0x00f00000))
#define PORTRANSCFG			0x000000c8
#define PORTRANSCFG_RXWM_SET(dst, src)		\
		(((dst) & ~0x0000007f) | (((u32)(src)) & 0x0000007f))

/* SATA host controller AXI CSR */
#define INT_SLV_TMOMASK			0x00000010

/* SATA diagnostic CSR */
#define CFG_MEM_RAM_SHUTDOWN		0x00000070
#define BLOCK_MEM_RDY			0x00000074

/* Max retries for link down */
#define MAX_LINK_DOWN_RETRY 3

enum xgene_ahci_version {
	XGENE_AHCI_V1 = 1,
	XGENE_AHCI_V2,
};

struct xgene_ahci_context {
	struct ahci_host_priv *hpriv;
	struct device *dev;
	u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued */
	u32 class[MAX_AHCI_CHN_PERCTR]; /* tracking the class of device */
	void __iomem *csr_core;		/* Core CSR address of IP */
	void __iomem *csr_diag;		/* Diag CSR address of IP */
	void __iomem *csr_axi;		/* AXI CSR address of IP */
	void __iomem *csr_mux;		/* MUX CSR address of IP */
};

static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
{
	dev_dbg(ctx->dev, "Release memory from shutdown\n");
	writel(0x0, ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN);
	readl(ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN); /* Force a barrier */
	msleep(1);	/* reset may take up to 1ms */
	if (readl(ctx->csr_diag + BLOCK_MEM_RDY) != 0xFFFFFFFF) {
		dev_err(ctx->dev, "failed to release memory from shutdown\n");
		return -ENODEV;
	}
	return 0;
}

/**
 * xgene_ahci_poll_reg_val - Poll a register for a specific value.
 * @ap : ATA port of interest.
 * @reg : Register of interest.
 * @val : Value to be attained.
 * @interval : waiting interval for polling.
 * @timeout : timeout for achieving the value.
 */
static int xgene_ahci_poll_reg_val(struct ata_port *ap,
				   void __iomem *reg, unsigned int val,
				   unsigned int interval, unsigned int timeout)
{
	unsigned long deadline;
	unsigned int tmp;

	tmp = ioread32(reg);
	deadline = ata_deadline(jiffies, timeout);

	while (tmp != val && time_before(jiffies, deadline)) {
		ata_msleep(ap, interval);
		tmp = ioread32(reg);
	}

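	/*
	 * Return the last value read; callers polling for 0 treat a
	 * non-zero result as a timeout.
	 */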
	return tmp;
}

/**
 * xgene_ahci_restart_engine - Restart the dma engine.
 * @ap : ATA port of interest
 *
 * Waits for completion of multiple commands and restarts
 * the DMA engine inside the controller.
 */
static int xgene_ahci_restart_engine(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs;

	/*
	 * In case of PMP, multiple IDENTIFY DEVICE commands can be
	 * issued inside PxCI. So we need to poll PxCI for the
	 * completion of outstanding IDENTIFY DEVICE commands before
	 * restarting the DMA engine.
	 */
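	/* Poll PxCI every 1 ms, for up to 100 ms */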
	if (xgene_ahci_poll_reg_val(ap, port_mmio +
				    PORT_CMD_ISSUE, 0x0, 1, 100))
		return -EBUSY;

	hpriv->stop_engine(ap);
	ahci_start_fis_rx(ap);

	/*
	 * Enable the PxFBS.FBS_EN bit as it
	 * gets cleared due to stopping the engine.
	 */
	if (pp->fbs_supported) {
		fbs = readl(port_mmio + PORT_FBS);
		writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
		fbs = readl(port_mmio + PORT_FBS);
	}

	hpriv->start_engine(ap);

	return 0;
}

/**
 * xgene_ahci_qc_issue - Issue commands to the device
 * @qc: Command to issue
 *
 * Due to a hardware erratum with the IDENTIFY DEVICE command, the controller
 * cannot clear the BSY bit after receiving the PIO setup FIS. This causes the
 * DMA state machine to enter the CMFatalErrorUpdate state and lock up. By
 * restarting the DMA engine, the controller is brought out of the lockup
 * state.
 *
 * Due to H/W errata, the controller is unable to save the PMP
 * field fetched from the command header before sending the H2D FIS.
 * When the device returns the PMP port field in the D2H FIS, there is
 * a mismatch, which results in command completion failure. The
 * workaround is to write the pmp value to the PxFBS.DEV field before issuing
 * any command to the PMP.
 */
static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct xgene_ahci_context *ctx = hpriv->plat_data;
	int rc = 0;
	u32 port_fbs;
	void __iomem *port_mmio = ahci_port_base(ap);

	/*
	 * Write the pmp value to PxFBS.DEV
	 * in case of a Port Multiplier.
	 */
	if (ctx->class[ap->port_no] == ATA_DEV_PMP) {
		port_fbs = readl(port_mmio + PORT_FBS);
		port_fbs &= ~PORT_FBS_DEV_MASK;
		port_fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
		writel(port_fbs, port_mmio + PORT_FBS);
	}

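	/*
	 * Restart the DMA engine if the last command issued on this port was
	 * IDENTIFY DEVICE, PACKET or SMART (see the erratum described above).
	 */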
	if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) ||
	    (ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET) ||
	    (ctx->last_cmd[ap->port_no] == ATA_CMD_SMART)))
		xgene_ahci_restart_engine(ap);

	rc = ahci_qc_issue(qc);

	/* Save the last command issued */
	ctx->last_cmd[ap->port_no] = qc->tf.command;

	return rc;
}

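/*
 * Check whether the controller memory has already been released from
 * shutdown (e.g. by firmware); if so, the probe path skips clock and PHY
 * initialization.
 */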
static bool xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx)
{
	void __iomem *diagcsr = ctx->csr_diag;

	return (readl(diagcsr + CFG_MEM_RAM_SHUTDOWN) == 0 &&
	        readl(diagcsr + BLOCK_MEM_RDY) == 0xFFFFFFFF);
}

/**
 * xgene_ahci_read_id - Read ID data from the specified device
 * @dev: device
 * @tf: proposed taskfile
 * @id: data buffer
 *
 * This custom read ID function is required because the HW does not
 * support DEVSLP.
 */
static unsigned int xgene_ahci_read_id(struct ata_device *dev,
				       struct ata_taskfile *tf, __le16 *id)
{
	u32 err_mask;

	err_mask = ata_do_dev_read_id(dev, tf, id);
	if (err_mask)
		return err_mask;

	/*
	 * Mask reserved area. Word 78 spec of Link Power Management:
	 * bit15-8: reserved
	 * bit7: NCQ autosense
	 * bit6: Software settings preservation supported
	 * bit5: reserved
	 * bit4: In-order SATA delivery supported
	 * bit3: DIPM requests supported
	 * bit2: DMA Setup FIS Auto-Activate optimization supported
	 * bit1: DMA Setup FIS non-zero buffer offsets supported
	 * bit0: Reserved
	 *
	 * Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP
	 */
	id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));

	return 0;
}

static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
{
	void __iomem *mmio = ctx->hpriv->mmio;
	u32 val;

	dev_dbg(ctx->dev, "port configure mmio 0x%p channel %d\n",
		mmio, channel);
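	/*
	 * Point PORTCFG at this channel: port address 2 for channel 0,
	 * port address 3 for channel 1.
	 */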
	val = readl(mmio + PORTCFG);
	val = PORTADDR_SET(val, channel == 0 ? 2 : 3);
	writel(val, mmio + PORTCFG);
	readl(mmio + PORTCFG);  /* Force a barrier */
	/* Disable fixed rate */
	writel(0x0001fffe, mmio + PORTPHY1CFG);
	readl(mmio + PORTPHY1CFG); /* Force a barrier */
	writel(0x28183219, mmio + PORTPHY2CFG);
	readl(mmio + PORTPHY2CFG); /* Force a barrier */
	writel(0x13081008, mmio + PORTPHY3CFG);
	readl(mmio + PORTPHY3CFG); /* Force a barrier */
	writel(0x00480815, mmio + PORTPHY4CFG);
	readl(mmio + PORTPHY4CFG); /* Force a barrier */
	/* Set window negotiation */
	val = readl(mmio + PORTPHY5CFG);
	val = PORTPHY5CFG_RTCHG_SET(val, 0x300);
	writel(val, mmio + PORTPHY5CFG);
	readl(mmio + PORTPHY5CFG); /* Force a barrier */
	val = readl(mmio + PORTAXICFG);
	val = PORTAXICFG_EN_CONTEXT_SET(val, 0x1); /* Enable context mgmt */
	val = PORTAXICFG_OUTTRANS_SET(val, 0xe); /* Set outstanding */
	writel(val, mmio + PORTAXICFG);
	readl(mmio + PORTAXICFG); /* Force a barrier */
	/* Set the watermark threshold of the receive FIFO */
	val = readl(mmio + PORTRANSCFG);
	val = PORTRANSCFG_RXWM_SET(val, 0x30);
	writel(val, mmio + PORTRANSCFG);
}

/**
 * xgene_ahci_do_hardreset - Issue the actual COMRESET
 * @link: link to reset
 * @deadline: deadline jiffies for the operation
 * @online: Return value to indicate if device online
 *
 * Due to the limitation of the hardware PHY, a different set of settings is
 * required for each supported disk speed - Gen3 (6.0Gbps), Gen2 (3.0Gbps),
 * and Gen1 (1.5Gbps). Otherwise, during long I/O stress tests, the PHY will
 * report disparity errors, etc. In addition, during COMRESET, errors can be
 * reported in the register PORT_SCR_ERR. For SERR_DISPARITY and
 * SERR_10B_8B_ERR, the PHY receiver line must be reset. Also, during long
 * reboot cycle regression, the PHY sometimes reports link down even if the
 * device is present, because of speed negotiation failure, so the COMRESET
 * needs to be retried to get the link up. The following algorithm is used to
 * properly configure the hardware PHY during COMRESET:
 *
 * Alg Part 1:
 * 1. Start the PHY at Gen3 speed (default setting)
 * 2. Issue the COMRESET
 * 3. If no link, go to Alg Part 3
 * 4. If link up, determine if the negotiated speed matches the PHY
 *    configured speed
 * 5. If they match, go to Alg Part 2
 * 6. If they do not match and this is the first attempt, configure the PHY
 *    for the negotiated disk speed and repeat step 2
 * 7. Go to Alg Part 2
 *
 * Alg Part 2:
 * 1. On link up, if there is any SERR_DISPARITY or SERR_10B_8B_ERR error
 *    reported in the register PORT_SCR_ERR, then reset the PHY receiver line
 * 2. Go to Alg Part 4
 *
 * Alg Part 3:
 * 1. Check PORT_SCR_STAT to see whether device presence was detected but PHY
 *    communication establishment failed; if so, and the number of link down
 *    attempts is less than the maximum (3), go to Alg Part 1.
 * 2. Go to Alg Part 4.
 *
 * Alg Part 4:
 * 1. Clear any pending errors from the register PORT_SCR_ERR.
 *
 * NOTE: For the initial version, we will NOT support Gen1/Gen2. In addition,
 *       until the underlying PHY supports a method to reset the receiver
 *       line, a warning message will be printed on detection of
 *       SERR_DISPARITY or SERR_10B_8B_ERR errors.
 */
static int xgene_ahci_do_hardreset(struct ata_link *link,
				   unsigned long deadline, bool *online)
{
	const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
	struct ata_port *ap = link->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct xgene_ahci_context *ctx = hpriv->plat_data;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_taskfile tf;
	int link_down_retry = 0;
	int rc;
	u32 val, sstatus;

	do {
		/* clear D2H reception area to properly wait for D2H FIS */
		ata_tf_init(link->device, &tf);
		tf.status = ATA_BUSY;
		ata_tf_to_fis(&tf, 0, 0, d2h_fis);
		rc = sata_link_hardreset(link, timing, deadline, online,
				 ahci_check_ready);
		if (*online) {
			val = readl(port_mmio + PORT_SCR_ERR);
			if (val & (SERR_DISPARITY | SERR_10B_8B_ERR))
				dev_warn(ctx->dev, "link has error\n");
			break;
		}

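		/*
		 * SStatus.DET == 0x1 means device presence was detected but
		 * Phy communication was not established (see Alg Part 3
		 * above); retry the COMRESET in that case.
		 */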
		sata_scr_read(link, SCR_STATUS, &sstatus);
	} while (link_down_retry++ < MAX_LINK_DOWN_RETRY &&
		 (sstatus & 0xff) == 0x1);

	/* clear all errors if any pending */
	val = readl(port_mmio + PORT_SCR_ERR);
	writel(val, port_mmio + PORT_SCR_ERR);

	return rc;
}

static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	bool online;
	int rc;
	u32 portcmd_saved;
	u32 portclb_saved;
	u32 portclbhi_saved;
	u32 portrxfis_saved;
	u32 portrxfishi_saved;

	/* As hardreset resets these CSRs, save them to restore later */
	portcmd_saved = readl(port_mmio + PORT_CMD);
	portclb_saved = readl(port_mmio + PORT_LST_ADDR);
	portclbhi_saved = readl(port_mmio + PORT_LST_ADDR_HI);
	portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR);
	portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI);

	hpriv->stop_engine(ap);

	rc = xgene_ahci_do_hardreset(link, deadline, &online);

	/* As controller hardreset clears them, restore them */
	writel(portcmd_saved, port_mmio + PORT_CMD);
	writel(portclb_saved, port_mmio + PORT_LST_ADDR);
	writel(portclbhi_saved, port_mmio + PORT_LST_ADDR_HI);
	writel(portrxfis_saved, port_mmio + PORT_FIS_ADDR);
	writel(portrxfishi_saved, port_mmio + PORT_FIS_ADDR_HI);

	hpriv->start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	return rc;
}

static void xgene_ahci_host_stop(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;

	ahci_platform_disable_resources(hpriv);
}

/**
 * xgene_ahci_pmp_softreset - Issue the softreset to the drives connected
 *                            to a Port Multiplier.
 * @link: link to reset
 * @class: Return value to indicate class of device
 * @deadline: deadline jiffies for the operation
 *
 * Due to H/W errata, the controller is unable to save the PMP
 * field fetched from the command header before sending the H2D FIS.
 * When the device returns the PMP port field in the D2H FIS, there is
 * a mismatch, which results in command completion failure. The workaround
 * is to write the pmp value to the PxFBS.DEV field before issuing any
 * command to the PMP.
 */
static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	int pmp = sata_srst_pmp(link);
	struct ata_port *ap = link->ap;
	u32 rc;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 port_fbs;

	/*
	 * Set PxFBS.DEV field with pmp
	 * value.
	 */
	port_fbs = readl(port_mmio + PORT_FBS);
	port_fbs &= ~PORT_FBS_DEV_MASK;
	port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
	writel(port_fbs, port_mmio + PORT_FBS);

	rc = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);

	return rc;
}

/**
 * xgene_ahci_softreset - Issue the softreset to the drive.
 * @link: link to reset
 * @class: Return value to indicate class of device
 * @deadline: deadline jiffies for the operation
 *
 * Due to H/W errata, the controller is unable to save the PMP
 * field fetched from the command header before sending the H2D FIS.
 * When the device returns the PMP port field in the D2H FIS, there is
 * a mismatch, which results in command completion failure. The workaround
 * is to write the pmp value to the PxFBS.DEV field before issuing any
 * command to the PMP. Here is the algorithm used to detect a PMP:
 *
 * 1. Save the PxFBS value
 * 2. Program PxFBS.DEV with the pmp value sent by the framework. The
 *    framework initially sends 0xF for both PMP and non-PMP.
 * 3. Issue the softreset
 * 4. If the signature class is PMP, go to step 6
 * 5. Restore the original PxFBS and go to step 3
 * 6. Return
 */
static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	int pmp = sata_srst_pmp(link);
	struct ata_port *ap = link->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct xgene_ahci_context *ctx = hpriv->plat_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 port_fbs;
	u32 port_fbs_save;
	u32 retry = 1;
	u32 rc;

	port_fbs_save = readl(port_mmio + PORT_FBS);

	/*
	 * Set PxFBS.DEV field with pmp
	 * value.
	 */
	port_fbs = readl(port_mmio + PORT_FBS);
	port_fbs &= ~PORT_FBS_DEV_MASK;
	port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
	writel(port_fbs, port_mmio + PORT_FBS);

softreset_retry:
	rc = ahci_do_softreset(link, class, pmp,
			       deadline, ahci_check_ready);

	ctx->class[ap->port_no] = *class;
	if (*class != ATA_DEV_PMP) {
		/*
		 * Retry for normal drives without
		 * setting PxFBS.DEV field with pmp value.
		 */
		if (retry--) {
			writel(port_fbs_save, port_mmio + PORT_FBS);
			goto softreset_retry;
		}
	}

	return rc;
}

/**
 * xgene_ahci_handle_broken_edge_irq - Handle the broken irq.
 * @host: Host that received the irq
 * @irq_masked: HOST_IRQ_STAT value
 *
 * For hardware with a broken edge-trigger latch, the HOST_IRQ_STAT
 * register misses an edge interrupt when the clearing of HOST_IRQ_STAT
 * and the hardware reporting of PORT_IRQ_STAT happen in the same
 * clock cycle.
 * The algorithm below outlines the workaround.
 *
 * 1. Read HOST_IRQ_STAT register and save the state.
 * 2. Clear the HOST_IRQ_STAT register.
 * 3. Read back the HOST_IRQ_STAT register.
 * 4. If the HOST_IRQ_STAT register equals zero, then traverse the
 *    PORT_IRQ_STAT register of the remaining ports to check whether an
 *    interrupt was triggered at that point; otherwise go to step 6.
 * 5. If the PORT_IRQ_STAT register of any remaining port is non-zero,
 *    then update the HOST_IRQ_STAT state saved in step 1.
 * 6. Handle port interrupts.
 * 7. Exit
 */
static int xgene_ahci_handle_broken_edge_irq(struct ata_host *host,
					     u32 irq_masked)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *port_mmio;
	int i;

	if (!readl(hpriv->mmio + HOST_IRQ_STAT)) {
		for (i = 0; i < host->n_ports; i++) {
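			/* Port already flagged in the saved HOST_IRQ_STAT state */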
			if (irq_masked & (1 << i))
				continue;

			port_mmio = ahci_port_base(host->ports[i]);
			if (readl(port_mmio + PORT_IRQ_STAT))
				irq_masked |= (1 << i);
		}
	}

	return ahci_handle_port_intr(host, irq_masked);
}

static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int rc = 0;
	void __iomem *mmio;
	u32 irq_stat, irq_masked;

	hpriv = host->private_data;
	mmio = hpriv->mmio;

	/* sigh.  0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	if (!irq_stat)
		return IRQ_NONE;

	irq_masked = irq_stat & hpriv->port_map;

	spin_lock(&host->lock);

	/*
	 * HOST_IRQ_STAT behaves as an edge-triggered latch, meaning that
	 * it should be cleared before all the port events are cleared.
	 */
	writel(irq_stat, mmio + HOST_IRQ_STAT);

	rc = xgene_ahci_handle_broken_edge_irq(host, irq_masked);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(rc);
}

static struct ata_port_operations xgene_ahci_v1_ops = {
	.inherits = &ahci_ops,
	.host_stop = xgene_ahci_host_stop,
	.hardreset = xgene_ahci_hardreset,
	.read_id = xgene_ahci_read_id,
	.qc_issue = xgene_ahci_qc_issue,
	.softreset = xgene_ahci_softreset,
	.pmp_softreset = xgene_ahci_pmp_softreset
};

static const struct ata_port_info xgene_ahci_v1_port_info = {
	.flags = AHCI_FLAG_COMMON | ATA_FLAG_PMP,
	.pio_mask = ATA_PIO4,
	.udma_mask = ATA_UDMA6,
	.port_ops = &xgene_ahci_v1_ops,
};

static struct ata_port_operations xgene_ahci_v2_ops = {
	.inherits = &ahci_ops,
	.host_stop = xgene_ahci_host_stop,
	.hardreset = xgene_ahci_hardreset,
	.read_id = xgene_ahci_read_id,
};

static const struct ata_port_info xgene_ahci_v2_port_info = {
	.flags = AHCI_FLAG_COMMON | ATA_FLAG_PMP,
	.pio_mask = ATA_PIO4,
	.udma_mask = ATA_UDMA6,
	.port_ops = &xgene_ahci_v2_ops,
};

static int xgene_ahci_hw_init(struct ahci_host_priv *hpriv)
{
	struct xgene_ahci_context *ctx = hpriv->plat_data;
	int i;
	int rc;
	u32 val;

	/* Release IP RAM from shutdown */
	rc = xgene_ahci_init_memram(ctx);
	if (rc)
		return rc;

	for (i = 0; i < MAX_AHCI_CHN_PERCTR; i++)
		xgene_ahci_set_phy_cfg(ctx, i);

	/* AXI disable Mask */
	writel(0xffffffff, hpriv->mmio + HOST_IRQ_STAT);
	readl(hpriv->mmio + HOST_IRQ_STAT); /* Force a barrier */
	writel(0, ctx->csr_core + INTSTATUSMASK);
	val = readl(ctx->csr_core + INTSTATUSMASK); /* Force a barrier */
	dev_dbg(ctx->dev, "top level interrupt mask 0x%X value 0x%08X\n",
		INTSTATUSMASK, val);

	writel(0x0, ctx->csr_core + ERRINTSTATUSMASK);
	readl(ctx->csr_core + ERRINTSTATUSMASK); /* Force a barrier */
	writel(0x0, ctx->csr_axi + INT_SLV_TMOMASK);
	readl(ctx->csr_axi + INT_SLV_TMOMASK);

	/* Enable AXI Interrupt */
	writel(0xffffffff, ctx->csr_core + SLVRDERRATTRIBUTES);
	writel(0xffffffff, ctx->csr_core + SLVWRERRATTRIBUTES);
	writel(0xffffffff, ctx->csr_core + MSTRDERRATTRIBUTES);
	writel(0xffffffff, ctx->csr_core + MSTWRERRATTRIBUTES);

	/* Enable coherency */
	val = readl(ctx->csr_core + BUSCTLREG);
	val &= ~0x00000002;     /* Enable write coherency */
	val &= ~0x00000001;     /* Enable read coherency */
	writel(val, ctx->csr_core + BUSCTLREG);

	val = readl(ctx->csr_core + IOFMSTRWAUX);
	val |= (1 << 3);        /* Enable read coherency */
	val |= (1 << 9);        /* Enable write coherency */
	writel(val, ctx->csr_core + IOFMSTRWAUX);
	val = readl(ctx->csr_core + IOFMSTRWAUX);
	dev_dbg(ctx->dev, "coherency 0x%X value 0x%08X\n",
		IOFMSTRWAUX, val);

	return rc;
}

static int xgene_ahci_mux_select(struct xgene_ahci_context *ctx)
{
	u32 val;

	/* Check for optional MUX resource */
	if (!ctx->csr_mux)
		return 0;

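	/*
	 * Clearing the select bit routes the shared SATA/ENET port to SATA;
	 * fail if the bit does not read back as cleared.
	 */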
	val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
	val &= ~CFG_SATA_ENET_SELECT_MASK;
	writel(val, ctx->csr_mux + SATA_ENET_CONFIG_REG);
	val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
	return val & CFG_SATA_ENET_SELECT_MASK ? -1 : 0;
}

static const struct scsi_host_template ahci_platform_sht = {
	AHCI_SHT(DRV_NAME),
};

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_ahci_acpi_match[] = {
	{ "APMC0D0D", XGENE_AHCI_V1},
	{ "APMC0D32", XGENE_AHCI_V2},
	{},
};
MODULE_DEVICE_TABLE(acpi, xgene_ahci_acpi_match);
#endif

static const struct of_device_id xgene_ahci_of_match[] = {
	{.compatible = "apm,xgene-ahci", .data = (void *) XGENE_AHCI_V1},
	{.compatible = "apm,xgene-ahci-v2", .data = (void *) XGENE_AHCI_V2},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, xgene_ahci_of_match);

static int xgene_ahci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ahci_host_priv *hpriv;
	struct xgene_ahci_context *ctx;
	struct resource *res;
	enum xgene_ahci_version version = XGENE_AHCI_V1;
	const struct ata_port_info *ppi[] = { &xgene_ahci_v1_port_info,
					      &xgene_ahci_v2_port_info };
	int rc;

	hpriv = ahci_platform_get_resources(pdev, 0);
	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	hpriv->plat_data = ctx;
	ctx->hpriv = hpriv;
	ctx->dev = dev;

	/* Retrieve the IP core resource */
	ctx->csr_core = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(ctx->csr_core))
		return PTR_ERR(ctx->csr_core);

	/* Retrieve the IP diagnostic resource */
	ctx->csr_diag = devm_platform_ioremap_resource(pdev, 2);
	if (IS_ERR(ctx->csr_diag))
		return PTR_ERR(ctx->csr_diag);

	/* Retrieve the IP AXI resource */
	ctx->csr_axi = devm_platform_ioremap_resource(pdev, 3);
	if (IS_ERR(ctx->csr_axi))
		return PTR_ERR(ctx->csr_axi);

	/* Retrieve the optional IP mux resource */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
	if (res) {
		void __iomem *csr = devm_ioremap_resource(dev, res);
		if (IS_ERR(csr))
			return PTR_ERR(csr);

		ctx->csr_mux = csr;
	}

	if (dev->of_node) {
		version = (enum xgene_ahci_version)of_device_get_match_data(dev);
	}
#ifdef CONFIG_ACPI
	else {
		const struct acpi_device_id *acpi_id;
		struct acpi_device_info *info;
		acpi_status status;

		acpi_id = acpi_match_device(xgene_ahci_acpi_match, &pdev->dev);
		if (!acpi_id) {
			dev_warn(&pdev->dev, "No node entry in ACPI table. Assume version1\n");
			version = XGENE_AHCI_V1;
		} else if (acpi_id->driver_data) {
			version = (enum xgene_ahci_version) acpi_id->driver_data;
			status = acpi_get_object_info(ACPI_HANDLE(&pdev->dev), &info);
			if (ACPI_FAILURE(status)) {
				dev_warn(&pdev->dev, "%s: Error reading device info. Assume version1\n",
					__func__);
				version = XGENE_AHCI_V1;
			} else {
				if (info->valid & ACPI_VALID_CID)
					version = XGENE_AHCI_V2;
				kfree(info);
			}
		}
	}
#endif

	dev_dbg(dev, "VAddr 0x%p Mmio VAddr 0x%p\n", ctx->csr_core,
		hpriv->mmio);

	/* Select ATA */
	rc = xgene_ahci_mux_select(ctx);
	if (rc) {
		dev_err(dev, "SATA mux selection failed error %d\n", rc);
		return -ENODEV;
	}

	if (xgene_ahci_is_memram_inited(ctx)) {
		dev_info(dev, "skip clock and PHY initialization\n");
		goto skip_clk_phy;
	}

	/* Due to errata, HW requires full toggle transition */
	rc = ahci_platform_enable_clks(hpriv);
	if (rc)
		goto disable_resources;
	ahci_platform_disable_clks(hpriv);

	rc = ahci_platform_enable_resources(hpriv);
	if (rc)
		goto disable_resources;

	/* Configure the host controller */
	xgene_ahci_hw_init(hpriv);
skip_clk_phy:

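	/*
	 * Apply version-specific settings: V1 runs with NCQ disabled, while
	 * V2 enables FBS and uses the broken-edge-IRQ workaround handler.
	 */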
	switch (version) {
	case XGENE_AHCI_V1:
		hpriv->flags = AHCI_HFLAG_NO_NCQ;
		break;
	case XGENE_AHCI_V2:
		hpriv->flags |= AHCI_HFLAG_YES_FBS;
		hpriv->irq_handler = xgene_ahci_irq_intr;
		break;
	default:
		break;
	}

	rc = ahci_platform_init_host(pdev, hpriv, ppi[version - 1],
				     &ahci_platform_sht);
	if (rc)
		goto disable_resources;

	dev_dbg(dev, "X-Gene SATA host controller initialized\n");
	return 0;

disable_resources:
	ahci_platform_disable_resources(hpriv);
	return rc;
}

static struct platform_driver xgene_ahci_driver = {
	.probe = xgene_ahci_probe,
	.remove_new = ata_platform_remove_one,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = xgene_ahci_of_match,
		.acpi_match_table = ACPI_PTR(xgene_ahci_acpi_match),
	},
};

module_platform_driver(xgene_ahci_driver);

MODULE_DESCRIPTION("APM X-Gene AHCI SATA driver");
MODULE_AUTHOR("Loc Ho <lho@apm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.4");