// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003-2015 Broadcom Corporation
 * All Rights Reserved
 */
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>

/* SPI Configuration Register */
#define XLP_SPI_CONFIG			0x00
#define XLP_SPI_CPHA			BIT(0)
#define XLP_SPI_CPOL			BIT(1)
#define XLP_SPI_CS_POL			BIT(2)
#define XLP_SPI_TXMISO_EN		BIT(3)
#define XLP_SPI_TXMOSI_EN		BIT(4)
#define XLP_SPI_RXMISO_EN		BIT(5)
#define XLP_SPI_CS_LSBFE		BIT(10)
#define XLP_SPI_RXCAP_EN		BIT(11)

/* SPI Frequency Divider Register */
#define XLP_SPI_FDIV			0x04

/* SPI Command Register */
#define XLP_SPI_CMD			0x08
#define XLP_SPI_CMD_IDLE_MASK		0x0
#define XLP_SPI_CMD_TX_MASK		0x1
#define XLP_SPI_CMD_RX_MASK		0x2
#define XLP_SPI_CMD_TXRX_MASK		0x3
#define XLP_SPI_CMD_CONT		BIT(4)
#define XLP_SPI_XFR_BITCNT_SHIFT	16

/* SPI Status Register */
#define XLP_SPI_STATUS			0x0c
#define XLP_SPI_XFR_PENDING		BIT(0)
#define XLP_SPI_XFR_DONE		BIT(1)
#define XLP_SPI_TX_INT			BIT(2)
#define XLP_SPI_RX_INT			BIT(3)
#define XLP_SPI_TX_UF			BIT(4)
#define XLP_SPI_RX_OF			BIT(5)
#define XLP_SPI_STAT_MASK		0x3f

/* SPI Interrupt Enable Register */
#define XLP_SPI_INTR_EN			0x10
#define XLP_SPI_INTR_DONE		BIT(0)
#define XLP_SPI_INTR_TXTH		BIT(1)
#define XLP_SPI_INTR_RXTH		BIT(2)
#define XLP_SPI_INTR_TXUF		BIT(3)
#define XLP_SPI_INTR_RXOF		BIT(4)

/* SPI FIFO Threshold Register */
#define XLP_SPI_FIFO_THRESH		0x14

/* SPI FIFO Word Count Register */
#define XLP_SPI_FIFO_WCNT		0x18
#define XLP_SPI_RXFIFO_WCNT_MASK	0xf
#define XLP_SPI_TXFIFO_WCNT_MASK	0xf0
#define XLP_SPI_TXFIFO_WCNT_SHIFT	4

/* SPI Transmit Data FIFO Register */
#define XLP_SPI_TXDATA_FIFO		0x1c

/* SPI Receive Data FIFO Register */
#define XLP_SPI_RXDATA_FIFO		0x20

/* SPI System Control Register */
#define XLP_SPI_SYSCTRL			0x100
#define XLP_SPI_SYS_RESET		BIT(0)
#define XLP_SPI_SYS_CLKDIS		BIT(1)
#define XLP_SPI_SYS_PMEN		BIT(8)

#define SPI_CS_OFFSET			0x40
#define XLP_SPI_TXRXTH			0x80
#define XLP_SPI_FIFO_SIZE		8
#define XLP_SPI_MAX_CS			4
#define XLP_SPI_DEFAULT_FREQ		133333333
#define XLP_SPI_FDIV_MIN		4
#define XLP_SPI_FDIV_MAX		65535
/*
 * The controller can transfer only 28 bytes properly at a time, so
 * split larger transfers into chunks of at most 28 bytes.
 */
#define XLP_SPI_XFER_SIZE		28

struct xlp_spi_priv {
	struct device		dev;		/* device structure */
	void __iomem		*base;		/* spi registers base address */
	const u8		*tx_buf;	/* tx data buffer */
	u8			*rx_buf;	/* rx data buffer */
	int			tx_len;		/* tx xfer length */
	int			rx_len;		/* rx xfer length */
	int			txerrors;	/* TXFIFO underflow count */
	int			rxerrors;	/* RXFIFO overflow count */
	int			cs;		/* target device chip select */
	u32			spi_clk;	/* spi clock frequency */
	bool			cmd_cont;	/* cs active */
	struct completion	done;		/* completion notification */
};

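/*
 * Each chip select has its own copy of the channel registers, spaced
 * SPI_CS_OFFSET apart from the controller base; the system control
 * register is global and is not banked per chip select.
 */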
static inline u32 xlp_spi_reg_read(struct xlp_spi_priv *priv,
				int cs, int regoff)
{
	return readl(priv->base + regoff + cs * SPI_CS_OFFSET);
}

static inline void xlp_spi_reg_write(struct xlp_spi_priv *priv, int cs,
				int regoff, u32 val)
{
	writel(val, priv->base + regoff + cs * SPI_CS_OFFSET);
}

static inline void xlp_spi_sysctl_write(struct xlp_spi_priv *priv,
				int regoff, u32 val)
{
	writel(val, priv->base + regoff);
}

/*
 * Setup global SPI_SYSCTRL register for all SPI channels.
 */
static void xlp_spi_sysctl_setup(struct xlp_spi_priv *xspi)
{
	int cs;

	for (cs = 0; cs < XLP_SPI_MAX_CS; cs++)
		xlp_spi_sysctl_write(xspi, XLP_SPI_SYSCTRL,
				XLP_SPI_SYS_RESET << cs);
	xlp_spi_sysctl_write(xspi, XLP_SPI_SYSCTRL, XLP_SPI_SYS_PMEN);
}

static int xlp_spi_setup(struct spi_device *spi)
{
	struct xlp_spi_priv *xspi;
	u32 fdiv, cfg;
	int cs;

	xspi = spi_controller_get_devdata(spi->controller);
	cs = spi_get_chipselect(spi, 0);
	/*
	 * fdiv is the ratio of the SPI input clock to the requested
	 * device speed; the hardware accepts values between 4 and 65535.
	 */
	fdiv = DIV_ROUND_UP(xspi->spi_clk, spi->max_speed_hz);
	if (fdiv > XLP_SPI_FDIV_MAX)
		fdiv = XLP_SPI_FDIV_MAX;
	else if (fdiv < XLP_SPI_FDIV_MIN)
		fdiv = XLP_SPI_FDIV_MIN;

	xlp_spi_reg_write(xspi, cs, XLP_SPI_FDIV, fdiv);
	xlp_spi_reg_write(xspi, cs, XLP_SPI_FIFO_THRESH, XLP_SPI_TXRXTH);
	cfg = xlp_spi_reg_read(xspi, cs, XLP_SPI_CONFIG);
	if (spi->mode & SPI_CPHA)
		cfg |= XLP_SPI_CPHA;
	else
		cfg &= ~XLP_SPI_CPHA;
	if (spi->mode & SPI_CPOL)
		cfg |= XLP_SPI_CPOL;
	else
		cfg &= ~XLP_SPI_CPOL;
	if (!(spi->mode & SPI_CS_HIGH))
		cfg |= XLP_SPI_CS_POL;
	else
		cfg &= ~XLP_SPI_CS_POL;
	if (spi->mode & SPI_LSB_FIRST)
		cfg |= XLP_SPI_CS_LSBFE;
	else
		cfg &= ~XLP_SPI_CS_LSBFE;

	cfg |= XLP_SPI_TXMOSI_EN | XLP_SPI_RXMISO_EN;
	if (fdiv == 4)
		cfg |= XLP_SPI_RXCAP_EN;
	xlp_spi_reg_write(xspi, cs, XLP_SPI_CONFIG, cfg);

	return 0;
}

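/*
 * Drain the RX FIFO: each 32-bit FIFO word carries up to four bytes with
 * the earliest byte of the group in the highest filled byte position, so
 * each word is unpacked into the rx buffer in reverse byte order.
 */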
static void xlp_spi_read_rxfifo(struct xlp_spi_priv *xspi)
{
	u32 rx_data, rxfifo_cnt;
	int i, j, nbytes;

	rxfifo_cnt = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_FIFO_WCNT);
	rxfifo_cnt &= XLP_SPI_RXFIFO_WCNT_MASK;
	while (rxfifo_cnt) {
		rx_data = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_RXDATA_FIFO);
		j = 0;
		nbytes = min(xspi->rx_len, 4);
		for (i = nbytes - 1; i >= 0; i--, j++)
			xspi->rx_buf[i] = (rx_data >> (j * 8)) & 0xff;

		xspi->rx_len -= nbytes;
		xspi->rx_buf += nbytes;
		rxfifo_cnt--;
	}
}

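/*
 * Fill the TX FIFO while data and FIFO space remain: up to four bytes are
 * packed into each 32-bit FIFO word, mirroring the byte order used when
 * draining the RX FIFO above.
 */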
static void xlp_spi_fill_txfifo(struct xlp_spi_priv *xspi)
{
	u32 tx_data, txfifo_cnt;
	int i, j, nbytes;

	txfifo_cnt = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_FIFO_WCNT);
	txfifo_cnt &= XLP_SPI_TXFIFO_WCNT_MASK;
	txfifo_cnt >>= XLP_SPI_TXFIFO_WCNT_SHIFT;
	while (xspi->tx_len && (txfifo_cnt < XLP_SPI_FIFO_SIZE)) {
		j = 0;
		tx_data = 0;
		nbytes = min(xspi->tx_len, 4);
		for (i = nbytes - 1; i >= 0; i--, j++)
			tx_data |= xspi->tx_buf[i] << (j * 8);

		xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_TXDATA_FIFO, tx_data);
		xspi->tx_len -= nbytes;
		xspi->tx_buf += nbytes;
		txfifo_cnt++;
	}
}

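/*
 * Interrupt handler: refill or drain the FIFOs on threshold interrupts,
 * count underflow/overflow errors, acknowledge by writing the status bits
 * back, and signal completion when the transfer-done bit is set.
 */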
static irqreturn_t xlp_spi_interrupt(int irq, void *dev_id)
{
	struct xlp_spi_priv *xspi = dev_id;
	u32 stat;

	stat = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_STATUS) &
		XLP_SPI_STAT_MASK;
	if (!stat)
		return IRQ_NONE;

	if (stat & XLP_SPI_TX_INT) {
		if (xspi->tx_len)
			xlp_spi_fill_txfifo(xspi);
		if (stat & XLP_SPI_TX_UF)
			xspi->txerrors++;
	}

	if (stat & XLP_SPI_RX_INT) {
		if (xspi->rx_len)
			xlp_spi_read_rxfifo(xspi);
		if (stat & XLP_SPI_RX_OF)
			xspi->rxerrors++;
	}

	/* write status back to clear interrupts */
	xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_STATUS, stat);
	if (stat & XLP_SPI_XFR_DONE)
		complete(&xspi->done);

	return IRQ_HANDLED;
}

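/*
 * Program the command register for one chunk: select TX and/or RX, keep
 * the chip select asserted afterwards if cmd_cont is set, and encode the
 * transfer length in bits minus one in the bit-count field.
 */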
static void xlp_spi_send_cmd(struct xlp_spi_priv *xspi, int xfer_len,
			int cmd_cont)
{
	u32 cmd = 0;

	if (xspi->tx_buf)
		cmd |= XLP_SPI_CMD_TX_MASK;
	if (xspi->rx_buf)
		cmd |= XLP_SPI_CMD_RX_MASK;
	if (cmd_cont)
		cmd |= XLP_SPI_CMD_CONT;
	cmd |= ((xfer_len * 8 - 1) << XLP_SPI_XFR_BITCNT_SHIFT);
	xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_CMD, cmd);
}

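/*
 * Perform one chunk of at most XLP_SPI_XFER_SIZE bytes: prime the TX FIFO,
 * issue the command, enable the relevant interrupts and wait up to one
 * second for the done interrupt before reporting the result.
 */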
static int xlp_spi_xfer_block(struct xlp_spi_priv *xs,
		const unsigned char *tx_buf,
		unsigned char *rx_buf, int xfer_len, int cmd_cont)
{
	unsigned long time_left;
	u32 intr_mask = 0;

	xs->tx_buf = tx_buf;
	xs->rx_buf = rx_buf;
	xs->tx_len = (xs->tx_buf == NULL) ? 0 : xfer_len;
	xs->rx_len = (xs->rx_buf == NULL) ? 0 : xfer_len;
	xs->txerrors = xs->rxerrors = 0;

	/* fill TXDATA_FIFO, then send the CMD */
	if (xs->tx_len)
		xlp_spi_fill_txfifo(xs);

	xlp_spi_send_cmd(xs, xfer_len, cmd_cont);

	/*
	 * Spurious TX interrupts have been observed, so enable the TX
	 * interrupts only when there is data to transmit; for RX-only
	 * transfers enable just the RX interrupts.
	 */
	if (xs->tx_len)
		intr_mask |= XLP_SPI_INTR_TXTH | XLP_SPI_INTR_TXUF |
				XLP_SPI_INTR_RXTH | XLP_SPI_INTR_RXOF;
	else
		intr_mask |= XLP_SPI_INTR_RXTH | XLP_SPI_INTR_RXOF;

	intr_mask |= XLP_SPI_INTR_DONE;
	xlp_spi_reg_write(xs, xs->cs, XLP_SPI_INTR_EN, intr_mask);

	time_left = wait_for_completion_timeout(&xs->done,
						msecs_to_jiffies(1000));
	/* Disable interrupts */
	xlp_spi_reg_write(xs, xs->cs, XLP_SPI_INTR_EN, 0x0);
	if (!time_left) {
		dev_err(&xs->dev, "xfer timed out!\n");
		goto out;
	}
	if (xs->txerrors || xs->rxerrors)
		dev_err(&xs->dev, "Over/Underflow rx %d tx %d xfer %d!\n",
				xs->rxerrors, xs->txerrors, xfer_len);

	return xfer_len;
out:
	return -ETIMEDOUT;
}

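/*
 * Split a transfer into XLP_SPI_XFER_SIZE chunks; every chunk except the
 * last is sent with the continue bit set so the chip select stays
 * asserted, and the last chunk uses the cmd_cont value chosen by
 * transfer_one.
 */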
static int xlp_spi_txrx_bufs(struct xlp_spi_priv *xs, struct spi_transfer *t)
{
	int bytesleft, sz;
	unsigned char *rx_buf;
	const unsigned char *tx_buf;

	tx_buf = t->tx_buf;
	rx_buf = t->rx_buf;
	bytesleft = t->len;
	while (bytesleft) {
		if (bytesleft > XLP_SPI_XFER_SIZE)
			sz = xlp_spi_xfer_block(xs, tx_buf, rx_buf,
					XLP_SPI_XFER_SIZE, 1);
		else
			sz = xlp_spi_xfer_block(xs, tx_buf, rx_buf,
					bytesleft, xs->cmd_cont);
		if (sz < 0)
			return sz;
		bytesleft -= sz;
		if (tx_buf)
			tx_buf += sz;
		if (rx_buf)
			rx_buf += sz;
	}
	return bytesleft;
}

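/*
 * transfer_one callback: keep the chip select asserted after this
 * transfer (cmd_cont) unless it is the last transfer of the message.
 */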
static int xlp_spi_transfer_one(struct spi_controller *host,
					struct spi_device *spi,
					struct spi_transfer *t)
{
	struct xlp_spi_priv *xspi = spi_controller_get_devdata(host);
	int ret = 0;

	xspi->cs = spi_get_chipselect(spi, 0);
	xspi->dev = spi->dev;

	if (spi_transfer_is_last(host, t))
		xspi->cmd_cont = 0;
	else
		xspi->cmd_cont = 1;

	if (xlp_spi_txrx_bufs(xspi, t))
		ret = -EIO;

	spi_finalize_current_transfer(host);
	return ret;
}

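/*
 * Probe: map the channel registers, request the interrupt, read the SPI
 * input clock rate, program the global sysctl register and register the
 * SPI host with XLP_SPI_MAX_CS chip selects.
 */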
static int xlp_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct xlp_spi_priv *xspi;
	struct clk *clk;
	int irq, err;

	xspi = devm_kzalloc(&pdev->dev, sizeof(*xspi), GFP_KERNEL);
	if (!xspi)
		return -ENOMEM;

	xspi->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(xspi->base))
		return PTR_ERR(xspi->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	err = devm_request_irq(&pdev->dev, irq, xlp_spi_interrupt, 0,
			pdev->name, xspi);
	if (err) {
		dev_err(&pdev->dev, "unable to request irq %d\n", irq);
		return err;
	}

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "could not get spi clock\n");
		return PTR_ERR(clk);
	}

	xspi->spi_clk = clk_get_rate(clk);

	host = spi_alloc_host(&pdev->dev, 0);
	if (!host) {
		dev_err(&pdev->dev, "could not alloc host\n");
		return -ENOMEM;
	}

	host->bus_num = 0;
	host->num_chipselect = XLP_SPI_MAX_CS;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	host->setup = xlp_spi_setup;
	host->transfer_one = xlp_spi_transfer_one;
	host->dev.of_node = pdev->dev.of_node;

	init_completion(&xspi->done);
	spi_controller_set_devdata(host, xspi);
	xlp_spi_sysctl_setup(xspi);

	/* register spi controller */
	err = devm_spi_register_controller(&pdev->dev, host);
	if (err) {
		dev_err(&pdev->dev, "spi register host failed!\n");
		spi_controller_put(host);
		return err;
	}

	return 0;
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xlp_spi_acpi_match[] = {
	{ "BRCM900D", 0 },
	{ "CAV900D",  0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, xlp_spi_acpi_match);
#endif

static struct platform_driver xlp_spi_driver = {
	.probe	= xlp_spi_probe,
	.driver = {
		.name	= "xlp-spi",
		.acpi_match_table = ACPI_PTR(xlp_spi_acpi_match),
	},
};
module_platform_driver(xlp_spi_driver);

MODULE_AUTHOR("Kamlakant Patel <kamlakant.patel@broadcom.com>");
MODULE_DESCRIPTION("Netlogic XLP SPI controller driver");
MODULE_LICENSE("GPL v2");