/*
 * drivers/spi/spi-fsl-dspi.c
 *
 * Copyright 2013 Freescale Semiconductor, Inc.
 *
 * Freescale DSPI driver
 * This file contains a driver for the Freescale DSPI
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-fsl-dspi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/time.h>

#define DRIVER_NAME "fsl-dspi"

#define TRAN_STATE_RX_VOID		0x01
#define TRAN_STATE_TX_VOID		0x02
#define TRAN_STATE_WORD_ODD_NUM	0x04

#define DSPI_FIFO_SIZE			4
#define DSPI_DMA_BUFSIZE		(DSPI_FIFO_SIZE * 1024)

#define SPI_MCR		0x00
#define SPI_MCR_MASTER		(1 << 31)
#define SPI_MCR_PCSIS		(0x3F << 16)
#define SPI_MCR_CLR_TXF	(1 << 11)
#define SPI_MCR_CLR_RXF	(1 << 10)

#define SPI_TCR			0x08
#define SPI_TCR_GET_TCNT(x)	(((x) & 0xffff0000) >> 16)

#define SPI_CTAR(x)		(0x0c + (((x) & 0x3) * 4))
#define SPI_CTAR_FMSZ(x)	(((x) & 0x0000000f) << 27)
#define SPI_CTAR_CPOL(x)	((x) << 26)
#define SPI_CTAR_CPHA(x)	((x) << 25)
#define SPI_CTAR_LSBFE(x)	((x) << 24)
#define SPI_CTAR_PCSSCK(x)	(((x) & 0x00000003) << 22)
#define SPI_CTAR_PASC(x)	(((x) & 0x00000003) << 20)
#define SPI_CTAR_PDT(x)	(((x) & 0x00000003) << 18)
#define SPI_CTAR_PBR(x)	(((x) & 0x00000003) << 16)
#define SPI_CTAR_CSSCK(x)	(((x) & 0x0000000f) << 12)
#define SPI_CTAR_ASC(x)	(((x) & 0x0000000f) << 8)
#define SPI_CTAR_DT(x)		(((x) & 0x0000000f) << 4)
#define SPI_CTAR_BR(x)		((x) & 0x0000000f)
#define SPI_CTAR_SCALE_BITS	0xf

#define SPI_CTAR0_SLAVE	0x0c

#define SPI_SR			0x2c
#define SPI_SR_EOQF		0x10000000
#define SPI_SR_TCFQF		0x80000000
#define SPI_SR_CLEAR		0xdaad0000

#define SPI_RSER_TFFFE		BIT(25)
#define SPI_RSER_TFFFD		BIT(24)
#define SPI_RSER_RFDFE		BIT(17)
#define SPI_RSER_RFDFD		BIT(16)

#define SPI_RSER		0x30
#define SPI_RSER_EOQFE		0x10000000
#define SPI_RSER_TCFQE		0x80000000

#define SPI_PUSHR		0x34
#define SPI_PUSHR_CONT		(1 << 31)
#define SPI_PUSHR_CTAS(x)	(((x) & 0x00000003) << 28)
#define SPI_PUSHR_EOQ		(1 << 27)
#define SPI_PUSHR_CTCNT	(1 << 26)
#define SPI_PUSHR_PCS(x)	(((1 << x) & 0x0000003f) << 16)
#define SPI_PUSHR_TXDATA(x)	((x) & 0x0000ffff)

#define SPI_PUSHR_SLAVE	0x34

#define SPI_POPR		0x38
#define SPI_POPR_RXDATA(x)	((x) & 0x0000ffff)

#define SPI_TXFR0		0x3c
#define SPI_TXFR1		0x40
#define SPI_TXFR2		0x44
#define SPI_TXFR3		0x48
#define SPI_RXFR0		0x7c
#define SPI_RXFR1		0x80
#define SPI_RXFR2		0x84
#define SPI_RXFR3		0x88

#define SPI_FRAME_BITS(bits)	SPI_CTAR_FMSZ((bits) - 1)
#define SPI_FRAME_BITS_MASK	SPI_CTAR_FMSZ(0xf)
#define SPI_FRAME_BITS_16	SPI_CTAR_FMSZ(0xf)
#define SPI_FRAME_BITS_8	SPI_CTAR_FMSZ(0x7)

#define SPI_CS_INIT		0x01
#define SPI_CS_ASSERT		0x02
#define SPI_CS_DROP		0x04

#define SPI_TCR_TCNT_MAX	0x10000

#define DMA_COMPLETION_TIMEOUT	msecs_to_jiffies(3000)

struct chip_data {
	u32 mcr_val;
	u32 ctar_val;
	u16 void_write_data;
};

enum dspi_trans_mode {
	DSPI_EOQ_MODE = 0,
	DSPI_TCFQ_MODE,
	DSPI_DMA_MODE,
};

struct fsl_dspi_devtype_data {
	enum dspi_trans_mode trans_mode;
	u8 max_clock_factor;
};

static const struct fsl_dspi_devtype_data vf610_data = {
	.trans_mode = DSPI_DMA_MODE,
	.max_clock_factor = 2,
};

static const struct fsl_dspi_devtype_data ls1021a_v1_data = {
	.trans_mode = DSPI_TCFQ_MODE,
	.max_clock_factor = 8,
};

static const struct fsl_dspi_devtype_data ls2085a_data = {
	.trans_mode = DSPI_TCFQ_MODE,
	.max_clock_factor = 8,
};

static const struct fsl_dspi_devtype_data coldfire_data = {
	.trans_mode = DSPI_EOQ_MODE,
	.max_clock_factor = 8,
};

struct fsl_dspi_dma {
	/*
	 * Length of the current DMA chunk, in DSPI words (one 32-bit
	 * PUSHR entry per word)
	 */
	u32 curr_xfer_len;

	u32 *tx_dma_buf;
	struct dma_chan *chan_tx;
	dma_addr_t tx_dma_phys;
	struct completion cmd_tx_complete;
	struct dma_async_tx_descriptor *tx_desc;

	u32 *rx_dma_buf;
	struct dma_chan *chan_rx;
	dma_addr_t rx_dma_phys;
	struct completion cmd_rx_complete;
	struct dma_async_tx_descriptor *rx_desc;
};

struct fsl_dspi {
	struct spi_master	*master;
	struct platform_device	*pdev;

	struct regmap		*regmap;
	int			irq;
	struct clk		*clk;

	struct spi_transfer	*cur_transfer;
	struct spi_message	*cur_msg;
	struct chip_data	*cur_chip;
	size_t			len;
	void			*tx;
	void			*tx_end;
	void			*rx;
	void			*rx_end;
	char			dataflags;
	u8			cs;
	u16			void_write_data;
	u32			cs_change;
	const struct fsl_dspi_devtype_data *devtype_data;

	wait_queue_head_t	waitq;
	u32			waitflags;

	u32			spi_tcnt;
	struct fsl_dspi_dma	*dma;
};

static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word);

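/* Return 0 when CTAR0 is set up for 8-bit frames, 1 otherwise (two bytes per word). */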
static inline int is_double_byte_mode(struct fsl_dspi *dspi)
{
	unsigned int val;

	regmap_read(dspi->regmap, SPI_CTAR(0), &val);

	return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
}

static void dspi_tx_dma_callback(void *arg)
{
	struct fsl_dspi *dspi = arg;
	struct fsl_dspi_dma *dma = dspi->dma;

	complete(&dma->cmd_tx_complete);
}

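/*
 * RX DMA completion callback: copy the received frames from the RX bounce
 * buffer into the transfer's rx buffer (unless rx is void) and signal
 * cmd_rx_complete.
 */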
static void dspi_rx_dma_callback(void *arg)
{
	struct fsl_dspi *dspi = arg;
	struct fsl_dspi_dma *dma = dspi->dma;
	int rx_word;
	int i;
	u16 d;

	rx_word = is_double_byte_mode(dspi);

	if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) {
		for (i = 0; i < dma->curr_xfer_len; i++) {
			d = dspi->dma->rx_dma_buf[i];
			rx_word ? (*(u16 *)dspi->rx = d) :
						(*(u8 *)dspi->rx = d);
			dspi->rx += rx_word + 1;
		}
	}

	complete(&dma->cmd_rx_complete);
}

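/*
 * Fill the TX bounce buffer with PUSHR words for the current chunk, submit
 * one TX and one RX slave DMA descriptor and wait (with a timeout) for both
 * of them to complete.
 */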
static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
{
	struct fsl_dspi_dma *dma = dspi->dma;
	struct device *dev = &dspi->pdev->dev;
	int time_left;
	int tx_word;
	int i;

	tx_word = is_double_byte_mode(dspi);

	for (i = 0; i < dma->curr_xfer_len; i++) {
		dspi->dma->tx_dma_buf[i] = dspi_data_to_pushr(dspi, tx_word);
		if ((dspi->cs_change) && (!dspi->len))
			dspi->dma->tx_dma_buf[i] &= ~SPI_PUSHR_CONT;
	}

	dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
					dma->tx_dma_phys,
					dma->curr_xfer_len *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->tx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->tx_desc->callback = dspi_tx_dma_callback;
	dma->tx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
					dma->rx_dma_phys,
					dma->curr_xfer_len *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->rx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->rx_desc->callback = dspi_rx_dma_callback;
	dma->rx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	reinit_completion(&dspi->dma->cmd_rx_complete);
	reinit_completion(&dspi->dma->cmd_tx_complete);

	dma_async_issue_pending(dma->chan_rx);
	dma_async_issue_pending(dma->chan_tx);

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
					DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA tx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
					DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA rx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	return 0;
}

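/*
 * Split the current transfer into chunks that fit the DMA bounce buffers
 * and hand them to dspi_next_xfer_dma_submit() one at a time.
 */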
static int dspi_dma_xfer(struct fsl_dspi *dspi)
{
	struct fsl_dspi_dma *dma = dspi->dma;
	struct device *dev = &dspi->pdev->dev;
	int curr_remaining_bytes;
	int bytes_per_buffer;
	int word = 1;
	int ret = 0;

	if (is_double_byte_mode(dspi))
		word = 2;
	curr_remaining_bytes = dspi->len;
	bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
	while (curr_remaining_bytes) {
		/* Check if current transfer fits the DMA buffer */
		dma->curr_xfer_len = curr_remaining_bytes / word;
		if (dma->curr_xfer_len > bytes_per_buffer)
			dma->curr_xfer_len = bytes_per_buffer;

		ret = dspi_next_xfer_dma_submit(dspi);
		if (ret) {
			dev_err(dev, "DMA transfer failed\n");
			goto exit;

		} else {
			curr_remaining_bytes -= dma->curr_xfer_len * word;
			if (curr_remaining_bytes < 0)
				curr_remaining_bytes = 0;
		}
	}

exit:
	return ret;
}

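/*
 * Request the "rx" and "tx" slave DMA channels, allocate coherent bounce
 * buffers and configure both channels for 32-bit accesses to the POPR and
 * PUSHR registers at phy_addr.
 */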
static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
{
	struct fsl_dspi_dma *dma;
	struct dma_slave_config cfg;
	struct device *dev = &dspi->pdev->dev;
	int ret;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->chan_rx = dma_request_slave_channel(dev, "rx");
	if (!dma->chan_rx) {
		dev_err(dev, "rx dma channel not available\n");
		ret = -ENODEV;
		return ret;
	}

	dma->chan_tx = dma_request_slave_channel(dev, "tx");
	if (!dma->chan_tx) {
		dev_err(dev, "tx dma channel not available\n");
		ret = -ENODEV;
		goto err_tx_channel;
	}

	dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
					&dma->tx_dma_phys, GFP_KERNEL);
	if (!dma->tx_dma_buf) {
		ret = -ENOMEM;
		goto err_tx_dma_buf;
	}

	dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
					&dma->rx_dma_phys, GFP_KERNEL);
	if (!dma->rx_dma_buf) {
		ret = -ENOMEM;
		goto err_rx_dma_buf;
	}

	cfg.src_addr = phy_addr + SPI_POPR;
	cfg.dst_addr = phy_addr + SPI_PUSHR;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = 1;
	cfg.dst_maxburst = 1;

	cfg.direction = DMA_DEV_TO_MEM;
	ret = dmaengine_slave_config(dma->chan_rx, &cfg);
	if (ret) {
		dev_err(dev, "can't configure rx dma channel\n");
		ret = -EINVAL;
		goto err_slave_config;
	}

	cfg.direction = DMA_MEM_TO_DEV;
	ret = dmaengine_slave_config(dma->chan_tx, &cfg);
	if (ret) {
		dev_err(dev, "can't configure tx dma channel\n");
		ret = -EINVAL;
		goto err_slave_config;
	}

	dspi->dma = dma;
	init_completion(&dma->cmd_tx_complete);
	init_completion(&dma->cmd_rx_complete);

	return 0;

err_slave_config:
	dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
			dma->rx_dma_buf, dma->rx_dma_phys);
err_rx_dma_buf:
	dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
			dma->tx_dma_buf, dma->tx_dma_phys);
err_tx_dma_buf:
	dma_release_channel(dma->chan_tx);
err_tx_channel:
	dma_release_channel(dma->chan_rx);

	devm_kfree(dev, dma);
	dspi->dma = NULL;

	return ret;
}

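/* Undo dspi_request_dma(): free the bounce buffers and release both channels. */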
static void dspi_release_dma(struct fsl_dspi *dspi)
{
	struct fsl_dspi_dma *dma = dspi->dma;
	struct device *dev = &dspi->pdev->dev;

	if (dma) {
		if (dma->chan_tx) {
			dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
					dma->tx_dma_buf, dma->tx_dma_phys);
			dma_release_channel(dma->chan_tx);
		}

		if (dma->chan_rx) {
			dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
					dma->rx_dma_buf, dma->rx_dma_phys);
			dma_release_channel(dma->chan_rx);
		}
	}
}

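/*
 * Translate the requested SPI clock into the CTAR PBR/BR fields: pick the
 * smallest prescaler * scaler product that still divides clkrate down to at
 * most speed_hz, or fall back to the maximum dividers.
 */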
static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
		unsigned long clkrate)
{
	/* Valid baud rate pre-scaler values */
	int pbr_tbl[4] = {2, 3, 5, 7};
	int brs[16] = {	2,	4,	6,	8,
		16,	32,	64,	128,
		256,	512,	1024,	2048,
		4096,	8192,	16384,	32768 };
	int scale_needed, scale, minscale = INT_MAX;
	int i, j;

	scale_needed = clkrate / speed_hz;
	if (clkrate % speed_hz)
		scale_needed++;

	for (i = 0; i < ARRAY_SIZE(brs); i++)
		for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) {
			scale = brs[i] * pbr_tbl[j];
			if (scale >= scale_needed) {
				if (scale < minscale) {
					minscale = scale;
					*br = i;
					*pbr = j;
				}
				break;
			}
		}

	if (minscale == INT_MAX) {
		pr_warn("Can not find valid baud rate, speed_hz is %d, clkrate is %ld, using max prescaler value.\n",
			speed_hz, clkrate);
		*pbr = ARRAY_SIZE(pbr_tbl) - 1;
		*br =  ARRAY_SIZE(brs) - 1;
	}
}

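/*
 * Translate a delay in ns into the prescaler/scaler pair used for the CTAR
 * delay fields (PCSSCK/CSSCK, PASC/ASC): pick the smallest
 * pscale * 2^(sc + 1) product that covers delay_ns at clkrate.
 */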
static void ns_delay_scale(char *psc, char *sc, int delay_ns,
		unsigned long clkrate)
{
	int pscale_tbl[4] = {1, 3, 5, 7};
	int scale_needed, scale, minscale = INT_MAX;
	int i, j;
	u32 remainder;

	scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
			&remainder);
	if (remainder)
		scale_needed++;

	for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++)
		for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) {
			scale = pscale_tbl[i] * (2 << j);
			if (scale >= scale_needed) {
				if (scale < minscale) {
					minscale = scale;
					*psc = i;
					*sc = j;
				}
				break;
			}
		}

	if (minscale == INT_MAX) {
		pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld, using max prescaler value",
			delay_ns, clkrate);
		*psc = ARRAY_SIZE(pscale_tbl) - 1;
		*sc = SPI_CTAR_SCALE_BITS;
	}
}

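/*
 * Build one PUSHR command + data word from the next byte or 16-bit word of
 * the TX buffer (or from void_write_data when there is no TX buffer) and
 * advance the TX bookkeeping.
 */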
static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word)
{
	u16 d16;

	if (!(dspi->dataflags & TRAN_STATE_TX_VOID))
		d16 = tx_word ? *(u16 *)dspi->tx : *(u8 *)dspi->tx;
	else
		d16 = dspi->void_write_data;

	dspi->tx += tx_word + 1;
	dspi->len -= tx_word + 1;

	return	SPI_PUSHR_TXDATA(d16) |
		SPI_PUSHR_PCS(dspi->cs) |
		SPI_PUSHR_CTAS(0) |
		SPI_PUSHR_CONT;
}

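/*
 * Pop one received frame from POPR and store it in the RX buffer as a byte
 * or a 16-bit word, unless the RX buffer is void.
 */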
static void dspi_data_from_popr(struct fsl_dspi *dspi, int rx_word)
{
	u16 d;
	unsigned int val;

	regmap_read(dspi->regmap, SPI_POPR, &val);
	d = SPI_POPR_RXDATA(val);

	if (!(dspi->dataflags & TRAN_STATE_RX_VOID))
		rx_word ? (*(u16 *)dspi->rx = d) : (*(u8 *)dspi->rx = d);

	dspi->rx += rx_word + 1;
}

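/*
 * Fill the TX FIFO with up to DSPI_FIFO_SIZE entries, tagging the last one
 * with EOQ so that the End-Of-Queue interrupt fires once the queue has been
 * transmitted.  Returns the number of bytes queued.
 */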
static int dspi_eoq_write(struct fsl_dspi *dspi)
{
	int tx_count = 0;
	int tx_word;
	u32 dspi_pushr = 0;

	tx_word = is_double_byte_mode(dspi);

	while (dspi->len && (tx_count < DSPI_FIFO_SIZE)) {
		/*
		 * If we are in word mode but only have a single byte left to
		 * transfer, switch to byte mode temporarily; we switch back
		 * at the end of the transfer.
		 */
		if (tx_word && (dspi->len == 1)) {
			dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
			regmap_update_bits(dspi->regmap, SPI_CTAR(0),
					SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
			tx_word = 0;
		}

		dspi_pushr = dspi_data_to_pushr(dspi, tx_word);

		if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) {
			/* last entry: end of transfer or TX FIFO full */
			dspi_pushr |= SPI_PUSHR_EOQ;
			if ((dspi->cs_change) && (!dspi->len))
				dspi_pushr &= ~SPI_PUSHR_CONT;
		} else if (tx_word && (dspi->len == 1))
			dspi_pushr |= SPI_PUSHR_EOQ;

		regmap_write(dspi->regmap, SPI_PUSHR, dspi_pushr);

		tx_count++;
	}

	return tx_count * (tx_word + 1);
}

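/* Drain up to DSPI_FIFO_SIZE received frames from the RX FIFO. */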
static int dspi_eoq_read(struct fsl_dspi *dspi)
{
	int rx_count = 0;
	int rx_word = is_double_byte_mode(dspi);

	while ((dspi->rx < dspi->rx_end)
			&& (rx_count < DSPI_FIFO_SIZE)) {
		if (rx_word && (dspi->rx_end - dspi->rx) == 1)
			rx_word = 0;

		dspi_data_from_popr(dspi, rx_word);
		rx_count++;
	}

	return rx_count;
}

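/*
 * Queue a single frame; in TCFQ mode each Transfer Complete interrupt then
 * feeds the next frame.  Returns the number of bytes queued.
 */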
static int dspi_tcfq_write(struct fsl_dspi *dspi)
{
	int tx_word;
	u32 dspi_pushr = 0;

	tx_word = is_double_byte_mode(dspi);

	if (tx_word && (dspi->len == 1)) {
		dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
		regmap_update_bits(dspi->regmap, SPI_CTAR(0),
				SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
		tx_word = 0;
	}

	dspi_pushr = dspi_data_to_pushr(dspi, tx_word);

	if ((dspi->cs_change) && (!dspi->len))
		dspi_pushr &= ~SPI_PUSHR_CONT;

	regmap_write(dspi->regmap, SPI_PUSHR, dspi_pushr);

	return tx_word + 1;
}

static void dspi_tcfq_read(struct fsl_dspi *dspi)
{
	int rx_word = is_double_byte_mode(dspi);

	if (rx_word && (dspi->rx_end - dspi->rx) == 1)
		rx_word = 0;

	dspi_data_from_popr(dspi, rx_word);
}

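/*
 * Execute one spi_message: for every transfer program MCR/CTAR0, kick the
 * transfer off in EOQ, TCFQ or DMA mode and then wait for the interrupt
 * handler (or the DMA completion) to finish it.
 */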
static int dspi_transfer_one_message(struct spi_master *master,
		struct spi_message *message)
{
	struct fsl_dspi *dspi = spi_master_get_devdata(master);
	struct spi_device *spi = message->spi;
	struct spi_transfer *transfer;
	int status = 0;
	enum dspi_trans_mode trans_mode;
	u32 spi_tcr;

	regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
	dspi->spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);

	message->actual_length = 0;

	list_for_each_entry(transfer, &message->transfers, transfer_list) {
		dspi->cur_transfer = transfer;
		dspi->cur_msg = message;
		dspi->cur_chip = spi_get_ctldata(spi);
		dspi->cs = spi->chip_select;
		dspi->cs_change = 0;
		if (list_is_last(&dspi->cur_transfer->transfer_list,
				 &dspi->cur_msg->transfers) || transfer->cs_change)
			dspi->cs_change = 1;
		dspi->void_write_data = dspi->cur_chip->void_write_data;

		dspi->dataflags = 0;
		dspi->tx = (void *)transfer->tx_buf;
		dspi->tx_end = dspi->tx + transfer->len;
		dspi->rx = transfer->rx_buf;
		dspi->rx_end = dspi->rx + transfer->len;
		dspi->len = transfer->len;

		if (!dspi->rx)
			dspi->dataflags |= TRAN_STATE_RX_VOID;

		if (!dspi->tx)
			dspi->dataflags |= TRAN_STATE_TX_VOID;

		regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val);
		regmap_update_bits(dspi->regmap, SPI_MCR,
				SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
				SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
		regmap_write(dspi->regmap, SPI_CTAR(0),
				dspi->cur_chip->ctar_val);

		trans_mode = dspi->devtype_data->trans_mode;
		switch (trans_mode) {
		case DSPI_EOQ_MODE:
			regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
			dspi_eoq_write(dspi);
			break;
		case DSPI_TCFQ_MODE:
			regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
			dspi_tcfq_write(dspi);
			break;
		case DSPI_DMA_MODE:
			regmap_write(dspi->regmap, SPI_RSER,
				SPI_RSER_TFFFE | SPI_RSER_TFFFD |
				SPI_RSER_RFDFE | SPI_RSER_RFDFD);
			status = dspi_dma_xfer(dspi);
			break;
		default:
			dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
				trans_mode);
			status = -EINVAL;
			goto out;
		}

		if (trans_mode != DSPI_DMA_MODE) {
			if (wait_event_interruptible(dspi->waitq,
						dspi->waitflags))
				dev_err(&dspi->pdev->dev,
					"wait transfer complete fail!\n");
			dspi->waitflags = 0;
		}

		if (transfer->delay_usecs)
			udelay(transfer->delay_usecs);
	}

out:
	message->status = status;
	spi_finalize_current_message(master);

	return status;
}

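/*
 * Per-device setup: compute the MCR and CTAR values (frame size, SPI mode,
 * baud rate dividers and CS-to-SCK/SCK-to-CS delay scalers) and cache them
 * in the per-chip data.
 */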
static int dspi_setup(struct spi_device *spi)
{
	struct chip_data *chip;
	struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
	struct fsl_dspi_platform_data *pdata;
	u32 cs_sck_delay = 0, sck_cs_delay = 0;
	unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
	unsigned char pasc = 0, asc = 0, fmsz = 0;
	unsigned long clkrate;

	if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) {
		fmsz = spi->bits_per_word - 1;
	} else {
		pr_err("Invalid wordsize\n");
		return -ENODEV;
	}

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
	}

	pdata = dev_get_platdata(&dspi->pdev->dev);

	if (!pdata) {
		of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
				&cs_sck_delay);

		of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
				&sck_cs_delay);
	} else {
		cs_sck_delay = pdata->cs_sck_delay;
		sck_cs_delay = pdata->sck_cs_delay;
	}

	chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS |
		SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF;

	chip->void_write_data = 0;

	clkrate = clk_get_rate(dspi->clk);
	hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);

	/* Set PCS to SCK delay scale values */
	ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate);

	/* Set After SCK delay scale values */
	ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);

	chip->ctar_val =  SPI_CTAR_FMSZ(fmsz)
		| SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0)
		| SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0)
		| SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0)
		| SPI_CTAR_PCSSCK(pcssck)
		| SPI_CTAR_CSSCK(cssck)
		| SPI_CTAR_PASC(pasc)
		| SPI_CTAR_ASC(asc)
		| SPI_CTAR_PBR(pbr)
		| SPI_CTAR_BR(br);

	spi_set_ctldata(spi, chip);

	return 0;
}

static void dspi_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);

	dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
			spi->master->bus_num, spi->chip_select);

	kfree(chip);
}

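/*
 * Interrupt handler for EOQ/TCFQ modes: acknowledge the status flags,
 * account the transmitted frames via the transfer counter, read back the
 * received data and either queue the next chunk or wake up the waiting
 * transfer_one_message() thread.
 */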
static irqreturn_t dspi_interrupt(int irq, void *dev_id)
{
	struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
	struct spi_message *msg = dspi->cur_msg;
	enum dspi_trans_mode trans_mode;
	u32 spi_sr, spi_tcr;
	u32 spi_tcnt, tcnt_diff;
	int tx_word;

	regmap_read(dspi->regmap, SPI_SR, &spi_sr);
	regmap_write(dspi->regmap, SPI_SR, spi_sr);

	if (spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF)) {
		tx_word = is_double_byte_mode(dspi);

		regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
		spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
		/*
		 * The SPI Transfer Counter in SPI_TCR is 16 bits wide, so
		 * the maximum count is 65535; when it reaches 65535 it wraps
		 * around and restarts from zero.  If spi_tcnt is less than
		 * dspi->spi_tcnt, the counter has already wrapped around.
		 * The SPI Transfer Counter counts transmitted frames, and a
		 * frame may be two bytes long.
		 */
		tcnt_diff = ((spi_tcnt + SPI_TCR_TCNT_MAX) - dspi->spi_tcnt)
			% SPI_TCR_TCNT_MAX;
		tcnt_diff *= (tx_word + 1);
		if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM)
			tcnt_diff--;

		msg->actual_length += tcnt_diff;

		dspi->spi_tcnt = spi_tcnt;

		trans_mode = dspi->devtype_data->trans_mode;
		switch (trans_mode) {
		case DSPI_EOQ_MODE:
			dspi_eoq_read(dspi);
			break;
		case DSPI_TCFQ_MODE:
			dspi_tcfq_read(dspi);
			break;
		default:
			dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
				trans_mode);
			return IRQ_HANDLED;
		}

		if (!dspi->len) {
			if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) {
				regmap_update_bits(dspi->regmap,
						   SPI_CTAR(0),
						   SPI_FRAME_BITS_MASK,
						   SPI_FRAME_BITS(16));
				dspi->dataflags &= ~TRAN_STATE_WORD_ODD_NUM;
			}

			dspi->waitflags = 1;
			wake_up_interruptible(&dspi->waitq);
		} else {
			switch (trans_mode) {
			case DSPI_EOQ_MODE:
				dspi_eoq_write(dspi);
				break;
			case DSPI_TCFQ_MODE:
				dspi_tcfq_write(dspi);
				break;
			default:
				dev_err(&dspi->pdev->dev,
					"unsupported trans_mode %u\n",
					trans_mode);
			}
		}
	}

	return IRQ_HANDLED;
}

static const struct of_device_id fsl_dspi_dt_ids[] = {
	{ .compatible = "fsl,vf610-dspi", .data = &vf610_data, },
	{ .compatible = "fsl,ls1021a-v1.0-dspi", .data = &ls1021a_v1_data, },
	{ .compatible = "fsl,ls2085a-dspi", .data = &ls2085a_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);

#ifdef CONFIG_PM_SLEEP
static int dspi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_dspi *dspi = spi_master_get_devdata(master);

	spi_master_suspend(master);
	clk_disable_unprepare(dspi->clk);

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int dspi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_dspi *dspi = spi_master_get_devdata(master);
	int ret;

	pinctrl_pm_select_default_state(dev);

	ret = clk_prepare_enable(dspi->clk);
	if (ret)
		return ret;
	spi_master_resume(master);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);

static const struct regmap_config dspi_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x88,
};

static void dspi_init(struct fsl_dspi *dspi)
{
	regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
}

static int dspi_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct spi_master *master;
	struct fsl_dspi *dspi;
	struct resource *res;
	void __iomem *base;
	struct fsl_dspi_platform_data *pdata;
	int ret = 0, cs_num, bus_num;

	master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
	if (!master)
		return -ENOMEM;

	dspi = spi_master_get_devdata(master);
	dspi->pdev = pdev;
	dspi->master = master;

	master->transfer = NULL;
	master->setup = dspi_setup;
	master->transfer_one_message = dspi_transfer_one_message;
	master->dev.of_node = pdev->dev.of_node;

	master->cleanup = dspi_cleanup;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
	master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
					SPI_BPW_MASK(16);

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata) {
		master->num_chipselect = pdata->cs_num;
		master->bus_num = pdata->bus_num;

		dspi->devtype_data = &coldfire_data;
	} else {
		ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
			goto out_master_put;
		}
		master->num_chipselect = cs_num;

		ret = of_property_read_u32(np, "bus-num", &bus_num);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get bus-num\n");
			goto out_master_put;
		}
		master->bus_num = bus_num;

		dspi->devtype_data = of_device_get_match_data(&pdev->dev);
		if (!dspi->devtype_data) {
			dev_err(&pdev->dev, "can't get devtype_data\n");
			ret = -EFAULT;
			goto out_master_put;
		}
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto out_master_put;
	}

	dspi->regmap = devm_regmap_init_mmio_clk(&pdev->dev, NULL, base,
						&dspi_regmap_config);
	if (IS_ERR(dspi->regmap)) {
		dev_err(&pdev->dev, "failed to init regmap: %ld\n",
				PTR_ERR(dspi->regmap));
		ret = PTR_ERR(dspi->regmap);
		goto out_master_put;
	}

	dspi_init(dspi);
	dspi->irq = platform_get_irq(pdev, 0);
	if (dspi->irq < 0) {
		dev_err(&pdev->dev, "can't get platform irq\n");
		ret = dspi->irq;
		goto out_master_put;
	}

	ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
			pdev->name, dspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
		goto out_master_put;
	}

	dspi->clk = devm_clk_get(&pdev->dev, "dspi");
	if (IS_ERR(dspi->clk)) {
		ret = PTR_ERR(dspi->clk);
		dev_err(&pdev->dev, "unable to get clock\n");
		goto out_master_put;
	}
	ret = clk_prepare_enable(dspi->clk);
	if (ret)
		goto out_master_put;

	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
		ret = dspi_request_dma(dspi, res->start);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get dma channels\n");
			goto out_clk_put;
		}
	}

	master->max_speed_hz =
		clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;

	init_waitqueue_head(&dspi->waitq);
	platform_set_drvdata(pdev, master);

	ret = spi_register_master(master);
	if (ret != 0) {
		dev_err(&pdev->dev, "Problem registering DSPI master\n");
		goto out_clk_put;
	}

	return ret;

out_clk_put:
	clk_disable_unprepare(dspi->clk);
out_master_put:
	spi_master_put(master);

	return ret;
}

static int dspi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct fsl_dspi *dspi = spi_master_get_devdata(master);

	/* Disconnect from the SPI framework */
	dspi_release_dma(dspi);
	clk_disable_unprepare(dspi->clk);
	spi_unregister_master(dspi->master);

	return 0;
}

static struct platform_driver fsl_dspi_driver = {
	.driver.name    = DRIVER_NAME,
	.driver.of_match_table = fsl_dspi_dt_ids,
	.driver.owner   = THIS_MODULE,
	.driver.pm = &dspi_pm,
	.probe          = dspi_probe,
	.remove		= dspi_remove,
};
module_platform_driver(fsl_dspi_driver);

MODULE_DESCRIPTION("Freescale DSPI Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);