xref: /linux/drivers/spi/spi-rspi.c (revision c4ee0af3fa0dc65f690fc908f02b8355f9576ea0)
1 /*
2  * SH RSPI driver
3  *
4  * Copyright (C) 2012  Renesas Solutions Corp.
5  *
6  * Based on spi-sh.c:
7  * Copyright (C) 2011 Renesas Solutions Corp.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; version 2 of the License.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
21  *
22  */
23 
24 #include <linux/module.h>
25 #include <linux/kernel.h>
26 #include <linux/sched.h>
27 #include <linux/errno.h>
28 #include <linux/list.h>
29 #include <linux/workqueue.h>
30 #include <linux/interrupt.h>
31 #include <linux/platform_device.h>
32 #include <linux/io.h>
33 #include <linux/clk.h>
34 #include <linux/dmaengine.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/sh_dma.h>
37 #include <linux/spi/spi.h>
38 #include <linux/spi/rspi.h>
39 
40 #define RSPI_SPCR		0x00
41 #define RSPI_SSLP		0x01
42 #define RSPI_SPPCR		0x02
43 #define RSPI_SPSR		0x03
44 #define RSPI_SPDR		0x04
45 #define RSPI_SPSCR		0x08
46 #define RSPI_SPSSR		0x09
47 #define RSPI_SPBR		0x0a
48 #define RSPI_SPDCR		0x0b
49 #define RSPI_SPCKD		0x0c
50 #define RSPI_SSLND		0x0d
51 #define RSPI_SPND		0x0e
52 #define RSPI_SPCR2		0x0f
53 #define RSPI_SPCMD0		0x10
54 #define RSPI_SPCMD1		0x12
55 #define RSPI_SPCMD2		0x14
56 #define RSPI_SPCMD3		0x16
57 #define RSPI_SPCMD4		0x18
58 #define RSPI_SPCMD5		0x1a
59 #define RSPI_SPCMD6		0x1c
60 #define RSPI_SPCMD7		0x1e
61 
62 /*qspi only */
63 #define QSPI_SPBFCR		0x18
64 #define QSPI_SPBDCR		0x1a
65 #define QSPI_SPBMUL0		0x1c
66 #define QSPI_SPBMUL1		0x20
67 #define QSPI_SPBMUL2		0x24
68 #define QSPI_SPBMUL3		0x28
69 
70 /* SPCR */
71 #define SPCR_SPRIE		0x80
72 #define SPCR_SPE		0x40
73 #define SPCR_SPTIE		0x20
74 #define SPCR_SPEIE		0x10
75 #define SPCR_MSTR		0x08
76 #define SPCR_MODFEN		0x04
77 #define SPCR_TXMD		0x02
78 #define SPCR_SPMS		0x01
79 
80 /* SSLP */
81 #define SSLP_SSL1P		0x02
82 #define SSLP_SSL0P		0x01
83 
84 /* SPPCR */
85 #define SPPCR_MOIFE		0x20
86 #define SPPCR_MOIFV		0x10
87 #define SPPCR_SPOM		0x04
88 #define SPPCR_SPLP2		0x02
89 #define SPPCR_SPLP		0x01
90 
91 /* SPSR */
92 #define SPSR_SPRF		0x80
93 #define SPSR_SPTEF		0x20
94 #define SPSR_PERF		0x08
95 #define SPSR_MODF		0x04
96 #define SPSR_IDLNF		0x02
97 #define SPSR_OVRF		0x01
98 
99 /* SPSCR */
100 #define SPSCR_SPSLN_MASK	0x07
101 
102 /* SPSSR */
103 #define SPSSR_SPECM_MASK	0x70
104 #define SPSSR_SPCP_MASK		0x07
105 
106 /* SPDCR */
107 #define SPDCR_SPLW		0x20
108 #define SPDCR_SPRDTD		0x10
109 #define SPDCR_SLSEL1		0x08
110 #define SPDCR_SLSEL0		0x04
111 #define SPDCR_SLSEL_MASK	0x0c
112 #define SPDCR_SPFC1		0x02
113 #define SPDCR_SPFC0		0x01
114 
115 /* SPCKD */
116 #define SPCKD_SCKDL_MASK	0x07
117 
118 /* SSLND */
119 #define SSLND_SLNDL_MASK	0x07
120 
121 /* SPND */
122 #define SPND_SPNDL_MASK		0x07
123 
124 /* SPCR2 */
125 #define SPCR2_PTE		0x08
126 #define SPCR2_SPIE		0x04
127 #define SPCR2_SPOE		0x02
128 #define SPCR2_SPPE		0x01
129 
130 /* SPCMDn */
131 #define SPCMD_SCKDEN		0x8000
132 #define SPCMD_SLNDEN		0x4000
133 #define SPCMD_SPNDEN		0x2000
134 #define SPCMD_LSBF		0x1000
135 #define SPCMD_SPB_MASK		0x0f00
136 #define SPCMD_SPB_8_TO_16(bit)	(((bit - 1) << 8) & SPCMD_SPB_MASK)
137 #define SPCMD_SPB_8BIT		0x0000	/* qspi only */
138 #define SPCMD_SPB_16BIT		0x0100
139 #define SPCMD_SPB_20BIT		0x0000
140 #define SPCMD_SPB_24BIT		0x0100
141 #define SPCMD_SPB_32BIT		0x0200
142 #define SPCMD_SSLKP		0x0080
143 #define SPCMD_SSLA_MASK		0x0030
144 #define SPCMD_BRDV_MASK		0x000c
145 #define SPCMD_CPOL		0x0002
146 #define SPCMD_CPHA		0x0001
147 
148 /* SPBFCR */
149 #define SPBFCR_TXRST		0x80	/* qspi only */
150 #define SPBFCR_RXRST		0x40	/* qspi only */
151 
152 struct rspi_data {
153 	void __iomem *addr;
154 	u32 max_speed_hz;
155 	struct spi_master *master;
156 	struct list_head queue;
157 	struct work_struct ws;
158 	wait_queue_head_t wait;
159 	spinlock_t lock;
160 	struct clk *clk;
161 	unsigned char spsr;
162 	const struct spi_ops *ops;
163 
164 	/* for dmaengine */
165 	struct dma_chan *chan_tx;
166 	struct dma_chan *chan_rx;
167 	int irq;
168 
169 	unsigned dma_width_16bit:1;
170 	unsigned dma_callbacked:1;
171 };
172 
173 static void rspi_write8(struct rspi_data *rspi, u8 data, u16 offset)
174 {
175 	iowrite8(data, rspi->addr + offset);
176 }
177 
178 static void rspi_write16(struct rspi_data *rspi, u16 data, u16 offset)
179 {
180 	iowrite16(data, rspi->addr + offset);
181 }
182 
183 static void rspi_write32(struct rspi_data *rspi, u32 data, u16 offset)
184 {
185 	iowrite32(data, rspi->addr + offset);
186 }
187 
188 static u8 rspi_read8(struct rspi_data *rspi, u16 offset)
189 {
190 	return ioread8(rspi->addr + offset);
191 }
192 
193 static u16 rspi_read16(struct rspi_data *rspi, u16 offset)
194 {
195 	return ioread16(rspi->addr + offset);
196 }
197 
198 /* optional functions */
199 struct spi_ops {
200 	int (*set_config_register)(struct rspi_data *rspi, int access_size);
201 	int (*send_pio)(struct rspi_data *rspi, struct spi_message *mesg,
202 			struct spi_transfer *t);
203 	int (*receive_pio)(struct rspi_data *rspi, struct spi_message *mesg,
204 			   struct spi_transfer *t);
205 
206 };
207 
208 /*
209  * functions for RSPI
210  */
211 static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
212 {
213 	int spbr;
214 
215 	/* Sets output mode(CMOS) and MOSI signal(from previous transfer) */
216 	rspi_write8(rspi, 0x00, RSPI_SPPCR);
217 
218 	/* Sets transfer bit rate */
219 	spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1;
220 	rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
221 
222 	/* Sets number of frames to be used: 1 frame */
223 	rspi_write8(rspi, 0x00, RSPI_SPDCR);
224 
225 	/* Sets RSPCK, SSL, next-access delay value */
226 	rspi_write8(rspi, 0x00, RSPI_SPCKD);
227 	rspi_write8(rspi, 0x00, RSPI_SSLND);
228 	rspi_write8(rspi, 0x00, RSPI_SPND);
229 
230 	/* Sets parity, interrupt mask */
231 	rspi_write8(rspi, 0x00, RSPI_SPCR2);
232 
233 	/* Sets SPCMD */
234 	rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | SPCMD_SSLKP,
235 		     RSPI_SPCMD0);
236 
237 	/* Sets RSPI mode */
238 	rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
239 
240 	return 0;
241 }
242 
243 /*
244  * functions for QSPI
245  */
246 static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
247 {
248 	u16 spcmd;
249 	int spbr;
250 
251 	/* Sets output mode(CMOS) and MOSI signal(from previous transfer) */
252 	rspi_write8(rspi, 0x00, RSPI_SPPCR);
253 
254 	/* Sets transfer bit rate */
255 	spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz);
256 	rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
257 
258 	/* Sets number of frames to be used: 1 frame */
259 	rspi_write8(rspi, 0x00, RSPI_SPDCR);
260 
261 	/* Sets RSPCK, SSL, next-access delay value */
262 	rspi_write8(rspi, 0x00, RSPI_SPCKD);
263 	rspi_write8(rspi, 0x00, RSPI_SSLND);
264 	rspi_write8(rspi, 0x00, RSPI_SPND);
265 
266 	/* Data Length Setting */
267 	if (access_size == 8)
268 		spcmd = SPCMD_SPB_8BIT;
269 	else if (access_size == 16)
270 		spcmd = SPCMD_SPB_16BIT;
271 	else if (access_size == 32)
272 		spcmd = SPCMD_SPB_32BIT;
273 
274 	spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | SPCMD_SSLKP | SPCMD_SPNDEN;
275 
276 	/* Resets transfer data length */
277 	rspi_write32(rspi, 0, QSPI_SPBMUL0);
278 
279 	/* Resets transmit and receive buffer */
280 	rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
281 	/* Sets buffer to allow normal operation */
282 	rspi_write8(rspi, 0x00, QSPI_SPBFCR);
283 
284 	/* Sets SPCMD */
285 	rspi_write16(rspi, spcmd, RSPI_SPCMD0);
286 
287 	/* Enables SPI function in a master mode */
288 	rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR);
289 
290 	return 0;
291 }
292 
293 #define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
294 
295 static void rspi_enable_irq(struct rspi_data *rspi, u8 enable)
296 {
297 	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
298 }
299 
300 static void rspi_disable_irq(struct rspi_data *rspi, u8 disable)
301 {
302 	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
303 }
304 
305 static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
306 				   u8 enable_bit)
307 {
308 	int ret;
309 
310 	rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
311 	rspi_enable_irq(rspi, enable_bit);
312 	ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
313 	if (ret == 0 && !(rspi->spsr & wait_mask))
314 		return -ETIMEDOUT;
315 
316 	return 0;
317 }
318 
319 static void rspi_assert_ssl(struct rspi_data *rspi)
320 {
321 	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
322 }
323 
324 static void rspi_negate_ssl(struct rspi_data *rspi)
325 {
326 	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
327 }
328 
329 static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
330 			 struct spi_transfer *t)
331 {
332 	int remain = t->len;
333 	u8 *data;
334 
335 	data = (u8 *)t->tx_buf;
336 	while (remain > 0) {
337 		rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD,
338 			    RSPI_SPCR);
339 
340 		if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
341 			dev_err(&rspi->master->dev,
342 				"%s: tx empty timeout\n", __func__);
343 			return -ETIMEDOUT;
344 		}
345 
346 		rspi_write16(rspi, *data, RSPI_SPDR);
347 		data++;
348 		remain--;
349 	}
350 
351 	/* Waiting for the last transmition */
352 	rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
353 
354 	return 0;
355 }
356 
357 static int qspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
358 			 struct spi_transfer *t)
359 {
360 	int remain = t->len;
361 	u8 *data;
362 
363 	rspi_write8(rspi, SPBFCR_TXRST, QSPI_SPBFCR);
364 	rspi_write8(rspi, 0x00, QSPI_SPBFCR);
365 
366 	data = (u8 *)t->tx_buf;
367 	while (remain > 0) {
368 
369 		if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
370 			dev_err(&rspi->master->dev,
371 				"%s: tx empty timeout\n", __func__);
372 			return -ETIMEDOUT;
373 		}
374 		rspi_write8(rspi, *data++, RSPI_SPDR);
375 
376 		if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
377 			dev_err(&rspi->master->dev,
378 				"%s: receive timeout\n", __func__);
379 			return -ETIMEDOUT;
380 		}
381 		rspi_read8(rspi, RSPI_SPDR);
382 
383 		remain--;
384 	}
385 
386 	/* Waiting for the last transmition */
387 	rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
388 
389 	return 0;
390 }
391 
392 #define send_pio(spi, mesg, t) spi->ops->send_pio(spi, mesg, t)
393 
394 static void rspi_dma_complete(void *arg)
395 {
396 	struct rspi_data *rspi = arg;
397 
398 	rspi->dma_callbacked = 1;
399 	wake_up_interruptible(&rspi->wait);
400 }
401 
402 static int rspi_dma_map_sg(struct scatterlist *sg, void *buf, unsigned len,
403 			   struct dma_chan *chan,
404 			   enum dma_transfer_direction dir)
405 {
406 	sg_init_table(sg, 1);
407 	sg_set_buf(sg, buf, len);
408 	sg_dma_len(sg) = len;
409 	return dma_map_sg(chan->device->dev, sg, 1, dir);
410 }
411 
412 static void rspi_dma_unmap_sg(struct scatterlist *sg, struct dma_chan *chan,
413 			      enum dma_transfer_direction dir)
414 {
415 	dma_unmap_sg(chan->device->dev, sg, 1, dir);
416 }
417 
418 static void rspi_memory_to_8bit(void *buf, const void *data, unsigned len)
419 {
420 	u16 *dst = buf;
421 	const u8 *src = data;
422 
423 	while (len) {
424 		*dst++ = (u16)(*src++);
425 		len--;
426 	}
427 }
428 
429 static void rspi_memory_from_8bit(void *buf, const void *data, unsigned len)
430 {
431 	u8 *dst = buf;
432 	const u16 *src = data;
433 
434 	while (len) {
435 		*dst++ = (u8)*src++;
436 		len--;
437 	}
438 }
439 
440 static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
441 {
442 	struct scatterlist sg;
443 	void *buf = NULL;
444 	struct dma_async_tx_descriptor *desc;
445 	unsigned len;
446 	int ret = 0;
447 
448 	if (rspi->dma_width_16bit) {
449 		/*
450 		 * If DMAC bus width is 16-bit, the driver allocates a dummy
451 		 * buffer. And, the driver converts original data into the
452 		 * DMAC data as the following format:
453 		 *  original data: 1st byte, 2nd byte ...
454 		 *  DMAC data:     1st byte, dummy, 2nd byte, dummy ...
455 		 */
456 		len = t->len * 2;
457 		buf = kmalloc(len, GFP_KERNEL);
458 		if (!buf)
459 			return -ENOMEM;
460 		rspi_memory_to_8bit(buf, t->tx_buf, t->len);
461 	} else {
462 		len = t->len;
463 		buf = (void *)t->tx_buf;
464 	}
465 
466 	if (!rspi_dma_map_sg(&sg, buf, len, rspi->chan_tx, DMA_TO_DEVICE)) {
467 		ret = -EFAULT;
468 		goto end_nomap;
469 	}
470 	desc = dmaengine_prep_slave_sg(rspi->chan_tx, &sg, 1, DMA_TO_DEVICE,
471 				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
472 	if (!desc) {
473 		ret = -EIO;
474 		goto end;
475 	}
476 
477 	/*
478 	 * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
479 	 * called. So, this driver disables the IRQ while DMA transfer.
480 	 */
481 	disable_irq(rspi->irq);
482 
483 	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD, RSPI_SPCR);
484 	rspi_enable_irq(rspi, SPCR_SPTIE);
485 	rspi->dma_callbacked = 0;
486 
487 	desc->callback = rspi_dma_complete;
488 	desc->callback_param = rspi;
489 	dmaengine_submit(desc);
490 	dma_async_issue_pending(rspi->chan_tx);
491 
492 	ret = wait_event_interruptible_timeout(rspi->wait,
493 					       rspi->dma_callbacked, HZ);
494 	if (ret > 0 && rspi->dma_callbacked)
495 		ret = 0;
496 	else if (!ret)
497 		ret = -ETIMEDOUT;
498 	rspi_disable_irq(rspi, SPCR_SPTIE);
499 
500 	enable_irq(rspi->irq);
501 
502 end:
503 	rspi_dma_unmap_sg(&sg, rspi->chan_tx, DMA_TO_DEVICE);
504 end_nomap:
505 	if (rspi->dma_width_16bit)
506 		kfree(buf);
507 
508 	return ret;
509 }
510 
511 static void rspi_receive_init(struct rspi_data *rspi)
512 {
513 	unsigned char spsr;
514 
515 	spsr = rspi_read8(rspi, RSPI_SPSR);
516 	if (spsr & SPSR_SPRF)
517 		rspi_read16(rspi, RSPI_SPDR);	/* dummy read */
518 	if (spsr & SPSR_OVRF)
519 		rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
520 			    RSPI_SPCR);
521 }
522 
523 static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
524 			    struct spi_transfer *t)
525 {
526 	int remain = t->len;
527 	u8 *data;
528 
529 	rspi_receive_init(rspi);
530 
531 	data = (u8 *)t->rx_buf;
532 	while (remain > 0) {
533 		rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD,
534 			    RSPI_SPCR);
535 
536 		if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
537 			dev_err(&rspi->master->dev,
538 				"%s: tx empty timeout\n", __func__);
539 			return -ETIMEDOUT;
540 		}
541 		/* dummy write for generate clock */
542 		rspi_write16(rspi, 0x00, RSPI_SPDR);
543 
544 		if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
545 			dev_err(&rspi->master->dev,
546 				"%s: receive timeout\n", __func__);
547 			return -ETIMEDOUT;
548 		}
549 		/* SPDR allows 16 or 32-bit access only */
550 		*data = (u8)rspi_read16(rspi, RSPI_SPDR);
551 
552 		data++;
553 		remain--;
554 	}
555 
556 	return 0;
557 }
558 
559 static void qspi_receive_init(struct rspi_data *rspi)
560 {
561 	unsigned char spsr;
562 
563 	spsr = rspi_read8(rspi, RSPI_SPSR);
564 	if (spsr & SPSR_SPRF)
565 		rspi_read8(rspi, RSPI_SPDR);   /* dummy read */
566 	rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
567 	rspi_write8(rspi, 0x00, QSPI_SPBFCR);
568 }
569 
570 static int qspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
571 			    struct spi_transfer *t)
572 {
573 	int remain = t->len;
574 	u8 *data;
575 
576 	qspi_receive_init(rspi);
577 
578 	data = (u8 *)t->rx_buf;
579 	while (remain > 0) {
580 
581 		if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
582 			dev_err(&rspi->master->dev,
583 				"%s: tx empty timeout\n", __func__);
584 			return -ETIMEDOUT;
585 		}
586 		/* dummy write for generate clock */
587 		rspi_write8(rspi, 0x00, RSPI_SPDR);
588 
589 		if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
590 			dev_err(&rspi->master->dev,
591 				"%s: receive timeout\n", __func__);
592 			return -ETIMEDOUT;
593 		}
594 		/* SPDR allows 8, 16 or 32-bit access */
595 		*data++ = rspi_read8(rspi, RSPI_SPDR);
596 		remain--;
597 	}
598 
599 	return 0;
600 }
601 
602 #define receive_pio(spi, mesg, t) spi->ops->receive_pio(spi, mesg, t)
603 
604 static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
605 {
606 	struct scatterlist sg, sg_dummy;
607 	void *dummy = NULL, *rx_buf = NULL;
608 	struct dma_async_tx_descriptor *desc, *desc_dummy;
609 	unsigned len;
610 	int ret = 0;
611 
612 	if (rspi->dma_width_16bit) {
613 		/*
614 		 * If DMAC bus width is 16-bit, the driver allocates a dummy
615 		 * buffer. And, finally the driver converts the DMAC data into
616 		 * actual data as the following format:
617 		 *  DMAC data:   1st byte, dummy, 2nd byte, dummy ...
618 		 *  actual data: 1st byte, 2nd byte ...
619 		 */
620 		len = t->len * 2;
621 		rx_buf = kmalloc(len, GFP_KERNEL);
622 		if (!rx_buf)
623 			return -ENOMEM;
624 	 } else {
625 		len = t->len;
626 		rx_buf = t->rx_buf;
627 	}
628 
629 	/* prepare dummy transfer to generate SPI clocks */
630 	dummy = kzalloc(len, GFP_KERNEL);
631 	if (!dummy) {
632 		ret = -ENOMEM;
633 		goto end_nomap;
634 	}
635 	if (!rspi_dma_map_sg(&sg_dummy, dummy, len, rspi->chan_tx,
636 			     DMA_TO_DEVICE)) {
637 		ret = -EFAULT;
638 		goto end_nomap;
639 	}
640 	desc_dummy = dmaengine_prep_slave_sg(rspi->chan_tx, &sg_dummy, 1,
641 			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
642 	if (!desc_dummy) {
643 		ret = -EIO;
644 		goto end_dummy_mapped;
645 	}
646 
647 	/* prepare receive transfer */
648 	if (!rspi_dma_map_sg(&sg, rx_buf, len, rspi->chan_rx,
649 			     DMA_FROM_DEVICE)) {
650 		ret = -EFAULT;
651 		goto end_dummy_mapped;
652 
653 	}
654 	desc = dmaengine_prep_slave_sg(rspi->chan_rx, &sg, 1, DMA_FROM_DEVICE,
655 				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
656 	if (!desc) {
657 		ret = -EIO;
658 		goto end;
659 	}
660 
661 	rspi_receive_init(rspi);
662 
663 	/*
664 	 * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
665 	 * called. So, this driver disables the IRQ while DMA transfer.
666 	 */
667 	disable_irq(rspi->irq);
668 
669 	rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD, RSPI_SPCR);
670 	rspi_enable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
671 	rspi->dma_callbacked = 0;
672 
673 	desc->callback = rspi_dma_complete;
674 	desc->callback_param = rspi;
675 	dmaengine_submit(desc);
676 	dma_async_issue_pending(rspi->chan_rx);
677 
678 	desc_dummy->callback = NULL;	/* No callback */
679 	dmaengine_submit(desc_dummy);
680 	dma_async_issue_pending(rspi->chan_tx);
681 
682 	ret = wait_event_interruptible_timeout(rspi->wait,
683 					       rspi->dma_callbacked, HZ);
684 	if (ret > 0 && rspi->dma_callbacked)
685 		ret = 0;
686 	else if (!ret)
687 		ret = -ETIMEDOUT;
688 	rspi_disable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
689 
690 	enable_irq(rspi->irq);
691 
692 end:
693 	rspi_dma_unmap_sg(&sg, rspi->chan_rx, DMA_FROM_DEVICE);
694 end_dummy_mapped:
695 	rspi_dma_unmap_sg(&sg_dummy, rspi->chan_tx, DMA_TO_DEVICE);
696 end_nomap:
697 	if (rspi->dma_width_16bit) {
698 		if (!ret)
699 			rspi_memory_from_8bit(t->rx_buf, rx_buf, t->len);
700 		kfree(rx_buf);
701 	}
702 	kfree(dummy);
703 
704 	return ret;
705 }
706 
707 static int rspi_is_dma(struct rspi_data *rspi, struct spi_transfer *t)
708 {
709 	if (t->tx_buf && rspi->chan_tx)
710 		return 1;
711 	/* If the module receives data by DMAC, it also needs TX DMAC */
712 	if (t->rx_buf && rspi->chan_tx && rspi->chan_rx)
713 		return 1;
714 
715 	return 0;
716 }
717 
718 static void rspi_work(struct work_struct *work)
719 {
720 	struct rspi_data *rspi = container_of(work, struct rspi_data, ws);
721 	struct spi_message *mesg;
722 	struct spi_transfer *t;
723 	unsigned long flags;
724 	int ret;
725 
726 	while (1) {
727 		spin_lock_irqsave(&rspi->lock, flags);
728 		if (list_empty(&rspi->queue)) {
729 			spin_unlock_irqrestore(&rspi->lock, flags);
730 			break;
731 		}
732 		mesg = list_entry(rspi->queue.next, struct spi_message, queue);
733 		list_del_init(&mesg->queue);
734 		spin_unlock_irqrestore(&rspi->lock, flags);
735 
736 		rspi_assert_ssl(rspi);
737 
738 		list_for_each_entry(t, &mesg->transfers, transfer_list) {
739 			if (t->tx_buf) {
740 				if (rspi_is_dma(rspi, t))
741 					ret = rspi_send_dma(rspi, t);
742 				else
743 					ret = send_pio(rspi, mesg, t);
744 				if (ret < 0)
745 					goto error;
746 			}
747 			if (t->rx_buf) {
748 				if (rspi_is_dma(rspi, t))
749 					ret = rspi_receive_dma(rspi, t);
750 				else
751 					ret = receive_pio(rspi, mesg, t);
752 				if (ret < 0)
753 					goto error;
754 			}
755 			mesg->actual_length += t->len;
756 		}
757 		rspi_negate_ssl(rspi);
758 
759 		mesg->status = 0;
760 		mesg->complete(mesg->context);
761 	}
762 
763 	return;
764 
765 error:
766 	mesg->status = ret;
767 	mesg->complete(mesg->context);
768 }
769 
770 static int rspi_setup(struct spi_device *spi)
771 {
772 	struct rspi_data *rspi = spi_master_get_devdata(spi->master);
773 
774 	if (!spi->bits_per_word)
775 		spi->bits_per_word = 8;
776 	rspi->max_speed_hz = spi->max_speed_hz;
777 
778 	set_config_register(rspi, 8);
779 
780 	return 0;
781 }
782 
783 static int rspi_transfer(struct spi_device *spi, struct spi_message *mesg)
784 {
785 	struct rspi_data *rspi = spi_master_get_devdata(spi->master);
786 	unsigned long flags;
787 
788 	mesg->actual_length = 0;
789 	mesg->status = -EINPROGRESS;
790 
791 	spin_lock_irqsave(&rspi->lock, flags);
792 	list_add_tail(&mesg->queue, &rspi->queue);
793 	schedule_work(&rspi->ws);
794 	spin_unlock_irqrestore(&rspi->lock, flags);
795 
796 	return 0;
797 }
798 
799 static void rspi_cleanup(struct spi_device *spi)
800 {
801 }
802 
803 static irqreturn_t rspi_irq(int irq, void *_sr)
804 {
805 	struct rspi_data *rspi = (struct rspi_data *)_sr;
806 	unsigned long spsr;
807 	irqreturn_t ret = IRQ_NONE;
808 	unsigned char disable_irq = 0;
809 
810 	rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
811 	if (spsr & SPSR_SPRF)
812 		disable_irq |= SPCR_SPRIE;
813 	if (spsr & SPSR_SPTEF)
814 		disable_irq |= SPCR_SPTIE;
815 
816 	if (disable_irq) {
817 		ret = IRQ_HANDLED;
818 		rspi_disable_irq(rspi, disable_irq);
819 		wake_up(&rspi->wait);
820 	}
821 
822 	return ret;
823 }
824 
825 static int rspi_request_dma(struct rspi_data *rspi,
826 				      struct platform_device *pdev)
827 {
828 	struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
829 	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
830 	dma_cap_mask_t mask;
831 	struct dma_slave_config cfg;
832 	int ret;
833 
834 	if (!res || !rspi_pd)
835 		return 0;	/* The driver assumes no error. */
836 
837 	rspi->dma_width_16bit = rspi_pd->dma_width_16bit;
838 
839 	/* If the module receives data by DMAC, it also needs TX DMAC */
840 	if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) {
841 		dma_cap_zero(mask);
842 		dma_cap_set(DMA_SLAVE, mask);
843 		rspi->chan_rx = dma_request_channel(mask, shdma_chan_filter,
844 						    (void *)rspi_pd->dma_rx_id);
845 		if (rspi->chan_rx) {
846 			cfg.slave_id = rspi_pd->dma_rx_id;
847 			cfg.direction = DMA_DEV_TO_MEM;
848 			cfg.dst_addr = 0;
849 			cfg.src_addr = res->start + RSPI_SPDR;
850 			ret = dmaengine_slave_config(rspi->chan_rx, &cfg);
851 			if (!ret)
852 				dev_info(&pdev->dev, "Use DMA when rx.\n");
853 			else
854 				return ret;
855 		}
856 	}
857 	if (rspi_pd->dma_tx_id) {
858 		dma_cap_zero(mask);
859 		dma_cap_set(DMA_SLAVE, mask);
860 		rspi->chan_tx = dma_request_channel(mask, shdma_chan_filter,
861 						    (void *)rspi_pd->dma_tx_id);
862 		if (rspi->chan_tx) {
863 			cfg.slave_id = rspi_pd->dma_tx_id;
864 			cfg.direction = DMA_MEM_TO_DEV;
865 			cfg.dst_addr = res->start + RSPI_SPDR;
866 			cfg.src_addr = 0;
867 			ret = dmaengine_slave_config(rspi->chan_tx, &cfg);
868 			if (!ret)
869 				dev_info(&pdev->dev, "Use DMA when tx\n");
870 			else
871 				return ret;
872 		}
873 	}
874 
875 	return 0;
876 }
877 
878 static void rspi_release_dma(struct rspi_data *rspi)
879 {
880 	if (rspi->chan_tx)
881 		dma_release_channel(rspi->chan_tx);
882 	if (rspi->chan_rx)
883 		dma_release_channel(rspi->chan_rx);
884 }
885 
886 static int rspi_remove(struct platform_device *pdev)
887 {
888 	struct rspi_data *rspi = platform_get_drvdata(pdev);
889 
890 	spi_unregister_master(rspi->master);
891 	rspi_release_dma(rspi);
892 	free_irq(platform_get_irq(pdev, 0), rspi);
893 	clk_put(rspi->clk);
894 	iounmap(rspi->addr);
895 
896 	return 0;
897 }
898 
899 static int rspi_probe(struct platform_device *pdev)
900 {
901 	struct resource *res;
902 	struct spi_master *master;
903 	struct rspi_data *rspi;
904 	int ret, irq;
905 	char clk_name[16];
906 	struct rspi_plat_data *rspi_pd = pdev->dev.platform_data;
907 	const struct spi_ops *ops;
908 	const struct platform_device_id *id_entry = pdev->id_entry;
909 
910 	ops = (struct spi_ops *)id_entry->driver_data;
911 	/* ops parameter check */
912 	if (!ops->set_config_register) {
913 		dev_err(&pdev->dev, "there is no set_config_register\n");
914 		return -ENODEV;
915 	}
916 	/* get base addr */
917 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
918 	if (unlikely(res == NULL)) {
919 		dev_err(&pdev->dev, "invalid resource\n");
920 		return -EINVAL;
921 	}
922 
923 	irq = platform_get_irq(pdev, 0);
924 	if (irq < 0) {
925 		dev_err(&pdev->dev, "platform_get_irq error\n");
926 		return -ENODEV;
927 	}
928 
929 	master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
930 	if (master == NULL) {
931 		dev_err(&pdev->dev, "spi_alloc_master error.\n");
932 		return -ENOMEM;
933 	}
934 
935 	rspi = spi_master_get_devdata(master);
936 	platform_set_drvdata(pdev, rspi);
937 	rspi->ops = ops;
938 	rspi->master = master;
939 	rspi->addr = ioremap(res->start, resource_size(res));
940 	if (rspi->addr == NULL) {
941 		dev_err(&pdev->dev, "ioremap error.\n");
942 		ret = -ENOMEM;
943 		goto error1;
944 	}
945 
946 	snprintf(clk_name, sizeof(clk_name), "%s%d", id_entry->name, pdev->id);
947 	rspi->clk = clk_get(&pdev->dev, clk_name);
948 	if (IS_ERR(rspi->clk)) {
949 		dev_err(&pdev->dev, "cannot get clock\n");
950 		ret = PTR_ERR(rspi->clk);
951 		goto error2;
952 	}
953 	clk_enable(rspi->clk);
954 
955 	INIT_LIST_HEAD(&rspi->queue);
956 	spin_lock_init(&rspi->lock);
957 	INIT_WORK(&rspi->ws, rspi_work);
958 	init_waitqueue_head(&rspi->wait);
959 
960 	master->num_chipselect = rspi_pd->num_chipselect;
961 	if (!master->num_chipselect)
962 		master->num_chipselect = 2; /* default */
963 
964 	master->bus_num = pdev->id;
965 	master->setup = rspi_setup;
966 	master->transfer = rspi_transfer;
967 	master->cleanup = rspi_cleanup;
968 
969 	ret = request_irq(irq, rspi_irq, 0, dev_name(&pdev->dev), rspi);
970 	if (ret < 0) {
971 		dev_err(&pdev->dev, "request_irq error\n");
972 		goto error3;
973 	}
974 
975 	rspi->irq = irq;
976 	ret = rspi_request_dma(rspi, pdev);
977 	if (ret < 0) {
978 		dev_err(&pdev->dev, "rspi_request_dma failed.\n");
979 		goto error4;
980 	}
981 
982 	ret = spi_register_master(master);
983 	if (ret < 0) {
984 		dev_err(&pdev->dev, "spi_register_master error.\n");
985 		goto error4;
986 	}
987 
988 	dev_info(&pdev->dev, "probed\n");
989 
990 	return 0;
991 
992 error4:
993 	rspi_release_dma(rspi);
994 	free_irq(irq, rspi);
995 error3:
996 	clk_put(rspi->clk);
997 error2:
998 	iounmap(rspi->addr);
999 error1:
1000 	spi_master_put(master);
1001 
1002 	return ret;
1003 }
1004 
1005 static struct spi_ops rspi_ops = {
1006 	.set_config_register =		rspi_set_config_register,
1007 	.send_pio =			rspi_send_pio,
1008 	.receive_pio =			rspi_receive_pio,
1009 };
1010 
1011 static struct spi_ops qspi_ops = {
1012 	.set_config_register =		qspi_set_config_register,
1013 	.send_pio =			qspi_send_pio,
1014 	.receive_pio =			qspi_receive_pio,
1015 };
1016 
1017 static struct platform_device_id spi_driver_ids[] = {
1018 	{ "rspi",	(kernel_ulong_t)&rspi_ops },
1019 	{ "qspi",	(kernel_ulong_t)&qspi_ops },
1020 	{},
1021 };
1022 
1023 MODULE_DEVICE_TABLE(platform, spi_driver_ids);
1024 
1025 static struct platform_driver rspi_driver = {
1026 	.probe =	rspi_probe,
1027 	.remove =	rspi_remove,
1028 	.id_table =	spi_driver_ids,
1029 	.driver		= {
1030 		.name = "renesas_spi",
1031 		.owner	= THIS_MODULE,
1032 	},
1033 };
1034 module_platform_driver(rspi_driver);
1035 
1036 MODULE_DESCRIPTION("Renesas RSPI bus driver");
1037 MODULE_LICENSE("GPL v2");
1038 MODULE_AUTHOR("Yoshihiro Shimoda");
1039 MODULE_ALIAS("platform:rspi");
1040