xref: /linux/drivers/spi/spi-omap2-mcspi.c (revision 02adc1490e6d8681cc81057ed86d123d0240909b)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * OMAP2 McSPI controller driver
4  *
5  * Copyright (C) 2005, 2006 Nokia Corporation
6  * Author:	Samuel Ortiz <samuel.ortiz@nokia.com> and
7  *		Juha Yrjola <juha.yrjola@nokia.com>
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/interrupt.h>
12 #include <linux/module.h>
13 #include <linux/device.h>
14 #include <linux/delay.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/dmaengine.h>
17 #include <linux/pinctrl/consumer.h>
18 #include <linux/platform_device.h>
19 #include <linux/err.h>
20 #include <linux/clk.h>
21 #include <linux/io.h>
22 #include <linux/slab.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/of.h>
25 #include <linux/of_device.h>
26 #include <linux/gcd.h>
27 
28 #include <linux/spi/spi.h>
29 
30 #include "internals.h"
31 
32 #include <linux/platform_data/spi-omap2-mcspi.h>
33 
34 #define OMAP2_MCSPI_MAX_FREQ		48000000
35 #define OMAP2_MCSPI_MAX_DIVIDER		4096
36 #define OMAP2_MCSPI_MAX_FIFODEPTH	64
37 #define OMAP2_MCSPI_MAX_FIFOWCNT	0xFFFF
38 #define SPI_AUTOSUSPEND_TIMEOUT		2000
39 
40 #define OMAP2_MCSPI_REVISION		0x00
41 #define OMAP2_MCSPI_SYSSTATUS		0x14
42 #define OMAP2_MCSPI_IRQSTATUS		0x18
43 #define OMAP2_MCSPI_IRQENABLE		0x1c
44 #define OMAP2_MCSPI_WAKEUPENABLE	0x20
45 #define OMAP2_MCSPI_SYST		0x24
46 #define OMAP2_MCSPI_MODULCTRL		0x28
47 #define OMAP2_MCSPI_XFERLEVEL		0x7c
48 
49 /* per-channel banks, 0x14 bytes each, first is: */
50 #define OMAP2_MCSPI_CHCONF0		0x2c
51 #define OMAP2_MCSPI_CHSTAT0		0x30
52 #define OMAP2_MCSPI_CHCTRL0		0x34
53 #define OMAP2_MCSPI_TX0			0x38
54 #define OMAP2_MCSPI_RX0			0x3c
55 
56 /* per-register bitmasks: */
57 #define OMAP2_MCSPI_IRQSTATUS_EOW	BIT(17)
58 
59 #define OMAP2_MCSPI_MODULCTRL_SINGLE	BIT(0)
60 #define OMAP2_MCSPI_MODULCTRL_MS	BIT(2)
61 #define OMAP2_MCSPI_MODULCTRL_STEST	BIT(3)
62 
63 #define OMAP2_MCSPI_CHCONF_PHA		BIT(0)
64 #define OMAP2_MCSPI_CHCONF_POL		BIT(1)
65 #define OMAP2_MCSPI_CHCONF_CLKD_MASK	(0x0f << 2)
66 #define OMAP2_MCSPI_CHCONF_EPOL		BIT(6)
67 #define OMAP2_MCSPI_CHCONF_WL_MASK	(0x1f << 7)
68 #define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY	BIT(12)
69 #define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY	BIT(13)
70 #define OMAP2_MCSPI_CHCONF_TRM_MASK	(0x03 << 12)
71 #define OMAP2_MCSPI_CHCONF_DMAW		BIT(14)
72 #define OMAP2_MCSPI_CHCONF_DMAR		BIT(15)
73 #define OMAP2_MCSPI_CHCONF_DPE0		BIT(16)
74 #define OMAP2_MCSPI_CHCONF_DPE1		BIT(17)
75 #define OMAP2_MCSPI_CHCONF_IS		BIT(18)
76 #define OMAP2_MCSPI_CHCONF_TURBO	BIT(19)
77 #define OMAP2_MCSPI_CHCONF_FORCE	BIT(20)
78 #define OMAP2_MCSPI_CHCONF_FFET		BIT(27)
79 #define OMAP2_MCSPI_CHCONF_FFER		BIT(28)
80 #define OMAP2_MCSPI_CHCONF_CLKG		BIT(29)
81 
82 #define OMAP2_MCSPI_CHSTAT_RXS		BIT(0)
83 #define OMAP2_MCSPI_CHSTAT_TXS		BIT(1)
84 #define OMAP2_MCSPI_CHSTAT_EOT		BIT(2)
85 #define OMAP2_MCSPI_CHSTAT_TXFFE	BIT(3)
86 
87 #define OMAP2_MCSPI_CHCTRL_EN		BIT(0)
88 #define OMAP2_MCSPI_CHCTRL_EXTCLK_MASK	(0xff << 8)
89 
90 #define OMAP2_MCSPI_WAKEUPENABLE_WKEN	BIT(0)
91 
92 /* We have 2 DMA channels per CS, one for RX and one for TX */
93 struct omap2_mcspi_dma {
94 	struct dma_chan *dma_tx;
95 	struct dma_chan *dma_rx;
96 
97 	struct completion dma_tx_completion;
98 	struct completion dma_rx_completion;
99 
100 	char dma_rx_ch_name[14];
101 	char dma_tx_ch_name[14];
102 };
103 
104 /* use PIO for small transfers, avoiding DMA setup/teardown overhead and
105  * cache operations; better heuristics consider wordsize and bitrate.
106  */
107 #define DMA_MIN_BYTES			160
108 
109 
110 /*
111  * Used for context save and restore, structure members to be updated whenever
112  * corresponding registers are modified.
113  */
114 struct omap2_mcspi_regs {
115 	u32 modulctrl;
116 	u32 wakeupenable;
117 	struct list_head cs;
118 };
119 
120 struct omap2_mcspi {
121 	struct completion	txdone;
122 	struct spi_controller	*ctlr;
123 	/* Virtual base address of the controller */
124 	void __iomem		*base;
125 	unsigned long		phys;
126 	/* SPI1 has 4 channels, while SPI2 has 2 */
127 	struct omap2_mcspi_dma	*dma_channels;
128 	struct device		*dev;
129 	struct omap2_mcspi_regs ctx;
130 	struct clk		*ref_clk;
131 	int			fifo_depth;
132 	bool			target_aborted;
133 	unsigned int		pin_dir:1;
134 	size_t			max_xfer_len;
135 	u32			ref_clk_hz;
136 	bool			use_multi_mode;
137 	bool			last_msg_kept_cs;
138 };
139 
140 struct omap2_mcspi_cs {
141 	void __iomem		*base;
142 	unsigned long		phys;
143 	int			word_len;
144 	u16			mode;
145 	struct list_head	node;
146 	/* Context save and restore shadow register */
147 	u32			chconf0, chctrl0;
148 };
149 
/* Write @val to the controller-wide register at byte offset @idx. */
static inline void mcspi_write_reg(struct spi_controller *ctlr,
		int idx, u32 val)
{
	struct omap2_mcspi *priv = spi_controller_get_devdata(ctlr);

	writel_relaxed(val, priv->base + idx);
}
157 
mcspi_read_reg(struct spi_controller * ctlr,int idx)158 static inline u32 mcspi_read_reg(struct spi_controller *ctlr, int idx)
159 {
160 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
161 
162 	return readl_relaxed(mcspi->base + idx);
163 }
164 
/* Write @val to a per-chip-select channel register at offset @idx. */
static inline void mcspi_write_cs_reg(const struct spi_device *spi,
		int idx, u32 val)
{
	const struct omap2_mcspi_cs *cs = spi->controller_state;

	writel_relaxed(val, cs->base + idx);
}
172 
mcspi_read_cs_reg(const struct spi_device * spi,int idx)173 static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
174 {
175 	struct omap2_mcspi_cs	*cs = spi->controller_state;
176 
177 	return readl_relaxed(cs->base + idx);
178 }
179 
mcspi_cached_chconf0(const struct spi_device * spi)180 static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
181 {
182 	struct omap2_mcspi_cs *cs = spi->controller_state;
183 
184 	return cs->chconf0;
185 }
186 
/*
 * Program CHCONF0 and update the shadow copy used for context restore.
 * The read-back flushes the posted write before the caller proceeds.
 */
static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	cs->chconf0 = val;	/* keep the save/restore shadow in sync */
	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
}
195 
/* Map a word length in bits (1..32) to its memory footprint in bytes. */
static inline int mcspi_bytes_per_word(int word_len)
{
	if (word_len > 16)	/* 17..32 bits need a full 32-bit slot */
		return 4;

	return (word_len > 8) ? 2 : 1;
}
205 
omap2_mcspi_set_dma_req(const struct spi_device * spi,int is_read,int enable)206 static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
207 		int is_read, int enable)
208 {
209 	u32 l, rw;
210 
211 	l = mcspi_cached_chconf0(spi);
212 
213 	if (is_read) /* 1 is read, 0 write */
214 		rw = OMAP2_MCSPI_CHCONF_DMAR;
215 	else
216 		rw = OMAP2_MCSPI_CHCONF_DMAW;
217 
218 	if (enable)
219 		l |= rw;
220 	else
221 		l &= ~rw;
222 
223 	mcspi_write_chconf0(spi, l);
224 }
225 
omap2_mcspi_set_enable(const struct spi_device * spi,int enable)226 static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
227 {
228 	struct omap2_mcspi_cs *cs = spi->controller_state;
229 	u32 l;
230 
231 	l = cs->chctrl0;
232 	if (enable)
233 		l |= OMAP2_MCSPI_CHCTRL_EN;
234 	else
235 		l &= ~OMAP2_MCSPI_CHCTRL_EN;
236 	cs->chctrl0 = l;
237 	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
238 	/* Flash post-writes */
239 	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
240 }
241 
/* Assert or deassert the chip select through the CHCONF0 FORCE bit. */
static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
{
	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
	u32 conf;
	int err;

	/* The controller handles the inverted chip selects
	 * using the OMAP2_MCSPI_CHCONF_EPOL bit so revert
	 * the inversion from the core spi_set_cs function.
	 */
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (!spi->controller_state)
		return;

	err = pm_runtime_resume_and_get(mcspi->dev);
	if (err < 0) {
		dev_err(mcspi->dev, "failed to get sync: %d\n", err);
		return;
	}

	conf = mcspi_cached_chconf0(spi);

	/*
	 * Manual (FORCE) chip-select control only applies in single mode;
	 * in multi mode the controller drives CS itself.
	 */
	if (mcspi->use_multi_mode || enable)
		conf &= ~OMAP2_MCSPI_CHCONF_FORCE;
	else
		conf |= OMAP2_MCSPI_CHCONF_FORCE;

	mcspi_write_chconf0(spi, conf);

	pm_runtime_mark_last_busy(mcspi->dev);
	pm_runtime_put_autosuspend(mcspi->dev);
}
279 
omap2_mcspi_set_mode(struct spi_controller * ctlr)280 static void omap2_mcspi_set_mode(struct spi_controller *ctlr)
281 {
282 	struct omap2_mcspi	*mcspi = spi_controller_get_devdata(ctlr);
283 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
284 	u32 l;
285 
286 	/*
287 	 * Choose host or target mode
288 	 */
289 	l = mcspi_read_reg(ctlr, OMAP2_MCSPI_MODULCTRL);
290 	l &= ~(OMAP2_MCSPI_MODULCTRL_STEST);
291 	if (spi_controller_is_target(ctlr)) {
292 		l |= (OMAP2_MCSPI_MODULCTRL_MS);
293 	} else {
294 		l &= ~(OMAP2_MCSPI_MODULCTRL_MS);
295 
296 		/* Enable single mode if needed */
297 		if (mcspi->use_multi_mode)
298 			l &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
299 		else
300 			l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
301 	}
302 	mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, l);
303 
304 	ctx->modulctrl = l;
305 }
306 
omap2_mcspi_set_fifo(const struct spi_device * spi,struct spi_transfer * t,int enable)307 static void omap2_mcspi_set_fifo(const struct spi_device *spi,
308 				struct spi_transfer *t, int enable)
309 {
310 	struct spi_controller *ctlr = spi->controller;
311 	struct omap2_mcspi_cs *cs = spi->controller_state;
312 	struct omap2_mcspi *mcspi;
313 	unsigned int wcnt;
314 	int max_fifo_depth, bytes_per_word;
315 	u32 chconf, xferlevel;
316 
317 	mcspi = spi_controller_get_devdata(ctlr);
318 
319 	chconf = mcspi_cached_chconf0(spi);
320 	if (enable) {
321 		bytes_per_word = mcspi_bytes_per_word(cs->word_len);
322 		if (t->len % bytes_per_word != 0)
323 			goto disable_fifo;
324 
325 		if (t->rx_buf != NULL && t->tx_buf != NULL)
326 			max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
327 		else
328 			max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;
329 
330 		wcnt = t->len / bytes_per_word;
331 		if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
332 			goto disable_fifo;
333 
334 		xferlevel = wcnt << 16;
335 		if (t->rx_buf != NULL) {
336 			chconf |= OMAP2_MCSPI_CHCONF_FFER;
337 			xferlevel |= (bytes_per_word - 1) << 8;
338 		}
339 
340 		if (t->tx_buf != NULL) {
341 			chconf |= OMAP2_MCSPI_CHCONF_FFET;
342 			xferlevel |= bytes_per_word - 1;
343 		}
344 
345 		mcspi_write_reg(ctlr, OMAP2_MCSPI_XFERLEVEL, xferlevel);
346 		mcspi_write_chconf0(spi, chconf);
347 		mcspi->fifo_depth = max_fifo_depth;
348 
349 		return;
350 	}
351 
352 disable_fifo:
353 	if (t->rx_buf != NULL)
354 		chconf &= ~OMAP2_MCSPI_CHCONF_FFER;
355 
356 	if (t->tx_buf != NULL)
357 		chconf &= ~OMAP2_MCSPI_CHCONF_FFET;
358 
359 	mcspi_write_chconf0(spi, chconf);
360 	mcspi->fifo_depth = 0;
361 }
362 
/*
 * Busy-wait (up to 1 s) for @bit to come up in @reg.
 * Returns 0 on success, -ETIMEDOUT if the bit never appeared.
 */
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(1000);

	while (!(readl_relaxed(reg) & bit)) {
		if (time_after(jiffies, deadline)) {
			/* re-check once: we may have been scheduled away */
			return (readl_relaxed(reg) & bit) ? 0 : -ETIMEDOUT;
		}
		cpu_relax();
	}

	return 0;
}
379 
mcspi_wait_for_completion(struct omap2_mcspi * mcspi,struct completion * x)380 static int mcspi_wait_for_completion(struct  omap2_mcspi *mcspi,
381 				     struct completion *x)
382 {
383 	if (spi_controller_is_target(mcspi->ctlr)) {
384 		if (wait_for_completion_interruptible(x) ||
385 		    mcspi->target_aborted)
386 			return -EINTR;
387 	} else {
388 		wait_for_completion(x);
389 	}
390 
391 	return 0;
392 }
393 
omap2_mcspi_rx_callback(void * data)394 static void omap2_mcspi_rx_callback(void *data)
395 {
396 	struct spi_device *spi = data;
397 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
398 	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
399 
400 	/* We must disable the DMA RX request */
401 	omap2_mcspi_set_dma_req(spi, 1, 0);
402 
403 	complete(&mcspi_dma->dma_rx_completion);
404 }
405 
omap2_mcspi_tx_callback(void * data)406 static void omap2_mcspi_tx_callback(void *data)
407 {
408 	struct spi_device *spi = data;
409 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
410 	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
411 
412 	/* We must disable the DMA TX request */
413 	omap2_mcspi_set_dma_req(spi, 0, 0);
414 
415 	complete(&mcspi_dma->dma_tx_completion);
416 }
417 
omap2_mcspi_tx_dma(struct spi_device * spi,struct spi_transfer * xfer,struct dma_slave_config cfg)418 static void omap2_mcspi_tx_dma(struct spi_device *spi,
419 				struct spi_transfer *xfer,
420 				struct dma_slave_config cfg)
421 {
422 	struct omap2_mcspi	*mcspi;
423 	struct omap2_mcspi_dma  *mcspi_dma;
424 	struct dma_async_tx_descriptor *tx;
425 
426 	mcspi = spi_controller_get_devdata(spi->controller);
427 	mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
428 
429 	dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
430 
431 	tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
432 				     xfer->tx_sg.nents,
433 				     DMA_MEM_TO_DEV,
434 				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
435 	if (tx) {
436 		tx->callback = omap2_mcspi_tx_callback;
437 		tx->callback_param = spi;
438 		dmaengine_submit(tx);
439 	} else {
440 		/* FIXME: fall back to PIO? */
441 	}
442 	dma_async_issue_pending(mcspi_dma->dma_tx);
443 	omap2_mcspi_set_dma_req(spi, 0, 1);
444 }
445 
446 static unsigned
omap2_mcspi_rx_dma(struct spi_device * spi,struct spi_transfer * xfer,struct dma_slave_config cfg,unsigned es)447 omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
448 				struct dma_slave_config cfg,
449 				unsigned es)
450 {
451 	struct omap2_mcspi	*mcspi;
452 	struct omap2_mcspi_dma  *mcspi_dma;
453 	unsigned int		count, transfer_reduction = 0;
454 	struct scatterlist	*sg_out[2];
455 	int			nb_sizes = 0, out_mapped_nents[2], ret, x;
456 	size_t			sizes[2];
457 	u32			l;
458 	int			elements = 0;
459 	int			word_len, element_count;
460 	struct omap2_mcspi_cs	*cs = spi->controller_state;
461 	void __iomem		*chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
462 	struct dma_async_tx_descriptor *tx;
463 
464 	mcspi = spi_controller_get_devdata(spi->controller);
465 	mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
466 	count = xfer->len;
467 
468 	/*
469 	 *  In the "End-of-Transfer Procedure" section for DMA RX in OMAP35x TRM
470 	 *  it mentions reducing DMA transfer length by one element in host
471 	 *  normal mode.
472 	 */
473 	if (mcspi->fifo_depth == 0)
474 		transfer_reduction = es;
475 
476 	word_len = cs->word_len;
477 	l = mcspi_cached_chconf0(spi);
478 
479 	if (word_len <= 8)
480 		element_count = count;
481 	else if (word_len <= 16)
482 		element_count = count >> 1;
483 	else /* word_len <= 32 */
484 		element_count = count >> 2;
485 
486 
487 	dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
488 
489 	/*
490 	 *  Reduce DMA transfer length by one more if McSPI is
491 	 *  configured in turbo mode.
492 	 */
493 	if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
494 		transfer_reduction += es;
495 
496 	if (transfer_reduction) {
497 		/* Split sgl into two. The second sgl won't be used. */
498 		sizes[0] = count - transfer_reduction;
499 		sizes[1] = transfer_reduction;
500 		nb_sizes = 2;
501 	} else {
502 		/*
503 		 * Don't bother splitting the sgl. This essentially
504 		 * clones the original sgl.
505 		 */
506 		sizes[0] = count;
507 		nb_sizes = 1;
508 	}
509 
510 	ret = sg_split(xfer->rx_sg.sgl, xfer->rx_sg.nents, 0, nb_sizes,
511 		       sizes, sg_out, out_mapped_nents, GFP_KERNEL);
512 
513 	if (ret < 0) {
514 		dev_err(&spi->dev, "sg_split failed\n");
515 		return 0;
516 	}
517 
518 	tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, sg_out[0],
519 				     out_mapped_nents[0], DMA_DEV_TO_MEM,
520 				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
521 	if (tx) {
522 		tx->callback = omap2_mcspi_rx_callback;
523 		tx->callback_param = spi;
524 		dmaengine_submit(tx);
525 	} else {
526 		/* FIXME: fall back to PIO? */
527 	}
528 
529 	dma_async_issue_pending(mcspi_dma->dma_rx);
530 	omap2_mcspi_set_dma_req(spi, 1, 1);
531 
532 	ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_rx_completion);
533 	if (ret || mcspi->target_aborted) {
534 		dmaengine_terminate_sync(mcspi_dma->dma_rx);
535 		omap2_mcspi_set_dma_req(spi, 1, 0);
536 		return 0;
537 	}
538 
539 	for (x = 0; x < nb_sizes; x++)
540 		kfree(sg_out[x]);
541 
542 	if (mcspi->fifo_depth > 0)
543 		return count;
544 
545 	/*
546 	 *  Due to the DMA transfer length reduction the missing bytes must
547 	 *  be read manually to receive all of the expected data.
548 	 */
549 	omap2_mcspi_set_enable(spi, 0);
550 
551 	elements = element_count - 1;
552 
553 	if (l & OMAP2_MCSPI_CHCONF_TURBO) {
554 		elements--;
555 
556 		if (!mcspi_wait_for_reg_bit(chstat_reg,
557 					    OMAP2_MCSPI_CHSTAT_RXS)) {
558 			u32 w;
559 
560 			w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
561 			if (word_len <= 8)
562 				((u8 *)xfer->rx_buf)[elements++] = w;
563 			else if (word_len <= 16)
564 				((u16 *)xfer->rx_buf)[elements++] = w;
565 			else /* word_len <= 32 */
566 				((u32 *)xfer->rx_buf)[elements++] = w;
567 		} else {
568 			int bytes_per_word = mcspi_bytes_per_word(word_len);
569 			dev_err(&spi->dev, "DMA RX penultimate word empty\n");
570 			count -= (bytes_per_word << 1);
571 			omap2_mcspi_set_enable(spi, 1);
572 			return count;
573 		}
574 	}
575 	if (!mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_RXS)) {
576 		u32 w;
577 
578 		w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
579 		if (word_len <= 8)
580 			((u8 *)xfer->rx_buf)[elements] = w;
581 		else if (word_len <= 16)
582 			((u16 *)xfer->rx_buf)[elements] = w;
583 		else /* word_len <= 32 */
584 			((u32 *)xfer->rx_buf)[elements] = w;
585 	} else {
586 		dev_err(&spi->dev, "DMA RX last word empty\n");
587 		count -= mcspi_bytes_per_word(word_len);
588 	}
589 	omap2_mcspi_set_enable(spi, 1);
590 	return count;
591 }
592 
/*
 * Perform one transfer using DMA for the TX and/or RX direction.
 * Returns the number of bytes transferred, or 0 on abort/error.
 * The RX path delegates to omap2_mcspi_rx_dma(); the TX path waits for
 * the dmaengine completion and then drains the shift register/FIFO.
 */
static unsigned
omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_cs	*cs = spi->controller_state;
	struct omap2_mcspi_dma  *mcspi_dma;
	unsigned int		count;
	u8			*rx;
	const u8		*tx;
	struct dma_slave_config	cfg;
	enum dma_slave_buswidth width;
	unsigned es;
	void __iomem		*chstat_reg;
	void __iomem            *irqstat_reg;
	int			wait_res;

	mcspi = spi_controller_get_devdata(spi->controller);
	mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];

	/* DMA element size follows the configured word length */
	if (cs->word_len <= 8) {
		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		es = 1;
	} else if (cs->word_len <= 16) {
		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		es = 2;
	} else {
		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		es = 4;
	}

	count = xfer->len;

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
	cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
	cfg.src_addr_width = width;
	cfg.dst_addr_width = width;
	cfg.src_maxburst = 1;
	cfg.dst_maxburst = 1;

	rx = xfer->rx_buf;
	tx = xfer->tx_buf;

	/* reset abort flag and all completions before starting DMA */
	mcspi->target_aborted = false;
	reinit_completion(&mcspi_dma->dma_tx_completion);
	reinit_completion(&mcspi_dma->dma_rx_completion);
	reinit_completion(&mcspi->txdone);
	if (tx) {
		/* Enable EOW IRQ to know end of tx in target mode */
		if (spi_controller_is_target(spi->controller))
			mcspi_write_reg(spi->controller,
					OMAP2_MCSPI_IRQENABLE,
					OMAP2_MCSPI_IRQSTATUS_EOW);
		omap2_mcspi_tx_dma(spi, xfer, cfg);
	}

	if (rx != NULL)
		count = omap2_mcspi_rx_dma(spi, xfer, cfg, es);

	if (tx != NULL) {
		int ret;

		ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_tx_completion);
		if (ret || mcspi->target_aborted) {
			dmaengine_terminate_sync(mcspi_dma->dma_tx);
			omap2_mcspi_set_dma_req(spi, 0, 0);
			return 0;
		}

		/* in target mode, also wait for the EOW interrupt handler */
		if (spi_controller_is_target(mcspi->ctlr)) {
			ret = mcspi_wait_for_completion(mcspi, &mcspi->txdone);
			if (ret || mcspi->target_aborted)
				return 0;
		}

		/* with the FIFO in use, wait for and acknowledge EOW */
		if (mcspi->fifo_depth > 0) {
			irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;

			if (mcspi_wait_for_reg_bit(irqstat_reg,
						OMAP2_MCSPI_IRQSTATUS_EOW) < 0)
				dev_err(&spi->dev, "EOW timed out\n");

			mcspi_write_reg(mcspi->ctlr, OMAP2_MCSPI_IRQSTATUS,
					OMAP2_MCSPI_IRQSTATUS_EOW);
		}

		/* for TX_ONLY mode, be sure all words have shifted out */
		if (rx == NULL) {
			chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
			if (mcspi->fifo_depth > 0) {
				wait_res = mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXFFE);
				if (wait_res < 0)
					dev_err(&spi->dev, "TXFFE timed out\n");
			} else {
				wait_res = mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS);
				if (wait_res < 0)
					dev_err(&spi->dev, "TXS timed out\n");
			}
			if (wait_res >= 0 &&
				(mcspi_wait_for_reg_bit(chstat_reg,
					OMAP2_MCSPI_CHSTAT_EOT) < 0))
				dev_err(&spi->dev, "EOT timed out\n");
		}
	}
	return count;
}
701 
/*
 * Perform one transfer with programmed I/O, one word at a time.
 * Three identical branches handle the <=8, <=16 and <=32 bit word sizes.
 * Returns the number of bytes actually transferred (count - remainder).
 */
static unsigned
omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct omap2_mcspi_cs	*cs = spi->controller_state;
	unsigned int		count, c;
	u32			l;
	void __iomem		*base = cs->base;
	void __iomem		*tx_reg;
	void __iomem		*rx_reg;
	void __iomem		*chstat_reg;
	int			word_len;

	count = xfer->len;
	c = count;
	word_len = cs->word_len;

	l = mcspi_cached_chconf0(spi);

	/* We store the pre-calculated register addresses on stack to speed
	 * up the transfer loop. */
	tx_reg		= base + OMAP2_MCSPI_TX0;
	rx_reg		= base + OMAP2_MCSPI_RX0;
	chstat_reg	= base + OMAP2_MCSPI_CHSTAT0;

	/* too short to hold even one word: nothing to do */
	if (c < (word_len>>3))
		return 0;

	if (word_len <= 8) {
		u8		*rx;
		const u8	*tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;

		do {
			c -= 1;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %02x\n",
						word_len, *tx);
				writel_relaxed(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				/* in RX-only turbo mode the channel must be
				 * disabled before reading the last two words */
				if (c == 1 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = readl_relaxed(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %02x\n",
						    word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = readl_relaxed(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %02x\n",
						word_len, *(rx - 1));
			}
			/* Add word delay between each word */
			spi_delay_exec(&xfer->word_delay, xfer);
		} while (c);
	} else if (word_len <= 16) {
		u16		*rx;
		const u16	*tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;
		do {
			c -= 2;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %04x\n",
						word_len, *tx);
				writel_relaxed(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				/* see the turbo-mode note in the 8-bit branch */
				if (c == 2 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = readl_relaxed(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %04x\n",
						    word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = readl_relaxed(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %04x\n",
						word_len, *(rx - 1));
			}
			/* Add word delay between each word */
			spi_delay_exec(&xfer->word_delay, xfer);
		} while (c >= 2);
	} else if (word_len <= 32) {
		u32		*rx;
		const u32	*tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;
		do {
			c -= 4;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %08x\n",
						word_len, *tx);
				writel_relaxed(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				/* see the turbo-mode note in the 8-bit branch */
				if (c == 4 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = readl_relaxed(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %08x\n",
						    word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = readl_relaxed(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %08x\n",
						word_len, *(rx - 1));
			}
			/* Add word delay between each word */
			spi_delay_exec(&xfer->word_delay, xfer);
		} while (c >= 4);
	}

	/* for TX_ONLY mode, be sure all words have shifted out */
	if (xfer->rx_buf == NULL) {
		if (mcspi_wait_for_reg_bit(chstat_reg,
				OMAP2_MCSPI_CHSTAT_TXS) < 0) {
			dev_err(&spi->dev, "TXS timed out\n");
		} else if (mcspi_wait_for_reg_bit(chstat_reg,
				OMAP2_MCSPI_CHSTAT_EOT) < 0)
			dev_err(&spi->dev, "EOT timed out\n");

		/* disable chan to purge rx datas received in TX_ONLY transfer,
		 * otherwise these rx datas will affect the direct following
		 * RX_ONLY transfer.
		 */
		omap2_mcspi_set_enable(spi, 0);
	}
out:
	omap2_mcspi_set_enable(spi, 1);
	return count - c;
}
898 
/*
 * Return the smallest power-of-two divider exponent (0..15) such that
 * ref_clk_hz >> div does not exceed speed_hz; 15 is the hardware maximum.
 */
static u32 omap2_mcspi_calc_divisor(u32 speed_hz, u32 ref_clk_hz)
{
	u32 div = 0;

	while (div < 15 && speed_hz < (ref_clk_hz >> div))
		div++;

	return div;
}
909 
910 /* called only when no transfer is active to this device */
/*
 * Configure word length, chip-select polarity, clock divider and SPI mode
 * for @spi, optionally overridden per-transfer by @t.  Always returns 0.
 */
static int omap2_mcspi_setup_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;
	struct omap2_mcspi *mcspi;
	u32 ref_clk_hz, l = 0, clkd = 0, div, extclk = 0, clkg = 0;
	u8 word_len = spi->bits_per_word;
	u32 speed_hz = spi->max_speed_hz;

	mcspi = spi_controller_get_devdata(spi->controller);

	/* per-transfer overrides take precedence over device defaults */
	if (t != NULL && t->bits_per_word)
		word_len = t->bits_per_word;

	cs->word_len = word_len;

	if (t && t->speed_hz)
		speed_hz = t->speed_hz;

	ref_clk_hz = mcspi->ref_clk_hz;
	speed_hz = min_t(u32, speed_hz, ref_clk_hz);
	if (speed_hz < (ref_clk_hz / OMAP2_MCSPI_MAX_DIVIDER)) {
		/* slow clocks: power-of-two divider, granularity off */
		clkd = omap2_mcspi_calc_divisor(speed_hz, ref_clk_hz);
		speed_hz = ref_clk_hz >> clkd;
		clkg = 0;
	} else {
		/* one-clock-cycle granularity: 12-bit divider split over
		 * CHCONF.CLKD (low 4 bits) and CHCTRL.EXTCLK (high 8 bits) */
		div = (ref_clk_hz + speed_hz - 1) / speed_hz;
		speed_hz = ref_clk_hz / div;
		clkd = (div - 1) & 0xf;
		extclk = (div - 1) >> 4;
		clkg = OMAP2_MCSPI_CHCONF_CLKG;
	}

	l = mcspi_cached_chconf0(spi);

	/* standard 4-wire host mode:  SCK, MOSI/out, MISO/in, nCS
	 * REVISIT: this controller could support SPI_3WIRE mode.
	 */
	if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
		l &= ~OMAP2_MCSPI_CHCONF_IS;
		l &= ~OMAP2_MCSPI_CHCONF_DPE1;
		l |= OMAP2_MCSPI_CHCONF_DPE0;
	} else {
		l |= OMAP2_MCSPI_CHCONF_IS;
		l |= OMAP2_MCSPI_CHCONF_DPE1;
		l &= ~OMAP2_MCSPI_CHCONF_DPE0;
	}

	/* wordlength */
	l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
	l |= (word_len - 1) << 7;

	/* set chipselect polarity; manage with FORCE */
	if (!(spi->mode & SPI_CS_HIGH))
		l |= OMAP2_MCSPI_CHCONF_EPOL;	/* active-low; normal */
	else
		l &= ~OMAP2_MCSPI_CHCONF_EPOL;

	/* set clock divisor */
	l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
	l |= clkd << 2;

	/* set clock granularity */
	l &= ~OMAP2_MCSPI_CHCONF_CLKG;
	l |= clkg;
	if (clkg) {
		cs->chctrl0 &= ~OMAP2_MCSPI_CHCTRL_EXTCLK_MASK;
		cs->chctrl0 |= extclk << 8;
		mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
	}

	/* set SPI mode 0..3 */
	if (spi->mode & SPI_CPOL)
		l |= OMAP2_MCSPI_CHCONF_POL;
	else
		l &= ~OMAP2_MCSPI_CHCONF_POL;
	if (spi->mode & SPI_CPHA)
		l |= OMAP2_MCSPI_CHCONF_PHA;
	else
		l &= ~OMAP2_MCSPI_CHCONF_PHA;

	mcspi_write_chconf0(spi, l);

	cs->mode = spi->mode;

	dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
			speed_hz,
			(spi->mode & SPI_CPHA) ? "trailing" : "leading",
			(spi->mode & SPI_CPOL) ? "inverted" : "normal");

	return 0;
}
1003 
1004 /*
1005  * Note that we currently allow DMA only if we get a channel
1006  * for both rx and tx. Otherwise we'll do PIO for both rx and tx.
1007  */
omap2_mcspi_request_dma(struct omap2_mcspi * mcspi,struct omap2_mcspi_dma * mcspi_dma)1008 static int omap2_mcspi_request_dma(struct omap2_mcspi *mcspi,
1009 				   struct omap2_mcspi_dma *mcspi_dma)
1010 {
1011 	int ret = 0;
1012 
1013 	mcspi_dma->dma_rx = dma_request_chan(mcspi->dev,
1014 					     mcspi_dma->dma_rx_ch_name);
1015 	if (IS_ERR(mcspi_dma->dma_rx)) {
1016 		ret = PTR_ERR(mcspi_dma->dma_rx);
1017 		mcspi_dma->dma_rx = NULL;
1018 		goto no_dma;
1019 	}
1020 
1021 	mcspi_dma->dma_tx = dma_request_chan(mcspi->dev,
1022 					     mcspi_dma->dma_tx_ch_name);
1023 	if (IS_ERR(mcspi_dma->dma_tx)) {
1024 		ret = PTR_ERR(mcspi_dma->dma_tx);
1025 		mcspi_dma->dma_tx = NULL;
1026 		dma_release_channel(mcspi_dma->dma_rx);
1027 		mcspi_dma->dma_rx = NULL;
1028 	}
1029 
1030 	init_completion(&mcspi_dma->dma_rx_completion);
1031 	init_completion(&mcspi_dma->dma_tx_completion);
1032 
1033 no_dma:
1034 	return ret;
1035 }
1036 
omap2_mcspi_release_dma(struct spi_controller * ctlr)1037 static void omap2_mcspi_release_dma(struct spi_controller *ctlr)
1038 {
1039 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
1040 	struct omap2_mcspi_dma	*mcspi_dma;
1041 	int i;
1042 
1043 	for (i = 0; i < ctlr->num_chipselect; i++) {
1044 		mcspi_dma = &mcspi->dma_channels[i];
1045 
1046 		if (mcspi_dma->dma_rx) {
1047 			dma_release_channel(mcspi_dma->dma_rx);
1048 			mcspi_dma->dma_rx = NULL;
1049 		}
1050 		if (mcspi_dma->dma_tx) {
1051 			dma_release_channel(mcspi_dma->dma_tx);
1052 			mcspi_dma->dma_tx = NULL;
1053 		}
1054 	}
1055 }
1056 
omap2_mcspi_cleanup(struct spi_device * spi)1057 static void omap2_mcspi_cleanup(struct spi_device *spi)
1058 {
1059 	struct omap2_mcspi_cs	*cs;
1060 
1061 	if (spi->controller_state) {
1062 		/* Unlink controller state from context save list */
1063 		cs = spi->controller_state;
1064 		list_del(&cs->node);
1065 
1066 		kfree(cs);
1067 	}
1068 }
1069 
/*
 * Per-device setup: allocate the per-chipselect state on first call and
 * program the channel with the device's default speed and word length.
 */
static int omap2_mcspi_setup(struct spi_device *spi)
{
	bool			initial_setup = false;
	int			ret;
	struct omap2_mcspi	*mcspi = spi_controller_get_devdata(spi->controller);
	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
	struct omap2_mcspi_cs	*cs = spi->controller_state;

	/*
	 * First setup for this device: allocate state and point it at this
	 * chipselect's 0x14-byte register bank.
	 */
	if (!cs) {
		cs = kzalloc(sizeof(*cs), GFP_KERNEL);
		if (!cs)
			return -ENOMEM;
		cs->base = mcspi->base + spi_get_chipselect(spi, 0) * 0x14;
		cs->phys = mcspi->phys + spi_get_chipselect(spi, 0) * 0x14;
		cs->mode = 0;
		cs->chconf0 = 0;
		cs->chctrl0 = 0;
		spi->controller_state = cs;
		/* Link this to context save list */
		list_add_tail(&cs->node, &ctx->cs);
		initial_setup = true;
	}

	/* Registers are only reachable with the controller powered up. */
	ret = pm_runtime_resume_and_get(mcspi->dev);
	if (ret < 0) {
		/* Free the just-allocated state; pre-existing state is kept. */
		if (initial_setup)
			omap2_mcspi_cleanup(spi);

		return ret;
	}

	/* NULL transfer: configure the device defaults (speed, word size). */
	ret = omap2_mcspi_setup_transfer(spi, NULL);
	if (ret && initial_setup)
		omap2_mcspi_cleanup(spi);

	pm_runtime_mark_last_busy(mcspi->dev);
	pm_runtime_put_autosuspend(mcspi->dev);

	return ret;
}
1110 
omap2_mcspi_irq_handler(int irq,void * data)1111 static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
1112 {
1113 	struct omap2_mcspi *mcspi = data;
1114 	u32 irqstat;
1115 
1116 	irqstat	= mcspi_read_reg(mcspi->ctlr, OMAP2_MCSPI_IRQSTATUS);
1117 	if (!irqstat)
1118 		return IRQ_NONE;
1119 
1120 	/* Disable IRQ and wakeup target xfer task */
1121 	mcspi_write_reg(mcspi->ctlr, OMAP2_MCSPI_IRQENABLE, 0);
1122 	if (irqstat & OMAP2_MCSPI_IRQSTATUS_EOW)
1123 		complete(&mcspi->txdone);
1124 
1125 	return IRQ_HANDLED;
1126 }
1127 
omap2_mcspi_target_abort(struct spi_controller * ctlr)1128 static int omap2_mcspi_target_abort(struct spi_controller *ctlr)
1129 {
1130 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
1131 	struct omap2_mcspi_dma *mcspi_dma = mcspi->dma_channels;
1132 
1133 	mcspi->target_aborted = true;
1134 	complete(&mcspi_dma->dma_rx_completion);
1135 	complete(&mcspi_dma->dma_tx_completion);
1136 	complete(&mcspi->txdone);
1137 
1138 	return 0;
1139 }
1140 
static int omap2_mcspi_transfer_one(struct spi_controller *ctlr,
				    struct spi_device *spi,
				    struct spi_transfer *t)
{

	/* We only enable one channel at a time -- the one whose message is
	 * currently being handled -- although this controller would gladly
	 * arbitrate among multiple channels.  This corresponds to "single
	 * channel" host mode.  As a side effect, we need to manage the
	 * chipselect with the FORCE bit ... CS != channel enable.
	 */

	struct omap2_mcspi		*mcspi;
	struct omap2_mcspi_dma		*mcspi_dma;
	struct omap2_mcspi_cs		*cs;
	struct omap2_mcspi_device_config *cd;
	int				par_override = 0;
	int				status = 0;
	u32				chconf;

	mcspi = spi_controller_get_devdata(ctlr);
	mcspi_dma = mcspi->dma_channels + spi_get_chipselect(spi, 0);
	cs = spi->controller_state;
	cd = spi->controller_data;

	/*
	 * The target driver could have changed spi->mode in which case
	 * it will be different from cs->mode (the current hardware setup).
	 * If so, set par_override (even though its not a parity issue) so
	 * omap2_mcspi_setup_transfer will be called to configure the hardware
	 * with the correct mode on the first iteration of the loop below.
	 */
	if (spi->mode != cs->mode)
		par_override = 1;

	omap2_mcspi_set_enable(spi, 0);

	/* GPIO chipselect: assert CS, respecting SPI_CS_HIGH polarity. */
	if (spi_get_csgpiod(spi, 0))
		omap2_mcspi_set_cs(spi, spi->mode & SPI_CS_HIGH);

	/* Reprogram speed/word size if this transfer differs from defaults. */
	if (par_override ||
	    (t->speed_hz != spi->max_speed_hz) ||
	    (t->bits_per_word != spi->bits_per_word)) {
		par_override = 1;
		status = omap2_mcspi_setup_transfer(spi, t);
		if (status < 0)
			goto out;
		if (t->speed_hz == spi->max_speed_hz &&
		    t->bits_per_word == spi->bits_per_word)
			par_override = 0;
	}

	chconf = mcspi_cached_chconf0(spi);
	chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
	chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;

	/* Pick the transmit/receive mode from which buffers are present. */
	if (t->tx_buf == NULL)
		chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
	else if (t->rx_buf == NULL)
		chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;

	if (cd && cd->turbo_mode && t->tx_buf == NULL) {
		/* Turbo mode is for more than one word */
		if (t->len > ((cs->word_len + 7) >> 3))
			chconf |= OMAP2_MCSPI_CHCONF_TURBO;
	}

	mcspi_write_chconf0(spi, chconf);

	if (t->len) {
		unsigned	count;

		/* Enable the FIFO only when this transfer will use DMA. */
		if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
		    spi_xfer_is_dma_mapped(ctlr, spi, t))
			omap2_mcspi_set_fifo(spi, t, 1);

		omap2_mcspi_set_enable(spi, 1);

		/* RX_ONLY mode needs dummy data in TX reg */
		if (t->tx_buf == NULL)
			writel_relaxed(0, cs->base
					+ OMAP2_MCSPI_TX0);

		if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
		    spi_xfer_is_dma_mapped(ctlr, spi, t))
			count = omap2_mcspi_txrx_dma(spi, t);
		else
			count = omap2_mcspi_txrx_pio(spi, t);

		/* A short count means the hardware transfer failed. */
		if (count != t->len) {
			status = -EIO;
			goto out;
		}
	}

	omap2_mcspi_set_enable(spi, 0);

	if (mcspi->fifo_depth > 0)
		omap2_mcspi_set_fifo(spi, t, 0);

out:
	/* Restore defaults if they were overridden */
	if (par_override) {
		par_override = 0;
		status = omap2_mcspi_setup_transfer(spi, NULL);
	}

	omap2_mcspi_set_enable(spi, 0);

	/* GPIO chipselect: deassert CS. */
	if (spi_get_csgpiod(spi, 0))
		omap2_mcspi_set_cs(spi, !(spi->mode & SPI_CS_HIGH));

	/* The FIFO may still be enabled if we bailed out via the error path. */
	if (mcspi->fifo_depth > 0 && t)
		omap2_mcspi_set_fifo(spi, t, 0);

	return status;
}
1258 
/*
 * Decide between single and multi channel mode for this message and clear
 * stale CS FORCE bits accordingly.
 */
static int omap2_mcspi_prepare_message(struct spi_controller *ctlr,
				       struct spi_message *msg)
{
	struct omap2_mcspi	*mcspi = spi_controller_get_devdata(ctlr);
	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
	struct omap2_mcspi_cs	*cs;
	struct spi_transfer	*tr;
	u8 bits_per_word;

	/*
	 * The conditions are strict, it is mandatory to check each transfer of the list to see if
	 * multi-mode is applicable.
	 */
	mcspi->use_multi_mode = true;

	/* A previous message that kept CS asserted forbids multi-mode now. */
	if (mcspi->last_msg_kept_cs)
		mcspi->use_multi_mode = false;

	list_for_each_entry(tr, &msg->transfers, transfer_list) {
		if (!tr->bits_per_word)
			bits_per_word = msg->spi->bits_per_word;
		else
			bits_per_word = tr->bits_per_word;

		/*
		 * Check if this transfer contains only one word;
		 */
		if (bits_per_word < 8 && tr->len == 1) {
			/* multi-mode is applicable, only one word (1..7 bits) */
		} else if (bits_per_word >= 8 && tr->len == bits_per_word / 8) {
			/* multi-mode is applicable, only one word (8..32 bits) */
		} else {
			/* multi-mode is not applicable: more than one word in the transfer */
			mcspi->use_multi_mode = false;
		}

		if (list_is_last(&tr->transfer_list, &msg->transfers)) {
			/* Check if transfer asks to keep the CS status after the whole message */
			if (tr->cs_change) {
				mcspi->use_multi_mode = false;
				mcspi->last_msg_kept_cs = true;
			} else {
				mcspi->last_msg_kept_cs = false;
			}
		} else {
			/* Check if transfer asks to change the CS status after the transfer */
			if (!tr->cs_change)
				mcspi->use_multi_mode = false;
		}
	}

	omap2_mcspi_set_mode(ctlr);

	/* In single mode only a single channel can have the FORCE bit enabled
	 * in its chconf0 register.
	 * Scan all channels and disable them except the current one.
	 * A FORCE can remain from a last transfer having cs_change enabled
	 *
	 * In multi mode all FORCE bits must be disabled.
	 */
	list_for_each_entry(cs, &ctx->cs, node) {
		if (msg->spi->controller_state == cs && !mcspi->use_multi_mode) {
			continue;
		}

		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE)) {
			cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
			writel_relaxed(cs->chconf0,
					cs->base + OMAP2_MCSPI_CHCONF0);
			/* Read back to post the write before moving on. */
			readl_relaxed(cs->base + OMAP2_MCSPI_CHCONF0);
		}
	}

	return 0;
}
1334 
omap2_mcspi_can_dma(struct spi_controller * ctlr,struct spi_device * spi,struct spi_transfer * xfer)1335 static bool omap2_mcspi_can_dma(struct spi_controller *ctlr,
1336 				struct spi_device *spi,
1337 				struct spi_transfer *xfer)
1338 {
1339 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
1340 	struct omap2_mcspi_dma *mcspi_dma =
1341 		&mcspi->dma_channels[spi_get_chipselect(spi, 0)];
1342 
1343 	if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx)
1344 		return false;
1345 
1346 	if (spi_controller_is_target(ctlr))
1347 		return true;
1348 
1349 	ctlr->dma_rx = mcspi_dma->dma_rx;
1350 	ctlr->dma_tx = mcspi_dma->dma_tx;
1351 
1352 	return (xfer->len >= DMA_MIN_BYTES);
1353 }
1354 
omap2_mcspi_max_xfer_size(struct spi_device * spi)1355 static size_t omap2_mcspi_max_xfer_size(struct spi_device *spi)
1356 {
1357 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
1358 	struct omap2_mcspi_dma *mcspi_dma =
1359 		&mcspi->dma_channels[spi_get_chipselect(spi, 0)];
1360 
1361 	if (mcspi->max_xfer_len && mcspi_dma->dma_rx)
1362 		return mcspi->max_xfer_len;
1363 
1364 	return SIZE_MAX;
1365 }
1366 
omap2_mcspi_controller_setup(struct omap2_mcspi * mcspi)1367 static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi)
1368 {
1369 	struct spi_controller	*ctlr = mcspi->ctlr;
1370 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1371 	int			ret = 0;
1372 
1373 	ret = pm_runtime_resume_and_get(mcspi->dev);
1374 	if (ret < 0)
1375 		return ret;
1376 
1377 	mcspi_write_reg(ctlr, OMAP2_MCSPI_WAKEUPENABLE,
1378 			OMAP2_MCSPI_WAKEUPENABLE_WKEN);
1379 	ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
1380 
1381 	omap2_mcspi_set_mode(ctlr);
1382 	pm_runtime_mark_last_busy(mcspi->dev);
1383 	pm_runtime_put_autosuspend(mcspi->dev);
1384 	return 0;
1385 }
1386 
/* Runtime suspend: move the pads to their idle state (best effort). */
static int omap_mcspi_runtime_suspend(struct device *dev)
{
	int ret = pinctrl_pm_select_idle_state(dev);

	/* Pinctrl failure is not fatal; just report it. */
	if (ret)
		dev_warn(dev, "%s: failed to set pins: %i\n", __func__, ret);

	return 0;
}
1397 
/*
 * When the SPI block wakes up from off-mode, CS is in the active state.
 * If it was inactive when the driver was suspended, force it back to the
 * inactive state at wake up.
 */
static int omap_mcspi_runtime_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
	struct omap2_mcspi_regs *ctx = &mcspi->ctx;
	struct omap2_mcspi_cs *cs;
	int error;

	error = pinctrl_pm_select_default_state(dev);
	if (error)
		dev_warn(dev, "%s: failed to set pins: %i\n", __func__, error);

	/* McSPI: context restore */
	mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
	mcspi_write_reg(ctlr, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);

	list_for_each_entry(cs, &ctx->cs, node) {
		/*
		 * We need to toggle CS state for the OMAP to take this
		 * change into account.
		 */
		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
			/* Briefly assert FORCE, then restore the saved value. */
			cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE;
			writel_relaxed(cs->chconf0,
				       cs->base + OMAP2_MCSPI_CHCONF0);
			cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
			writel_relaxed(cs->chconf0,
				       cs->base + OMAP2_MCSPI_CHCONF0);
		} else {
			/* FORCE already set: just restore the register. */
			writel_relaxed(cs->chconf0,
				       cs->base + OMAP2_MCSPI_CHCONF0);
		}
	}

	return 0;
}
1439 
/* Register layout variants: OMAP2/3 have no extra offset. */
static struct omap2_mcspi_platform_config omap2_pdata = {
	.regs_offset = 0,
};

/* OMAP4 and later shift the register bank by OMAP4_MCSPI_REG_OFFSET. */
static struct omap2_mcspi_platform_config omap4_pdata = {
	.regs_offset = OMAP4_MCSPI_REG_OFFSET,
};

/* AM654: single transfers are capped at 4K-1 bytes (see max_xfer_size). */
static struct omap2_mcspi_platform_config am654_pdata = {
	.regs_offset = OMAP4_MCSPI_REG_OFFSET,
	.max_xfer_len = SZ_4K - 1,
};
1452 
/* DT match table; .data carries the per-SoC register layout config. */
static const struct of_device_id omap_mcspi_of_match[] = {
	{
		.compatible = "ti,omap2-mcspi",
		.data = &omap2_pdata,
	},
	{
		.compatible = "ti,omap4-mcspi",
		.data = &omap4_pdata,
	},
	{
		.compatible = "ti,am654-mcspi",
		.data = &am654_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
1469 
omap2_mcspi_probe(struct platform_device * pdev)1470 static int omap2_mcspi_probe(struct platform_device *pdev)
1471 {
1472 	struct spi_controller	*ctlr;
1473 	const struct omap2_mcspi_platform_config *pdata;
1474 	struct omap2_mcspi	*mcspi;
1475 	struct resource		*r;
1476 	int			status = 0, i;
1477 	u32			regs_offset = 0;
1478 	struct device_node	*node = pdev->dev.of_node;
1479 	const struct of_device_id *match;
1480 
1481 	if (of_property_read_bool(node, "spi-slave"))
1482 		ctlr = spi_alloc_target(&pdev->dev, sizeof(*mcspi));
1483 	else
1484 		ctlr = spi_alloc_host(&pdev->dev, sizeof(*mcspi));
1485 	if (!ctlr)
1486 		return -ENOMEM;
1487 
1488 	/* the spi->mode bits understood by this driver: */
1489 	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1490 	ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1491 	ctlr->setup = omap2_mcspi_setup;
1492 	ctlr->auto_runtime_pm = true;
1493 	ctlr->prepare_message = omap2_mcspi_prepare_message;
1494 	ctlr->can_dma = omap2_mcspi_can_dma;
1495 	ctlr->transfer_one = omap2_mcspi_transfer_one;
1496 	ctlr->set_cs = omap2_mcspi_set_cs;
1497 	ctlr->cleanup = omap2_mcspi_cleanup;
1498 	ctlr->target_abort = omap2_mcspi_target_abort;
1499 	ctlr->dev.of_node = node;
1500 	ctlr->use_gpio_descriptors = true;
1501 
1502 	platform_set_drvdata(pdev, ctlr);
1503 
1504 	mcspi = spi_controller_get_devdata(ctlr);
1505 	mcspi->ctlr = ctlr;
1506 
1507 	match = of_match_device(omap_mcspi_of_match, &pdev->dev);
1508 	if (match) {
1509 		u32 num_cs = 1; /* default number of chipselect */
1510 		pdata = match->data;
1511 
1512 		of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
1513 		ctlr->num_chipselect = num_cs;
1514 		if (of_property_read_bool(node, "ti,pindir-d0-out-d1-in"))
1515 			mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
1516 	} else {
1517 		pdata = dev_get_platdata(&pdev->dev);
1518 		ctlr->num_chipselect = pdata->num_cs;
1519 		mcspi->pin_dir = pdata->pin_dir;
1520 	}
1521 	regs_offset = pdata->regs_offset;
1522 	if (pdata->max_xfer_len) {
1523 		mcspi->max_xfer_len = pdata->max_xfer_len;
1524 		ctlr->max_transfer_size = omap2_mcspi_max_xfer_size;
1525 	}
1526 
1527 	mcspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
1528 	if (IS_ERR(mcspi->base)) {
1529 		status = PTR_ERR(mcspi->base);
1530 		goto free_ctlr;
1531 	}
1532 	mcspi->phys = r->start + regs_offset;
1533 	mcspi->base += regs_offset;
1534 
1535 	mcspi->dev = &pdev->dev;
1536 
1537 	INIT_LIST_HEAD(&mcspi->ctx.cs);
1538 
1539 	mcspi->dma_channels = devm_kcalloc(&pdev->dev, ctlr->num_chipselect,
1540 					   sizeof(struct omap2_mcspi_dma),
1541 					   GFP_KERNEL);
1542 	if (mcspi->dma_channels == NULL) {
1543 		status = -ENOMEM;
1544 		goto free_ctlr;
1545 	}
1546 
1547 	for (i = 0; i < ctlr->num_chipselect; i++) {
1548 		sprintf(mcspi->dma_channels[i].dma_rx_ch_name, "rx%d", i);
1549 		sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i);
1550 
1551 		status = omap2_mcspi_request_dma(mcspi,
1552 						 &mcspi->dma_channels[i]);
1553 		if (status == -EPROBE_DEFER)
1554 			goto free_ctlr;
1555 	}
1556 
1557 	status = platform_get_irq(pdev, 0);
1558 	if (status < 0)
1559 		goto free_ctlr;
1560 	init_completion(&mcspi->txdone);
1561 	status = devm_request_irq(&pdev->dev, status,
1562 				  omap2_mcspi_irq_handler, 0, pdev->name,
1563 				  mcspi);
1564 	if (status) {
1565 		dev_err(&pdev->dev, "Cannot request IRQ");
1566 		goto free_ctlr;
1567 	}
1568 
1569 	mcspi->ref_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
1570 	if (IS_ERR(mcspi->ref_clk)) {
1571 		status = PTR_ERR(mcspi->ref_clk);
1572 		dev_err_probe(&pdev->dev, status, "Failed to get ref_clk");
1573 		goto free_ctlr;
1574 	}
1575 	if (mcspi->ref_clk)
1576 		mcspi->ref_clk_hz = clk_get_rate(mcspi->ref_clk);
1577 	else
1578 		mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ;
1579 	ctlr->max_speed_hz = mcspi->ref_clk_hz;
1580 	ctlr->min_speed_hz = mcspi->ref_clk_hz >> 15;
1581 
1582 	pm_runtime_use_autosuspend(&pdev->dev);
1583 	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
1584 	pm_runtime_enable(&pdev->dev);
1585 
1586 	status = omap2_mcspi_controller_setup(mcspi);
1587 	if (status < 0)
1588 		goto disable_pm;
1589 
1590 	status = devm_spi_register_controller(&pdev->dev, ctlr);
1591 	if (status < 0)
1592 		goto disable_pm;
1593 
1594 	return status;
1595 
1596 disable_pm:
1597 	pm_runtime_dont_use_autosuspend(&pdev->dev);
1598 	pm_runtime_put_sync(&pdev->dev);
1599 	pm_runtime_disable(&pdev->dev);
1600 free_ctlr:
1601 	omap2_mcspi_release_dma(ctlr);
1602 	spi_controller_put(ctlr);
1603 	return status;
1604 }
1605 
omap2_mcspi_remove(struct platform_device * pdev)1606 static void omap2_mcspi_remove(struct platform_device *pdev)
1607 {
1608 	struct spi_controller *ctlr = platform_get_drvdata(pdev);
1609 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
1610 
1611 	omap2_mcspi_release_dma(ctlr);
1612 
1613 	pm_runtime_dont_use_autosuspend(mcspi->dev);
1614 	pm_runtime_put_sync(mcspi->dev);
1615 	pm_runtime_disable(&pdev->dev);
1616 }
1617 
1618 /* work with hotplug and coldplug */
1619 MODULE_ALIAS("platform:omap2_mcspi");
1620 
omap2_mcspi_suspend(struct device * dev)1621 static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
1622 {
1623 	struct spi_controller *ctlr = dev_get_drvdata(dev);
1624 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
1625 	int error;
1626 
1627 	error = pinctrl_pm_select_sleep_state(dev);
1628 	if (error)
1629 		dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
1630 			 __func__, error);
1631 
1632 	error = spi_controller_suspend(ctlr);
1633 	if (error)
1634 		dev_warn(mcspi->dev, "%s: controller suspend failed: %i\n",
1635 			 __func__, error);
1636 
1637 	return pm_runtime_force_suspend(dev);
1638 }
1639 
omap2_mcspi_resume(struct device * dev)1640 static int __maybe_unused omap2_mcspi_resume(struct device *dev)
1641 {
1642 	struct spi_controller *ctlr = dev_get_drvdata(dev);
1643 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
1644 	int error;
1645 
1646 	error = spi_controller_resume(ctlr);
1647 	if (error)
1648 		dev_warn(mcspi->dev, "%s: controller resume failed: %i\n",
1649 			 __func__, error);
1650 
1651 	return pm_runtime_force_resume(dev);
1652 }
1653 
/* System sleep hooks plus runtime PM callbacks (pinctrl + context restore). */
static const struct dev_pm_ops omap2_mcspi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap2_mcspi_suspend,
				omap2_mcspi_resume)
	.runtime_suspend	= omap_mcspi_runtime_suspend,
	.runtime_resume		= omap_mcspi_runtime_resume,
};
1660 
/* Platform driver glue; module init/exit are generated by the macro below. */
static struct platform_driver omap2_mcspi_driver = {
	.driver = {
		.name =		"omap2_mcspi",
		.pm =		&omap2_mcspi_pm_ops,
		.of_match_table = omap_mcspi_of_match,
	},
	.probe =	omap2_mcspi_probe,
	.remove =	omap2_mcspi_remove,
};

module_platform_driver(omap2_mcspi_driver);
MODULE_DESCRIPTION("OMAP2 McSPI controller driver");
MODULE_LICENSE("GPL");
1674