xref: /linux/drivers/spi/spi-omap2-mcspi.c (revision a5b2a9f5056b64aa41bd11d9166d836df30b0897)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * OMAP2 McSPI controller driver
4  *
5  * Copyright (C) 2005, 2006 Nokia Corporation
6  * Author:	Samuel Ortiz <samuel.ortiz@nokia.com> and
7  *		Juha Yrjola <juha.yrjola@nokia.com>
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/interrupt.h>
12 #include <linux/module.h>
13 #include <linux/device.h>
14 #include <linux/delay.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/dmaengine.h>
17 #include <linux/pinctrl/consumer.h>
18 #include <linux/platform_device.h>
19 #include <linux/err.h>
20 #include <linux/clk.h>
21 #include <linux/io.h>
22 #include <linux/slab.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/of.h>
25 #include <linux/of_device.h>
26 #include <linux/gcd.h>
27 
28 #include <linux/spi/spi.h>
29 
30 #include "internals.h"
31 
32 #include <linux/platform_data/spi-omap2-mcspi.h>
33 
34 #define OMAP2_MCSPI_MAX_FREQ		48000000
35 #define OMAP2_MCSPI_MAX_DIVIDER		4096
36 #define OMAP2_MCSPI_MAX_FIFODEPTH	64
37 #define OMAP2_MCSPI_MAX_FIFOWCNT	0xFFFF
38 #define SPI_AUTOSUSPEND_TIMEOUT		2000
39 
40 #define OMAP2_MCSPI_REVISION		0x00
41 #define OMAP2_MCSPI_SYSSTATUS		0x14
42 #define OMAP2_MCSPI_IRQSTATUS		0x18
43 #define OMAP2_MCSPI_IRQENABLE		0x1c
44 #define OMAP2_MCSPI_WAKEUPENABLE	0x20
45 #define OMAP2_MCSPI_SYST		0x24
46 #define OMAP2_MCSPI_MODULCTRL		0x28
47 #define OMAP2_MCSPI_XFERLEVEL		0x7c
48 
49 /* per-channel banks, 0x14 bytes each, first is: */
50 #define OMAP2_MCSPI_CHCONF0		0x2c
51 #define OMAP2_MCSPI_CHSTAT0		0x30
52 #define OMAP2_MCSPI_CHCTRL0		0x34
53 #define OMAP2_MCSPI_TX0			0x38
54 #define OMAP2_MCSPI_RX0			0x3c
55 
56 /* per-register bitmasks: */
57 #define OMAP2_MCSPI_IRQSTATUS_EOW	BIT(17)
58 
59 #define OMAP2_MCSPI_MODULCTRL_SINGLE	BIT(0)
60 #define OMAP2_MCSPI_MODULCTRL_MS	BIT(2)
61 #define OMAP2_MCSPI_MODULCTRL_STEST	BIT(3)
62 
63 #define OMAP2_MCSPI_CHCONF_PHA		BIT(0)
64 #define OMAP2_MCSPI_CHCONF_POL		BIT(1)
65 #define OMAP2_MCSPI_CHCONF_CLKD_MASK	(0x0f << 2)
66 #define OMAP2_MCSPI_CHCONF_EPOL		BIT(6)
67 #define OMAP2_MCSPI_CHCONF_WL_MASK	(0x1f << 7)
68 #define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY	BIT(12)
69 #define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY	BIT(13)
70 #define OMAP2_MCSPI_CHCONF_TRM_MASK	(0x03 << 12)
71 #define OMAP2_MCSPI_CHCONF_DMAW		BIT(14)
72 #define OMAP2_MCSPI_CHCONF_DMAR		BIT(15)
73 #define OMAP2_MCSPI_CHCONF_DPE0		BIT(16)
74 #define OMAP2_MCSPI_CHCONF_DPE1		BIT(17)
75 #define OMAP2_MCSPI_CHCONF_IS		BIT(18)
76 #define OMAP2_MCSPI_CHCONF_TURBO	BIT(19)
77 #define OMAP2_MCSPI_CHCONF_FORCE	BIT(20)
78 #define OMAP2_MCSPI_CHCONF_FFET		BIT(27)
79 #define OMAP2_MCSPI_CHCONF_FFER		BIT(28)
80 #define OMAP2_MCSPI_CHCONF_CLKG		BIT(29)
81 
82 #define OMAP2_MCSPI_CHSTAT_RXS		BIT(0)
83 #define OMAP2_MCSPI_CHSTAT_TXS		BIT(1)
84 #define OMAP2_MCSPI_CHSTAT_EOT		BIT(2)
85 #define OMAP2_MCSPI_CHSTAT_TXFFE	BIT(3)
86 
87 #define OMAP2_MCSPI_CHCTRL_EN		BIT(0)
88 #define OMAP2_MCSPI_CHCTRL_EXTCLK_MASK	(0xff << 8)
89 
90 #define OMAP2_MCSPI_WAKEUPENABLE_WKEN	BIT(0)
91 
/* We have 2 DMA channels per CS, one for RX and one for TX */
struct omap2_mcspi_dma {
	struct dma_chan *dma_tx;	/* TX channel; NULL until requested */
	struct dma_chan *dma_rx;	/* RX channel; NULL until requested */

	struct completion dma_tx_completion;	/* completed from TX DMA callback */
	struct completion dma_rx_completion;	/* completed from RX DMA callback */

	/* channel names handed to dma_request_chan() */
	char dma_rx_ch_name[14];
	char dma_tx_ch_name[14];
};
103 
/*
 * Use PIO for transfers smaller than this many bytes: below this size the
 * DMA setup/teardown and cache-maintenance overhead outweighs the benefit.
 * A better heuristic would also consider word size and bit rate.
 */
107 #define DMA_MIN_BYTES			160
108 
109 
/*
 * Used for context save and restore, structure members to be updated whenever
 * corresponding registers are modified.
 */
struct omap2_mcspi_regs {
	u32 modulctrl;		/* shadow of OMAP2_MCSPI_MODULCTRL */
	u32 wakeupenable;	/* shadow of OMAP2_MCSPI_WAKEUPENABLE */
	struct list_head cs;	/* list of struct omap2_mcspi_cs (via cs->node) */
};
119 
/* Per-controller driver state */
struct omap2_mcspi {
	struct completion	txdone;		/* EOW done, waited on in target-mode TX */
	struct spi_controller	*ctlr;
	/* Virtual base address of the controller */
	void __iomem		*base;
	unsigned long		phys;		/* physical base; used to derive DMA FIFO addresses */
	/* SPI1 has 4 channels, while SPI2 has 2 */
	struct omap2_mcspi_dma	*dma_channels;
	struct device		*dev;
	struct omap2_mcspi_regs ctx;		/* register shadows for context restore */
	struct clk		*ref_clk;
	int			fifo_depth;	/* > 0 only while the HW FIFO is enabled */
	bool			target_aborted;	/* target-mode transfer was aborted */
	unsigned int		pin_dir:1;	/* MCSPI_PINDIR_*: D0/D1 input/output routing */
	size_t			max_xfer_len;	/* NOTE(review): presumably HW transfer-size cap — used outside this chunk, confirm */
	u32			ref_clk_hz;	/* cached reference clock rate for divider math */
	bool			use_multi_mode;	/* multi-channel mode: HW manages chip select */
	bool			last_msg_kept_cs; /* NOTE(review): looks like CS-kept-asserted tracking across messages — confirm against callers */
};
139 
/* Per-chip-select (channel) state, stored in spi->controller_state */
struct omap2_mcspi_cs {
	void __iomem		*base;		/* this channel's register bank (0x14 apart) */
	unsigned long		phys;		/* physical address of the bank, for DMA */
	int			word_len;	/* bits per word currently configured */
	u16			mode;		/* SPI mode last programmed into CHCONF */
	struct list_head	node;		/* link in omap2_mcspi_regs.cs */
	/* Context save and restore shadow register */
	u32			chconf0, chctrl0;
};
149 
/* Write @val to the controller-wide McSPI register at byte offset @idx. */
static inline void mcspi_write_reg(struct spi_controller *ctlr,
		int idx, u32 val)
{
	struct omap2_mcspi *priv = spi_controller_get_devdata(ctlr);
	void __iomem *addr = priv->base + idx;

	writel_relaxed(val, addr);
}
157 
mcspi_read_reg(struct spi_controller * ctlr,int idx)158 static inline u32 mcspi_read_reg(struct spi_controller *ctlr, int idx)
159 {
160 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
161 
162 	return readl_relaxed(mcspi->base + idx);
163 }
164 
/* Write @val to this device's per-channel register at offset @idx. */
static inline void mcspi_write_cs_reg(const struct spi_device *spi,
		int idx, u32 val)
{
	struct omap2_mcspi_cs *chan = spi->controller_state;

	writel_relaxed(val, chan->base + idx);
}
172 
mcspi_read_cs_reg(const struct spi_device * spi,int idx)173 static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
174 {
175 	struct omap2_mcspi_cs	*cs = spi->controller_state;
176 
177 	return readl_relaxed(cs->base + idx);
178 }
179 
mcspi_cached_chconf0(const struct spi_device * spi)180 static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
181 {
182 	struct omap2_mcspi_cs *cs = spi->controller_state;
183 
184 	return cs->chconf0;
185 }
186 
/*
 * Update the CHCONFx shadow and push it to hardware.  The read-back
 * posts the write, so the new configuration is in effect on return.
 */
static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
{
	struct omap2_mcspi_cs *chan = spi->controller_state;

	chan->chconf0 = val;
	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
}
195 
/* Bytes needed to hold one SPI word of @word_len bits: 1, 2 or 4. */
static inline int mcspi_bytes_per_word(int word_len)
{
	if (word_len > 16)	/* 17..32-bit words */
		return 4;

	return (word_len > 8) ? 2 : 1;
}
205 
omap2_mcspi_set_dma_req(const struct spi_device * spi,int is_read,int enable)206 static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
207 		int is_read, int enable)
208 {
209 	u32 l, rw;
210 
211 	l = mcspi_cached_chconf0(spi);
212 
213 	if (is_read) /* 1 is read, 0 write */
214 		rw = OMAP2_MCSPI_CHCONF_DMAR;
215 	else
216 		rw = OMAP2_MCSPI_CHCONF_DMAW;
217 
218 	if (enable)
219 		l |= rw;
220 	else
221 		l &= ~rw;
222 
223 	mcspi_write_chconf0(spi, l);
224 }
225 
omap2_mcspi_set_enable(const struct spi_device * spi,int enable)226 static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
227 {
228 	struct omap2_mcspi_cs *cs = spi->controller_state;
229 	u32 l;
230 
231 	l = cs->chctrl0;
232 	if (enable)
233 		l |= OMAP2_MCSPI_CHCTRL_EN;
234 	else
235 		l &= ~OMAP2_MCSPI_CHCTRL_EN;
236 	cs->chctrl0 = l;
237 	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
238 	/* Flash post-writes */
239 	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
240 }
241 
/* Assert/deassert chip select by toggling CHCONF.FORCE (manual CS). */
static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
{
	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
	u32 conf;
	int err;

	/*
	 * The controller handles inverted chip selects itself through the
	 * OMAP2_MCSPI_CHCONF_EPOL bit, so undo the inversion applied by the
	 * SPI core's spi_set_cs().
	 */
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (!spi->controller_state)
		return;

	err = pm_runtime_resume_and_get(mcspi->dev);
	if (err < 0) {
		dev_err(mcspi->dev, "failed to get sync: %d\n", err);
		return;
	}

	conf = mcspi_cached_chconf0(spi);

	/*
	 * FORCE drives CS manually and is only meaningful in single-channel
	 * mode; in multi mode the hardware controls CS, so keep it clear.
	 */
	if (mcspi->use_multi_mode || enable)
		conf &= ~OMAP2_MCSPI_CHCONF_FORCE;
	else
		conf |= OMAP2_MCSPI_CHCONF_FORCE;

	mcspi_write_chconf0(spi, conf);

	pm_runtime_put_autosuspend(mcspi->dev);
}
278 
omap2_mcspi_set_mode(struct spi_controller * ctlr)279 static void omap2_mcspi_set_mode(struct spi_controller *ctlr)
280 {
281 	struct omap2_mcspi	*mcspi = spi_controller_get_devdata(ctlr);
282 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
283 	u32 l;
284 
285 	/*
286 	 * Choose host or target mode
287 	 */
288 	l = mcspi_read_reg(ctlr, OMAP2_MCSPI_MODULCTRL);
289 	l &= ~(OMAP2_MCSPI_MODULCTRL_STEST);
290 	if (spi_controller_is_target(ctlr)) {
291 		l |= (OMAP2_MCSPI_MODULCTRL_MS);
292 	} else {
293 		l &= ~(OMAP2_MCSPI_MODULCTRL_MS);
294 
295 		/* Enable single mode if needed */
296 		if (mcspi->use_multi_mode)
297 			l &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
298 		else
299 			l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
300 	}
301 	mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, l);
302 
303 	ctx->modulctrl = l;
304 }
305 
/*
 * Enable or disable the hardware TX/RX FIFO for transfer @t.
 *
 * The FIFO is usable only when the transfer length is a whole number of
 * words and the word count fits XFERLEVEL.WCNT; otherwise we fall through
 * to the disable path.  mcspi->fifo_depth is non-zero only while the FIFO
 * is actually enabled (other paths key off this).
 */
static void omap2_mcspi_set_fifo(const struct spi_device *spi,
				struct spi_transfer *t, int enable)
{
	struct spi_controller *ctlr = spi->controller;
	struct omap2_mcspi_cs *cs = spi->controller_state;
	struct omap2_mcspi *mcspi;
	unsigned int wcnt;
	int max_fifo_depth, bytes_per_word;
	u32 chconf, xferlevel;

	mcspi = spi_controller_get_devdata(ctlr);

	chconf = mcspi_cached_chconf0(spi);
	if (enable) {
		bytes_per_word = mcspi_bytes_per_word(cs->word_len);
		/* the FIFO cannot handle a trailing partial word */
		if (t->len % bytes_per_word != 0)
			goto disable_fifo;

		/* full duplex splits the FIFO between TX and RX halves */
		if (t->rx_buf != NULL && t->tx_buf != NULL)
			max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
		else
			max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;

		wcnt = t->len / bytes_per_word;
		if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
			goto disable_fifo;

		/* XFERLEVEL: word count in [31:16], RX level in [15:8], TX in [7:0] */
		xferlevel = wcnt << 16;
		if (t->rx_buf != NULL) {
			chconf |= OMAP2_MCSPI_CHCONF_FFER;
			xferlevel |= (bytes_per_word - 1) << 8;
		}

		if (t->tx_buf != NULL) {
			chconf |= OMAP2_MCSPI_CHCONF_FFET;
			xferlevel |= bytes_per_word - 1;
		}

		mcspi_write_reg(ctlr, OMAP2_MCSPI_XFERLEVEL, xferlevel);
		mcspi_write_chconf0(spi, chconf);
		mcspi->fifo_depth = max_fifo_depth;

		return;
	}

disable_fifo:
	if (t->rx_buf != NULL)
		chconf &= ~OMAP2_MCSPI_CHCONF_FFER;

	if (t->tx_buf != NULL)
		chconf &= ~OMAP2_MCSPI_CHCONF_FFET;

	mcspi_write_chconf0(spi, chconf);
	mcspi->fifo_depth = 0;
}
361 
/*
 * Busy-wait (up to 1 s) for @bit to become set in @reg.
 * Returns 0 when the bit is set, -ETIMEDOUT otherwise.
 */
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(1000);

	for (;;) {
		if (readl_relaxed(reg) & bit)
			return 0;
		if (time_after(jiffies, deadline)) {
			/* re-check once: we may have been preempted past the deadline */
			return (readl_relaxed(reg) & bit) ? 0 : -ETIMEDOUT;
		}
		cpu_relax();
	}
}
378 
mcspi_wait_for_completion(struct omap2_mcspi * mcspi,struct completion * x)379 static int mcspi_wait_for_completion(struct  omap2_mcspi *mcspi,
380 				     struct completion *x)
381 {
382 	if (spi_controller_is_target(mcspi->ctlr)) {
383 		if (wait_for_completion_interruptible(x) ||
384 		    mcspi->target_aborted)
385 			return -EINTR;
386 	} else {
387 		wait_for_completion(x);
388 	}
389 
390 	return 0;
391 }
392 
omap2_mcspi_rx_callback(void * data)393 static void omap2_mcspi_rx_callback(void *data)
394 {
395 	struct spi_device *spi = data;
396 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
397 	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
398 
399 	/* We must disable the DMA RX request */
400 	omap2_mcspi_set_dma_req(spi, 1, 0);
401 
402 	complete(&mcspi_dma->dma_rx_completion);
403 }
404 
omap2_mcspi_tx_callback(void * data)405 static void omap2_mcspi_tx_callback(void *data)
406 {
407 	struct spi_device *spi = data;
408 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
409 	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
410 
411 	/* We must disable the DMA TX request */
412 	omap2_mcspi_set_dma_req(spi, 0, 0);
413 
414 	complete(&mcspi_dma->dma_tx_completion);
415 }
416 
omap2_mcspi_tx_dma(struct spi_device * spi,struct spi_transfer * xfer,struct dma_slave_config cfg)417 static void omap2_mcspi_tx_dma(struct spi_device *spi,
418 				struct spi_transfer *xfer,
419 				struct dma_slave_config cfg)
420 {
421 	struct omap2_mcspi	*mcspi;
422 	struct omap2_mcspi_dma  *mcspi_dma;
423 	struct dma_async_tx_descriptor *tx;
424 
425 	mcspi = spi_controller_get_devdata(spi->controller);
426 	mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
427 
428 	dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
429 
430 	tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
431 				     xfer->tx_sg.nents,
432 				     DMA_MEM_TO_DEV,
433 				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
434 	if (tx) {
435 		tx->callback = omap2_mcspi_tx_callback;
436 		tx->callback_param = spi;
437 		dmaengine_submit(tx);
438 	} else {
439 		/* FIXME: fall back to PIO? */
440 	}
441 	dma_async_issue_pending(mcspi_dma->dma_tx);
442 	omap2_mcspi_set_dma_req(spi, 0, 1);
443 }
444 
445 static unsigned
omap2_mcspi_rx_dma(struct spi_device * spi,struct spi_transfer * xfer,struct dma_slave_config cfg,unsigned es)446 omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
447 				struct dma_slave_config cfg,
448 				unsigned es)
449 {
450 	struct omap2_mcspi	*mcspi;
451 	struct omap2_mcspi_dma  *mcspi_dma;
452 	unsigned int		count, transfer_reduction = 0;
453 	struct scatterlist	*sg_out[2];
454 	int			nb_sizes = 0, out_mapped_nents[2], ret, x;
455 	size_t			sizes[2];
456 	u32			l;
457 	int			elements = 0;
458 	int			word_len, element_count;
459 	struct omap2_mcspi_cs	*cs = spi->controller_state;
460 	void __iomem		*chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
461 	struct dma_async_tx_descriptor *tx;
462 
463 	mcspi = spi_controller_get_devdata(spi->controller);
464 	mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
465 	count = xfer->len;
466 
467 	/*
468 	 *  In the "End-of-Transfer Procedure" section for DMA RX in OMAP35x TRM
469 	 *  it mentions reducing DMA transfer length by one element in host
470 	 *  normal mode.
471 	 */
472 	if (mcspi->fifo_depth == 0)
473 		transfer_reduction = es;
474 
475 	word_len = cs->word_len;
476 	l = mcspi_cached_chconf0(spi);
477 
478 	if (word_len <= 8)
479 		element_count = count;
480 	else if (word_len <= 16)
481 		element_count = count >> 1;
482 	else /* word_len <= 32 */
483 		element_count = count >> 2;
484 
485 
486 	dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
487 
488 	/*
489 	 *  Reduce DMA transfer length by one more if McSPI is
490 	 *  configured in turbo mode.
491 	 */
492 	if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
493 		transfer_reduction += es;
494 
495 	if (transfer_reduction) {
496 		/* Split sgl into two. The second sgl won't be used. */
497 		sizes[0] = count - transfer_reduction;
498 		sizes[1] = transfer_reduction;
499 		nb_sizes = 2;
500 	} else {
501 		/*
502 		 * Don't bother splitting the sgl. This essentially
503 		 * clones the original sgl.
504 		 */
505 		sizes[0] = count;
506 		nb_sizes = 1;
507 	}
508 
509 	ret = sg_split(xfer->rx_sg.sgl, xfer->rx_sg.nents, 0, nb_sizes,
510 		       sizes, sg_out, out_mapped_nents, GFP_KERNEL);
511 
512 	if (ret < 0) {
513 		dev_err(&spi->dev, "sg_split failed\n");
514 		return 0;
515 	}
516 
517 	tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, sg_out[0],
518 				     out_mapped_nents[0], DMA_DEV_TO_MEM,
519 				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
520 	if (tx) {
521 		tx->callback = omap2_mcspi_rx_callback;
522 		tx->callback_param = spi;
523 		dmaengine_submit(tx);
524 	} else {
525 		/* FIXME: fall back to PIO? */
526 	}
527 
528 	dma_async_issue_pending(mcspi_dma->dma_rx);
529 	omap2_mcspi_set_dma_req(spi, 1, 1);
530 
531 	ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_rx_completion);
532 	if (ret || mcspi->target_aborted) {
533 		dmaengine_terminate_sync(mcspi_dma->dma_rx);
534 		omap2_mcspi_set_dma_req(spi, 1, 0);
535 		return 0;
536 	}
537 
538 	for (x = 0; x < nb_sizes; x++)
539 		kfree(sg_out[x]);
540 
541 	if (mcspi->fifo_depth > 0)
542 		return count;
543 
544 	/*
545 	 *  Due to the DMA transfer length reduction the missing bytes must
546 	 *  be read manually to receive all of the expected data.
547 	 */
548 	omap2_mcspi_set_enable(spi, 0);
549 
550 	elements = element_count - 1;
551 
552 	if (l & OMAP2_MCSPI_CHCONF_TURBO) {
553 		elements--;
554 
555 		if (!mcspi_wait_for_reg_bit(chstat_reg,
556 					    OMAP2_MCSPI_CHSTAT_RXS)) {
557 			u32 w;
558 
559 			w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
560 			if (word_len <= 8)
561 				((u8 *)xfer->rx_buf)[elements++] = w;
562 			else if (word_len <= 16)
563 				((u16 *)xfer->rx_buf)[elements++] = w;
564 			else /* word_len <= 32 */
565 				((u32 *)xfer->rx_buf)[elements++] = w;
566 		} else {
567 			int bytes_per_word = mcspi_bytes_per_word(word_len);
568 			dev_err(&spi->dev, "DMA RX penultimate word empty\n");
569 			count -= (bytes_per_word << 1);
570 			omap2_mcspi_set_enable(spi, 1);
571 			return count;
572 		}
573 	}
574 	if (!mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_RXS)) {
575 		u32 w;
576 
577 		w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
578 		if (word_len <= 8)
579 			((u8 *)xfer->rx_buf)[elements] = w;
580 		else if (word_len <= 16)
581 			((u16 *)xfer->rx_buf)[elements] = w;
582 		else /* word_len <= 32 */
583 			((u32 *)xfer->rx_buf)[elements] = w;
584 	} else {
585 		dev_err(&spi->dev, "DMA RX last word empty\n");
586 		count -= mcspi_bytes_per_word(word_len);
587 	}
588 	omap2_mcspi_set_enable(spi, 1);
589 	return count;
590 }
591 
/*
 * Run one transfer with DMA in whichever directions have buffers.
 *
 * TX is started first so data is queued before clocking begins, then RX
 * (if any) is driven to completion, and finally the TX side is drained by
 * polling EOW (FIFO mode) and TXFFE/TXS + EOT (TX-only) so every word has
 * left the shift register.  Returns bytes transferred, 0 on error/abort.
 */
static unsigned
omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_cs	*cs = spi->controller_state;
	struct omap2_mcspi_dma  *mcspi_dma;
	unsigned int		count;
	u8			*rx;
	const u8		*tx;
	struct dma_slave_config	cfg;
	enum dma_slave_buswidth width;
	unsigned es;
	void __iomem		*chstat_reg;
	void __iomem            *irqstat_reg;
	int			wait_res;

	mcspi = spi_controller_get_devdata(spi->controller);
	mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];

	/* DMA element size follows the configured SPI word length */
	if (cs->word_len <= 8) {
		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		es = 1;
	} else if (cs->word_len <= 16) {
		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		es = 2;
	} else {
		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		es = 4;
	}

	count = xfer->len;

	/* one config is shared by both directions (src = RX FIFO, dst = TX FIFO) */
	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
	cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
	cfg.src_addr_width = width;
	cfg.dst_addr_width = width;
	cfg.src_maxburst = 1;
	cfg.dst_maxburst = 1;

	rx = xfer->rx_buf;
	tx = xfer->tx_buf;

	mcspi->target_aborted = false;
	reinit_completion(&mcspi_dma->dma_tx_completion);
	reinit_completion(&mcspi_dma->dma_rx_completion);
	reinit_completion(&mcspi->txdone);
	if (tx) {
		/* Enable EOW IRQ to know end of tx in target mode */
		if (spi_controller_is_target(spi->controller))
			mcspi_write_reg(spi->controller,
					OMAP2_MCSPI_IRQENABLE,
					OMAP2_MCSPI_IRQSTATUS_EOW);
		omap2_mcspi_tx_dma(spi, xfer, cfg);
	}

	if (rx != NULL)
		count = omap2_mcspi_rx_dma(spi, xfer, cfg, es);

	if (tx != NULL) {
		int ret;

		ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_tx_completion);
		if (ret || mcspi->target_aborted) {
			dmaengine_terminate_sync(mcspi_dma->dma_tx);
			omap2_mcspi_set_dma_req(spi, 0, 0);
			return 0;
		}

		if (spi_controller_is_target(mcspi->ctlr)) {
			/* target mode additionally waits for the EOW interrupt */
			ret = mcspi_wait_for_completion(mcspi, &mcspi->txdone);
			if (ret || mcspi->target_aborted)
				return 0;
		}

		if (mcspi->fifo_depth > 0) {
			irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;

			if (mcspi_wait_for_reg_bit(irqstat_reg,
						OMAP2_MCSPI_IRQSTATUS_EOW) < 0)
				dev_err(&spi->dev, "EOW timed out\n");

			/* acknowledge EOW (write-1-to-clear) */
			mcspi_write_reg(mcspi->ctlr, OMAP2_MCSPI_IRQSTATUS,
					OMAP2_MCSPI_IRQSTATUS_EOW);
		}

		/* for TX_ONLY mode, be sure all words have shifted out */
		if (rx == NULL) {
			chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
			if (mcspi->fifo_depth > 0) {
				wait_res = mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXFFE);
				if (wait_res < 0)
					dev_err(&spi->dev, "TXFFE timed out\n");
			} else {
				wait_res = mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS);
				if (wait_res < 0)
					dev_err(&spi->dev, "TXS timed out\n");
			}
			if (wait_res >= 0 &&
				(mcspi_wait_for_reg_bit(chstat_reg,
					OMAP2_MCSPI_CHSTAT_EOT) < 0))
				dev_err(&spi->dev, "EOT timed out\n");
		}
	}
	return count;
}
700 
/*
 * Transfer xfer->len bytes by polling-mode PIO.
 *
 * One near-identical loop per word width (<=8, <=16, <=32 bits).  For
 * RX-only transfers the channel is disabled just before reading the last
 * word (last two in turbo mode) so the controller does not clock in an
 * extra word.  Returns the number of bytes actually transferred.
 */
static unsigned
omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct omap2_mcspi_cs	*cs = spi->controller_state;
	unsigned int		count, c;
	u32			l;
	void __iomem		*base = cs->base;
	void __iomem		*tx_reg;
	void __iomem		*rx_reg;
	void __iomem		*chstat_reg;
	int			word_len;

	count = xfer->len;
	c = count;
	word_len = cs->word_len;

	l = mcspi_cached_chconf0(spi);

	/* We store the pre-calculated register addresses on stack to speed
	 * up the transfer loop. */
	tx_reg		= base + OMAP2_MCSPI_TX0;
	rx_reg		= base + OMAP2_MCSPI_RX0;
	chstat_reg	= base + OMAP2_MCSPI_CHSTAT0;

	/* nothing to do if the buffer is shorter than one word */
	if (c < (word_len>>3))
		return 0;

	if (word_len <= 8) {
		u8		*rx;
		const u8	*tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;

		do {
			c -= 1;
			if (tx != NULL) {
				/* wait until the TX register can accept a word */
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %02x\n",
						word_len, *tx);
				writel_relaxed(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				/*
				 * RX-only turbo mode: disable the channel
				 * before draining the final two words so no
				 * extra word gets clocked in.
				 */
				if (c == 1 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = readl_relaxed(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %02x\n",
						    word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					/* RX-only: disable before the last read */
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = readl_relaxed(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %02x\n",
						word_len, *(rx - 1));
			}
			/* Add word delay between each word */
			spi_delay_exec(&xfer->word_delay, xfer);
		} while (c);
	} else if (word_len <= 16) {
		u16		*rx;
		const u16	*tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;
		do {
			c -= 2;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %04x\n",
						word_len, *tx);
				writel_relaxed(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				/* see 8-bit branch: turbo-mode tail handling */
				if (c == 2 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = readl_relaxed(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %04x\n",
						    word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = readl_relaxed(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %04x\n",
						word_len, *(rx - 1));
			}
			/* Add word delay between each word */
			spi_delay_exec(&xfer->word_delay, xfer);
		} while (c >= 2);
	} else if (word_len <= 32) {
		u32		*rx;
		const u32	*tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;
		do {
			c -= 4;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %08x\n",
						word_len, *tx);
				writel_relaxed(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				/* see 8-bit branch: turbo-mode tail handling */
				if (c == 4 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = readl_relaxed(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %08x\n",
						    word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = readl_relaxed(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %08x\n",
						word_len, *(rx - 1));
			}
			/* Add word delay between each word */
			spi_delay_exec(&xfer->word_delay, xfer);
		} while (c >= 4);
	}

	/* for TX_ONLY mode, be sure all words have shifted out */
	if (xfer->rx_buf == NULL) {
		if (mcspi_wait_for_reg_bit(chstat_reg,
				OMAP2_MCSPI_CHSTAT_TXS) < 0) {
			dev_err(&spi->dev, "TXS timed out\n");
		} else if (mcspi_wait_for_reg_bit(chstat_reg,
				OMAP2_MCSPI_CHSTAT_EOT) < 0)
			dev_err(&spi->dev, "EOT timed out\n");

		/* disable chan to purge rx datas received in TX_ONLY transfer,
		 * otherwise these rx datas will affect the direct following
		 * RX_ONLY transfer.
		 */
		omap2_mcspi_set_enable(spi, 0);
	}
out:
	omap2_mcspi_set_enable(spi, 1);
	return count - c;
}
897 
/*
 * Pick the smallest power-of-two divider exponent (0..15) such that
 * ref_clk_hz >> div does not exceed speed_hz; saturates at 15.
 */
static u32 omap2_mcspi_calc_divisor(u32 speed_hz, u32 ref_clk_hz)
{
	u32 div = 0;

	while (div < 15 && speed_hz < (ref_clk_hz >> div))
		div++;

	return div;
}
908 
/* called only when no transfer is active to this device */
static int omap2_mcspi_setup_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;
	struct omap2_mcspi *mcspi;
	u32 ref_clk_hz, l = 0, clkd = 0, div, extclk = 0, clkg = 0;
	u8 word_len = spi->bits_per_word;
	u32 speed_hz = spi->max_speed_hz;

	mcspi = spi_controller_get_devdata(spi->controller);

	/* per-transfer parameters override the device defaults */
	if (t != NULL && t->bits_per_word)
		word_len = t->bits_per_word;

	cs->word_len = word_len;

	if (t && t->speed_hz)
		speed_hz = t->speed_hz;

	ref_clk_hz = mcspi->ref_clk_hz;
	speed_hz = min_t(u32, speed_hz, ref_clk_hz);
	if (speed_hz < (ref_clk_hz / OMAP2_MCSPI_MAX_DIVIDER)) {
		/* slow clocks: power-of-two divider, clock granularity off */
		clkd = omap2_mcspi_calc_divisor(speed_hz, ref_clk_hz);
		speed_hz = ref_clk_hz >> clkd;
		clkg = 0;
	} else {
		/*
		 * one-clock granularity: 12-bit divider, low nibble in
		 * CHCONF.CLKD, high 8 bits in CHCTRL.EXTCLK, CLKG set
		 */
		div = (ref_clk_hz + speed_hz - 1) / speed_hz;
		speed_hz = ref_clk_hz / div;
		clkd = (div - 1) & 0xf;
		extclk = (div - 1) >> 4;
		clkg = OMAP2_MCSPI_CHCONF_CLKG;
	}

	l = mcspi_cached_chconf0(spi);

	/* standard 4-wire host mode:  SCK, MOSI/out, MISO/in, nCS
	 * REVISIT: this controller could support SPI_3WIRE mode.
	 */
	if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
		l &= ~OMAP2_MCSPI_CHCONF_IS;
		l &= ~OMAP2_MCSPI_CHCONF_DPE1;
		l |= OMAP2_MCSPI_CHCONF_DPE0;
	} else {
		l |= OMAP2_MCSPI_CHCONF_IS;
		l |= OMAP2_MCSPI_CHCONF_DPE1;
		l &= ~OMAP2_MCSPI_CHCONF_DPE0;
	}

	/* wordlength */
	l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
	l |= (word_len - 1) << 7;

	/* set chipselect polarity; manage with FORCE */
	if (!(spi->mode & SPI_CS_HIGH))
		l |= OMAP2_MCSPI_CHCONF_EPOL;	/* active-low; normal */
	else
		l &= ~OMAP2_MCSPI_CHCONF_EPOL;

	/* set clock divisor */
	l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
	l |= clkd << 2;

	/* set clock granularity */
	l &= ~OMAP2_MCSPI_CHCONF_CLKG;
	l |= clkg;
	if (clkg) {
		cs->chctrl0 &= ~OMAP2_MCSPI_CHCTRL_EXTCLK_MASK;
		cs->chctrl0 |= extclk << 8;
		mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
	}

	/* set SPI mode 0..3 */
	if (spi->mode & SPI_CPOL)
		l |= OMAP2_MCSPI_CHCONF_POL;
	else
		l &= ~OMAP2_MCSPI_CHCONF_POL;
	if (spi->mode & SPI_CPHA)
		l |= OMAP2_MCSPI_CHCONF_PHA;
	else
		l &= ~OMAP2_MCSPI_CHCONF_PHA;

	/* pulse FORCE so the new EPOL/mode take effect on the CS line */
	mcspi_write_chconf0(spi, l | OMAP2_MCSPI_CHCONF_FORCE);
	mcspi_write_chconf0(spi, l);

	cs->mode = spi->mode;

	dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
			speed_hz,
			(spi->mode & SPI_CPHA) ? "trailing" : "leading",
			(spi->mode & SPI_CPOL) ? "inverted" : "normal");

	return 0;
}
1003 
1004 /*
1005  * Note that we currently allow DMA only if we get a channel
1006  * for both rx and tx. Otherwise we'll do PIO for both rx and tx.
1007  */
omap2_mcspi_request_dma(struct omap2_mcspi * mcspi,struct omap2_mcspi_dma * mcspi_dma)1008 static int omap2_mcspi_request_dma(struct omap2_mcspi *mcspi,
1009 				   struct omap2_mcspi_dma *mcspi_dma)
1010 {
1011 	int ret = 0;
1012 
1013 	mcspi_dma->dma_rx = dma_request_chan(mcspi->dev,
1014 					     mcspi_dma->dma_rx_ch_name);
1015 	if (IS_ERR(mcspi_dma->dma_rx)) {
1016 		ret = PTR_ERR(mcspi_dma->dma_rx);
1017 		mcspi_dma->dma_rx = NULL;
1018 		goto no_dma;
1019 	}
1020 
1021 	mcspi_dma->dma_tx = dma_request_chan(mcspi->dev,
1022 					     mcspi_dma->dma_tx_ch_name);
1023 	if (IS_ERR(mcspi_dma->dma_tx)) {
1024 		ret = PTR_ERR(mcspi_dma->dma_tx);
1025 		mcspi_dma->dma_tx = NULL;
1026 		dma_release_channel(mcspi_dma->dma_rx);
1027 		mcspi_dma->dma_rx = NULL;
1028 	}
1029 
1030 	init_completion(&mcspi_dma->dma_rx_completion);
1031 	init_completion(&mcspi_dma->dma_tx_completion);
1032 
1033 no_dma:
1034 	return ret;
1035 }
1036 
omap2_mcspi_release_dma(struct spi_controller * ctlr)1037 static void omap2_mcspi_release_dma(struct spi_controller *ctlr)
1038 {
1039 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
1040 	struct omap2_mcspi_dma	*mcspi_dma;
1041 	int i;
1042 
1043 	for (i = 0; i < ctlr->num_chipselect; i++) {
1044 		mcspi_dma = &mcspi->dma_channels[i];
1045 
1046 		if (mcspi_dma->dma_rx) {
1047 			dma_release_channel(mcspi_dma->dma_rx);
1048 			mcspi_dma->dma_rx = NULL;
1049 		}
1050 		if (mcspi_dma->dma_tx) {
1051 			dma_release_channel(mcspi_dma->dma_tx);
1052 			mcspi_dma->dma_tx = NULL;
1053 		}
1054 	}
1055 }
1056 
omap2_mcspi_cleanup(struct spi_device * spi)1057 static void omap2_mcspi_cleanup(struct spi_device *spi)
1058 {
1059 	struct omap2_mcspi_cs	*cs;
1060 
1061 	if (spi->controller_state) {
1062 		/* Unlink controller state from context save list */
1063 		cs = spi->controller_state;
1064 		list_del(&cs->node);
1065 
1066 		kfree(cs);
1067 	}
1068 }
1069 
omap2_mcspi_setup(struct spi_device * spi)1070 static int omap2_mcspi_setup(struct spi_device *spi)
1071 {
1072 	bool			initial_setup = false;
1073 	int			ret;
1074 	struct omap2_mcspi	*mcspi = spi_controller_get_devdata(spi->controller);
1075 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1076 	struct omap2_mcspi_cs	*cs = spi->controller_state;
1077 
1078 	if (!cs) {
1079 		cs = kzalloc(sizeof(*cs), GFP_KERNEL);
1080 		if (!cs)
1081 			return -ENOMEM;
1082 		cs->base = mcspi->base + spi_get_chipselect(spi, 0) * 0x14;
1083 		cs->phys = mcspi->phys + spi_get_chipselect(spi, 0) * 0x14;
1084 		cs->mode = 0;
1085 		cs->chconf0 = 0;
1086 		cs->chctrl0 = 0;
1087 		spi->controller_state = cs;
1088 		/* Link this to context save list */
1089 		list_add_tail(&cs->node, &ctx->cs);
1090 		initial_setup = true;
1091 	}
1092 
1093 	ret = pm_runtime_resume_and_get(mcspi->dev);
1094 	if (ret < 0) {
1095 		if (initial_setup)
1096 			omap2_mcspi_cleanup(spi);
1097 
1098 		return ret;
1099 	}
1100 
1101 	ret = omap2_mcspi_setup_transfer(spi, NULL);
1102 	if (ret && initial_setup)
1103 		omap2_mcspi_cleanup(spi);
1104 
1105 	pm_runtime_put_autosuspend(mcspi->dev);
1106 
1107 	return ret;
1108 }
1109 
omap2_mcspi_irq_handler(int irq,void * data)1110 static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
1111 {
1112 	struct omap2_mcspi *mcspi = data;
1113 	u32 irqstat;
1114 
1115 	irqstat	= mcspi_read_reg(mcspi->ctlr, OMAP2_MCSPI_IRQSTATUS);
1116 	if (!irqstat)
1117 		return IRQ_NONE;
1118 
1119 	/* Disable IRQ and wakeup target xfer task */
1120 	mcspi_write_reg(mcspi->ctlr, OMAP2_MCSPI_IRQENABLE, 0);
1121 	if (irqstat & OMAP2_MCSPI_IRQSTATUS_EOW)
1122 		complete(&mcspi->txdone);
1123 
1124 	return IRQ_HANDLED;
1125 }
1126 
omap2_mcspi_target_abort(struct spi_controller * ctlr)1127 static int omap2_mcspi_target_abort(struct spi_controller *ctlr)
1128 {
1129 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
1130 	struct omap2_mcspi_dma *mcspi_dma = mcspi->dma_channels;
1131 
1132 	mcspi->target_aborted = true;
1133 	complete(&mcspi_dma->dma_rx_completion);
1134 	complete(&mcspi_dma->dma_tx_completion);
1135 	complete(&mcspi->txdone);
1136 
1137 	return 0;
1138 }
1139 
static int omap2_mcspi_transfer_one(struct spi_controller *ctlr,
				    struct spi_device *spi,
				    struct spi_transfer *t)
{

	/* We only enable one channel at a time -- the one whose message is
	 * currently being processed -- although this controller would gladly
	 * arbitrate among multiple channels.  This corresponds to "single
	 * channel" host mode.  As a side effect, we need to manage the
	 * chipselect with the FORCE bit ... CS != channel enable.
	 */

	struct omap2_mcspi		*mcspi;
	struct omap2_mcspi_dma		*mcspi_dma;
	struct omap2_mcspi_cs		*cs;
	struct omap2_mcspi_device_config *cd;
	int				par_override = 0;
	int				status = 0;
	u32				chconf;

	mcspi = spi_controller_get_devdata(ctlr);
	mcspi_dma = mcspi->dma_channels + spi_get_chipselect(spi, 0);
	cs = spi->controller_state;
	cd = spi->controller_data;

	/*
	 * The target driver could have changed spi->mode in which case
	 * it will be different from cs->mode (the current hardware setup).
	 * If so, set par_override (even though its not a parity issue) so
	 * omap2_mcspi_setup_transfer will be called to configure the hardware
	 * with the correct mode on the first iteration of the loop below.
	 */
	if (spi->mode != cs->mode)
		par_override = 1;

	omap2_mcspi_set_enable(spi, 0);

	if (spi_get_csgpiod(spi, 0))
		omap2_mcspi_set_cs(spi, spi->mode & SPI_CS_HIGH);

	/* Reprogram speed/word length when this transfer deviates from the
	 * device defaults, or when the mode changed (par_override above). */
	if (par_override ||
	    (t->speed_hz != spi->max_speed_hz) ||
	    (t->bits_per_word != spi->bits_per_word)) {
		par_override = 1;
		status = omap2_mcspi_setup_transfer(spi, t);
		if (status < 0)
			goto out;
		if (t->speed_hz == spi->max_speed_hz &&
		    t->bits_per_word == spi->bits_per_word)
			par_override = 0;
	}

	/* Select the transmit/receive mode for this transfer. */
	chconf = mcspi_cached_chconf0(spi);
	chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
	chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;

	if (t->tx_buf == NULL)
		chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
	else if (t->rx_buf == NULL)
		chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;

	if (cd && cd->turbo_mode && t->tx_buf == NULL) {
		/* Turbo mode is for more than one word */
		if (t->len > ((cs->word_len + 7) >> 3))
			chconf |= OMAP2_MCSPI_CHCONF_TURBO;
	}

	mcspi_write_chconf0(spi, chconf);

	if (t->len) {
		unsigned	count;

		/* DMA path needs both channels AND a DMA-mapped transfer;
		 * enable the FIFO only for that case. */
		if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
		    spi_xfer_is_dma_mapped(ctlr, spi, t))
			omap2_mcspi_set_fifo(spi, t, 1);

		omap2_mcspi_set_enable(spi, 1);

		/* RX_ONLY mode needs dummy data in TX reg */
		if (t->tx_buf == NULL)
			writel_relaxed(0, cs->base
					+ OMAP2_MCSPI_TX0);

		if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
		    spi_xfer_is_dma_mapped(ctlr, spi, t))
			count = omap2_mcspi_txrx_dma(spi, t);
		else
			count = omap2_mcspi_txrx_pio(spi, t);

		/* A short transfer count means the xfer timed out/failed. */
		if (count != t->len) {
			status = -EIO;
			goto out;
		}
	}

	omap2_mcspi_set_enable(spi, 0);

	if (mcspi->fifo_depth > 0)
		omap2_mcspi_set_fifo(spi, t, 0);

out:
	/* Restore defaults if they were overridden */
	if (par_override) {
		par_override = 0;
		status = omap2_mcspi_setup_transfer(spi, NULL);
	}

	omap2_mcspi_set_enable(spi, 0);

	if (spi_get_csgpiod(spi, 0))
		omap2_mcspi_set_cs(spi, !(spi->mode & SPI_CS_HIGH));

	/* Also tear down FIFO mode on the error path. */
	if (mcspi->fifo_depth > 0 && t)
		omap2_mcspi_set_fifo(spi, t, 0);

	return status;
}
1257 
/*
 * Choose between single-channel and multi-channel mode for this message,
 * program the controller mode accordingly, and clear stale FORCE (manual
 * chip-select) bits left over from previous transfers.
 */
static int omap2_mcspi_prepare_message(struct spi_controller *ctlr,
				       struct spi_message *msg)
{
	struct omap2_mcspi	*mcspi = spi_controller_get_devdata(ctlr);
	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
	struct omap2_mcspi_cs	*cs;
	struct spi_transfer	*tr;
	u8 bits_per_word;

	/*
	 * The conditions are strict, it is mandatory to check each transfer of the list to see if
	 * multi-mode is applicable.
	 */
	mcspi->use_multi_mode = true;

	/* If the previous message left CS asserted, multi-mode cannot be used. */
	if (mcspi->last_msg_kept_cs)
		mcspi->use_multi_mode = false;

	list_for_each_entry(tr, &msg->transfers, transfer_list) {
		if (!tr->bits_per_word)
			bits_per_word = msg->spi->bits_per_word;
		else
			bits_per_word = tr->bits_per_word;

		/*
		 * Check if this transfer contains only one word;
		 */
		if (bits_per_word < 8 && tr->len == 1) {
			/* multi-mode is applicable, only one word (1..7 bits) */
		} else if (bits_per_word >= 8 && tr->len == bits_per_word / 8) {
			/* multi-mode is applicable, only one word (8..32 bits) */
		} else {
			/* multi-mode is not applicable: more than one word in the transfer */
			mcspi->use_multi_mode = false;
		}

		if (list_is_last(&tr->transfer_list, &msg->transfers)) {
			/* Check if transfer asks to keep the CS status after the whole message */
			if (tr->cs_change) {
				mcspi->use_multi_mode = false;
				mcspi->last_msg_kept_cs = true;
			} else {
				mcspi->last_msg_kept_cs = false;
			}
		} else {
			/* Check if transfer asks to change the CS status after the transfer */
			if (!tr->cs_change)
				mcspi->use_multi_mode = false;
		}
	}

	omap2_mcspi_set_mode(ctlr);

	/* In single mode only a single channel can have the FORCE bit enabled
	 * in its chconf0 register.
	 * Scan all channels and disable them except the current one.
	 * A FORCE can remain from a last transfer having cs_change enabled
	 *
	 * In multi mode all FORCE bits must be disabled.
	 */
	list_for_each_entry(cs, &ctx->cs, node) {
		/* In single mode, the current message's channel keeps FORCE. */
		if (msg->spi->controller_state == cs && !mcspi->use_multi_mode) {
			continue;
		}

		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE)) {
			cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
			writel_relaxed(cs->chconf0,
					cs->base + OMAP2_MCSPI_CHCONF0);
			/* Read back to post the write before moving on. */
			readl_relaxed(cs->base + OMAP2_MCSPI_CHCONF0);
		}
	}

	return 0;
}
1333 
omap2_mcspi_can_dma(struct spi_controller * ctlr,struct spi_device * spi,struct spi_transfer * xfer)1334 static bool omap2_mcspi_can_dma(struct spi_controller *ctlr,
1335 				struct spi_device *spi,
1336 				struct spi_transfer *xfer)
1337 {
1338 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
1339 	struct omap2_mcspi_dma *mcspi_dma =
1340 		&mcspi->dma_channels[spi_get_chipselect(spi, 0)];
1341 
1342 	if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx)
1343 		return false;
1344 
1345 	if (spi_controller_is_target(ctlr))
1346 		return true;
1347 
1348 	ctlr->dma_rx = mcspi_dma->dma_rx;
1349 	ctlr->dma_tx = mcspi_dma->dma_tx;
1350 
1351 	return (xfer->len >= DMA_MIN_BYTES);
1352 }
1353 
omap2_mcspi_max_xfer_size(struct spi_device * spi)1354 static size_t omap2_mcspi_max_xfer_size(struct spi_device *spi)
1355 {
1356 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
1357 	struct omap2_mcspi_dma *mcspi_dma =
1358 		&mcspi->dma_channels[spi_get_chipselect(spi, 0)];
1359 
1360 	if (mcspi->max_xfer_len && mcspi_dma->dma_rx)
1361 		return mcspi->max_xfer_len;
1362 
1363 	return SIZE_MAX;
1364 }
1365 
omap2_mcspi_controller_setup(struct omap2_mcspi * mcspi)1366 static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi)
1367 {
1368 	struct spi_controller	*ctlr = mcspi->ctlr;
1369 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1370 	int			ret = 0;
1371 
1372 	ret = pm_runtime_resume_and_get(mcspi->dev);
1373 	if (ret < 0)
1374 		return ret;
1375 
1376 	mcspi_write_reg(ctlr, OMAP2_MCSPI_WAKEUPENABLE,
1377 			OMAP2_MCSPI_WAKEUPENABLE_WKEN);
1378 	ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
1379 
1380 	omap2_mcspi_set_mode(ctlr);
1381 	pm_runtime_put_autosuspend(mcspi->dev);
1382 	return 0;
1383 }
1384 
static int omap_mcspi_runtime_suspend(struct device *dev)
{
	int ret = pinctrl_pm_select_idle_state(dev);

	/* A pinctrl failure is non-fatal; warn and suspend anyway. */
	if (ret)
		dev_warn(dev, "%s: failed to set pins: %i\n", __func__, ret);

	return 0;
}
1395 
/*
 * When the SPI module wakes up from off-mode, CS is in the active state. If
 * it was in the inactive state when the driver was suspended, force it back
 * to the inactive state at wake up.
 */
static int omap_mcspi_runtime_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
	struct omap2_mcspi_regs *ctx = &mcspi->ctx;
	struct omap2_mcspi_cs *cs;
	int error;

	error = pinctrl_pm_select_default_state(dev);
	if (error)
		dev_warn(dev, "%s: failed to set pins: %i\n", __func__, error);

	/* McSPI: context restore */
	mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
	mcspi_write_reg(ctlr, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);

	list_for_each_entry(cs, &ctx->cs, node) {
		/*
		 * We need to toggle the CS state for the OMAP to take this
		 * change into account.
		 */
		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
			/* FORCE was clear: pulse it high then low so the
			 * restored (inactive) CS level is latched. */
			cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE;
			writel_relaxed(cs->chconf0,
				       cs->base + OMAP2_MCSPI_CHCONF0);
			cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
			writel_relaxed(cs->chconf0,
				       cs->base + OMAP2_MCSPI_CHCONF0);
		} else {
			writel_relaxed(cs->chconf0,
				       cs->base + OMAP2_MCSPI_CHCONF0);
		}
	}

	return 0;
}
1437 
/* OMAP2/3: McSPI registers start at the base of the resource. */
static struct omap2_mcspi_platform_config omap2_pdata = {
	.regs_offset = 0,
};

/* OMAP4 and later: register block shifted by OMAP4_MCSPI_REG_OFFSET. */
static struct omap2_mcspi_platform_config omap4_pdata = {
	.regs_offset = OMAP4_MCSPI_REG_OFFSET,
};

/* AM654: OMAP4 layout plus a per-transfer length cap on the DMA path. */
static struct omap2_mcspi_platform_config am654_pdata = {
	.regs_offset = OMAP4_MCSPI_REG_OFFSET,
	.max_xfer_len = SZ_4K - 1,
};
1450 
/* Device-tree match table; .data selects the per-SoC register layout. */
static const struct of_device_id omap_mcspi_of_match[] = {
	{
		.compatible = "ti,omap2-mcspi",
		.data = &omap2_pdata,
	},
	{
		.compatible = "ti,omap4-mcspi",
		.data = &omap4_pdata,
	},
	{
		.compatible = "ti,am654-mcspi",
		.data = &am654_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
1467 
static int omap2_mcspi_probe(struct platform_device *pdev)
{
	struct spi_controller	*ctlr;
	const struct omap2_mcspi_platform_config *pdata;
	struct omap2_mcspi	*mcspi;
	struct resource		*r;
	int			status = 0, i;
	u32			regs_offset = 0;
	struct device_node	*node = pdev->dev.of_node;
	const struct of_device_id *match;

	/* "spi-slave" in the device tree selects SPI target mode. */
	if (of_property_read_bool(node, "spi-slave"))
		ctlr = spi_alloc_target(&pdev->dev, sizeof(*mcspi));
	else
		ctlr = spi_alloc_host(&pdev->dev, sizeof(*mcspi));
	if (!ctlr)
		return -ENOMEM;

	/* the spi->mode bits understood by this driver: */
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	ctlr->setup = omap2_mcspi_setup;
	ctlr->auto_runtime_pm = true;
	ctlr->prepare_message = omap2_mcspi_prepare_message;
	ctlr->can_dma = omap2_mcspi_can_dma;
	ctlr->transfer_one = omap2_mcspi_transfer_one;
	ctlr->set_cs = omap2_mcspi_set_cs;
	ctlr->cleanup = omap2_mcspi_cleanup;
	ctlr->target_abort = omap2_mcspi_target_abort;
	ctlr->dev.of_node = node;
	ctlr->use_gpio_descriptors = true;

	platform_set_drvdata(pdev, ctlr);

	mcspi = spi_controller_get_devdata(ctlr);
	mcspi->ctlr = ctlr;

	/* DT probe: per-SoC pdata from the match table; else board pdata. */
	match = of_match_device(omap_mcspi_of_match, &pdev->dev);
	if (match) {
		u32 num_cs = 1; /* default number of chipselect */
		pdata = match->data;

		of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
		ctlr->num_chipselect = num_cs;
		if (of_property_read_bool(node, "ti,pindir-d0-out-d1-in"))
			mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
	} else {
		pdata = dev_get_platdata(&pdev->dev);
		ctlr->num_chipselect = pdata->num_cs;
		mcspi->pin_dir = pdata->pin_dir;
	}
	regs_offset = pdata->regs_offset;
	if (pdata->max_xfer_len) {
		mcspi->max_xfer_len = pdata->max_xfer_len;
		ctlr->max_transfer_size = omap2_mcspi_max_xfer_size;
	}

	mcspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
	if (IS_ERR(mcspi->base)) {
		status = PTR_ERR(mcspi->base);
		goto free_ctlr;
	}
	/* Apply the per-SoC register offset to both mappings. */
	mcspi->phys = r->start + regs_offset;
	mcspi->base += regs_offset;

	mcspi->dev = &pdev->dev;

	INIT_LIST_HEAD(&mcspi->ctx.cs);

	mcspi->dma_channels = devm_kcalloc(&pdev->dev, ctlr->num_chipselect,
					   sizeof(struct omap2_mcspi_dma),
					   GFP_KERNEL);
	if (mcspi->dma_channels == NULL) {
		status = -ENOMEM;
		goto free_ctlr;
	}

	/* Request rx/tx DMA per chipselect; failure other than probe
	 * deferral is non-fatal and the driver falls back to PIO. */
	for (i = 0; i < ctlr->num_chipselect; i++) {
		sprintf(mcspi->dma_channels[i].dma_rx_ch_name, "rx%d", i);
		sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i);

		status = omap2_mcspi_request_dma(mcspi,
						 &mcspi->dma_channels[i]);
		if (status == -EPROBE_DEFER)
			goto free_ctlr;
	}

	/* status temporarily holds the IRQ number before being reused. */
	status = platform_get_irq(pdev, 0);
	if (status < 0)
		goto free_ctlr;
	init_completion(&mcspi->txdone);
	status = devm_request_irq(&pdev->dev, status,
				  omap2_mcspi_irq_handler, 0, pdev->name,
				  mcspi);
	if (status) {
		dev_err(&pdev->dev, "Cannot request IRQ");
		goto free_ctlr;
	}

	/* Optional functional clock; fall back to the fixed 48 MHz rate. */
	mcspi->ref_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
	if (IS_ERR(mcspi->ref_clk)) {
		status = PTR_ERR(mcspi->ref_clk);
		dev_err_probe(&pdev->dev, status, "Failed to get ref_clk");
		goto free_ctlr;
	}
	if (mcspi->ref_clk)
		mcspi->ref_clk_hz = clk_get_rate(mcspi->ref_clk);
	else
		mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ;
	/* Minimum speed follows from the maximum clock divider (2^12 * 8). */
	ctlr->max_speed_hz = mcspi->ref_clk_hz;
	ctlr->min_speed_hz = mcspi->ref_clk_hz >> 15;

	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
	pm_runtime_enable(&pdev->dev);

	status = omap2_mcspi_controller_setup(mcspi);
	if (status < 0)
		goto disable_pm;

	status = devm_spi_register_controller(&pdev->dev, ctlr);
	if (status < 0)
		goto disable_pm;

	return status;

disable_pm:
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
free_ctlr:
	omap2_mcspi_release_dma(ctlr);
	spi_controller_put(ctlr);
	return status;
}
1603 
static void omap2_mcspi_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = platform_get_drvdata(pdev);
	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);

	/* The controller itself was devm-registered; only the DMA channels
	 * and the runtime PM state need explicit teardown here. */
	omap2_mcspi_release_dma(ctlr);

	pm_runtime_dont_use_autosuspend(mcspi->dev);
	pm_runtime_put_sync(mcspi->dev);
	pm_runtime_disable(&pdev->dev);
}
1615 
1616 /* work with hotplug and coldplug */
1617 MODULE_ALIAS("platform:omap2_mcspi");
1618 
omap2_mcspi_suspend(struct device * dev)1619 static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
1620 {
1621 	struct spi_controller *ctlr = dev_get_drvdata(dev);
1622 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
1623 	int error;
1624 
1625 	error = pinctrl_pm_select_sleep_state(dev);
1626 	if (error)
1627 		dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
1628 			 __func__, error);
1629 
1630 	error = spi_controller_suspend(ctlr);
1631 	if (error)
1632 		dev_warn(mcspi->dev, "%s: controller suspend failed: %i\n",
1633 			 __func__, error);
1634 
1635 	return pm_runtime_force_suspend(dev);
1636 }
1637 
omap2_mcspi_resume(struct device * dev)1638 static int __maybe_unused omap2_mcspi_resume(struct device *dev)
1639 {
1640 	struct spi_controller *ctlr = dev_get_drvdata(dev);
1641 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
1642 	int error;
1643 
1644 	error = spi_controller_resume(ctlr);
1645 	if (error)
1646 		dev_warn(mcspi->dev, "%s: controller resume failed: %i\n",
1647 			 __func__, error);
1648 
1649 	return pm_runtime_force_resume(dev);
1650 }
1651 
/* System sleep is force-routed through runtime PM; the runtime hooks also
 * handle pinctrl state and register context save/restore. */
static const struct dev_pm_ops omap2_mcspi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap2_mcspi_suspend,
				omap2_mcspi_resume)
	.runtime_suspend	= omap_mcspi_runtime_suspend,
	.runtime_resume		= omap_mcspi_runtime_resume,
};
1658 
/* Platform driver glue: binds via the OF table above or by name. */
static struct platform_driver omap2_mcspi_driver = {
	.driver = {
		.name =		"omap2_mcspi",
		.pm =		&omap2_mcspi_pm_ops,
		.of_match_table = omap_mcspi_of_match,
	},
	.probe =	omap2_mcspi_probe,
	.remove =	omap2_mcspi_remove,
};
1668 
1669 module_platform_driver(omap2_mcspi_driver);
1670 MODULE_DESCRIPTION("OMAP2 McSPI controller driver");
1671 MODULE_LICENSE("GPL");
1672