xref: /linux/drivers/spi/spi-omap2-mcspi.c (revision bfb921b2a9d5d1123d1d10b196a39db629ddef87)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * OMAP2 McSPI controller driver
4  *
5  * Copyright (C) 2005, 2006 Nokia Corporation
6  * Author:	Samuel Ortiz <samuel.ortiz@nokia.com> and
7  *		Juha Yrjola <juha.yrjola@nokia.com>
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/interrupt.h>
12 #include <linux/module.h>
13 #include <linux/device.h>
14 #include <linux/delay.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/dmaengine.h>
17 #include <linux/pinctrl/consumer.h>
18 #include <linux/platform_device.h>
19 #include <linux/err.h>
20 #include <linux/clk.h>
21 #include <linux/io.h>
22 #include <linux/slab.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/of.h>
25 #include <linux/of_device.h>
26 #include <linux/gcd.h>
27 
28 #include <linux/spi/spi.h>
29 
30 #include <linux/platform_data/spi-omap2-mcspi.h>
31 
32 #define OMAP2_MCSPI_MAX_FREQ		48000000
33 #define OMAP2_MCSPI_MAX_DIVIDER		4096
34 #define OMAP2_MCSPI_MAX_FIFODEPTH	64
35 #define OMAP2_MCSPI_MAX_FIFOWCNT	0xFFFF
36 #define SPI_AUTOSUSPEND_TIMEOUT		2000
37 
38 #define OMAP2_MCSPI_REVISION		0x00
39 #define OMAP2_MCSPI_SYSSTATUS		0x14
40 #define OMAP2_MCSPI_IRQSTATUS		0x18
41 #define OMAP2_MCSPI_IRQENABLE		0x1c
42 #define OMAP2_MCSPI_WAKEUPENABLE	0x20
43 #define OMAP2_MCSPI_SYST		0x24
44 #define OMAP2_MCSPI_MODULCTRL		0x28
45 #define OMAP2_MCSPI_XFERLEVEL		0x7c
46 
47 /* per-channel banks, 0x14 bytes each, first is: */
48 #define OMAP2_MCSPI_CHCONF0		0x2c
49 #define OMAP2_MCSPI_CHSTAT0		0x30
50 #define OMAP2_MCSPI_CHCTRL0		0x34
51 #define OMAP2_MCSPI_TX0			0x38
52 #define OMAP2_MCSPI_RX0			0x3c
53 
54 /* per-register bitmasks: */
55 #define OMAP2_MCSPI_IRQSTATUS_EOW	BIT(17)
56 
57 #define OMAP2_MCSPI_MODULCTRL_SINGLE	BIT(0)
58 #define OMAP2_MCSPI_MODULCTRL_MS	BIT(2)
59 #define OMAP2_MCSPI_MODULCTRL_STEST	BIT(3)
60 
61 #define OMAP2_MCSPI_CHCONF_PHA		BIT(0)
62 #define OMAP2_MCSPI_CHCONF_POL		BIT(1)
63 #define OMAP2_MCSPI_CHCONF_CLKD_MASK	(0x0f << 2)
64 #define OMAP2_MCSPI_CHCONF_EPOL		BIT(6)
65 #define OMAP2_MCSPI_CHCONF_WL_MASK	(0x1f << 7)
66 #define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY	BIT(12)
67 #define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY	BIT(13)
68 #define OMAP2_MCSPI_CHCONF_TRM_MASK	(0x03 << 12)
69 #define OMAP2_MCSPI_CHCONF_DMAW		BIT(14)
70 #define OMAP2_MCSPI_CHCONF_DMAR		BIT(15)
71 #define OMAP2_MCSPI_CHCONF_DPE0		BIT(16)
72 #define OMAP2_MCSPI_CHCONF_DPE1		BIT(17)
73 #define OMAP2_MCSPI_CHCONF_IS		BIT(18)
74 #define OMAP2_MCSPI_CHCONF_TURBO	BIT(19)
75 #define OMAP2_MCSPI_CHCONF_FORCE	BIT(20)
76 #define OMAP2_MCSPI_CHCONF_FFET		BIT(27)
77 #define OMAP2_MCSPI_CHCONF_FFER		BIT(28)
78 #define OMAP2_MCSPI_CHCONF_CLKG		BIT(29)
79 
80 #define OMAP2_MCSPI_CHSTAT_RXS		BIT(0)
81 #define OMAP2_MCSPI_CHSTAT_TXS		BIT(1)
82 #define OMAP2_MCSPI_CHSTAT_EOT		BIT(2)
83 #define OMAP2_MCSPI_CHSTAT_TXFFE	BIT(3)
84 
85 #define OMAP2_MCSPI_CHCTRL_EN		BIT(0)
86 #define OMAP2_MCSPI_CHCTRL_EXTCLK_MASK	(0xff << 8)
87 
88 #define OMAP2_MCSPI_WAKEUPENABLE_WKEN	BIT(0)
89 
90 /* We have 2 DMA channels per CS, one for RX and one for TX */
91 struct omap2_mcspi_dma {
92 	struct dma_chan *dma_tx;
93 	struct dma_chan *dma_rx;
94 
95 	struct completion dma_tx_completion;
96 	struct completion dma_rx_completion;
97 
98 	char dma_rx_ch_name[14];
99 	char dma_tx_ch_name[14];
100 };
101 
102 /* use PIO for small transfers, avoiding DMA setup/teardown overhead and
103  * cache operations; better heuristics consider wordsize and bitrate.
104  */
105 #define DMA_MIN_BYTES			160
106 
107 
108 /*
109  * Used for context save and restore; structure members must be updated
110  * whenever the corresponding registers are modified.
111  */
112 struct omap2_mcspi_regs {
113 	u32 modulctrl;
114 	u32 wakeupenable;
115 	struct list_head cs;
116 };
117 
118 struct omap2_mcspi {
119 	struct completion	txdone;
120 	struct spi_controller	*ctlr;
121 	/* Virtual base address of the controller */
122 	void __iomem		*base;
123 	unsigned long		phys;
124 	/* SPI1 has 4 channels, while SPI2 has 2 */
125 	struct omap2_mcspi_dma	*dma_channels;
126 	struct device		*dev;
127 	struct omap2_mcspi_regs ctx;
128 	struct clk		*ref_clk;
129 	int			fifo_depth;
130 	bool			target_aborted;
131 	unsigned int		pin_dir:1;
132 	size_t			max_xfer_len;
133 	u32			ref_clk_hz;
134 	bool			use_multi_mode;
135 };
136 
137 struct omap2_mcspi_cs {
138 	void __iomem		*base;
139 	unsigned long		phys;
140 	int			word_len;
141 	u16			mode;
142 	struct list_head	node;
143 	/* Context save and restore shadow register */
144 	u32			chconf0, chctrl0;
145 };
146 
147 static inline void mcspi_write_reg(struct spi_controller *ctlr,
148 		int idx, u32 val)
149 {
150 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
151 
152 	writel_relaxed(val, mcspi->base + idx);
153 }
154 
155 static inline u32 mcspi_read_reg(struct spi_controller *ctlr, int idx)
156 {
157 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
158 
159 	return readl_relaxed(mcspi->base + idx);
160 }
161 
162 static inline void mcspi_write_cs_reg(const struct spi_device *spi,
163 		int idx, u32 val)
164 {
165 	struct omap2_mcspi_cs	*cs = spi->controller_state;
166 
167 	writel_relaxed(val, cs->base + idx);
168 }
169 
170 static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
171 {
172 	struct omap2_mcspi_cs	*cs = spi->controller_state;
173 
174 	return readl_relaxed(cs->base + idx);
175 }
176 
177 static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
178 {
179 	struct omap2_mcspi_cs *cs = spi->controller_state;
180 
181 	return cs->chconf0;
182 }
183 
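/*
 * Update the CHCONF0 shadow copy and the register itself. The read-back
 * after the write flushes the posted write, so the new configuration has
 * reached the module before the channel is touched again (same idea as the
 * "flush posted writes" read in omap2_mcspi_set_enable() below).
 */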
184 static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
185 {
186 	struct omap2_mcspi_cs *cs = spi->controller_state;
187 
188 	cs->chconf0 = val;
189 	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
190 	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
191 }
192 
193 static inline int mcspi_bytes_per_word(int word_len)
194 {
195 	if (word_len <= 8)
196 		return 1;
197 	else if (word_len <= 16)
198 		return 2;
199 	else /* word_len <= 32 */
200 		return 4;
201 }
202 
203 static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
204 		int is_read, int enable)
205 {
206 	u32 l, rw;
207 
208 	l = mcspi_cached_chconf0(spi);
209 
210 	if (is_read) /* 1 is read, 0 write */
211 		rw = OMAP2_MCSPI_CHCONF_DMAR;
212 	else
213 		rw = OMAP2_MCSPI_CHCONF_DMAW;
214 
215 	if (enable)
216 		l |= rw;
217 	else
218 		l &= ~rw;
219 
220 	mcspi_write_chconf0(spi, l);
221 }
222 
223 static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
224 {
225 	struct omap2_mcspi_cs *cs = spi->controller_state;
226 	u32 l;
227 
228 	l = cs->chctrl0;
229 	if (enable)
230 		l |= OMAP2_MCSPI_CHCTRL_EN;
231 	else
232 		l &= ~OMAP2_MCSPI_CHCTRL_EN;
233 	cs->chctrl0 = l;
234 	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
235 	/* Flush posted writes */
236 	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
237 }
238 
239 static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
240 {
241 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
242 	u32 l;
243 
244 	/* The controller handles the inverted chip selects
245 	 * using the OMAP2_MCSPI_CHCONF_EPOL bit, so undo
246 	 * the inversion applied by the core spi_set_cs function.
247 	 */
248 	if (spi->mode & SPI_CS_HIGH)
249 		enable = !enable;
250 
251 	if (spi->controller_state) {
252 		int err = pm_runtime_resume_and_get(mcspi->dev);
253 		if (err < 0) {
254 			dev_err(mcspi->dev, "failed to get sync: %d\n", err);
255 			return;
256 		}
257 
258 		l = mcspi_cached_chconf0(spi);
259 
260 		/* Only enable chip select manually if single mode is used */
261 		if (mcspi->use_multi_mode) {
262 			l &= ~OMAP2_MCSPI_CHCONF_FORCE;
263 		} else {
264 			if (enable)
265 				l &= ~OMAP2_MCSPI_CHCONF_FORCE;
266 			else
267 				l |= OMAP2_MCSPI_CHCONF_FORCE;
268 		}
269 
270 		mcspi_write_chconf0(spi, l);
271 
272 		pm_runtime_mark_last_busy(mcspi->dev);
273 		pm_runtime_put_autosuspend(mcspi->dev);
274 	}
275 }
276 
277 static void omap2_mcspi_set_mode(struct spi_controller *ctlr)
278 {
279 	struct omap2_mcspi	*mcspi = spi_controller_get_devdata(ctlr);
280 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
281 	u32 l;
282 
283 	/*
284 	 * Choose host or target mode
285 	 */
286 	l = mcspi_read_reg(ctlr, OMAP2_MCSPI_MODULCTRL);
287 	l &= ~(OMAP2_MCSPI_MODULCTRL_STEST);
288 	if (spi_controller_is_target(ctlr)) {
289 		l |= (OMAP2_MCSPI_MODULCTRL_MS);
290 	} else {
291 		l &= ~(OMAP2_MCSPI_MODULCTRL_MS);
292 
293 		/* Enable single mode if needed */
294 		if (mcspi->use_multi_mode)
295 			l &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
296 		else
297 			l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
298 	}
299 	mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, l);
300 
301 	ctx->modulctrl = l;
302 }
303 
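/*
 * Enable or disable the per-channel FIFOs for a transfer. When the FIFO is
 * usable, XFERLEVEL is packed as the code below does it: the word count in
 * bits 31:16, the RX almost-full level at bit 8 and the TX almost-empty
 * level at bit 0, each level being one word's worth of bytes minus one.
 * The FIFO is left disabled whenever the transfer length is not a multiple
 * of the word size or the word count would not fit in the 16-bit field.
 */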
304 static void omap2_mcspi_set_fifo(const struct spi_device *spi,
305 				struct spi_transfer *t, int enable)
306 {
307 	struct spi_controller *ctlr = spi->controller;
308 	struct omap2_mcspi_cs *cs = spi->controller_state;
309 	struct omap2_mcspi *mcspi;
310 	unsigned int wcnt;
311 	int max_fifo_depth, bytes_per_word;
312 	u32 chconf, xferlevel;
313 
314 	mcspi = spi_controller_get_devdata(ctlr);
315 
316 	chconf = mcspi_cached_chconf0(spi);
317 	if (enable) {
318 		bytes_per_word = mcspi_bytes_per_word(cs->word_len);
319 		if (t->len % bytes_per_word != 0)
320 			goto disable_fifo;
321 
322 		if (t->rx_buf != NULL && t->tx_buf != NULL)
323 			max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
324 		else
325 			max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;
326 
327 		wcnt = t->len / bytes_per_word;
328 		if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
329 			goto disable_fifo;
330 
331 		xferlevel = wcnt << 16;
332 		if (t->rx_buf != NULL) {
333 			chconf |= OMAP2_MCSPI_CHCONF_FFER;
334 			xferlevel |= (bytes_per_word - 1) << 8;
335 		}
336 
337 		if (t->tx_buf != NULL) {
338 			chconf |= OMAP2_MCSPI_CHCONF_FFET;
339 			xferlevel |= bytes_per_word - 1;
340 		}
341 
342 		mcspi_write_reg(ctlr, OMAP2_MCSPI_XFERLEVEL, xferlevel);
343 		mcspi_write_chconf0(spi, chconf);
344 		mcspi->fifo_depth = max_fifo_depth;
345 
346 		return;
347 	}
348 
349 disable_fifo:
350 	if (t->rx_buf != NULL)
351 		chconf &= ~OMAP2_MCSPI_CHCONF_FFER;
352 
353 	if (t->tx_buf != NULL)
354 		chconf &= ~OMAP2_MCSPI_CHCONF_FFET;
355 
356 	mcspi_write_chconf0(spi, chconf);
357 	mcspi->fifo_depth = 0;
358 }
359 
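/*
 * Busy-wait for a status bit with a one second timeout. The bit is sampled
 * once more after the timeout expires, so a long preemption between the
 * last poll and the time check cannot be reported as a spurious -ETIMEDOUT.
 */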
360 static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
361 {
362 	unsigned long timeout;
363 
364 	timeout = jiffies + msecs_to_jiffies(1000);
365 	while (!(readl_relaxed(reg) & bit)) {
366 		if (time_after(jiffies, timeout)) {
367 			if (!(readl_relaxed(reg) & bit))
368 				return -ETIMEDOUT;
369 			else
370 				return 0;
371 		}
372 		cpu_relax();
373 	}
374 	return 0;
375 }
376 
377 static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi,
378 				     struct completion *x)
379 {
380 	if (spi_controller_is_target(mcspi->ctlr)) {
381 		if (wait_for_completion_interruptible(x) ||
382 		    mcspi->target_aborted)
383 			return -EINTR;
384 	} else {
385 		wait_for_completion(x);
386 	}
387 
388 	return 0;
389 }
390 
391 static void omap2_mcspi_rx_callback(void *data)
392 {
393 	struct spi_device *spi = data;
394 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
395 	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
396 
397 	/* We must disable the DMA RX request */
398 	omap2_mcspi_set_dma_req(spi, 1, 0);
399 
400 	complete(&mcspi_dma->dma_rx_completion);
401 }
402 
403 static void omap2_mcspi_tx_callback(void *data)
404 {
405 	struct spi_device *spi = data;
406 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
407 	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
408 
409 	/* We must disable the DMA TX request */
410 	omap2_mcspi_set_dma_req(spi, 0, 0);
411 
412 	complete(&mcspi_dma->dma_tx_completion);
413 }
414 
415 static void omap2_mcspi_tx_dma(struct spi_device *spi,
416 				struct spi_transfer *xfer,
417 				struct dma_slave_config cfg)
418 {
419 	struct omap2_mcspi	*mcspi;
420 	struct omap2_mcspi_dma  *mcspi_dma;
421 	struct dma_async_tx_descriptor *tx;
422 
423 	mcspi = spi_controller_get_devdata(spi->controller);
424 	mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
425 
426 	dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
427 
428 	tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
429 				     xfer->tx_sg.nents,
430 				     DMA_MEM_TO_DEV,
431 				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
432 	if (tx) {
433 		tx->callback = omap2_mcspi_tx_callback;
434 		tx->callback_param = spi;
435 		dmaengine_submit(tx);
436 	} else {
437 		/* FIXME: fall back to PIO? */
438 	}
439 	dma_async_issue_pending(mcspi_dma->dma_tx);
440 	omap2_mcspi_set_dma_req(spi, 0, 1);
441 }
442 
443 static unsigned
444 omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
445 				struct dma_slave_config cfg,
446 				unsigned es)
447 {
448 	struct omap2_mcspi	*mcspi;
449 	struct omap2_mcspi_dma  *mcspi_dma;
450 	unsigned int		count, transfer_reduction = 0;
451 	struct scatterlist	*sg_out[2];
452 	int			nb_sizes = 0, out_mapped_nents[2], ret, x;
453 	size_t			sizes[2];
454 	u32			l;
455 	int			elements = 0;
456 	int			word_len, element_count;
457 	struct omap2_mcspi_cs	*cs = spi->controller_state;
458 	void __iomem		*chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
459 	struct dma_async_tx_descriptor *tx;
460 
461 	mcspi = spi_controller_get_devdata(spi->controller);
462 	mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
463 	count = xfer->len;
464 
465 	/*
466 	 *  The "End-of-Transfer Procedure" section for DMA RX in the OMAP35x TRM
467 	 *  mentions reducing the DMA transfer length by one element in host
468 	 *  normal mode.
469 	 */
470 	if (mcspi->fifo_depth == 0)
471 		transfer_reduction = es;
472 
473 	word_len = cs->word_len;
474 	l = mcspi_cached_chconf0(spi);
475 
476 	if (word_len <= 8)
477 		element_count = count;
478 	else if (word_len <= 16)
479 		element_count = count >> 1;
480 	else /* word_len <= 32 */
481 		element_count = count >> 2;
482 
483 
484 	dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
485 
486 	/*
487 	 *  Reduce DMA transfer length by one more if McSPI is
488 	 *  configured in turbo mode.
489 	 */
490 	if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
491 		transfer_reduction += es;
492 
493 	if (transfer_reduction) {
494 		/* Split sgl into two. The second sgl won't be used. */
495 		sizes[0] = count - transfer_reduction;
496 		sizes[1] = transfer_reduction;
497 		nb_sizes = 2;
498 	} else {
499 		/*
500 		 * Don't bother splitting the sgl. This essentially
501 		 * clones the original sgl.
502 		 */
503 		sizes[0] = count;
504 		nb_sizes = 1;
505 	}
506 
507 	ret = sg_split(xfer->rx_sg.sgl, xfer->rx_sg.nents, 0, nb_sizes,
508 		       sizes, sg_out, out_mapped_nents, GFP_KERNEL);
509 
510 	if (ret < 0) {
511 		dev_err(&spi->dev, "sg_split failed\n");
512 		return 0;
513 	}
514 
515 	tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, sg_out[0],
516 				     out_mapped_nents[0], DMA_DEV_TO_MEM,
517 				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
518 	if (tx) {
519 		tx->callback = omap2_mcspi_rx_callback;
520 		tx->callback_param = spi;
521 		dmaengine_submit(tx);
522 	} else {
523 		/* FIXME: fall back to PIO? */
524 	}
525 
526 	dma_async_issue_pending(mcspi_dma->dma_rx);
527 	omap2_mcspi_set_dma_req(spi, 1, 1);
528 
529 	ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_rx_completion);
530 	if (ret || mcspi->target_aborted) {
531 		dmaengine_terminate_sync(mcspi_dma->dma_rx);
532 		omap2_mcspi_set_dma_req(spi, 1, 0);
533 		return 0;
534 	}
535 
536 	for (x = 0; x < nb_sizes; x++)
537 		kfree(sg_out[x]);
538 
539 	if (mcspi->fifo_depth > 0)
540 		return count;
541 
542 	/*
543 	 *  Due to the DMA transfer length reduction the missing bytes must
544 	 *  be read manually to receive all of the expected data.
545 	 */
546 	omap2_mcspi_set_enable(spi, 0);
547 
548 	elements = element_count - 1;
549 
550 	if (l & OMAP2_MCSPI_CHCONF_TURBO) {
551 		elements--;
552 
553 		if (!mcspi_wait_for_reg_bit(chstat_reg,
554 					    OMAP2_MCSPI_CHSTAT_RXS)) {
555 			u32 w;
556 
557 			w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
558 			if (word_len <= 8)
559 				((u8 *)xfer->rx_buf)[elements++] = w;
560 			else if (word_len <= 16)
561 				((u16 *)xfer->rx_buf)[elements++] = w;
562 			else /* word_len <= 32 */
563 				((u32 *)xfer->rx_buf)[elements++] = w;
564 		} else {
565 			int bytes_per_word = mcspi_bytes_per_word(word_len);
566 			dev_err(&spi->dev, "DMA RX penultimate word empty\n");
567 			count -= (bytes_per_word << 1);
568 			omap2_mcspi_set_enable(spi, 1);
569 			return count;
570 		}
571 	}
572 	if (!mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_RXS)) {
573 		u32 w;
574 
575 		w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
576 		if (word_len <= 8)
577 			((u8 *)xfer->rx_buf)[elements] = w;
578 		else if (word_len <= 16)
579 			((u16 *)xfer->rx_buf)[elements] = w;
580 		else /* word_len <= 32 */
581 			((u32 *)xfer->rx_buf)[elements] = w;
582 	} else {
583 		dev_err(&spi->dev, "DMA RX last word empty\n");
584 		count -= mcspi_bytes_per_word(word_len);
585 	}
586 	omap2_mcspi_set_enable(spi, 1);
587 	return count;
588 }
589 
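/*
 * Full-duplex transfer using the dmaengine API. The slave config points both
 * directions at the channel's RX0/TX0 registers; TX DMA is set up first
 * (plus the EOW interrupt in target mode), then RX DMA runs and returns the
 * number of bytes actually received. For TX-only transfers the TXFFE/TXS and
 * EOT status bits are polled afterwards to make sure the last word has
 * really been shifted out.
 */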
590 static unsigned
591 omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
592 {
593 	struct omap2_mcspi	*mcspi;
594 	struct omap2_mcspi_cs	*cs = spi->controller_state;
595 	struct omap2_mcspi_dma  *mcspi_dma;
596 	unsigned int		count;
597 	u8			*rx;
598 	const u8		*tx;
599 	struct dma_slave_config	cfg;
600 	enum dma_slave_buswidth width;
601 	unsigned es;
602 	void __iomem		*chstat_reg;
603 	void __iomem            *irqstat_reg;
604 	int			wait_res;
605 
606 	mcspi = spi_controller_get_devdata(spi->controller);
607 	mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
608 
609 	if (cs->word_len <= 8) {
610 		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
611 		es = 1;
612 	} else if (cs->word_len <= 16) {
613 		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
614 		es = 2;
615 	} else {
616 		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
617 		es = 4;
618 	}
619 
620 	count = xfer->len;
621 
622 	memset(&cfg, 0, sizeof(cfg));
623 	cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
624 	cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
625 	cfg.src_addr_width = width;
626 	cfg.dst_addr_width = width;
627 	cfg.src_maxburst = 1;
628 	cfg.dst_maxburst = 1;
629 
630 	rx = xfer->rx_buf;
631 	tx = xfer->tx_buf;
632 
633 	mcspi->target_aborted = false;
634 	reinit_completion(&mcspi_dma->dma_tx_completion);
635 	reinit_completion(&mcspi_dma->dma_rx_completion);
636 	reinit_completion(&mcspi->txdone);
637 	if (tx) {
638 		/* Enable EOW IRQ to know end of tx in target mode */
639 		if (spi_controller_is_target(spi->controller))
640 			mcspi_write_reg(spi->controller,
641 					OMAP2_MCSPI_IRQENABLE,
642 					OMAP2_MCSPI_IRQSTATUS_EOW);
643 		omap2_mcspi_tx_dma(spi, xfer, cfg);
644 	}
645 
646 	if (rx != NULL)
647 		count = omap2_mcspi_rx_dma(spi, xfer, cfg, es);
648 
649 	if (tx != NULL) {
650 		int ret;
651 
652 		ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_tx_completion);
653 		if (ret || mcspi->target_aborted) {
654 			dmaengine_terminate_sync(mcspi_dma->dma_tx);
655 			omap2_mcspi_set_dma_req(spi, 0, 0);
656 			return 0;
657 		}
658 
659 		if (spi_controller_is_target(mcspi->ctlr)) {
660 			ret = mcspi_wait_for_completion(mcspi, &mcspi->txdone);
661 			if (ret || mcspi->target_aborted)
662 				return 0;
663 		}
664 
665 		if (mcspi->fifo_depth > 0) {
666 			irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
667 
668 			if (mcspi_wait_for_reg_bit(irqstat_reg,
669 						OMAP2_MCSPI_IRQSTATUS_EOW) < 0)
670 				dev_err(&spi->dev, "EOW timed out\n");
671 
672 			mcspi_write_reg(mcspi->ctlr, OMAP2_MCSPI_IRQSTATUS,
673 					OMAP2_MCSPI_IRQSTATUS_EOW);
674 		}
675 
676 		/* for TX_ONLY mode, be sure all words have shifted out */
677 		if (rx == NULL) {
678 			chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
679 			if (mcspi->fifo_depth > 0) {
680 				wait_res = mcspi_wait_for_reg_bit(chstat_reg,
681 						OMAP2_MCSPI_CHSTAT_TXFFE);
682 				if (wait_res < 0)
683 					dev_err(&spi->dev, "TXFFE timed out\n");
684 			} else {
685 				wait_res = mcspi_wait_for_reg_bit(chstat_reg,
686 						OMAP2_MCSPI_CHSTAT_TXS);
687 				if (wait_res < 0)
688 					dev_err(&spi->dev, "TXS timed out\n");
689 			}
690 			if (wait_res >= 0 &&
691 				(mcspi_wait_for_reg_bit(chstat_reg,
692 					OMAP2_MCSPI_CHSTAT_EOT) < 0))
693 				dev_err(&spi->dev, "EOT timed out\n");
694 		}
695 	}
696 	return count;
697 }
698 
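/*
 * PIO transfer loop: for every word, wait for TXS before writing TX0 and for
 * RXS before reading RX0. In RX-only turbo mode the channel is disabled
 * before the last two words are drained so the controller does not clock in
 * extra words; the same trick (disable before the final read) is used for
 * the plain RX-only case.
 */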
699 static unsigned
700 omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
701 {
702 	struct omap2_mcspi_cs	*cs = spi->controller_state;
703 	unsigned int		count, c;
704 	u32			l;
705 	void __iomem		*base = cs->base;
706 	void __iomem		*tx_reg;
707 	void __iomem		*rx_reg;
708 	void __iomem		*chstat_reg;
709 	int			word_len;
710 
711 	count = xfer->len;
712 	c = count;
713 	word_len = cs->word_len;
714 
715 	l = mcspi_cached_chconf0(spi);
716 
717 	/* We store the pre-calculated register addresses on the stack to speed
718 	 * up the transfer loop. */
719 	tx_reg		= base + OMAP2_MCSPI_TX0;
720 	rx_reg		= base + OMAP2_MCSPI_RX0;
721 	chstat_reg	= base + OMAP2_MCSPI_CHSTAT0;
722 
723 	if (c < (word_len>>3))
724 		return 0;
725 
726 	if (word_len <= 8) {
727 		u8		*rx;
728 		const u8	*tx;
729 
730 		rx = xfer->rx_buf;
731 		tx = xfer->tx_buf;
732 
733 		do {
734 			c -= 1;
735 			if (tx != NULL) {
736 				if (mcspi_wait_for_reg_bit(chstat_reg,
737 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
738 					dev_err(&spi->dev, "TXS timed out\n");
739 					goto out;
740 				}
741 				dev_vdbg(&spi->dev, "write-%d %02x\n",
742 						word_len, *tx);
743 				writel_relaxed(*tx++, tx_reg);
744 			}
745 			if (rx != NULL) {
746 				if (mcspi_wait_for_reg_bit(chstat_reg,
747 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
748 					dev_err(&spi->dev, "RXS timed out\n");
749 					goto out;
750 				}
751 
752 				if (c == 1 && tx == NULL &&
753 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
754 					omap2_mcspi_set_enable(spi, 0);
755 					*rx++ = readl_relaxed(rx_reg);
756 					dev_vdbg(&spi->dev, "read-%d %02x\n",
757 						    word_len, *(rx - 1));
758 					if (mcspi_wait_for_reg_bit(chstat_reg,
759 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
760 						dev_err(&spi->dev,
761 							"RXS timed out\n");
762 						goto out;
763 					}
764 					c = 0;
765 				} else if (c == 0 && tx == NULL) {
766 					omap2_mcspi_set_enable(spi, 0);
767 				}
768 
769 				*rx++ = readl_relaxed(rx_reg);
770 				dev_vdbg(&spi->dev, "read-%d %02x\n",
771 						word_len, *(rx - 1));
772 			}
773 			/* Add word delay between each word */
774 			spi_delay_exec(&xfer->word_delay, xfer);
775 		} while (c);
776 	} else if (word_len <= 16) {
777 		u16		*rx;
778 		const u16	*tx;
779 
780 		rx = xfer->rx_buf;
781 		tx = xfer->tx_buf;
782 		do {
783 			c -= 2;
784 			if (tx != NULL) {
785 				if (mcspi_wait_for_reg_bit(chstat_reg,
786 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
787 					dev_err(&spi->dev, "TXS timed out\n");
788 					goto out;
789 				}
790 				dev_vdbg(&spi->dev, "write-%d %04x\n",
791 						word_len, *tx);
792 				writel_relaxed(*tx++, tx_reg);
793 			}
794 			if (rx != NULL) {
795 				if (mcspi_wait_for_reg_bit(chstat_reg,
796 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
797 					dev_err(&spi->dev, "RXS timed out\n");
798 					goto out;
799 				}
800 
801 				if (c == 2 && tx == NULL &&
802 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
803 					omap2_mcspi_set_enable(spi, 0);
804 					*rx++ = readl_relaxed(rx_reg);
805 					dev_vdbg(&spi->dev, "read-%d %04x\n",
806 						    word_len, *(rx - 1));
807 					if (mcspi_wait_for_reg_bit(chstat_reg,
808 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
809 						dev_err(&spi->dev,
810 							"RXS timed out\n");
811 						goto out;
812 					}
813 					c = 0;
814 				} else if (c == 0 && tx == NULL) {
815 					omap2_mcspi_set_enable(spi, 0);
816 				}
817 
818 				*rx++ = readl_relaxed(rx_reg);
819 				dev_vdbg(&spi->dev, "read-%d %04x\n",
820 						word_len, *(rx - 1));
821 			}
822 			/* Add word delay between each word */
823 			spi_delay_exec(&xfer->word_delay, xfer);
824 		} while (c >= 2);
825 	} else if (word_len <= 32) {
826 		u32		*rx;
827 		const u32	*tx;
828 
829 		rx = xfer->rx_buf;
830 		tx = xfer->tx_buf;
831 		do {
832 			c -= 4;
833 			if (tx != NULL) {
834 				if (mcspi_wait_for_reg_bit(chstat_reg,
835 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
836 					dev_err(&spi->dev, "TXS timed out\n");
837 					goto out;
838 				}
839 				dev_vdbg(&spi->dev, "write-%d %08x\n",
840 						word_len, *tx);
841 				writel_relaxed(*tx++, tx_reg);
842 			}
843 			if (rx != NULL) {
844 				if (mcspi_wait_for_reg_bit(chstat_reg,
845 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
846 					dev_err(&spi->dev, "RXS timed out\n");
847 					goto out;
848 				}
849 
850 				if (c == 4 && tx == NULL &&
851 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
852 					omap2_mcspi_set_enable(spi, 0);
853 					*rx++ = readl_relaxed(rx_reg);
854 					dev_vdbg(&spi->dev, "read-%d %08x\n",
855 						    word_len, *(rx - 1));
856 					if (mcspi_wait_for_reg_bit(chstat_reg,
857 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
858 						dev_err(&spi->dev,
859 							"RXS timed out\n");
860 						goto out;
861 					}
862 					c = 0;
863 				} else if (c == 0 && tx == NULL) {
864 					omap2_mcspi_set_enable(spi, 0);
865 				}
866 
867 				*rx++ = readl_relaxed(rx_reg);
868 				dev_vdbg(&spi->dev, "read-%d %08x\n",
869 						word_len, *(rx - 1));
870 			}
871 			/* Add word delay between each word */
872 			spi_delay_exec(&xfer->word_delay, xfer);
873 		} while (c >= 4);
874 	}
875 
876 	/* for TX_ONLY mode, be sure all words have shifted out */
877 	if (xfer->rx_buf == NULL) {
878 		if (mcspi_wait_for_reg_bit(chstat_reg,
879 				OMAP2_MCSPI_CHSTAT_TXS) < 0) {
880 			dev_err(&spi->dev, "TXS timed out\n");
881 		} else if (mcspi_wait_for_reg_bit(chstat_reg,
882 				OMAP2_MCSPI_CHSTAT_EOT) < 0)
883 			dev_err(&spi->dev, "EOT timed out\n");
884 
885 		/* disable the channel to purge RX data received during a TX_ONLY
886 		 * transfer, otherwise this stale data will corrupt the directly
887 		 * following RX_ONLY transfer.
888 		 */
889 		omap2_mcspi_set_enable(spi, 0);
890 	}
891 out:
892 	omap2_mcspi_set_enable(spi, 1);
893 	return count - c;
894 }
895 
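/*
 * Pick the smallest power-of-two divider exponent for which
 * ref_clk_hz >> div no longer exceeds the requested speed (capped at 15).
 * For example, with ref_clk_hz = 48000000 and speed_hz = 10000000 this
 * returns 3, i.e. the channel will actually run at 48 MHz >> 3 = 6 MHz.
 */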
896 static u32 omap2_mcspi_calc_divisor(u32 speed_hz, u32 ref_clk_hz)
897 {
898 	u32 div;
899 
900 	for (div = 0; div < 15; div++)
901 		if (speed_hz >= (ref_clk_hz >> div))
902 			return div;
903 
904 	return 15;
905 }
906 
907 /* called only when no transfer is active on this device */
908 static int omap2_mcspi_setup_transfer(struct spi_device *spi,
909 		struct spi_transfer *t)
910 {
911 	struct omap2_mcspi_cs *cs = spi->controller_state;
912 	struct omap2_mcspi *mcspi;
913 	u32 ref_clk_hz, l = 0, clkd = 0, div, extclk = 0, clkg = 0;
914 	u8 word_len = spi->bits_per_word;
915 	u32 speed_hz = spi->max_speed_hz;
916 
917 	mcspi = spi_controller_get_devdata(spi->controller);
918 
919 	if (t != NULL && t->bits_per_word)
920 		word_len = t->bits_per_word;
921 
922 	cs->word_len = word_len;
923 
924 	if (t && t->speed_hz)
925 		speed_hz = t->speed_hz;
926 
927 	ref_clk_hz = mcspi->ref_clk_hz;
928 	speed_hz = min_t(u32, speed_hz, ref_clk_hz);
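	/*
	 * Two divider schemes are used below: for rates below
	 * ref_clk_hz / 4096 only the power-of-two CLKD divider can reach the
	 * rate, so clock granularity (CLKG) stays off. Otherwise CLKG is
	 * enabled and the one-cycle-granularity divider (div - 1) is split
	 * into CLKD (low 4 bits) and the EXTCLK field of CHCTRL0 (next
	 * 8 bits), allowing dividers up to 4096.
	 */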
929 	if (speed_hz < (ref_clk_hz / OMAP2_MCSPI_MAX_DIVIDER)) {
930 		clkd = omap2_mcspi_calc_divisor(speed_hz, ref_clk_hz);
931 		speed_hz = ref_clk_hz >> clkd;
932 		clkg = 0;
933 	} else {
934 		div = (ref_clk_hz + speed_hz - 1) / speed_hz;
935 		speed_hz = ref_clk_hz / div;
936 		clkd = (div - 1) & 0xf;
937 		extclk = (div - 1) >> 4;
938 		clkg = OMAP2_MCSPI_CHCONF_CLKG;
939 	}
940 
941 	l = mcspi_cached_chconf0(spi);
942 
943 	/* standard 4-wire host mode:  SCK, MOSI/out, MISO/in, nCS
944 	 * REVISIT: this controller could support SPI_3WIRE mode.
945 	 */
946 	if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
947 		l &= ~OMAP2_MCSPI_CHCONF_IS;
948 		l &= ~OMAP2_MCSPI_CHCONF_DPE1;
949 		l |= OMAP2_MCSPI_CHCONF_DPE0;
950 	} else {
951 		l |= OMAP2_MCSPI_CHCONF_IS;
952 		l |= OMAP2_MCSPI_CHCONF_DPE1;
953 		l &= ~OMAP2_MCSPI_CHCONF_DPE0;
954 	}
955 
956 	/* wordlength */
957 	l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
958 	l |= (word_len - 1) << 7;
959 
960 	/* set chipselect polarity; manage with FORCE */
961 	if (!(spi->mode & SPI_CS_HIGH))
962 		l |= OMAP2_MCSPI_CHCONF_EPOL;	/* active-low; normal */
963 	else
964 		l &= ~OMAP2_MCSPI_CHCONF_EPOL;
965 
966 	/* set clock divisor */
967 	l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
968 	l |= clkd << 2;
969 
970 	/* set clock granularity */
971 	l &= ~OMAP2_MCSPI_CHCONF_CLKG;
972 	l |= clkg;
973 	if (clkg) {
974 		cs->chctrl0 &= ~OMAP2_MCSPI_CHCTRL_EXTCLK_MASK;
975 		cs->chctrl0 |= extclk << 8;
976 		mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
977 	}
978 
979 	/* set SPI mode 0..3 */
980 	if (spi->mode & SPI_CPOL)
981 		l |= OMAP2_MCSPI_CHCONF_POL;
982 	else
983 		l &= ~OMAP2_MCSPI_CHCONF_POL;
984 	if (spi->mode & SPI_CPHA)
985 		l |= OMAP2_MCSPI_CHCONF_PHA;
986 	else
987 		l &= ~OMAP2_MCSPI_CHCONF_PHA;
988 
989 	mcspi_write_chconf0(spi, l);
990 
991 	cs->mode = spi->mode;
992 
993 	dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
994 			speed_hz,
995 			(spi->mode & SPI_CPHA) ? "trailing" : "leading",
996 			(spi->mode & SPI_CPOL) ? "inverted" : "normal");
997 
998 	return 0;
999 }
1000 
1001 /*
1002  * Note that we currently allow DMA only if we get a channel
1003  * for both rx and tx. Otherwise we'll do PIO for both rx and tx.
1004  */
1005 static int omap2_mcspi_request_dma(struct omap2_mcspi *mcspi,
1006 				   struct omap2_mcspi_dma *mcspi_dma)
1007 {
1008 	int ret = 0;
1009 
1010 	mcspi_dma->dma_rx = dma_request_chan(mcspi->dev,
1011 					     mcspi_dma->dma_rx_ch_name);
1012 	if (IS_ERR(mcspi_dma->dma_rx)) {
1013 		ret = PTR_ERR(mcspi_dma->dma_rx);
1014 		mcspi_dma->dma_rx = NULL;
1015 		goto no_dma;
1016 	}
1017 
1018 	mcspi_dma->dma_tx = dma_request_chan(mcspi->dev,
1019 					     mcspi_dma->dma_tx_ch_name);
1020 	if (IS_ERR(mcspi_dma->dma_tx)) {
1021 		ret = PTR_ERR(mcspi_dma->dma_tx);
1022 		mcspi_dma->dma_tx = NULL;
1023 		dma_release_channel(mcspi_dma->dma_rx);
1024 		mcspi_dma->dma_rx = NULL;
1025 	}
1026 
1027 	init_completion(&mcspi_dma->dma_rx_completion);
1028 	init_completion(&mcspi_dma->dma_tx_completion);
1029 
1030 no_dma:
1031 	return ret;
1032 }
1033 
1034 static void omap2_mcspi_release_dma(struct spi_controller *ctlr)
1035 {
1036 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
1037 	struct omap2_mcspi_dma	*mcspi_dma;
1038 	int i;
1039 
1040 	for (i = 0; i < ctlr->num_chipselect; i++) {
1041 		mcspi_dma = &mcspi->dma_channels[i];
1042 
1043 		if (mcspi_dma->dma_rx) {
1044 			dma_release_channel(mcspi_dma->dma_rx);
1045 			mcspi_dma->dma_rx = NULL;
1046 		}
1047 		if (mcspi_dma->dma_tx) {
1048 			dma_release_channel(mcspi_dma->dma_tx);
1049 			mcspi_dma->dma_tx = NULL;
1050 		}
1051 	}
1052 }
1053 
1054 static void omap2_mcspi_cleanup(struct spi_device *spi)
1055 {
1056 	struct omap2_mcspi_cs	*cs;
1057 
1058 	if (spi->controller_state) {
1059 		/* Unlink controller state from context save list */
1060 		cs = spi->controller_state;
1061 		list_del(&cs->node);
1062 
1063 		kfree(cs);
1064 	}
1065 }
1066 
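/*
 * Allocate per-chipselect state on first use. Each chip select owns a
 * 0x14 byte register bank (CHCONFx/CHSTATx/CHCTRLx/TXx/RXx), so the virtual
 * and physical addresses are simply offset by chipselect * 0x14. The state
 * is linked into mcspi->ctx.cs so the channel registers can be restored on
 * runtime resume.
 */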
1067 static int omap2_mcspi_setup(struct spi_device *spi)
1068 {
1069 	bool			initial_setup = false;
1070 	int			ret;
1071 	struct omap2_mcspi	*mcspi = spi_controller_get_devdata(spi->controller);
1072 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1073 	struct omap2_mcspi_cs	*cs = spi->controller_state;
1074 
1075 	if (!cs) {
1076 		cs = kzalloc(sizeof(*cs), GFP_KERNEL);
1077 		if (!cs)
1078 			return -ENOMEM;
1079 		cs->base = mcspi->base + spi_get_chipselect(spi, 0) * 0x14;
1080 		cs->phys = mcspi->phys + spi_get_chipselect(spi, 0) * 0x14;
1081 		cs->mode = 0;
1082 		cs->chconf0 = 0;
1083 		cs->chctrl0 = 0;
1084 		spi->controller_state = cs;
1085 		/* Link this to context save list */
1086 		list_add_tail(&cs->node, &ctx->cs);
1087 		initial_setup = true;
1088 	}
1089 
1090 	ret = pm_runtime_resume_and_get(mcspi->dev);
1091 	if (ret < 0) {
1092 		if (initial_setup)
1093 			omap2_mcspi_cleanup(spi);
1094 
1095 		return ret;
1096 	}
1097 
1098 	ret = omap2_mcspi_setup_transfer(spi, NULL);
1099 	if (ret && initial_setup)
1100 		omap2_mcspi_cleanup(spi);
1101 
1102 	pm_runtime_mark_last_busy(mcspi->dev);
1103 	pm_runtime_put_autosuspend(mcspi->dev);
1104 
1105 	return ret;
1106 }
1107 
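/*
 * Interrupt handler. The only interrupt this driver enables is EOW (end of
 * word count), and only for TX in target mode; it signals that the TX data
 * has actually been shifted out, which omap2_mcspi_txrx_dma() waits for via
 * mcspi->txdone.
 */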
1108 static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
1109 {
1110 	struct omap2_mcspi *mcspi = data;
1111 	u32 irqstat;
1112 
1113 	irqstat	= mcspi_read_reg(mcspi->ctlr, OMAP2_MCSPI_IRQSTATUS);
1114 	if (!irqstat)
1115 		return IRQ_NONE;
1116 
1117 	/* Disable the IRQ and wake up the target xfer task */
1118 	mcspi_write_reg(mcspi->ctlr, OMAP2_MCSPI_IRQENABLE, 0);
1119 	if (irqstat & OMAP2_MCSPI_IRQSTATUS_EOW)
1120 		complete(&mcspi->txdone);
1121 
1122 	return IRQ_HANDLED;
1123 }
1124 
1125 static int omap2_mcspi_target_abort(struct spi_controller *ctlr)
1126 {
1127 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
1128 	struct omap2_mcspi_dma *mcspi_dma = mcspi->dma_channels;
1129 
1130 	mcspi->target_aborted = true;
1131 	complete(&mcspi_dma->dma_rx_completion);
1132 	complete(&mcspi_dma->dma_tx_completion);
1133 	complete(&mcspi->txdone);
1134 
1135 	return 0;
1136 }
1137 
1138 static int omap2_mcspi_transfer_one(struct spi_controller *ctlr,
1139 				    struct spi_device *spi,
1140 				    struct spi_transfer *t)
1141 {
1142 
1143 	/* We only enable one channel at a time -- the one whose message is
1144 	 * at the head of the queue -- although this controller would gladly
1145 	 * arbitrate among multiple channels.  This corresponds to "single
1146 	 * channel" host mode.  As a side effect, we need to manage the
1147 	 * chipselect with the FORCE bit ... CS != channel enable.
1148 	 */
1149 
1150 	struct omap2_mcspi		*mcspi;
1151 	struct omap2_mcspi_dma		*mcspi_dma;
1152 	struct omap2_mcspi_cs		*cs;
1153 	struct omap2_mcspi_device_config *cd;
1154 	int				par_override = 0;
1155 	int				status = 0;
1156 	u32				chconf;
1157 
1158 	mcspi = spi_controller_get_devdata(ctlr);
1159 	mcspi_dma = mcspi->dma_channels + spi_get_chipselect(spi, 0);
1160 	cs = spi->controller_state;
1161 	cd = spi->controller_data;
1162 
1163 	/*
1164 	 * The target driver could have changed spi->mode in which case
1165 	 * it will be different from cs->mode (the current hardware setup).
1166 	 * If so, set par_override (even though it's not a parity issue) so
1167 	 * omap2_mcspi_setup_transfer will be called to configure the hardware
1168 	 * with the correct mode on the first iteration of the loop below.
1169 	 */
1170 	if (spi->mode != cs->mode)
1171 		par_override = 1;
1172 
1173 	omap2_mcspi_set_enable(spi, 0);
1174 
1175 	if (spi_get_csgpiod(spi, 0))
1176 		omap2_mcspi_set_cs(spi, spi->mode & SPI_CS_HIGH);
1177 
1178 	if (par_override ||
1179 	    (t->speed_hz != spi->max_speed_hz) ||
1180 	    (t->bits_per_word != spi->bits_per_word)) {
1181 		par_override = 1;
1182 		status = omap2_mcspi_setup_transfer(spi, t);
1183 		if (status < 0)
1184 			goto out;
1185 		if (t->speed_hz == spi->max_speed_hz &&
1186 		    t->bits_per_word == spi->bits_per_word)
1187 			par_override = 0;
1188 	}
1189 
1190 	chconf = mcspi_cached_chconf0(spi);
1191 	chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
1192 	chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
1193 
1194 	if (t->tx_buf == NULL)
1195 		chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
1196 	else if (t->rx_buf == NULL)
1197 		chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
1198 
1199 	if (cd && cd->turbo_mode && t->tx_buf == NULL) {
1200 		/* Turbo mode is for more than one word */
1201 		if (t->len > ((cs->word_len + 7) >> 3))
1202 			chconf |= OMAP2_MCSPI_CHCONF_TURBO;
1203 	}
1204 
1205 	mcspi_write_chconf0(spi, chconf);
1206 
1207 	if (t->len) {
1208 		unsigned	count;
1209 
1210 		if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
1211 		    ctlr->cur_msg_mapped &&
1212 		    ctlr->can_dma(ctlr, spi, t))
1213 			omap2_mcspi_set_fifo(spi, t, 1);
1214 
1215 		omap2_mcspi_set_enable(spi, 1);
1216 
1217 		/* RX_ONLY mode needs dummy data in TX reg */
1218 		if (t->tx_buf == NULL)
1219 			writel_relaxed(0, cs->base
1220 					+ OMAP2_MCSPI_TX0);
1221 
1222 		if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
1223 		    ctlr->cur_msg_mapped &&
1224 		    ctlr->can_dma(ctlr, spi, t))
1225 			count = omap2_mcspi_txrx_dma(spi, t);
1226 		else
1227 			count = omap2_mcspi_txrx_pio(spi, t);
1228 
1229 		if (count != t->len) {
1230 			status = -EIO;
1231 			goto out;
1232 		}
1233 	}
1234 
1235 	omap2_mcspi_set_enable(spi, 0);
1236 
1237 	if (mcspi->fifo_depth > 0)
1238 		omap2_mcspi_set_fifo(spi, t, 0);
1239 
1240 out:
1241 	/* Restore defaults if they were overridden */
1242 	if (par_override) {
1243 		par_override = 0;
1244 		status = omap2_mcspi_setup_transfer(spi, NULL);
1245 	}
1246 
1247 	omap2_mcspi_set_enable(spi, 0);
1248 
1249 	if (spi_get_csgpiod(spi, 0))
1250 		omap2_mcspi_set_cs(spi, !(spi->mode & SPI_CS_HIGH));
1251 
1252 	if (mcspi->fifo_depth > 0 && t)
1253 		omap2_mcspi_set_fifo(spi, t, 0);
1254 
1255 	return status;
1256 }
1257 
1258 static int omap2_mcspi_prepare_message(struct spi_controller *ctlr,
1259 				       struct spi_message *msg)
1260 {
1261 	struct omap2_mcspi	*mcspi = spi_controller_get_devdata(ctlr);
1262 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1263 	struct omap2_mcspi_cs	*cs;
1264 	struct spi_transfer	*tr;
1265 	u8 bits_per_word;
1266 
1267 	/*
1268 	 * The conditions are strict; it is mandatory to check each transfer in the list to see if
1269 	 * multi-mode is applicable.
1270 	 */
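	/*
	 * As a concrete example of the rules below: a message made of two
	 * transfers of 2 bytes each, with bits_per_word == 8, no word delay
	 * and cs_change set on both, is accepted; each transfer is rewritten
	 * as a single 16-bit word so the whole message can run in
	 * multi-channel mode.
	 */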
1271 	mcspi->use_multi_mode = true;
1272 	list_for_each_entry(tr, &msg->transfers, transfer_list) {
1273 		if (!tr->bits_per_word)
1274 			bits_per_word = msg->spi->bits_per_word;
1275 		else
1276 			bits_per_word = tr->bits_per_word;
1277 
1278 		/*
1279 		 * Check if this transfer contains only one word;
1280 		 * OR contains 1 to 4 words, with bits_per_word == 8 and no delay between each word
1281 		 * OR contains 1 to 2 words, with bits_per_word == 16 and no delay between each word
1282 		 *
1283 		 * If one of the last two cases is true, this also changes the bits_per_word of this
1284 		 * transfer to make it a bit faster.
1285 		 * It's not an issue to change the bits_per_word here even if multi-mode is not
1286 		 * applicable for this message; the signal on the wire will be the same.
1287 		 */
1288 		if (bits_per_word < 8 && tr->len == 1) {
1289 			/* multi-mode is applicable, only one word (1..7 bits) */
1290 		} else if (tr->word_delay.value == 0 && bits_per_word == 8 && tr->len <= 4) {
1291 			/* multi-mode is applicable, only one "bigger" word (8,16,24,32 bits) */
1292 			tr->bits_per_word = tr->len * bits_per_word;
1293 		} else if (tr->word_delay.value == 0 && bits_per_word == 16 && tr->len <= 2) {
1294 			/* multi-mode is applicable, only one "bigger" word (16,32 bits) */
1295 			tr->bits_per_word = tr->len * bits_per_word / 2;
1296 		} else if (bits_per_word >= 8 && tr->len == bits_per_word / 8) {
1297 			/* multi-mode is applicable, only one word (9..15,17..32 bits) */
1298 		} else {
1299 			/* multi-mode is not applicable: more than one word in the transfer */
1300 			mcspi->use_multi_mode = false;
1301 		}
1302 
1303 		/* Check if transfer asks to change the CS status after the transfer */
1304 		if (!tr->cs_change)
1305 			mcspi->use_multi_mode = false;
1306 
1307 		/*
1308 		 * If at least one transfer is not compatible, switch back to single mode
1309 		 *
1310 		 * The bits_per_word of certain transfers can be different, but it will have no
1311 		 * impact on the signal itself.
1312 		 */
1313 		if (!mcspi->use_multi_mode)
1314 			break;
1315 	}
1316 
1317 	omap2_mcspi_set_mode(ctlr);
1318 
1319 	/* In single mode only a single channel can have the FORCE bit enabled
1320 	 * in its chconf0 register.
1321 	 * Scan all channels and clear the FORCE bit on all but the current one.
1322 	 * A FORCE bit can remain set from a previous transfer that had cs_change enabled.
1323 	 *
1324 	 * In multi mode all FORCE bits must be disabled.
1325 	 */
1326 	list_for_each_entry(cs, &ctx->cs, node) {
1327 		if (msg->spi->controller_state == cs && !mcspi->use_multi_mode) {
1328 			continue;
1329 		}
1330 
1331 		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE)) {
1332 			cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
1333 			writel_relaxed(cs->chconf0,
1334 					cs->base + OMAP2_MCSPI_CHCONF0);
1335 			readl_relaxed(cs->base + OMAP2_MCSPI_CHCONF0);
1336 		}
1337 	}
1338 
1339 	return 0;
1340 }
1341 
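/*
 * DMA is used only when both an RX and a TX channel were obtained at probe
 * time. In target mode any such transfer goes through DMA; in host mode
 * transfers shorter than DMA_MIN_BYTES fall back to PIO to avoid the DMA
 * setup/teardown overhead.
 */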
1342 static bool omap2_mcspi_can_dma(struct spi_controller *ctlr,
1343 				struct spi_device *spi,
1344 				struct spi_transfer *xfer)
1345 {
1346 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
1347 	struct omap2_mcspi_dma *mcspi_dma =
1348 		&mcspi->dma_channels[spi_get_chipselect(spi, 0)];
1349 
1350 	if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx)
1351 		return false;
1352 
1353 	if (spi_controller_is_target(ctlr))
1354 		return true;
1355 
1356 	ctlr->dma_rx = mcspi_dma->dma_rx;
1357 	ctlr->dma_tx = mcspi_dma->dma_tx;
1358 
1359 	return (xfer->len >= DMA_MIN_BYTES);
1360 }
1361 
1362 static size_t omap2_mcspi_max_xfer_size(struct spi_device *spi)
1363 {
1364 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
1365 	struct omap2_mcspi_dma *mcspi_dma =
1366 		&mcspi->dma_channels[spi_get_chipselect(spi, 0)];
1367 
1368 	if (mcspi->max_xfer_len && mcspi_dma->dma_rx)
1369 		return mcspi->max_xfer_len;
1370 
1371 	return SIZE_MAX;
1372 }
1373 
1374 static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi)
1375 {
1376 	struct spi_controller	*ctlr = mcspi->ctlr;
1377 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1378 	int			ret = 0;
1379 
1380 	ret = pm_runtime_resume_and_get(mcspi->dev);
1381 	if (ret < 0)
1382 		return ret;
1383 
1384 	mcspi_write_reg(ctlr, OMAP2_MCSPI_WAKEUPENABLE,
1385 			OMAP2_MCSPI_WAKEUPENABLE_WKEN);
1386 	ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
1387 
1388 	omap2_mcspi_set_mode(ctlr);
1389 	pm_runtime_mark_last_busy(mcspi->dev);
1390 	pm_runtime_put_autosuspend(mcspi->dev);
1391 	return 0;
1392 }
1393 
1394 static int omap_mcspi_runtime_suspend(struct device *dev)
1395 {
1396 	int error;
1397 
1398 	error = pinctrl_pm_select_idle_state(dev);
1399 	if (error)
1400 		dev_warn(dev, "%s: failed to set pins: %i\n", __func__, error);
1401 
1402 	return 0;
1403 }
1404 
1405 /*
1406  * When the SPI controller wakes up from off-mode, CS is in the active state.
1407  * If it was in the inactive state when the driver was suspended, then force
1408  * it to the inactive state at wake up.
1409  */
1410 static int omap_mcspi_runtime_resume(struct device *dev)
1411 {
1412 	struct spi_controller *ctlr = dev_get_drvdata(dev);
1413 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
1414 	struct omap2_mcspi_regs *ctx = &mcspi->ctx;
1415 	struct omap2_mcspi_cs *cs;
1416 	int error;
1417 
1418 	error = pinctrl_pm_select_default_state(dev);
1419 	if (error)
1420 		dev_warn(dev, "%s: failed to set pins: %i\n", __func__, error);
1421 
1422 	/* McSPI: context restore */
1423 	mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
1424 	mcspi_write_reg(ctlr, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
1425 
1426 	list_for_each_entry(cs, &ctx->cs, node) {
1427 		/*
1428 		 * We need to toggle the CS state for the OMAP to take this
1429 		 * change into account.
1430 		 */
1431 		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
1432 			cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE;
1433 			writel_relaxed(cs->chconf0,
1434 				       cs->base + OMAP2_MCSPI_CHCONF0);
1435 			cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
1436 			writel_relaxed(cs->chconf0,
1437 				       cs->base + OMAP2_MCSPI_CHCONF0);
1438 		} else {
1439 			writel_relaxed(cs->chconf0,
1440 				       cs->base + OMAP2_MCSPI_CHCONF0);
1441 		}
1442 	}
1443 
1444 	return 0;
1445 }
1446 
1447 static struct omap2_mcspi_platform_config omap2_pdata = {
1448 	.regs_offset = 0,
1449 };
1450 
1451 static struct omap2_mcspi_platform_config omap4_pdata = {
1452 	.regs_offset = OMAP4_MCSPI_REG_OFFSET,
1453 };
1454 
1455 static struct omap2_mcspi_platform_config am654_pdata = {
1456 	.regs_offset = OMAP4_MCSPI_REG_OFFSET,
1457 	.max_xfer_len = SZ_4K - 1,
1458 };
1459 
1460 static const struct of_device_id omap_mcspi_of_match[] = {
1461 	{
1462 		.compatible = "ti,omap2-mcspi",
1463 		.data = &omap2_pdata,
1464 	},
1465 	{
1466 		.compatible = "ti,omap4-mcspi",
1467 		.data = &omap4_pdata,
1468 	},
1469 	{
1470 		.compatible = "ti,am654-mcspi",
1471 		.data = &am654_pdata,
1472 	},
1473 	{ },
1474 };
1475 MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
1476 
1477 static int omap2_mcspi_probe(struct platform_device *pdev)
1478 {
1479 	struct spi_controller	*ctlr;
1480 	const struct omap2_mcspi_platform_config *pdata;
1481 	struct omap2_mcspi	*mcspi;
1482 	struct resource		*r;
1483 	int			status = 0, i;
1484 	u32			regs_offset = 0;
1485 	struct device_node	*node = pdev->dev.of_node;
1486 	const struct of_device_id *match;
1487 
1488 	if (of_property_read_bool(node, "spi-slave"))
1489 		ctlr = spi_alloc_target(&pdev->dev, sizeof(*mcspi));
1490 	else
1491 		ctlr = spi_alloc_host(&pdev->dev, sizeof(*mcspi));
1492 	if (!ctlr)
1493 		return -ENOMEM;
1494 
1495 	/* the spi->mode bits understood by this driver: */
1496 	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1497 	ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1498 	ctlr->setup = omap2_mcspi_setup;
1499 	ctlr->auto_runtime_pm = true;
1500 	ctlr->prepare_message = omap2_mcspi_prepare_message;
1501 	ctlr->can_dma = omap2_mcspi_can_dma;
1502 	ctlr->transfer_one = omap2_mcspi_transfer_one;
1503 	ctlr->set_cs = omap2_mcspi_set_cs;
1504 	ctlr->cleanup = omap2_mcspi_cleanup;
1505 	ctlr->target_abort = omap2_mcspi_target_abort;
1506 	ctlr->dev.of_node = node;
1507 	ctlr->use_gpio_descriptors = true;
1508 
1509 	platform_set_drvdata(pdev, ctlr);
1510 
1511 	mcspi = spi_controller_get_devdata(ctlr);
1512 	mcspi->ctlr = ctlr;
1513 
1514 	match = of_match_device(omap_mcspi_of_match, &pdev->dev);
1515 	if (match) {
1516 		u32 num_cs = 1; /* default number of chipselect */
1517 		pdata = match->data;
1518 
1519 		of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
1520 		ctlr->num_chipselect = num_cs;
1521 		if (of_property_read_bool(node, "ti,pindir-d0-out-d1-in"))
1522 			mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
1523 	} else {
1524 		pdata = dev_get_platdata(&pdev->dev);
1525 		ctlr->num_chipselect = pdata->num_cs;
1526 		mcspi->pin_dir = pdata->pin_dir;
1527 	}
1528 	regs_offset = pdata->regs_offset;
1529 	if (pdata->max_xfer_len) {
1530 		mcspi->max_xfer_len = pdata->max_xfer_len;
1531 		ctlr->max_transfer_size = omap2_mcspi_max_xfer_size;
1532 	}
1533 
1534 	mcspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
1535 	if (IS_ERR(mcspi->base)) {
1536 		status = PTR_ERR(mcspi->base);
1537 		goto free_ctlr;
1538 	}
1539 	mcspi->phys = r->start + regs_offset;
1540 	mcspi->base += regs_offset;
1541 
1542 	mcspi->dev = &pdev->dev;
1543 
1544 	INIT_LIST_HEAD(&mcspi->ctx.cs);
1545 
1546 	mcspi->dma_channels = devm_kcalloc(&pdev->dev, ctlr->num_chipselect,
1547 					   sizeof(struct omap2_mcspi_dma),
1548 					   GFP_KERNEL);
1549 	if (mcspi->dma_channels == NULL) {
1550 		status = -ENOMEM;
1551 		goto free_ctlr;
1552 	}
1553 
1554 	for (i = 0; i < ctlr->num_chipselect; i++) {
1555 		sprintf(mcspi->dma_channels[i].dma_rx_ch_name, "rx%d", i);
1556 		sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i);
1557 
1558 		status = omap2_mcspi_request_dma(mcspi,
1559 						 &mcspi->dma_channels[i]);
1560 		if (status == -EPROBE_DEFER)
1561 			goto free_ctlr;
1562 	}
1563 
1564 	status = platform_get_irq(pdev, 0);
1565 	if (status < 0)
1566 		goto free_ctlr;
1567 	init_completion(&mcspi->txdone);
1568 	status = devm_request_irq(&pdev->dev, status,
1569 				  omap2_mcspi_irq_handler, 0, pdev->name,
1570 				  mcspi);
1571 	if (status) {
1572 		dev_err(&pdev->dev, "Cannot request IRQ");
1573 		goto free_ctlr;
1574 	}
1575 
1576 	mcspi->ref_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
1577 	if (mcspi->ref_clk)
1578 		mcspi->ref_clk_hz = clk_get_rate(mcspi->ref_clk);
1579 	else
1580 		mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ;
1581 	ctlr->max_speed_hz = mcspi->ref_clk_hz;
1582 	ctlr->min_speed_hz = mcspi->ref_clk_hz >> 15;
1583 
1584 	pm_runtime_use_autosuspend(&pdev->dev);
1585 	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
1586 	pm_runtime_enable(&pdev->dev);
1587 
1588 	status = omap2_mcspi_controller_setup(mcspi);
1589 	if (status < 0)
1590 		goto disable_pm;
1591 
1592 	status = devm_spi_register_controller(&pdev->dev, ctlr);
1593 	if (status < 0)
1594 		goto disable_pm;
1595 
1596 	return status;
1597 
1598 disable_pm:
1599 	pm_runtime_dont_use_autosuspend(&pdev->dev);
1600 	pm_runtime_put_sync(&pdev->dev);
1601 	pm_runtime_disable(&pdev->dev);
1602 free_ctlr:
1603 	omap2_mcspi_release_dma(ctlr);
1604 	spi_controller_put(ctlr);
1605 	return status;
1606 }
1607 
1608 static void omap2_mcspi_remove(struct platform_device *pdev)
1609 {
1610 	struct spi_controller *ctlr = platform_get_drvdata(pdev);
1611 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
1612 
1613 	omap2_mcspi_release_dma(ctlr);
1614 
1615 	pm_runtime_dont_use_autosuspend(mcspi->dev);
1616 	pm_runtime_put_sync(mcspi->dev);
1617 	pm_runtime_disable(&pdev->dev);
1618 }
1619 
1620 /* work with hotplug and coldplug */
1621 MODULE_ALIAS("platform:omap2_mcspi");
1622 
1623 static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
1624 {
1625 	struct spi_controller *ctlr = dev_get_drvdata(dev);
1626 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
1627 	int error;
1628 
1629 	error = pinctrl_pm_select_sleep_state(dev);
1630 	if (error)
1631 		dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
1632 			 __func__, error);
1633 
1634 	error = spi_controller_suspend(ctlr);
1635 	if (error)
1636 		dev_warn(mcspi->dev, "%s: controller suspend failed: %i\n",
1637 			 __func__, error);
1638 
1639 	return pm_runtime_force_suspend(dev);
1640 }
1641 
1642 static int __maybe_unused omap2_mcspi_resume(struct device *dev)
1643 {
1644 	struct spi_controller *ctlr = dev_get_drvdata(dev);
1645 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
1646 	int error;
1647 
1648 	error = spi_controller_resume(ctlr);
1649 	if (error)
1650 		dev_warn(mcspi->dev, "%s: controller resume failed: %i\n",
1651 			 __func__, error);
1652 
1653 	return pm_runtime_force_resume(dev);
1654 }
1655 
1656 static const struct dev_pm_ops omap2_mcspi_pm_ops = {
1657 	SET_SYSTEM_SLEEP_PM_OPS(omap2_mcspi_suspend,
1658 				omap2_mcspi_resume)
1659 	.runtime_suspend	= omap_mcspi_runtime_suspend,
1660 	.runtime_resume		= omap_mcspi_runtime_resume,
1661 };
1662 
1663 static struct platform_driver omap2_mcspi_driver = {
1664 	.driver = {
1665 		.name =		"omap2_mcspi",
1666 		.pm =		&omap2_mcspi_pm_ops,
1667 		.of_match_table = omap_mcspi_of_match,
1668 	},
1669 	.probe =	omap2_mcspi_probe,
1670 	.remove_new =	omap2_mcspi_remove,
1671 };
1672 
1673 module_platform_driver(omap2_mcspi_driver);
1674 MODULE_LICENSE("GPL");
1675