xref: /linux/drivers/spi/spi-omap2-mcspi.c (revision 8e07e0e3964ca4e23ce7b68e2096fe660a888942)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * OMAP2 McSPI controller driver
4  *
5  * Copyright (C) 2005, 2006 Nokia Corporation
6  * Author:	Samuel Ortiz <samuel.ortiz@nokia.com> and
7  *		Juha Yrjola <juha.yrjola@nokia.com>
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/interrupt.h>
12 #include <linux/module.h>
13 #include <linux/device.h>
14 #include <linux/delay.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/dmaengine.h>
17 #include <linux/pinctrl/consumer.h>
18 #include <linux/platform_device.h>
19 #include <linux/err.h>
20 #include <linux/clk.h>
21 #include <linux/io.h>
22 #include <linux/slab.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/of.h>
25 #include <linux/of_device.h>
26 #include <linux/gcd.h>
27 
28 #include <linux/spi/spi.h>
29 
30 #include <linux/platform_data/spi-omap2-mcspi.h>
31 
32 #define OMAP2_MCSPI_MAX_FREQ		48000000
33 #define OMAP2_MCSPI_MAX_DIVIDER		4096
34 #define OMAP2_MCSPI_MAX_FIFODEPTH	64
35 #define OMAP2_MCSPI_MAX_FIFOWCNT	0xFFFF
36 #define SPI_AUTOSUSPEND_TIMEOUT		2000
37 
38 #define OMAP2_MCSPI_REVISION		0x00
39 #define OMAP2_MCSPI_SYSSTATUS		0x14
40 #define OMAP2_MCSPI_IRQSTATUS		0x18
41 #define OMAP2_MCSPI_IRQENABLE		0x1c
42 #define OMAP2_MCSPI_WAKEUPENABLE	0x20
43 #define OMAP2_MCSPI_SYST		0x24
44 #define OMAP2_MCSPI_MODULCTRL		0x28
45 #define OMAP2_MCSPI_XFERLEVEL		0x7c
46 
47 /* per-channel banks, 0x14 bytes each, first is: */
48 #define OMAP2_MCSPI_CHCONF0		0x2c
49 #define OMAP2_MCSPI_CHSTAT0		0x30
50 #define OMAP2_MCSPI_CHCTRL0		0x34
51 #define OMAP2_MCSPI_TX0			0x38
52 #define OMAP2_MCSPI_RX0			0x3c
53 
54 /* per-register bitmasks: */
55 #define OMAP2_MCSPI_IRQSTATUS_EOW	BIT(17)
56 #define OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY    BIT(0)
57 #define OMAP2_MCSPI_IRQSTATUS_RX0_FULL    BIT(2)
58 
59 #define OMAP2_MCSPI_MODULCTRL_SINGLE	BIT(0)
60 #define OMAP2_MCSPI_MODULCTRL_MS	BIT(2)
61 #define OMAP2_MCSPI_MODULCTRL_STEST	BIT(3)
62 
63 #define OMAP2_MCSPI_CHCONF_PHA		BIT(0)
64 #define OMAP2_MCSPI_CHCONF_POL		BIT(1)
65 #define OMAP2_MCSPI_CHCONF_CLKD_MASK	(0x0f << 2)
66 #define OMAP2_MCSPI_CHCONF_EPOL		BIT(6)
67 #define OMAP2_MCSPI_CHCONF_WL_MASK	(0x1f << 7)
68 #define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY	BIT(12)
69 #define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY	BIT(13)
70 #define OMAP2_MCSPI_CHCONF_TRM_MASK	(0x03 << 12)
71 #define OMAP2_MCSPI_CHCONF_DMAW		BIT(14)
72 #define OMAP2_MCSPI_CHCONF_DMAR		BIT(15)
73 #define OMAP2_MCSPI_CHCONF_DPE0		BIT(16)
74 #define OMAP2_MCSPI_CHCONF_DPE1		BIT(17)
75 #define OMAP2_MCSPI_CHCONF_IS		BIT(18)
76 #define OMAP2_MCSPI_CHCONF_TURBO	BIT(19)
77 #define OMAP2_MCSPI_CHCONF_FORCE	BIT(20)
78 #define OMAP2_MCSPI_CHCONF_FFET		BIT(27)
79 #define OMAP2_MCSPI_CHCONF_FFER		BIT(28)
80 #define OMAP2_MCSPI_CHCONF_CLKG		BIT(29)
81 
82 #define OMAP2_MCSPI_CHSTAT_RXS		BIT(0)
83 #define OMAP2_MCSPI_CHSTAT_TXS		BIT(1)
84 #define OMAP2_MCSPI_CHSTAT_EOT		BIT(2)
85 #define OMAP2_MCSPI_CHSTAT_TXFFE	BIT(3)
86 
87 #define OMAP2_MCSPI_CHCTRL_EN		BIT(0)
88 #define OMAP2_MCSPI_CHCTRL_EXTCLK_MASK	(0xff << 8)
89 
90 #define OMAP2_MCSPI_WAKEUPENABLE_WKEN	BIT(0)
91 
/* We have 2 DMA channels per CS, one for RX and one for TX */
struct omap2_mcspi_dma {
	struct dma_chan *dma_tx;	/* dmaengine channel used for TX */
	struct dma_chan *dma_rx;	/* dmaengine channel used for RX */

	struct completion dma_tx_completion;	/* signalled by the TX DMA callback */
	struct completion dma_rx_completion;	/* signalled by the RX DMA callback */

	/* Channel names used to look the channels up via dma_request_chan() */
	char dma_rx_ch_name[14];
	char dma_tx_ch_name[14];
};
103 
104 /* use PIO for small transfers, avoiding DMA setup/teardown overhead and
105  * cache operations; better heuristics consider wordsize and bitrate.
106  */
107 #define DMA_MIN_BYTES			160
108 
109 
/*
 * Used for context save and restore, structure members to be updated whenever
 * corresponding registers are modified.
 */
struct omap2_mcspi_regs {
	u32 modulctrl;		/* shadow of OMAP2_MCSPI_MODULCTRL */
	u32 wakeupenable;	/* shadow of OMAP2_MCSPI_WAKEUPENABLE */
	/* list of per-chip-select shadows; presumably links
	 * omap2_mcspi_cs.node — linking code is outside this view */
	struct list_head cs;
};
119 
/* Per-controller driver state. */
struct omap2_mcspi {
	/* waited on after TX in target mode (EOW; see omap2_mcspi_txrx_dma) */
	struct completion	txdone;
	struct spi_controller	*ctlr;
	/* Virtual base address of the controller */
	void __iomem		*base;
	unsigned long		phys;		/* physical base address */
	/* SPI1 has 4 channels, while SPI2 has 2 */
	struct omap2_mcspi_dma	*dma_channels;
	struct device		*dev;
	struct omap2_mcspi_regs ctx;		/* register shadows for context restore */
	struct clk		*ref_clk;	/* reference clock */
	/* current FIFO depth in bytes; 0 means the FIFO is disabled */
	int			fifo_depth;
	bool			target_aborted;	/* set when a target-mode transfer aborts */
	unsigned int		pin_dir:1;	/* MCSPI_PINDIR_* D0/D1 data direction */
	size_t			max_xfer_len;
	u32			ref_clk_hz;	/* ref clock rate used for divider setup */
};
137 
/* Per-chip-select (per-channel) state. */
struct omap2_mcspi_cs {
	void __iomem		*base;		/* virtual base of this channel's register bank */
	unsigned long		phys;		/* physical base of the bank (DMA src/dst) */
	int			word_len;	/* current word length in bits (1..32) */
	u16			mode;		/* SPI mode last programmed for this CS */
	struct list_head	node;		/* list link (see omap2_mcspi_regs.cs) */
	/* Context save and restore shadow register */
	u32			chconf0, chctrl0;
};
147 
148 static inline void mcspi_write_reg(struct spi_controller *ctlr,
149 		int idx, u32 val)
150 {
151 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
152 
153 	writel_relaxed(val, mcspi->base + idx);
154 }
155 
156 static inline u32 mcspi_read_reg(struct spi_controller *ctlr, int idx)
157 {
158 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
159 
160 	return readl_relaxed(mcspi->base + idx);
161 }
162 
163 static inline void mcspi_write_cs_reg(const struct spi_device *spi,
164 		int idx, u32 val)
165 {
166 	struct omap2_mcspi_cs	*cs = spi->controller_state;
167 
168 	writel_relaxed(val, cs->base +  idx);
169 }
170 
171 static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
172 {
173 	struct omap2_mcspi_cs	*cs = spi->controller_state;
174 
175 	return readl_relaxed(cs->base + idx);
176 }
177 
178 static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
179 {
180 	struct omap2_mcspi_cs *cs = spi->controller_state;
181 
182 	return cs->chconf0;
183 }
184 
/*
 * Update the cached CHCONF0 shadow and program the hardware register.
 * The trailing read-back flushes the posted write so the new
 * configuration takes effect before the caller proceeds.
 */
static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	cs->chconf0 = val;
	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
}
193 
/*
 * Number of bytes required to hold one SPI word of @word_len bits.
 * Returns 1, 2 or 4; @word_len is assumed to be at most 32.
 */
static inline int mcspi_bytes_per_word(int word_len)
{
	if (word_len > 16)
		return 4;

	return (word_len > 8) ? 2 : 1;
}
203 
204 static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
205 		int is_read, int enable)
206 {
207 	u32 l, rw;
208 
209 	l = mcspi_cached_chconf0(spi);
210 
211 	if (is_read) /* 1 is read, 0 write */
212 		rw = OMAP2_MCSPI_CHCONF_DMAR;
213 	else
214 		rw = OMAP2_MCSPI_CHCONF_DMAW;
215 
216 	if (enable)
217 		l |= rw;
218 	else
219 		l &= ~rw;
220 
221 	mcspi_write_chconf0(spi, l);
222 }
223 
224 static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
225 {
226 	struct omap2_mcspi_cs *cs = spi->controller_state;
227 	u32 l;
228 
229 	l = cs->chctrl0;
230 	if (enable)
231 		l |= OMAP2_MCSPI_CHCTRL_EN;
232 	else
233 		l &= ~OMAP2_MCSPI_CHCTRL_EN;
234 	cs->chctrl0 = l;
235 	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
236 	/* Flash post-writes */
237 	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
238 }
239 
/*
 * spi_controller->set_cs hook: assert/deassert the chip select by
 * toggling the FORCE bit in CHCONF0.  Requires the device to be
 * runtime-resumed for the register access; a no-op until setup has
 * created the controller_state.
 */
static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
{
	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
	u32 l;

	/* The controller handles the inverted chip selects
	 * using the OMAP2_MCSPI_CHCONF_EPOL bit so revert
	 * the inversion from the core spi_set_cs function.
	 */
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->controller_state) {
		int err = pm_runtime_resume_and_get(mcspi->dev);
		if (err < 0) {
			dev_err(mcspi->dev, "failed to get sync: %d\n", err);
			return;
		}

		l = mcspi_cached_chconf0(spi);

		/* FORCE manually drives the CS line; polarity was folded in above */
		if (enable)
			l &= ~OMAP2_MCSPI_CHCONF_FORCE;
		else
			l |= OMAP2_MCSPI_CHCONF_FORCE;

		mcspi_write_chconf0(spi, l);

		pm_runtime_mark_last_busy(mcspi->dev);
		pm_runtime_put_autosuspend(mcspi->dev);
	}
}
272 
273 static void omap2_mcspi_set_mode(struct spi_controller *ctlr)
274 {
275 	struct omap2_mcspi	*mcspi = spi_controller_get_devdata(ctlr);
276 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
277 	u32 l;
278 
279 	/*
280 	 * Choose host or target mode
281 	 */
282 	l = mcspi_read_reg(ctlr, OMAP2_MCSPI_MODULCTRL);
283 	l &= ~(OMAP2_MCSPI_MODULCTRL_STEST);
284 	if (spi_controller_is_target(ctlr)) {
285 		l |= (OMAP2_MCSPI_MODULCTRL_MS);
286 	} else {
287 		l &= ~(OMAP2_MCSPI_MODULCTRL_MS);
288 		l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
289 	}
290 	mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, l);
291 
292 	ctx->modulctrl = l;
293 }
294 
/*
 * Enable or disable the built-in FIFO for transfer @t.
 *
 * When enabling: AEL/AFL (bytes-1) go in XFERLEVEL[7:0]/[15:8], the word
 * count WCNT (DMA only) in XFERLEVEL[31:16], and FFET/FFER are set in
 * CHCONF0.  Full-duplex transfers split the FIFO between RX and TX.
 * Falls through to the disable path when the length is not a whole
 * number of words or WCNT would overflow.  mcspi->fifo_depth tracks the
 * resulting depth in bytes (0 = FIFO off).
 */
static void omap2_mcspi_set_fifo(const struct spi_device *spi,
				struct spi_transfer *t, int enable, int dma_enabled)
{
	struct spi_controller *ctlr = spi->controller;
	struct omap2_mcspi_cs *cs = spi->controller_state;
	struct omap2_mcspi *mcspi;
	unsigned int wcnt;
	int max_fifo_depth, bytes_per_word;
	u32 chconf, xferlevel;

	mcspi = spi_controller_get_devdata(ctlr);

	chconf = mcspi_cached_chconf0(spi);
	if (enable) {
		bytes_per_word = mcspi_bytes_per_word(cs->word_len);
		/* FIFO only works on whole words */
		if (t->len % bytes_per_word != 0)
			goto disable_fifo;

		/* full-duplex shares the FIFO between RX and TX */
		if (t->rx_buf != NULL && t->tx_buf != NULL)
			max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
		else
			max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;
		if (dma_enabled)
			wcnt = t->len / bytes_per_word;
		else
			wcnt = 0;
		if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
			goto disable_fifo;

		xferlevel = wcnt << 16;
		if (t->rx_buf != NULL) {
			chconf |= OMAP2_MCSPI_CHCONF_FFER;
			if (dma_enabled)
				xferlevel |= (bytes_per_word - 1) << 8;
			else
				xferlevel |= (max_fifo_depth - 1) << 8;
		}

		if (t->tx_buf != NULL) {
			chconf |= OMAP2_MCSPI_CHCONF_FFET;
			if (dma_enabled)
				xferlevel |= bytes_per_word - 1;
			else
				xferlevel |= (max_fifo_depth - 1);
		}

		mcspi_write_reg(ctlr, OMAP2_MCSPI_XFERLEVEL, xferlevel);
		mcspi_write_chconf0(spi, chconf);
		mcspi->fifo_depth = max_fifo_depth;

		return;
	}

disable_fifo:
	if (t->rx_buf != NULL)
		chconf &= ~OMAP2_MCSPI_CHCONF_FFER;

	if (t->tx_buf != NULL)
		chconf &= ~OMAP2_MCSPI_CHCONF_FFET;

	mcspi_write_chconf0(spi, chconf);
	mcspi->fifo_depth = 0;
}
358 
359 static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
360 {
361 	unsigned long timeout;
362 
363 	timeout = jiffies + msecs_to_jiffies(1000);
364 	while (!(readl_relaxed(reg) & bit)) {
365 		if (time_after(jiffies, timeout)) {
366 			if (!(readl_relaxed(reg) & bit))
367 				return -ETIMEDOUT;
368 			else
369 				return 0;
370 		}
371 		cpu_relax();
372 	}
373 	return 0;
374 }
375 
376 static int mcspi_wait_for_completion(struct  omap2_mcspi *mcspi,
377 				     struct completion *x)
378 {
379 	if (spi_controller_is_target(mcspi->ctlr)) {
380 		if (wait_for_completion_interruptible(x) ||
381 		    mcspi->target_aborted)
382 			return -EINTR;
383 	} else {
384 		wait_for_completion(x);
385 	}
386 
387 	return 0;
388 }
389 
390 static void omap2_mcspi_rx_callback(void *data)
391 {
392 	struct spi_device *spi = data;
393 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
394 	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
395 
396 	/* We must disable the DMA RX request */
397 	omap2_mcspi_set_dma_req(spi, 1, 0);
398 
399 	complete(&mcspi_dma->dma_rx_completion);
400 }
401 
402 static void omap2_mcspi_tx_callback(void *data)
403 {
404 	struct spi_device *spi = data;
405 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
406 	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
407 
408 	/* We must disable the DMA TX request */
409 	omap2_mcspi_set_dma_req(spi, 0, 0);
410 
411 	complete(&mcspi_dma->dma_tx_completion);
412 }
413 
/*
 * Configure, submit and start the TX DMA for @xfer, then raise the
 * McSPI TX DMA request line.  Completion is signalled through
 * omap2_mcspi_tx_callback(); the caller waits on dma_tx_completion.
 */
static void omap2_mcspi_tx_dma(struct spi_device *spi,
				struct spi_transfer *xfer,
				struct dma_slave_config cfg)
{
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_dma  *mcspi_dma;
	struct dma_async_tx_descriptor *tx;

	mcspi = spi_controller_get_devdata(spi->controller);
	mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];

	dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);

	tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
				     xfer->tx_sg.nents,
				     DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (tx) {
		tx->callback = omap2_mcspi_tx_callback;
		tx->callback_param = spi;
		dmaengine_submit(tx);
	} else {
		/* FIXME: fall back to PIO? */
	}
	dma_async_issue_pending(mcspi_dma->dma_tx);
	omap2_mcspi_set_dma_req(spi, 0, 1);
}
441 
442 static unsigned
443 omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
444 				struct dma_slave_config cfg,
445 				unsigned es)
446 {
447 	struct omap2_mcspi	*mcspi;
448 	struct omap2_mcspi_dma  *mcspi_dma;
449 	unsigned int		count, transfer_reduction = 0;
450 	struct scatterlist	*sg_out[2];
451 	int			nb_sizes = 0, out_mapped_nents[2], ret, x;
452 	size_t			sizes[2];
453 	u32			l;
454 	int			elements = 0;
455 	int			word_len, element_count;
456 	struct omap2_mcspi_cs	*cs = spi->controller_state;
457 	void __iomem		*chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
458 	struct dma_async_tx_descriptor *tx;
459 
460 	mcspi = spi_controller_get_devdata(spi->controller);
461 	mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];
462 	count = xfer->len;
463 
464 	/*
465 	 *  In the "End-of-Transfer Procedure" section for DMA RX in OMAP35x TRM
466 	 *  it mentions reducing DMA transfer length by one element in host
467 	 *  normal mode.
468 	 */
469 	if (mcspi->fifo_depth == 0)
470 		transfer_reduction = es;
471 
472 	word_len = cs->word_len;
473 	l = mcspi_cached_chconf0(spi);
474 
475 	if (word_len <= 8)
476 		element_count = count;
477 	else if (word_len <= 16)
478 		element_count = count >> 1;
479 	else /* word_len <= 32 */
480 		element_count = count >> 2;
481 
482 
483 	dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
484 
485 	/*
486 	 *  Reduce DMA transfer length by one more if McSPI is
487 	 *  configured in turbo mode.
488 	 */
489 	if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
490 		transfer_reduction += es;
491 
492 	if (transfer_reduction) {
493 		/* Split sgl into two. The second sgl won't be used. */
494 		sizes[0] = count - transfer_reduction;
495 		sizes[1] = transfer_reduction;
496 		nb_sizes = 2;
497 	} else {
498 		/*
499 		 * Don't bother splitting the sgl. This essentially
500 		 * clones the original sgl.
501 		 */
502 		sizes[0] = count;
503 		nb_sizes = 1;
504 	}
505 
506 	ret = sg_split(xfer->rx_sg.sgl, xfer->rx_sg.nents, 0, nb_sizes,
507 		       sizes, sg_out, out_mapped_nents, GFP_KERNEL);
508 
509 	if (ret < 0) {
510 		dev_err(&spi->dev, "sg_split failed\n");
511 		return 0;
512 	}
513 
514 	tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, sg_out[0],
515 				     out_mapped_nents[0], DMA_DEV_TO_MEM,
516 				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
517 	if (tx) {
518 		tx->callback = omap2_mcspi_rx_callback;
519 		tx->callback_param = spi;
520 		dmaengine_submit(tx);
521 	} else {
522 		/* FIXME: fall back to PIO? */
523 	}
524 
525 	dma_async_issue_pending(mcspi_dma->dma_rx);
526 	omap2_mcspi_set_dma_req(spi, 1, 1);
527 
528 	ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_rx_completion);
529 	if (ret || mcspi->target_aborted) {
530 		dmaengine_terminate_sync(mcspi_dma->dma_rx);
531 		omap2_mcspi_set_dma_req(spi, 1, 0);
532 		return 0;
533 	}
534 
535 	for (x = 0; x < nb_sizes; x++)
536 		kfree(sg_out[x]);
537 
538 	if (mcspi->fifo_depth > 0)
539 		return count;
540 
541 	/*
542 	 *  Due to the DMA transfer length reduction the missing bytes must
543 	 *  be read manually to receive all of the expected data.
544 	 */
545 	omap2_mcspi_set_enable(spi, 0);
546 
547 	elements = element_count - 1;
548 
549 	if (l & OMAP2_MCSPI_CHCONF_TURBO) {
550 		elements--;
551 
552 		if (!mcspi_wait_for_reg_bit(chstat_reg,
553 					    OMAP2_MCSPI_CHSTAT_RXS)) {
554 			u32 w;
555 
556 			w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
557 			if (word_len <= 8)
558 				((u8 *)xfer->rx_buf)[elements++] = w;
559 			else if (word_len <= 16)
560 				((u16 *)xfer->rx_buf)[elements++] = w;
561 			else /* word_len <= 32 */
562 				((u32 *)xfer->rx_buf)[elements++] = w;
563 		} else {
564 			int bytes_per_word = mcspi_bytes_per_word(word_len);
565 			dev_err(&spi->dev, "DMA RX penultimate word empty\n");
566 			count -= (bytes_per_word << 1);
567 			omap2_mcspi_set_enable(spi, 1);
568 			return count;
569 		}
570 	}
571 	if (!mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_RXS)) {
572 		u32 w;
573 
574 		w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
575 		if (word_len <= 8)
576 			((u8 *)xfer->rx_buf)[elements] = w;
577 		else if (word_len <= 16)
578 			((u16 *)xfer->rx_buf)[elements] = w;
579 		else /* word_len <= 32 */
580 			((u32 *)xfer->rx_buf)[elements] = w;
581 	} else {
582 		dev_err(&spi->dev, "DMA RX last word empty\n");
583 		count -= mcspi_bytes_per_word(word_len);
584 	}
585 	omap2_mcspi_set_enable(spi, 1);
586 	return count;
587 }
588 
/*
 * Run one transfer using DMA in both directions as needed.
 *
 * TX is kicked first, then RX (which blocks until the RX DMA finishes),
 * then the TX completion is awaited.  In target mode an extra wait on
 * mcspi->txdone (signalled on EOW) ensures the words actually left the
 * shift register.  Returns the number of bytes transferred, or 0 on
 * abort/error.
 */
static unsigned
omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_cs	*cs = spi->controller_state;
	struct omap2_mcspi_dma  *mcspi_dma;
	unsigned int		count;
	u8			*rx;
	const u8		*tx;
	struct dma_slave_config	cfg;
	enum dma_slave_buswidth width;
	unsigned es;
	void __iomem		*chstat_reg;
	void __iomem            *irqstat_reg;
	int			wait_res;

	mcspi = spi_controller_get_devdata(spi->controller);
	mcspi_dma = &mcspi->dma_channels[spi_get_chipselect(spi, 0)];

	/* DMA element size follows the configured word length */
	if (cs->word_len <= 8) {
		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		es = 1;
	} else if (cs->word_len <= 16) {
		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		es = 2;
	} else {
		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		es = 4;
	}

	count = xfer->len;

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
	cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
	cfg.src_addr_width = width;
	cfg.dst_addr_width = width;
	cfg.src_maxburst = 1;
	cfg.dst_maxburst = 1;

	rx = xfer->rx_buf;
	tx = xfer->tx_buf;

	mcspi->target_aborted = false;
	reinit_completion(&mcspi_dma->dma_tx_completion);
	reinit_completion(&mcspi_dma->dma_rx_completion);
	reinit_completion(&mcspi->txdone);
	if (tx) {
		/* Enable EOW IRQ to know end of tx in target mode */
		if (spi_controller_is_target(spi->controller))
			mcspi_write_reg(spi->controller,
					OMAP2_MCSPI_IRQENABLE,
					OMAP2_MCSPI_IRQSTATUS_EOW);
		omap2_mcspi_tx_dma(spi, xfer, cfg);
	}

	/* blocks until the RX DMA has completed (or aborted) */
	if (rx != NULL)
		count = omap2_mcspi_rx_dma(spi, xfer, cfg, es);

	if (tx != NULL) {
		int ret;

		ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_tx_completion);
		if (ret || mcspi->target_aborted) {
			dmaengine_terminate_sync(mcspi_dma->dma_tx);
			omap2_mcspi_set_dma_req(spi, 0, 0);
			return 0;
		}

		if (spi_controller_is_target(mcspi->ctlr)) {
			ret = mcspi_wait_for_completion(mcspi, &mcspi->txdone);
			if (ret || mcspi->target_aborted)
				return 0;
		}

		if (mcspi->fifo_depth > 0) {
			irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;

			if (mcspi_wait_for_reg_bit(irqstat_reg,
						OMAP2_MCSPI_IRQSTATUS_EOW) < 0)
				dev_err(&spi->dev, "EOW timed out\n");

			/* write-1-to-clear the EOW status bit */
			mcspi_write_reg(mcspi->ctlr, OMAP2_MCSPI_IRQSTATUS,
					OMAP2_MCSPI_IRQSTATUS_EOW);
		}

		/* for TX_ONLY mode, be sure all words have shifted out */
		if (rx == NULL) {
			chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
			if (mcspi->fifo_depth > 0) {
				wait_res = mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXFFE);
				if (wait_res < 0)
					dev_err(&spi->dev, "TXFFE timed out\n");
			} else {
				wait_res = mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS);
				if (wait_res < 0)
					dev_err(&spi->dev, "TXS timed out\n");
			}
			if (wait_res >= 0 &&
				(mcspi_wait_for_reg_bit(chstat_reg,
					OMAP2_MCSPI_CHSTAT_EOT) < 0))
				dev_err(&spi->dev, "EOT timed out\n");
		}
	}
	return count;
}
697 
/*
 * Run one transfer word-by-word under PIO (no FIFO, no DMA).
 *
 * One branch per word size (<=8, <=16, <=32 bits); each waits for
 * TXS/RXS before touching the data registers.  For RX-only transfers
 * the channel is disabled before reading the final word(s) so the
 * controller does not clock in extra data; turbo mode needs the last
 * two words drained this way.  Returns the number of bytes transferred.
 */
static unsigned
omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct omap2_mcspi_cs	*cs = spi->controller_state;
	unsigned int		count, c;
	u32			l;
	void __iomem		*base = cs->base;
	void __iomem		*tx_reg;
	void __iomem		*rx_reg;
	void __iomem		*chstat_reg;
	int			word_len;

	count = xfer->len;
	c = count;
	word_len = cs->word_len;

	l = mcspi_cached_chconf0(spi);

	/* We store the pre-calculated register addresses on stack to speed
	 * up the transfer loop. */
	tx_reg		= base + OMAP2_MCSPI_TX0;
	rx_reg		= base + OMAP2_MCSPI_RX0;
	chstat_reg	= base + OMAP2_MCSPI_CHSTAT0;

	/* not even one full word to transfer */
	if (c < (word_len>>3))
		return 0;

	if (word_len <= 8) {
		u8		*rx;
		const u8	*tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;

		do {
			c -= 1;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %02x\n",
						word_len, *tx);
				writel_relaxed(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				/* RX-only turbo: disable channel, then drain
				 * the last two words */
				if (c == 1 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = readl_relaxed(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %02x\n",
						    word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = readl_relaxed(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %02x\n",
						word_len, *(rx - 1));
			}
			/* Add word delay between each word */
			spi_delay_exec(&xfer->word_delay, xfer);
		} while (c);
	} else if (word_len <= 16) {
		u16		*rx;
		const u16	*tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;
		do {
			c -= 2;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %04x\n",
						word_len, *tx);
				writel_relaxed(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				if (c == 2 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = readl_relaxed(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %04x\n",
						    word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = readl_relaxed(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %04x\n",
						word_len, *(rx - 1));
			}
			/* Add word delay between each word */
			spi_delay_exec(&xfer->word_delay, xfer);
		} while (c >= 2);
	} else if (word_len <= 32) {
		u32		*rx;
		const u32	*tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;
		do {
			c -= 4;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %08x\n",
						word_len, *tx);
				writel_relaxed(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				if (c == 4 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = readl_relaxed(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %08x\n",
						    word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = readl_relaxed(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %08x\n",
						word_len, *(rx - 1));
			}
			/* Add word delay between each word */
			spi_delay_exec(&xfer->word_delay, xfer);
		} while (c >= 4);
	}

	/* for TX_ONLY mode, be sure all words have shifted out */
	if (xfer->rx_buf == NULL) {
		if (mcspi_wait_for_reg_bit(chstat_reg,
				OMAP2_MCSPI_CHSTAT_TXS) < 0) {
			dev_err(&spi->dev, "TXS timed out\n");
		} else if (mcspi_wait_for_reg_bit(chstat_reg,
				OMAP2_MCSPI_CHSTAT_EOT) < 0)
			dev_err(&spi->dev, "EOT timed out\n");

		/* disable chan to purge rx datas received in TX_ONLY transfer,
		 * otherwise these rx datas will affect the direct following
		 * RX_ONLY transfer.
		 */
		omap2_mcspi_set_enable(spi, 0);
	}
out:
	omap2_mcspi_set_enable(spi, 1);
	return count - c;
}
894 
/*
 * Run one transfer under PIO using the hardware FIFO.
 *
 * Each loop iteration fills/drains up to one FIFO's worth of words
 * (cwc), gated on the TX0_EMPTY / RX0_FULL IRQ status bits, which are
 * write-1-to-clear.  The final, partial request (last_request) falls
 * back to per-word RXS polling and disables the channel after EOT/TXFFE
 * so no extra words are clocked.  Returns the number of bytes
 * transferred.
 */
static unsigned
omap2_mcspi_txrx_piofifo(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct omap2_mcspi_cs	*cs = spi->controller_state;
	struct omap2_mcspi    *mcspi;
	unsigned int		count, c;
	unsigned int		iter, cwc;
	int last_request;
	void __iomem		*base = cs->base;
	void __iomem		*tx_reg;
	void __iomem		*rx_reg;
	void __iomem		*chstat_reg;
	void __iomem        *irqstat_reg;
	int			word_len, bytes_per_word;
	u8		*rx;
	const u8	*tx;

	mcspi = spi_controller_get_devdata(spi->controller);
	count = xfer->len;
	c = count;
	word_len = cs->word_len;
	bytes_per_word = mcspi_bytes_per_word(word_len);

	/*
	 * We store the pre-calculated register addresses on stack to speed
	 * up the transfer loop.
	 */
	tx_reg		= base + OMAP2_MCSPI_TX0;
	rx_reg		= base + OMAP2_MCSPI_RX0;
	chstat_reg	= base + OMAP2_MCSPI_CHSTAT0;
	irqstat_reg    = base + OMAP2_MCSPI_IRQSTATUS;

	/* not even one full word to transfer */
	if (c < (word_len >> 3))
		return 0;

	rx = xfer->rx_buf;
	tx = xfer->tx_buf;

	do {
		/* calculate number of words in current iteration */
		cwc = min((unsigned int)mcspi->fifo_depth / bytes_per_word,
			  c / bytes_per_word);
		last_request = cwc != (mcspi->fifo_depth / bytes_per_word);
		if (tx) {
			if (mcspi_wait_for_reg_bit(irqstat_reg,
						   OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY) < 0) {
				dev_err(&spi->dev, "TX Empty timed out\n");
				goto out;
			}
			/* write-1-to-clear before refilling the FIFO */
			writel_relaxed(OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY, irqstat_reg);

			for (iter = 0; iter < cwc; iter++, tx += bytes_per_word) {
				if (bytes_per_word == 1)
					writel_relaxed(*tx, tx_reg);
				else if (bytes_per_word == 2)
					writel_relaxed(*((u16 *)tx), tx_reg);
				else if (bytes_per_word == 4)
					writel_relaxed(*((u32 *)tx), tx_reg);
			}
		}

		if (rx) {
			/* the last (partial) request never fills the FIFO,
			 * so RX0_FULL cannot be expected then */
			if (!last_request &&
			    mcspi_wait_for_reg_bit(irqstat_reg,
						   OMAP2_MCSPI_IRQSTATUS_RX0_FULL) < 0) {
				dev_err(&spi->dev, "RX_FULL timed out\n");
				goto out;
			}
			writel_relaxed(OMAP2_MCSPI_IRQSTATUS_RX0_FULL, irqstat_reg);

			for (iter = 0; iter < cwc; iter++, rx += bytes_per_word) {
				if (last_request &&
				    mcspi_wait_for_reg_bit(chstat_reg,
							   OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}
				if (bytes_per_word == 1)
					*rx = readl_relaxed(rx_reg);
				else if (bytes_per_word == 2)
					*((u16 *)rx) = readl_relaxed(rx_reg);
				else if (bytes_per_word == 4)
					*((u32 *)rx) = readl_relaxed(rx_reg);
			}
		}

		if (last_request) {
			if (mcspi_wait_for_reg_bit(chstat_reg,
						   OMAP2_MCSPI_CHSTAT_EOT) < 0) {
				dev_err(&spi->dev, "EOT timed out\n");
				goto out;
			}
			if (mcspi_wait_for_reg_bit(chstat_reg,
						   OMAP2_MCSPI_CHSTAT_TXFFE) < 0) {
				dev_err(&spi->dev, "TXFFE timed out\n");
				goto out;
			}
			omap2_mcspi_set_enable(spi, 0);
		}
		c -= cwc * bytes_per_word;
	} while (c >= bytes_per_word);

out:
	omap2_mcspi_set_enable(spi, 1);
	return count - c;
}
1001 
1002 static u32 omap2_mcspi_calc_divisor(u32 speed_hz, u32 ref_clk_hz)
1003 {
1004 	u32 div;
1005 
1006 	for (div = 0; div < 15; div++)
1007 		if (speed_hz >= (ref_clk_hz >> div))
1008 			return div;
1009 
1010 	return 15;
1011 }
1012 
/* called only when no transfer is active to this device */
static int omap2_mcspi_setup_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;
	struct omap2_mcspi *mcspi;
	u32 ref_clk_hz, l = 0, clkd = 0, div, extclk = 0, clkg = 0;
	u8 word_len = spi->bits_per_word;
	u32 speed_hz = spi->max_speed_hz;

	mcspi = spi_controller_get_devdata(spi->controller);

	/* per-transfer settings override the device defaults */
	if (t != NULL && t->bits_per_word)
		word_len = t->bits_per_word;

	cs->word_len = word_len;

	if (t && t->speed_hz)
		speed_hz = t->speed_hz;

	ref_clk_hz = mcspi->ref_clk_hz;
	speed_hz = min_t(u32, speed_hz, ref_clk_hz);
	if (speed_hz < (ref_clk_hz / OMAP2_MCSPI_MAX_DIVIDER)) {
		/* slow clocks: power-of-two divider, clock granularity off */
		clkd = omap2_mcspi_calc_divisor(speed_hz, ref_clk_hz);
		speed_hz = ref_clk_hz >> clkd;
		clkg = 0;
	} else {
		/* one-clock granularity: 12-bit divider split across
		 * CHCONF0.CLKD (low 4 bits) and CHCTRL0.EXTCLK (high 8) */
		div = (ref_clk_hz + speed_hz - 1) / speed_hz;
		speed_hz = ref_clk_hz / div;
		clkd = (div - 1) & 0xf;
		extclk = (div - 1) >> 4;
		clkg = OMAP2_MCSPI_CHCONF_CLKG;
	}

	l = mcspi_cached_chconf0(spi);

	/* standard 4-wire host mode:  SCK, MOSI/out, MISO/in, nCS
	 * REVISIT: this controller could support SPI_3WIRE mode.
	 */
	if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
		l &= ~OMAP2_MCSPI_CHCONF_IS;
		l &= ~OMAP2_MCSPI_CHCONF_DPE1;
		l |= OMAP2_MCSPI_CHCONF_DPE0;
	} else {
		l |= OMAP2_MCSPI_CHCONF_IS;
		l |= OMAP2_MCSPI_CHCONF_DPE1;
		l &= ~OMAP2_MCSPI_CHCONF_DPE0;
	}

	/* wordlength */
	l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
	l |= (word_len - 1) << 7;

	/* set chipselect polarity; manage with FORCE */
	if (!(spi->mode & SPI_CS_HIGH))
		l |= OMAP2_MCSPI_CHCONF_EPOL;	/* active-low; normal */
	else
		l &= ~OMAP2_MCSPI_CHCONF_EPOL;

	/* set clock divisor */
	l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
	l |= clkd << 2;

	/* set clock granularity */
	l &= ~OMAP2_MCSPI_CHCONF_CLKG;
	l |= clkg;
	if (clkg) {
		cs->chctrl0 &= ~OMAP2_MCSPI_CHCTRL_EXTCLK_MASK;
		cs->chctrl0 |= extclk << 8;
		mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
	}

	/* set SPI mode 0..3 */
	if (spi->mode & SPI_CPOL)
		l |= OMAP2_MCSPI_CHCONF_POL;
	else
		l &= ~OMAP2_MCSPI_CHCONF_POL;
	if (spi->mode & SPI_CPHA)
		l |= OMAP2_MCSPI_CHCONF_PHA;
	else
		l &= ~OMAP2_MCSPI_CHCONF_PHA;

	mcspi_write_chconf0(spi, l);

	cs->mode = spi->mode;

	dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
			speed_hz,
			(spi->mode & SPI_CPHA) ? "trailing" : "leading",
			(spi->mode & SPI_CPOL) ? "inverted" : "normal");

	return 0;
}
1106 
1107 /*
1108  * Note that we currently allow DMA only if we get a channel
1109  * for both rx and tx. Otherwise we'll do PIO for both rx and tx.
1110  */
1111 static int omap2_mcspi_request_dma(struct omap2_mcspi *mcspi,
1112 				   struct omap2_mcspi_dma *mcspi_dma)
1113 {
1114 	int ret = 0;
1115 
1116 	mcspi_dma->dma_rx = dma_request_chan(mcspi->dev,
1117 					     mcspi_dma->dma_rx_ch_name);
1118 	if (IS_ERR(mcspi_dma->dma_rx)) {
1119 		ret = PTR_ERR(mcspi_dma->dma_rx);
1120 		mcspi_dma->dma_rx = NULL;
1121 		goto no_dma;
1122 	}
1123 
1124 	mcspi_dma->dma_tx = dma_request_chan(mcspi->dev,
1125 					     mcspi_dma->dma_tx_ch_name);
1126 	if (IS_ERR(mcspi_dma->dma_tx)) {
1127 		ret = PTR_ERR(mcspi_dma->dma_tx);
1128 		mcspi_dma->dma_tx = NULL;
1129 		dma_release_channel(mcspi_dma->dma_rx);
1130 		mcspi_dma->dma_rx = NULL;
1131 	}
1132 
1133 	init_completion(&mcspi_dma->dma_rx_completion);
1134 	init_completion(&mcspi_dma->dma_tx_completion);
1135 
1136 no_dma:
1137 	return ret;
1138 }
1139 
1140 static void omap2_mcspi_release_dma(struct spi_controller *ctlr)
1141 {
1142 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
1143 	struct omap2_mcspi_dma	*mcspi_dma;
1144 	int i;
1145 
1146 	for (i = 0; i < ctlr->num_chipselect; i++) {
1147 		mcspi_dma = &mcspi->dma_channels[i];
1148 
1149 		if (mcspi_dma->dma_rx) {
1150 			dma_release_channel(mcspi_dma->dma_rx);
1151 			mcspi_dma->dma_rx = NULL;
1152 		}
1153 		if (mcspi_dma->dma_tx) {
1154 			dma_release_channel(mcspi_dma->dma_tx);
1155 			mcspi_dma->dma_tx = NULL;
1156 		}
1157 	}
1158 }
1159 
1160 static void omap2_mcspi_cleanup(struct spi_device *spi)
1161 {
1162 	struct omap2_mcspi_cs	*cs;
1163 
1164 	if (spi->controller_state) {
1165 		/* Unlink controller state from context save list */
1166 		cs = spi->controller_state;
1167 		list_del(&cs->node);
1168 
1169 		kfree(cs);
1170 	}
1171 }
1172 
/* Per-device setup: allocate the per-chipselect state on first call and
 * program the channel with the device's default mode/word-length/speed.
 */
static int omap2_mcspi_setup(struct spi_device *spi)
{
	bool			initial_setup = false;
	int			ret;
	struct omap2_mcspi	*mcspi = spi_controller_get_devdata(spi->controller);
	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
	struct omap2_mcspi_cs	*cs = spi->controller_state;

	if (!cs) {
		cs = kzalloc(sizeof(*cs), GFP_KERNEL);
		if (!cs)
			return -ENOMEM;
		/* Each chip select has its own 0x14-byte register bank */
		cs->base = mcspi->base + spi_get_chipselect(spi, 0) * 0x14;
		cs->phys = mcspi->phys + spi_get_chipselect(spi, 0) * 0x14;
		cs->mode = 0;
		cs->chconf0 = 0;
		cs->chctrl0 = 0;
		spi->controller_state = cs;
		/* Link this to context save list */
		list_add_tail(&cs->node, &ctx->cs);
		initial_setup = true;
	}

	ret = pm_runtime_resume_and_get(mcspi->dev);
	if (ret < 0) {
		/* Only undo an allocation made in this call */
		if (initial_setup)
			omap2_mcspi_cleanup(spi);

		return ret;
	}

	ret = omap2_mcspi_setup_transfer(spi, NULL);
	if (ret && initial_setup)
		omap2_mcspi_cleanup(spi);

	pm_runtime_mark_last_busy(mcspi->dev);
	pm_runtime_put_autosuspend(mcspi->dev);

	return ret;
}
1213 
1214 static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
1215 {
1216 	struct omap2_mcspi *mcspi = data;
1217 	u32 irqstat;
1218 
1219 	irqstat	= mcspi_read_reg(mcspi->ctlr, OMAP2_MCSPI_IRQSTATUS);
1220 	if (!irqstat)
1221 		return IRQ_NONE;
1222 
1223 	/* Disable IRQ and wakeup target xfer task */
1224 	mcspi_write_reg(mcspi->ctlr, OMAP2_MCSPI_IRQENABLE, 0);
1225 	if (irqstat & OMAP2_MCSPI_IRQSTATUS_EOW)
1226 		complete(&mcspi->txdone);
1227 
1228 	return IRQ_HANDLED;
1229 }
1230 
1231 static int omap2_mcspi_target_abort(struct spi_controller *ctlr)
1232 {
1233 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
1234 	struct omap2_mcspi_dma *mcspi_dma = mcspi->dma_channels;
1235 
1236 	mcspi->target_aborted = true;
1237 	complete(&mcspi_dma->dma_rx_completion);
1238 	complete(&mcspi_dma->dma_tx_completion);
1239 	complete(&mcspi->txdone);
1240 
1241 	return 0;
1242 }
1243 
/* Run one spi_transfer: program the channel, pick DMA / FIFO-PIO / PIO,
 * then restore any defaults the transfer overrode.
 */
static int omap2_mcspi_transfer_one(struct spi_controller *ctlr,
				    struct spi_device *spi,
				    struct spi_transfer *t)
{

	/* We only enable one channel at a time -- the one whose message is
	 * currently being processed -- although this controller would gladly
	 * arbitrate among multiple channels.  This corresponds to "single
	 * channel" host mode.  As a side effect, we need to manage the
	 * chipselect with the FORCE bit ... CS != channel enable.
	 */

	struct omap2_mcspi		*mcspi;
	struct omap2_mcspi_dma		*mcspi_dma;
	struct omap2_mcspi_cs		*cs;
	struct omap2_mcspi_device_config *cd;
	int				par_override = 0;
	int				status = 0;
	u32				chconf;

	mcspi = spi_controller_get_devdata(ctlr);
	mcspi_dma = mcspi->dma_channels + spi_get_chipselect(spi, 0);
	cs = spi->controller_state;
	cd = spi->controller_data;

	/*
	 * The target driver could have changed spi->mode in which case
	 * it will be different from cs->mode (the current hardware setup).
	 * If so, set par_override (even though its not a parity issue) so
	 * omap2_mcspi_setup_transfer will be called to configure the hardware
	 * with the correct mode on the first iteration of the loop below.
	 */
	if (spi->mode != cs->mode)
		par_override = 1;

	omap2_mcspi_set_enable(spi, 0);

	if (spi_get_csgpiod(spi, 0))
		omap2_mcspi_set_cs(spi, spi->mode & SPI_CS_HIGH);

	/* Reprogram the channel when speed or word length differ from the
	 * device defaults (or the mode changed, per par_override above).
	 */
	if (par_override ||
	    (t->speed_hz != spi->max_speed_hz) ||
	    (t->bits_per_word != spi->bits_per_word)) {
		par_override = 1;
		status = omap2_mcspi_setup_transfer(spi, t);
		if (status < 0)
			goto out;
		if (t->speed_hz == spi->max_speed_hz &&
		    t->bits_per_word == spi->bits_per_word)
			par_override = 0;
	}
	/* Leave "single channel" mode when CS must toggle per word */
	if (cd && cd->cs_per_word) {
		chconf = mcspi->ctx.modulctrl;
		chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
		mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, chconf);
		mcspi->ctx.modulctrl =
			mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
	}

	chconf = mcspi_cached_chconf0(spi);
	chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
	chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;

	/* Transmit/receive mode follows which buffers were supplied */
	if (t->tx_buf == NULL)
		chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
	else if (t->rx_buf == NULL)
		chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;

	if (cd && cd->turbo_mode && t->tx_buf == NULL) {
		/* Turbo mode is for more than one word */
		if (t->len > ((cs->word_len + 7) >> 3))
			chconf |= OMAP2_MCSPI_CHCONF_TURBO;
	}

	mcspi_write_chconf0(spi, chconf);

	if (t->len) {
		unsigned	count;

		if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
		    ctlr->cur_msg_mapped &&
		    ctlr->can_dma(ctlr, spi, t))
			omap2_mcspi_set_fifo(spi, t, 1, 1);
		else if (t->len > OMAP2_MCSPI_MAX_FIFODEPTH)
			omap2_mcspi_set_fifo(spi, t, 1, 0);

		omap2_mcspi_set_enable(spi, 1);

		/* RX_ONLY mode needs dummy data in TX reg */
		if (t->tx_buf == NULL)
			writel_relaxed(0, cs->base
					+ OMAP2_MCSPI_TX0);

		/* Same DMA-eligibility test as the set_fifo call above */
		if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
		    ctlr->cur_msg_mapped &&
		    ctlr->can_dma(ctlr, spi, t))
			count = omap2_mcspi_txrx_dma(spi, t);
		else if (mcspi->fifo_depth > 0)
			count = omap2_mcspi_txrx_piofifo(spi, t);
		else
			count = omap2_mcspi_txrx_pio(spi, t);

		/* Anything short of the full length is an I/O failure */
		if (count != t->len) {
			status = -EIO;
			goto out;
		}
	}

	omap2_mcspi_set_enable(spi, 0);

	if (mcspi->fifo_depth > 0)
		omap2_mcspi_set_fifo(spi, t, 0, 0);

out:
	/* Restore defaults if they were overridden */
	if (par_override) {
		par_override = 0;
		status = omap2_mcspi_setup_transfer(spi, NULL);
	}

	/* Re-enter "single channel" mode if cs_per_word dropped it above */
	if (cd && cd->cs_per_word) {
		chconf = mcspi->ctx.modulctrl;
		chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE;
		mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, chconf);
		mcspi->ctx.modulctrl =
			mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
	}

	/* NOTE(review): on the success path set_enable(0) and the FIFO
	 * teardown already ran just before "out:", so both repeat here;
	 * looks redundant but harmless -- confirm before simplifying.
	 */
	omap2_mcspi_set_enable(spi, 0);

	if (spi_get_csgpiod(spi, 0))
		omap2_mcspi_set_cs(spi, !(spi->mode & SPI_CS_HIGH));

	if (mcspi->fifo_depth > 0 && t)
		omap2_mcspi_set_fifo(spi, t, 0, 0);

	return status;
}
1382 
1383 static int omap2_mcspi_prepare_message(struct spi_controller *ctlr,
1384 				       struct spi_message *msg)
1385 {
1386 	struct omap2_mcspi	*mcspi = spi_controller_get_devdata(ctlr);
1387 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1388 	struct omap2_mcspi_cs	*cs;
1389 
1390 	/* Only a single channel can have the FORCE bit enabled
1391 	 * in its chconf0 register.
1392 	 * Scan all channels and disable them except the current one.
1393 	 * A FORCE can remain from a last transfer having cs_change enabled
1394 	 */
1395 	list_for_each_entry(cs, &ctx->cs, node) {
1396 		if (msg->spi->controller_state == cs)
1397 			continue;
1398 
1399 		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE)) {
1400 			cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
1401 			writel_relaxed(cs->chconf0,
1402 					cs->base + OMAP2_MCSPI_CHCONF0);
1403 			readl_relaxed(cs->base + OMAP2_MCSPI_CHCONF0);
1404 		}
1405 	}
1406 
1407 	return 0;
1408 }
1409 
1410 static bool omap2_mcspi_can_dma(struct spi_controller *ctlr,
1411 				struct spi_device *spi,
1412 				struct spi_transfer *xfer)
1413 {
1414 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
1415 	struct omap2_mcspi_dma *mcspi_dma =
1416 		&mcspi->dma_channels[spi_get_chipselect(spi, 0)];
1417 
1418 	if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx)
1419 		return false;
1420 
1421 	if (spi_controller_is_target(ctlr))
1422 		return true;
1423 
1424 	ctlr->dma_rx = mcspi_dma->dma_rx;
1425 	ctlr->dma_tx = mcspi_dma->dma_tx;
1426 
1427 	return (xfer->len >= DMA_MIN_BYTES);
1428 }
1429 
1430 static size_t omap2_mcspi_max_xfer_size(struct spi_device *spi)
1431 {
1432 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(spi->controller);
1433 	struct omap2_mcspi_dma *mcspi_dma =
1434 		&mcspi->dma_channels[spi_get_chipselect(spi, 0)];
1435 
1436 	if (mcspi->max_xfer_len && mcspi_dma->dma_rx)
1437 		return mcspi->max_xfer_len;
1438 
1439 	return SIZE_MAX;
1440 }
1441 
1442 static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi)
1443 {
1444 	struct spi_controller	*ctlr = mcspi->ctlr;
1445 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1446 	int			ret = 0;
1447 
1448 	ret = pm_runtime_resume_and_get(mcspi->dev);
1449 	if (ret < 0)
1450 		return ret;
1451 
1452 	mcspi_write_reg(ctlr, OMAP2_MCSPI_WAKEUPENABLE,
1453 			OMAP2_MCSPI_WAKEUPENABLE_WKEN);
1454 	ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
1455 
1456 	omap2_mcspi_set_mode(ctlr);
1457 	pm_runtime_mark_last_busy(mcspi->dev);
1458 	pm_runtime_put_autosuspend(mcspi->dev);
1459 	return 0;
1460 }
1461 
static int omap_mcspi_runtime_suspend(struct device *dev)
{
	int ret = pinctrl_pm_select_idle_state(dev);

	/* Pin-state failure is logged but never blocks the suspend */
	if (ret)
		dev_warn(dev, "%s: failed to set pins: %i\n", __func__, ret);

	return 0;
}
1472 
/*
 * When the SPI wakes up from off-mode, CS is in the active state. If it was
 * inactive when the driver was suspended, force it back to the inactive
 * state at wake up.
 */
static int omap_mcspi_runtime_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
	struct omap2_mcspi_regs *ctx = &mcspi->ctx;
	struct omap2_mcspi_cs *cs;
	int error;

	error = pinctrl_pm_select_default_state(dev);
	if (error)
		dev_warn(dev, "%s: failed to set pins: %i\n", __func__, error);

	/* McSPI: context restore */
	mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
	mcspi_write_reg(ctlr, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);

	list_for_each_entry(cs, &ctx->cs, node) {
		/*
		 * We need to toggle the CS state for the OMAP to take this
		 * change into account.
		 */
		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
			/* CS was inactive: pulse FORCE on and back off */
			cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE;
			writel_relaxed(cs->chconf0,
				       cs->base + OMAP2_MCSPI_CHCONF0);
			cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
			writel_relaxed(cs->chconf0,
				       cs->base + OMAP2_MCSPI_CHCONF0);
		} else {
			/* CS held active: just rewrite the saved config */
			writel_relaxed(cs->chconf0,
				       cs->base + OMAP2_MCSPI_CHCONF0);
		}
	}

	return 0;
}
1514 
/* OMAP2/3 layout: registers start right at the mapped base */
static struct omap2_mcspi_platform_config omap2_pdata = {
	.regs_offset = 0,
};

/* OMAP4+ layout: register block shifted by OMAP4_MCSPI_REG_OFFSET */
static struct omap2_mcspi_platform_config omap4_pdata = {
	.regs_offset = OMAP4_MCSPI_REG_OFFSET,
};

/* AM654: OMAP4 register layout plus a per-transfer length cap (< 4 KiB),
 * advertised via omap2_mcspi_max_xfer_size().
 */
static struct omap2_mcspi_platform_config am654_pdata = {
	.regs_offset = OMAP4_MCSPI_REG_OFFSET,
	.max_xfer_len = SZ_4K - 1,
};
1527 
/* Match data selects the register offset (and AM654's transfer-length cap) */
static const struct of_device_id omap_mcspi_of_match[] = {
	{
		.compatible = "ti,omap2-mcspi",
		.data = &omap2_pdata,
	},
	{
		.compatible = "ti,omap4-mcspi",
		.data = &omap4_pdata,
	},
	{
		.compatible = "ti,am654-mcspi",
		.data = &am654_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
1544 
/*
 * Probe: allocate a host (or, with DT "spi-slave", a target) controller,
 * map the registers, request DMA channels and the IRQ, then register it.
 */
static int omap2_mcspi_probe(struct platform_device *pdev)
{
	struct spi_controller	*ctlr;
	const struct omap2_mcspi_platform_config *pdata;
	struct omap2_mcspi	*mcspi;
	struct resource		*r;
	int			status = 0, i;
	u32			regs_offset = 0;
	struct device_node	*node = pdev->dev.of_node;
	const struct of_device_id *match;

	if (of_property_read_bool(node, "spi-slave"))
		ctlr = spi_alloc_target(&pdev->dev, sizeof(*mcspi));
	else
		ctlr = spi_alloc_host(&pdev->dev, sizeof(*mcspi));
	if (!ctlr)
		return -ENOMEM;

	/* the spi->mode bits understood by this driver: */
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	ctlr->setup = omap2_mcspi_setup;
	ctlr->auto_runtime_pm = true;
	ctlr->prepare_message = omap2_mcspi_prepare_message;
	ctlr->can_dma = omap2_mcspi_can_dma;
	ctlr->transfer_one = omap2_mcspi_transfer_one;
	ctlr->set_cs = omap2_mcspi_set_cs;
	ctlr->cleanup = omap2_mcspi_cleanup;
	ctlr->target_abort = omap2_mcspi_target_abort;
	ctlr->dev.of_node = node;
	ctlr->use_gpio_descriptors = true;

	platform_set_drvdata(pdev, ctlr);

	mcspi = spi_controller_get_devdata(ctlr);
	mcspi->ctlr = ctlr;

	match = of_match_device(omap_mcspi_of_match, &pdev->dev);
	if (match) {
		u32 num_cs = 1; /* default number of chipselect */
		pdata = match->data;

		of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
		ctlr->num_chipselect = num_cs;
		if (of_property_read_bool(node, "ti,pindir-d0-out-d1-in"))
			mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
	} else {
		/* Legacy boot: configuration comes from platform data */
		pdata = dev_get_platdata(&pdev->dev);
		ctlr->num_chipselect = pdata->num_cs;
		mcspi->pin_dir = pdata->pin_dir;
	}
	regs_offset = pdata->regs_offset;
	if (pdata->max_xfer_len) {
		/* e.g. AM654: expose a per-transfer length limit */
		mcspi->max_xfer_len = pdata->max_xfer_len;
		ctlr->max_transfer_size = omap2_mcspi_max_xfer_size;
	}

	mcspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
	if (IS_ERR(mcspi->base)) {
		status = PTR_ERR(mcspi->base);
		goto free_ctlr;
	}
	/* OMAP4+ parts place the McSPI registers at an offset */
	mcspi->phys = r->start + regs_offset;
	mcspi->base += regs_offset;

	mcspi->dev = &pdev->dev;

	INIT_LIST_HEAD(&mcspi->ctx.cs);

	mcspi->dma_channels = devm_kcalloc(&pdev->dev, ctlr->num_chipselect,
					   sizeof(struct omap2_mcspi_dma),
					   GFP_KERNEL);
	if (mcspi->dma_channels == NULL) {
		status = -ENOMEM;
		goto free_ctlr;
	}

	for (i = 0; i < ctlr->num_chipselect; i++) {
		sprintf(mcspi->dma_channels[i].dma_rx_ch_name, "rx%d", i);
		sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i);

		/* Missing DMA just means PIO; only a deferral aborts probe */
		status = omap2_mcspi_request_dma(mcspi,
						 &mcspi->dma_channels[i]);
		if (status == -EPROBE_DEFER)
			goto free_ctlr;
	}

	status = platform_get_irq(pdev, 0);
	if (status < 0)
		goto free_ctlr;
	init_completion(&mcspi->txdone);
	status = devm_request_irq(&pdev->dev, status,
				  omap2_mcspi_irq_handler, 0, pdev->name,
				  mcspi);
	if (status) {
		dev_err(&pdev->dev, "Cannot request IRQ");
		goto free_ctlr;
	}

	/* An optional DT clock overrides the default 48 MHz reference */
	mcspi->ref_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
	if (mcspi->ref_clk)
		mcspi->ref_clk_hz = clk_get_rate(mcspi->ref_clk);
	else
		mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ;
	ctlr->max_speed_hz = mcspi->ref_clk_hz;
	ctlr->min_speed_hz = mcspi->ref_clk_hz >> 15;

	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
	pm_runtime_enable(&pdev->dev);

	status = omap2_mcspi_controller_setup(mcspi);
	if (status < 0)
		goto disable_pm;

	status = devm_spi_register_controller(&pdev->dev, ctlr);
	if (status < 0)
		goto disable_pm;

	return status;

disable_pm:
	/* NOTE(review): controller_setup drops its own PM reference before
	 * returning, so the put_sync here appears to run without a matching
	 * get on this path -- confirm the usage-count balance.
	 */
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
free_ctlr:
	omap2_mcspi_release_dma(ctlr);
	spi_controller_put(ctlr);
	return status;
}
1675 
/* Remove: the controller itself was devm-registered, so only the DMA
 * channels and runtime-PM state need explicit teardown here.
 */
static void omap2_mcspi_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = platform_get_drvdata(pdev);
	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);

	omap2_mcspi_release_dma(ctlr);

	pm_runtime_dont_use_autosuspend(mcspi->dev);
	pm_runtime_put_sync(mcspi->dev);
	pm_runtime_disable(&pdev->dev);
}
1687 
1688 /* work with hotplug and coldplug */
1689 MODULE_ALIAS("platform:omap2_mcspi");
1690 
1691 static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
1692 {
1693 	struct spi_controller *ctlr = dev_get_drvdata(dev);
1694 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
1695 	int error;
1696 
1697 	error = pinctrl_pm_select_sleep_state(dev);
1698 	if (error)
1699 		dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
1700 			 __func__, error);
1701 
1702 	error = spi_controller_suspend(ctlr);
1703 	if (error)
1704 		dev_warn(mcspi->dev, "%s: controller suspend failed: %i\n",
1705 			 __func__, error);
1706 
1707 	return pm_runtime_force_suspend(dev);
1708 }
1709 
1710 static int __maybe_unused omap2_mcspi_resume(struct device *dev)
1711 {
1712 	struct spi_controller *ctlr = dev_get_drvdata(dev);
1713 	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
1714 	int error;
1715 
1716 	error = spi_controller_resume(ctlr);
1717 	if (error)
1718 		dev_warn(mcspi->dev, "%s: controller resume failed: %i\n",
1719 			 __func__, error);
1720 
1721 	return pm_runtime_force_resume(dev);
1722 }
1723 
/* System sleep goes through omap2_mcspi_suspend/resume; runtime PM only
 * switches pin states and restores the saved register context.
 */
static const struct dev_pm_ops omap2_mcspi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap2_mcspi_suspend,
				omap2_mcspi_resume)
	.runtime_suspend	= omap_mcspi_runtime_suspend,
	.runtime_resume		= omap_mcspi_runtime_resume,
};
1730 
/* Bound via the OF match table (DT) or by name "omap2_mcspi" (legacy) */
static struct platform_driver omap2_mcspi_driver = {
	.driver = {
		.name =		"omap2_mcspi",
		.pm =		&omap2_mcspi_pm_ops,
		.of_match_table = omap_mcspi_of_match,
	},
	.probe =	omap2_mcspi_probe,
	.remove_new =	omap2_mcspi_remove,
};
1740 
1741 module_platform_driver(omap2_mcspi_driver);
1742 MODULE_LICENSE("GPL");
1743