xref: /linux/drivers/spi/spi-omap2-mcspi.c (revision b889fcf63cb62e7fdb7816565e28f44dbe4a76a5)
1 /*
2  * OMAP2 McSPI controller driver
3  *
4  * Copyright (C) 2005, 2006 Nokia Corporation
5  * Author:	Samuel Ortiz <samuel.ortiz@nokia.com> and
6  *		Juha Yrjölä <juha.yrjola@nokia.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21  *
22  */
23 
24 #include <linux/kernel.h>
25 #include <linux/init.h>
26 #include <linux/interrupt.h>
27 #include <linux/module.h>
28 #include <linux/device.h>
29 #include <linux/delay.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/dmaengine.h>
32 #include <linux/omap-dma.h>
33 #include <linux/platform_device.h>
34 #include <linux/err.h>
35 #include <linux/clk.h>
36 #include <linux/io.h>
37 #include <linux/slab.h>
38 #include <linux/pm_runtime.h>
39 #include <linux/of.h>
40 #include <linux/of_device.h>
41 #include <linux/pinctrl/consumer.h>
42 
43 #include <linux/spi/spi.h>
44 
45 #include <linux/platform_data/spi-omap2-mcspi.h>
46 
47 #define OMAP2_MCSPI_MAX_FREQ		48000000
48 #define SPI_AUTOSUSPEND_TIMEOUT		2000
49 
50 #define OMAP2_MCSPI_REVISION		0x00
51 #define OMAP2_MCSPI_SYSSTATUS		0x14
52 #define OMAP2_MCSPI_IRQSTATUS		0x18
53 #define OMAP2_MCSPI_IRQENABLE		0x1c
54 #define OMAP2_MCSPI_WAKEUPENABLE	0x20
55 #define OMAP2_MCSPI_SYST		0x24
56 #define OMAP2_MCSPI_MODULCTRL		0x28
57 
58 /* per-channel banks, 0x14 bytes each, first is: */
59 #define OMAP2_MCSPI_CHCONF0		0x2c
60 #define OMAP2_MCSPI_CHSTAT0		0x30
61 #define OMAP2_MCSPI_CHCTRL0		0x34
62 #define OMAP2_MCSPI_TX0			0x38
63 #define OMAP2_MCSPI_RX0			0x3c
64 
65 /* per-register bitmasks: */
66 
67 #define OMAP2_MCSPI_MODULCTRL_SINGLE	BIT(0)
68 #define OMAP2_MCSPI_MODULCTRL_MS	BIT(2)
69 #define OMAP2_MCSPI_MODULCTRL_STEST	BIT(3)
70 
71 #define OMAP2_MCSPI_CHCONF_PHA		BIT(0)
72 #define OMAP2_MCSPI_CHCONF_POL		BIT(1)
73 #define OMAP2_MCSPI_CHCONF_CLKD_MASK	(0x0f << 2)
74 #define OMAP2_MCSPI_CHCONF_EPOL		BIT(6)
75 #define OMAP2_MCSPI_CHCONF_WL_MASK	(0x1f << 7)
76 #define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY	BIT(12)
77 #define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY	BIT(13)
78 #define OMAP2_MCSPI_CHCONF_TRM_MASK	(0x03 << 12)
79 #define OMAP2_MCSPI_CHCONF_DMAW		BIT(14)
80 #define OMAP2_MCSPI_CHCONF_DMAR		BIT(15)
81 #define OMAP2_MCSPI_CHCONF_DPE0		BIT(16)
82 #define OMAP2_MCSPI_CHCONF_DPE1		BIT(17)
83 #define OMAP2_MCSPI_CHCONF_IS		BIT(18)
84 #define OMAP2_MCSPI_CHCONF_TURBO	BIT(19)
85 #define OMAP2_MCSPI_CHCONF_FORCE	BIT(20)
86 
87 #define OMAP2_MCSPI_CHSTAT_RXS		BIT(0)
88 #define OMAP2_MCSPI_CHSTAT_TXS		BIT(1)
89 #define OMAP2_MCSPI_CHSTAT_EOT		BIT(2)
90 
91 #define OMAP2_MCSPI_CHCTRL_EN		BIT(0)
92 
93 #define OMAP2_MCSPI_WAKEUPENABLE_WKEN	BIT(0)
94 
95 /* We have 2 DMA channels per CS, one for RX and one for TX */
96 struct omap2_mcspi_dma {
97 	struct dma_chan *dma_tx;
98 	struct dma_chan *dma_rx;
99 
100 	int dma_tx_sync_dev;
101 	int dma_rx_sync_dev;
102 
103 	struct completion dma_tx_completion;
104 	struct completion dma_rx_completion;
105 };
106 
107 /* Use PIO for small transfers, avoiding DMA setup/teardown overhead and
108  * cache operations; a better heuristic would also consider word size and bit rate.
109  */
110 #define DMA_MIN_BYTES			160
111 
112 
113 /*
114  * Used for context save and restore; the structure members must be updated
115  * whenever the corresponding registers are modified.
116  */
117 struct omap2_mcspi_regs {
118 	u32 modulctrl;
119 	u32 wakeupenable;
120 	struct list_head cs;
121 };
122 
123 struct omap2_mcspi {
124 	struct spi_master	*master;
125 	/* Virtual base address of the controller */
126 	void __iomem		*base;
127 	unsigned long		phys;
128 	/* SPI1 has 4 channels, while SPI2 has 2 */
129 	struct omap2_mcspi_dma	*dma_channels;
130 	struct device		*dev;
131 	struct omap2_mcspi_regs ctx;
132 	unsigned int		pin_dir:1;
133 };
134 
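/*
 * Per-chip-select state; each instance is linked into the controller's
 * context list (omap2_mcspi_regs.cs) so its CHCONF0 shadow can be restored
 * on resume.
 */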
135 struct omap2_mcspi_cs {
136 	void __iomem		*base;
137 	unsigned long		phys;
138 	int			word_len;
139 	struct list_head	node;
140 	/* Context save and restore shadow register */
141 	u32			chconf0;
142 };
143 
144 static inline void mcspi_write_reg(struct spi_master *master,
145 		int idx, u32 val)
146 {
147 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
148 
149 	__raw_writel(val, mcspi->base + idx);
150 }
151 
152 static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
153 {
154 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
155 
156 	return __raw_readl(mcspi->base + idx);
157 }
158 
159 static inline void mcspi_write_cs_reg(const struct spi_device *spi,
160 		int idx, u32 val)
161 {
162 	struct omap2_mcspi_cs	*cs = spi->controller_state;
163 
164 	__raw_writel(val, cs->base +  idx);
165 }
166 
167 static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
168 {
169 	struct omap2_mcspi_cs	*cs = spi->controller_state;
170 
171 	return __raw_readl(cs->base + idx);
172 }
173 
174 static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
175 {
176 	struct omap2_mcspi_cs *cs = spi->controller_state;
177 
178 	return cs->chconf0;
179 }
180 
181 static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
182 {
183 	struct omap2_mcspi_cs *cs = spi->controller_state;
184 
185 	cs->chconf0 = val;
186 	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
187 	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0); /* flush posted write */
188 }
189 
190 static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
191 		int is_read, int enable)
192 {
193 	u32 l, rw;
194 
195 	l = mcspi_cached_chconf0(spi);
196 
197 	if (is_read) /* 1 is read, 0 write */
198 		rw = OMAP2_MCSPI_CHCONF_DMAR;
199 	else
200 		rw = OMAP2_MCSPI_CHCONF_DMAW;
201 
202 	if (enable)
203 		l |= rw;
204 	else
205 		l &= ~rw;
206 
207 	mcspi_write_chconf0(spi, l);
208 }
209 
210 static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
211 {
212 	u32 l;
213 
214 	l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
215 	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
216 	/* Flush posted writes */
217 	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
218 }
219 
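/*
 * Drive the chip select manually via the FORCE bit: in single-channel
 * master mode CS is not tied to the channel enable.
 */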
220 static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
221 {
222 	u32 l;
223 
224 	l = mcspi_cached_chconf0(spi);
225 	if (cs_active)
226 		l |= OMAP2_MCSPI_CHCONF_FORCE;
227 	else
228 		l &= ~OMAP2_MCSPI_CHCONF_FORCE;
229 
230 	mcspi_write_chconf0(spi, l);
231 }
232 
233 static void omap2_mcspi_set_master_mode(struct spi_master *master)
234 {
235 	struct omap2_mcspi	*mcspi = spi_master_get_devdata(master);
236 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
237 	u32 l;
238 
239 	/*
240 	 * Setup when switching from (reset default) slave mode
241 	 * to single-channel master mode
242 	 */
243 	l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
244 	l &= ~(OMAP2_MCSPI_MODULCTRL_STEST | OMAP2_MCSPI_MODULCTRL_MS);
245 	l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
246 	mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
247 
248 	ctx->modulctrl = l;
249 }
250 
251 static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
252 {
253 	struct spi_master	*spi_cntrl = mcspi->master;
254 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
255 	struct omap2_mcspi_cs	*cs;
256 
257 	/* McSPI: context restore */
258 	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
259 	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
260 
261 	list_for_each_entry(cs, &ctx->cs, node)
262 		__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
263 }
264 
265 static int omap2_prepare_transfer(struct spi_master *master)
266 {
267 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
268 
269 	pm_runtime_get_sync(mcspi->dev);
270 	return 0;
271 }
272 
273 static int omap2_unprepare_transfer(struct spi_master *master)
274 {
275 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
276 
277 	pm_runtime_mark_last_busy(mcspi->dev);
278 	pm_runtime_put_autosuspend(mcspi->dev);
279 	return 0;
280 }
281 
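/* Busy-wait, with a one second timeout, for a channel status bit to be set. */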
282 static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
283 {
284 	unsigned long timeout;
285 
286 	timeout = jiffies + msecs_to_jiffies(1000);
287 	while (!(__raw_readl(reg) & bit)) {
288 		if (time_after(jiffies, timeout))
289 			return -1;
290 		cpu_relax();
291 	}
292 	return 0;
293 }
294 
295 static void omap2_mcspi_rx_callback(void *data)
296 {
297 	struct spi_device *spi = data;
298 	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
299 	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
300 
301 	complete(&mcspi_dma->dma_rx_completion);
302 
303 	/* We must disable the DMA RX request */
304 	omap2_mcspi_set_dma_req(spi, 1, 0);
305 }
306 
307 static void omap2_mcspi_tx_callback(void *data)
308 {
309 	struct spi_device *spi = data;
310 	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
311 	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
312 
313 	complete(&mcspi_dma->dma_tx_completion);
314 
315 	/* We must disable the DMA TX request */
316 	omap2_mcspi_set_dma_req(spi, 0, 0);
317 }
318 
319 static void omap2_mcspi_tx_dma(struct spi_device *spi,
320 				struct spi_transfer *xfer,
321 				struct dma_slave_config cfg)
322 {
323 	struct omap2_mcspi	*mcspi;
324 	struct omap2_mcspi_dma  *mcspi_dma;
325 	unsigned int		count;
326 
327 	mcspi = spi_master_get_devdata(spi->master);
328 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
329 	count = xfer->len;
330 
331 	if (mcspi_dma->dma_tx) {
332 		struct dma_async_tx_descriptor *tx;
333 		struct scatterlist sg;
334 
335 		dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
336 
337 		sg_init_table(&sg, 1);
338 		sg_dma_address(&sg) = xfer->tx_dma;
339 		sg_dma_len(&sg) = xfer->len;
340 
341 		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
342 		DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
343 		if (tx) {
344 			tx->callback = omap2_mcspi_tx_callback;
345 			tx->callback_param = spi;
346 			dmaengine_submit(tx);
347 		} else {
348 			/* FIXME: fall back to PIO? */
349 		}
350 	}
351 	dma_async_issue_pending(mcspi_dma->dma_tx);
352 	omap2_mcspi_set_dma_req(spi, 0, 1);
353 
354 }
355 
356 static unsigned
357 omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
358 				struct dma_slave_config cfg,
359 				unsigned es)
360 {
361 	struct omap2_mcspi	*mcspi;
362 	struct omap2_mcspi_dma  *mcspi_dma;
363 	unsigned int		count;
364 	u32			l;
365 	int			elements = 0;
366 	int			word_len, element_count;
367 	struct omap2_mcspi_cs	*cs = spi->controller_state;
368 	mcspi = spi_master_get_devdata(spi->master);
369 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
370 	count = xfer->len;
371 	word_len = cs->word_len;
372 	l = mcspi_cached_chconf0(spi);
373 
374 	if (word_len <= 8)
375 		element_count = count;
376 	else if (word_len <= 16)
377 		element_count = count >> 1;
378 	else /* word_len <= 32 */
379 		element_count = count >> 2;
380 
381 	if (mcspi_dma->dma_rx) {
382 		struct dma_async_tx_descriptor *tx;
383 		struct scatterlist sg;
384 		size_t len = xfer->len - es;
385 
386 		dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
387 
388 		if (l & OMAP2_MCSPI_CHCONF_TURBO)
389 			len -= es;
390 
391 		sg_init_table(&sg, 1);
392 		sg_dma_address(&sg) = xfer->rx_dma;
393 		sg_dma_len(&sg) = len;
394 
395 		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
396 				DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
397 				DMA_CTRL_ACK);
398 		if (tx) {
399 			tx->callback = omap2_mcspi_rx_callback;
400 			tx->callback_param = spi;
401 			dmaengine_submit(tx);
402 		} else {
403 				/* FIXME: fall back to PIO? */
404 		}
405 	}
406 
407 	dma_async_issue_pending(mcspi_dma->dma_rx);
408 	omap2_mcspi_set_dma_req(spi, 1, 1);
409 
410 	wait_for_completion(&mcspi_dma->dma_rx_completion);
411 	dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
412 			 DMA_FROM_DEVICE);
413 	omap2_mcspi_set_enable(spi, 0);
414 
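	/*
	 * The DMA transfer above was shortened by one element (two in TURBO
	 * mode), so the remaining word(s) are drained from RX0 by PIO below.
	 */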
415 	elements = element_count - 1;
416 
417 	if (l & OMAP2_MCSPI_CHCONF_TURBO) {
418 		elements--;
419 
420 		if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
421 				   & OMAP2_MCSPI_CHSTAT_RXS)) {
422 			u32 w;
423 
424 			w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
425 			if (word_len <= 8)
426 				((u8 *)xfer->rx_buf)[elements++] = w;
427 			else if (word_len <= 16)
428 				((u16 *)xfer->rx_buf)[elements++] = w;
429 			else /* word_len <= 32 */
430 				((u32 *)xfer->rx_buf)[elements++] = w;
431 		} else {
432 			dev_err(&spi->dev, "DMA RX penultimate word empty\n");
433 			count -= (word_len <= 8)  ? 2 :
434 				(word_len <= 16) ? 4 :
435 				/* word_len <= 32 */ 8;
436 			omap2_mcspi_set_enable(spi, 1);
437 			return count;
438 		}
439 	}
440 	if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
441 				& OMAP2_MCSPI_CHSTAT_RXS)) {
442 		u32 w;
443 
444 		w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
445 		if (word_len <= 8)
446 			((u8 *)xfer->rx_buf)[elements] = w;
447 		else if (word_len <= 16)
448 			((u16 *)xfer->rx_buf)[elements] = w;
449 		else /* word_len <= 32 */
450 			((u32 *)xfer->rx_buf)[elements] = w;
451 	} else {
452 		dev_err(&spi->dev, "DMA RX last word empty\n");
453 		count -= (word_len <= 8)  ? 1 :
454 			 (word_len <= 16) ? 2 :
455 		       /* word_len <= 32 */ 4;
456 	}
457 	omap2_mcspi_set_enable(spi, 1);
458 	return count;
459 }
460 
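/*
 * Full-duplex transfer using dmaengine; the buffers were already DMA-mapped
 * in omap2_mcspi_transfer_one_message() (or by the caller, for is_dma_mapped
 * messages) before this is called.
 */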
461 static unsigned
462 omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
463 {
464 	struct omap2_mcspi	*mcspi;
465 	struct omap2_mcspi_cs	*cs = spi->controller_state;
466 	struct omap2_mcspi_dma  *mcspi_dma;
467 	unsigned int		count;
468 	u32			l;
469 	u8			*rx;
470 	const u8		*tx;
471 	struct dma_slave_config	cfg;
472 	enum dma_slave_buswidth width;
473 	unsigned es;
474 	void __iomem		*chstat_reg;
475 
476 	mcspi = spi_master_get_devdata(spi->master);
477 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
478 	l = mcspi_cached_chconf0(spi);
479 
480 
481 	if (cs->word_len <= 8) {
482 		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
483 		es = 1;
484 	} else if (cs->word_len <= 16) {
485 		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
486 		es = 2;
487 	} else {
488 		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
489 		es = 4;
490 	}
491 
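	/*
	 * Both directions use the per-CS data registers as the FIFO address
	 * and move one word per DMA request (maxburst = 1).
	 */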
492 	memset(&cfg, 0, sizeof(cfg));
493 	cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
494 	cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
495 	cfg.src_addr_width = width;
496 	cfg.dst_addr_width = width;
497 	cfg.src_maxburst = 1;
498 	cfg.dst_maxburst = 1;
499 
500 	rx = xfer->rx_buf;
501 	tx = xfer->tx_buf;
502 
503 	count = xfer->len;
504 
505 	if (tx != NULL)
506 		omap2_mcspi_tx_dma(spi, xfer, cfg);
507 
508 	if (rx != NULL)
509 		count = omap2_mcspi_rx_dma(spi, xfer, cfg, es);
510 
511 	if (tx != NULL) {
512 		chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
513 		wait_for_completion(&mcspi_dma->dma_tx_completion);
514 		dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
515 				 DMA_TO_DEVICE);
516 
517 		/* for TX_ONLY mode, be sure all words have shifted out */
518 		if (rx == NULL) {
519 			if (mcspi_wait_for_reg_bit(chstat_reg,
520 						OMAP2_MCSPI_CHSTAT_TXS) < 0)
521 				dev_err(&spi->dev, "TXS timed out\n");
522 			else if (mcspi_wait_for_reg_bit(chstat_reg,
523 						OMAP2_MCSPI_CHSTAT_EOT) < 0)
524 				dev_err(&spi->dev, "EOT timed out\n");
525 		}
526 	}
527 	return count;
528 }
529 
530 static unsigned
531 omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
532 {
533 	struct omap2_mcspi	*mcspi;
534 	struct omap2_mcspi_cs	*cs = spi->controller_state;
535 	unsigned int		count, c;
536 	u32			l;
537 	void __iomem		*base = cs->base;
538 	void __iomem		*tx_reg;
539 	void __iomem		*rx_reg;
540 	void __iomem		*chstat_reg;
541 	int			word_len;
542 
543 	mcspi = spi_master_get_devdata(spi->master);
544 	count = xfer->len;
545 	c = count;
546 	word_len = cs->word_len;
547 
548 	l = mcspi_cached_chconf0(spi);
549 
550 	/* We store the pre-calculated register addresses on stack to speed
551 	 * up the transfer loop. */
552 	tx_reg		= base + OMAP2_MCSPI_TX0;
553 	rx_reg		= base + OMAP2_MCSPI_RX0;
554 	chstat_reg	= base + OMAP2_MCSPI_CHSTAT0;
555 
556 	if (c < (word_len>>3))
557 		return 0;
558 
559 	if (word_len <= 8) {
560 		u8		*rx;
561 		const u8	*tx;
562 
563 		rx = xfer->rx_buf;
564 		tx = xfer->tx_buf;
565 
566 		do {
567 			c -= 1;
568 			if (tx != NULL) {
569 				if (mcspi_wait_for_reg_bit(chstat_reg,
570 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
571 					dev_err(&spi->dev, "TXS timed out\n");
572 					goto out;
573 				}
574 				dev_vdbg(&spi->dev, "write-%d %02x\n",
575 						word_len, *tx);
576 				__raw_writel(*tx++, tx_reg);
577 			}
578 			if (rx != NULL) {
579 				if (mcspi_wait_for_reg_bit(chstat_reg,
580 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
581 					dev_err(&spi->dev, "RXS timed out\n");
582 					goto out;
583 				}
584 
585 				if (c == 1 && tx == NULL &&
586 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
587 					omap2_mcspi_set_enable(spi, 0);
588 					*rx++ = __raw_readl(rx_reg);
589 					dev_vdbg(&spi->dev, "read-%d %02x\n",
590 						    word_len, *(rx - 1));
591 					if (mcspi_wait_for_reg_bit(chstat_reg,
592 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
593 						dev_err(&spi->dev,
594 							"RXS timed out\n");
595 						goto out;
596 					}
597 					c = 0;
598 				} else if (c == 0 && tx == NULL) {
599 					omap2_mcspi_set_enable(spi, 0);
600 				}
601 
602 				*rx++ = __raw_readl(rx_reg);
603 				dev_vdbg(&spi->dev, "read-%d %02x\n",
604 						word_len, *(rx - 1));
605 			}
606 		} while (c);
607 	} else if (word_len <= 16) {
608 		u16		*rx;
609 		const u16	*tx;
610 
611 		rx = xfer->rx_buf;
612 		tx = xfer->tx_buf;
613 		do {
614 			c -= 2;
615 			if (tx != NULL) {
616 				if (mcspi_wait_for_reg_bit(chstat_reg,
617 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
618 					dev_err(&spi->dev, "TXS timed out\n");
619 					goto out;
620 				}
621 				dev_vdbg(&spi->dev, "write-%d %04x\n",
622 						word_len, *tx);
623 				__raw_writel(*tx++, tx_reg);
624 			}
625 			if (rx != NULL) {
626 				if (mcspi_wait_for_reg_bit(chstat_reg,
627 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
628 					dev_err(&spi->dev, "RXS timed out\n");
629 					goto out;
630 				}
631 
632 				if (c == 2 && tx == NULL &&
633 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
634 					omap2_mcspi_set_enable(spi, 0);
635 					*rx++ = __raw_readl(rx_reg);
636 					dev_vdbg(&spi->dev, "read-%d %04x\n",
637 						    word_len, *(rx - 1));
638 					if (mcspi_wait_for_reg_bit(chstat_reg,
639 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
640 						dev_err(&spi->dev,
641 							"RXS timed out\n");
642 						goto out;
643 					}
644 					c = 0;
645 				} else if (c == 0 && tx == NULL) {
646 					omap2_mcspi_set_enable(spi, 0);
647 				}
648 
649 				*rx++ = __raw_readl(rx_reg);
650 				dev_vdbg(&spi->dev, "read-%d %04x\n",
651 						word_len, *(rx - 1));
652 			}
653 		} while (c >= 2);
654 	} else if (word_len <= 32) {
655 		u32		*rx;
656 		const u32	*tx;
657 
658 		rx = xfer->rx_buf;
659 		tx = xfer->tx_buf;
660 		do {
661 			c -= 4;
662 			if (tx != NULL) {
663 				if (mcspi_wait_for_reg_bit(chstat_reg,
664 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
665 					dev_err(&spi->dev, "TXS timed out\n");
666 					goto out;
667 				}
668 				dev_vdbg(&spi->dev, "write-%d %08x\n",
669 						word_len, *tx);
670 				__raw_writel(*tx++, tx_reg);
671 			}
672 			if (rx != NULL) {
673 				if (mcspi_wait_for_reg_bit(chstat_reg,
674 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
675 					dev_err(&spi->dev, "RXS timed out\n");
676 					goto out;
677 				}
678 
679 				if (c == 4 && tx == NULL &&
680 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
681 					omap2_mcspi_set_enable(spi, 0);
682 					*rx++ = __raw_readl(rx_reg);
683 					dev_vdbg(&spi->dev, "read-%d %08x\n",
684 						    word_len, *(rx - 1));
685 					if (mcspi_wait_for_reg_bit(chstat_reg,
686 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
687 						dev_err(&spi->dev,
688 							"RXS timed out\n");
689 						goto out;
690 					}
691 					c = 0;
692 				} else if (c == 0 && tx == NULL) {
693 					omap2_mcspi_set_enable(spi, 0);
694 				}
695 
696 				*rx++ = __raw_readl(rx_reg);
697 				dev_vdbg(&spi->dev, "read-%d %08x\n",
698 						word_len, *(rx - 1));
699 			}
700 		} while (c >= 4);
701 	}
702 
703 	/* for TX_ONLY mode, be sure all words have shifted out */
704 	if (xfer->rx_buf == NULL) {
705 		if (mcspi_wait_for_reg_bit(chstat_reg,
706 				OMAP2_MCSPI_CHSTAT_TXS) < 0) {
707 			dev_err(&spi->dev, "TXS timed out\n");
708 		} else if (mcspi_wait_for_reg_bit(chstat_reg,
709 				OMAP2_MCSPI_CHSTAT_EOT) < 0)
710 			dev_err(&spi->dev, "EOT timed out\n");
711 
712 		/* disable the channel to purge RX data received during a TX_ONLY
713 		 * transfer, otherwise that stale RX data would affect the directly
714 		 * following RX_ONLY transfer.
715 		 */
716 		omap2_mcspi_set_enable(spi, 0);
717 	}
718 out:
719 	omap2_mcspi_set_enable(spi, 1);
720 	return count - c;
721 }
722 
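/*
 * The McSPI clock divider is a power of two: the 48 MHz functional clock is
 * divided by 2^div, so pick the smallest div whose rate does not exceed
 * speed_hz.
 */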
723 static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
724 {
725 	u32 div;
726 
727 	for (div = 0; div < 15; div++)
728 		if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
729 			return div;
730 
731 	return 15;
732 }
733 
734 /* called only when no transfer is active on this device */
735 static int omap2_mcspi_setup_transfer(struct spi_device *spi,
736 		struct spi_transfer *t)
737 {
738 	struct omap2_mcspi_cs *cs = spi->controller_state;
739 	struct omap2_mcspi *mcspi;
740 	struct spi_master *spi_cntrl;
741 	u32 l = 0, div = 0;
742 	u8 word_len = spi->bits_per_word;
743 	u32 speed_hz = spi->max_speed_hz;
744 
745 	mcspi = spi_master_get_devdata(spi->master);
746 	spi_cntrl = mcspi->master;
747 
748 	if (t != NULL && t->bits_per_word)
749 		word_len = t->bits_per_word;
750 
751 	cs->word_len = word_len;
752 
753 	if (t && t->speed_hz)
754 		speed_hz = t->speed_hz;
755 
756 	speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
757 	div = omap2_mcspi_calc_divisor(speed_hz);
758 
759 	l = mcspi_cached_chconf0(spi);
760 
761 	/* standard 4-wire master mode:  SCK, MOSI/out, MISO/in, nCS
762 	 * REVISIT: this controller could support SPI_3WIRE mode.
763 	 */
764 	if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
765 		l &= ~OMAP2_MCSPI_CHCONF_IS;
766 		l &= ~OMAP2_MCSPI_CHCONF_DPE1;
767 		l |= OMAP2_MCSPI_CHCONF_DPE0;
768 	} else {
769 		l |= OMAP2_MCSPI_CHCONF_IS;
770 		l |= OMAP2_MCSPI_CHCONF_DPE1;
771 		l &= ~OMAP2_MCSPI_CHCONF_DPE0;
772 	}
773 
774 	/* wordlength */
775 	l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
776 	l |= (word_len - 1) << 7;
777 
778 	/* set chipselect polarity; manage with FORCE */
779 	if (!(spi->mode & SPI_CS_HIGH))
780 		l |= OMAP2_MCSPI_CHCONF_EPOL;	/* active-low; normal */
781 	else
782 		l &= ~OMAP2_MCSPI_CHCONF_EPOL;
783 
784 	/* set clock divisor */
785 	l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
786 	l |= div << 2;
787 
788 	/* set SPI mode 0..3 */
789 	if (spi->mode & SPI_CPOL)
790 		l |= OMAP2_MCSPI_CHCONF_POL;
791 	else
792 		l &= ~OMAP2_MCSPI_CHCONF_POL;
793 	if (spi->mode & SPI_CPHA)
794 		l |= OMAP2_MCSPI_CHCONF_PHA;
795 	else
796 		l &= ~OMAP2_MCSPI_CHCONF_PHA;
797 
798 	mcspi_write_chconf0(spi, l);
799 
800 	dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
801 			OMAP2_MCSPI_MAX_FREQ >> div,
802 			(spi->mode & SPI_CPHA) ? "trailing" : "leading",
803 			(spi->mode & SPI_CPOL) ? "inverted" : "normal");
804 
805 	return 0;
806 }
807 
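/*
 * Acquire one RX and one TX dmaengine channel for this chip select, matched
 * against the DMA request numbers taken from the platform resources.
 */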
808 static int omap2_mcspi_request_dma(struct spi_device *spi)
809 {
810 	struct spi_master	*master = spi->master;
811 	struct omap2_mcspi	*mcspi;
812 	struct omap2_mcspi_dma	*mcspi_dma;
813 	dma_cap_mask_t mask;
814 	unsigned sig;
815 
816 	mcspi = spi_master_get_devdata(master);
817 	mcspi_dma = mcspi->dma_channels + spi->chip_select;
818 
819 	init_completion(&mcspi_dma->dma_rx_completion);
820 	init_completion(&mcspi_dma->dma_tx_completion);
821 
822 	dma_cap_zero(mask);
823 	dma_cap_set(DMA_SLAVE, mask);
824 	sig = mcspi_dma->dma_rx_sync_dev;
825 	mcspi_dma->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
826 	if (!mcspi_dma->dma_rx) {
827 		dev_err(&spi->dev, "no RX DMA engine channel for McSPI\n");
828 		return -EAGAIN;
829 	}
830 
831 	sig = mcspi_dma->dma_tx_sync_dev;
832 	mcspi_dma->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
833 	if (!mcspi_dma->dma_tx) {
834 		dev_err(&spi->dev, "no TX DMA engine channel for McSPI\n");
835 		dma_release_channel(mcspi_dma->dma_rx);
836 		mcspi_dma->dma_rx = NULL;
837 		return -EAGAIN;
838 	}
839 
840 	return 0;
841 }
842 
843 static int omap2_mcspi_setup(struct spi_device *spi)
844 {
845 	int			ret;
846 	struct omap2_mcspi	*mcspi = spi_master_get_devdata(spi->master);
847 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
848 	struct omap2_mcspi_dma	*mcspi_dma;
849 	struct omap2_mcspi_cs	*cs = spi->controller_state;
850 
851 	if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
852 		dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
853 			spi->bits_per_word);
854 		return -EINVAL;
855 	}
856 
857 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
858 
859 	if (!cs) {
860 		cs = kzalloc(sizeof *cs, GFP_KERNEL);
861 		if (!cs)
862 			return -ENOMEM;
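		/* Each chip select has its own 0x14-byte register bank. */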
863 		cs->base = mcspi->base + spi->chip_select * 0x14;
864 		cs->phys = mcspi->phys + spi->chip_select * 0x14;
865 		cs->chconf0 = 0;
866 		spi->controller_state = cs;
867 		/* Link this to context save list */
868 		list_add_tail(&cs->node, &ctx->cs);
869 	}
870 
871 	if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
872 		ret = omap2_mcspi_request_dma(spi);
873 		if (ret < 0)
874 			return ret;
875 	}
876 
877 	ret = pm_runtime_get_sync(mcspi->dev);
878 	if (ret < 0)
879 		return ret;
880 
881 	ret = omap2_mcspi_setup_transfer(spi, NULL);
882 	pm_runtime_mark_last_busy(mcspi->dev);
883 	pm_runtime_put_autosuspend(mcspi->dev);
884 
885 	return ret;
886 }
887 
888 static void omap2_mcspi_cleanup(struct spi_device *spi)
889 {
890 	struct omap2_mcspi	*mcspi;
891 	struct omap2_mcspi_dma	*mcspi_dma;
892 	struct omap2_mcspi_cs	*cs;
893 
894 	mcspi = spi_master_get_devdata(spi->master);
895 
896 	if (spi->controller_state) {
897 		/* Unlink controller state from context save list */
898 		cs = spi->controller_state;
899 		list_del(&cs->node);
900 
901 		kfree(cs);
902 	}
903 
904 	if (spi->chip_select < spi->master->num_chipselect) {
905 		mcspi_dma = &mcspi->dma_channels[spi->chip_select];
906 
907 		if (mcspi_dma->dma_rx) {
908 			dma_release_channel(mcspi_dma->dma_rx);
909 			mcspi_dma->dma_rx = NULL;
910 		}
911 		if (mcspi_dma->dma_tx) {
912 			dma_release_channel(mcspi_dma->dma_tx);
913 			mcspi_dma->dma_tx = NULL;
914 		}
915 	}
916 }
917 
918 static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
919 {
920 
921 	/* We only enable one channel at a time -- the one whose message is
922 	 * currently being handled -- although this controller would gladly
923 	 * arbitrate among multiple channels.  This corresponds to "single
924 	 * channel" master mode.  As a side effect, we need to manage the
925 	 * chipselect with the FORCE bit ... CS != channel enable.
926 	 */
927 
928 	struct spi_device		*spi;
929 	struct spi_transfer		*t = NULL;
930 	int				cs_active = 0;
931 	struct omap2_mcspi_cs		*cs;
932 	struct omap2_mcspi_device_config *cd;
933 	int				par_override = 0;
934 	int				status = 0;
935 	u32				chconf;
936 
937 	spi = m->spi;
938 	cs = spi->controller_state;
939 	cd = spi->controller_data;
940 
941 	omap2_mcspi_set_enable(spi, 1);
942 	list_for_each_entry(t, &m->transfers, transfer_list) {
943 		if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
944 			status = -EINVAL;
945 			break;
946 		}
947 		if (par_override || t->speed_hz || t->bits_per_word) {
948 			par_override = 1;
949 			status = omap2_mcspi_setup_transfer(spi, t);
950 			if (status < 0)
951 				break;
952 			if (!t->speed_hz && !t->bits_per_word)
953 				par_override = 0;
954 		}
955 
956 		if (!cs_active) {
957 			omap2_mcspi_force_cs(spi, 1);
958 			cs_active = 1;
959 		}
960 
961 		chconf = mcspi_cached_chconf0(spi);
962 		chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
963 		chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
964 
965 		if (t->tx_buf == NULL)
966 			chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
967 		else if (t->rx_buf == NULL)
968 			chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
969 
970 		if (cd && cd->turbo_mode && t->tx_buf == NULL) {
971 			/* Turbo mode is for more than one word */
972 			if (t->len > ((cs->word_len + 7) >> 3))
973 				chconf |= OMAP2_MCSPI_CHCONF_TURBO;
974 		}
975 
976 		mcspi_write_chconf0(spi, chconf);
977 
978 		if (t->len) {
979 			unsigned	count;
980 
981 			/* RX_ONLY mode needs dummy data in TX reg */
982 			if (t->tx_buf == NULL)
983 				__raw_writel(0, cs->base
984 						+ OMAP2_MCSPI_TX0);
985 
986 			if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)
987 				count = omap2_mcspi_txrx_dma(spi, t);
988 			else
989 				count = omap2_mcspi_txrx_pio(spi, t);
990 			m->actual_length += count;
991 
992 			if (count != t->len) {
993 				status = -EIO;
994 				break;
995 			}
996 		}
997 
998 		if (t->delay_usecs)
999 			udelay(t->delay_usecs);
1000 
1001 		/* ignore the "leave it on after last xfer" hint */
1002 		if (t->cs_change) {
1003 			omap2_mcspi_force_cs(spi, 0);
1004 			cs_active = 0;
1005 		}
1006 	}
1007 	/* Restore defaults if they were overridden */
1008 	if (par_override) {
1009 		par_override = 0;
1010 		status = omap2_mcspi_setup_transfer(spi, NULL);
1011 	}
1012 
1013 	if (cs_active)
1014 		omap2_mcspi_force_cs(spi, 0);
1015 
1016 	omap2_mcspi_set_enable(spi, 0);
1017 
1018 	m->status = status;
1019 
1020 }
1021 
1022 static int omap2_mcspi_transfer_one_message(struct spi_master *master,
1023 						struct spi_message *m)
1024 {
1025 	struct omap2_mcspi	*mcspi;
1026 	struct spi_transfer	*t;
1027 
1028 	mcspi = spi_master_get_devdata(master);
1029 	m->actual_length = 0;
1030 	m->status = 0;
1031 
1032 	/* reject invalid messages and transfers */
1033 	if (list_empty(&m->transfers))
1034 		return -EINVAL;
1035 	list_for_each_entry(t, &m->transfers, transfer_list) {
1036 		const void	*tx_buf = t->tx_buf;
1037 		void		*rx_buf = t->rx_buf;
1038 		unsigned	len = t->len;
1039 
1040 		if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ
1041 				|| (len && !(rx_buf || tx_buf))
1042 				|| (t->bits_per_word &&
1043 					(  t->bits_per_word < 4
1044 					|| t->bits_per_word > 32))) {
1045 			dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
1046 					t->speed_hz,
1047 					len,
1048 					tx_buf ? "tx" : "",
1049 					rx_buf ? "rx" : "",
1050 					t->bits_per_word);
1051 			return -EINVAL;
1052 		}
1053 		if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) {
1054 			dev_dbg(mcspi->dev, "speed_hz %d below minimum %d Hz\n",
1055 				t->speed_hz,
1056 				OMAP2_MCSPI_MAX_FREQ >> 15);
1057 			return -EINVAL;
1058 		}
1059 
1060 		if (m->is_dma_mapped || len < DMA_MIN_BYTES)
1061 			continue;
1062 
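		/*
		 * Map the buffers for the DMA path; transfers shorter than
		 * DMA_MIN_BYTES were skipped above and will be done by PIO.
		 */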
1063 		if (tx_buf != NULL) {
1064 			t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
1065 					len, DMA_TO_DEVICE);
1066 			if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
1067 				dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
1068 						'T', len);
1069 				return -EINVAL;
1070 			}
1071 		}
1072 		if (rx_buf != NULL) {
1073 			t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
1074 					DMA_FROM_DEVICE);
1075 			if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
1076 				dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
1077 						'R', len);
1078 				if (tx_buf != NULL)
1079 					dma_unmap_single(mcspi->dev, t->tx_dma,
1080 							len, DMA_TO_DEVICE);
1081 				return -EINVAL;
1082 			}
1083 		}
1084 	}
1085 
1086 	omap2_mcspi_work(mcspi, m);
1087 	spi_finalize_current_message(master);
1088 	return 0;
1089 }
1090 
1091 static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
1092 {
1093 	struct spi_master	*master = mcspi->master;
1094 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1095 	int			ret = 0;
1096 
1097 	ret = pm_runtime_get_sync(mcspi->dev);
1098 	if (ret < 0)
1099 		return ret;
1100 
1101 	mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
1102 				OMAP2_MCSPI_WAKEUPENABLE_WKEN);
1103 	ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
1104 
1105 	omap2_mcspi_set_master_mode(master);
1106 	pm_runtime_mark_last_busy(mcspi->dev);
1107 	pm_runtime_put_autosuspend(mcspi->dev);
1108 	return 0;
1109 }
1110 
1111 static int omap_mcspi_runtime_resume(struct device *dev)
1112 {
1113 	struct omap2_mcspi	*mcspi;
1114 	struct spi_master	*master;
1115 
1116 	master = dev_get_drvdata(dev);
1117 	mcspi = spi_master_get_devdata(master);
1118 	omap2_mcspi_restore_ctx(mcspi);
1119 
1120 	return 0;
1121 }
1122 
1123 static struct omap2_mcspi_platform_config omap2_pdata = {
1124 	.regs_offset = 0,
1125 };
1126 
1127 static struct omap2_mcspi_platform_config omap4_pdata = {
1128 	.regs_offset = OMAP4_MCSPI_REG_OFFSET,
1129 };
1130 
1131 static const struct of_device_id omap_mcspi_of_match[] = {
1132 	{
1133 		.compatible = "ti,omap2-mcspi",
1134 		.data = &omap2_pdata,
1135 	},
1136 	{
1137 		.compatible = "ti,omap4-mcspi",
1138 		.data = &omap4_pdata,
1139 	},
1140 	{ },
1141 };
1142 MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
1143 
1144 static int omap2_mcspi_probe(struct platform_device *pdev)
1145 {
1146 	struct spi_master	*master;
1147 	const struct omap2_mcspi_platform_config *pdata;
1148 	struct omap2_mcspi	*mcspi;
1149 	struct resource		*r;
1150 	int			status = 0, i;
1151 	u32			regs_offset = 0;
1152 	static int		bus_num = 1;
1153 	struct device_node	*node = pdev->dev.of_node;
1154 	const struct of_device_id *match;
1155 	struct pinctrl *pinctrl;
1156 
1157 	master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
1158 	if (master == NULL) {
1159 		dev_dbg(&pdev->dev, "master allocation failed\n");
1160 		return -ENOMEM;
1161 	}
1162 
1163 	/* the spi->mode bits understood by this driver: */
1164 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1165 
1166 	master->setup = omap2_mcspi_setup;
1167 	master->prepare_transfer_hardware = omap2_prepare_transfer;
1168 	master->unprepare_transfer_hardware = omap2_unprepare_transfer;
1169 	master->transfer_one_message = omap2_mcspi_transfer_one_message;
1170 	master->cleanup = omap2_mcspi_cleanup;
1171 	master->dev.of_node = node;
1172 
1173 	dev_set_drvdata(&pdev->dev, master);
1174 
1175 	mcspi = spi_master_get_devdata(master);
1176 	mcspi->master = master;
1177 
1178 	match = of_match_device(omap_mcspi_of_match, &pdev->dev);
1179 	if (match) {
1180 		u32 num_cs = 1; /* default number of chipselect */
1181 		pdata = match->data;
1182 
1183 		of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
1184 		master->num_chipselect = num_cs;
1185 		master->bus_num = bus_num++;
1186 		if (of_get_property(node, "ti,pindir-d0-out-d1-in", NULL))
1187 			mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
1188 	} else {
1189 		pdata = pdev->dev.platform_data;
1190 		master->num_chipselect = pdata->num_cs;
1191 		if (pdev->id != -1)
1192 			master->bus_num = pdev->id;
1193 		mcspi->pin_dir = pdata->pin_dir;
1194 	}
1195 	regs_offset = pdata->regs_offset;
1196 
1197 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1198 	if (r == NULL) {
1199 		status = -ENODEV;
1200 		goto free_master;
1201 	}
1202 
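	/*
	 * Some instances (e.g. OMAP4) place the McSPI registers at a fixed
	 * offset inside the device's address range; shift the resource by the
	 * regs_offset from the platform data / OF match data.
	 */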
1203 	r->start += regs_offset;
1204 	r->end += regs_offset;
1205 	mcspi->phys = r->start;
1206 
1207 	mcspi->base = devm_request_and_ioremap(&pdev->dev, r);
1208 	if (!mcspi->base) {
1209 		dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
1210 		status = -ENOMEM;
1211 		goto free_master;
1212 	}
1213 
1214 	mcspi->dev = &pdev->dev;
1215 
1216 	INIT_LIST_HEAD(&mcspi->ctx.cs);
1217 
1218 	mcspi->dma_channels = kcalloc(master->num_chipselect,
1219 			sizeof(struct omap2_mcspi_dma),
1220 			GFP_KERNEL);
1221 
1222 	if (mcspi->dma_channels == NULL)
1223 		goto free_master;
1224 
1225 	for (i = 0; i < master->num_chipselect; i++) {
1226 		char dma_ch_name[14];
1227 		struct resource *dma_res;
1228 
1229 		sprintf(dma_ch_name, "rx%d", i);
1230 		dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
1231 							dma_ch_name);
1232 		if (!dma_res) {
1233 			dev_dbg(&pdev->dev, "cannot get DMA RX channel\n");
1234 			status = -ENODEV;
1235 			break;
1236 		}
1237 
1238 		mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
1239 		sprintf(dma_ch_name, "tx%d", i);
1240 		dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
1241 							dma_ch_name);
1242 		if (!dma_res) {
1243 			dev_dbg(&pdev->dev, "cannot get DMA TX channel\n");
1244 			status = -ENODEV;
1245 			break;
1246 		}
1247 
1248 		mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
1249 	}
1250 
1251 	if (status < 0)
1252 		goto dma_chnl_free;
1253 
1254 	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
1255 	if (IS_ERR(pinctrl))
1256 		dev_warn(&pdev->dev,
1257 			"pins are not configured from the driver\n");
1258 
1259 	pm_runtime_use_autosuspend(&pdev->dev);
1260 	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
1261 	pm_runtime_enable(&pdev->dev);
1262 
1263 	if (status || omap2_mcspi_master_setup(mcspi) < 0)
1264 		goto disable_pm;
1265 
1266 	status = spi_register_master(master);
1267 	if (status < 0)
1268 		goto disable_pm;
1269 
1270 	return status;
1271 
1272 disable_pm:
1273 	pm_runtime_disable(&pdev->dev);
1274 dma_chnl_free:
1275 	kfree(mcspi->dma_channels);
1276 free_master:
1277 	spi_master_put(master);
1278 	return status;
1279 }
1280 
1281 static int omap2_mcspi_remove(struct platform_device *pdev)
1282 {
1283 	struct spi_master	*master;
1284 	struct omap2_mcspi	*mcspi;
1285 	struct omap2_mcspi_dma	*dma_channels;
1286 
1287 	master = dev_get_drvdata(&pdev->dev);
1288 	mcspi = spi_master_get_devdata(master);
1289 	dma_channels = mcspi->dma_channels;
1290 
1291 	pm_runtime_put_sync(mcspi->dev);
1292 	pm_runtime_disable(&pdev->dev);
1293 
1294 	spi_unregister_master(master);
1295 	kfree(dma_channels);
1296 
1297 	return 0;
1298 }
1299 
1300 /* work with hotplug and coldplug */
1301 MODULE_ALIAS("platform:omap2_mcspi");
1302 
1303 #ifdef	CONFIG_SUSPEND
1304 /*
1305  * When the SPI controller wakes up from off-mode, CS is in the active state.
1306  * If it was inactive when the driver was suspended, force it back to the
1307  * inactive state at wake-up.
1308  */
1309 static int omap2_mcspi_resume(struct device *dev)
1310 {
1311 	struct spi_master	*master = dev_get_drvdata(dev);
1312 	struct omap2_mcspi	*mcspi = spi_master_get_devdata(master);
1313 	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
1314 	struct omap2_mcspi_cs	*cs;
1315 
1316 	pm_runtime_get_sync(mcspi->dev);
1317 	list_for_each_entry(cs, &ctx->cs, node) {
1318 		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
1319 			/*
1320 			 * We need to toggle the CS state for the OMAP to take
1321 			 * this change into account.
1322 			 */
1323 			cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE;
1324 			__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
1325 			cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
1326 			__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
1327 		}
1328 	}
1329 	pm_runtime_mark_last_busy(mcspi->dev);
1330 	pm_runtime_put_autosuspend(mcspi->dev);
1331 	return 0;
1332 }
1333 #else
1334 #define	omap2_mcspi_resume	NULL
1335 #endif
1336 
1337 static const struct dev_pm_ops omap2_mcspi_pm_ops = {
1338 	.resume = omap2_mcspi_resume,
1339 	.runtime_resume	= omap_mcspi_runtime_resume,
1340 };
1341 
1342 static struct platform_driver omap2_mcspi_driver = {
1343 	.driver = {
1344 		.name =		"omap2_mcspi",
1345 		.owner =	THIS_MODULE,
1346 		.pm =		&omap2_mcspi_pm_ops,
1347 		.of_match_table = omap_mcspi_of_match,
1348 	},
1349 	.probe =	omap2_mcspi_probe,
1350 	.remove =	omap2_mcspi_remove,
1351 };
1352 
1353 module_platform_driver(omap2_mcspi_driver);
1354 MODULE_LICENSE("GPL");
1355