xref: /linux/drivers/spi/spi-omap2-mcspi.c (revision b43ab901d671e3e3cad425ea5e9a3c74e266dcdd)
1 /*
2  * OMAP2 McSPI controller driver
3  *
4  * Copyright (C) 2005, 2006 Nokia Corporation
5  * Author:	Samuel Ortiz <samuel.ortiz@nokia.com> and
6  *		Juha Yrjölä <juha.yrjola@nokia.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21  *
22  */
23 
24 #include <linux/kernel.h>
25 #include <linux/init.h>
26 #include <linux/interrupt.h>
27 #include <linux/module.h>
28 #include <linux/device.h>
29 #include <linux/delay.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/platform_device.h>
32 #include <linux/err.h>
33 #include <linux/clk.h>
34 #include <linux/io.h>
35 #include <linux/slab.h>
36 #include <linux/pm_runtime.h>
37 
38 #include <linux/spi/spi.h>
39 
40 #include <plat/dma.h>
41 #include <plat/clock.h>
42 #include <plat/mcspi.h>
43 
44 #define OMAP2_MCSPI_MAX_FREQ		48000000
45 
46 /* OMAP2 has 3 SPI controllers, while OMAP3 has 4 */
47 #define OMAP2_MCSPI_MAX_CTRL		4
48 
49 #define OMAP2_MCSPI_REVISION		0x00
50 #define OMAP2_MCSPI_SYSSTATUS		0x14
51 #define OMAP2_MCSPI_IRQSTATUS		0x18
52 #define OMAP2_MCSPI_IRQENABLE		0x1c
53 #define OMAP2_MCSPI_WAKEUPENABLE	0x20
54 #define OMAP2_MCSPI_SYST		0x24
55 #define OMAP2_MCSPI_MODULCTRL		0x28
56 
57 /* per-channel banks, 0x14 bytes each, first is: */
58 #define OMAP2_MCSPI_CHCONF0		0x2c
59 #define OMAP2_MCSPI_CHSTAT0		0x30
60 #define OMAP2_MCSPI_CHCTRL0		0x34
61 #define OMAP2_MCSPI_TX0			0x38
62 #define OMAP2_MCSPI_RX0			0x3c
63 
64 /* per-register bitmasks: */
65 
66 #define OMAP2_MCSPI_MODULCTRL_SINGLE	BIT(0)
67 #define OMAP2_MCSPI_MODULCTRL_MS	BIT(2)
68 #define OMAP2_MCSPI_MODULCTRL_STEST	BIT(3)
69 
70 #define OMAP2_MCSPI_CHCONF_PHA		BIT(0)
71 #define OMAP2_MCSPI_CHCONF_POL		BIT(1)
72 #define OMAP2_MCSPI_CHCONF_CLKD_MASK	(0x0f << 2)
73 #define OMAP2_MCSPI_CHCONF_EPOL		BIT(6)
74 #define OMAP2_MCSPI_CHCONF_WL_MASK	(0x1f << 7)
75 #define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY	BIT(12)
76 #define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY	BIT(13)
77 #define OMAP2_MCSPI_CHCONF_TRM_MASK	(0x03 << 12)
78 #define OMAP2_MCSPI_CHCONF_DMAW		BIT(14)
79 #define OMAP2_MCSPI_CHCONF_DMAR		BIT(15)
80 #define OMAP2_MCSPI_CHCONF_DPE0		BIT(16)
81 #define OMAP2_MCSPI_CHCONF_DPE1		BIT(17)
82 #define OMAP2_MCSPI_CHCONF_IS		BIT(18)
83 #define OMAP2_MCSPI_CHCONF_TURBO	BIT(19)
84 #define OMAP2_MCSPI_CHCONF_FORCE	BIT(20)
85 
86 #define OMAP2_MCSPI_CHSTAT_RXS		BIT(0)
87 #define OMAP2_MCSPI_CHSTAT_TXS		BIT(1)
88 #define OMAP2_MCSPI_CHSTAT_EOT		BIT(2)
89 
90 #define OMAP2_MCSPI_CHCTRL_EN		BIT(0)
91 
92 #define OMAP2_MCSPI_WAKEUPENABLE_WKEN	BIT(0)
93 
94 /* We have 2 DMA channels per CS, one for RX and one for TX */
95 struct omap2_mcspi_dma {
96 	int dma_tx_channel;
97 	int dma_rx_channel;
98 
99 	int dma_tx_sync_dev;
100 	int dma_rx_sync_dev;
101 
102 	struct completion dma_tx_completion;
103 	struct completion dma_rx_completion;
104 };
105 
106 /* use PIO for small transfers, avoiding DMA setup/teardown overhead and
107  * cache operations; better heuristics consider wordsize and bitrate.
108  */
109 #define DMA_MIN_BYTES			160
110 
111 
112 struct omap2_mcspi {
113 	struct work_struct	work;
114 	/* lock protects queue and registers */
115 	spinlock_t		lock;
116 	struct list_head	msg_queue;
117 	struct spi_master	*master;
118 	/* Virtual base address of the controller */
119 	void __iomem		*base;
120 	unsigned long		phys;
121 	/* SPI1 has 4 channels, while SPI2 has 2 */
122 	struct omap2_mcspi_dma	*dma_channels;
123 	struct  device		*dev;
124 	struct workqueue_struct *wq;
125 };
126 
127 struct omap2_mcspi_cs {
128 	void __iomem		*base;
129 	unsigned long		phys;
130 	int			word_len;
131 	struct list_head	node;
132 	/* Context save and restore shadow register */
133 	u32			chconf0;
134 };
135 
136 /* Used for context save and restore; these structure members must be updated
137  * whenever the corresponding registers are modified.
138  */
139 struct omap2_mcspi_regs {
140 	u32 modulctrl;
141 	u32 wakeupenable;
142 	struct list_head cs;
143 };
144 
145 static struct omap2_mcspi_regs omap2_mcspi_ctx[OMAP2_MCSPI_MAX_CTRL];
146 
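/*
 * Set or clear @mask in @val depending on @set; helper for read-modify-write
 * updates of (shadowed) register values.
 */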
147 #define MOD_REG_BIT(val, mask, set) do { \
148 	if (set) \
149 		val |= mask; \
150 	else \
151 		val &= ~mask; \
152 } while (0)
153 
154 static inline void mcspi_write_reg(struct spi_master *master,
155 		int idx, u32 val)
156 {
157 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
158 
159 	__raw_writel(val, mcspi->base + idx);
160 }
161 
162 static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
163 {
164 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
165 
166 	return __raw_readl(mcspi->base + idx);
167 }
168 
169 static inline void mcspi_write_cs_reg(const struct spi_device *spi,
170 		int idx, u32 val)
171 {
172 	struct omap2_mcspi_cs	*cs = spi->controller_state;
173 
174 	__raw_writel(val, cs->base + idx);
175 }
176 
177 static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
178 {
179 	struct omap2_mcspi_cs	*cs = spi->controller_state;
180 
181 	return __raw_readl(cs->base + idx);
182 }
183 
184 static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
185 {
186 	struct omap2_mcspi_cs *cs = spi->controller_state;
187 
188 	return cs->chconf0;
189 }
190 
191 static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
192 {
193 	struct omap2_mcspi_cs *cs = spi->controller_state;
194 
195 	cs->chconf0 = val;
196 	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
197 	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
198 }
199 
200 static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
201 		int is_read, int enable)
202 {
203 	u32 l, rw;
204 
205 	l = mcspi_cached_chconf0(spi);
206 
207 	if (is_read) /* 1 is read, 0 write */
208 		rw = OMAP2_MCSPI_CHCONF_DMAR;
209 	else
210 		rw = OMAP2_MCSPI_CHCONF_DMAW;
211 
212 	MOD_REG_BIT(l, rw, enable);
213 	mcspi_write_chconf0(spi, l);
214 }
215 
216 static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
217 {
218 	u32 l;
219 
220 	l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
221 	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
222 	/* Flush post-writes */
223 	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
224 }
225 
226 static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
227 {
228 	u32 l;
229 
230 	l = mcspi_cached_chconf0(spi);
231 	MOD_REG_BIT(l, OMAP2_MCSPI_CHCONF_FORCE, cs_active);
232 	mcspi_write_chconf0(spi, l);
233 }
234 
235 static void omap2_mcspi_set_master_mode(struct spi_master *master)
236 {
237 	u32 l;
238 
239 	/* setup when switching from (reset default) slave mode
240 	 * to single-channel master mode
241 	 */
242 	l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
243 	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_STEST, 0);
244 	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_MS, 0);
245 	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_SINGLE, 1);
246 	mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
247 
248 	omap2_mcspi_ctx[master->bus_num - 1].modulctrl = l;
249 }
250 
251 static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
252 {
253 	struct spi_master *spi_cntrl;
254 	struct omap2_mcspi_cs *cs;
255 	spi_cntrl = mcspi->master;
256 
257 	/* McSPI: context restore */
258 	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL,
259 			omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl);
260 
261 	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE,
262 			omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable);
263 
264 	list_for_each_entry(cs, &omap2_mcspi_ctx[spi_cntrl->bus_num - 1].cs,
265 			node)
266 		__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
267 }

268 static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi)
269 {
270 	pm_runtime_put_sync(mcspi->dev);
271 }
272 
273 static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi)
274 {
275 	return pm_runtime_get_sync(mcspi->dev);
276 }
277 
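/*
 * Busy-wait (with cpu_relax()) until @bit is set in the register at @reg,
 * giving up after roughly one second.  Returns 0 on success and a negative
 * value on timeout.
 */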
278 static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
279 {
280 	unsigned long timeout;
281 
282 	timeout = jiffies + msecs_to_jiffies(1000);
283 	while (!(__raw_readl(reg) & bit)) {
284 		if (time_after(jiffies, timeout))
285 			return -1;
286 		cpu_relax();
287 	}
288 	return 0;
289 }
290 
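/*
 * Transfer @xfer using the system DMA engine: one channel feeds TX0 from the
 * already-mapped tx_dma buffer, another drains RX0 into rx_dma.  The last RX
 * word (or last two, in turbo mode) is read back by PIO after disabling the
 * channel, so the controller does not clock in extra words.  Returns the
 * number of bytes transferred.
 */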
291 static unsigned
292 omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
293 {
294 	struct omap2_mcspi	*mcspi;
295 	struct omap2_mcspi_cs	*cs = spi->controller_state;
296 	struct omap2_mcspi_dma  *mcspi_dma;
297 	unsigned int		count, c;
298 	unsigned long		base, tx_reg, rx_reg;
299 	int			word_len, data_type, element_count;
300 	int			elements = 0;
301 	u32			l;
302 	u8			*rx;
303 	const u8		*tx;
304 	void __iomem		*chstat_reg;
305 
306 	mcspi = spi_master_get_devdata(spi->master);
307 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
308 	l = mcspi_cached_chconf0(spi);
309 
310 	chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
311 
312 	count = xfer->len;
313 	c = count;
314 	word_len = cs->word_len;
315 
316 	base = cs->phys;
317 	tx_reg = base + OMAP2_MCSPI_TX0;
318 	rx_reg = base + OMAP2_MCSPI_RX0;
319 	rx = xfer->rx_buf;
320 	tx = xfer->tx_buf;
321 
322 	if (word_len <= 8) {
323 		data_type = OMAP_DMA_DATA_TYPE_S8;
324 		element_count = count;
325 	} else if (word_len <= 16) {
326 		data_type = OMAP_DMA_DATA_TYPE_S16;
327 		element_count = count >> 1;
328 	} else /* word_len <= 32 */ {
329 		data_type = OMAP_DMA_DATA_TYPE_S32;
330 		element_count = count >> 2;
331 	}
332 
333 	if (tx != NULL) {
334 		omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel,
335 				data_type, element_count, 1,
336 				OMAP_DMA_SYNC_ELEMENT,
337 				mcspi_dma->dma_tx_sync_dev, 0);
338 
339 		omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0,
340 				OMAP_DMA_AMODE_CONSTANT,
341 				tx_reg, 0, 0);
342 
343 		omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0,
344 				OMAP_DMA_AMODE_POST_INC,
345 				xfer->tx_dma, 0, 0);
346 	}
347 
348 	if (rx != NULL) {
349 		elements = element_count - 1;
350 		if (l & OMAP2_MCSPI_CHCONF_TURBO)
351 			elements--;
352 
353 		omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
354 				data_type, elements, 1,
355 				OMAP_DMA_SYNC_ELEMENT,
356 				mcspi_dma->dma_rx_sync_dev, 1);
357 
358 		omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0,
359 				OMAP_DMA_AMODE_CONSTANT,
360 				rx_reg, 0, 0);
361 
362 		omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0,
363 				OMAP_DMA_AMODE_POST_INC,
364 				xfer->rx_dma, 0, 0);
365 	}
366 
367 	if (tx != NULL) {
368 		omap_start_dma(mcspi_dma->dma_tx_channel);
369 		omap2_mcspi_set_dma_req(spi, 0, 1);
370 	}
371 
372 	if (rx != NULL) {
373 		omap_start_dma(mcspi_dma->dma_rx_channel);
374 		omap2_mcspi_set_dma_req(spi, 1, 1);
375 	}
376 
377 	if (tx != NULL) {
378 		wait_for_completion(&mcspi_dma->dma_tx_completion);
379 		dma_unmap_single(&spi->dev, xfer->tx_dma, count, DMA_TO_DEVICE);
380 
381 		/* for TX_ONLY mode, be sure all words have shifted out */
382 		if (rx == NULL) {
383 			if (mcspi_wait_for_reg_bit(chstat_reg,
384 						OMAP2_MCSPI_CHSTAT_TXS) < 0)
385 				dev_err(&spi->dev, "TXS timed out\n");
386 			else if (mcspi_wait_for_reg_bit(chstat_reg,
387 						OMAP2_MCSPI_CHSTAT_EOT) < 0)
388 				dev_err(&spi->dev, "EOT timed out\n");
389 		}
390 	}
391 
392 	if (rx != NULL) {
393 		wait_for_completion(&mcspi_dma->dma_rx_completion);
394 		dma_unmap_single(&spi->dev, xfer->rx_dma, count, DMA_FROM_DEVICE);
395 		omap2_mcspi_set_enable(spi, 0);
396 
397 		if (l & OMAP2_MCSPI_CHCONF_TURBO) {
398 
399 			if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
400 				   & OMAP2_MCSPI_CHSTAT_RXS)) {
401 				u32 w;
402 
403 				w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
404 				if (word_len <= 8)
405 					((u8 *)xfer->rx_buf)[elements++] = w;
406 				else if (word_len <= 16)
407 					((u16 *)xfer->rx_buf)[elements++] = w;
408 				else /* word_len <= 32 */
409 					((u32 *)xfer->rx_buf)[elements++] = w;
410 			} else {
411 				dev_err(&spi->dev,
412 					"DMA RX penultimate word empty\n");
413 				count -= (word_len <= 8)  ? 2 :
414 					(word_len <= 16) ? 4 :
415 					/* word_len <= 32 */ 8;
416 				omap2_mcspi_set_enable(spi, 1);
417 				return count;
418 			}
419 		}
420 
421 		if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
422 				& OMAP2_MCSPI_CHSTAT_RXS)) {
423 			u32 w;
424 
425 			w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
426 			if (word_len <= 8)
427 				((u8 *)xfer->rx_buf)[elements] = w;
428 			else if (word_len <= 16)
429 				((u16 *)xfer->rx_buf)[elements] = w;
430 			else /* word_len <= 32 */
431 				((u32 *)xfer->rx_buf)[elements] = w;
432 		} else {
433 			dev_err(&spi->dev, "DMA RX last word empty\n");
434 			count -= (word_len <= 8)  ? 1 :
435 				 (word_len <= 16) ? 2 :
436 			       /* word_len <= 32 */ 4;
437 		}
438 		omap2_mcspi_set_enable(spi, 1);
439 	}
440 	return count;
441 }
442 
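/*
 * Transfer @xfer word by word under programmed I/O, polling TXS/RXS in
 * CHSTAT0 before each FIFO register access.  Used for short transfers
 * (below DMA_MIN_BYTES) that were not already DMA-mapped.  Returns the
 * number of bytes actually transferred.
 */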
443 static unsigned
444 omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
445 {
446 	struct omap2_mcspi	*mcspi;
447 	struct omap2_mcspi_cs	*cs = spi->controller_state;
448 	unsigned int		count, c;
449 	u32			l;
450 	void __iomem		*base = cs->base;
451 	void __iomem		*tx_reg;
452 	void __iomem		*rx_reg;
453 	void __iomem		*chstat_reg;
454 	int			word_len;
455 
456 	mcspi = spi_master_get_devdata(spi->master);
457 	count = xfer->len;
458 	c = count;
459 	word_len = cs->word_len;
460 
461 	l = mcspi_cached_chconf0(spi);
462 
463 	/* We store the pre-calculated register addresses on stack to speed
464 	 * up the transfer loop. */
465 	tx_reg		= base + OMAP2_MCSPI_TX0;
466 	rx_reg		= base + OMAP2_MCSPI_RX0;
467 	chstat_reg	= base + OMAP2_MCSPI_CHSTAT0;
468 
469 	if (c < (word_len>>3))
470 		return 0;
471 
472 	if (word_len <= 8) {
473 		u8		*rx;
474 		const u8	*tx;
475 
476 		rx = xfer->rx_buf;
477 		tx = xfer->tx_buf;
478 
479 		do {
480 			c -= 1;
481 			if (tx != NULL) {
482 				if (mcspi_wait_for_reg_bit(chstat_reg,
483 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
484 					dev_err(&spi->dev, "TXS timed out\n");
485 					goto out;
486 				}
487 				dev_vdbg(&spi->dev, "write-%d %02x\n",
488 						word_len, *tx);
489 				__raw_writel(*tx++, tx_reg);
490 			}
491 			if (rx != NULL) {
492 				if (mcspi_wait_for_reg_bit(chstat_reg,
493 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
494 					dev_err(&spi->dev, "RXS timed out\n");
495 					goto out;
496 				}
497 
498 				if (c == 1 && tx == NULL &&
499 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
500 					omap2_mcspi_set_enable(spi, 0);
501 					*rx++ = __raw_readl(rx_reg);
502 					dev_vdbg(&spi->dev, "read-%d %02x\n",
503 						    word_len, *(rx - 1));
504 					if (mcspi_wait_for_reg_bit(chstat_reg,
505 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
506 						dev_err(&spi->dev,
507 							"RXS timed out\n");
508 						goto out;
509 					}
510 					c = 0;
511 				} else if (c == 0 && tx == NULL) {
512 					omap2_mcspi_set_enable(spi, 0);
513 				}
514 
515 				*rx++ = __raw_readl(rx_reg);
516 				dev_vdbg(&spi->dev, "read-%d %02x\n",
517 						word_len, *(rx - 1));
518 			}
519 		} while (c);
520 	} else if (word_len <= 16) {
521 		u16		*rx;
522 		const u16	*tx;
523 
524 		rx = xfer->rx_buf;
525 		tx = xfer->tx_buf;
526 		do {
527 			c -= 2;
528 			if (tx != NULL) {
529 				if (mcspi_wait_for_reg_bit(chstat_reg,
530 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
531 					dev_err(&spi->dev, "TXS timed out\n");
532 					goto out;
533 				}
534 				dev_vdbg(&spi->dev, "write-%d %04x\n",
535 						word_len, *tx);
536 				__raw_writel(*tx++, tx_reg);
537 			}
538 			if (rx != NULL) {
539 				if (mcspi_wait_for_reg_bit(chstat_reg,
540 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
541 					dev_err(&spi->dev, "RXS timed out\n");
542 					goto out;
543 				}
544 
545 				if (c == 2 && tx == NULL &&
546 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
547 					omap2_mcspi_set_enable(spi, 0);
548 					*rx++ = __raw_readl(rx_reg);
549 					dev_vdbg(&spi->dev, "read-%d %04x\n",
550 						    word_len, *(rx - 1));
551 					if (mcspi_wait_for_reg_bit(chstat_reg,
552 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
553 						dev_err(&spi->dev,
554 							"RXS timed out\n");
555 						goto out;
556 					}
557 					c = 0;
558 				} else if (c == 0 && tx == NULL) {
559 					omap2_mcspi_set_enable(spi, 0);
560 				}
561 
562 				*rx++ = __raw_readl(rx_reg);
563 				dev_vdbg(&spi->dev, "read-%d %04x\n",
564 						word_len, *(rx - 1));
565 			}
566 		} while (c >= 2);
567 	} else if (word_len <= 32) {
568 		u32		*rx;
569 		const u32	*tx;
570 
571 		rx = xfer->rx_buf;
572 		tx = xfer->tx_buf;
573 		do {
574 			c -= 4;
575 			if (tx != NULL) {
576 				if (mcspi_wait_for_reg_bit(chstat_reg,
577 						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
578 					dev_err(&spi->dev, "TXS timed out\n");
579 					goto out;
580 				}
581 				dev_vdbg(&spi->dev, "write-%d %08x\n",
582 						word_len, *tx);
583 				__raw_writel(*tx++, tx_reg);
584 			}
585 			if (rx != NULL) {
586 				if (mcspi_wait_for_reg_bit(chstat_reg,
587 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
588 					dev_err(&spi->dev, "RXS timed out\n");
589 					goto out;
590 				}
591 
592 				if (c == 4 && tx == NULL &&
593 				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
594 					omap2_mcspi_set_enable(spi, 0);
595 					*rx++ = __raw_readl(rx_reg);
596 					dev_vdbg(&spi->dev, "read-%d %08x\n",
597 						    word_len, *(rx - 1));
598 					if (mcspi_wait_for_reg_bit(chstat_reg,
599 						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
600 						dev_err(&spi->dev,
601 							"RXS timed out\n");
602 						goto out;
603 					}
604 					c = 0;
605 				} else if (c == 0 && tx == NULL) {
606 					omap2_mcspi_set_enable(spi, 0);
607 				}
608 
609 				*rx++ = __raw_readl(rx_reg);
610 				dev_vdbg(&spi->dev, "read-%d %08x\n",
611 						word_len, *(rx - 1));
612 			}
613 		} while (c >= 4);
614 	}
615 
616 	/* for TX_ONLY mode, be sure all words have shifted out */
617 	if (xfer->rx_buf == NULL) {
618 		if (mcspi_wait_for_reg_bit(chstat_reg,
619 				OMAP2_MCSPI_CHSTAT_TXS) < 0) {
620 			dev_err(&spi->dev, "TXS timed out\n");
621 		} else if (mcspi_wait_for_reg_bit(chstat_reg,
622 				OMAP2_MCSPI_CHSTAT_EOT) < 0)
623 			dev_err(&spi->dev, "EOT timed out\n");
624 
625 		/* disable the channel to purge any RX data received during a
626 		 * TX_ONLY transfer; otherwise that stale data would corrupt the
627 		 * directly following RX_ONLY transfer.
628 		 */
629 		omap2_mcspi_set_enable(spi, 0);
630 	}
631 out:
632 	omap2_mcspi_set_enable(spi, 1);
633 	return count - c;
634 }
635 
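/*
 * Return the smallest clock-divider exponent (0..15) such that the divided
 * functional clock, OMAP2_MCSPI_MAX_FREQ >> div, does not exceed @speed_hz.
 * For example, 10 MHz yields div = 3 (48 MHz / 8 = 6 MHz).
 */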
636 static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
637 {
638 	u32 div;
639 
640 	for (div = 0; div < 15; div++)
641 		if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
642 			return div;
643 
644 	return 15;
645 }
646 
647 /* called only when no transfer is active to this device */
648 static int omap2_mcspi_setup_transfer(struct spi_device *spi,
649 		struct spi_transfer *t)
650 {
651 	struct omap2_mcspi_cs *cs = spi->controller_state;
652 	struct omap2_mcspi *mcspi;
653 	struct spi_master *spi_cntrl;
654 	u32 l = 0, div = 0;
655 	u8 word_len = spi->bits_per_word;
656 	u32 speed_hz = spi->max_speed_hz;
657 
658 	mcspi = spi_master_get_devdata(spi->master);
659 	spi_cntrl = mcspi->master;
660 
661 	if (t != NULL && t->bits_per_word)
662 		word_len = t->bits_per_word;
663 
664 	cs->word_len = word_len;
665 
666 	if (t && t->speed_hz)
667 		speed_hz = t->speed_hz;
668 
669 	speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
670 	div = omap2_mcspi_calc_divisor(speed_hz);
671 
672 	l = mcspi_cached_chconf0(spi);
673 
674 	/* standard 4-wire master mode:  SCK, MOSI/out, MISO/in, nCS
675 	 * REVISIT: this controller could support SPI_3WIRE mode.
676 	 */
677 	l &= ~(OMAP2_MCSPI_CHCONF_IS|OMAP2_MCSPI_CHCONF_DPE1);
678 	l |= OMAP2_MCSPI_CHCONF_DPE0;
679 
680 	/* wordlength */
681 	l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
682 	l |= (word_len - 1) << 7;
683 
684 	/* set chipselect polarity; manage with FORCE */
685 	if (!(spi->mode & SPI_CS_HIGH))
686 		l |= OMAP2_MCSPI_CHCONF_EPOL;	/* active-low; normal */
687 	else
688 		l &= ~OMAP2_MCSPI_CHCONF_EPOL;
689 
690 	/* set clock divisor */
691 	l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
692 	l |= div << 2;
693 
694 	/* set SPI mode 0..3 */
695 	if (spi->mode & SPI_CPOL)
696 		l |= OMAP2_MCSPI_CHCONF_POL;
697 	else
698 		l &= ~OMAP2_MCSPI_CHCONF_POL;
699 	if (spi->mode & SPI_CPHA)
700 		l |= OMAP2_MCSPI_CHCONF_PHA;
701 	else
702 		l &= ~OMAP2_MCSPI_CHCONF_PHA;
703 
704 	mcspi_write_chconf0(spi, l);
705 
706 	dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
707 			OMAP2_MCSPI_MAX_FREQ >> div,
708 			(spi->mode & SPI_CPHA) ? "trailing" : "leading",
709 			(spi->mode & SPI_CPOL) ? "inverted" : "normal");
710 
711 	return 0;
712 }
713 
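/*
 * DMA completion callbacks: wake up the thread sleeping in
 * omap2_mcspi_txrx_dma() and clear the corresponding DMA request bit in
 * CHCONF0 so the controller stops generating further requests.
 */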
714 static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data)
715 {
716 	struct spi_device	*spi = data;
717 	struct omap2_mcspi	*mcspi;
718 	struct omap2_mcspi_dma	*mcspi_dma;
719 
720 	mcspi = spi_master_get_devdata(spi->master);
721 	mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
722 
723 	complete(&mcspi_dma->dma_rx_completion);
724 
725 	/* We must disable the DMA RX request */
726 	omap2_mcspi_set_dma_req(spi, 1, 0);
727 }
728 
729 static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data)
730 {
731 	struct spi_device	*spi = data;
732 	struct omap2_mcspi	*mcspi;
733 	struct omap2_mcspi_dma	*mcspi_dma;
734 
735 	mcspi = spi_master_get_devdata(spi->master);
736 	mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
737 
738 	complete(&mcspi_dma->dma_tx_completion);
739 
740 	/* We must disable the DMA TX request */
741 	omap2_mcspi_set_dma_req(spi, 0, 0);
742 }
743 
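/*
 * Claim the RX and TX system-DMA channels for this chip select and
 * initialise their completion structures.  Returns 0 on success or
 * -EAGAIN if either channel is unavailable.
 */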
744 static int omap2_mcspi_request_dma(struct spi_device *spi)
745 {
746 	struct spi_master	*master = spi->master;
747 	struct omap2_mcspi	*mcspi;
748 	struct omap2_mcspi_dma	*mcspi_dma;
749 
750 	mcspi = spi_master_get_devdata(master);
751 	mcspi_dma = mcspi->dma_channels + spi->chip_select;
752 
753 	if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX",
754 			omap2_mcspi_dma_rx_callback, spi,
755 			&mcspi_dma->dma_rx_channel)) {
756 		dev_err(&spi->dev, "no RX DMA channel for McSPI\n");
757 		return -EAGAIN;
758 	}
759 
760 	if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX",
761 			omap2_mcspi_dma_tx_callback, spi,
762 			&mcspi_dma->dma_tx_channel)) {
763 		omap_free_dma(mcspi_dma->dma_rx_channel);
764 		mcspi_dma->dma_rx_channel = -1;
765 		dev_err(&spi->dev, "no TX DMA channel for McSPI\n");
766 		return -EAGAIN;
767 	}
768 
769 	init_completion(&mcspi_dma->dma_rx_completion);
770 	init_completion(&mcspi_dma->dma_tx_completion);
771 
772 	return 0;
773 }
774 
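/*
 * spi_master setup() hook: allocate per-chip-select state on first use,
 * link it into the context-restore list, request DMA channels if needed
 * and program a default CHCONF0 via omap2_mcspi_setup_transfer().
 */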
775 static int omap2_mcspi_setup(struct spi_device *spi)
776 {
777 	int			ret;
778 	struct omap2_mcspi	*mcspi;
779 	struct omap2_mcspi_dma	*mcspi_dma;
780 	struct omap2_mcspi_cs	*cs = spi->controller_state;
781 
782 	if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
783 		dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
784 			spi->bits_per_word);
785 		return -EINVAL;
786 	}
787 
788 	mcspi = spi_master_get_devdata(spi->master);
789 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
790 
791 	if (!cs) {
792 		cs = kzalloc(sizeof *cs, GFP_KERNEL);
793 		if (!cs)
794 			return -ENOMEM;
795 		cs->base = mcspi->base + spi->chip_select * 0x14;
796 		cs->phys = mcspi->phys + spi->chip_select * 0x14;
797 		cs->chconf0 = 0;
798 		spi->controller_state = cs;
799 		/* Link this to context save list */
800 		list_add_tail(&cs->node,
801 			&omap2_mcspi_ctx[mcspi->master->bus_num - 1].cs);
802 	}
803 
804 	if (mcspi_dma->dma_rx_channel == -1
805 			|| mcspi_dma->dma_tx_channel == -1) {
806 		ret = omap2_mcspi_request_dma(spi);
807 		if (ret < 0)
808 			return ret;
809 	}
810 
811 	ret = omap2_mcspi_enable_clocks(mcspi);
812 	if (ret < 0)
813 		return ret;
814 
815 	ret = omap2_mcspi_setup_transfer(spi, NULL);
816 	omap2_mcspi_disable_clocks(mcspi);
817 
818 	return ret;
819 }
820 
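/*
 * spi_master cleanup() hook: undo omap2_mcspi_setup() by unlinking and
 * freeing the per-chip-select state and releasing its DMA channels.
 */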
821 static void omap2_mcspi_cleanup(struct spi_device *spi)
822 {
823 	struct omap2_mcspi	*mcspi;
824 	struct omap2_mcspi_dma	*mcspi_dma;
825 	struct omap2_mcspi_cs	*cs;
826 
827 	mcspi = spi_master_get_devdata(spi->master);
828 
829 	if (spi->controller_state) {
830 		/* Unlink controller state from context save list */
831 		cs = spi->controller_state;
832 		list_del(&cs->node);
833 
834 		kfree(spi->controller_state);
835 	}
836 
837 	if (spi->chip_select < spi->master->num_chipselect) {
838 		mcspi_dma = &mcspi->dma_channels[spi->chip_select];
839 
840 		if (mcspi_dma->dma_rx_channel != -1) {
841 			omap_free_dma(mcspi_dma->dma_rx_channel);
842 			mcspi_dma->dma_rx_channel = -1;
843 		}
844 		if (mcspi_dma->dma_tx_channel != -1) {
845 			omap_free_dma(mcspi_dma->dma_tx_channel);
846 			mcspi_dma->dma_tx_channel = -1;
847 		}
848 	}
849 }
850 
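/*
 * Workqueue handler: drain the message queue with the bus clocks enabled,
 * running each transfer over DMA or PIO and driving the chip select
 * manually through the FORCE bit.
 */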
851 static void omap2_mcspi_work(struct work_struct *work)
852 {
853 	struct omap2_mcspi	*mcspi;
854 
855 	mcspi = container_of(work, struct omap2_mcspi, work);
856 
857 	if (omap2_mcspi_enable_clocks(mcspi) < 0)
858 		return;
859 
860 	spin_lock_irq(&mcspi->lock);
861 
862 	/* We only enable one channel at a time -- the one whose message is
863 	 * at the head of the queue -- although this controller would gladly
864 	 * arbitrate among multiple channels.  This corresponds to "single
865 	 * channel" master mode.  As a side effect, we need to manage the
866 	 * chipselect with the FORCE bit ... CS != channel enable.
867 	 */
868 	while (!list_empty(&mcspi->msg_queue)) {
869 		struct spi_message		*m;
870 		struct spi_device		*spi;
871 		struct spi_transfer		*t = NULL;
872 		int				cs_active = 0;
873 		struct omap2_mcspi_cs		*cs;
874 		struct omap2_mcspi_device_config *cd;
875 		int				par_override = 0;
876 		int				status = 0;
877 		u32				chconf;
878 
879 		m = container_of(mcspi->msg_queue.next, struct spi_message,
880 				 queue);
881 
882 		list_del_init(&m->queue);
883 		spin_unlock_irq(&mcspi->lock);
884 
885 		spi = m->spi;
886 		cs = spi->controller_state;
887 		cd = spi->controller_data;
888 
889 		omap2_mcspi_set_enable(spi, 1);
890 		list_for_each_entry(t, &m->transfers, transfer_list) {
891 			if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
892 				status = -EINVAL;
893 				break;
894 			}
895 			if (par_override || t->speed_hz || t->bits_per_word) {
896 				par_override = 1;
897 				status = omap2_mcspi_setup_transfer(spi, t);
898 				if (status < 0)
899 					break;
900 				if (!t->speed_hz && !t->bits_per_word)
901 					par_override = 0;
902 			}
903 
904 			if (!cs_active) {
905 				omap2_mcspi_force_cs(spi, 1);
906 				cs_active = 1;
907 			}
908 
909 			chconf = mcspi_cached_chconf0(spi);
910 			chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
911 			chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
912 
913 			if (t->tx_buf == NULL)
914 				chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
915 			else if (t->rx_buf == NULL)
916 				chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
917 
918 			if (cd && cd->turbo_mode && t->tx_buf == NULL) {
919 				/* Turbo mode is for more than one word */
920 				if (t->len > ((cs->word_len + 7) >> 3))
921 					chconf |= OMAP2_MCSPI_CHCONF_TURBO;
922 			}
923 
924 			mcspi_write_chconf0(spi, chconf);
925 
926 			if (t->len) {
927 				unsigned	count;
928 
929 				/* RX_ONLY mode needs dummy data in TX reg */
930 				if (t->tx_buf == NULL)
931 					__raw_writel(0, cs->base
932 							+ OMAP2_MCSPI_TX0);
933 
934 				if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)
935 					count = omap2_mcspi_txrx_dma(spi, t);
936 				else
937 					count = omap2_mcspi_txrx_pio(spi, t);
938 				m->actual_length += count;
939 
940 				if (count != t->len) {
941 					status = -EIO;
942 					break;
943 				}
944 			}
945 
946 			if (t->delay_usecs)
947 				udelay(t->delay_usecs);
948 
949 			/* ignore the "leave it on after last xfer" hint */
950 			if (t->cs_change) {
951 				omap2_mcspi_force_cs(spi, 0);
952 				cs_active = 0;
953 			}
954 		}
955 
956 		/* Restore defaults if they were overridden */
957 		if (par_override) {
958 			par_override = 0;
959 			status = omap2_mcspi_setup_transfer(spi, NULL);
960 		}
961 
962 		if (cs_active)
963 			omap2_mcspi_force_cs(spi, 0);
964 
965 		omap2_mcspi_set_enable(spi, 0);
966 
967 		m->status = status;
968 		m->complete(m->context);
969 
970 		spin_lock_irq(&mcspi->lock);
971 	}
972 
973 	spin_unlock_irq(&mcspi->lock);
974 
975 	omap2_mcspi_disable_clocks(mcspi);
976 }
977 
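/*
 * spi_master transfer() hook: validate the message, DMA-map any large
 * unmapped buffers, then queue the message for omap2_mcspi_work().  This
 * hook is not allowed to sleep, so the actual I/O is deferred to the
 * workqueue.
 */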
978 static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
979 {
980 	struct omap2_mcspi	*mcspi;
981 	unsigned long		flags;
982 	struct spi_transfer	*t;
983 
984 	m->actual_length = 0;
985 	m->status = 0;
986 
987 	/* reject invalid messages and transfers */
988 	if (list_empty(&m->transfers) || !m->complete)
989 		return -EINVAL;
990 	list_for_each_entry(t, &m->transfers, transfer_list) {
991 		const void	*tx_buf = t->tx_buf;
992 		void		*rx_buf = t->rx_buf;
993 		unsigned	len = t->len;
994 
995 		if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ
996 				|| (len && !(rx_buf || tx_buf))
997 				|| (t->bits_per_word &&
998 					(  t->bits_per_word < 4
999 					|| t->bits_per_word > 32))) {
1000 			dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
1001 					t->speed_hz,
1002 					len,
1003 					tx_buf ? "tx" : "",
1004 					rx_buf ? "rx" : "",
1005 					t->bits_per_word);
1006 			return -EINVAL;
1007 		}
1008 		if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) {
1009 			dev_dbg(&spi->dev, "speed_hz %d below minimum %d Hz\n",
1010 				t->speed_hz,
1011 				OMAP2_MCSPI_MAX_FREQ >> 15);
1012 			return -EINVAL;
1013 		}
1014 
1015 		if (m->is_dma_mapped || len < DMA_MIN_BYTES)
1016 			continue;
1017 
1018 		if (tx_buf != NULL) {
1019 			t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
1020 					len, DMA_TO_DEVICE);
1021 			if (dma_mapping_error(&spi->dev, t->tx_dma)) {
1022 				dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
1023 						'T', len);
1024 				return -EINVAL;
1025 			}
1026 		}
1027 		if (rx_buf != NULL) {
1028 			t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len,
1029 					DMA_FROM_DEVICE);
1030 			if (dma_mapping_error(&spi->dev, t->rx_dma)) {
1031 				dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
1032 						'R', len);
1033 				if (tx_buf != NULL)
1034 					dma_unmap_single(&spi->dev, t->tx_dma,
1035 							len, DMA_TO_DEVICE);
1036 				return -EINVAL;
1037 			}
1038 		}
1039 	}
1040 
1041 	mcspi = spi_master_get_devdata(spi->master);
1042 
1043 	spin_lock_irqsave(&mcspi->lock, flags);
1044 	list_add_tail(&m->queue, &mcspi->msg_queue);
1045 	queue_work(mcspi->wq, &mcspi->work);
1046 	spin_unlock_irqrestore(&mcspi->lock, flags);
1047 
1048 	return 0;
1049 }
1050 
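/*
 * One-time controller initialisation: enable wake-up events and switch the
 * module from its reset-default slave mode into single-channel master mode,
 * caching both register values for later context restore.
 */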
1051 static int __init omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
1052 {
1053 	struct spi_master	*master = mcspi->master;
1054 	u32			tmp;
1055 	int ret = 0;
1056 
1057 	ret = omap2_mcspi_enable_clocks(mcspi);
1058 	if (ret < 0)
1059 		return ret;
1060 
1061 	tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
1062 	mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp);
1063 	omap2_mcspi_ctx[master->bus_num - 1].wakeupenable = tmp;
1064 
1065 	omap2_mcspi_set_master_mode(master);
1066 	omap2_mcspi_disable_clocks(mcspi);
1067 	return 0;
1068 }
1069 
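/*
 * Runtime-resume hook: the McSPI register context may have been lost while
 * the device was idled, so restore MODULCTRL, WAKEUPENABLE and every cached
 * CHCONF0.
 */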
1070 static int omap_mcspi_runtime_resume(struct device *dev)
1071 {
1072 	struct omap2_mcspi	*mcspi;
1073 	struct spi_master	*master;
1074 
1075 	master = dev_get_drvdata(dev);
1076 	mcspi = spi_master_get_devdata(master);
1077 	omap2_mcspi_restore_ctx(mcspi);
1078 
1079 	return 0;
1080 }
1081 
1082 
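/*
 * Probe: allocate the spi_master and driver state, map the controller
 * registers, look up the RX/TX DMA request lines for each chip select,
 * enable runtime PM and register the master with the SPI core.
 */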
1083 static int __init omap2_mcspi_probe(struct platform_device *pdev)
1084 {
1085 	struct spi_master	*master;
1086 	struct omap2_mcspi_platform_config *pdata = pdev->dev.platform_data;
1087 	struct omap2_mcspi	*mcspi;
1088 	struct resource		*r;
1089 	int			status = 0, i;
1090 	char			wq_name[20];
1091 
1092 	master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
1093 	if (master == NULL) {
1094 		dev_dbg(&pdev->dev, "master allocation failed\n");
1095 		return -ENOMEM;
1096 	}
1097 
1098 	/* the spi->mode bits understood by this driver: */
1099 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1100 
1101 	if (pdev->id != -1)
1102 		master->bus_num = pdev->id;
1103 
1104 	master->setup = omap2_mcspi_setup;
1105 	master->transfer = omap2_mcspi_transfer;
1106 	master->cleanup = omap2_mcspi_cleanup;
1107 	master->num_chipselect = pdata->num_cs;
1108 
1109 	dev_set_drvdata(&pdev->dev, master);
1110 
1111 	mcspi = spi_master_get_devdata(master);
1112 	mcspi->master = master;
1113 
1114 	sprintf(wq_name, "omap2_mcspi/%d", master->bus_num);
1115 	mcspi->wq = alloc_workqueue(wq_name, WQ_MEM_RECLAIM, 1);
1116 	if (mcspi->wq == NULL) {
1117 		status = -ENOMEM;
1118 		goto free_master;
1119 	}
1120 
1121 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1122 	if (r == NULL) {
1123 		status = -ENODEV;
1124 		goto free_master;
1125 	}
1126 
1127 	r->start += pdata->regs_offset;
1128 	r->end += pdata->regs_offset;
1129 	mcspi->phys = r->start;
1130 	if (!request_mem_region(r->start, resource_size(r),
1131 				dev_name(&pdev->dev))) {
1132 		status = -EBUSY;
1133 		goto free_master;
1134 	}
1135 
1136 	mcspi->base = ioremap(r->start, resource_size(r));
1137 	if (!mcspi->base) {
1138 		dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
1139 		status = -ENOMEM;
1140 		goto release_region;
1141 	}
1142 
1143 	mcspi->dev = &pdev->dev;
1144 	INIT_WORK(&mcspi->work, omap2_mcspi_work);
1145 
1146 	spin_lock_init(&mcspi->lock);
1147 	INIT_LIST_HEAD(&mcspi->msg_queue);
1148 	INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs);
1149 
1150 	mcspi->dma_channels = kcalloc(master->num_chipselect,
1151 			sizeof(struct omap2_mcspi_dma),
1152 			GFP_KERNEL);
1153 
1154 	if (mcspi->dma_channels == NULL) {
		status = -ENOMEM;
		goto unmap_io;
	}
1156 
1157 	for (i = 0; i < master->num_chipselect; i++) {
1158 		char dma_ch_name[14];
1159 		struct resource *dma_res;
1160 
1161 		sprintf(dma_ch_name, "rx%d", i);
1162 		dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
1163 							dma_ch_name);
1164 		if (!dma_res) {
1165 			dev_dbg(&pdev->dev, "cannot get DMA RX channel\n");
1166 			status = -ENODEV;
1167 			break;
1168 		}
1169 
1170 		mcspi->dma_channels[i].dma_rx_channel = -1;
1171 		mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
1172 		sprintf(dma_ch_name, "tx%d", i);
1173 		dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
1174 							dma_ch_name);
1175 		if (!dma_res) {
1176 			dev_dbg(&pdev->dev, "cannot get DMA TX channel\n");
1177 			status = -ENODEV;
1178 			break;
1179 		}
1180 
1181 		mcspi->dma_channels[i].dma_tx_channel = -1;
1182 		mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
1183 	}
1184 
1185 	if (status < 0)
1186 		goto dma_chnl_free;
1187 
1188 	pm_runtime_enable(&pdev->dev);
1189 
1190 	status = omap2_mcspi_master_setup(mcspi);
1191 	if (status < 0)
		goto disable_pm;
1192 
1193 	status = spi_register_master(master);
1194 	if (status < 0)
1195 		goto err_spi_register;
1196 
1197 	return status;
1198 
1199 err_spi_register:
1200 	spi_master_put(master);
1201 disable_pm:
1202 	pm_runtime_disable(&pdev->dev);
1203 dma_chnl_free:
1204 	kfree(mcspi->dma_channels);
1205 unmap_io:
1206 	iounmap(mcspi->base);
1207 release_region:
1208 	release_mem_region(r->start, resource_size(r));
1209 free_master:
1210 	kfree(master);
1211 	platform_set_drvdata(pdev, NULL);
1212 	return status;
1213 }
1214 
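/*
 * Remove: tear down everything set up by probe: disable the clocks and
 * runtime PM, release the register memory region, unregister the master,
 * unmap the registers, and free the DMA channel table and workqueue.
 */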
1215 static int __exit omap2_mcspi_remove(struct platform_device *pdev)
1216 {
1217 	struct spi_master	*master;
1218 	struct omap2_mcspi	*mcspi;
1219 	struct omap2_mcspi_dma	*dma_channels;
1220 	struct resource		*r;
1221 	void __iomem *base;
1222 
1223 	master = dev_get_drvdata(&pdev->dev);
1224 	mcspi = spi_master_get_devdata(master);
1225 	dma_channels = mcspi->dma_channels;
1226 
1227 	omap2_mcspi_disable_clocks(mcspi);
1228 	pm_runtime_disable(&pdev->dev);
1229 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1230 	release_mem_region(r->start, resource_size(r));
1231 
1232 	base = mcspi->base;
1233 	spi_unregister_master(master);
1234 	iounmap(base);
1235 	kfree(dma_channels);
1236 	destroy_workqueue(mcspi->wq);
1237 	platform_set_drvdata(pdev, NULL);
1238 
1239 	return 0;
1240 }
1241 
1242 /* work with hotplug and coldplug */
1243 MODULE_ALIAS("platform:omap2_mcspi");
1244 
1245 #ifdef	CONFIG_SUSPEND
1246 /*
1247  * When the SPI controller wakes up from off-mode, CS is in the active state.
1248  * If it was inactive when the driver was suspended, force it back to the
1249  * inactive state at wake-up.
1250  */
1251 static int omap2_mcspi_resume(struct device *dev)
1252 {
1253 	struct spi_master	*master = dev_get_drvdata(dev);
1254 	struct omap2_mcspi	*mcspi = spi_master_get_devdata(master);
1255 	struct omap2_mcspi_cs *cs;
1256 
1257 	omap2_mcspi_enable_clocks(mcspi);
1258 	list_for_each_entry(cs, &omap2_mcspi_ctx[master->bus_num - 1].cs,
1259 			    node) {
1260 		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
1261 
1262 			/*
1263 			 * We need to toggle the CS state for the OMAP to take
1264 			 * this change into account.
1265 			 */
1266 			MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 1);
1267 			__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
1268 			MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 0);
1269 			__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
1270 		}
1271 	}
1272 	omap2_mcspi_disable_clocks(mcspi);
1273 	return 0;
1274 }
1275 #else
1276 #define	omap2_mcspi_resume	NULL
1277 #endif
1278 
1279 static const struct dev_pm_ops omap2_mcspi_pm_ops = {
1280 	.resume = omap2_mcspi_resume,
1281 	.runtime_resume	= omap_mcspi_runtime_resume,
1282 };
1283 
1284 static struct platform_driver omap2_mcspi_driver = {
1285 	.driver = {
1286 		.name =		"omap2_mcspi",
1287 		.owner =	THIS_MODULE,
1288 		.pm =		&omap2_mcspi_pm_ops
1289 	},
1290 	.remove =	__exit_p(omap2_mcspi_remove),
1291 };
1292 
1293 
1294 static int __init omap2_mcspi_init(void)
1295 {
1296 	return platform_driver_probe(&omap2_mcspi_driver, omap2_mcspi_probe);
1297 }
1298 subsys_initcall(omap2_mcspi_init);
1299 
1300 static void __exit omap2_mcspi_exit(void)
1301 {
1302 	platform_driver_unregister(&omap2_mcspi_driver);
1304 }
1305 module_exit(omap2_mcspi_exit);
1306 
1307 MODULE_LICENSE("GPL");
1308