// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale eSPI controller driver.
 *
 * Copyright 2010 Freescale Semiconductor, Inc.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fsl_devices.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/pm_runtime.h>
#include <sysdev/fsl_soc.h>

/* eSPI Controller registers */
#define ESPI_SPMODE	0x00	/* eSPI mode register */
#define ESPI_SPIE	0x04	/* eSPI event register */
#define ESPI_SPIM	0x08	/* eSPI mask register */
#define ESPI_SPCOM	0x0c	/* eSPI command register */
#define ESPI_SPITF	0x10	/* eSPI transmit FIFO access register */
#define ESPI_SPIRF	0x14	/* eSPI receive FIFO access register */
#define ESPI_SPMODE0	0x20	/* eSPI cs0 mode register */

#define ESPI_SPMODEx(x)	(ESPI_SPMODE0 + (x) * 4)

/* eSPI Controller mode register definitions */
#define SPMODE_ENABLE		BIT(31)
#define SPMODE_LOOP		BIT(30)
#define SPMODE_TXTHR(x)		((x) << 8)
#define SPMODE_RXTHR(x)		((x) << 0)

/* eSPI Controller CS mode register definitions */
#define CSMODE_CI_INACTIVEHIGH	BIT(31)
#define CSMODE_CP_BEGIN_EDGECLK	BIT(30)
#define CSMODE_REV		BIT(29)
#define CSMODE_DIV16		BIT(28)
#define CSMODE_PM(x)		((x) << 24)
#define CSMODE_POL_1		BIT(20)
#define CSMODE_LEN(x)		((x) << 16)
#define CSMODE_BEF(x)		((x) << 12)
#define CSMODE_AFT(x)		((x) << 8)
#define CSMODE_CG(x)		((x) << 3)

#define FSL_ESPI_FIFO_SIZE	32
#define FSL_ESPI_RXTHR		15

/* Default mode/csmode for eSPI controller */
#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(FSL_ESPI_RXTHR))
#define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \
		| CSMODE_AFT(0) | CSMODE_CG(1))

/* SPIE register values */
#define SPIE_RXCNT(reg)     ((reg >> 24) & 0x3F)
#define SPIE_TXCNT(reg)     ((reg >> 16) & 0x3F)
#define	SPIE_TXE		BIT(15)	/* TX FIFO empty */
#define	SPIE_DON		BIT(14)	/* TX done */
#define	SPIE_RXT		BIT(13)	/* RX FIFO threshold */
#define	SPIE_RXF		BIT(12)	/* RX FIFO full */
#define	SPIE_TXT		BIT(11)	/* TX FIFO threshold */
#define	SPIE_RNE		BIT(9)	/* RX FIFO not empty */
#define	SPIE_TNF		BIT(8)	/* TX FIFO not full */

/* SPIM register values */
#define	SPIM_TXE		BIT(15)	/* TX FIFO empty */
#define	SPIM_DON		BIT(14)	/* TX done */
#define	SPIM_RXT		BIT(13)	/* RX FIFO threshold */
#define	SPIM_RXF		BIT(12)	/* RX FIFO full */
#define	SPIM_TXT		BIT(11)	/* TX FIFO threshold */
#define	SPIM_RNE		BIT(9)	/* RX FIFO not empty */
#define	SPIM_TNF		BIT(8)	/* TX FIFO not full */

/* SPCOM register values */
#define SPCOM_CS(x)		((x) << 30)
#define SPCOM_DO		BIT(28) /* Dual output */
#define SPCOM_TO		BIT(27) /* TX only */
#define SPCOM_RXSKIP(x)		((x) << 16)
#define SPCOM_TRANLEN(x)	((x) << 0)

#define	SPCOM_TRANLEN_MAX	0x10000	/* Max transaction length */

#define AUTOSUSPEND_TIMEOUT 2000

struct fsl_espi {
	struct device *dev;
	void __iomem *reg_base;

	struct list_head *m_transfers;
	struct spi_transfer *tx_t;
	unsigned int tx_pos;
	bool tx_done;
	struct spi_transfer *rx_t;
	unsigned int rx_pos;
	bool rx_done;

	bool swab;
	unsigned int rxskip;

	spinlock_t lock;

	u32 spibrg;             /* SPIBRG input clock */

	struct completion done;
};

struct fsl_espi_cs {
	u32 hw_mode;
};

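/*
 * Register accessors: the eSPI block is memory mapped with big-endian
 * registers, so 32-bit and 16-bit accesses go through ioread/iowrite*be,
 * while single-byte FIFO accesses use ioread8/iowrite8.
 */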
static inline u32 fsl_espi_read_reg(struct fsl_espi *espi, int offset)
{
	return ioread32be(espi->reg_base + offset);
}

static inline u16 fsl_espi_read_reg16(struct fsl_espi *espi, int offset)
{
	return ioread16be(espi->reg_base + offset);
}

static inline u8 fsl_espi_read_reg8(struct fsl_espi *espi, int offset)
{
	return ioread8(espi->reg_base + offset);
}

static inline void fsl_espi_write_reg(struct fsl_espi *espi, int offset,
				      u32 val)
{
	iowrite32be(val, espi->reg_base + offset);
}

static inline void fsl_espi_write_reg16(struct fsl_espi *espi, int offset,
					u16 val)
{
	iowrite16be(val, espi->reg_base + offset);
}

static inline void fsl_espi_write_reg8(struct fsl_espi *espi, int offset,
				       u8 val)
{
	iowrite8(val, espi->reg_base + offset);
}

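/*
 * Validate a message against the controller's limitations: the whole
 * message must fit into one SPCOM transaction, all transfers must share
 * the same bits_per_word and speed_hz, and MSB-first mode is only
 * possible for word sizes of 8 or 16 bits.
 */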
static int fsl_espi_check_message(struct spi_message *m)
{
	struct fsl_espi *espi = spi_controller_get_devdata(m->spi->controller);
	struct spi_transfer *t, *first;

	if (m->frame_length > SPCOM_TRANLEN_MAX) {
		dev_err(espi->dev, "message too long, size is %u bytes\n",
			m->frame_length);
		return -EMSGSIZE;
	}

	first = list_first_entry(&m->transfers, struct spi_transfer,
				 transfer_list);

	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (first->bits_per_word != t->bits_per_word ||
		    first->speed_hz != t->speed_hz) {
			dev_err(espi->dev, "bits_per_word/speed_hz should be the same for all transfers\n");
			return -EINVAL;
		}
	}

	/* ESPI supports MSB-first transfers for word size 8 / 16 only */
	if (!(m->spi->mode & SPI_LSB_FIRST) && first->bits_per_word != 8 &&
	    first->bits_per_word != 16) {
		dev_err(espi->dev,
			"MSB-first transfer not supported for wordsize %u\n",
			first->bits_per_word);
		return -EINVAL;
	}

	return 0;
}

static unsigned int fsl_espi_check_rxskip_mode(struct spi_message *m)
{
	struct spi_transfer *t;
	unsigned int i = 0, rxskip = 0;

	/*
	 * prerequisites for ESPI rxskip mode:
	 * - message has two transfers
	 * - first transfer is a write and second is a read
	 *
	 * In addition the current low-level transfer mechanism requires
	 * that the rxskip bytes fit into the TX FIFO. Else the transfer
	 * would hang because after the first FSL_ESPI_FIFO_SIZE bytes
	 * the TX FIFO isn't re-filled.
	 */
	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (i == 0) {
			if (!t->tx_buf || t->rx_buf ||
			    t->len > FSL_ESPI_FIFO_SIZE)
				return 0;
			rxskip = t->len;
		} else if (i == 1) {
			if (t->tx_buf || !t->rx_buf)
				return 0;
		}
		i++;
	}

	return i == 2 ? rxskip : 0;
}

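/*
 * Push as many TX bytes as the hardware currently accepts. When called
 * with events == 0 the transfer has not started yet, so the full FIFO
 * depth is available; otherwise SPIE_TXCNT(events) reports the free
 * space. Data is written in 32-, 16- or 8-bit chunks (byte-swapped when
 * espi->swab is set) and, once the current spi_transfer is drained, the
 * code advances to the next transfer in the message.
 */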
static void fsl_espi_fill_tx_fifo(struct fsl_espi *espi, u32 events)
{
	u32 tx_fifo_avail;
	unsigned int tx_left;
	const void *tx_buf;

	/* if events is zero transfer has not started and tx fifo is empty */
	tx_fifo_avail = events ? SPIE_TXCNT(events) : FSL_ESPI_FIFO_SIZE;
start:
	tx_left = espi->tx_t->len - espi->tx_pos;
	tx_buf = espi->tx_t->tx_buf;
	while (tx_fifo_avail >= min(4U, tx_left) && tx_left) {
		if (tx_left >= 4) {
			if (!tx_buf)
				fsl_espi_write_reg(espi, ESPI_SPITF, 0);
			else if (espi->swab)
				fsl_espi_write_reg(espi, ESPI_SPITF,
					swahb32p(tx_buf + espi->tx_pos));
			else
				fsl_espi_write_reg(espi, ESPI_SPITF,
					*(u32 *)(tx_buf + espi->tx_pos));
			espi->tx_pos += 4;
			tx_left -= 4;
			tx_fifo_avail -= 4;
		} else if (tx_left >= 2 && tx_buf && espi->swab) {
			fsl_espi_write_reg16(espi, ESPI_SPITF,
					swab16p(tx_buf + espi->tx_pos));
			espi->tx_pos += 2;
			tx_left -= 2;
			tx_fifo_avail -= 2;
		} else {
			if (!tx_buf)
				fsl_espi_write_reg8(espi, ESPI_SPITF, 0);
			else
				fsl_espi_write_reg8(espi, ESPI_SPITF,
					*(u8 *)(tx_buf + espi->tx_pos));
			espi->tx_pos += 1;
			tx_left -= 1;
			tx_fifo_avail -= 1;
		}
	}

	if (!tx_left) {
		/* Last transfer finished, in rxskip mode only one is needed */
		if (list_is_last(&espi->tx_t->transfer_list,
		    espi->m_transfers) || espi->rxskip) {
			espi->tx_done = true;
			return;
		}
		espi->tx_t = list_next_entry(espi->tx_t, transfer_list);
		espi->tx_pos = 0;
		/* continue with next transfer if tx fifo is not full */
		if (tx_fifo_avail)
			goto start;
	}
}

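/*
 * Drain the RX FIFO. SPIE_RXCNT(events) tells how many bytes are
 * currently available; they are read in 32/16/8-bit chunks (with the
 * same optional byte swapping as the TX path) and, once the current
 * spi_transfer is complete, reading continues with the next transfer
 * in the message.
 */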
static void fsl_espi_read_rx_fifo(struct fsl_espi *espi, u32 events)
{
	u32 rx_fifo_avail = SPIE_RXCNT(events);
	unsigned int rx_left;
	void *rx_buf;

start:
	rx_left = espi->rx_t->len - espi->rx_pos;
	rx_buf = espi->rx_t->rx_buf;
	while (rx_fifo_avail >= min(4U, rx_left) && rx_left) {
		if (rx_left >= 4) {
			u32 val = fsl_espi_read_reg(espi, ESPI_SPIRF);

			if (rx_buf && espi->swab)
				*(u32 *)(rx_buf + espi->rx_pos) = swahb32(val);
			else if (rx_buf)
				*(u32 *)(rx_buf + espi->rx_pos) = val;
			espi->rx_pos += 4;
			rx_left -= 4;
			rx_fifo_avail -= 4;
		} else if (rx_left >= 2 && rx_buf && espi->swab) {
			u16 val = fsl_espi_read_reg16(espi, ESPI_SPIRF);

			*(u16 *)(rx_buf + espi->rx_pos) = swab16(val);
			espi->rx_pos += 2;
			rx_left -= 2;
			rx_fifo_avail -= 2;
		} else {
			u8 val = fsl_espi_read_reg8(espi, ESPI_SPIRF);

			if (rx_buf)
				*(u8 *)(rx_buf + espi->rx_pos) = val;
			espi->rx_pos += 1;
			rx_left -= 1;
			rx_fifo_avail -= 1;
		}
	}

	if (!rx_left) {
		if (list_is_last(&espi->rx_t->transfer_list,
		    espi->m_transfers)) {
			espi->rx_done = true;
			return;
		}
		espi->rx_t = list_next_entry(espi->rx_t, transfer_list);
		espi->rx_pos = 0;
		/* continue with next transfer if rx fifo is not empty */
		if (rx_fifo_avail)
			goto start;
	}
}

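/*
 * Program word length and clock divider for this chip select. The divider
 * code below implies a bit clock of roughly spibrg / (4 * (PM + 1)), with
 * an extra /16 prescaler (DIV16) when PM alone cannot divide far enough.
 * Illustrative example: with a 500 MHz SPIBRG and a requested 10 MHz,
 * PM becomes 12 and the resulting clock is about 9.6 MHz.
 */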
static void fsl_espi_setup_transfer(struct spi_device *spi,
					struct spi_transfer *t)
{
	struct fsl_espi *espi = spi_controller_get_devdata(spi->controller);
	int bits_per_word = t ? t->bits_per_word : spi->bits_per_word;
	u32 pm, hz = t ? t->speed_hz : spi->max_speed_hz;
	struct fsl_espi_cs *cs = spi_get_ctldata(spi);
	u32 hw_mode_old = cs->hw_mode;

	/* mask out bits we are going to set */
	cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF));

	cs->hw_mode |= CSMODE_LEN(bits_per_word - 1);

	pm = DIV_ROUND_UP(espi->spibrg, hz * 4) - 1;

	if (pm > 15) {
		cs->hw_mode |= CSMODE_DIV16;
		pm = DIV_ROUND_UP(espi->spibrg, hz * 16 * 4) - 1;
	}

	cs->hw_mode |= CSMODE_PM(pm);

	/* don't write the mode register if the mode doesn't change */
	if (cs->hw_mode != hw_mode_old)
		fsl_espi_write_reg(espi, ESPI_SPMODEx(spi_get_chipselect(spi, 0)),
				   cs->hw_mode);
}

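/*
 * Run one hardware transaction: program SPCOM with chip select, length
 * and (optionally) RXSKIP/dual-output mode, unmask the "done" (and, for
 * long reads, RX threshold) interrupts, prime the TX FIFO and then wait
 * for the interrupt handler to signal completion.
 */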
static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct fsl_espi *espi = spi_controller_get_devdata(spi->controller);
	unsigned int rx_len = t->len;
	u32 mask, spcom;
	int ret;

	reinit_completion(&espi->done);

	/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
	spcom = SPCOM_CS(spi_get_chipselect(spi, 0));
	spcom |= SPCOM_TRANLEN(t->len - 1);

	/* configure RXSKIP mode */
	if (espi->rxskip) {
		spcom |= SPCOM_RXSKIP(espi->rxskip);
		rx_len = t->len - espi->rxskip;
		if (t->rx_nbits == SPI_NBITS_DUAL)
			spcom |= SPCOM_DO;
	}

	fsl_espi_write_reg(espi, ESPI_SPCOM, spcom);

	/* enable interrupts */
	mask = SPIM_DON;
	if (rx_len > FSL_ESPI_FIFO_SIZE)
		mask |= SPIM_RXT;
	fsl_espi_write_reg(espi, ESPI_SPIM, mask);

	/* Prevent filling the fifo from getting interrupted */
	spin_lock_irq(&espi->lock);
	fsl_espi_fill_tx_fifo(espi, 0);
	spin_unlock_irq(&espi->lock);

	/* Don't wait forever; the SPI bus occasionally loses interrupts */
	ret = wait_for_completion_timeout(&espi->done, 2 * HZ);
	if (ret == 0)
		dev_err(espi->dev, "Transfer timed out!\n");

	/* disable rx ints */
	fsl_espi_write_reg(espi, ESPI_SPIM, 0);

	return ret == 0 ? -ETIMEDOUT : 0;
}

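/*
 * Prepare the per-message TX/RX bookkeeping, decide whether RXSKIP mode
 * can be used (it is required for dual-output reads), program the
 * chip-select mode register and kick off the transaction.
 */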
static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans)
{
	struct fsl_espi *espi = spi_controller_get_devdata(m->spi->controller);
	struct spi_device *spi = m->spi;
	int ret;

	/* In case of LSB-first and bits_per_word > 8 byte-swap all words */
	espi->swab = spi->mode & SPI_LSB_FIRST && trans->bits_per_word > 8;

	espi->m_transfers = &m->transfers;
	espi->tx_t = list_first_entry(&m->transfers, struct spi_transfer,
				      transfer_list);
	espi->tx_pos = 0;
	espi->tx_done = false;
	espi->rx_t = list_first_entry(&m->transfers, struct spi_transfer,
				      transfer_list);
	espi->rx_pos = 0;
	espi->rx_done = false;

	espi->rxskip = fsl_espi_check_rxskip_mode(m);
	if (trans->rx_nbits == SPI_NBITS_DUAL && !espi->rxskip) {
		dev_err(espi->dev, "Dual output mode requires RXSKIP mode!\n");
		return -EINVAL;
	}

	/* In RXSKIP mode skip first transfer for reads */
	if (espi->rxskip)
		espi->rx_t = list_next_entry(espi->rx_t, transfer_list);

	fsl_espi_setup_transfer(spi, trans);

	ret = fsl_espi_bufs(spi, trans);

	spi_transfer_delay_exec(trans);

	return ret;
}

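/*
 * Message pump callback: validate the message, then collapse all its
 * transfers into a single synthetic spi_transfer covering frame_length
 * bytes (using the common speed/word size and the longest per-transfer
 * delay) and hand it to fsl_espi_trans().
 */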
static int fsl_espi_do_one_msg(struct spi_controller *host,
			       struct spi_message *m)
{
	unsigned int rx_nbits = 0, delay_nsecs = 0;
	struct spi_transfer *t, trans = {};
	int ret;

	ret = fsl_espi_check_message(m);
	if (ret)
		goto out;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		unsigned int delay = spi_delay_to_ns(&t->delay, t);

		if (delay > delay_nsecs)
			delay_nsecs = delay;
		if (t->rx_nbits > rx_nbits)
			rx_nbits = t->rx_nbits;
	}

	t = list_first_entry(&m->transfers, struct spi_transfer,
			     transfer_list);

	trans.len = m->frame_length;
	trans.speed_hz = t->speed_hz;
	trans.bits_per_word = t->bits_per_word;
	trans.delay.value = delay_nsecs;
	trans.delay.unit = SPI_DELAY_UNIT_NSECS;
	trans.rx_nbits = rx_nbits;

	if (trans.len)
		ret = fsl_espi_trans(m, &trans);

	m->actual_length = ret ? 0 : trans.len;
out:
	if (m->status == -EINPROGRESS)
		m->status = ret;

	spi_finalize_current_message(host);

	return ret;
}

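/*
 * Per-device setup: allocate the per-chip-select state on first use and
 * translate the SPI mode flags (CPOL/CPHA/LSB-first/loopback) into the
 * corresponding CSMODE and SPMODE register bits.
 */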
static int fsl_espi_setup(struct spi_device *spi)
{
	struct fsl_espi *espi;
	u32 loop_mode;
	struct fsl_espi_cs *cs = spi_get_ctldata(spi);

	if (!cs) {
		cs = kzalloc(sizeof(*cs), GFP_KERNEL);
		if (!cs)
			return -ENOMEM;
		spi_set_ctldata(spi, cs);
	}

	espi = spi_controller_get_devdata(spi->controller);

	pm_runtime_get_sync(espi->dev);

	cs->hw_mode = fsl_espi_read_reg(espi, ESPI_SPMODEx(spi_get_chipselect(spi, 0)));
	/* mask out bits we are going to set */
	cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH
			 | CSMODE_REV);

	if (spi->mode & SPI_CPHA)
		cs->hw_mode |= CSMODE_CP_BEGIN_EDGECLK;
	if (spi->mode & SPI_CPOL)
		cs->hw_mode |= CSMODE_CI_INACTIVEHIGH;
	if (!(spi->mode & SPI_LSB_FIRST))
		cs->hw_mode |= CSMODE_REV;

	/* Handle the loop mode */
	loop_mode = fsl_espi_read_reg(espi, ESPI_SPMODE);
	loop_mode &= ~SPMODE_LOOP;
	if (spi->mode & SPI_LOOP)
		loop_mode |= SPMODE_LOOP;
	fsl_espi_write_reg(espi, ESPI_SPMODE, loop_mode);

	fsl_espi_setup_transfer(spi, NULL);

	pm_runtime_mark_last_busy(espi->dev);
	pm_runtime_put_autosuspend(espi->dev);

	return 0;
}

static void fsl_espi_cleanup(struct spi_device *spi)
{
	struct fsl_espi_cs *cs = spi_get_ctldata(spi);

	kfree(cs);
	spi_set_ctldata(spi, NULL);
}

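/*
 * Interrupt work, called from fsl_espi_irq() with espi->lock held:
 * service the RX and TX FIFOs, and once both directions are finished
 * sanity-check the event register before completing the transfer.
 */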
static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events)
{
	if (!espi->rx_done)
		fsl_espi_read_rx_fifo(espi, events);

	if (!espi->tx_done)
		fsl_espi_fill_tx_fifo(espi, events);

	if (!espi->tx_done || !espi->rx_done)
		return;

	/* we're done, but check for errors before returning */
	events = fsl_espi_read_reg(espi, ESPI_SPIE);

	if (!(events & SPIE_DON))
		dev_err(espi->dev,
			"Transfer done but SPIE_DON isn't set!\n");

	if (SPIE_RXCNT(events) || SPIE_TXCNT(events) != FSL_ESPI_FIFO_SIZE) {
		dev_err(espi->dev, "Transfer done but rx/tx fifo's aren't empty!\n");
		dev_err(espi->dev, "SPIE_RXCNT = %d, SPIE_TXCNT = %d\n",
			SPIE_RXCNT(events), SPIE_TXCNT(events));
	}

	complete(&espi->done);
}

static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
{
	struct fsl_espi *espi = context_data;
	u32 events, mask;

	spin_lock(&espi->lock);

	/* Get interrupt events (tx/rx) */
	events = fsl_espi_read_reg(espi, ESPI_SPIE);
	mask = fsl_espi_read_reg(espi, ESPI_SPIM);
	if (!(events & mask)) {
		spin_unlock(&espi->lock);
		return IRQ_NONE;
	}

	dev_vdbg(espi->dev, "%s: events %x\n", __func__, events);

	fsl_espi_cpu_irq(espi, events);

	/* Clear the events */
	fsl_espi_write_reg(espi, ESPI_SPIE, events);

	spin_unlock(&espi->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_PM
static int fsl_espi_runtime_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct fsl_espi *espi = spi_controller_get_devdata(host);
	u32 regval;

	regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
	regval &= ~SPMODE_ENABLE;
	fsl_espi_write_reg(espi, ESPI_SPMODE, regval);

	return 0;
}

static int fsl_espi_runtime_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct fsl_espi *espi = spi_controller_get_devdata(host);
	u32 regval;

	regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
	regval |= SPMODE_ENABLE;
	fsl_espi_write_reg(espi, ESPI_SPMODE, regval);

	return 0;
}
#endif

static size_t fsl_espi_max_message_size(struct spi_device *spi)
{
	return SPCOM_TRANLEN_MAX;
}

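/*
 * (Re)initialize the controller registers: clear mode/mask/command,
 * acknowledge any stale events, program each chip select's CSMODE from
 * the CSMODE_INIT_VAL defaults plus the optional fsl,csbef/fsl,csaft
 * device-tree properties, then enable the block. Also used on resume,
 * with "initial" controlling the informational printout.
 */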
static void fsl_espi_init_regs(struct device *dev, bool initial)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct fsl_espi *espi = spi_controller_get_devdata(host);
	struct device_node *nc;
	u32 csmode, cs, prop;
	int ret;

	/* SPI controller initializations */
	fsl_espi_write_reg(espi, ESPI_SPMODE, 0);
	fsl_espi_write_reg(espi, ESPI_SPIM, 0);
	fsl_espi_write_reg(espi, ESPI_SPCOM, 0);
	fsl_espi_write_reg(espi, ESPI_SPIE, 0xffffffff);

	/* Init eSPI CS mode register */
	for_each_available_child_of_node(host->dev.of_node, nc) {
		/* get chip select */
		ret = of_property_read_u32(nc, "reg", &cs);
		if (ret || cs >= host->num_chipselect)
			continue;

		csmode = CSMODE_INIT_VAL;

		/* check if CSBEF is set in device tree */
		ret = of_property_read_u32(nc, "fsl,csbef", &prop);
		if (!ret) {
			csmode &= ~(CSMODE_BEF(0xf));
			csmode |= CSMODE_BEF(prop);
		}

		/* check if CSAFT is set in device tree */
		ret = of_property_read_u32(nc, "fsl,csaft", &prop);
		if (!ret) {
			csmode &= ~(CSMODE_AFT(0xf));
			csmode |= CSMODE_AFT(prop);
		}

		fsl_espi_write_reg(espi, ESPI_SPMODEx(cs), csmode);

		if (initial)
			dev_info(dev, "cs=%u, init_csmode=0x%x\n", cs, csmode);
	}

	/* Enable SPI interface */
	fsl_espi_write_reg(espi, ESPI_SPMODE, SPMODE_INIT_VAL | SPMODE_ENABLE);
}

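/*
 * Common probe path: allocate and populate the SPI controller, derive
 * the supported speed range from the SPIBRG clock and the DIV16/PM
 * divider limits, map the registers, request the IRQ, initialize the
 * hardware and register the controller with runtime PM enabled.
 */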
static int fsl_espi_probe(struct device *dev, struct resource *mem,
			  unsigned int irq, unsigned int num_cs)
{
	struct spi_controller *host;
	struct fsl_espi *espi;
	int ret;

	host = spi_alloc_host(dev, sizeof(struct fsl_espi));
	if (!host)
		return -ENOMEM;

	dev_set_drvdata(dev, host);

	host->mode_bits = SPI_RX_DUAL | SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
			  SPI_LSB_FIRST | SPI_LOOP;
	host->dev.of_node = dev->of_node;
	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
	host->setup = fsl_espi_setup;
	host->cleanup = fsl_espi_cleanup;
	host->transfer_one_message = fsl_espi_do_one_msg;
	host->auto_runtime_pm = true;
	host->max_message_size = fsl_espi_max_message_size;
	host->num_chipselect = num_cs;

	espi = spi_controller_get_devdata(host);
	spin_lock_init(&espi->lock);

	espi->dev = dev;
	espi->spibrg = fsl_get_sys_freq();
	if (espi->spibrg == -1) {
		dev_err(dev, "Can't get sys frequency!\n");
		ret = -EINVAL;
		goto err_probe;
	}
	/* determined by clock divider fields DIV16/PM in register SPMODEx */
	host->min_speed_hz = DIV_ROUND_UP(espi->spibrg, 4 * 16 * 16);
	host->max_speed_hz = DIV_ROUND_UP(espi->spibrg, 4);

	init_completion(&espi->done);

	espi->reg_base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(espi->reg_base)) {
		ret = PTR_ERR(espi->reg_base);
		goto err_probe;
	}

	/* Register for SPI Interrupt */
	ret = devm_request_irq(dev, irq, fsl_espi_irq, 0, "fsl_espi", espi);
	if (ret)
		goto err_probe;

	fsl_espi_init_regs(dev, true);

	pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	ret = devm_spi_register_controller(dev, host);
	if (ret < 0)
		goto err_pm;

	dev_info(dev, "irq = %u\n", irq);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;

err_pm:
	pm_runtime_put_noidle(dev);
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
err_probe:
	spi_controller_put(host);
	return ret;
}

static int of_fsl_espi_get_chipselects(struct device *dev)
{
	struct device_node *np = dev->of_node;
	u32 num_cs;
	int ret;

	ret = of_property_read_u32(np, "fsl,espi-num-chipselects", &num_cs);
	if (ret) {
		dev_err(dev, "No 'fsl,espi-num-chipselects' property\n");
		return 0;
	}

	return num_cs;
}

static int of_fsl_espi_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct resource mem;
	unsigned int irq, num_cs;
	int ret;

	if (of_property_read_bool(np, "mode")) {
		dev_err(dev, "mode property is not supported on ESPI!\n");
		return -EINVAL;
	}

	num_cs = of_fsl_espi_get_chipselects(dev);
	if (!num_cs)
		return -EINVAL;

	ret = of_address_to_resource(np, 0, &mem);
	if (ret)
		return ret;

	irq = irq_of_parse_and_map(np, 0);
	if (!irq)
		return -EINVAL;

	return fsl_espi_probe(dev, &mem, irq, num_cs);
}

static void of_fsl_espi_remove(struct platform_device *dev)
{
	pm_runtime_disable(&dev->dev);
}

#ifdef CONFIG_PM_SLEEP
static int of_fsl_espi_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	int ret;

	ret = spi_controller_suspend(host);
	if (ret)
		return ret;

	return pm_runtime_force_suspend(dev);
}

static int of_fsl_espi_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	int ret;

	fsl_espi_init_regs(dev, false);

	ret = pm_runtime_force_resume(dev);
	if (ret < 0)
		return ret;

	return spi_controller_resume(host);
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops espi_pm = {
	SET_RUNTIME_PM_OPS(fsl_espi_runtime_suspend,
			   fsl_espi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(of_fsl_espi_suspend, of_fsl_espi_resume)
};

static const struct of_device_id of_fsl_espi_match[] = {
	{ .compatible = "fsl,mpc8536-espi" },
	{}
};
MODULE_DEVICE_TABLE(of, of_fsl_espi_match);

static struct platform_driver fsl_espi_driver = {
	.driver = {
		.name = "fsl_espi",
		.of_match_table = of_fsl_espi_match,
		.pm = &espi_pm,
	},
	.probe		= of_fsl_espi_probe,
	.remove_new	= of_fsl_espi_remove,
};
module_platform_driver(fsl_espi_driver);

MODULE_AUTHOR("Mingkai Hu");
MODULE_DESCRIPTION("Enhanced Freescale SPI Driver");
MODULE_LICENSE("GPL");