// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include "internals.h"

#define QUP_CONFIG			0x0000
#define QUP_STATE			0x0004
#define QUP_IO_M_MODES			0x0008
#define QUP_SW_RESET			0x000c
#define QUP_OPERATIONAL			0x0018
#define QUP_ERROR_FLAGS			0x001c
#define QUP_ERROR_FLAGS_EN		0x0020
#define QUP_OPERATIONAL_MASK		0x0028
#define QUP_HW_VERSION			0x0030
#define QUP_MX_OUTPUT_CNT		0x0100
#define QUP_OUTPUT_FIFO			0x0110
#define QUP_MX_WRITE_CNT		0x0150
#define QUP_MX_INPUT_CNT		0x0200
#define QUP_MX_READ_CNT			0x0208
#define QUP_INPUT_FIFO			0x0218

#define SPI_CONFIG			0x0300
#define SPI_IO_CONTROL			0x0304
#define SPI_ERROR_FLAGS			0x0308
#define SPI_ERROR_FLAGS_EN		0x030c

/* QUP_CONFIG fields */
#define QUP_CONFIG_SPI_MODE		BIT(8)
#define QUP_CONFIG_CLOCK_AUTO_GATE	BIT(13)
#define QUP_CONFIG_NO_INPUT		BIT(7)
#define QUP_CONFIG_NO_OUTPUT		BIT(6)
#define QUP_CONFIG_N			0x001f

/* QUP_STATE fields */
#define QUP_STATE_VALID			BIT(2)
#define QUP_STATE_RESET			0
#define QUP_STATE_RUN			1
#define QUP_STATE_PAUSE			3
#define QUP_STATE_MASK			3
#define QUP_STATE_CLEAR			2

#define QUP_HW_VERSION_2_1_1		0x20010001

/* QUP_IO_M_MODES fields */
#define QUP_IO_M_PACK_EN		BIT(15)
#define QUP_IO_M_UNPACK_EN		BIT(14)
#define QUP_IO_M_INPUT_MODE_MASK_SHIFT	12
#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT	10
#define QUP_IO_M_INPUT_MODE_MASK	(3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
#define QUP_IO_M_OUTPUT_MODE_MASK	(3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)

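/*
 * These fields report the hardware FIFO geometry; see the decode in
 * spi_qup_probe() for how block and FIFO sizes are derived from them.
 */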
#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 0)) >> 0)
#define QUP_IO_M_OUTPUT_FIFO_SIZE(x)	(((x) & (0x07 << 2)) >> 2)
#define QUP_IO_M_INPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 5)) >> 5)
#define QUP_IO_M_INPUT_FIFO_SIZE(x)	(((x) & (0x07 << 7)) >> 7)

#define QUP_IO_M_MODE_FIFO		0
#define QUP_IO_M_MODE_BLOCK		1
#define QUP_IO_M_MODE_DMOV		2
#define QUP_IO_M_MODE_BAM		3

/* QUP_OPERATIONAL fields */
#define QUP_OP_IN_BLOCK_READ_REQ	BIT(13)
#define QUP_OP_OUT_BLOCK_WRITE_REQ	BIT(12)
#define QUP_OP_MAX_INPUT_DONE_FLAG	BIT(11)
#define QUP_OP_MAX_OUTPUT_DONE_FLAG	BIT(10)
#define QUP_OP_IN_SERVICE_FLAG		BIT(9)
#define QUP_OP_OUT_SERVICE_FLAG		BIT(8)
#define QUP_OP_IN_FIFO_FULL		BIT(7)
#define QUP_OP_OUT_FIFO_FULL		BIT(6)
#define QUP_OP_IN_FIFO_NOT_EMPTY	BIT(5)
#define QUP_OP_OUT_FIFO_NOT_EMPTY	BIT(4)

/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
#define QUP_ERROR_OUTPUT_OVER_RUN	BIT(5)
#define QUP_ERROR_INPUT_UNDER_RUN	BIT(4)
#define QUP_ERROR_OUTPUT_UNDER_RUN	BIT(3)
#define QUP_ERROR_INPUT_OVER_RUN	BIT(2)

/* SPI_CONFIG fields */
#define SPI_CONFIG_HS_MODE		BIT(10)
#define SPI_CONFIG_INPUT_FIRST		BIT(9)
#define SPI_CONFIG_LOOPBACK		BIT(8)

/* SPI_IO_CONTROL fields */
#define SPI_IO_C_FORCE_CS		BIT(11)
#define SPI_IO_C_CLK_IDLE_HIGH		BIT(10)
#define SPI_IO_C_MX_CS_MODE		BIT(8)
#define SPI_IO_C_CS_N_POLARITY_0	BIT(4)
#define SPI_IO_C_CS_SELECT(x)		(((x) & 3) << 2)
#define SPI_IO_C_CS_SELECT_MASK		0x000c
#define SPI_IO_C_TRISTATE_CS		BIT(1)
#define SPI_IO_C_NO_TRI_STATE		BIT(0)

/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
#define SPI_ERROR_CLK_OVER_RUN		BIT(1)
#define SPI_ERROR_CLK_UNDER_RUN		BIT(0)

#define SPI_NUM_CHIPSELECTS		4

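/*
 * The MX_*_CNT registers appear to be 16 bits wide, capping a single
 * transaction just below 64K; backing off by 64 leaves some headroom
 * (this rationale is inferred, not documented here).
 */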
#define SPI_MAX_XFER			(SZ_64K - 64)

/* high speed mode is when the bus rate is greater than 26 MHz */
#define SPI_HS_MIN_RATE			26000000
#define SPI_MAX_RATE			50000000

#define SPI_DELAY_THRESHOLD		1
#define SPI_DELAY_RETRY			10

#define SPI_BUS_WIDTH			8

struct spi_qup {
	void __iomem		*base;
	struct device		*dev;
	struct clk		*cclk;	/* core clock */
	struct clk		*iclk;	/* interface clock */
	struct icc_path		*icc_path; /* interconnect to RAM */
	int			irq;
	spinlock_t		lock;

	int			in_fifo_sz;
	int			out_fifo_sz;
	int			in_blk_sz;
	int			out_blk_sz;

	struct spi_transfer	*xfer;
	struct completion	done;
	int			error;
	int			w_size;	/* bytes per SPI word */
	int			n_words;
	int			tx_bytes;
	int			rx_bytes;
	const u8		*tx_buf;
	u8			*rx_buf;
	int			qup_v1;

	int			mode;
	struct dma_slave_config	rx_conf;
	struct dma_slave_config	tx_conf;

	u32			bw_speed_hz;
};

static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer);

static inline bool spi_qup_is_flag_set(struct spi_qup *controller, u32 flag)
{
	u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL);

	return (opflag & flag) != 0;
}

static inline bool spi_qup_is_dma_xfer(int mode)
{
	if (mode == QUP_IO_M_MODE_DMOV || mode == QUP_IO_M_MODE_BAM)
		return true;

	return false;
}

/* gets the transfer length in bytes */
static inline unsigned int spi_qup_len(struct spi_qup *controller)
{
	return controller->n_words * controller->w_size;
}

static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
{
	u32 opstate = readl_relaxed(controller->base + QUP_STATE);

	return opstate & QUP_STATE_VALID;
}

static int spi_qup_vote_bw(struct spi_qup *controller, u32 speed_hz)
{
	u32 needed_peak_bw;
	int ret;

	if (controller->bw_speed_hz == speed_hz)
		return 0;

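	/*
	 * SPI_BUS_WIDTH scales the clock rate into a byte-rate figure for
	 * the peak-bandwidth vote; this looks like a deliberately generous
	 * upper bound (the wire carries one bit per clock), and only the
	 * peak vote is set, never the average.
	 */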
	needed_peak_bw = Bps_to_icc(speed_hz * SPI_BUS_WIDTH);
	ret = icc_set_bw(controller->icc_path, 0, needed_peak_bw);
	if (ret)
		return ret;

	controller->bw_speed_hz = speed_hz;
	return 0;
}

static int spi_qup_set_state(struct spi_qup *controller, u32 state)
{
	unsigned long loop;
	u32 cur_state;

	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {

		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	if (loop)
		dev_dbg(controller->dev, "invalid state for %ld us, state %d\n",
			loop, state);

	cur_state = readl_relaxed(controller->base + QUP_STATE);
	/*
	 * Per spec: for PAUSE_STATE to RESET_STATE, two writes
	 * of (b10) are required
	 */
	if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
	    (state == QUP_STATE_RESET)) {
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
	} else {
		cur_state &= ~QUP_STATE_MASK;
		cur_state |= state;
		writel_relaxed(cur_state, controller->base + QUP_STATE);
	}

	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {

		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	return 0;
}

static void spi_qup_read_from_fifo(struct spi_qup *controller, u32 num_words)
{
	u8 *rx_buf = controller->rx_buf;
	int i, shift, num_bytes;
	u32 word;

	for (; num_words; num_words--) {

		word = readl_relaxed(controller->base + QUP_INPUT_FIFO);

		num_bytes = min_t(int, spi_qup_len(controller) -
				       controller->rx_bytes,
				       controller->w_size);

		if (!rx_buf) {
			controller->rx_bytes += num_bytes;
			continue;
		}

		for (i = 0; i < num_bytes; i++, controller->rx_bytes++) {
			/*
			 * The data format depends on bytes per SPI word:
			 *  4 bytes: 0x12345678
			 *  2 bytes: 0x00001234
			 *  1 byte : 0x00000012
			 */
			shift = BITS_PER_BYTE;
			shift *= (controller->w_size - i - 1);
			rx_buf[controller->rx_bytes] = word >> shift;
		}
	}
}

static void spi_qup_read(struct spi_qup *controller, u32 *opflags)
{
	u32 remainder, words_per_block, num_words;
	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;

	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->rx_bytes,
				 controller->w_size);
	words_per_block = controller->in_blk_sz >> 2;

	do {
		/* ACK by clearing service flag */
		writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
			       controller->base + QUP_OPERATIONAL);

		if (!remainder)
			goto exit;

		if (is_block_mode) {
			num_words = (remainder > words_per_block) ?
					words_per_block : remainder;
		} else {
			if (!spi_qup_is_flag_set(controller,
						 QUP_OP_IN_FIFO_NOT_EMPTY))
				break;

			num_words = 1;
		}

		/* read up to the maximum transfer size available */
		spi_qup_read_from_fifo(controller, num_words);

		remainder -= num_words;

		/* if block mode, check to see if the next block is available */
		if (is_block_mode && !spi_qup_is_flag_set(controller,
					QUP_OP_IN_BLOCK_READ_REQ))
			break;

	} while (remainder);

	/*
	 * Due to the extra stickiness of the QUP_OP_IN_SERVICE_FLAG during
	 * block reads, it has to be cleared again at the very end.  However,
	 * be sure to refresh the opflags value, because MAX_INPUT_DONE_FLAG
	 * may now be present and is used to determine if the transaction is
	 * complete.
	 */
exit:
	if (!remainder) {
		*opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
		if (is_block_mode && *opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
			writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
				       controller->base + QUP_OPERATIONAL);
	}
}

static void spi_qup_write_to_fifo(struct spi_qup *controller, u32 num_words)
{
	const u8 *tx_buf = controller->tx_buf;
	int i, num_bytes;
	u32 word, data;

	for (; num_words; num_words--) {
		word = 0;

		num_bytes = min_t(int, spi_qup_len(controller) -
				       controller->tx_bytes,
				       controller->w_size);
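		/*
		 * The output FIFO consumes bytes MSB-first: byte 0 lands in
		 * bits 31:24 regardless of w_size, whereas the input FIFO
		 * delivers sub-word data low-aligned (see
		 * spi_qup_read_from_fifo()).
		 */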
		if (tx_buf)
			for (i = 0; i < num_bytes; i++) {
				data = tx_buf[controller->tx_bytes + i];
				word |= data << (BITS_PER_BYTE * (3 - i));
			}

		controller->tx_bytes += num_bytes;

		writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
	}
}

static void spi_qup_dma_done(void *data)
{
	struct spi_qup *qup = data;

	complete(&qup->done);
}

static void spi_qup_write(struct spi_qup *controller)
{
	bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
	u32 remainder, words_per_block, num_words;

	remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->tx_bytes,
				 controller->w_size);
	words_per_block = controller->out_blk_sz >> 2;

	do {
		/* ACK by clearing service flag */
		writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
			       controller->base + QUP_OPERATIONAL);

		/* make sure the interrupt is valid */
		if (!remainder)
			return;

		if (is_block_mode) {
			num_words = (remainder > words_per_block) ?
				words_per_block : remainder;
		} else {
			if (spi_qup_is_flag_set(controller,
						QUP_OP_OUT_FIFO_FULL))
				break;

			num_words = 1;
		}

		spi_qup_write_to_fifo(controller, num_words);

		remainder -= num_words;

		/* if block mode, check to see if the next block is available */
		if (is_block_mode && !spi_qup_is_flag_set(controller,
					QUP_OP_OUT_BLOCK_WRITE_REQ))
			break;

	} while (remainder);
}

static int spi_qup_prep_sg(struct spi_controller *host, struct scatterlist *sgl,
			   unsigned int nents, enum dma_transfer_direction dir,
			   dma_async_tx_callback callback)
{
	struct spi_qup *qup = spi_controller_get_devdata(host);
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	if (dir == DMA_MEM_TO_DEV)
		chan = host->dma_tx;
	else
		chan = host->dma_rx;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
	if (IS_ERR_OR_NULL(desc))
		return desc ? PTR_ERR(desc) : -EINVAL;

	desc->callback = callback;
	desc->callback_param = qup;

	cookie = dmaengine_submit(desc);

	return dma_submit_error(cookie);
}

static void spi_qup_dma_terminate(struct spi_controller *host,
				  struct spi_transfer *xfer)
{
	if (xfer->tx_buf)
		dmaengine_terminate_all(host->dma_tx);
	if (xfer->rx_buf)
		dmaengine_terminate_all(host->dma_rx);
}

static u32 spi_qup_sgl_get_nents_len(struct scatterlist *sgl, u32 max,
				     u32 *nents)
{
	struct scatterlist *sg;
	u32 total = 0;

	for (sg = sgl; sg; sg = sg_next(sg)) {
		unsigned int len = sg_dma_len(sg);

		/* check for overflow as well as limit */
		if (((total + len) < total) || ((total + len) > max))
			break;

		total += len;
		(*nents)++;
	}

	return total;
}

static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
			  unsigned long timeout)
{
	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
	struct spi_controller *host = spi->controller;
	struct spi_qup *qup = spi_controller_get_devdata(host);
	struct scatterlist *tx_sgl, *rx_sgl;
	int ret;

	ret = spi_qup_vote_bw(qup, xfer->speed_hz);
	if (ret) {
		dev_err(qup->dev, "failed to vote for ICC bandwidth: %d\n", ret);
		return -EIO;
	}

	if (xfer->rx_buf)
		rx_done = spi_qup_dma_done;
	else if (xfer->tx_buf)
		tx_done = spi_qup_dma_done;

	rx_sgl = xfer->rx_sg.sgl;
	tx_sgl = xfer->tx_sg.sgl;

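	/*
	 * Submit the scatterlists in chunks: each pass covers at most
	 * SPI_MAX_XFER bytes of whole sg entries, waits for completion,
	 * then advances rx_sgl/tx_sgl past the entries just transferred.
	 */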
	do {
		u32 rx_nents = 0, tx_nents = 0;

		if (rx_sgl)
			qup->n_words = spi_qup_sgl_get_nents_len(rx_sgl,
					SPI_MAX_XFER, &rx_nents) / qup->w_size;
		if (tx_sgl)
			qup->n_words = spi_qup_sgl_get_nents_len(tx_sgl,
					SPI_MAX_XFER, &tx_nents) / qup->w_size;
		if (!qup->n_words)
			return -EIO;

		ret = spi_qup_io_config(spi, xfer);
		if (ret)
			return ret;

		/* before issuing the descriptors, set the QUP to run */
		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}
		if (rx_sgl) {
			ret = spi_qup_prep_sg(host, rx_sgl, rx_nents,
					      DMA_DEV_TO_MEM, rx_done);
			if (ret)
				return ret;
			dma_async_issue_pending(host->dma_rx);
		}

		if (tx_sgl) {
			ret = spi_qup_prep_sg(host, tx_sgl, tx_nents,
					      DMA_MEM_TO_DEV, tx_done);
			if (ret)
				return ret;

			dma_async_issue_pending(host->dma_tx);
		}

		if (!wait_for_completion_timeout(&qup->done, timeout))
			return -ETIMEDOUT;

		for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl))
			;
		for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl))
			;

	} while (rx_sgl || tx_sgl);

	return 0;
}

static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,
			  unsigned long timeout)
{
	struct spi_controller *host = spi->controller;
	struct spi_qup *qup = spi_controller_get_devdata(host);
	int ret, n_words, iterations, offset = 0;

	n_words = qup->n_words;
	iterations = n_words / SPI_MAX_XFER; /* round down */
	qup->rx_buf = xfer->rx_buf;
	qup->tx_buf = xfer->tx_buf;

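	/*
	 * Split the transfer into SPI_MAX_XFER-word chunks: 'iterations'
	 * counts the full chunks, and the final pass carries the
	 * n_words % SPI_MAX_XFER remainder.
	 */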
	do {
		if (iterations)
			qup->n_words = SPI_MAX_XFER;
		else
			qup->n_words = n_words % SPI_MAX_XFER;

		if (qup->tx_buf && offset)
			qup->tx_buf = xfer->tx_buf + offset * SPI_MAX_XFER;

		if (qup->rx_buf && offset)
			qup->rx_buf = xfer->rx_buf + offset * SPI_MAX_XFER;

		/*
		 * If the transaction is small enough, we fall back to
		 * FIFO mode.
		 */
		if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
			qup->mode = QUP_IO_M_MODE_FIFO;

		ret = spi_qup_io_config(spi, xfer);
		if (ret)
			return ret;

		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}

		ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
		if (ret) {
			dev_warn(qup->dev, "cannot set PAUSE state\n");
			return ret;
		}

		if (qup->mode == QUP_IO_M_MODE_FIFO)
			spi_qup_write(qup);

		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret) {
			dev_warn(qup->dev, "cannot set RUN state\n");
			return ret;
		}

		if (!wait_for_completion_timeout(&qup->done, timeout))
			return -ETIMEDOUT;

		offset++;
	} while (iterations--);

	return 0;
}

static bool spi_qup_data_pending(struct spi_qup *controller)
{
	unsigned int remainder_tx, remainder_rx;

	remainder_tx = DIV_ROUND_UP(spi_qup_len(controller) -
				    controller->tx_bytes, controller->w_size);

	remainder_rx = DIV_ROUND_UP(spi_qup_len(controller) -
				    controller->rx_bytes, controller->w_size);

	return remainder_tx || remainder_rx;
}

static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
	struct spi_qup *controller = dev_id;
	u32 opflags, qup_err, spi_err;
	int error = 0;

	qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
	spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
	opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);

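	/*
	 * Writing the error bits back acknowledges them; the flag
	 * registers appear to be write-one-to-clear.
	 */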
	writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
	writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);

	if (qup_err) {
		if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
			dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
			dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
			dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
			dev_warn(controller->dev, "INPUT_OVER_RUN\n");

		error = -EIO;
	}

	if (spi_err) {
		if (spi_err & SPI_ERROR_CLK_OVER_RUN)
			dev_warn(controller->dev, "CLK_OVER_RUN\n");
		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
			dev_warn(controller->dev, "CLK_UNDER_RUN\n");

		error = -EIO;
	}

	spin_lock(&controller->lock);
	if (!controller->error)
		controller->error = error;
	spin_unlock(&controller->lock);

	if (spi_qup_is_dma_xfer(controller->mode)) {
		writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
	} else {
		if (opflags & QUP_OP_IN_SERVICE_FLAG)
			spi_qup_read(controller, &opflags);

		if (opflags & QUP_OP_OUT_SERVICE_FLAG)
			spi_qup_write(controller);

		if (!spi_qup_data_pending(controller))
			complete(&controller->done);
	}

	if (error)
		complete(&controller->done);

	if (opflags & QUP_OP_MAX_INPUT_DONE_FLAG) {
		if (!spi_qup_is_dma_xfer(controller->mode)) {
			if (spi_qup_data_pending(controller))
				return IRQ_HANDLED;
		}
		complete(&controller->done);
	}

	return IRQ_HANDLED;
}

/* set the clock frequency and bits per word, and determine the I/O mode */
static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_controller_get_devdata(spi->controller);
	int ret;

	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
		dev_err(controller->dev, "size too big for loopback %d > %d\n",
			xfer->len, controller->in_fifo_sz);
		return -EIO;
	}

	ret = dev_pm_opp_set_rate(controller->dev, xfer->speed_hz);
	if (ret) {
		dev_err(controller->dev, "failed to set frequency %d\n",
			xfer->speed_hz);
		return -EIO;
	}

	controller->w_size = DIV_ROUND_UP(xfer->bits_per_word, 8);
	controller->n_words = xfer->len / controller->w_size;

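	/*
	 * Pick the I/O mode: FIFO when the whole transfer fits in the
	 * input FIFO, BAM (DMA) when the core has DMA-mapped the buffers,
	 * and BLOCK otherwise.
	 */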
	if (controller->n_words <= (controller->in_fifo_sz / sizeof(u32)))
		controller->mode = QUP_IO_M_MODE_FIFO;
	else if (spi_xfer_is_dma_mapped(spi->controller, spi, xfer))
		controller->mode = QUP_IO_M_MODE_BAM;
	else
		controller->mode = QUP_IO_M_MODE_BLOCK;

	return 0;
}

/* prep qup for another spi transaction of a specific type */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_controller_get_devdata(spi->controller);
	u32 config, iomode, control;
	unsigned long flags;

	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer     = xfer;
	controller->error    = 0;
	controller->rx_bytes = 0;
	controller->tx_bytes = 0;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
		dev_err(controller->dev, "cannot set RESET state\n");
		return -EIO;
	}

	switch (controller->mode) {
	case QUP_IO_M_MODE_FIFO:
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_READ_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_WRITE_CNT);
		/* must be zero for FIFO */
		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
		break;
	case QUP_IO_M_MODE_BAM:
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_OUTPUT_CNT);
		/* must be zero for BLOCK and BAM */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);

		if (!controller->qup_v1) {
			void __iomem *input_cnt;

			input_cnt = controller->base + QUP_MX_INPUT_CNT;
			/*
			 * for DMA transfers, both QUP_MX_INPUT_CNT and
			 * QUP_MX_OUTPUT_CNT must be zero in all cases but one.
			 * That case is a non-balanced transfer when there is
			 * only a rx_buf.
			 */
			if (xfer->tx_buf)
				writel_relaxed(0, input_cnt);
			else
				writel_relaxed(controller->n_words, input_cnt);

			writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
		}
		break;
	case QUP_IO_M_MODE_BLOCK:
		reinit_completion(&controller->done);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(controller->n_words,
			       controller->base + QUP_MX_OUTPUT_CNT);
		/* must be zero for BLOCK and BAM */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
		break;
	default:
		dev_err(controller->dev, "unknown mode = %d\n",
			controller->mode);
		return -EIO;
	}

	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
	/* Set input and output transfer mode */
	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);

	if (!spi_qup_is_dma_xfer(controller->mode))
		iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
	else
		iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;

	iomode |= (controller->mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
	iomode |= (controller->mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);

	writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);

	control = readl_relaxed(controller->base + SPI_IO_CONTROL);

	if (spi->mode & SPI_CPOL)
		control |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		control &= ~SPI_IO_C_CLK_IDLE_HIGH;

	writel_relaxed(control, controller->base + SPI_IO_CONTROL);

	config = readl_relaxed(controller->base + SPI_CONFIG);

	if (spi->mode & SPI_LOOP)
		config |= SPI_CONFIG_LOOPBACK;
	else
		config &= ~SPI_CONFIG_LOOPBACK;

	if (spi->mode & SPI_CPHA)
		config &= ~SPI_CONFIG_INPUT_FIRST;
	else
		config |= SPI_CONFIG_INPUT_FIRST;

	/*
	 * HS_MODE improves signal stability at high spi-clk rates,
	 * but is invalid in loopback mode.
	 */
	if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
		config |= SPI_CONFIG_HS_MODE;
	else
		config &= ~SPI_CONFIG_HS_MODE;

	writel_relaxed(config, controller->base + SPI_CONFIG);

	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
	config |= xfer->bits_per_word - 1;
	config |= QUP_CONFIG_SPI_MODE;

	if (spi_qup_is_dma_xfer(controller->mode)) {
		if (!xfer->tx_buf)
			config |= QUP_CONFIG_NO_OUTPUT;
		if (!xfer->rx_buf)
			config |= QUP_CONFIG_NO_INPUT;
	}

	writel_relaxed(config, controller->base + QUP_CONFIG);

	/* only write to OPERATIONAL_MASK when the register is present */
	if (!controller->qup_v1) {
		u32 mask = 0;

		/*
		 * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO
		 * status changes in BAM mode
		 */
		if (spi_qup_is_dma_xfer(controller->mode))
			mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;

		writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
	}

	return 0;
}

static int spi_qup_transfer_one(struct spi_controller *host,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_controller_get_devdata(host);
	unsigned long timeout, flags;
	int ret;

	ret = spi_qup_io_prep(spi, xfer);
	if (ret)
		return ret;

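	/*
	 * Estimate the transfer time: clocks per millisecond from speed_hz,
	 * then bits-to-send divided by that gives milliseconds, padded by a
	 * factor of 100 before converting to jiffies.
	 */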
	timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
	timeout = DIV_ROUND_UP(min_t(unsigned long, SPI_MAX_XFER,
				     xfer->len) * 8, timeout);
	timeout = 100 * msecs_to_jiffies(timeout);

	reinit_completion(&controller->done);

	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer     = xfer;
	controller->error    = 0;
	controller->rx_bytes = 0;
	controller->tx_bytes = 0;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (spi_qup_is_dma_xfer(controller->mode))
		ret = spi_qup_do_dma(spi, xfer, timeout);
	else
		ret = spi_qup_do_pio(spi, xfer, timeout);

	spi_qup_set_state(controller, QUP_STATE_RESET);
	spin_lock_irqsave(&controller->lock, flags);
	if (!ret)
		ret = controller->error;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (ret && spi_qup_is_dma_xfer(controller->mode))
		spi_qup_dma_terminate(host, xfer);

	return ret;
}

static bool spi_qup_can_dma(struct spi_controller *host, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	struct spi_qup *qup = spi_controller_get_devdata(host);
	size_t dma_align = dma_get_cache_alignment();
	int n_words;

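	/*
	 * DMA requires cache-aligned buffers and usable channels, and on
	 * QUP v1 a length that is a whole number of blocks; transfers that
	 * still fit in the FIFO are left to PIO.
	 */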
	if (xfer->rx_buf) {
		if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) ||
		    IS_ERR_OR_NULL(host->dma_rx))
			return false;
		if (qup->qup_v1 && (xfer->len % qup->in_blk_sz))
			return false;
	}

	if (xfer->tx_buf) {
		if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) ||
		    IS_ERR_OR_NULL(host->dma_tx))
			return false;
		if (qup->qup_v1 && (xfer->len % qup->out_blk_sz))
			return false;
	}

	n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8);
	if (n_words <= (qup->in_fifo_sz / sizeof(u32)))
		return false;

	return true;
}

static void spi_qup_release_dma(struct spi_controller *host)
{
	if (!IS_ERR_OR_NULL(host->dma_rx))
		dma_release_channel(host->dma_rx);
	if (!IS_ERR_OR_NULL(host->dma_tx))
		dma_release_channel(host->dma_tx);
}

static int spi_qup_init_dma(struct spi_controller *host, resource_size_t base)
{
	struct spi_qup *spi = spi_controller_get_devdata(host);
	struct dma_slave_config *rx_conf = &spi->rx_conf,
				*tx_conf = &spi->tx_conf;
	struct device *dev = spi->dev;
	int ret;

	/* allocate dma resources, if available */
	host->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(host->dma_rx))
		return PTR_ERR(host->dma_rx);

	host->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(host->dma_tx)) {
		ret = PTR_ERR(host->dma_tx);
		goto err_tx;
	}

	/* set DMA parameters */
	rx_conf->direction = DMA_DEV_TO_MEM;
	rx_conf->device_fc = 1;
	rx_conf->src_addr = base + QUP_INPUT_FIFO;
	rx_conf->src_maxburst = spi->in_blk_sz;

	tx_conf->direction = DMA_MEM_TO_DEV;
	tx_conf->device_fc = 1;
	tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
	tx_conf->dst_maxburst = spi->out_blk_sz;

	ret = dmaengine_slave_config(host->dma_rx, rx_conf);
	if (ret) {
		dev_err(dev, "failed to configure RX channel\n");
		goto err;
	}

	ret = dmaengine_slave_config(host->dma_tx, tx_conf);
	if (ret) {
		dev_err(dev, "failed to configure TX channel\n");
		goto err;
	}

	return 0;

err:
	dma_release_channel(host->dma_tx);
err_tx:
	dma_release_channel(host->dma_rx);
	return ret;
}

static void spi_qup_set_cs(struct spi_device *spi, bool val)
{
	struct spi_qup *controller;
	u32 spi_ioc;
	u32 spi_ioc_orig;

	controller = spi_controller_get_devdata(spi->controller);
	spi_ioc = readl_relaxed(controller->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
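	/*
	 * Note the inverted sense: clearing 'val' forces the native
	 * chip-select active via FORCE_CS (this reading is inferred from
	 * the code, not from a datasheet).
	 */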
	if (!val)
		spi_ioc |= SPI_IO_C_FORCE_CS;
	else
		spi_ioc &= ~SPI_IO_C_FORCE_CS;

	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, controller->base + SPI_IO_CONTROL);
}

static int spi_qup_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct icc_path *icc_path;
	struct clk *iclk, *cclk;
	struct spi_qup *controller;
	struct resource *res;
	struct device *dev;
	void __iomem *base;
	u32 max_freq, iomode, num_cs;
	int ret, irq, size;

	dev = &pdev->dev;
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	cclk = devm_clk_get(dev, "core");
	if (IS_ERR(cclk))
		return PTR_ERR(cclk);

	iclk = devm_clk_get(dev, "iface");
	if (IS_ERR(iclk))
		return PTR_ERR(iclk);

	icc_path = devm_of_icc_get(dev, NULL);
	if (IS_ERR(icc_path))
		return dev_err_probe(dev, PTR_ERR(icc_path),
				     "failed to get interconnect path\n");

	/* This is an optional parameter */
	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
		max_freq = SPI_MAX_RATE;

	if (!max_freq || max_freq > SPI_MAX_RATE) {
		dev_err(dev, "invalid clock frequency %d\n", max_freq);
		return -ENXIO;
	}

	ret = devm_pm_opp_set_clkname(dev, "core");
	if (ret)
		return ret;

	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(dev);
	if (ret && ret != -ENODEV)
		return dev_err_probe(dev, ret, "invalid OPP table\n");

	host = spi_alloc_host(dev, sizeof(struct spi_qup));
	if (!host) {
		dev_err(dev, "cannot allocate host\n");
		return -ENOMEM;
	}

	/* use num-cs unless it is not present or out of range */
	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
	    num_cs > SPI_NUM_CHIPSELECTS)
		host->num_chipselect = SPI_NUM_CHIPSELECTS;
	else
		host->num_chipselect = num_cs;

	host->use_gpio_descriptors = true;
	host->max_native_cs = SPI_NUM_CHIPSELECTS;
	host->bus_num = pdev->id;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	host->max_speed_hz = max_freq;
	host->transfer_one = spi_qup_transfer_one;
	host->dev.of_node = pdev->dev.of_node;
	host->auto_runtime_pm = true;
	host->dma_alignment = dma_get_cache_alignment();
	host->max_dma_len = SPI_MAX_XFER;

	platform_set_drvdata(pdev, host);

	controller = spi_controller_get_devdata(host);

	controller->dev = dev;
	controller->base = base;
	controller->iclk = iclk;
	controller->cclk = cclk;
	controller->icc_path = icc_path;
	controller->irq = irq;

	ret = spi_qup_init_dma(host, res->start);
	if (ret == -EPROBE_DEFER)
		goto error;
	else if (!ret)
		host->can_dma = spi_qup_can_dma;

	controller->qup_v1 = (uintptr_t)of_device_get_match_data(dev);

	if (!controller->qup_v1)
		host->set_cs = spi_qup_set_cs;

	spin_lock_init(&controller->lock);
	init_completion(&controller->done);

	ret = clk_prepare_enable(cclk);
	if (ret) {
		dev_err(dev, "cannot enable core clock\n");
		goto error_dma;
	}

	ret = clk_prepare_enable(iclk);
	if (ret) {
		clk_disable_unprepare(cclk);
		dev_err(dev, "cannot enable iface clock\n");
		goto error_dma;
	}

	iomode = readl_relaxed(base + QUP_IO_M_MODES);

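	/*
	 * Decode the hardware-reported geometry: block sizes are encoded
	 * in 16-byte units (0 meaning the 4-byte minimum), and the FIFO
	 * depth works out to block_size * 2^(size + 1) bytes.
	 */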
	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->out_blk_sz = size * 16;
	else
		controller->out_blk_sz = 4;

	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->in_blk_sz = size * 16;
	else
		controller->in_blk_sz = 4;

	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

	dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
		 controller->in_blk_sz, controller->in_fifo_sz,
		 controller->out_blk_sz, controller->out_fifo_sz);

	writel_relaxed(1, base + QUP_SW_RESET);

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret) {
		dev_err(dev, "cannot set RESET state\n");
		goto error_clk;
	}

	writel_relaxed(0, base + QUP_OPERATIONAL);
	writel_relaxed(0, base + QUP_IO_M_MODES);

	if (!controller->qup_v1)
		writel_relaxed(0, base + QUP_OPERATIONAL_MASK);

	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
		       base + SPI_ERROR_FLAGS_EN);

	/* on earlier versions of the QUP, leave INPUT_OVER_RUN disabled */
	if (controller->qup_v1)
		writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
			QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
			base + QUP_ERROR_FLAGS_EN);

	writel_relaxed(0, base + SPI_CONFIG);
	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
			       IRQF_TRIGGER_HIGH, pdev->name, controller);
	if (ret)
		goto error_clk;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = devm_spi_register_controller(dev, host);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(&pdev->dev);
error_clk:
	clk_disable_unprepare(cclk);
	clk_disable_unprepare(iclk);
error_dma:
	spi_qup_release_dma(host);
error:
	spi_controller_put(host);
	return ret;
}

#ifdef CONFIG_PM
static int spi_qup_pm_suspend_runtime(struct device *device)
{
	struct spi_controller *host = dev_get_drvdata(device);
	struct spi_qup *controller = spi_controller_get_devdata(host);
	u32 config;

	/* Enable clock auto-gating */
	config = readl(controller->base + QUP_CONFIG);
	config |= QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);

	clk_disable_unprepare(controller->cclk);
	spi_qup_vote_bw(controller, 0);
	clk_disable_unprepare(controller->iclk);

	return 0;
}

static int spi_qup_pm_resume_runtime(struct device *device)
{
	struct spi_controller *host = dev_get_drvdata(device);
	struct spi_qup *controller = spi_controller_get_devdata(host);
	u32 config;
	int ret;

	ret = clk_prepare_enable(controller->iclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(controller->cclk);
	if (ret) {
		clk_disable_unprepare(controller->iclk);
		return ret;
	}

	/* Disable clock auto-gating */
	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);
	return 0;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
static int spi_qup_suspend(struct device *device)
{
	struct spi_controller *host = dev_get_drvdata(device);
	struct spi_qup *controller = spi_controller_get_devdata(host);
	int ret;

	if (pm_runtime_suspended(device)) {
		ret = spi_qup_pm_resume_runtime(device);
		if (ret)
			return ret;
	}
	ret = spi_controller_suspend(host);
	if (ret)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	clk_disable_unprepare(controller->cclk);
	spi_qup_vote_bw(controller, 0);
	clk_disable_unprepare(controller->iclk);
	return 0;
}

static int spi_qup_resume(struct device *device)
{
	struct spi_controller *host = dev_get_drvdata(device);
	struct spi_qup *controller = spi_controller_get_devdata(host);
	int ret;

	ret = clk_prepare_enable(controller->iclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(controller->cclk);
	if (ret) {
		clk_disable_unprepare(controller->iclk);
		return ret;
	}

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		goto disable_clk;

	ret = spi_controller_resume(host);
	if (ret)
		goto disable_clk;

	return 0;

disable_clk:
	clk_disable_unprepare(controller->cclk);
	clk_disable_unprepare(controller->iclk);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */

static void spi_qup_remove(struct platform_device *pdev)
{
	struct spi_controller *host = dev_get_drvdata(&pdev->dev);
	struct spi_qup *controller = spi_controller_get_devdata(host);
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);

	if (ret >= 0) {
		ret = spi_qup_set_state(controller, QUP_STATE_RESET);
		if (ret)
			dev_warn(&pdev->dev, "failed to reset controller (%pe)\n",
				 ERR_PTR(ret));

		clk_disable_unprepare(controller->cclk);
		clk_disable_unprepare(controller->iclk);
	} else {
		dev_warn(&pdev->dev, "failed to resume, skip hw disable (%pe)\n",
			 ERR_PTR(ret));
	}

	spi_qup_release_dma(host);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

static const struct of_device_id spi_qup_dt_match[] = {
	{ .compatible = "qcom,spi-qup-v1.1.1", .data = (void *)1, },
	{ .compatible = "qcom,spi-qup-v2.1.1", },
	{ .compatible = "qcom,spi-qup-v2.2.1", },
	{ }
};
MODULE_DEVICE_TABLE(of, spi_qup_dt_match);

static const struct dev_pm_ops spi_qup_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
	SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
			   spi_qup_pm_resume_runtime,
			   NULL)
};

static struct platform_driver spi_qup_driver = {
	.driver = {
		.name		= "spi_qup",
		.pm		= &spi_qup_dev_pm_ops,
		.of_match_table = spi_qup_dt_match,
	},
	.probe = spi_qup_probe,
	.remove_new = spi_qup_remove,
};
module_platform_driver(spi_qup_driver);

MODULE_DESCRIPTION("Qualcomm SPI controller with QUP interface");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_qup");