xref: /linux/drivers/spi/spi-qup.c (revision f7018c21350204c4cf628462f229d44d03545254)
/*
 * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

#define QUP_CONFIG			0x0000
#define QUP_STATE			0x0004
#define QUP_IO_M_MODES			0x0008
#define QUP_SW_RESET			0x000c
#define QUP_OPERATIONAL			0x0018
#define QUP_ERROR_FLAGS			0x001c
#define QUP_ERROR_FLAGS_EN		0x0020
#define QUP_OPERATIONAL_MASK		0x0028
#define QUP_HW_VERSION			0x0030
#define QUP_MX_OUTPUT_CNT		0x0100
#define QUP_OUTPUT_FIFO			0x0110
#define QUP_MX_WRITE_CNT		0x0150
#define QUP_MX_INPUT_CNT		0x0200
#define QUP_MX_READ_CNT			0x0208
#define QUP_INPUT_FIFO			0x0218

#define SPI_CONFIG			0x0300
#define SPI_IO_CONTROL			0x0304
#define SPI_ERROR_FLAGS			0x0308
#define SPI_ERROR_FLAGS_EN		0x030c

/* QUP_CONFIG fields */
#define QUP_CONFIG_SPI_MODE		(1 << 8)
#define QUP_CONFIG_CLOCK_AUTO_GATE	BIT(13)
#define QUP_CONFIG_NO_INPUT		BIT(7)
#define QUP_CONFIG_NO_OUTPUT		BIT(6)
#define QUP_CONFIG_N			0x001f

/* QUP_STATE fields */
#define QUP_STATE_VALID			BIT(2)
#define QUP_STATE_RESET			0
#define QUP_STATE_RUN			1
#define QUP_STATE_PAUSE			3
#define QUP_STATE_MASK			3
#define QUP_STATE_CLEAR			2

#define QUP_HW_VERSION_2_1_1		0x20010001

/* QUP_IO_M_MODES fields */
#define QUP_IO_M_PACK_EN		BIT(15)
#define QUP_IO_M_UNPACK_EN		BIT(14)
#define QUP_IO_M_INPUT_MODE_MASK_SHIFT	12
#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT	10
#define QUP_IO_M_INPUT_MODE_MASK	(3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
#define QUP_IO_M_OUTPUT_MODE_MASK	(3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)

#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 0)) >> 0)
#define QUP_IO_M_OUTPUT_FIFO_SIZE(x)	(((x) & (0x07 << 2)) >> 2)
#define QUP_IO_M_INPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 5)) >> 5)
#define QUP_IO_M_INPUT_FIFO_SIZE(x)	(((x) & (0x07 << 7)) >> 7)

#define QUP_IO_M_MODE_FIFO		0
#define QUP_IO_M_MODE_BLOCK		1
#define QUP_IO_M_MODE_DMOV		2
#define QUP_IO_M_MODE_BAM		3

/* QUP_OPERATIONAL fields */
#define QUP_OP_MAX_INPUT_DONE_FLAG	BIT(11)
#define QUP_OP_MAX_OUTPUT_DONE_FLAG	BIT(10)
#define QUP_OP_IN_SERVICE_FLAG		BIT(9)
#define QUP_OP_OUT_SERVICE_FLAG		BIT(8)
#define QUP_OP_IN_FIFO_FULL		BIT(7)
#define QUP_OP_OUT_FIFO_FULL		BIT(6)
#define QUP_OP_IN_FIFO_NOT_EMPTY	BIT(5)
#define QUP_OP_OUT_FIFO_NOT_EMPTY	BIT(4)

/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
#define QUP_ERROR_OUTPUT_OVER_RUN	BIT(5)
#define QUP_ERROR_INPUT_UNDER_RUN	BIT(4)
#define QUP_ERROR_OUTPUT_UNDER_RUN	BIT(3)
#define QUP_ERROR_INPUT_OVER_RUN	BIT(2)

/* SPI_CONFIG fields */
#define SPI_CONFIG_HS_MODE		BIT(10)
#define SPI_CONFIG_INPUT_FIRST		BIT(9)
#define SPI_CONFIG_LOOPBACK		BIT(8)

/* SPI_IO_CONTROL fields */
#define SPI_IO_C_FORCE_CS		BIT(11)
#define SPI_IO_C_CLK_IDLE_HIGH		BIT(10)
#define SPI_IO_C_MX_CS_MODE		BIT(8)
#define SPI_IO_C_CS_N_POLARITY_0	BIT(4)
#define SPI_IO_C_CS_SELECT(x)		(((x) & 3) << 2)
#define SPI_IO_C_CS_SELECT_MASK		0x000c
#define SPI_IO_C_TRISTATE_CS		BIT(1)
#define SPI_IO_C_NO_TRI_STATE		BIT(0)

/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
#define SPI_ERROR_CLK_OVER_RUN		BIT(1)
#define SPI_ERROR_CLK_UNDER_RUN		BIT(0)

#define SPI_NUM_CHIPSELECTS		4

/* high speed mode is when bus rate is greater than 26MHz */
#define SPI_HS_MIN_RATE			26000000
#define SPI_MAX_RATE			50000000

#define SPI_DELAY_THRESHOLD		1
#define SPI_DELAY_RETRY			10

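/*
 * Per-controller driver state. The transfer fields (xfer, error, tx_bytes,
 * rx_bytes) are shared between transfer_one() and the interrupt handler and
 * are protected by 'lock' where both sides touch them.
 */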
struct spi_qup {
	void __iomem		*base;
	struct device		*dev;
	struct clk		*cclk;	/* core clock */
	struct clk		*iclk;	/* interface clock */
	int			irq;
	spinlock_t		lock;

	int			in_fifo_sz;
	int			out_fifo_sz;
	int			in_blk_sz;
	int			out_blk_sz;

	struct spi_transfer	*xfer;
	struct completion	done;
	int			error;
	int			w_size;	/* bytes per SPI word */
	int			tx_bytes;
	int			rx_bytes;
};


static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
{
	u32 opstate = readl_relaxed(controller->base + QUP_STATE);

	return opstate & QUP_STATE_VALID;
}

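/*
 * Move the QUP state machine between RESET, RUN and PAUSE. The hardware
 * sets QUP_STATE_VALID once a requested transition has completed, so the
 * state register is polled (with short sleeps) before and after the write.
 * Per the QUP spec, leaving PAUSE for RESET takes two writes of the CLEAR
 * value rather than a plain RESET write.
 */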
static int spi_qup_set_state(struct spi_qup *controller, u32 state)
{
	unsigned long loop;
	u32 cur_state;

	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {

		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	if (loop)
		dev_dbg(controller->dev, "invalid state for %ld us, requested state %d\n",
			loop, state);

	cur_state = readl_relaxed(controller->base + QUP_STATE);
	/*
	 * Per spec: for PAUSE_STATE to RESET_STATE, two writes
	 * of (b10) are required
	 */
	if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
	    (state == QUP_STATE_RESET)) {
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
	} else {
		cur_state &= ~QUP_STATE_MASK;
		cur_state |= state;
		writel_relaxed(cur_state, controller->base + QUP_STATE);
	}

	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {

		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	return 0;
}


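/*
 * Drain the input FIFO into xfer->rx_buf until either the FIFO runs empty
 * or the whole transfer has been received. Each 32-bit FIFO word carries
 * one SPI word, right-justified: e.g. with 8-bit words only bits [7:0] are
 * valid, with 16-bit words bits [15:0]. A NULL rx_buf discards the data.
 */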
static void spi_qup_fifo_read(struct spi_qup *controller,
			    struct spi_transfer *xfer)
{
	u8 *rx_buf = xfer->rx_buf;
	u32 word, state;
	int idx, shift, w_size;

	w_size = controller->w_size;

	while (controller->rx_bytes < xfer->len) {

		state = readl_relaxed(controller->base + QUP_OPERATIONAL);
		if (!(state & QUP_OP_IN_FIFO_NOT_EMPTY))
			break;

		word = readl_relaxed(controller->base + QUP_INPUT_FIFO);

		if (!rx_buf) {
			controller->rx_bytes += w_size;
			continue;
		}

		for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
			/*
			 * The data format depends on bytes per SPI word:
			 *  4 bytes: 0x12345678
			 *  2 bytes: 0x00001234
			 *  1 byte : 0x00000012
			 */
			shift = BITS_PER_BYTE;
			shift *= (w_size - idx - 1);
			rx_buf[controller->rx_bytes] = word >> shift;
		}
	}
}

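/*
 * Fill the output FIFO from xfer->tx_buf until either the FIFO reports
 * full or all bytes have been queued. Bytes are packed MSB-first into the
 * 32-bit output word (first byte in bits [31:24]); with a NULL tx_buf the
 * word is left at zero so the controller clocks out zeroes.
 */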
static void spi_qup_fifo_write(struct spi_qup *controller,
			    struct spi_transfer *xfer)
{
	const u8 *tx_buf = xfer->tx_buf;
	u32 word, state, data;
	int idx, w_size;

	w_size = controller->w_size;

	while (controller->tx_bytes < xfer->len) {

		state = readl_relaxed(controller->base + QUP_OPERATIONAL);
		if (state & QUP_OP_OUT_FIFO_FULL)
			break;

		word = 0;
		for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {

			if (!tx_buf) {
				controller->tx_bytes += w_size;
				break;
			}

			data = tx_buf[controller->tx_bytes];
			word |= data << (BITS_PER_BYTE * (3 - idx));
		}

		writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
	}
}

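/*
 * QUP interrupt handler: latch the QUP/SPI error flags and the operational
 * flags, write them back to acknowledge them, service the input/output
 * FIFOs, and complete the transfer once all receive bytes have arrived or
 * an error was flagged.
 */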
static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
	struct spi_qup *controller = dev_id;
	struct spi_transfer *xfer;
	u32 opflags, qup_err, spi_err;
	unsigned long flags;
	int error = 0;

	spin_lock_irqsave(&controller->lock, flags);
	xfer = controller->xfer;
	controller->xfer = NULL;
	spin_unlock_irqrestore(&controller->lock, flags);

	qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
	spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
	opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);

	writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
	writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
	writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);

	if (!xfer) {
		dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
				    qup_err, spi_err, opflags);
		return IRQ_HANDLED;
	}

	if (qup_err) {
		if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
			dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
			dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
			dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
			dev_warn(controller->dev, "INPUT_OVER_RUN\n");

		error = -EIO;
	}

	if (spi_err) {
		if (spi_err & SPI_ERROR_CLK_OVER_RUN)
			dev_warn(controller->dev, "CLK_OVER_RUN\n");
		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
			dev_warn(controller->dev, "CLK_UNDER_RUN\n");

		error = -EIO;
	}

	if (opflags & QUP_OP_IN_SERVICE_FLAG)
		spi_qup_fifo_read(controller, xfer);

	if (opflags & QUP_OP_OUT_SERVICE_FLAG)
		spi_qup_fifo_write(controller, xfer);

	spin_lock_irqsave(&controller->lock, flags);
	controller->error = error;
	controller->xfer = xfer;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (controller->rx_bytes == xfer->len || error)
		complete(&controller->done);

	return IRQ_HANDLED;
}


/* set the clock frequency, word size and I/O modes for this transfer */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(spi->master);
	u32 config, iomode, mode;
	int ret, n_words, w_size;

	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
		dev_err(controller->dev, "transfer too big for loopback %d > %d\n",
			xfer->len, controller->in_fifo_sz);
		return -EIO;
	}

	ret = clk_set_rate(controller->cclk, xfer->speed_hz);
	if (ret) {
		dev_err(controller->dev, "failed to set frequency %d\n",
			xfer->speed_hz);
		return -EIO;
	}

	if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
		dev_err(controller->dev, "cannot set RESET state\n");
		return -EIO;
	}

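	/*
	 * Round the SPI word up to a whole number of bytes: 1, 2 or 4
	 * bytes for up to 8, 16 or 32 bits per word respectively.
	 */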
	w_size = 4;
	if (xfer->bits_per_word <= 8)
		w_size = 1;
	else if (xfer->bits_per_word <= 16)
		w_size = 2;

	n_words = xfer->len / w_size;
	controller->w_size = w_size;

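	/*
	 * Transfers that fit in the FIFO use FIFO mode and program the
	 * MX_READ/WRITE counts; anything larger falls back to BLOCK mode
	 * and programs the MX_INPUT/OUTPUT counts instead. The unused pair
	 * of count registers must be zero in either case.
	 */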
	if (n_words <= controller->in_fifo_sz) {
		mode = QUP_IO_M_MODE_FIFO;
		writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
		/* must be zero for FIFO */
		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
	} else {
		mode = QUP_IO_M_MODE_BLOCK;
		writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
		/* must be zero for BLOCK and BAM */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
	}

	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
	/* Set input and output transfer mode */
	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
	iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
	iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
	iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);

	writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);

	config = readl_relaxed(controller->base + SPI_CONFIG);

	if (spi->mode & SPI_LOOP)
		config |= SPI_CONFIG_LOOPBACK;
	else
		config &= ~SPI_CONFIG_LOOPBACK;

	if (spi->mode & SPI_CPHA)
		config &= ~SPI_CONFIG_INPUT_FIRST;
	else
		config |= SPI_CONFIG_INPUT_FIRST;

	/*
	 * HS_MODE improves signal stability for spi-clk high rates,
	 * but is invalid in loopback mode.
	 */
	if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
		config |= SPI_CONFIG_HS_MODE;
	else
		config &= ~SPI_CONFIG_HS_MODE;

	writel_relaxed(config, controller->base + SPI_CONFIG);

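	/*
	 * Put the QUP into SPI mode, allow both input and output, and set
	 * the N field to bits-per-word minus one.
	 */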
	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
	config |= xfer->bits_per_word - 1;
	config |= QUP_CONFIG_SPI_MODE;
	writel_relaxed(config, controller->base + QUP_CONFIG);

	writel_relaxed(0, controller->base + QUP_OPERATIONAL_MASK);
	return 0;
}

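/*
 * Manual chip-select control: auto CS toggling (MX_CS_MODE) is disabled in
 * favour of FORCE_CS, the CS line is picked from spi->chip_select, and the
 * per-CS polarity bit is flipped to assert or deassert the line.
 */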
static void spi_qup_set_cs(struct spi_device *spi, bool enable)
{
	struct spi_qup *controller = spi_master_get_devdata(spi->master);
	u32 iocontrol, mask;

	iocontrol = readl_relaxed(controller->base + SPI_IO_CONTROL);

	/* Disable auto CS toggle and use manual */
	iocontrol &= ~SPI_IO_C_MX_CS_MODE;
	iocontrol |= SPI_IO_C_FORCE_CS;

	iocontrol &= ~SPI_IO_C_CS_SELECT_MASK;
	iocontrol |= SPI_IO_C_CS_SELECT(spi->chip_select);

	mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;

	if (enable)
		iocontrol |= mask;
	else
		iocontrol &= ~mask;

	writel_relaxed(iocontrol, controller->base + SPI_IO_CONTROL);
}

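/*
 * Execute a single transfer: configure the controller for it, step through
 * RUN and PAUSE to pre-fill the output FIFO while paused, then RUN again
 * and let the interrupt handler move the remaining data until completion
 * (or a timeout scaled to the transfer length and clock rate).
 */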
static int spi_qup_transfer_one(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(master);
	unsigned long timeout, flags;
	int ret = -EIO;

	ret = spi_qup_io_config(spi, xfer);
	if (ret)
		return ret;

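	/*
	 * Timeout: xfer->speed_hz / MSEC_PER_SEC is the bit rate in bits
	 * per millisecond, so len * 8 bits divided by that gives the
	 * nominal transfer time in ms; allow a generous 100x margin.
	 * E.g. 1000 bytes at 1 MHz -> 8 ms nominal, 800 ms timeout.
	 */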
	timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
	timeout = DIV_ROUND_UP(xfer->len * 8, timeout);
	timeout = 100 * msecs_to_jiffies(timeout);

	reinit_completion(&controller->done);

	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer     = xfer;
	controller->error    = 0;
	controller->rx_bytes = 0;
	controller->tx_bytes = 0;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
		dev_warn(controller->dev, "cannot set RUN state\n");
		ret = -EIO;
		goto exit;
	}

	if (spi_qup_set_state(controller, QUP_STATE_PAUSE)) {
		dev_warn(controller->dev, "cannot set PAUSE state\n");
		ret = -EIO;
		goto exit;
	}

	spi_qup_fifo_write(controller, xfer);

	if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
		dev_warn(controller->dev, "cannot set EXECUTE state\n");
		ret = -EIO;
		goto exit;
	}

	if (!wait_for_completion_timeout(&controller->done, timeout))
		ret = -ETIMEDOUT;
exit:
	spi_qup_set_state(controller, QUP_STATE_RESET);
	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer = NULL;
	if (!ret)
		ret = controller->error;
	spin_unlock_irqrestore(&controller->lock, flags);
	return ret;
}

static int spi_qup_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct clk *iclk, *cclk;
	struct spi_qup *controller;
	struct resource *res;
	struct device *dev;
	void __iomem *base;
	u32 data, max_freq, iomode;
	int ret, irq, size;

	dev = &pdev->dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	cclk = devm_clk_get(dev, "core");
	if (IS_ERR(cclk))
		return PTR_ERR(cclk);

	iclk = devm_clk_get(dev, "iface");
	if (IS_ERR(iclk))
		return PTR_ERR(iclk);

	/* This is an optional parameter */
	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
		max_freq = SPI_MAX_RATE;

	if (!max_freq || max_freq > SPI_MAX_RATE) {
		dev_err(dev, "invalid clock frequency %d\n", max_freq);
		return -ENXIO;
	}

	ret = clk_prepare_enable(cclk);
	if (ret) {
		dev_err(dev, "cannot enable core clock\n");
		return ret;
	}

	ret = clk_prepare_enable(iclk);
	if (ret) {
		clk_disable_unprepare(cclk);
		dev_err(dev, "cannot enable iface clock\n");
		return ret;
	}

	data = readl_relaxed(base + QUP_HW_VERSION);

	if (data < QUP_HW_VERSION_2_1_1) {
		clk_disable_unprepare(cclk);
		clk_disable_unprepare(iclk);
		dev_err(dev, "v.%08x is not supported\n", data);
		return -ENXIO;
	}

	master = spi_alloc_master(dev, sizeof(struct spi_qup));
	if (!master) {
		clk_disable_unprepare(cclk);
		clk_disable_unprepare(iclk);
		dev_err(dev, "cannot allocate master\n");
		return -ENOMEM;
	}

	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	master->num_chipselect = SPI_NUM_CHIPSELECTS;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	master->max_speed_hz = max_freq;
	master->set_cs = spi_qup_set_cs;
	master->transfer_one = spi_qup_transfer_one;
	master->dev.of_node = pdev->dev.of_node;
	master->auto_runtime_pm = true;

	platform_set_drvdata(pdev, master);

	controller = spi_master_get_devdata(master);

	controller->dev = dev;
	controller->base = base;
	controller->iclk = iclk;
	controller->cclk = cclk;
	controller->irq = irq;

	spin_lock_init(&controller->lock);
	init_completion(&controller->done);

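	/*
	 * The I/O geometry is advertised in QUP_IO_M_MODES: a block size
	 * field of 0 means 4 bytes, otherwise 16 * field bytes, and the
	 * FIFO holds block_size * 2^(fifo_field + 1) bytes (e.g. a FIFO
	 * field of 2 gives 8 blocks).
	 */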
	iomode = readl_relaxed(base + QUP_IO_M_MODES);

	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->out_blk_sz = size * 16;
	else
		controller->out_blk_sz = 4;

	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->in_blk_sz = size * 16;
	else
		controller->in_blk_sz = 4;

	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

	dev_info(dev, "v.%08x IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
		 data, controller->in_blk_sz, controller->in_fifo_sz,
		 controller->out_blk_sz, controller->out_fifo_sz);

	writel_relaxed(1, base + QUP_SW_RESET);

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret) {
		dev_err(dev, "cannot set RESET state\n");
		goto error;
	}

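	/*
	 * Put the block into a known state: clear the operational, I/O-mode
	 * and operational-mask registers, enable reporting of SPI clock
	 * under-/over-run errors, and take the outputs out of tri-state.
	 */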
	writel_relaxed(0, base + QUP_OPERATIONAL);
	writel_relaxed(0, base + QUP_IO_M_MODES);
	writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
		       base + SPI_ERROR_FLAGS_EN);

	writel_relaxed(0, base + SPI_CONFIG);
	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
			       IRQF_TRIGGER_HIGH, pdev->name, controller);
	if (ret)
		goto error;

	ret = devm_spi_register_master(dev, master);
	if (ret)
		goto error;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	return 0;

error:
	clk_disable_unprepare(cclk);
	clk_disable_unprepare(iclk);
	spi_master_put(master);
	return ret;
}

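/*
 * Runtime PM keeps the clocks prepared and merely lets the QUP gate them
 * automatically while idle; full clock disable is reserved for system
 * suspend below.
 */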
#ifdef CONFIG_PM_RUNTIME
static int spi_qup_pm_suspend_runtime(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	u32 config;

	/* Enable clock auto-gating */
	config = readl(controller->base + QUP_CONFIG);
	config |= QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);
	return 0;
}

static int spi_qup_pm_resume_runtime(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	u32 config;

	/* Disable clock auto-gating */
	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);
	return 0;
}
#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP
static int spi_qup_suspend(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	clk_disable_unprepare(controller->cclk);
	clk_disable_unprepare(controller->iclk);
	return 0;
}

static int spi_qup_resume(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(controller->iclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(controller->cclk);
	if (ret)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

static int spi_qup_remove(struct platform_device *pdev)
{
	struct spi_master *master = dev_get_drvdata(&pdev->dev);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	clk_disable_unprepare(controller->cclk);
	clk_disable_unprepare(controller->iclk);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static const struct of_device_id spi_qup_dt_match[] = {
	{ .compatible = "qcom,spi-qup-v2.1.1", },
	{ .compatible = "qcom,spi-qup-v2.2.1", },
	{ }
};
MODULE_DEVICE_TABLE(of, spi_qup_dt_match);

static const struct dev_pm_ops spi_qup_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
	SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
			   spi_qup_pm_resume_runtime,
			   NULL)
};

static struct platform_driver spi_qup_driver = {
	.driver = {
		.name		= "spi_qup",
		.owner		= THIS_MODULE,
		.pm		= &spi_qup_dev_pm_ops,
		.of_match_table = spi_qup_dt_match,
	},
	.probe = spi_qup_probe,
	.remove = spi_qup_remove,
};
module_platform_driver(spi_qup_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_qup");
