xref: /linux/drivers/spi/spi-bcm-qspi.c (revision 6fdcba32711044c35c0e1b094cbd8f3f0b4472c9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Driver for Broadcom BRCMSTB, NSP,  NS2, Cygnus SPI Controllers
4  *
5  * Copyright 2016 Broadcom
6  */
7 
8 #include <linux/clk.h>
9 #include <linux/delay.h>
10 #include <linux/device.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/io.h>
14 #include <linux/ioport.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/of.h>
18 #include <linux/of_irq.h>
19 #include <linux/platform_device.h>
20 #include <linux/slab.h>
21 #include <linux/spi/spi.h>
22 #include <linux/spi/spi-mem.h>
23 #include <linux/sysfs.h>
24 #include <linux/types.h>
25 #include "spi-bcm-qspi.h"
26 
27 #define DRIVER_NAME "bcm_qspi"
28 
29 
30 /* BSPI register offsets */
31 #define BSPI_REVISION_ID			0x000
32 #define BSPI_SCRATCH				0x004
33 #define BSPI_MAST_N_BOOT_CTRL			0x008
34 #define BSPI_BUSY_STATUS			0x00c
35 #define BSPI_INTR_STATUS			0x010
36 #define BSPI_B0_STATUS				0x014
37 #define BSPI_B0_CTRL				0x018
38 #define BSPI_B1_STATUS				0x01c
39 #define BSPI_B1_CTRL				0x020
40 #define BSPI_STRAP_OVERRIDE_CTRL		0x024
41 #define BSPI_FLEX_MODE_ENABLE			0x028
42 #define BSPI_BITS_PER_CYCLE			0x02c
43 #define BSPI_BITS_PER_PHASE			0x030
44 #define BSPI_CMD_AND_MODE_BYTE			0x034
45 #define BSPI_BSPI_FLASH_UPPER_ADDR_BYTE	0x038
46 #define BSPI_BSPI_XOR_VALUE			0x03c
47 #define BSPI_BSPI_XOR_ENABLE			0x040
48 #define BSPI_BSPI_PIO_MODE_ENABLE		0x044
49 #define BSPI_BSPI_PIO_IODIR			0x048
50 #define BSPI_BSPI_PIO_DATA			0x04c
51 
52 /* RAF register offsets */
53 #define BSPI_RAF_START_ADDR			0x100
54 #define BSPI_RAF_NUM_WORDS			0x104
55 #define BSPI_RAF_CTRL				0x108
56 #define BSPI_RAF_FULLNESS			0x10c
57 #define BSPI_RAF_WATERMARK			0x110
58 #define BSPI_RAF_STATUS			0x114
59 #define BSPI_RAF_READ_DATA			0x118
60 #define BSPI_RAF_WORD_CNT			0x11c
61 #define BSPI_RAF_CURR_ADDR			0x120
62 
63 /* Override mode masks */
64 #define BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE	BIT(0)
65 #define BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL	BIT(1)
66 #define BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE	BIT(2)
67 #define BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD	BIT(3)
68 #define BSPI_STRAP_OVERRIDE_CTRL_ENDAIN_MODE	BIT(4)
69 
70 #define BSPI_ADDRLEN_3BYTES			3
71 #define BSPI_ADDRLEN_4BYTES			4
72 
73 #define BSPI_RAF_STATUS_FIFO_EMPTY_MASK	BIT(1)
74 
75 #define BSPI_RAF_CTRL_START_MASK		BIT(0)
76 #define BSPI_RAF_CTRL_CLEAR_MASK		BIT(1)
77 
78 #define BSPI_BPP_MODE_SELECT_MASK		BIT(8)
79 #define BSPI_BPP_ADDR_SELECT_MASK		BIT(16)
80 
81 #define BSPI_READ_LENGTH			256
82 
83 /* MSPI register offsets */
84 #define MSPI_SPCR0_LSB				0x000
85 #define MSPI_SPCR0_MSB				0x004
86 #define MSPI_SPCR1_LSB				0x008
87 #define MSPI_SPCR1_MSB				0x00c
88 #define MSPI_NEWQP				0x010
89 #define MSPI_ENDQP				0x014
90 #define MSPI_SPCR2				0x018
91 #define MSPI_MSPI_STATUS			0x020
92 #define MSPI_CPTQP				0x024
93 #define MSPI_SPCR3				0x028
94 #define MSPI_TXRAM				0x040
95 #define MSPI_RXRAM				0x0c0
96 #define MSPI_CDRAM				0x140
97 #define MSPI_WRITE_LOCK			0x180
98 
99 #define MSPI_MASTER_BIT			BIT(7)
100 
101 #define MSPI_NUM_CDRAM				16
102 #define MSPI_CDRAM_CONT_BIT			BIT(7)
103 #define MSPI_CDRAM_BITSE_BIT			BIT(6)
104 #define MSPI_CDRAM_PCS				0xf
105 
106 #define MSPI_SPCR2_SPE				BIT(6)
107 #define MSPI_SPCR2_CONT_AFTER_CMD		BIT(7)
108 
109 #define MSPI_MSPI_STATUS_SPIF			BIT(0)
110 
111 #define INTR_BASE_BIT_SHIFT			0x02
112 #define INTR_COUNT				0x07
113 
114 #define NUM_CHIPSELECT				4
115 #define QSPI_SPBR_MIN				8U
116 #define QSPI_SPBR_MAX				255U
117 
118 #define OPCODE_DIOR				0xBB
119 #define OPCODE_QIOR				0xEB
120 #define OPCODE_DIOR_4B				0xBC
121 #define OPCODE_QIOR_4B				0xEC
122 
123 #define MAX_CMD_SIZE				6
124 
125 #define ADDR_4MB_MASK				GENMASK(22, 0)
126 
127 /* stop at end of transfer, no other reason */
128 #define TRANS_STATUS_BREAK_NONE		0
129 /* stop at end of spi_message */
130 #define TRANS_STATUS_BREAK_EOM			1
131 /* stop at end of spi_transfer if delay */
132 #define TRANS_STATUS_BREAK_DELAY		2
133 /* stop at end of spi_transfer if cs_change */
134 #define TRANS_STATUS_BREAK_CS_CHANGE		4
135 /* stop if we run out of bytes */
136 #define TRANS_STATUS_BREAK_NO_BYTES		8
137 
138 /* events that make us stop filling TX slots */
139 #define TRANS_STATUS_BREAK_TX (TRANS_STATUS_BREAK_EOM |		\
140 			       TRANS_STATUS_BREAK_DELAY |		\
141 			       TRANS_STATUS_BREAK_CS_CHANGE)
142 
143 /* events that make us deassert CS */
144 #define TRANS_STATUS_BREAK_DESELECT (TRANS_STATUS_BREAK_EOM |		\
145 				     TRANS_STATUS_BREAK_CS_CHANGE)
146 
/* Per-device SPI parameters cached between transfers. */
struct bcm_qspi_parms {
	u32 speed_hz;
	u8 mode;
	u8 bits_per_word;
};

/* Currently-programmed BSPI transfer geometry (used to skip redundant setup). */
struct bcm_xfer_mode {
	bool flex_mode;
	unsigned int width;
	unsigned int addrlen;
	unsigned int hp;
};

/* Index into bcm_qspi::base[] for each register block. */
enum base_type {
	MSPI,
	BSPI,
	CHIP_SELECT,
	BASEMAX,
};

/* How interrupts are wired: dedicated L2 lines, or a single muxed L1 line. */
enum irq_source {
	SINGLE_L2,
	MUXED_L1,
};

/* Static description of one interrupt source (see qspi_irq_tab[]). */
struct bcm_qspi_irq {
	const char *irq_name;
	const irq_handler_t irq_handler;
	int irq_source;
	u32 mask;
};

/* Binds a qspi_irq_tab[] entry to a device instance for the ISR dev_id. */
struct bcm_qspi_dev_id {
	const struct bcm_qspi_irq *irqp;
	void *dev;
};


/* Progress marker for the spi_transfer currently going through MSPI. */
struct qspi_trans {
	struct spi_transfer *trans;
	/* byte offset already transferred within trans */
	int byte;
	bool mspi_last_trans;
};
190 
/* Driver-private controller state. */
struct bcm_qspi {
	struct platform_device *pdev;
	struct spi_master *master;
	struct clk *clk;
	u32 base_clk;			/* reference clock rate, Hz */
	u32 max_speed_hz;
	void __iomem *base[BASEMAX];	/* indexed by enum base_type */

	/* Some SoCs provide custom interrupt status register(s) */
	struct bcm_qspi_soc_intc	*soc_intc;

	struct bcm_qspi_parms last_parms;
	struct qspi_trans  trans_pos;	/* progress of current MSPI transfer */
	int curr_cs;
	int bspi_maj_rev;
	int bspi_min_rev;
	int bspi_enabled;
	/* in-flight BSPI read op and its progress; consumed by the ISRs */
	const struct spi_mem_op *bspi_rf_op;
	u32 bspi_rf_op_idx;
	u32 bspi_rf_op_len;
	u32 bspi_rf_op_status;
	struct bcm_xfer_mode xfer_mode;
	/* NOTE(review): presumably strap ctrl saved across S3 — confirm */
	u32 s3_strap_override_ctrl;
	bool bspi_mode;			/* controller has a BSPI block */
	bool big_endian;
	int num_irqs;
	struct bcm_qspi_dev_id *dev_ids;
	struct completion mspi_done;
	struct completion bspi_done;
};
221 
/* True when this controller instance also has the BSPI read-accelerator. */
static inline bool has_bspi(struct bcm_qspi *qspi)
{
	return qspi->bspi_mode;
}
226 
/* Read a controller register at @offset within the @type register block. */
static inline u32 bcm_qspi_read(struct bcm_qspi *qspi, enum base_type type,
				unsigned int offset)
{
	return bcm_qspi_readl(qspi->big_endian, qspi->base[type] + offset);
}
233 
/* Write @data to a controller register at @offset within @type's block. */
static inline void bcm_qspi_write(struct bcm_qspi *qspi, enum base_type type,
				  unsigned int offset, unsigned int data)
{
	bcm_qspi_writel(qspi->big_endian, data, qspi->base[type] + offset);
}
240 
241 /* BSPI helpers */
242 static int bcm_qspi_bspi_busy_poll(struct bcm_qspi *qspi)
243 {
244 	int i;
245 
246 	/* this should normally finish within 10us */
247 	for (i = 0; i < 1000; i++) {
248 		if (!(bcm_qspi_read(qspi, BSPI, BSPI_BUSY_STATUS) & 1))
249 			return 0;
250 		udelay(1);
251 	}
252 	dev_warn(&qspi->pdev->dev, "timeout waiting for !busy_status\n");
253 	return -EIO;
254 }
255 
256 static inline bool bcm_qspi_bspi_ver_three(struct bcm_qspi *qspi)
257 {
258 	if (qspi->bspi_maj_rev < 4)
259 		return true;
260 	return false;
261 }
262 
/*
 * Invalidate both BSPI prefetch buffers.  Waits for the block to go idle
 * first, then pulses the B0/B1 control bits 1 -> 0 so the hardware sees a
 * rising edge on each.
 */
static void bcm_qspi_bspi_flush_prefetch_buffers(struct bcm_qspi *qspi)
{
	bcm_qspi_bspi_busy_poll(qspi);
	/* Force rising edge for the b0/b1 'flush' field */
	bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 1);
	bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 1);
	bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
	bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
}
272 
/* Nonzero when the RAF (linear read) FIFO has no data pending. */
static int bcm_qspi_bspi_lr_is_fifo_empty(struct bcm_qspi *qspi)
{
	return (bcm_qspi_read(qspi, BSPI, BSPI_RAF_STATUS) &
				BSPI_RAF_STATUS_FIFO_EMPTY_MASK);
}
278 
/* Pop one 32-bit word from the RAF FIFO, converted to host endianness. */
static inline u32 bcm_qspi_bspi_lr_read_fifo(struct bcm_qspi *qspi)
{
	u32 data = bcm_qspi_read(qspi, BSPI, BSPI_RAF_READ_DATA);

	/* BSPI v3 LR is LE only, convert data to host endianness */
	if (bcm_qspi_bspi_ver_three(qspi))
		data = le32_to_cpu(data);

	return data;
}
289 
/* Wait for BSPI idle, then kick off a linear-read (RAF) session. */
static inline void bcm_qspi_bspi_lr_start(struct bcm_qspi *qspi)
{
	bcm_qspi_bspi_busy_poll(qspi);
	bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
		       BSPI_RAF_CTRL_START_MASK);
}
296 
/* Abort the current RAF session and flush the prefetch buffers. */
static inline void bcm_qspi_bspi_lr_clear(struct bcm_qspi *qspi)
{
	bcm_qspi_write(qspi, BSPI, BSPI_RAF_CTRL,
		       BSPI_RAF_CTRL_CLEAR_MASK);
	bcm_qspi_bspi_flush_prefetch_buffers(qspi);
}
303 
/*
 * Drain the RAF FIFO into the in-flight spi-mem op's RX buffer.
 *
 * Called from the BSPI interrupt handlers; decrements bspi_rf_op_len as
 * bytes are stored, advancing word index bspi_rf_op_idx on the fast path.
 */
static void bcm_qspi_bspi_lr_data_read(struct bcm_qspi *qspi)
{
	u32 *buf = (u32 *)qspi->bspi_rf_op->data.buf.in;
	u32 data = 0;

	dev_dbg(&qspi->pdev->dev, "xfer %p rx %p rxlen %d\n", qspi->bspi_rf_op,
		qspi->bspi_rf_op->data.buf.in, qspi->bspi_rf_op_len);
	while (!bcm_qspi_bspi_lr_is_fifo_empty(qspi)) {
		data = bcm_qspi_bspi_lr_read_fifo(qspi);
		if (likely(qspi->bspi_rf_op_len >= 4) &&
		    IS_ALIGNED((uintptr_t)buf, 4)) {
			/* fast path: whole aligned 32-bit words */
			buf[qspi->bspi_rf_op_idx++] = data;
			qspi->bspi_rf_op_len -= 4;
		} else {
			/* tail: store remaining (<4 or unaligned) bytes LSB-first */
			u8 *cbuf = (u8 *)&buf[qspi->bspi_rf_op_idx];

			data = cpu_to_le32(data);
			while (qspi->bspi_rf_op_len) {
				*cbuf++ = (u8)data;
				data >>= 8;
				qspi->bspi_rf_op_len--;
			}
		}
	}
}
330 
/*
 * Program the BSPI flex-mode registers.  Flex mode is disabled first so the
 * bits-per-cycle, bits-per-phase and command/mode-byte registers can be
 * updated, then re-enabled (or left off) according to @flex_mode.
 */
static void bcm_qspi_bspi_set_xfer_params(struct bcm_qspi *qspi, u8 cmd_byte,
					  int bpp, int bpc, int flex_mode)
{
	bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
	bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_CYCLE, bpc);
	bcm_qspi_write(qspi, BSPI, BSPI_BITS_PER_PHASE, bpp);
	bcm_qspi_write(qspi, BSPI, BSPI_CMD_AND_MODE_BYTE, cmd_byte);
	bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, flex_mode);
}
340 
341 static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi,
342 				       const struct spi_mem_op *op, int hp)
343 {
344 	int bpc = 0, bpp = 0;
345 	u8 command = op->cmd.opcode;
346 	int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
347 	int addrlen = op->addr.nbytes;
348 	int flex_mode = 1;
349 
350 	dev_dbg(&qspi->pdev->dev, "set flex mode w %x addrlen %x hp %d\n",
351 		width, addrlen, hp);
352 
353 	if (addrlen == BSPI_ADDRLEN_4BYTES)
354 		bpp = BSPI_BPP_ADDR_SELECT_MASK;
355 
356 	bpp |= (op->dummy.nbytes * 8) / op->dummy.buswidth;
357 
358 	switch (width) {
359 	case SPI_NBITS_SINGLE:
360 		if (addrlen == BSPI_ADDRLEN_3BYTES)
361 			/* default mode, does not need flex_cmd */
362 			flex_mode = 0;
363 		break;
364 	case SPI_NBITS_DUAL:
365 		bpc = 0x00000001;
366 		if (hp) {
367 			bpc |= 0x00010100; /* address and mode are 2-bit */
368 			bpp = BSPI_BPP_MODE_SELECT_MASK;
369 		}
370 		break;
371 	case SPI_NBITS_QUAD:
372 		bpc = 0x00000002;
373 		if (hp) {
374 			bpc |= 0x00020200; /* address and mode are 4-bit */
375 			bpp |= BSPI_BPP_MODE_SELECT_MASK;
376 		}
377 		break;
378 	default:
379 		return -EINVAL;
380 	}
381 
382 	bcm_qspi_bspi_set_xfer_params(qspi, command, bpp, bpc, flex_mode);
383 
384 	return 0;
385 }
386 
387 static int bcm_qspi_bspi_set_override(struct bcm_qspi *qspi,
388 				      const struct spi_mem_op *op, int hp)
389 {
390 	int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
391 	int addrlen = op->addr.nbytes;
392 	u32 data = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
393 
394 	dev_dbg(&qspi->pdev->dev, "set override mode w %x addrlen %x hp %d\n",
395 		width, addrlen, hp);
396 
397 	switch (width) {
398 	case SPI_NBITS_SINGLE:
399 		/* clear quad/dual mode */
400 		data &= ~(BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD |
401 			  BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL);
402 		break;
403 	case SPI_NBITS_QUAD:
404 		/* clear dual mode and set quad mode */
405 		data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
406 		data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
407 		break;
408 	case SPI_NBITS_DUAL:
409 		/* clear quad mode set dual mode */
410 		data &= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD;
411 		data |= BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL;
412 		break;
413 	default:
414 		return -EINVAL;
415 	}
416 
417 	if (addrlen == BSPI_ADDRLEN_4BYTES)
418 		/* set 4byte mode*/
419 		data |= BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;
420 	else
421 		/* clear 4 byte mode */
422 		data &= ~BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE;
423 
424 	/* set the override mode */
425 	data |=	BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
426 	bcm_qspi_write(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL, data);
427 	bcm_qspi_bspi_set_xfer_params(qspi, op->cmd.opcode, 0, 0, 0);
428 
429 	return 0;
430 }
431 
/*
 * Configure BSPI for the given read op, using flex mode by default but
 * falling back to the strap-override register when an override is already
 * active (either in hardware or saved in s3_strap_override_ctrl).
 *
 * Return: 0 on success, negative error for an unsupported combination.
 */
static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi,
				  const struct spi_mem_op *op, int hp)
{
	int error = 0;
	int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
	int addrlen = op->addr.nbytes;

	/* default mode */
	qspi->xfer_mode.flex_mode = true;

	if (!bcm_qspi_bspi_ver_three(qspi)) {
		u32 val, mask;

		val = bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
		mask = BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE;
		if (val & mask || qspi->s3_strap_override_ctrl & mask) {
			/* override already engaged: keep using it */
			qspi->xfer_mode.flex_mode = false;
			bcm_qspi_write(qspi, BSPI, BSPI_FLEX_MODE_ENABLE, 0);
			error = bcm_qspi_bspi_set_override(qspi, op, hp);
		}
	}

	if (qspi->xfer_mode.flex_mode)
		error = bcm_qspi_bspi_set_flex_mode(qspi, op, hp);

	if (error) {
		dev_warn(&qspi->pdev->dev,
			 "INVALID COMBINATION: width=%d addrlen=%d hp=%d\n",
			 width, addrlen, hp);
	} else if (qspi->xfer_mode.width != width ||
		   qspi->xfer_mode.addrlen != addrlen ||
		   qspi->xfer_mode.hp != hp) {
		/* cache the new geometry and log the change */
		qspi->xfer_mode.width = width;
		qspi->xfer_mode.addrlen = addrlen;
		qspi->xfer_mode.hp = hp;
		dev_dbg(&qspi->pdev->dev,
			"cs:%d %d-lane output, %d-byte address%s\n",
			qspi->curr_cs,
			qspi->xfer_mode.width,
			qspi->xfer_mode.addrlen,
			qspi->xfer_mode.hp != -1 ? ", hp mode" : "");
	}

	return error;
}
477 
/*
 * Hand the flash bus back to BSPI by clearing MAST_N_BOOT_CTRL, flushing
 * the prefetch buffers first.  No-op if BSPI is absent or already owns
 * the bus.
 */
static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
{
	if (!has_bspi(qspi))
		return;

	qspi->bspi_enabled = 1;
	if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1) == 0)
		return;	/* already in BSPI mode */

	bcm_qspi_bspi_flush_prefetch_buffers(qspi);
	udelay(1);
	bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 0);
	udelay(1);
}
492 
/*
 * Give the flash bus to MSPI by setting MAST_N_BOOT_CTRL, waiting for BSPI
 * to go idle first.  No-op if BSPI is absent or MSPI already owns the bus.
 */
static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
{
	if (!has_bspi(qspi))
		return;

	qspi->bspi_enabled = 0;
	if ((bcm_qspi_read(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL) & 1))
		return;	/* already in MSPI mode */

	bcm_qspi_bspi_busy_poll(qspi);
	bcm_qspi_write(qspi, BSPI, BSPI_MAST_N_BOOT_CTRL, 1);
	udelay(1);
}
506 
/*
 * Route the controller to chip select @cs via the optional CHIP_SELECT
 * register block, keeping the upper register bits intact.  Skips the write
 * (and settle delay) if the register already selects @cs.
 */
static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
{
	u32 rd = 0;
	u32 wr = 0;

	if (qspi->base[CHIP_SELECT]) {
		rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
		wr = (rd & ~0xff) | (1 << cs);
		if (rd == wr)
			return;
		bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
		/* allow the mux to settle before traffic starts */
		usleep_range(10, 20);
	}

	dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
	qspi->curr_cs = cs;
}
524 
525 /* MSPI helpers */
526 static void bcm_qspi_hw_set_parms(struct bcm_qspi *qspi,
527 				  const struct bcm_qspi_parms *xp)
528 {
529 	u32 spcr, spbr = 0;
530 
531 	if (xp->speed_hz)
532 		spbr = qspi->base_clk / (2 * xp->speed_hz);
533 
534 	spcr = clamp_val(spbr, QSPI_SPBR_MIN, QSPI_SPBR_MAX);
535 	bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, spcr);
536 
537 	spcr = MSPI_MASTER_BIT;
538 	/* for 16 bit the data should be zero */
539 	if (xp->bits_per_word != 16)
540 		spcr |= xp->bits_per_word << 2;
541 	spcr |= xp->mode & 3;
542 	bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_MSB, spcr);
543 
544 	qspi->last_parms = *xp;
545 }
546 
547 static void bcm_qspi_update_parms(struct bcm_qspi *qspi,
548 				  struct spi_device *spi,
549 				  struct spi_transfer *trans)
550 {
551 	struct bcm_qspi_parms xp;
552 
553 	xp.speed_hz = trans->speed_hz;
554 	xp.bits_per_word = trans->bits_per_word;
555 	xp.mode = spi->mode;
556 
557 	bcm_qspi_hw_set_parms(qspi, &xp);
558 }
559 
560 static int bcm_qspi_setup(struct spi_device *spi)
561 {
562 	struct bcm_qspi_parms *xp;
563 
564 	if (spi->bits_per_word > 16)
565 		return -EINVAL;
566 
567 	xp = spi_get_ctldata(spi);
568 	if (!xp) {
569 		xp = kzalloc(sizeof(*xp), GFP_KERNEL);
570 		if (!xp)
571 			return -ENOMEM;
572 		spi_set_ctldata(spi, xp);
573 	}
574 	xp->speed_hz = spi->max_speed_hz;
575 	xp->mode = spi->mode;
576 
577 	if (spi->bits_per_word)
578 		xp->bits_per_word = spi->bits_per_word;
579 	else
580 		xp->bits_per_word = 8;
581 
582 	return 0;
583 }
584 
585 static bool bcm_qspi_mspi_transfer_is_last(struct bcm_qspi *qspi,
586 					   struct qspi_trans *qt)
587 {
588 	if (qt->mspi_last_trans &&
589 	    spi_transfer_is_last(qspi->master, qt->trans))
590 		return true;
591 	else
592 		return false;
593 }
594 
/*
 * Advance @qt by one word (1 or 2 bytes) and report why the caller should
 * stop, as a mask of TRANS_STATUS_BREAK_* bits filtered by @flags.
 * Clears qt->trans when the current spi_transfer is exhausted.
 */
static int update_qspi_trans_byte_count(struct bcm_qspi *qspi,
					struct qspi_trans *qt, int flags)
{
	int ret = TRANS_STATUS_BREAK_NONE;

	/* count the last transferred bytes */
	if (qt->trans->bits_per_word <= 8)
		qt->byte++;
	else
		qt->byte += 2;

	if (qt->byte >= qt->trans->len) {
		/* we're at the end of the spi_transfer */
		/* in TX mode, need to pause for a delay or CS change */
		if (qt->trans->delay_usecs &&
		    (flags & TRANS_STATUS_BREAK_DELAY))
			ret |= TRANS_STATUS_BREAK_DELAY;
		if (qt->trans->cs_change &&
		    (flags & TRANS_STATUS_BREAK_CS_CHANGE))
			ret |= TRANS_STATUS_BREAK_CS_CHANGE;
		if (ret)
			goto done;

		dev_dbg(&qspi->pdev->dev, "advance msg exit\n");
		if (bcm_qspi_mspi_transfer_is_last(qspi, qt))
			ret = TRANS_STATUS_BREAK_EOM;
		else
			ret = TRANS_STATUS_BREAK_NO_BYTES;

		qt->trans = NULL;
	}

done:
	dev_dbg(&qspi->pdev->dev, "trans %p len %d byte %d ret %x\n",
		qt->trans, qt->trans ? qt->trans->len : 0, qt->byte, ret);
	return ret;
}
632 
633 static inline u8 read_rxram_slot_u8(struct bcm_qspi *qspi, int slot)
634 {
635 	u32 slot_offset = MSPI_RXRAM + (slot << 3) + 0x4;
636 
637 	/* mask out reserved bits */
638 	return bcm_qspi_read(qspi, MSPI, slot_offset) & 0xff;
639 }
640 
641 static inline u16 read_rxram_slot_u16(struct bcm_qspi *qspi, int slot)
642 {
643 	u32 reg_offset = MSPI_RXRAM;
644 	u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
645 	u32 msb_offset = reg_offset + (slot << 3);
646 
647 	return (bcm_qspi_read(qspi, MSPI, lsb_offset) & 0xff) |
648 		((bcm_qspi_read(qspi, MSPI, msb_offset) & 0xff) << 8);
649 }
650 
651 static void read_from_hw(struct bcm_qspi *qspi, int slots)
652 {
653 	struct qspi_trans tp;
654 	int slot;
655 
656 	bcm_qspi_disable_bspi(qspi);
657 
658 	if (slots > MSPI_NUM_CDRAM) {
659 		/* should never happen */
660 		dev_err(&qspi->pdev->dev, "%s: too many slots!\n", __func__);
661 		return;
662 	}
663 
664 	tp = qspi->trans_pos;
665 
666 	for (slot = 0; slot < slots; slot++) {
667 		if (tp.trans->bits_per_word <= 8) {
668 			u8 *buf = tp.trans->rx_buf;
669 
670 			if (buf)
671 				buf[tp.byte] = read_rxram_slot_u8(qspi, slot);
672 			dev_dbg(&qspi->pdev->dev, "RD %02x\n",
673 				buf ? buf[tp.byte] : 0xff);
674 		} else {
675 			u16 *buf = tp.trans->rx_buf;
676 
677 			if (buf)
678 				buf[tp.byte / 2] = read_rxram_slot_u16(qspi,
679 								      slot);
680 			dev_dbg(&qspi->pdev->dev, "RD %04x\n",
681 				buf ? buf[tp.byte] : 0xffff);
682 		}
683 
684 		update_qspi_trans_byte_count(qspi, &tp,
685 					     TRANS_STATUS_BREAK_NONE);
686 	}
687 
688 	qspi->trans_pos = tp;
689 }
690 
691 static inline void write_txram_slot_u8(struct bcm_qspi *qspi, int slot,
692 				       u8 val)
693 {
694 	u32 reg_offset = MSPI_TXRAM + (slot << 3);
695 
696 	/* mask out reserved bits */
697 	bcm_qspi_write(qspi, MSPI, reg_offset, val);
698 }
699 
700 static inline void write_txram_slot_u16(struct bcm_qspi *qspi, int slot,
701 					u16 val)
702 {
703 	u32 reg_offset = MSPI_TXRAM;
704 	u32 msb_offset = reg_offset + (slot << 3);
705 	u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
706 
707 	bcm_qspi_write(qspi, MSPI, msb_offset, (val >> 8));
708 	bcm_qspi_write(qspi, MSPI, lsb_offset, (val & 0xff));
709 }
710 
/* Read the command RAM entry for @slot. */
static inline u32 read_cdram_slot(struct bcm_qspi *qspi, int slot)
{
	return bcm_qspi_read(qspi, MSPI, MSPI_CDRAM + (slot << 2));
}
715 
/* Write the command RAM entry for @slot. */
static inline void write_cdram_slot(struct bcm_qspi *qspi, int slot, u32 val)
{
	bcm_qspi_write(qspi, MSPI, (MSPI_CDRAM + (slot << 2)), val);
}
720 
/*
 * Queue as much of the current transfer as fits into the MSPI TX RAM /
 * command RAM (up to MSPI_NUM_CDRAM slots) and start the operation.
 *
 * Return: number of slots written (0 means nothing was queued).
 */
static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
{
	struct qspi_trans tp;
	int slot = 0, tstatus = 0;
	u32 mspi_cdram = 0;

	bcm_qspi_disable_bspi(qspi);
	tp = qspi->trans_pos;
	bcm_qspi_update_parms(qspi, spi, tp.trans);

	/* Run until end of transfer or reached the max data */
	while (!tstatus && slot < MSPI_NUM_CDRAM) {
		if (tp.trans->bits_per_word <= 8) {
			const u8 *buf = tp.trans->tx_buf;
			u8 val = buf ? buf[tp.byte] : 0xff;

			write_txram_slot_u8(qspi, slot, val);
			dev_dbg(&qspi->pdev->dev, "WR %02x\n", val);
		} else {
			const u16 *buf = tp.trans->tx_buf;
			u16 val = buf ? buf[tp.byte / 2] : 0xffff;

			write_txram_slot_u16(qspi, slot, val);
			dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
		}
		/* keep CS asserted between slots by default */
		mspi_cdram = MSPI_CDRAM_CONT_BIT;

		if (has_bspi(qspi))
			mspi_cdram &= ~1;
		else
			mspi_cdram |= (~(1 << spi->chip_select) &
				       MSPI_CDRAM_PCS);

		mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
				MSPI_CDRAM_BITSE_BIT);

		write_cdram_slot(qspi, slot, mspi_cdram);

		tstatus = update_qspi_trans_byte_count(qspi, &tp,
						       TRANS_STATUS_BREAK_TX);
		slot++;
	}

	if (!slot) {
		dev_err(&qspi->pdev->dev, "%s: no data to send?", __func__);
		goto done;
	}

	dev_dbg(&qspi->pdev->dev, "submitting %d slots\n", slot);
	bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
	bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, slot - 1);

	/* drop CONT on the final slot so CS deasserts at end of message */
	if (tstatus & TRANS_STATUS_BREAK_DESELECT) {
		mspi_cdram = read_cdram_slot(qspi, slot - 1) &
			~MSPI_CDRAM_CONT_BIT;
		write_cdram_slot(qspi, slot - 1, mspi_cdram);
	}

	if (has_bspi(qspi))
		bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 1);

	/* Must flush previous writes before starting MSPI operation */
	mb();
	/* Set cont | spe | spifie */
	bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0xe0);

done:
	return slot;
}
791 
/*
 * Execute a spi-mem read through the BSPI RAF engine, splitting the
 * request into BSPI_READ_LENGTH-sized linear-read sessions.  Each session
 * is drained into the op's buffer by the BSPI interrupt handlers, which
 * signal bspi_done.
 *
 * Return: 0 on success, -EIO for a 4-byte address on BSPI v3,
 * -ETIMEDOUT when a session does not complete within 100ms.
 */
static int bcm_qspi_bspi_exec_mem_op(struct spi_device *spi,
				     const struct spi_mem_op *op)
{
	struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
	u32 addr = 0, len, rdlen, len_words, from = 0;
	int ret = 0;
	unsigned long timeo = msecs_to_jiffies(100);
	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;

	if (bcm_qspi_bspi_ver_three(qspi))
		if (op->addr.nbytes == BSPI_ADDRLEN_4BYTES)
			return -EIO;

	from = op->addr.val;
	if (!spi->cs_gpiod)
		bcm_qspi_chip_select(qspi, spi->chip_select);
	bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);

	/*
	 * when using flex mode we need to send
	 * the upper address byte to bspi
	 */
	if (bcm_qspi_bspi_ver_three(qspi) == false) {
		addr = from & 0xff000000;
		bcm_qspi_write(qspi, BSPI,
			       BSPI_BSPI_FLASH_UPPER_ADDR_BYTE, addr);
	}

	if (!qspi->xfer_mode.flex_mode)
		addr = from;
	else
		addr = from & 0x00ffffff;

	/* v3 remaps the flash window; see bcm_qspi_exec_mem_op() */
	if (bcm_qspi_bspi_ver_three(qspi) == true)
		addr = (addr + 0xc00000) & 0xffffff;

	/*
	 * read into the entire buffer by breaking the reads
	 * into RAF buffer read lengths
	 */
	len = op->data.nbytes;
	qspi->bspi_rf_op_idx = 0;

	do {
		if (len > BSPI_READ_LENGTH)
			rdlen = BSPI_READ_LENGTH;
		else
			rdlen = len;

		reinit_completion(&qspi->bspi_done);
		bcm_qspi_enable_bspi(qspi);
		len_words = (rdlen + 3) >> 2;
		qspi->bspi_rf_op = op;
		qspi->bspi_rf_op_status = 0;
		qspi->bspi_rf_op_len = rdlen;
		dev_dbg(&qspi->pdev->dev,
			"bspi xfr addr 0x%x len 0x%x", addr, rdlen);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_START_ADDR, addr);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_NUM_WORDS, len_words);
		bcm_qspi_write(qspi, BSPI, BSPI_RAF_WATERMARK, 0);
		if (qspi->soc_intc) {
			/*
			 * clear soc MSPI and BSPI interrupts and enable
			 * BSPI interrupts.
			 */
			soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_BSPI_DONE);
			soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE, true);
		}

		/* Must flush previous writes before starting BSPI operation */
		mb();
		bcm_qspi_bspi_lr_start(qspi);
		if (!wait_for_completion_timeout(&qspi->bspi_done, timeo)) {
			dev_err(&qspi->pdev->dev, "timeout waiting for BSPI\n");
			ret = -ETIMEDOUT;
			break;
		}

		/* set msg return length */
		addr += rdlen;
		len -= rdlen;
	} while (len);

	return ret;
}
877 
/*
 * spi_master .transfer_one callback: push @trans through MSPI in chunks
 * sized by write_to_hw(), waiting up to 100ms for each chunk's completion
 * interrupt, then re-enable BSPI.
 *
 * Return: 0 on success, -ETIMEDOUT if a chunk never completes.
 */
static int bcm_qspi_transfer_one(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *trans)
{
	struct bcm_qspi *qspi = spi_master_get_devdata(master);
	int slots;
	unsigned long timeo = msecs_to_jiffies(100);

	if (!spi->cs_gpiod)
		bcm_qspi_chip_select(qspi, spi->chip_select);
	qspi->trans_pos.trans = trans;
	qspi->trans_pos.byte = 0;

	while (qspi->trans_pos.byte < trans->len) {
		reinit_completion(&qspi->mspi_done);

		slots = write_to_hw(qspi, spi);
		if (!wait_for_completion_timeout(&qspi->mspi_done, timeo)) {
			dev_err(&qspi->pdev->dev, "timeout waiting for MSPI\n");
			return -ETIMEDOUT;
		}

		read_from_hw(qspi, slots);
	}
	bcm_qspi_enable_bspi(qspi);

	return 0;
}
906 
907 static int bcm_qspi_mspi_exec_mem_op(struct spi_device *spi,
908 				     const struct spi_mem_op *op)
909 {
910 	struct spi_master *master = spi->master;
911 	struct bcm_qspi *qspi = spi_master_get_devdata(master);
912 	struct spi_transfer t[2];
913 	u8 cmd[6] = { };
914 	int ret, i;
915 
916 	memset(cmd, 0, sizeof(cmd));
917 	memset(t, 0, sizeof(t));
918 
919 	/* tx */
920 	/* opcode is in cmd[0] */
921 	cmd[0] = op->cmd.opcode;
922 	for (i = 0; i < op->addr.nbytes; i++)
923 		cmd[1 + i] = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
924 
925 	t[0].tx_buf = cmd;
926 	t[0].len = op->addr.nbytes + op->dummy.nbytes + 1;
927 	t[0].bits_per_word = spi->bits_per_word;
928 	t[0].tx_nbits = op->cmd.buswidth;
929 	/* lets mspi know that this is not last transfer */
930 	qspi->trans_pos.mspi_last_trans = false;
931 	ret = bcm_qspi_transfer_one(master, spi, &t[0]);
932 
933 	/* rx */
934 	qspi->trans_pos.mspi_last_trans = true;
935 	if (!ret) {
936 		/* rx */
937 		t[1].rx_buf = op->data.buf.in;
938 		t[1].len = op->data.nbytes;
939 		t[1].rx_nbits =  op->data.buswidth;
940 		t[1].bits_per_word = spi->bits_per_word;
941 		ret = bcm_qspi_transfer_one(master, spi, &t[1]);
942 	}
943 
944 	return ret;
945 }
946 
/*
 * spi_mem .exec_op callback: dispatch a read op to the BSPI accelerator
 * when possible, falling back to MSPI for ops BSPI cannot handle.
 *
 * Return: 0 on success, -ENOTSUPP for non-read ops, or an error from the
 * chosen path.
 */
static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
				const struct spi_mem_op *op)
{
	struct spi_device *spi = mem->spi;
	struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
	int ret = 0;
	bool mspi_read = false;
	u32 addr = 0, len;
	u_char *buf;

	/* only addressed data-in (flash read) ops are supported */
	if (!op->data.nbytes || !op->addr.nbytes || op->addr.nbytes > 4 ||
	    op->data.dir != SPI_MEM_DATA_IN)
		return -ENOTSUPP;

	buf = op->data.buf.in;
	addr = op->addr.val;
	len = op->data.nbytes;

	if (bcm_qspi_bspi_ver_three(qspi) == true) {
		/*
		 * The address coming into this function is a raw flash offset.
		 * But for BSPI <= V3, we need to convert it to a remapped BSPI
		 * address. If it crosses a 4MB boundary, just revert back to
		 * using MSPI.
		 */
		addr = (addr + 0xc00000) & 0xffffff;

		if ((~ADDR_4MB_MASK & addr) ^
		    (~ADDR_4MB_MASK & (addr + len - 1)))
			mspi_read = true;
	}

	/* non-aligned and very short transfers are handled by MSPI */
	if (!IS_ALIGNED((uintptr_t)addr, 4) || !IS_ALIGNED((uintptr_t)buf, 4) ||
	    len < 4)
		mspi_read = true;

	if (mspi_read)
		return bcm_qspi_mspi_exec_mem_op(spi, op);

	ret = bcm_qspi_bspi_set_mode(qspi, op, 0);

	if (!ret)
		ret = bcm_qspi_bspi_exec_mem_op(spi, op);

	return ret;
}
994 
/* spi_master .cleanup: free the parms allocated in bcm_qspi_setup(). */
static void bcm_qspi_cleanup(struct spi_device *spi)
{
	kfree(spi_get_ctldata(spi));
}
1001 
/*
 * MSPI completion interrupt: ack the SPIF bit (and the SoC-level status,
 * if any) and wake the waiter on mspi_done.
 */
static irqreturn_t bcm_qspi_mspi_l2_isr(int irq, void *dev_id)
{
	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
	struct bcm_qspi *qspi = qspi_dev_id->dev;
	u32 status = bcm_qspi_read(qspi, MSPI, MSPI_MSPI_STATUS);

	if (status & MSPI_MSPI_STATUS_SPIF) {
		struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
		/* clear interrupt */
		status &= ~MSPI_MSPI_STATUS_SPIF;
		bcm_qspi_write(qspi, MSPI, MSPI_MSPI_STATUS, status);
		if (qspi->soc_intc)
			soc_intc->bcm_qspi_int_ack(soc_intc, MSPI_DONE);
		complete(&qspi->mspi_done);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
1021 
/*
 * BSPI linear-read interrupt (fullness reached / session done): drain the
 * RAF FIFO into the in-flight op and, once all bytes are in, signal
 * bspi_done.  On SoCs with a custom intc, also manage the SoC-level BSPI
 * interrupt state.
 */
static irqreturn_t bcm_qspi_bspi_lr_l2_isr(int irq, void *dev_id)
{
	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
	struct bcm_qspi *qspi = qspi_dev_id->dev;
	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
	u32 status = qspi_dev_id->irqp->mask;

	if (qspi->bspi_enabled && qspi->bspi_rf_op) {
		bcm_qspi_bspi_lr_data_read(qspi);
		if (qspi->bspi_rf_op_len == 0) {
			/* session complete: stop tracking the op */
			qspi->bspi_rf_op = NULL;
			if (qspi->soc_intc) {
				/* disable soc BSPI interrupt */
				soc_intc->bcm_qspi_int_set(soc_intc, BSPI_DONE,
							   false);
				/* indicate done */
				status = INTR_BSPI_LR_SESSION_DONE_MASK;
			}

			/* on error, abort the session instead of a plain flush */
			if (qspi->bspi_rf_op_status)
				bcm_qspi_bspi_lr_clear(qspi);
			else
				bcm_qspi_bspi_flush_prefetch_buffers(qspi);
		}

		if (qspi->soc_intc)
			/* clear soc BSPI interrupt */
			soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_DONE);
	}

	/* only complete on the session-done event with all data consumed */
	status &= INTR_BSPI_LR_SESSION_DONE_MASK;
	if (qspi->bspi_enabled && status && qspi->bspi_rf_op_len == 0)
		complete(&qspi->bspi_done);

	return IRQ_HANDLED;
}
1058 
/*
 * BSPI error interrupt (session aborted / impatient): record the failure
 * in bspi_rf_op_status and wake the waiter so it can bail out.
 */
static irqreturn_t bcm_qspi_bspi_lr_err_l2_isr(int irq, void *dev_id)
{
	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
	struct bcm_qspi *qspi = qspi_dev_id->dev;
	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;

	dev_err(&qspi->pdev->dev, "BSPI INT error\n");
	qspi->bspi_rf_op_status = -EIO;
	if (qspi->soc_intc)
		/* clear soc interrupt */
		soc_intc->bcm_qspi_int_ack(soc_intc, BSPI_ERR);

	complete(&qspi->bspi_done);
	return IRQ_HANDLED;
}
1074 
1075 static irqreturn_t bcm_qspi_l1_isr(int irq, void *dev_id)
1076 {
1077 	struct bcm_qspi_dev_id *qspi_dev_id = dev_id;
1078 	struct bcm_qspi *qspi = qspi_dev_id->dev;
1079 	struct bcm_qspi_soc_intc *soc_intc = qspi->soc_intc;
1080 	irqreturn_t ret = IRQ_NONE;
1081 
1082 	if (soc_intc) {
1083 		u32 status = soc_intc->bcm_qspi_get_int_status(soc_intc);
1084 
1085 		if (status & MSPI_DONE)
1086 			ret = bcm_qspi_mspi_l2_isr(irq, dev_id);
1087 		else if (status & BSPI_DONE)
1088 			ret = bcm_qspi_bspi_lr_l2_isr(irq, dev_id);
1089 		else if (status & BSPI_ERR)
1090 			ret = bcm_qspi_bspi_lr_err_l2_isr(irq, dev_id);
1091 	}
1092 
1093 	return ret;
1094 }
1095 
/*
 * Interrupt descriptor table walked by bcm_qspi_probe(): each entry
 * names an interrupt line, the handler to attach, and the status mask
 * it services.  Entries without an explicit .irq_source rely on the
 * zero default, which probe treats as SINGLE_L2 (individual L2 lines
 * requested by name); the final MUXED_L1 entry covers SoCs that route
 * everything through one L1 line.
 */
static const struct bcm_qspi_irq qspi_irq_tab[] = {
	{
		.irq_name = "spi_lr_fullness_reached",
		.irq_handler = bcm_qspi_bspi_lr_l2_isr,
		.mask = INTR_BSPI_LR_FULLNESS_REACHED_MASK,
	},
	{
		/* error conditions share the error handler below */
		.irq_name = "spi_lr_session_aborted",
		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
		.mask = INTR_BSPI_LR_SESSION_ABORTED_MASK,
	},
	{
		.irq_name = "spi_lr_impatient",
		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
		.mask = INTR_BSPI_LR_IMPATIENT_MASK,
	},
	{
		.irq_name = "spi_lr_session_done",
		.irq_handler = bcm_qspi_bspi_lr_l2_isr,
		.mask = INTR_BSPI_LR_SESSION_DONE_MASK,
	},
#ifdef QSPI_INT_DEBUG
	/* this interrupt is for debug purposes only, dont request irq */
	{
		.irq_name = "spi_lr_overread",
		.irq_handler = bcm_qspi_bspi_lr_err_l2_isr,
		.mask = INTR_BSPI_LR_OVERREAD_MASK,
	},
#endif
	{
		.irq_name = "mspi_done",
		.irq_handler = bcm_qspi_mspi_l2_isr,
		.mask = INTR_MSPI_DONE_MASK,
	},
	{
		.irq_name = "mspi_halted",
		.irq_handler = bcm_qspi_mspi_l2_isr,
		.mask = INTR_MSPI_HALTED_MASK,
	},
	{
		/* single muxed L1 interrupt source */
		.irq_name = "spi_l1_intr",
		.irq_handler = bcm_qspi_l1_isr,
		.irq_source = MUXED_L1,
		.mask = QSPI_INTERRUPTS_ALL,
	},
};
1143 
1144 static void bcm_qspi_bspi_init(struct bcm_qspi *qspi)
1145 {
1146 	u32 val = 0;
1147 
1148 	val = bcm_qspi_read(qspi, BSPI, BSPI_REVISION_ID);
1149 	qspi->bspi_maj_rev = (val >> 8) & 0xff;
1150 	qspi->bspi_min_rev = val & 0xff;
1151 	if (!(bcm_qspi_bspi_ver_three(qspi))) {
1152 		/* Force mapping of BSPI address -> flash offset */
1153 		bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_VALUE, 0);
1154 		bcm_qspi_write(qspi, BSPI, BSPI_BSPI_XOR_ENABLE, 1);
1155 	}
1156 	qspi->bspi_enabled = 1;
1157 	bcm_qspi_disable_bspi(qspi);
1158 	bcm_qspi_write(qspi, BSPI, BSPI_B0_CTRL, 0);
1159 	bcm_qspi_write(qspi, BSPI, BSPI_B1_CTRL, 0);
1160 }
1161 
1162 static void bcm_qspi_hw_init(struct bcm_qspi *qspi)
1163 {
1164 	struct bcm_qspi_parms parms;
1165 
1166 	bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_LSB, 0);
1167 	bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_MSB, 0);
1168 	bcm_qspi_write(qspi, MSPI, MSPI_NEWQP, 0);
1169 	bcm_qspi_write(qspi, MSPI, MSPI_ENDQP, 0);
1170 	bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0x20);
1171 
1172 	parms.mode = SPI_MODE_3;
1173 	parms.bits_per_word = 8;
1174 	parms.speed_hz = qspi->max_speed_hz;
1175 	bcm_qspi_hw_set_parms(qspi, &parms);
1176 
1177 	if (has_bspi(qspi))
1178 		bcm_qspi_bspi_init(qspi);
1179 }
1180 
1181 static void bcm_qspi_hw_uninit(struct bcm_qspi *qspi)
1182 {
1183 	bcm_qspi_write(qspi, MSPI, MSPI_SPCR2, 0);
1184 	if (has_bspi(qspi))
1185 		bcm_qspi_write(qspi, MSPI, MSPI_WRITE_LOCK, 0);
1186 
1187 }
1188 
/* spi-mem framework hooks; see bcm_qspi_exec_mem_op() */
static const struct spi_controller_mem_ops bcm_qspi_mem_ops = {
	.exec_op = bcm_qspi_exec_mem_op,
};
1192 
1193 static const struct of_device_id bcm_qspi_of_match[] = {
1194 	{ .compatible = "brcm,spi-bcm-qspi" },
1195 	{},
1196 };
1197 MODULE_DEVICE_TABLE(of, bcm_qspi_of_match);
1198 
1199 int bcm_qspi_probe(struct platform_device *pdev,
1200 		   struct bcm_qspi_soc_intc *soc_intc)
1201 {
1202 	struct device *dev = &pdev->dev;
1203 	struct bcm_qspi *qspi;
1204 	struct spi_master *master;
1205 	struct resource *res;
1206 	int irq, ret = 0, num_ints = 0;
1207 	u32 val;
1208 	const char *name = NULL;
1209 	int num_irqs = ARRAY_SIZE(qspi_irq_tab);
1210 
1211 	/* We only support device-tree instantiation */
1212 	if (!dev->of_node)
1213 		return -ENODEV;
1214 
1215 	if (!of_match_node(bcm_qspi_of_match, dev->of_node))
1216 		return -ENODEV;
1217 
1218 	master = spi_alloc_master(dev, sizeof(struct bcm_qspi));
1219 	if (!master) {
1220 		dev_err(dev, "error allocating spi_master\n");
1221 		return -ENOMEM;
1222 	}
1223 
1224 	qspi = spi_master_get_devdata(master);
1225 	qspi->pdev = pdev;
1226 	qspi->trans_pos.trans = NULL;
1227 	qspi->trans_pos.byte = 0;
1228 	qspi->trans_pos.mspi_last_trans = true;
1229 	qspi->master = master;
1230 
1231 	master->bus_num = -1;
1232 	master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_RX_DUAL | SPI_RX_QUAD;
1233 	master->setup = bcm_qspi_setup;
1234 	master->transfer_one = bcm_qspi_transfer_one;
1235 	master->mem_ops = &bcm_qspi_mem_ops;
1236 	master->cleanup = bcm_qspi_cleanup;
1237 	master->dev.of_node = dev->of_node;
1238 	master->num_chipselect = NUM_CHIPSELECT;
1239 	master->use_gpio_descriptors = true;
1240 
1241 	qspi->big_endian = of_device_is_big_endian(dev->of_node);
1242 
1243 	if (!of_property_read_u32(dev->of_node, "num-cs", &val))
1244 		master->num_chipselect = val;
1245 
1246 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hif_mspi");
1247 	if (!res)
1248 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1249 						   "mspi");
1250 
1251 	if (res) {
1252 		qspi->base[MSPI]  = devm_ioremap_resource(dev, res);
1253 		if (IS_ERR(qspi->base[MSPI])) {
1254 			ret = PTR_ERR(qspi->base[MSPI]);
1255 			goto qspi_resource_err;
1256 		}
1257 	} else {
1258 		goto qspi_resource_err;
1259 	}
1260 
1261 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
1262 	if (res) {
1263 		qspi->base[BSPI]  = devm_ioremap_resource(dev, res);
1264 		if (IS_ERR(qspi->base[BSPI])) {
1265 			ret = PTR_ERR(qspi->base[BSPI]);
1266 			goto qspi_resource_err;
1267 		}
1268 		qspi->bspi_mode = true;
1269 	} else {
1270 		qspi->bspi_mode = false;
1271 	}
1272 
1273 	dev_info(dev, "using %smspi mode\n", qspi->bspi_mode ? "bspi-" : "");
1274 
1275 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs_reg");
1276 	if (res) {
1277 		qspi->base[CHIP_SELECT]  = devm_ioremap_resource(dev, res);
1278 		if (IS_ERR(qspi->base[CHIP_SELECT])) {
1279 			ret = PTR_ERR(qspi->base[CHIP_SELECT]);
1280 			goto qspi_resource_err;
1281 		}
1282 	}
1283 
1284 	qspi->dev_ids = kcalloc(num_irqs, sizeof(struct bcm_qspi_dev_id),
1285 				GFP_KERNEL);
1286 	if (!qspi->dev_ids) {
1287 		ret = -ENOMEM;
1288 		goto qspi_resource_err;
1289 	}
1290 
1291 	for (val = 0; val < num_irqs; val++) {
1292 		irq = -1;
1293 		name = qspi_irq_tab[val].irq_name;
1294 		if (qspi_irq_tab[val].irq_source == SINGLE_L2) {
1295 			/* get the l2 interrupts */
1296 			irq = platform_get_irq_byname(pdev, name);
1297 		} else if (!num_ints && soc_intc) {
1298 			/* all mspi, bspi intrs muxed to one L1 intr */
1299 			irq = platform_get_irq(pdev, 0);
1300 		}
1301 
1302 		if (irq  >= 0) {
1303 			ret = devm_request_irq(&pdev->dev, irq,
1304 					       qspi_irq_tab[val].irq_handler, 0,
1305 					       name,
1306 					       &qspi->dev_ids[val]);
1307 			if (ret < 0) {
1308 				dev_err(&pdev->dev, "IRQ %s not found\n", name);
1309 				goto qspi_probe_err;
1310 			}
1311 
1312 			qspi->dev_ids[val].dev = qspi;
1313 			qspi->dev_ids[val].irqp = &qspi_irq_tab[val];
1314 			num_ints++;
1315 			dev_dbg(&pdev->dev, "registered IRQ %s %d\n",
1316 				qspi_irq_tab[val].irq_name,
1317 				irq);
1318 		}
1319 	}
1320 
1321 	if (!num_ints) {
1322 		dev_err(&pdev->dev, "no IRQs registered, cannot init driver\n");
1323 		ret = -EINVAL;
1324 		goto qspi_probe_err;
1325 	}
1326 
1327 	/*
1328 	 * Some SoCs integrate spi controller (e.g., its interrupt bits)
1329 	 * in specific ways
1330 	 */
1331 	if (soc_intc) {
1332 		qspi->soc_intc = soc_intc;
1333 		soc_intc->bcm_qspi_int_set(soc_intc, MSPI_DONE, true);
1334 	} else {
1335 		qspi->soc_intc = NULL;
1336 	}
1337 
1338 	qspi->clk = devm_clk_get(&pdev->dev, NULL);
1339 	if (IS_ERR(qspi->clk)) {
1340 		dev_warn(dev, "unable to get clock\n");
1341 		ret = PTR_ERR(qspi->clk);
1342 		goto qspi_probe_err;
1343 	}
1344 
1345 	ret = clk_prepare_enable(qspi->clk);
1346 	if (ret) {
1347 		dev_err(dev, "failed to prepare clock\n");
1348 		goto qspi_probe_err;
1349 	}
1350 
1351 	qspi->base_clk = clk_get_rate(qspi->clk);
1352 	qspi->max_speed_hz = qspi->base_clk / (QSPI_SPBR_MIN * 2);
1353 
1354 	bcm_qspi_hw_init(qspi);
1355 	init_completion(&qspi->mspi_done);
1356 	init_completion(&qspi->bspi_done);
1357 	qspi->curr_cs = -1;
1358 
1359 	platform_set_drvdata(pdev, qspi);
1360 
1361 	qspi->xfer_mode.width = -1;
1362 	qspi->xfer_mode.addrlen = -1;
1363 	qspi->xfer_mode.hp = -1;
1364 
1365 	ret = devm_spi_register_master(&pdev->dev, master);
1366 	if (ret < 0) {
1367 		dev_err(dev, "can't register master\n");
1368 		goto qspi_reg_err;
1369 	}
1370 
1371 	return 0;
1372 
1373 qspi_reg_err:
1374 	bcm_qspi_hw_uninit(qspi);
1375 	clk_disable_unprepare(qspi->clk);
1376 qspi_probe_err:
1377 	kfree(qspi->dev_ids);
1378 qspi_resource_err:
1379 	spi_master_put(master);
1380 	return ret;
1381 }
1382 /* probe function to be called by SoC specific platform driver probe */
1383 EXPORT_SYMBOL_GPL(bcm_qspi_probe);
1384 
1385 int bcm_qspi_remove(struct platform_device *pdev)
1386 {
1387 	struct bcm_qspi *qspi = platform_get_drvdata(pdev);
1388 
1389 	bcm_qspi_hw_uninit(qspi);
1390 	clk_disable_unprepare(qspi->clk);
1391 	kfree(qspi->dev_ids);
1392 	spi_unregister_master(qspi->master);
1393 
1394 	return 0;
1395 }
1396 /* function to be called by SoC specific platform driver remove() */
1397 EXPORT_SYMBOL_GPL(bcm_qspi_remove);
1398 
1399 static int __maybe_unused bcm_qspi_suspend(struct device *dev)
1400 {
1401 	struct bcm_qspi *qspi = dev_get_drvdata(dev);
1402 
1403 	/* store the override strap value */
1404 	if (!bcm_qspi_bspi_ver_three(qspi))
1405 		qspi->s3_strap_override_ctrl =
1406 			bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
1407 
1408 	spi_master_suspend(qspi->master);
1409 	clk_disable(qspi->clk);
1410 	bcm_qspi_hw_uninit(qspi);
1411 
1412 	return 0;
1413 };
1414 
/*
 * System-resume handler: reprogram the controller, restore the chip
 * select, re-enable the SoC-level MSPI interrupt (if any), ungate the
 * clock and restart the SPI core queue.
 *
 * Returns 0 on success or the clk_enable() error code.
 */
static int __maybe_unused bcm_qspi_resume(struct device *dev)
{
	struct bcm_qspi *qspi = dev_get_drvdata(dev);
	int ret = 0;

	/*
	 * NOTE(review): registers are written before clk_enable(); this
	 * mirrors the suspend path (which only gated the clock with
	 * clk_disable()) — confirm register access with a gated clock is
	 * safe on all supported SoCs.
	 */
	bcm_qspi_hw_init(qspi);
	bcm_qspi_chip_select(qspi, qspi->curr_cs);
	if (qspi->soc_intc)
		/* enable MSPI interrupt */
		qspi->soc_intc->bcm_qspi_int_set(qspi->soc_intc, MSPI_DONE,
						 true);

	ret = clk_enable(qspi->clk);
	if (!ret)
		/* only restart the queue if the clock came back */
		spi_master_resume(qspi->master);

	return ret;
}
1433 
/* dev_pm_ops wiring the suspend/resume handlers above */
SIMPLE_DEV_PM_OPS(bcm_qspi_pm_ops, bcm_qspi_suspend, bcm_qspi_resume);

/* pm_ops to be called by SoC specific platform driver */
EXPORT_SYMBOL_GPL(bcm_qspi_pm_ops);

MODULE_AUTHOR("Kamal Dasu");
MODULE_DESCRIPTION("Broadcom QSPI driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
1443