xref: /linux/drivers/spi/spi-nxp-xspi.c (revision c17ee635fd3a482b2ad2bf5e269755c2eae5f25e)
1 // SPDX-License-Identifier: GPL-2.0+
2 
3 /*
4  * NXP xSPI controller driver.
5  *
6  * Copyright 2025 NXP
7  *
 * xSPI is a flexible SPI host controller that supports a single
 * external device. The device can have up to eight bidirectional
 * data lines, which means xSPI supports Single/Dual/Quad/Octal mode
 * data transfer (1/2/4/8 bidirectional data lines).
12  *
 * The xSPI controller is driven by the LUT (Look-up Table) registers.
 * LUT registers are a look-up table for sequences of instructions.
 * A valid sequence consists of five LUT registers.
 * A maximum of 16 LUT sequences can be programmed simultaneously.
17  *
18  * LUTs are being created at run-time based on the commands passed
19  * from the spi-mem framework, thus using single LUT index.
20  *
21  * Software triggered Flash read/write access by IP Bus.
22  *
23  * Memory mapped read access by AHB Bus.
24  *
25  * Based on SPI MEM interface and spi-nxp-fspi.c driver.
26  *
27  * Author:
28  *     Haibo Chen <haibo.chen@nxp.com>
29  * Co-author:
30  *     Han Xu <han.xu@nxp.com>
31  */
32 
33 #include <linux/bitops.h>
34 #include <linux/bitfield.h>
35 #include <linux/clk.h>
36 #include <linux/completion.h>
37 #include <linux/delay.h>
38 #include <linux/err.h>
39 #include <linux/errno.h>
40 #include <linux/interrupt.h>
41 #include <linux/io.h>
42 #include <linux/iopoll.h>
43 #include <linux/jiffies.h>
44 #include <linux/kernel.h>
45 #include <linux/log2.h>
46 #include <linux/module.h>
47 #include <linux/mutex.h>
48 #include <linux/of.h>
49 #include <linux/platform_device.h>
50 #include <linux/pinctrl/consumer.h>
51 #include <linux/pm_runtime.h>
52 #include <linux/spi/spi.h>
53 #include <linux/spi/spi-mem.h>
54 
55 /* Runtime pm timeout */
56 #define XSPI_RPM_TIMEOUT_MS 50	/* 50ms */
57 /*
58  * The driver only uses one single LUT entry, that is updated on
59  * each call of exec_op(). Index 0 is preset at boot with a basic
60  * read operation, so let's use the last entry (15).
61  */
62 #define	XSPI_SEQID_LUT			15
63 
64 #define XSPI_MCR			0x0
65 #define XSPI_MCR_CKN_FA_EN		BIT(26)
66 #define XSPI_MCR_DQS_FA_SEL_MASK	GENMASK(25, 24)
67 #define XSPI_MCR_ISD3FA			BIT(17)
68 #define XSPI_MCR_ISD2FA			BIT(16)
69 #define XSPI_MCR_DOZE			BIT(15)
70 #define XSPI_MCR_MDIS			BIT(14)
71 #define XSPI_MCR_DLPEN			BIT(12)
72 #define XSPI_MCR_CLR_TXF		BIT(11)
73 #define XSPI_MCR_CLR_RXF		BIT(10)
74 #define XSPI_MCR_IPS_TG_RST		BIT(9)
75 #define XSPI_MCR_VAR_LAT_EN		BIT(8)
76 #define XSPI_MCR_DDR_EN			BIT(7)
77 #define XSPI_MCR_DQS_EN			BIT(6)
78 #define XSPI_MCR_DQS_LAT_EN		BIT(5)
79 #define XSPI_MCR_DQS_OUT_EN		BIT(4)
80 #define XSPI_MCR_SWRSTHD		BIT(1)
81 #define XSPI_MCR_SWRSTSD		BIT(0)
82 
83 #define XSPI_IPCR			0x8
84 
85 #define XSPI_FLSHCR			0xC
86 #define XSPI_FLSHCR_TDH_MASK		GENMASK(17, 16)
87 #define XSPI_FLSHCR_TCSH_MASK		GENMASK(11, 8)
88 #define XSPI_FLSHCR_TCSS_MASK		GENMASK(3, 0)
89 
90 #define XSPI_BUF0CR			0x10
91 #define XSPI_BUF1CR			0x14
92 #define XSPI_BUF2CR			0x18
93 #define XSPI_BUF3CR			0x1C
94 #define XSPI_BUF3CR_ALLMST		BIT(31)
95 #define XSPI_BUF3CR_ADATSZ_MASK		GENMASK(17, 8)
96 #define XSPI_BUF3CR_MSTRID_MASK		GENMASK(3, 0)
97 
98 #define XSPI_BFGENCR			0x20
99 #define XSPI_BFGENCR_SEQID_WR_MASK	GENMASK(31, 28)
100 #define XSPI_BFGENCR_ALIGN_MASK		GENMASK(24, 22)
101 #define XSPI_BFGENCR_PPWF_CLR		BIT(20)
102 #define XSPI_BFGENCR_WR_FLUSH_EN	BIT(21)
103 #define XSPI_BFGENCR_SEQID_WR_EN	BIT(17)
104 #define XSPI_BFGENCR_SEQID_MASK		GENMASK(15, 12)
105 
106 #define XSPI_BUF0IND			0x30
107 #define XSPI_BUF1IND			0x34
108 #define XSPI_BUF2IND			0x38
109 
110 #define XSPI_DLLCRA			0x60
111 #define XSPI_DLLCRA_DLLEN		BIT(31)
112 #define XSPI_DLLCRA_FREQEN		BIT(30)
113 #define XSPI_DLLCRA_DLL_REFCNTR_MASK	GENMASK(27, 24)
114 #define XSPI_DLLCRA_DLLRES_MASK		GENMASK(23, 20)
115 #define XSPI_DLLCRA_SLV_FINE_MASK	GENMASK(19, 16)
116 #define XSPI_DLLCRA_SLV_DLY_MASK	GENMASK(14, 12)
117 #define XSPI_DLLCRA_SLV_DLY_COARSE_MASK	GENMASK(11,  8)
118 #define XSPI_DLLCRA_SLV_DLY_FINE_MASK	GENMASK(7, 5)
119 #define XSPI_DLLCRA_DLL_CDL8		BIT(4)
120 #define XSPI_DLLCRA_SLAVE_AUTO_UPDT	BIT(3)
121 #define XSPI_DLLCRA_SLV_EN		BIT(2)
122 #define XSPI_DLLCRA_SLV_DLL_BYPASS	BIT(1)
123 #define XSPI_DLLCRA_SLV_UPD		BIT(0)
124 
125 #define XSPI_SFAR			0x100
126 
127 #define XSPI_SFACR			0x104
128 #define XSPI_SFACR_FORCE_A10		BIT(22)
129 #define XSPI_SFACR_WA_4B_EN		BIT(21)
130 #define XSPI_SFACR_CAS_INTRLVD		BIT(20)
131 #define XSPI_SFACR_RX_BP_EN		BIT(18)
132 #define XSPI_SFACR_BYTE_SWAP		BIT(17)
133 #define XSPI_SFACR_WA			BIT(16)
134 #define XSPI_SFACR_CAS_MASK		GENMASK(3, 0)
135 
136 #define XSPI_SMPR			0x108
137 #define XSPI_SMPR_DLLFSMPFA_MASK	GENMASK(26, 24)
138 #define XSPI_SMPR_FSDLY			BIT(6)
139 #define XSPI_SMPR_FSPHS			BIT(5)
140 
141 #define XSPI_RBSR			0x10C
142 
143 #define XSPI_RBCT			0x110
144 #define XSPI_RBCT_WMRK_MASK		GENMASK(6, 0)
145 
146 #define XSPI_DLLSR			0x12C
147 #define XSPI_DLLSR_DLLA_LOCK		BIT(15)
148 #define XSPI_DLLSR_SLVA_LOCK		BIT(14)
149 #define XSPI_DLLSR_DLLA_RANGE_ERR	BIT(13)
150 #define XSPI_DLLSR_DLLA_FINE_UNDERFLOW	BIT(12)
151 
152 #define XSPI_TBSR			0x150
153 
154 #define XSPI_TBDR			0x154
155 
156 #define XSPI_TBCT			0x158
157 #define XSPI_TBCT_WMRK_MASK		GENMASK(7, 0)
158 
159 #define XSPI_SR				0x15C
160 #define XSPI_SR_TXFULL			BIT(27)
161 #define XSPI_SR_TXDMA			BIT(26)
162 #define XSPI_SR_TXWA			BIT(25)
163 #define XSPI_SR_TXNE			BIT(24)
164 #define XSPI_SR_RXDMA			BIT(23)
165 #define XSPI_SR_ARB_STATE_MASK		GENMASK(23, 20)
166 #define XSPI_SR_RXFULL			BIT(19)
167 #define XSPI_SR_RXWE			BIT(16)
168 #define XSPI_SR_ARB_LCK			BIT(15)
169 #define XSPI_SR_AHBnFUL			BIT(11)
170 #define XSPI_SR_AHBnNE			BIT(7)
171 #define XSPI_SR_AHBTRN			BIT(6)
172 #define XSPI_SR_AWRACC			BIT(4)
173 #define XSPI_SR_AHB_ACC			BIT(2)
174 #define XSPI_SR_IP_ACC			BIT(1)
175 #define XSPI_SR_BUSY			BIT(0)
176 
177 #define XSPI_FR				0x160
178 #define XSPI_FR_DLPFF			BIT(31)
179 #define XSPI_FR_DLLABRT			BIT(28)
180 #define XSPI_FR_TBFF			BIT(27)
181 #define XSPI_FR_TBUF			BIT(26)
182 #define XSPI_FR_DLLUNLCK		BIT(24)
183 #define XSPI_FR_ILLINE			BIT(23)
184 #define XSPI_FR_RBOF			BIT(17)
185 #define XSPI_FR_RBDF			BIT(16)
186 #define XSPI_FR_AAEF			BIT(15)
187 #define XSPI_FR_AITEF			BIT(14)
188 #define XSPI_FR_AIBSEF			BIT(13)
189 #define XSPI_FR_ABOF			BIT(12)
190 #define XSPI_FR_CRCAEF			BIT(10)
191 #define XSPI_FR_PPWF			BIT(8)
192 #define XSPI_FR_IPIEF			BIT(6)
193 #define XSPI_FR_IPEDERR			BIT(5)
194 #define XSPI_FR_PERFOVF			BIT(2)
195 #define XSPI_FR_RDADDR			BIT(1)
196 #define XSPI_FR_TFF			BIT(0)
197 
198 #define XSPI_RSER			0x164
199 #define XSPI_RSER_TFIE			BIT(0)
200 
201 #define XSPI_SFA1AD			0x180
202 
203 #define XSPI_SFA2AD			0x184
204 
205 #define XSPI_RBDR0			0x200
206 
207 #define XSPI_LUTKEY			0x300
208 #define XSPI_LUT_KEY_VAL		(0x5AF05AF0UL)
209 
210 #define XSPI_LCKCR			0x304
211 #define XSPI_LOKCR_LOCK			BIT(0)
212 #define XSPI_LOKCR_UNLOCK		BIT(1)
213 
214 #define XSPI_LUT			0x310
215 #define XSPI_LUT_OFFSET			(XSPI_SEQID_LUT * 5 * 4)
216 #define XSPI_LUT_REG(idx) \
217 	(XSPI_LUT + XSPI_LUT_OFFSET + (idx) * 4)
218 
219 #define XSPI_MCREXT			0x4FC
220 #define XSPI_MCREXT_RST_MASK		GENMASK(3, 0)
221 
222 
223 #define XSPI_FRAD0_WORD2		0x808
224 #define XSPI_FRAD0_WORD2_MD0ACP_MASK	GENMASK(2, 0)
225 
226 #define XSPI_FRAD0_WORD3		0x80C
227 #define XSPI_FRAD0_WORD3_VLD		BIT(31)
228 
229 #define XSPI_TG0MDAD			0x900
230 #define XSPI_TG0MDAD_VLD		BIT(31)
231 
232 #define XSPI_TG1MDAD			0x910
233 
234 #define XSPI_MGC			0x920
235 #define XSPI_MGC_GVLD			BIT(31)
236 #define XSPI_MGC_GVLDMDAD		BIT(29)
237 #define XSPI_MGC_GVLDFRAD		BIT(27)
238 
239 #define XSPI_MTO			0x928
240 
241 #define XSPI_ERRSTAT			0x938
242 #define XSPI_INT_EN			0x93C
243 
244 #define XSPI_SFP_TG_IPCR		0x958
245 #define XSPI_SFP_TG_IPCR_SEQID_MASK	GENMASK(27, 24)
246 #define XSPI_SFP_TG_IPCR_ARB_UNLOCK	BIT(23)
247 #define XSPI_SFP_TG_IPCR_ARB_LOCK	BIT(22)
248 #define XSPI_SFP_TG_IPCR_IDATSZ_MASK	GENMASK(15, 0)
249 
250 #define XSPI_SFP_TG_SFAR 0x95C
251 
252 /* Register map end */
253 
254 /********* XSPI CMD definitions ***************************/
255 #define LUT_STOP	0x00
256 #define LUT_CMD_SDR	0x01
257 #define LUT_ADDR_SDR	0x02
258 #define LUT_DUMMY	0x03
259 #define LUT_MODE8_SDR	0x04
260 #define LUT_MODE2_SDR	0x05
261 #define LUT_MODE4_SDR	0x06
262 #define LUT_READ_SDR	0x07
263 #define LUT_WRITE_SDR	0x08
264 #define LUT_JMP_ON_CS	0x09
265 #define LUT_ADDR_DDR	0x0A
266 #define LUT_MODE8_DDR	0x0B
267 #define LUT_MODE2_DDR	0x0C
268 #define LUT_MODE4_DDR	0x0D
269 #define LUT_READ_DDR	0x0E
270 #define LUT_WRITE_DDR	0x0F
271 #define LUT_DATA_LEARN	0x10
272 #define LUT_CMD_DDR	0x11
273 #define LUT_CADDR_SDR	0x12
274 #define LUT_CADDR_DDR	0x13
275 #define JMP_TO_SEQ	0x14
276 
277 #define XSPI_64BIT_LE	0x3
278 /*
279  * Calculate number of required PAD bits for LUT register.
280  *
281  * The pad stands for the number of IO lines [0:7].
282  * For example, the octal read needs eight IO lines,
283  * so you should use LUT_PAD(8). This macro
284  * returns 3 i.e. use eight (2^3) IP lines for read.
285  */
286 #define LUT_PAD(x) (fls(x) - 1)
287 
288 /*
289  * Macro for constructing the LUT entries with the following
290  * register layout:
291  *
292  *  ---------------------------------------------------
293  *  | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
294  *  ---------------------------------------------------
295  */
296 #define PAD_SHIFT		8
297 #define INSTR_SHIFT		10
298 #define OPRND_SHIFT		16
299 
300 /* Macros for constructing the LUT register. */
301 #define LUT_DEF(idx, ins, pad, opr)			  \
302 	((((ins) << INSTR_SHIFT) | ((pad) << PAD_SHIFT) | \
303 	(opr)) << (((idx) % 2) * OPRND_SHIFT))
304 
305 #define NXP_XSPI_MIN_IOMAP	SZ_4M
306 #define NXP_XSPI_MAX_CHIPSELECT		2
307 #define POLL_TOUT_US		5000
308 
309 /* Access flash memory using IP bus only */
310 #define XSPI_QUIRK_USE_IP_ONLY	BIT(0)
311 
/* Per-SoC capabilities of one xSPI controller revision. */
struct nxp_xspi_devtype_data {
	unsigned int rxfifo;		/* RX FIFO size in bytes */
	unsigned int txfifo;		/* TX FIFO size in bytes */
	unsigned int ahb_buf_size;	/* AHB RX buffer size in bytes */
	unsigned int quirks;		/* XSPI_QUIRK_* flags */
};
318 
319 static struct nxp_xspi_devtype_data imx94_data = {
320 	.rxfifo = SZ_512,       /* (128 * 4 bytes)  */
321 	.txfifo = SZ_1K,        /* (256 * 4 bytes)  */
322 	.ahb_buf_size = SZ_4K,  /* (1024 * 4 bytes)  */
323 };
324 
/* Per-controller driver state. */
struct nxp_xspi {
	void __iomem *iobase;		/* controller register base */
	void __iomem *ahb_addr;		/* current ioremap of the AHB read window */
	u32 memmap_phy;			/* physical base of the AHB-mapped flash */
	u32 memmap_phy_size;		/* total size of the AHB-mapped range */
	u32 memmap_start;		/* flash offset of the mapped window */
	u32 memmap_len;			/* length of the mapped window */
	struct clk *clk;
	struct device *dev;
	struct completion c;		/* completed by the TFF interrupt */
	const struct nxp_xspi_devtype_data *devtype_data;
	/* mutex lock for each operation */
	struct mutex lock;
	int selected;			/* chipselect of the last configured device */
#define XSPI_DTR_PROTO		BIT(0)
	int flags;			/* XSPI_DTR_PROTO set while in 8D-8D-8D mode */
	/* Save the previous operation clock rate */
	unsigned long pre_op_rate;
	/* The max clock rate xspi supported output to device */
	unsigned long support_max_rate;
};
346 
347 static inline int needs_ip_only(struct nxp_xspi *xspi)
348 {
349 	return xspi->devtype_data->quirks & XSPI_QUIRK_USE_IP_ONLY;
350 }
351 
352 static irqreturn_t nxp_xspi_irq_handler(int irq, void *dev_id)
353 {
354 	struct nxp_xspi *xspi = dev_id;
355 	u32 reg;
356 
357 	reg = readl(xspi->iobase + XSPI_FR);
358 	if (reg & XSPI_FR_TFF) {
359 		/* Clear interrupt */
360 		writel(XSPI_FR_TFF, xspi->iobase + XSPI_FR);
361 		complete(&xspi->c);
362 		return IRQ_HANDLED;
363 	}
364 
365 	return IRQ_NONE;
366 }
367 
368 static int nxp_xspi_check_buswidth(struct nxp_xspi *xspi, u8 width)
369 {
370 	return (is_power_of_2(width) && width <= 8) ? 0 : -EOPNOTSUPP;
371 }
372 
/*
 * spi-mem ->supports_op() hook: reject operations the controller
 * cannot execute so the caller can pick a different one.
 */
static bool nxp_xspi_supports_op(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	struct nxp_xspi *xspi = spi_controller_get_devdata(mem->spi->controller);
	int ret;

	ret = nxp_xspi_check_buswidth(xspi, op->cmd.buswidth);

	if (op->addr.nbytes)
		ret |= nxp_xspi_check_buswidth(xspi, op->addr.buswidth);

	if (op->dummy.nbytes)
		ret |= nxp_xspi_check_buswidth(xspi, op->dummy.buswidth);

	if (op->data.nbytes)
		ret |= nxp_xspi_check_buswidth(xspi, op->data.buswidth);

	if (ret)
		return false;

	/*
	 * The number of address bytes should be equal to or less than 4 bytes.
	 */
	if (op->addr.nbytes > 4)
		return false;

	/* Max 64 dummy clock cycles supported */
	if (op->dummy.buswidth &&
	    (op->dummy.nbytes * 8 / op->dummy.buswidth > 64))
		return false;

	/* IP-only reads are bounded by the RX FIFO size */
	if (needs_ip_only(xspi) && op->data.dir == SPI_MEM_DATA_IN &&
	    op->data.nbytes > xspi->devtype_data->rxfifo)
		return false;

	/* Writes always go through the IP path, bounded by the TX FIFO */
	if (op->data.dir == SPI_MEM_DATA_OUT &&
			op->data.nbytes > xspi->devtype_data->txfifo)
		return false;

	return spi_mem_default_supports_op(mem, op);
}
414 
/*
 * Translate a spi_mem_op into LUT instructions and program them into
 * the single LUT sequence (XSPI_SEQID_LUT) this driver uses.
 *
 * Each 32-bit LUT register packs two instruction/pad/operand triplets,
 * hence the lutval[lutidx / 2] indexing: LUT_DEF() shifts odd entries
 * into the upper half-word.
 */
static void nxp_xspi_prepare_lut(struct nxp_xspi *xspi,
				 const struct spi_mem_op *op)
{
	void __iomem *base = xspi->iobase;
	u32 lutval[5] = {};
	int lutidx = 1, i;

	/* cmd */
	if (op->cmd.dtr) {
		/* DTR opcodes are 16 bit: emit high byte, then low byte */
		lutval[0] |= LUT_DEF(0, LUT_CMD_DDR, LUT_PAD(op->cmd.buswidth),
				     op->cmd.opcode >> 8);
		lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_CMD_DDR,
					      LUT_PAD(op->cmd.buswidth),
					      op->cmd.opcode & 0x00ff);
		lutidx++;
	} else {
		lutval[0] |= LUT_DEF(0, LUT_CMD_SDR, LUT_PAD(op->cmd.buswidth),
				     op->cmd.opcode);
	}

	/* Addr bytes; the operand is the address length in bits */
	if (op->addr.nbytes) {
		lutval[lutidx / 2] |= LUT_DEF(lutidx, op->addr.dtr ?
					      LUT_ADDR_DDR : LUT_ADDR_SDR,
					      LUT_PAD(op->addr.buswidth),
					      op->addr.nbytes * 8);
		lutidx++;
	}

	/*
	 * Dummy bytes, if needed. The operand is the dummy cycle count;
	 * DTR transfers on both clock edges, so the count is halved.
	 * NOTE(review): pad uses op->data.buswidth (not dummy.buswidth),
	 * same as spi-nxp-fspi.c — confirm against the reference manual.
	 */
	if (op->dummy.nbytes) {
		lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY,
					      LUT_PAD(op->data.buswidth),
					      op->dummy.nbytes * 8 /
						/* need distinguish ddr mode */
					      op->dummy.buswidth / (op->dummy.dtr ? 2 : 1));
		lutidx++;
	}

	/* Read/Write data bytes; operand 0 means length comes from IPCR/IDATSZ */
	if (op->data.nbytes) {
		lutval[lutidx / 2] |= LUT_DEF(lutidx,
					      op->data.dir == SPI_MEM_DATA_IN ?
					      (op->data.dtr ? LUT_READ_DDR : LUT_READ_SDR) :
					      (op->data.dtr ? LUT_WRITE_DDR : LUT_WRITE_SDR),
					      LUT_PAD(op->data.buswidth),
					      0);
		lutidx++;
	}

	/* Stop condition. */
	lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0);

	/* Unlock LUT */
	writel(XSPI_LUT_KEY_VAL, xspi->iobase + XSPI_LUTKEY);
	writel(XSPI_LOKCR_UNLOCK, xspi->iobase + XSPI_LCKCR);

	/* Fill LUT */
	for (i = 0; i < ARRAY_SIZE(lutval); i++)
		writel(lutval[i], base + XSPI_LUT_REG(i));

	dev_dbg(xspi->dev, "CMD[%02x] lutval[0:%08x 1:%08x 2:%08x 3:%08x 4:%08x], size: 0x%08x\n",
		op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3], lutval[4],
		op->data.nbytes);

	/* Lock LUT */
	writel(XSPI_LUT_KEY_VAL, xspi->iobase + XSPI_LUTKEY);
	writel(XSPI_LOKCR_LOCK, xspi->iobase + XSPI_LCKCR);
}
484 
/*
 * Switch the controller to SDR (STR) operation: sample via the dummy
 * pad loopback and cap the supported output rate at 133 MHz.
 * The module is disabled (MDIS=1) while the fields are changed.
 */
static void nxp_xspi_disable_ddr(struct nxp_xspi *xspi)
{
	void __iomem *base = xspi->iobase;
	u32 reg;

	/* Disable module */
	reg = readl(base + XSPI_MCR);
	reg |= XSPI_MCR_MDIS;
	writel(reg, base + XSPI_MCR);

	reg &= ~XSPI_MCR_DDR_EN;
	reg &= ~XSPI_MCR_DQS_FA_SEL_MASK;
	/* Use dummy pad loopback mode to sample data */
	reg |= FIELD_PREP(XSPI_MCR_DQS_FA_SEL_MASK, 0x01);
	writel(reg, base + XSPI_MCR);
	xspi->support_max_rate = 133000000;

	/* No extra data-hold time needed in SDR mode */
	reg = readl(base + XSPI_FLSHCR);
	reg &= ~XSPI_FLSHCR_TDH_MASK;
	writel(reg, base + XSPI_FLSHCR);

	/* Select sampling at inverted clock */
	reg = FIELD_PREP(XSPI_SMPR_DLLFSMPFA_MASK, 0) | XSPI_SMPR_FSPHS;
	writel(reg, base + XSPI_SMPR);

	/* Enable module */
	reg = readl(base + XSPI_MCR);
	reg &= ~XSPI_MCR_MDIS;
	writel(reg, base + XSPI_MCR);
}
515 
/*
 * Switch the controller to DDR (DTR) operation: sample with the
 * external DQS signal and allow output rates up to 200 MHz.
 * The module is disabled (MDIS=1) while the fields are changed.
 */
static void nxp_xspi_enable_ddr(struct nxp_xspi *xspi)
{
	void __iomem *base = xspi->iobase;
	u32 reg;

	/* Disable module */
	reg = readl(base + XSPI_MCR);
	reg |= XSPI_MCR_MDIS;
	writel(reg, base + XSPI_MCR);

	reg |= XSPI_MCR_DDR_EN;
	reg &= ~XSPI_MCR_DQS_FA_SEL_MASK;
	/* Use external dqs to sample data */
	reg |= FIELD_PREP(XSPI_MCR_DQS_FA_SEL_MASK, 0x03);
	writel(reg, base + XSPI_MCR);
	xspi->support_max_rate = 200000000;

	/* One cycle of data-hold time for DDR transfers */
	reg = readl(base + XSPI_FLSHCR);
	reg &= ~XSPI_FLSHCR_TDH_MASK;
	reg |= FIELD_PREP(XSPI_FLSHCR_TDH_MASK, 0x01);
	writel(reg, base + XSPI_FLSHCR);

	reg = FIELD_PREP(XSPI_SMPR_DLLFSMPFA_MASK, 0x04);
	writel(reg, base + XSPI_SMPR);

	/* Enable module */
	reg = readl(base + XSPI_MCR);
	reg &= ~XSPI_MCR_MDIS;
	writel(reg, base + XSPI_MCR);
}
546 
/*
 * Software-reset the AHB and serial-flash domains plus the IPS target
 * group queue, honoring the RM's MDIS ordering rules: the module must
 * be enabled while the resets are asserted and disabled before they
 * are deasserted. The original MDIS state is restored at the end.
 */
static void nxp_xspi_sw_reset(struct nxp_xspi *xspi)
{
	void __iomem *base = xspi->iobase;
	bool mdis_flag = false;
	u32 reg;
	int ret;

	reg = readl(base + XSPI_MCR);

	/*
	 * Per RM, when reset SWRSTSD and SWRSTHD, XSPI must be
	 * enabled (MDIS = 0).
	 * So if MDIS is 1, should clear it before assert SWRSTSD
	 * and SWRSTHD.
	 */
	if (reg & XSPI_MCR_MDIS) {
		reg &= ~XSPI_MCR_MDIS;
		writel(reg, base + XSPI_MCR);
		mdis_flag = true;
	}

	/* Software reset for AHB domain and Serial flash memory domain */
	reg |= XSPI_MCR_SWRSTHD | XSPI_MCR_SWRSTSD;
	/* Software Reset for IPS Target Group Queue 0 */
	reg |= XSPI_MCR_IPS_TG_RST;
	writel(reg, base + XSPI_MCR);

	/* IPS_TG_RST will self-clear to 0 once IPS_TG_RST complete */
	ret = readl_poll_timeout(base + XSPI_MCR, reg, !(reg & XSPI_MCR_IPS_TG_RST),
			      100, 5000);
	if (ret == -ETIMEDOUT)
		dev_warn(xspi->dev, "XSPI_MCR_IPS_TG_RST do not self-clear in 5ms!");

	/*
	 * Per RM, must wait for at least three system cycles and
	 * three flash cycles after changing the value of reset field.
	 * delay 5us for safe.
	 */
	fsleep(5);

	/*
	 * Per RM, before deasserting SWRSTSD and SWRSTHD, XSPI must be
	 * disabled (MDIS = 1).
	 */
	reg = readl(base + XSPI_MCR);
	reg |= XSPI_MCR_MDIS;
	writel(reg, base + XSPI_MCR);

	/* deassert software reset */
	reg &= ~(XSPI_MCR_SWRSTHD | XSPI_MCR_SWRSTSD);
	writel(reg, base + XSPI_MCR);

	/*
	 * Per RM, must wait for at least three system cycles and
	 * three flash cycles after changing the value of reset field.
	 * delay 5us for safe.
	 */
	fsleep(5);

	/* Re-enable XSPI if it was enabled at the beginning (MDIS was 0) */
	if (!mdis_flag) {
		reg &= ~XSPI_MCR_MDIS;
		writel(reg, base + XSPI_MCR);
	}
}
612 
/*
 * Put the target-side (slave) delay chain in DLL-bypass mode.
 * Used for SDR operation and for DTR below 60 MHz (see
 * nxp_xspi_select_mem()), where the full DLL is not engaged.
 */
static void nxp_xspi_dll_bypass(struct nxp_xspi *xspi)
{
	void __iomem *base = xspi->iobase;
	int ret;
	u32 reg;

	nxp_xspi_sw_reset(xspi);

	writel(0, base + XSPI_DLLCRA);

	/* Set SLV EN first */
	reg = XSPI_DLLCRA_SLV_EN;
	writel(reg, base + XSPI_DLLCRA);

	reg = XSPI_DLLCRA_FREQEN |
	      FIELD_PREP(XSPI_DLLCRA_SLV_DLY_COARSE_MASK, 0x0) |
	      XSPI_DLLCRA_SLV_EN | XSPI_DLLCRA_SLV_DLL_BYPASS;
	writel(reg, base + XSPI_DLLCRA);

	/* Latch the new slave-delay settings */
	reg |= XSPI_DLLCRA_SLV_UPD;
	writel(reg, base + XSPI_DLLCRA);

	ret = readl_poll_timeout(base + XSPI_DLLSR, reg,
			      reg & XSPI_DLLSR_SLVA_LOCK, 0, POLL_TOUT_US);
	if (ret)
		dev_err(xspi->dev,
			"DLL SLVA unlock, the DLL status is %x, need to check!\n",
			readl(base + XSPI_DLLSR));
}
642 
/*
 * Run the DLL in automatic slave-update mode so the sampling point
 * tracks the flash clock. Called for DTR operation at >= 60 MHz (see
 * nxp_xspi_select_mem()); FREQEN is additionally set above 133 MHz.
 */
static void nxp_xspi_dll_auto(struct nxp_xspi *xspi, unsigned long rate)
{
	void __iomem *base = xspi->iobase;
	int ret;
	u32 reg;

	nxp_xspi_sw_reset(xspi);

	writel(0, base + XSPI_DLLCRA);

	/* Set SLV EN first */
	reg = XSPI_DLLCRA_SLV_EN;
	writel(reg, base + XSPI_DLLCRA);

	reg = FIELD_PREP(XSPI_DLLCRA_DLL_REFCNTR_MASK, 0x02) |
	      FIELD_PREP(XSPI_DLLCRA_DLLRES_MASK, 0x08) |
	      XSPI_DLLCRA_SLAVE_AUTO_UPDT | XSPI_DLLCRA_SLV_EN;
	if (rate > 133000000)
		reg |= XSPI_DLLCRA_FREQEN;

	writel(reg, base + XSPI_DLLCRA);

	/* Latch the slave settings, then enable the DLL itself */
	reg |= XSPI_DLLCRA_SLV_UPD;
	writel(reg, base + XSPI_DLLCRA);

	reg |= XSPI_DLLCRA_DLLEN;
	writel(reg, base + XSPI_DLLCRA);

	ret = readl_poll_timeout(base + XSPI_DLLSR, reg,
			      reg & XSPI_DLLSR_DLLA_LOCK, 0, POLL_TOUT_US);
	if (ret)
		dev_err(xspi->dev,
			"DLL unlock, the DLL status is %x, need to check!\n",
			readl(base + XSPI_DLLSR));

	ret = readl_poll_timeout(base + XSPI_DLLSR, reg,
			      reg & XSPI_DLLSR_SLVA_LOCK, 0, POLL_TOUT_US);
	if (ret)
		dev_err(xspi->dev,
			"DLL SLVA unlock, the DLL status is %x, need to check!\n",
			readl(base + XSPI_DLLSR));
}
685 
/*
 * (Re)configure the controller for the target device of @op: SDR/DTR
 * mode, clock rate, chipselect address windows, 16-bit byte swap and
 * DLL setup. Skipped entirely when nothing changed since the last op.
 */
static void nxp_xspi_select_mem(struct nxp_xspi *xspi, struct spi_device *spi,
				const struct spi_mem_op *op)
{
	/* xspi only support one DTR mode: 8D-8D-8D */
	bool op_is_dtr = op->cmd.dtr && op->addr.dtr && op->dummy.dtr && op->data.dtr;
	unsigned long root_clk_rate, rate;
	uint64_t cs0_top_address;
	uint64_t cs1_top_address;
	u32 reg;
	int ret;

	/*
	 * Return when following condition all meet,
	 * 1, if previously selected target device is same as current
	 *    requested target device.
	 * 2, the DTR or STR mode do not change.
	 * 3, previous operation max rate equals current one.
	 *
	 * For other case, need to re-config.
	 */
	if (xspi->selected == spi_get_chipselect(spi, 0) &&
	    (!!(xspi->flags & XSPI_DTR_PROTO) == op_is_dtr) &&
	    (xspi->pre_op_rate == op->max_freq))
		return;

	if (op_is_dtr) {
		nxp_xspi_enable_ddr(xspi);
		xspi->flags |= XSPI_DTR_PROTO;
	} else {
		nxp_xspi_disable_ddr(xspi);
		xspi->flags &= ~XSPI_DTR_PROTO;
	}
	rate = min_t(unsigned long, xspi->support_max_rate, op->max_freq);
	/*
	 * There is two dividers between xspi_clk_root(from SoC CCM) and xspi_sfif.
	 * xspi_clk_root ---->divider1 ----> ipg_clk_2xsfif
	 *                              |
	 *                              |
	 *                              |---> divider2 ---> ipg_clk_sfif
	 * divider1 is controlled by SOCCR, SOCCR default value is 0.
	 * divider2 fix to divide 2.
	 * when SOCCR = 0:
	 *        ipg_clk_2xsfif = xspi_clk_root
	 *        ipg_clk_sfif = ipg_clk_2xsfif / 2 = xspi_clk_root / 2
	 * ipg_clk_2xsfif is used for DTR mode.
	 * xspi_sck(output to device) is defined based on xspi_sfif clock.
	 */
	root_clk_rate = rate * 2;

	clk_disable_unprepare(xspi->clk);

	ret = clk_set_rate(xspi->clk, root_clk_rate);
	if (ret)
		/* NOTE(review): returns with the clock left disabled — confirm intended */
		return;

	ret = clk_prepare_enable(xspi->clk);
	if (ret)
		return;

	xspi->pre_op_rate = op->max_freq;
	xspi->selected = spi_get_chipselect(spi, 0);

	/* Top addresses partition the AHB range between the two chipselects */
	if (xspi->selected) {		/* CS1 select */
		cs0_top_address = xspi->memmap_phy;
		cs1_top_address = SZ_4G - 1;
	} else {			/* CS0 select */
		cs0_top_address = SZ_4G - 1;
		cs1_top_address = SZ_4G - 1;
	}
	writel(cs0_top_address, xspi->iobase + XSPI_SFA1AD);
	writel(cs1_top_address, xspi->iobase + XSPI_SFA2AD);

	reg = readl(xspi->iobase + XSPI_SFACR);
	if (op->data.swap16)
		reg |= XSPI_SFACR_BYTE_SWAP;
	else
		reg &= ~XSPI_SFACR_BYTE_SWAP;
	writel(reg, xspi->iobase + XSPI_SFACR);

	/* Full DLL only pays off for fast DTR; otherwise bypass it */
	if (!op_is_dtr || rate < 60000000)
		nxp_xspi_dll_bypass(xspi);
	else
		nxp_xspi_dll_auto(xspi, rate);
}
770 
771 static int nxp_xspi_ahb_read(struct nxp_xspi *xspi, const struct spi_mem_op *op)
772 {
773 	u32 start = op->addr.val;
774 	u32 len = op->data.nbytes;
775 
776 	/* If necessary, ioremap before AHB read */
777 	if ((!xspi->ahb_addr) || start < xspi->memmap_start ||
778 	     start + len > xspi->memmap_start + xspi->memmap_len) {
779 		if (xspi->ahb_addr)
780 			iounmap(xspi->ahb_addr);
781 
782 		xspi->memmap_start = start;
783 		xspi->memmap_len = len > NXP_XSPI_MIN_IOMAP ?
784 				len : NXP_XSPI_MIN_IOMAP;
785 
786 		xspi->ahb_addr = ioremap(xspi->memmap_phy + xspi->memmap_start,
787 					 xspi->memmap_len);
788 
789 		if (!xspi->ahb_addr) {
790 			dev_err(xspi->dev, "failed to alloc memory\n");
791 			return -ENOMEM;
792 		}
793 	}
794 
795 	/* Read out the data directly from the AHB buffer. */
796 	memcpy_fromio(op->data.buf.in,
797 			xspi->ahb_addr + start - xspi->memmap_start, len);
798 
799 	return 0;
800 }
801 
/*
 * Push the TX payload into the TX FIFO one 32-bit word at a time.
 * A trailing partial word is padded with 0xFF bytes (see the length
 * adjustment in nxp_xspi_do_op()).
 */
static int nxp_xspi_fill_txfifo(struct nxp_xspi *xspi,
				 const struct spi_mem_op *op)
{
	void __iomem *base = xspi->iobase;
	u8 *buf = (u8 *)op->data.buf.out;
	u32 reg, left;
	int i;

	for (i = 0; i < ALIGN(op->data.nbytes, 4); i += 4) {
		/* Clear TBFF (write-1-to-clear) before probing it */
		reg = readl(base + XSPI_FR);
		reg |= XSPI_FR_TBFF;
		writel(reg, base + XSPI_FR);
		/* Read again to check whether the tx fifo has room */
		reg = readl(base + XSPI_FR);
		if (!(reg & XSPI_FR_TBFF)) {
			WARN_ON(1);
			return -EIO;
		}

		if (i == ALIGN_DOWN(op->data.nbytes, 4)) {
			/* Use 0xFF for extra bytes */
			left = 0xFFFFFFFF;
			/* The last 1 to 3 bytes */
			memcpy((u8 *)&left, buf + i, op->data.nbytes - i);
			writel(left, base + XSPI_TBDR);
		} else {
			writel(*(u32 *)(buf + i), base + XSPI_TBDR);
		}
	}

	return 0;
}
834 
/*
 * Drain the RX FIFO for an IP read: consume full watermark-sized
 * batches while the transfer is running, then read the remainder once
 * the controller is idle, and finally invalidate the FIFO so stale
 * data cannot leak into the next operation.
 */
static int nxp_xspi_read_rxfifo(struct nxp_xspi *xspi,
				const struct spi_mem_op *op)
{
	u32 watermark, watermark_bytes, reg;
	void __iomem *base = xspi->iobase;
	u8 *buf = (u8 *) op->data.buf.in;
	int i, ret, len;

	/*
	 * Config the rx watermark half of the 64 memory-mapped RX data buffer RBDRn
	 * refer to the RBCT config in nxp_xspi_do_op()
	 */
	watermark = 32;
	watermark_bytes = watermark * 4;

	len = op->data.nbytes;

	while (len >= watermark_bytes) {
		/* Make sure the RX FIFO contains valid data before read */
		ret = readl_poll_timeout(base + XSPI_FR, reg,
				      reg & XSPI_FR_RBDF, 0, POLL_TOUT_US);
		if (ret) {
			WARN_ON(1);
			return ret;
		}

		for (i = 0; i < watermark; i++)
			*(u32 *)(buf + i * 4) = readl(base + XSPI_RBDR0 + i * 4);

		len = len - watermark_bytes;
		buf = buf + watermark_bytes;
		/* Pop up data to RXFIFO for next read. */
		reg = readl(base + XSPI_FR);
		reg |= XSPI_FR_RBDF;
		writel(reg, base + XSPI_FR);
	}

	/* Wait for the total data transfer finished */
	ret = readl_poll_timeout(base + XSPI_SR, reg, !(reg & XSPI_SR_BUSY), 0, POLL_TOUT_US);
	if (ret) {
		WARN_ON(1);
		return ret;
	}

	/* Read the sub-watermark tail, a whole word at a time */
	i = 0;
	while (len >= 4) {
		*(u32 *)(buf) = readl(base + XSPI_RBDR0 + i);
		i += 4;
		len -= 4;
		buf += 4;
	}

	/* Last partial word: copy only the valid bytes */
	if (len > 0) {
		reg = readl(base + XSPI_RBDR0 + i);
		memcpy(buf, (u8 *)&reg, len);
	}

	/* Invalid RXFIFO first */
	reg = readl(base + XSPI_MCR);
	reg |= XSPI_MCR_CLR_RXF;
	writel(reg, base + XSPI_MCR);
	/* Wait for the CLR_RXF clear */
	ret = readl_poll_timeout(base + XSPI_MCR, reg,
			      !(reg & XSPI_MCR_CLR_RXF), 1, POLL_TOUT_US);
	WARN_ON(ret);

	return ret;
}
903 
/*
 * Execute one spi_mem_op over the IP bus: reset and configure the
 * relevant FIFO, program address/size/LUT id to trigger the sequence,
 * then wait for the TFF interrupt (see nxp_xspi_irq_handler()) and,
 * for reads, drain the RX FIFO.
 */
static int nxp_xspi_do_op(struct nxp_xspi *xspi, const struct spi_mem_op *op)
{
	void __iomem *base = xspi->iobase;
	int watermark, err = 0;
	u32 reg, len;

	len = op->data.nbytes;
	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT) {
		/* Clear the TX FIFO. */
		reg = readl(base + XSPI_MCR);
		reg |= XSPI_MCR_CLR_TXF;
		writel(reg, base + XSPI_MCR);
		/* Wait for the CLR_TXF clear */
		err = readl_poll_timeout(base + XSPI_MCR, reg,
				      !(reg & XSPI_MCR_CLR_TXF), 1, POLL_TOUT_US);
		if (err) {
			WARN_ON(1);
			return err;
		}

		/* Cover the no 4bytes alignment data length */
		watermark = (xspi->devtype_data->txfifo - ALIGN(op->data.nbytes, 4)) / 4 + 1;
		reg = FIELD_PREP(XSPI_TBCT_WMRK_MASK, watermark);
		writel(reg, base + XSPI_TBCT);
		/*
		 * According to the RM, for TBDR register, a write transaction on the
		 * flash memory with data size of less than 32 bits leads to the removal
		 * of one data entry from the TX buffer. The valid bits are used and the
		 * rest of the bits are discarded.
		 * But for data size large than 32 bits, according to test, for no 4bytes
		 * alignment data, the last 1~3 bytes will lost, because TX buffer use
		 * 4 bytes entries.
		 * So here adjust the transfer data length to make it 4bytes alignment.
		 * then will meet the upper watermark setting, trigger the 4bytes entries
		 * pop out.
		 * Will use extra 0xff to append, refer to nxp_xspi_fill_txfifo().
		 */
		if (len > 4)
			len = ALIGN(op->data.nbytes, 4);

	} else if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN) {
		/* Invalid RXFIFO first */
		reg = readl(base + XSPI_MCR);
		reg |= XSPI_MCR_CLR_RXF;
		writel(reg, base + XSPI_MCR);
		/* Wait for the CLR_RXF clear */
		err = readl_poll_timeout(base + XSPI_MCR, reg,
				      !(reg & XSPI_MCR_CLR_RXF), 1, POLL_TOUT_US);
		if (err) {
			WARN_ON(1);
			return err;
		}

		/* Watermark of 31 matches the 32-entry batches in nxp_xspi_read_rxfifo() */
		reg = FIELD_PREP(XSPI_RBCT_WMRK_MASK, 31);
		writel(reg, base + XSPI_RBCT);
	}

	init_completion(&xspi->c);

	/* Config the data address */
	writel(op->addr.val + xspi->memmap_phy, base + XSPI_SFP_TG_SFAR);

	/* Config the data size and lut id, trigger the transfer */
	reg = FIELD_PREP(XSPI_SFP_TG_IPCR_SEQID_MASK, XSPI_SEQID_LUT) |
	      FIELD_PREP(XSPI_SFP_TG_IPCR_IDATSZ_MASK, len);
	writel(reg, base + XSPI_SFP_TG_IPCR);

	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT) {
		err = nxp_xspi_fill_txfifo(xspi, op);
		if (err)
			return err;
	}

	/* Wait for the interrupt. */
	if (!wait_for_completion_timeout(&xspi->c, msecs_to_jiffies(1000)))
		err = -ETIMEDOUT;

	/* Invoke IP data read. */
	if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
		err = nxp_xspi_read_rxfifo(xspi, op);

	return err;
}
987 
/*
 * spi-mem ->exec_op() hook. Serialized via xspi->lock and runs with a
 * runtime-PM reference held for the duration of the operation; both
 * are released automatically via the guard/PM scope helpers.
 */
static int nxp_xspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct nxp_xspi *xspi = spi_controller_get_devdata(mem->spi->controller);
	void __iomem *base = xspi->iobase;
	u32 reg;
	int err;

	guard(mutex)(&xspi->lock);

	PM_RUNTIME_ACQUIRE_AUTOSUSPEND(xspi->dev, pm);
	err = PM_RUNTIME_ACQUIRE_ERR(&pm);
	if (err)
		return err;

	/* Wait for controller being ready. */
	err = readl_poll_timeout(base + XSPI_SR, reg,
			      !(reg & XSPI_SR_BUSY), 1, POLL_TOUT_US);
	if (err) {
		dev_err(xspi->dev, "SR keeps in BUSY!");
		return err;
	}

	nxp_xspi_select_mem(xspi, mem->spi, op);

	nxp_xspi_prepare_lut(xspi, op);

	/*
	 * For read:
	 *     the address in AHB mapped range will use AHB read.
	 *     the address out of AHB mapped range will use IP read.
	 * For write:
	 *     all use IP write.
	 */
	if ((op->data.dir == SPI_MEM_DATA_IN) && !needs_ip_only(xspi)
		&& ((op->addr.val + op->data.nbytes) <= xspi->memmap_phy_size))
		err = nxp_xspi_ahb_read(xspi, op);
	else
		err = nxp_xspi_do_op(xspi, op);

	/* Leave the controller in a clean state for the next operation */
	nxp_xspi_sw_reset(xspi);

	return err;
}
1031 
1032 static int nxp_xspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
1033 {
1034 	struct nxp_xspi *xspi = spi_controller_get_devdata(mem->spi->controller);
1035 
1036 	if (op->data.dir == SPI_MEM_DATA_OUT) {
1037 		if (op->data.nbytes > xspi->devtype_data->txfifo)
1038 			op->data.nbytes = xspi->devtype_data->txfifo;
1039 	} else {
1040 		/* Limit data bytes to RX FIFO in case of IP read only */
1041 		if (needs_ip_only(xspi) && (op->data.nbytes > xspi->devtype_data->rxfifo))
1042 			op->data.nbytes = xspi->devtype_data->rxfifo;
1043 
1044 		/* Address in AHB mapped range prefer to use AHB read. */
1045 		if (!needs_ip_only(xspi) && (op->addr.val < xspi->memmap_phy_size)
1046 			&& ((op->addr.val + op->data.nbytes) > xspi->memmap_phy_size))
1047 			op->data.nbytes = xspi->memmap_phy_size - op->addr.val;
1048 	}
1049 
1050 	return 0;
1051 }
1052 
1053 static void nxp_xspi_config_ahb_buffer(struct nxp_xspi *xspi)
1054 {
1055 	void __iomem *base = xspi->iobase;
1056 	u32 ahb_data_trans_size;
1057 	u32 reg;
1058 
1059 	writel(0xA, base + XSPI_BUF0CR);
1060 	writel(0x2, base + XSPI_BUF1CR);
1061 	writel(0xD, base + XSPI_BUF2CR);
1062 
1063 	/* Configure buffer3 for All Master Access */
1064 	reg = FIELD_PREP(XSPI_BUF3CR_MSTRID_MASK, 0x06) |
1065 	      XSPI_BUF3CR_ALLMST;
1066 
1067 	ahb_data_trans_size = xspi->devtype_data->ahb_buf_size / 8;
1068 	reg |= FIELD_PREP(XSPI_BUF3CR_ADATSZ_MASK, ahb_data_trans_size);
1069 	writel(reg, base + XSPI_BUF3CR);
1070 
1071 	/* Only the buffer3 is used */
1072 	writel(0, base + XSPI_BUF0IND);
1073 	writel(0, base + XSPI_BUF1IND);
1074 	writel(0, base + XSPI_BUF2IND);
1075 
1076 	/* AHB only use ID=15 for read */
1077 	reg = FIELD_PREP(XSPI_BFGENCR_SEQID_MASK, XSPI_SEQID_LUT);
1078 	reg |= XSPI_BFGENCR_WR_FLUSH_EN;
1079 	/* No limit for align */
1080 	reg |= FIELD_PREP(XSPI_BFGENCR_ALIGN_MASK, 0);
1081 	writel(reg, base + XSPI_BFGENCR);
1082 }
1083 
/*
 * One-time controller setup: configure SFP/FRAD access rights for EENV0,
 * reset and reconfigure the module (DDR off, DQS sampling on), set up the
 * AHB buffers and chip-select timing, then re-enable the module and the
 * transfer-complete interrupt.
 *
 * Always returns 0.
 */
static int nxp_xspi_default_setup(struct nxp_xspi *xspi)
{
	void __iomem *base = xspi->iobase;
	u32 reg;

	/* Bypass SFP check, clear MGC_GVLD, MGC_GVLDMDAD, MGC_GVLDFRAD */
	writel(0, base + XSPI_MGC);

	/* Enable the EENV0 SFP check */
	reg = readl(base + XSPI_TG0MDAD);
	reg |= XSPI_TG0MDAD_VLD;
	writel(reg, base + XSPI_TG0MDAD);

	/* Give read/write access right to EENV0 */
	reg = readl(base + XSPI_FRAD0_WORD2);
	reg &= ~XSPI_FRAD0_WORD2_MD0ACP_MASK;
	reg |= FIELD_PREP(XSPI_FRAD0_WORD2_MD0ACP_MASK, 0x03);
	writel(reg, base + XSPI_FRAD0_WORD2);

	/* Enable the FRAD check for EENV0 */
	reg = readl(base + XSPI_FRAD0_WORD3);
	reg |= XSPI_FRAD0_WORD3_VLD;
	writel(reg, base + XSPI_FRAD0_WORD3);

	/*
	 * Config the timeout to max value, this timeout will affect the
	 * TBDR and RBDRn access right after IP cmd triggered.
	 */
	writel(0xFFFFFFFF, base + XSPI_MTO);

	/* Disable module (MDIS) while reconfiguring clocking and sampling */
	reg = readl(base + XSPI_MCR);
	reg |= XSPI_MCR_MDIS;
	writel(reg, base + XSPI_MCR);

	nxp_xspi_sw_reset(xspi);

	/* DDR and DQS output off, DOZE/variable-latency off; DQS sampling on */
	reg = readl(base + XSPI_MCR);
	reg &= ~(XSPI_MCR_CKN_FA_EN | XSPI_MCR_DQS_FA_SEL_MASK |
		 XSPI_MCR_DOZE | XSPI_MCR_VAR_LAT_EN |
		 XSPI_MCR_DDR_EN | XSPI_MCR_DQS_OUT_EN);
	reg |= XSPI_MCR_DQS_EN;
	reg |= XSPI_MCR_ISD3FA | XSPI_MCR_ISD2FA;
	writel(reg, base + XSPI_MCR);

	/* Clear word-addressing/byte-swap/CAS options, then force A10 mode */
	reg = readl(base + XSPI_SFACR);
	reg &= ~(XSPI_SFACR_FORCE_A10 | XSPI_SFACR_WA_4B_EN |
		 XSPI_SFACR_BYTE_SWAP | XSPI_SFACR_WA |
		 XSPI_SFACR_CAS_MASK);
	reg |= XSPI_SFACR_FORCE_A10;
	writel(reg, base + XSPI_SFACR);

	nxp_xspi_config_ahb_buffer(xspi);

	/* Chip-select setup/hold times: TCSS = TCSH = 3 (units per FLSHCR spec) */
	reg = FIELD_PREP(XSPI_FLSHCR_TCSH_MASK, 0x03) |
	      FIELD_PREP(XSPI_FLSHCR_TCSS_MASK, 0x03);
	writel(reg, base + XSPI_FLSHCR);

	/* Enable module */
	reg = readl(base + XSPI_MCR);
	reg &= ~XSPI_MCR_MDIS;
	writel(reg, base + XSPI_MCR);

	/* Force chip-select reprogramming on the next op */
	xspi->selected = -1;

	/* Enable the transfer-finished interrupt */
	writel(XSPI_RSER_TFIE, base + XSPI_RSER);

	return 0;
}
1154 
1155 static const char *nxp_xspi_get_name(struct spi_mem *mem)
1156 {
1157 	struct nxp_xspi *xspi = spi_controller_get_devdata(mem->spi->controller);
1158 	struct device *dev = &mem->spi->dev;
1159 	const char *name;
1160 
1161 	/* Set custom name derived from the platform_device of the controller. */
1162 	if (of_get_available_child_count(xspi->dev->of_node) == 1)
1163 		return dev_name(xspi->dev);
1164 
1165 	name = devm_kasprintf(dev, GFP_KERNEL,
1166 			      "%s-%d", dev_name(xspi->dev),
1167 			      spi_get_chipselect(mem->spi, 0));
1168 
1169 	if (!name) {
1170 		dev_err(dev, "failed to get memory for custom flash name\n");
1171 		return ERR_PTR(-ENOMEM);
1172 	}
1173 
1174 	return name;
1175 }
1176 
/* spi-mem operations implemented by this controller. */
static const struct spi_controller_mem_ops nxp_xspi_mem_ops = {
	.adjust_op_size = nxp_xspi_adjust_op_size,
	.supports_op = nxp_xspi_supports_op,
	.exec_op = nxp_xspi_exec_op,
	.get_name = nxp_xspi_get_name,
};
1183 
/* Advertised spi-mem capabilities: DTR ops, per-op frequency, 16-bit swap. */
static const struct spi_controller_mem_caps nxp_xspi_mem_caps = {
	.dtr = true,
	.per_op_freq = true,
	.swap16 = true,
};
1189 
1190 static void nxp_xspi_cleanup(void *data)
1191 {
1192 	struct nxp_xspi *xspi = data;
1193 	u32 reg;
1194 
1195 	pm_runtime_get_sync(xspi->dev);
1196 
1197 	/* Disable interrupt */
1198 	writel(0, xspi->iobase + XSPI_RSER);
1199 	/* Clear all the internal logic flags */
1200 	writel(0xFFFFFFFF, xspi->iobase + XSPI_FR);
1201 	/* Disable the hardware */
1202 	reg = readl(xspi->iobase + XSPI_MCR);
1203 	reg |= XSPI_MCR_MDIS;
1204 	writel(reg, xspi->iobase + XSPI_MCR);
1205 
1206 	pm_runtime_put_sync(xspi->dev);
1207 
1208 	if (xspi->ahb_addr)
1209 		iounmap(xspi->ahb_addr);
1210 }
1211 
1212 static int nxp_xspi_probe(struct platform_device *pdev)
1213 {
1214 	struct device *dev = &pdev->dev;
1215 	struct spi_controller *ctlr;
1216 	struct nxp_xspi *xspi;
1217 	struct resource *res;
1218 	int ret, irq;
1219 
1220 	ctlr = devm_spi_alloc_host(dev, sizeof(*xspi));
1221 	if (!ctlr)
1222 		return -ENOMEM;
1223 
1224 	ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL |
1225 			  SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL;
1226 
1227 	xspi = spi_controller_get_devdata(ctlr);
1228 	xspi->dev = dev;
1229 	xspi->devtype_data = device_get_match_data(dev);
1230 	if (!xspi->devtype_data)
1231 		return -ENODEV;
1232 
1233 	platform_set_drvdata(pdev, xspi);
1234 
1235 	/* Find the resources - configuration register address space */
1236 	xspi->iobase = devm_platform_ioremap_resource_byname(pdev, "base");
1237 	if (IS_ERR(xspi->iobase))
1238 		return PTR_ERR(xspi->iobase);
1239 
1240 	/* Find the resources - controller memory mapped space */
1241 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmap");
1242 	if (!res)
1243 		return -ENODEV;
1244 
1245 	/* Assign memory mapped starting address and mapped size. */
1246 	xspi->memmap_phy = res->start;
1247 	xspi->memmap_phy_size = resource_size(res);
1248 
1249 	/* Find the clocks */
1250 	xspi->clk = devm_clk_get(dev, "per");
1251 	if (IS_ERR(xspi->clk))
1252 		return PTR_ERR(xspi->clk);
1253 
1254 	/* Find the irq */
1255 	irq = platform_get_irq(pdev, 0);
1256 	if (irq < 0)
1257 		return dev_err_probe(dev, irq,  "Failed to get irq source");
1258 
1259 	pm_runtime_set_autosuspend_delay(dev, XSPI_RPM_TIMEOUT_MS);
1260 	pm_runtime_use_autosuspend(dev);
1261 	ret = devm_pm_runtime_enable(dev);
1262 	if (ret)
1263 		return ret;
1264 
1265 	PM_RUNTIME_ACQUIRE_AUTOSUSPEND(dev, pm);
1266 	ret = PM_RUNTIME_ACQUIRE_ERR(&pm);
1267 	if (ret)
1268 		return dev_err_probe(dev, ret, "Failed to enable clock");
1269 
1270 	/* Clear potential interrupt by write xspi errstat */
1271 	writel(0xFFFFFFFF, xspi->iobase + XSPI_ERRSTAT);
1272 	writel(0xFFFFFFFF, xspi->iobase + XSPI_FR);
1273 
1274 	nxp_xspi_default_setup(xspi);
1275 
1276 	ret = devm_request_irq(dev, irq,
1277 			nxp_xspi_irq_handler, 0, pdev->name, xspi);
1278 	if (ret)
1279 		return dev_err_probe(dev, ret, "failed to request irq");
1280 
1281 	ret = devm_mutex_init(dev, &xspi->lock);
1282 	if (ret)
1283 		return ret;
1284 
1285 	ret = devm_add_action_or_reset(dev, nxp_xspi_cleanup, xspi);
1286 	if (ret)
1287 		return ret;
1288 
1289 	ctlr->bus_num = -1;
1290 	ctlr->num_chipselect = NXP_XSPI_MAX_CHIPSELECT;
1291 	ctlr->mem_ops = &nxp_xspi_mem_ops;
1292 	ctlr->mem_caps = &nxp_xspi_mem_caps;
1293 
1294 	return devm_spi_register_controller(dev, ctlr);
1295 }
1296 
1297 static int nxp_xspi_runtime_suspend(struct device *dev)
1298 {
1299 	struct nxp_xspi *xspi = dev_get_drvdata(dev);
1300 	u32 reg;
1301 
1302 	reg = readl(xspi->iobase + XSPI_MCR);
1303 	reg |= XSPI_MCR_MDIS;
1304 	writel(reg, xspi->iobase + XSPI_MCR);
1305 
1306 	clk_disable_unprepare(xspi->clk);
1307 
1308 	return 0;
1309 }
1310 
1311 static int nxp_xspi_runtime_resume(struct device *dev)
1312 {
1313 	struct nxp_xspi *xspi = dev_get_drvdata(dev);
1314 	u32 reg;
1315 	int ret;
1316 
1317 	ret = clk_prepare_enable(xspi->clk);
1318 	if (ret)
1319 		return ret;
1320 
1321 	reg = readl(xspi->iobase + XSPI_MCR);
1322 	reg &= ~XSPI_MCR_MDIS;
1323 	writel(reg, xspi->iobase + XSPI_MCR);
1324 
1325 	return 0;
1326 }
1327 
/*
 * System suspend: switch pins to their sleep state, then force a runtime
 * suspend of the controller.
 *
 * Fix: the error message said "flexspi" (copy-paste from spi-nxp-fspi.c);
 * this is the xSPI driver.
 */
static int nxp_xspi_suspend(struct device *dev)
{
	int ret;

	ret = pinctrl_pm_select_sleep_state(dev);
	if (ret) {
		dev_err(dev, "select xspi sleep pinctrl failed!\n");
		return ret;
	}

	return pm_runtime_force_suspend(dev);
}
1340 
/*
 * System resume: force a runtime resume, redo the default controller
 * setup (register state is lost across suspend), then restore the
 * default pinctrl state.
 *
 * Fix: the error message said "flexspi" (copy-paste from spi-nxp-fspi.c);
 * this is the xSPI driver.
 */
static int nxp_xspi_resume(struct device *dev)
{
	struct nxp_xspi *xspi = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret)
		return ret;

	nxp_xspi_default_setup(xspi);

	ret = pinctrl_pm_select_default_state(dev);
	if (ret)
		dev_err(dev, "select xspi default pinctrl failed!\n");

	return ret;
}
1358 
1359 
/* Runtime and system sleep PM callbacks. */
static const struct dev_pm_ops nxp_xspi_pm_ops = {
	RUNTIME_PM_OPS(nxp_xspi_runtime_suspend, nxp_xspi_runtime_resume, NULL)
	SYSTEM_SLEEP_PM_OPS(nxp_xspi_suspend, nxp_xspi_resume)
};
1364 
1365 static const struct of_device_id nxp_xspi_dt_ids[] = {
1366 	{ .compatible = "nxp,imx94-xspi", .data = (void *)&imx94_data, },
1367 	{ /* sentinel */ }
1368 };
1369 MODULE_DEVICE_TABLE(of, nxp_xspi_dt_ids);
1370 
/* Platform driver glue; removal teardown is handled by devm actions. */
static struct platform_driver nxp_xspi_driver = {
	.driver = {
		.name	= "nxp-xspi",
		.of_match_table = nxp_xspi_dt_ids,
		.pm =   pm_ptr(&nxp_xspi_pm_ops),
	},
	.probe          = nxp_xspi_probe,
};
module_platform_driver(nxp_xspi_driver);

MODULE_DESCRIPTION("NXP xSPI Controller Driver");
MODULE_AUTHOR("NXP Semiconductor");
MODULE_AUTHOR("Haibo Chen <haibo.chen@nxp.com>");
MODULE_LICENSE("GPL");
1385