xref: /linux/drivers/spi/spi-nxp-xspi.c (revision c17ee635fd3a482b2ad2bf5e269755c2eae5f25e)
1*29c8c00dSHaibo Chen // SPDX-License-Identifier: GPL-2.0+
2*29c8c00dSHaibo Chen 
3*29c8c00dSHaibo Chen /*
4*29c8c00dSHaibo Chen  * NXP xSPI controller driver.
5*29c8c00dSHaibo Chen  *
6*29c8c00dSHaibo Chen  * Copyright 2025 NXP
7*29c8c00dSHaibo Chen  *
8*29c8c00dSHaibo Chen  * xSPI is a flexible SPI host controller which supports single
9*29c8c00dSHaibo Chen  * external devices. This device can have up to eight bidirectional
 * data lines, which means xSPI supports Single/Dual/Quad/Octal mode
 * data transfer (1/2/4/8 bidirectional data lines).
12*29c8c00dSHaibo Chen  *
13*29c8c00dSHaibo Chen  * xSPI controller is driven by the LUT(Look-up Table) registers
14*29c8c00dSHaibo Chen  * LUT registers are a look-up-table for sequences of instructions.
15*29c8c00dSHaibo Chen  * A valid sequence consists of five LUT registers.
16*29c8c00dSHaibo Chen  * Maximum 16 LUT sequences can be programmed simultaneously.
17*29c8c00dSHaibo Chen  *
18*29c8c00dSHaibo Chen  * LUTs are being created at run-time based on the commands passed
19*29c8c00dSHaibo Chen  * from the spi-mem framework, thus using single LUT index.
20*29c8c00dSHaibo Chen  *
21*29c8c00dSHaibo Chen  * Software triggered Flash read/write access by IP Bus.
22*29c8c00dSHaibo Chen  *
23*29c8c00dSHaibo Chen  * Memory mapped read access by AHB Bus.
24*29c8c00dSHaibo Chen  *
25*29c8c00dSHaibo Chen  * Based on SPI MEM interface and spi-nxp-fspi.c driver.
26*29c8c00dSHaibo Chen  *
27*29c8c00dSHaibo Chen  * Author:
28*29c8c00dSHaibo Chen  *     Haibo Chen <haibo.chen@nxp.com>
29*29c8c00dSHaibo Chen  * Co-author:
30*29c8c00dSHaibo Chen  *     Han Xu <han.xu@nxp.com>
31*29c8c00dSHaibo Chen  */
32*29c8c00dSHaibo Chen 
33*29c8c00dSHaibo Chen #include <linux/bitops.h>
34*29c8c00dSHaibo Chen #include <linux/bitfield.h>
35*29c8c00dSHaibo Chen #include <linux/clk.h>
36*29c8c00dSHaibo Chen #include <linux/completion.h>
37*29c8c00dSHaibo Chen #include <linux/delay.h>
38*29c8c00dSHaibo Chen #include <linux/err.h>
39*29c8c00dSHaibo Chen #include <linux/errno.h>
40*29c8c00dSHaibo Chen #include <linux/interrupt.h>
41*29c8c00dSHaibo Chen #include <linux/io.h>
42*29c8c00dSHaibo Chen #include <linux/iopoll.h>
43*29c8c00dSHaibo Chen #include <linux/jiffies.h>
44*29c8c00dSHaibo Chen #include <linux/kernel.h>
45*29c8c00dSHaibo Chen #include <linux/log2.h>
46*29c8c00dSHaibo Chen #include <linux/module.h>
47*29c8c00dSHaibo Chen #include <linux/mutex.h>
48*29c8c00dSHaibo Chen #include <linux/of.h>
49*29c8c00dSHaibo Chen #include <linux/platform_device.h>
50*29c8c00dSHaibo Chen #include <linux/pinctrl/consumer.h>
51*29c8c00dSHaibo Chen #include <linux/pm_runtime.h>
52*29c8c00dSHaibo Chen #include <linux/spi/spi.h>
53*29c8c00dSHaibo Chen #include <linux/spi/spi-mem.h>
54*29c8c00dSHaibo Chen 
55*29c8c00dSHaibo Chen /* Runtime pm timeout */
56*29c8c00dSHaibo Chen #define XSPI_RPM_TIMEOUT_MS 50	/* 50ms */
57*29c8c00dSHaibo Chen /*
58*29c8c00dSHaibo Chen  * The driver only uses one single LUT entry, that is updated on
59*29c8c00dSHaibo Chen  * each call of exec_op(). Index 0 is preset at boot with a basic
60*29c8c00dSHaibo Chen  * read operation, so let's use the last entry (15).
61*29c8c00dSHaibo Chen  */
62*29c8c00dSHaibo Chen #define	XSPI_SEQID_LUT			15
63*29c8c00dSHaibo Chen 
64*29c8c00dSHaibo Chen #define XSPI_MCR			0x0
65*29c8c00dSHaibo Chen #define XSPI_MCR_CKN_FA_EN		BIT(26)
66*29c8c00dSHaibo Chen #define XSPI_MCR_DQS_FA_SEL_MASK	GENMASK(25, 24)
67*29c8c00dSHaibo Chen #define XSPI_MCR_ISD3FA			BIT(17)
68*29c8c00dSHaibo Chen #define XSPI_MCR_ISD2FA			BIT(16)
69*29c8c00dSHaibo Chen #define XSPI_MCR_DOZE			BIT(15)
70*29c8c00dSHaibo Chen #define XSPI_MCR_MDIS			BIT(14)
71*29c8c00dSHaibo Chen #define XSPI_MCR_DLPEN			BIT(12)
72*29c8c00dSHaibo Chen #define XSPI_MCR_CLR_TXF		BIT(11)
73*29c8c00dSHaibo Chen #define XSPI_MCR_CLR_RXF		BIT(10)
74*29c8c00dSHaibo Chen #define XSPI_MCR_IPS_TG_RST		BIT(9)
75*29c8c00dSHaibo Chen #define XSPI_MCR_VAR_LAT_EN		BIT(8)
76*29c8c00dSHaibo Chen #define XSPI_MCR_DDR_EN			BIT(7)
77*29c8c00dSHaibo Chen #define XSPI_MCR_DQS_EN			BIT(6)
78*29c8c00dSHaibo Chen #define XSPI_MCR_DQS_LAT_EN		BIT(5)
79*29c8c00dSHaibo Chen #define XSPI_MCR_DQS_OUT_EN		BIT(4)
80*29c8c00dSHaibo Chen #define XSPI_MCR_SWRSTHD		BIT(1)
81*29c8c00dSHaibo Chen #define XSPI_MCR_SWRSTSD		BIT(0)
82*29c8c00dSHaibo Chen 
83*29c8c00dSHaibo Chen #define XSPI_IPCR			0x8
84*29c8c00dSHaibo Chen 
85*29c8c00dSHaibo Chen #define XSPI_FLSHCR			0xC
86*29c8c00dSHaibo Chen #define XSPI_FLSHCR_TDH_MASK		GENMASK(17, 16)
87*29c8c00dSHaibo Chen #define XSPI_FLSHCR_TCSH_MASK		GENMASK(11, 8)
88*29c8c00dSHaibo Chen #define XSPI_FLSHCR_TCSS_MASK		GENMASK(3, 0)
89*29c8c00dSHaibo Chen 
90*29c8c00dSHaibo Chen #define XSPI_BUF0CR			0x10
91*29c8c00dSHaibo Chen #define XSPI_BUF1CR			0x14
92*29c8c00dSHaibo Chen #define XSPI_BUF2CR			0x18
93*29c8c00dSHaibo Chen #define XSPI_BUF3CR			0x1C
94*29c8c00dSHaibo Chen #define XSPI_BUF3CR_ALLMST		BIT(31)
95*29c8c00dSHaibo Chen #define XSPI_BUF3CR_ADATSZ_MASK		GENMASK(17, 8)
96*29c8c00dSHaibo Chen #define XSPI_BUF3CR_MSTRID_MASK		GENMASK(3, 0)
97*29c8c00dSHaibo Chen 
98*29c8c00dSHaibo Chen #define XSPI_BFGENCR			0x20
99*29c8c00dSHaibo Chen #define XSPI_BFGENCR_SEQID_WR_MASK	GENMASK(31, 28)
100*29c8c00dSHaibo Chen #define XSPI_BFGENCR_ALIGN_MASK		GENMASK(24, 22)
101*29c8c00dSHaibo Chen #define XSPI_BFGENCR_PPWF_CLR		BIT(20)
102*29c8c00dSHaibo Chen #define XSPI_BFGENCR_WR_FLUSH_EN	BIT(21)
103*29c8c00dSHaibo Chen #define XSPI_BFGENCR_SEQID_WR_EN	BIT(17)
104*29c8c00dSHaibo Chen #define XSPI_BFGENCR_SEQID_MASK		GENMASK(15, 12)
105*29c8c00dSHaibo Chen 
106*29c8c00dSHaibo Chen #define XSPI_BUF0IND			0x30
107*29c8c00dSHaibo Chen #define XSPI_BUF1IND			0x34
108*29c8c00dSHaibo Chen #define XSPI_BUF2IND			0x38
109*29c8c00dSHaibo Chen 
110*29c8c00dSHaibo Chen #define XSPI_DLLCRA			0x60
111*29c8c00dSHaibo Chen #define XSPI_DLLCRA_DLLEN		BIT(31)
112*29c8c00dSHaibo Chen #define XSPI_DLLCRA_FREQEN		BIT(30)
113*29c8c00dSHaibo Chen #define XSPI_DLLCRA_DLL_REFCNTR_MASK	GENMASK(27, 24)
114*29c8c00dSHaibo Chen #define XSPI_DLLCRA_DLLRES_MASK		GENMASK(23, 20)
115*29c8c00dSHaibo Chen #define XSPI_DLLCRA_SLV_FINE_MASK	GENMASK(19, 16)
116*29c8c00dSHaibo Chen #define XSPI_DLLCRA_SLV_DLY_MASK	GENMASK(14, 12)
117*29c8c00dSHaibo Chen #define XSPI_DLLCRA_SLV_DLY_COARSE_MASK	GENMASK(11,  8)
118*29c8c00dSHaibo Chen #define XSPI_DLLCRA_SLV_DLY_FINE_MASK	GENMASK(7, 5)
119*29c8c00dSHaibo Chen #define XSPI_DLLCRA_DLL_CDL8		BIT(4)
120*29c8c00dSHaibo Chen #define XSPI_DLLCRA_SLAVE_AUTO_UPDT	BIT(3)
121*29c8c00dSHaibo Chen #define XSPI_DLLCRA_SLV_EN		BIT(2)
122*29c8c00dSHaibo Chen #define XSPI_DLLCRA_SLV_DLL_BYPASS	BIT(1)
123*29c8c00dSHaibo Chen #define XSPI_DLLCRA_SLV_UPD		BIT(0)
124*29c8c00dSHaibo Chen 
125*29c8c00dSHaibo Chen #define XSPI_SFAR			0x100
126*29c8c00dSHaibo Chen 
127*29c8c00dSHaibo Chen #define XSPI_SFACR			0x104
128*29c8c00dSHaibo Chen #define XSPI_SFACR_FORCE_A10		BIT(22)
129*29c8c00dSHaibo Chen #define XSPI_SFACR_WA_4B_EN		BIT(21)
130*29c8c00dSHaibo Chen #define XSPI_SFACR_CAS_INTRLVD		BIT(20)
131*29c8c00dSHaibo Chen #define XSPI_SFACR_RX_BP_EN		BIT(18)
132*29c8c00dSHaibo Chen #define XSPI_SFACR_BYTE_SWAP		BIT(17)
133*29c8c00dSHaibo Chen #define XSPI_SFACR_WA			BIT(16)
134*29c8c00dSHaibo Chen #define XSPI_SFACR_CAS_MASK		GENMASK(3, 0)
135*29c8c00dSHaibo Chen 
136*29c8c00dSHaibo Chen #define XSPI_SMPR			0x108
137*29c8c00dSHaibo Chen #define XSPI_SMPR_DLLFSMPFA_MASK	GENMASK(26, 24)
138*29c8c00dSHaibo Chen #define XSPI_SMPR_FSDLY			BIT(6)
139*29c8c00dSHaibo Chen #define XSPI_SMPR_FSPHS			BIT(5)
140*29c8c00dSHaibo Chen 
141*29c8c00dSHaibo Chen #define XSPI_RBSR			0x10C
142*29c8c00dSHaibo Chen 
143*29c8c00dSHaibo Chen #define XSPI_RBCT			0x110
144*29c8c00dSHaibo Chen #define XSPI_RBCT_WMRK_MASK		GENMASK(6, 0)
145*29c8c00dSHaibo Chen 
146*29c8c00dSHaibo Chen #define XSPI_DLLSR			0x12C
147*29c8c00dSHaibo Chen #define XSPI_DLLSR_DLLA_LOCK		BIT(15)
148*29c8c00dSHaibo Chen #define XSPI_DLLSR_SLVA_LOCK		BIT(14)
149*29c8c00dSHaibo Chen #define XSPI_DLLSR_DLLA_RANGE_ERR	BIT(13)
150*29c8c00dSHaibo Chen #define XSPI_DLLSR_DLLA_FINE_UNDERFLOW	BIT(12)
151*29c8c00dSHaibo Chen 
152*29c8c00dSHaibo Chen #define XSPI_TBSR			0x150
153*29c8c00dSHaibo Chen 
154*29c8c00dSHaibo Chen #define XSPI_TBDR			0x154
155*29c8c00dSHaibo Chen 
156*29c8c00dSHaibo Chen #define XSPI_TBCT			0x158
157*29c8c00dSHaibo Chen #define XSPI_TBCT_WMRK_MASK		GENMASK(7, 0)
158*29c8c00dSHaibo Chen 
159*29c8c00dSHaibo Chen #define XSPI_SR				0x15C
160*29c8c00dSHaibo Chen #define XSPI_SR_TXFULL			BIT(27)
161*29c8c00dSHaibo Chen #define XSPI_SR_TXDMA			BIT(26)
162*29c8c00dSHaibo Chen #define XSPI_SR_TXWA			BIT(25)
163*29c8c00dSHaibo Chen #define XSPI_SR_TXNE			BIT(24)
164*29c8c00dSHaibo Chen #define XSPI_SR_RXDMA			BIT(23)
165*29c8c00dSHaibo Chen #define XSPI_SR_ARB_STATE_MASK		GENMASK(23, 20)
166*29c8c00dSHaibo Chen #define XSPI_SR_RXFULL			BIT(19)
167*29c8c00dSHaibo Chen #define XSPI_SR_RXWE			BIT(16)
168*29c8c00dSHaibo Chen #define XSPI_SR_ARB_LCK			BIT(15)
169*29c8c00dSHaibo Chen #define XSPI_SR_AHBnFUL			BIT(11)
170*29c8c00dSHaibo Chen #define XSPI_SR_AHBnNE			BIT(7)
171*29c8c00dSHaibo Chen #define XSPI_SR_AHBTRN			BIT(6)
172*29c8c00dSHaibo Chen #define XSPI_SR_AWRACC			BIT(4)
173*29c8c00dSHaibo Chen #define XSPI_SR_AHB_ACC			BIT(2)
174*29c8c00dSHaibo Chen #define XSPI_SR_IP_ACC			BIT(1)
175*29c8c00dSHaibo Chen #define XSPI_SR_BUSY			BIT(0)
176*29c8c00dSHaibo Chen 
177*29c8c00dSHaibo Chen #define XSPI_FR				0x160
178*29c8c00dSHaibo Chen #define XSPI_FR_DLPFF			BIT(31)
179*29c8c00dSHaibo Chen #define XSPI_FR_DLLABRT			BIT(28)
180*29c8c00dSHaibo Chen #define XSPI_FR_TBFF			BIT(27)
181*29c8c00dSHaibo Chen #define XSPI_FR_TBUF			BIT(26)
182*29c8c00dSHaibo Chen #define XSPI_FR_DLLUNLCK		BIT(24)
183*29c8c00dSHaibo Chen #define XSPI_FR_ILLINE			BIT(23)
184*29c8c00dSHaibo Chen #define XSPI_FR_RBOF			BIT(17)
185*29c8c00dSHaibo Chen #define XSPI_FR_RBDF			BIT(16)
186*29c8c00dSHaibo Chen #define XSPI_FR_AAEF			BIT(15)
187*29c8c00dSHaibo Chen #define XSPI_FR_AITEF			BIT(14)
188*29c8c00dSHaibo Chen #define XSPI_FR_AIBSEF			BIT(13)
189*29c8c00dSHaibo Chen #define XSPI_FR_ABOF			BIT(12)
190*29c8c00dSHaibo Chen #define XSPI_FR_CRCAEF			BIT(10)
191*29c8c00dSHaibo Chen #define XSPI_FR_PPWF			BIT(8)
192*29c8c00dSHaibo Chen #define XSPI_FR_IPIEF			BIT(6)
193*29c8c00dSHaibo Chen #define XSPI_FR_IPEDERR			BIT(5)
194*29c8c00dSHaibo Chen #define XSPI_FR_PERFOVF			BIT(2)
195*29c8c00dSHaibo Chen #define XSPI_FR_RDADDR			BIT(1)
196*29c8c00dSHaibo Chen #define XSPI_FR_TFF			BIT(0)
197*29c8c00dSHaibo Chen 
198*29c8c00dSHaibo Chen #define XSPI_RSER			0x164
199*29c8c00dSHaibo Chen #define XSPI_RSER_TFIE			BIT(0)
200*29c8c00dSHaibo Chen 
201*29c8c00dSHaibo Chen #define XSPI_SFA1AD			0x180
202*29c8c00dSHaibo Chen 
203*29c8c00dSHaibo Chen #define XSPI_SFA2AD			0x184
204*29c8c00dSHaibo Chen 
205*29c8c00dSHaibo Chen #define XSPI_RBDR0			0x200
206*29c8c00dSHaibo Chen 
207*29c8c00dSHaibo Chen #define XSPI_LUTKEY			0x300
208*29c8c00dSHaibo Chen #define XSPI_LUT_KEY_VAL		(0x5AF05AF0UL)
209*29c8c00dSHaibo Chen 
210*29c8c00dSHaibo Chen #define XSPI_LCKCR			0x304
211*29c8c00dSHaibo Chen #define XSPI_LOKCR_LOCK			BIT(0)
212*29c8c00dSHaibo Chen #define XSPI_LOKCR_UNLOCK		BIT(1)
213*29c8c00dSHaibo Chen 
214*29c8c00dSHaibo Chen #define XSPI_LUT			0x310
215*29c8c00dSHaibo Chen #define XSPI_LUT_OFFSET			(XSPI_SEQID_LUT * 5 * 4)
216*29c8c00dSHaibo Chen #define XSPI_LUT_REG(idx) \
217*29c8c00dSHaibo Chen 	(XSPI_LUT + XSPI_LUT_OFFSET + (idx) * 4)
218*29c8c00dSHaibo Chen 
219*29c8c00dSHaibo Chen #define XSPI_MCREXT			0x4FC
220*29c8c00dSHaibo Chen #define XSPI_MCREXT_RST_MASK		GENMASK(3, 0)
221*29c8c00dSHaibo Chen 
222*29c8c00dSHaibo Chen 
223*29c8c00dSHaibo Chen #define XSPI_FRAD0_WORD2		0x808
224*29c8c00dSHaibo Chen #define XSPI_FRAD0_WORD2_MD0ACP_MASK	GENMASK(2, 0)
225*29c8c00dSHaibo Chen 
226*29c8c00dSHaibo Chen #define XSPI_FRAD0_WORD3		0x80C
227*29c8c00dSHaibo Chen #define XSPI_FRAD0_WORD3_VLD		BIT(31)
228*29c8c00dSHaibo Chen 
229*29c8c00dSHaibo Chen #define XSPI_TG0MDAD			0x900
230*29c8c00dSHaibo Chen #define XSPI_TG0MDAD_VLD		BIT(31)
231*29c8c00dSHaibo Chen 
232*29c8c00dSHaibo Chen #define XSPI_TG1MDAD			0x910
233*29c8c00dSHaibo Chen 
234*29c8c00dSHaibo Chen #define XSPI_MGC			0x920
235*29c8c00dSHaibo Chen #define XSPI_MGC_GVLD			BIT(31)
236*29c8c00dSHaibo Chen #define XSPI_MGC_GVLDMDAD		BIT(29)
237*29c8c00dSHaibo Chen #define XSPI_MGC_GVLDFRAD		BIT(27)
238*29c8c00dSHaibo Chen 
239*29c8c00dSHaibo Chen #define XSPI_MTO			0x928
240*29c8c00dSHaibo Chen 
241*29c8c00dSHaibo Chen #define XSPI_ERRSTAT			0x938
242*29c8c00dSHaibo Chen #define XSPI_INT_EN			0x93C
243*29c8c00dSHaibo Chen 
244*29c8c00dSHaibo Chen #define XSPI_SFP_TG_IPCR		0x958
245*29c8c00dSHaibo Chen #define XSPI_SFP_TG_IPCR_SEQID_MASK	GENMASK(27, 24)
246*29c8c00dSHaibo Chen #define XSPI_SFP_TG_IPCR_ARB_UNLOCK	BIT(23)
247*29c8c00dSHaibo Chen #define XSPI_SFP_TG_IPCR_ARB_LOCK	BIT(22)
248*29c8c00dSHaibo Chen #define XSPI_SFP_TG_IPCR_IDATSZ_MASK	GENMASK(15, 0)
249*29c8c00dSHaibo Chen 
250*29c8c00dSHaibo Chen #define XSPI_SFP_TG_SFAR 0x95C
251*29c8c00dSHaibo Chen 
252*29c8c00dSHaibo Chen /* Register map end */
253*29c8c00dSHaibo Chen 
254*29c8c00dSHaibo Chen /********* XSPI CMD definitions ***************************/
255*29c8c00dSHaibo Chen #define LUT_STOP	0x00
256*29c8c00dSHaibo Chen #define LUT_CMD_SDR	0x01
257*29c8c00dSHaibo Chen #define LUT_ADDR_SDR	0x02
258*29c8c00dSHaibo Chen #define LUT_DUMMY	0x03
259*29c8c00dSHaibo Chen #define LUT_MODE8_SDR	0x04
260*29c8c00dSHaibo Chen #define LUT_MODE2_SDR	0x05
261*29c8c00dSHaibo Chen #define LUT_MODE4_SDR	0x06
262*29c8c00dSHaibo Chen #define LUT_READ_SDR	0x07
263*29c8c00dSHaibo Chen #define LUT_WRITE_SDR	0x08
264*29c8c00dSHaibo Chen #define LUT_JMP_ON_CS	0x09
265*29c8c00dSHaibo Chen #define LUT_ADDR_DDR	0x0A
266*29c8c00dSHaibo Chen #define LUT_MODE8_DDR	0x0B
267*29c8c00dSHaibo Chen #define LUT_MODE2_DDR	0x0C
268*29c8c00dSHaibo Chen #define LUT_MODE4_DDR	0x0D
269*29c8c00dSHaibo Chen #define LUT_READ_DDR	0x0E
270*29c8c00dSHaibo Chen #define LUT_WRITE_DDR	0x0F
271*29c8c00dSHaibo Chen #define LUT_DATA_LEARN	0x10
272*29c8c00dSHaibo Chen #define LUT_CMD_DDR	0x11
273*29c8c00dSHaibo Chen #define LUT_CADDR_SDR	0x12
274*29c8c00dSHaibo Chen #define LUT_CADDR_DDR	0x13
275*29c8c00dSHaibo Chen #define JMP_TO_SEQ	0x14
276*29c8c00dSHaibo Chen 
277*29c8c00dSHaibo Chen #define XSPI_64BIT_LE	0x3
278*29c8c00dSHaibo Chen /*
279*29c8c00dSHaibo Chen  * Calculate number of required PAD bits for LUT register.
280*29c8c00dSHaibo Chen  *
281*29c8c00dSHaibo Chen  * The pad stands for the number of IO lines [0:7].
282*29c8c00dSHaibo Chen  * For example, the octal read needs eight IO lines,
283*29c8c00dSHaibo Chen  * so you should use LUT_PAD(8). This macro
284*29c8c00dSHaibo Chen  * returns 3 i.e. use eight (2^3) IP lines for read.
285*29c8c00dSHaibo Chen  */
286*29c8c00dSHaibo Chen #define LUT_PAD(x) (fls(x) - 1)
287*29c8c00dSHaibo Chen 
288*29c8c00dSHaibo Chen /*
289*29c8c00dSHaibo Chen  * Macro for constructing the LUT entries with the following
290*29c8c00dSHaibo Chen  * register layout:
291*29c8c00dSHaibo Chen  *
292*29c8c00dSHaibo Chen  *  ---------------------------------------------------
293*29c8c00dSHaibo Chen  *  | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
294*29c8c00dSHaibo Chen  *  ---------------------------------------------------
295*29c8c00dSHaibo Chen  */
296*29c8c00dSHaibo Chen #define PAD_SHIFT		8
297*29c8c00dSHaibo Chen #define INSTR_SHIFT		10
298*29c8c00dSHaibo Chen #define OPRND_SHIFT		16
299*29c8c00dSHaibo Chen 
300*29c8c00dSHaibo Chen /* Macros for constructing the LUT register. */
301*29c8c00dSHaibo Chen #define LUT_DEF(idx, ins, pad, opr)			  \
302*29c8c00dSHaibo Chen 	((((ins) << INSTR_SHIFT) | ((pad) << PAD_SHIFT) | \
303*29c8c00dSHaibo Chen 	(opr)) << (((idx) % 2) * OPRND_SHIFT))
304*29c8c00dSHaibo Chen 
305*29c8c00dSHaibo Chen #define NXP_XSPI_MIN_IOMAP	SZ_4M
306*29c8c00dSHaibo Chen #define NXP_XSPI_MAX_CHIPSELECT		2
307*29c8c00dSHaibo Chen #define POLL_TOUT_US		5000
308*29c8c00dSHaibo Chen 
309*29c8c00dSHaibo Chen /* Access flash memory using IP bus only */
310*29c8c00dSHaibo Chen #define XSPI_QUIRK_USE_IP_ONLY	BIT(0)
311*29c8c00dSHaibo Chen 
/* Per-SoC capability/quirk data, selected through the OF match table. */
struct nxp_xspi_devtype_data {
	unsigned int rxfifo;		/* RX FIFO size in bytes */
	unsigned int txfifo;		/* TX FIFO size in bytes */
	unsigned int ahb_buf_size;	/* AHB read buffer size in bytes */
	unsigned int quirks;		/* XSPI_QUIRK_* flags */
};
318*29c8c00dSHaibo Chen 
/* i.MX94 xSPI instance: no quirks, so AHB-mapped reads are permitted. */
static struct nxp_xspi_devtype_data imx94_data = {
	.rxfifo = SZ_512,       /* (128 * 4 bytes)  */
	.txfifo = SZ_1K,        /* (256 * 4 bytes)  */
	.ahb_buf_size = SZ_4K,  /* (1024 * 4 bytes)  */
};
324*29c8c00dSHaibo Chen 
/* Driver-private controller state, one instance per probed device. */
struct nxp_xspi {
	void __iomem *iobase;		/* IP register window */
	void __iomem *ahb_addr;		/* AHB (memory-mapped read) window */
	u32 memmap_phy;			/* physical base of the AHB window */
	u32 memmap_phy_size;		/* total size of the AHB window */
	u32 memmap_start;		/* flash offset of the current mapping — usage not visible here */
	u32 memmap_len;			/* length of the current mapping — usage not visible here */
	struct clk *clk;
	struct device *dev;
	struct completion c;		/* completed by nxp_xspi_irq_handler() on XSPI_FR_TFF */
	const struct nxp_xspi_devtype_data *devtype_data;
	/* mutex lock for each operation */
	struct mutex lock;
	int selected;			/* currently selected chip-select (see probe/select code) */
#define XSPI_DTR_PROTO		BIT(0)
	int flags;			/* XSPI_DTR_PROTO when the last configured op used DTR */
	/* Save the previous operation clock rate */
	unsigned long pre_op_rate;
	/* The max clock rate xspi supported output to device */
	unsigned long support_max_rate;
};
346*29c8c00dSHaibo Chen 
347*29c8c00dSHaibo Chen static inline int needs_ip_only(struct nxp_xspi *xspi)
348*29c8c00dSHaibo Chen {
349*29c8c00dSHaibo Chen 	return xspi->devtype_data->quirks & XSPI_QUIRK_USE_IP_ONLY;
350*29c8c00dSHaibo Chen }
351*29c8c00dSHaibo Chen 
352*29c8c00dSHaibo Chen static irqreturn_t nxp_xspi_irq_handler(int irq, void *dev_id)
353*29c8c00dSHaibo Chen {
354*29c8c00dSHaibo Chen 	struct nxp_xspi *xspi = dev_id;
355*29c8c00dSHaibo Chen 	u32 reg;
356*29c8c00dSHaibo Chen 
357*29c8c00dSHaibo Chen 	reg = readl(xspi->iobase + XSPI_FR);
358*29c8c00dSHaibo Chen 	if (reg & XSPI_FR_TFF) {
359*29c8c00dSHaibo Chen 		/* Clear interrupt */
360*29c8c00dSHaibo Chen 		writel(XSPI_FR_TFF, xspi->iobase + XSPI_FR);
361*29c8c00dSHaibo Chen 		complete(&xspi->c);
362*29c8c00dSHaibo Chen 		return IRQ_HANDLED;
363*29c8c00dSHaibo Chen 	}
364*29c8c00dSHaibo Chen 
365*29c8c00dSHaibo Chen 	return IRQ_NONE;
366*29c8c00dSHaibo Chen }
367*29c8c00dSHaibo Chen 
368*29c8c00dSHaibo Chen static int nxp_xspi_check_buswidth(struct nxp_xspi *xspi, u8 width)
369*29c8c00dSHaibo Chen {
370*29c8c00dSHaibo Chen 	return (is_power_of_2(width) && width <= 8) ? 0 : -EOPNOTSUPP;
371*29c8c00dSHaibo Chen }
372*29c8c00dSHaibo Chen 
/*
 * spi-mem ->supports_op() hook: reject operations the controller cannot
 * execute (bad buswidths, oversized address/dummy phase, transfers that
 * exceed the FIFO limits of the chosen access path).
 */
static bool nxp_xspi_supports_op(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	struct nxp_xspi *xspi = spi_controller_get_devdata(mem->spi->controller);
	int ret;

	/*
	 * OR the per-phase results together: any phase with an unsupported
	 * buswidth makes the whole op unsupported.
	 */
	ret = nxp_xspi_check_buswidth(xspi, op->cmd.buswidth);

	if (op->addr.nbytes)
		ret |= nxp_xspi_check_buswidth(xspi, op->addr.buswidth);

	if (op->dummy.nbytes)
		ret |= nxp_xspi_check_buswidth(xspi, op->dummy.buswidth);

	if (op->data.nbytes)
		ret |= nxp_xspi_check_buswidth(xspi, op->data.buswidth);

	if (ret)
		return false;

	/*
	 * The number of address bytes should be equal to or less than 4 bytes.
	 */
	if (op->addr.nbytes > 4)
		return false;

	/*
	 * Max 64 dummy clock cycles supported by the check below.
	 * NOTE(review): the original comment claimed 32 while the code
	 * enforces 64 — confirm the real limit against the reference manual.
	 */
	if (op->dummy.buswidth &&
	    (op->dummy.nbytes * 8 / op->dummy.buswidth > 64))
		return false;

	/* IP-bus-only access cannot read more than the RX FIFO holds */
	if (needs_ip_only(xspi) && op->data.dir == SPI_MEM_DATA_IN &&
	    op->data.nbytes > xspi->devtype_data->rxfifo)
		return false;

	/* Writes go through the IP bus, bounded by the TX FIFO size */
	if (op->data.dir == SPI_MEM_DATA_OUT &&
			op->data.nbytes > xspi->devtype_data->txfifo)
		return false;

	return spi_mem_default_supports_op(mem, op);
}
414*29c8c00dSHaibo Chen 
/*
 * Program LUT sequence XSPI_SEQID_LUT with the cmd/addr/dummy/data phases
 * described by @op. The driver rebuilds this single sequence on every
 * exec_op() call (see the comment at the top of the file).
 *
 * Each 32-bit LUT register holds two 16-bit instruction slots, so slot
 * lutidx lives in word lutidx/2 and LUT_DEF() shifts it into the upper
 * half when lutidx is odd.
 */
static void nxp_xspi_prepare_lut(struct nxp_xspi *xspi,
				 const struct spi_mem_op *op)
{
	void __iomem *base = xspi->iobase;
	u32 lutval[5] = {};
	int lutidx = 1, i;

	/* cmd */
	if (op->cmd.dtr) {
		/* DTR opcodes are 16 bit: high byte first, then low byte */
		lutval[0] |= LUT_DEF(0, LUT_CMD_DDR, LUT_PAD(op->cmd.buswidth),
				     op->cmd.opcode >> 8);
		lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_CMD_DDR,
					      LUT_PAD(op->cmd.buswidth),
					      op->cmd.opcode & 0x00ff);
		lutidx++;
	} else {
		lutval[0] |= LUT_DEF(0, LUT_CMD_SDR, LUT_PAD(op->cmd.buswidth),
				     op->cmd.opcode);
	}

	/* Addr bytes (operand is the address length in bits) */
	if (op->addr.nbytes) {
		lutval[lutidx / 2] |= LUT_DEF(lutidx, op->addr.dtr ?
					      LUT_ADDR_DDR : LUT_ADDR_SDR,
					      LUT_PAD(op->addr.buswidth),
					      op->addr.nbytes * 8);
		lutidx++;
	}

	/*
	 * Dummy bytes, if needed. The operand is the dummy clock cycle
	 * count; DTR halves it since data toggles on both clock edges.
	 * NOTE(review): the pad is taken from op->data.buswidth rather
	 * than op->dummy.buswidth — presumably intentional (dummy cycles
	 * run at the data-phase width); confirm against the RM.
	 */
	if (op->dummy.nbytes) {
		lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY,
					      LUT_PAD(op->data.buswidth),
					      op->dummy.nbytes * 8 /
						/* need distinguish ddr mode */
					      op->dummy.buswidth / (op->dummy.dtr ? 2 : 1));
		lutidx++;
	}

	/* Read/Write data bytes (operand 0 = length taken from IPCR/IDATSZ) */
	if (op->data.nbytes) {
		lutval[lutidx / 2] |= LUT_DEF(lutidx,
					      op->data.dir == SPI_MEM_DATA_IN ?
					      (op->data.dtr ? LUT_READ_DDR : LUT_READ_SDR) :
					      (op->data.dtr ? LUT_WRITE_DDR : LUT_WRITE_SDR),
					      LUT_PAD(op->data.buswidth),
					      0);
		lutidx++;
	}

	/* Stop condition. */
	lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0);

	/* Unlock LUT (key write must precede the unlock bit) */
	writel(XSPI_LUT_KEY_VAL, xspi->iobase + XSPI_LUTKEY);
	writel(XSPI_LOKCR_UNLOCK, xspi->iobase + XSPI_LCKCR);

	/* Fill LUT */
	for (i = 0; i < ARRAY_SIZE(lutval); i++)
		writel(lutval[i], base + XSPI_LUT_REG(i));

	dev_dbg(xspi->dev, "CMD[%02x] lutval[0:%08x 1:%08x 2:%08x 3:%08x 4:%08x], size: 0x%08x\n",
		op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3], lutval[4],
		op->data.nbytes);

	/* Lock LUT */
	writel(XSPI_LUT_KEY_VAL, xspi->iobase + XSPI_LUTKEY);
	writel(XSPI_LOKCR_LOCK, xspi->iobase + XSPI_LCKCR);
}
484*29c8c00dSHaibo Chen 
/*
 * Switch the controller to SDR operation: clear DDR_EN, sample RX data via
 * the dummy-pad loopback clock, drop the extra TX data hold time and cap
 * the supported flash clock at 133 MHz. The MCR is only reconfigured while
 * the module is disabled (MDIS set), as the register sequence requires.
 */
static void nxp_xspi_disable_ddr(struct nxp_xspi *xspi)
{
	void __iomem *base = xspi->iobase;
	u32 reg;

	/* Disable module */
	reg = readl(base + XSPI_MCR);
	reg |= XSPI_MCR_MDIS;
	writel(reg, base + XSPI_MCR);

	reg &= ~XSPI_MCR_DDR_EN;
	reg &= ~XSPI_MCR_DQS_FA_SEL_MASK;
	/* Use dummy pad loopback mode to sample data */
	reg |= FIELD_PREP(XSPI_MCR_DQS_FA_SEL_MASK, 0x01);
	writel(reg, base + XSPI_MCR);
	xspi->support_max_rate = 133000000;

	/* No serial flash data hold time needed in SDR mode */
	reg = readl(base + XSPI_FLSHCR);
	reg &= ~XSPI_FLSHCR_TDH_MASK;
	writel(reg, base + XSPI_FLSHCR);

	/* Select sampling at inverted clock */
	reg = FIELD_PREP(XSPI_SMPR_DLLFSMPFA_MASK, 0) | XSPI_SMPR_FSPHS;
	writel(reg, base + XSPI_SMPR);

	/* Enable module */
	reg = readl(base + XSPI_MCR);
	reg &= ~XSPI_MCR_MDIS;
	writel(reg, base + XSPI_MCR);
}
515*29c8c00dSHaibo Chen 
/*
 * Switch the controller to DDR operation: set DDR_EN, sample RX data with
 * the external DQS signal, add one unit of TX data hold time and raise the
 * supported flash clock to 200 MHz. As with nxp_xspi_disable_ddr(), the
 * MCR is reconfigured only while the module is disabled (MDIS set).
 */
static void nxp_xspi_enable_ddr(struct nxp_xspi *xspi)
{
	void __iomem *base = xspi->iobase;
	u32 reg;

	/* Disable module */
	reg = readl(base + XSPI_MCR);
	reg |= XSPI_MCR_MDIS;
	writel(reg, base + XSPI_MCR);

	reg |= XSPI_MCR_DDR_EN;
	reg &= ~XSPI_MCR_DQS_FA_SEL_MASK;
	/* Use external dqs to sample data */
	reg |= FIELD_PREP(XSPI_MCR_DQS_FA_SEL_MASK, 0x03);
	writel(reg, base + XSPI_MCR);
	xspi->support_max_rate = 200000000;

	/* DDR mode needs one unit of serial flash data hold time */
	reg = readl(base + XSPI_FLSHCR);
	reg &= ~XSPI_FLSHCR_TDH_MASK;
	reg |= FIELD_PREP(XSPI_FLSHCR_TDH_MASK, 0x01);
	writel(reg, base + XSPI_FLSHCR);

	/* DLL-based fine sample point for flash A */
	reg = FIELD_PREP(XSPI_SMPR_DLLFSMPFA_MASK, 0x04);
	writel(reg, base + XSPI_SMPR);

	/* Enable module */
	reg = readl(base + XSPI_MCR);
	reg &= ~XSPI_MCR_MDIS;
	writel(reg, base + XSPI_MCR);
}
546*29c8c00dSHaibo Chen 
/*
 * Software-reset the serial flash memory domain, the AHB domain and the
 * IPS target group queue, honoring the RM-mandated module enable/disable
 * (MDIS) ordering around assert and deassert. The original MDIS state is
 * restored on exit.
 */
static void nxp_xspi_sw_reset(struct nxp_xspi *xspi)
{
	void __iomem *base = xspi->iobase;
	bool mdis_flag = false;
	u32 reg;
	int ret;

	reg = readl(base + XSPI_MCR);

	/*
	 * Per RM, when resetting SWRSTSD and SWRSTHD, XSPI must be
	 * enabled (MDIS = 0).
	 * So if MDIS is 1, it must be cleared before asserting SWRSTSD
	 * and SWRSTHD.
	 */
	if (reg & XSPI_MCR_MDIS) {
		reg &= ~XSPI_MCR_MDIS;
		writel(reg, base + XSPI_MCR);
		mdis_flag = true;
	}

	/* Software reset for AHB domain and Serial flash memory domain */
	reg |= XSPI_MCR_SWRSTHD | XSPI_MCR_SWRSTSD;
	/* Software Reset for IPS Target Group Queue 0 */
	reg |= XSPI_MCR_IPS_TG_RST;
	writel(reg, base + XSPI_MCR);

	/* IPS_TG_RST will self-clear to 0 once IPS_TG_RST complete */
	ret = readl_poll_timeout(base + XSPI_MCR, reg, !(reg & XSPI_MCR_IPS_TG_RST),
			      100, 5000);
	if (ret == -ETIMEDOUT)
		dev_warn(xspi->dev, "XSPI_MCR_IPS_TG_RST do not self-clear in 5ms!");

	/*
	 * Per RM, must wait for at least three system cycles and
	 * three flash cycles after changing the value of reset field.
	 * delay 5us for safe.
	 */
	fsleep(5);

	/*
	 * Per RM, before deasserting SWRSTSD and SWRSTHD, XSPI must be
	 * disabled (MDIS = 1).
	 */
	reg = readl(base + XSPI_MCR);
	reg |= XSPI_MCR_MDIS;
	writel(reg, base + XSPI_MCR);

	/* deassert software reset */
	reg &= ~(XSPI_MCR_SWRSTHD | XSPI_MCR_SWRSTSD);
	writel(reg, base + XSPI_MCR);

	/*
	 * Per RM, must wait for at least three system cycles and
	 * three flash cycles after changing the value of reset field.
	 * delay 5us for safe.
	 */
	fsleep(5);

	/* Re-enable XSPI if it was enabled at the beginning */
	if (!mdis_flag) {
		reg &= ~XSPI_MCR_MDIS;
		writel(reg, base + XSPI_MCR);
	}
}
612*29c8c00dSHaibo Chen 
/*
 * Configure DLL A with the slave delay chain bypassed (fixed coarse delay,
 * FREQEN set), then request a slave update and wait for the slave lock.
 * A controller software reset is performed first so the DLL starts from a
 * clean state. Lock failure is only reported, not treated as fatal.
 */
static void nxp_xspi_dll_bypass(struct nxp_xspi *xspi)
{
	void __iomem *base = xspi->iobase;
	int ret;
	u32 reg;

	nxp_xspi_sw_reset(xspi);

	/* Start from a fully cleared DLL configuration */
	writel(0, base + XSPI_DLLCRA);

	/* Set SLV EN first */
	reg = XSPI_DLLCRA_SLV_EN;
	writel(reg, base + XSPI_DLLCRA);

	reg = XSPI_DLLCRA_FREQEN |
	      FIELD_PREP(XSPI_DLLCRA_SLV_DLY_COARSE_MASK, 0x0) |
	      XSPI_DLLCRA_SLV_EN | XSPI_DLLCRA_SLV_DLL_BYPASS;
	writel(reg, base + XSPI_DLLCRA);

	/* Latch the new slave delay settings */
	reg |= XSPI_DLLCRA_SLV_UPD;
	writel(reg, base + XSPI_DLLCRA);

	ret = readl_poll_timeout(base + XSPI_DLLSR, reg,
			      reg & XSPI_DLLSR_SLVA_LOCK, 0, POLL_TOUT_US);
	if (ret)
		dev_err(xspi->dev,
			"DLL SLVA unlock, the DLL status is %x, need to check!\n",
			readl(base + XSPI_DLLSR));
}
642*29c8c00dSHaibo Chen 
643*29c8c00dSHaibo Chen static void nxp_xspi_dll_auto(struct nxp_xspi *xspi, unsigned long rate)
644*29c8c00dSHaibo Chen {
645*29c8c00dSHaibo Chen 	void __iomem *base = xspi->iobase;
646*29c8c00dSHaibo Chen 	int ret;
647*29c8c00dSHaibo Chen 	u32 reg;
648*29c8c00dSHaibo Chen 
649*29c8c00dSHaibo Chen 	nxp_xspi_sw_reset(xspi);
650*29c8c00dSHaibo Chen 
651*29c8c00dSHaibo Chen 	writel(0, base + XSPI_DLLCRA);
652*29c8c00dSHaibo Chen 
653*29c8c00dSHaibo Chen 	/* Set SLV EN first */
654*29c8c00dSHaibo Chen 	reg = XSPI_DLLCRA_SLV_EN;
655*29c8c00dSHaibo Chen 	writel(reg, base + XSPI_DLLCRA);
656*29c8c00dSHaibo Chen 
657*29c8c00dSHaibo Chen 	reg = FIELD_PREP(XSPI_DLLCRA_DLL_REFCNTR_MASK, 0x02) |
658*29c8c00dSHaibo Chen 	      FIELD_PREP(XSPI_DLLCRA_DLLRES_MASK, 0x08) |
659*29c8c00dSHaibo Chen 	      XSPI_DLLCRA_SLAVE_AUTO_UPDT | XSPI_DLLCRA_SLV_EN;
660*29c8c00dSHaibo Chen 	if (rate > 133000000)
661*29c8c00dSHaibo Chen 		reg |= XSPI_DLLCRA_FREQEN;
662*29c8c00dSHaibo Chen 
663*29c8c00dSHaibo Chen 	writel(reg, base + XSPI_DLLCRA);
664*29c8c00dSHaibo Chen 
665*29c8c00dSHaibo Chen 	reg |= XSPI_DLLCRA_SLV_UPD;
666*29c8c00dSHaibo Chen 	writel(reg, base + XSPI_DLLCRA);
667*29c8c00dSHaibo Chen 
668*29c8c00dSHaibo Chen 	reg |= XSPI_DLLCRA_DLLEN;
669*29c8c00dSHaibo Chen 	writel(reg, base + XSPI_DLLCRA);
670*29c8c00dSHaibo Chen 
671*29c8c00dSHaibo Chen 	ret = readl_poll_timeout(base + XSPI_DLLSR, reg,
672*29c8c00dSHaibo Chen 			      reg & XSPI_DLLSR_DLLA_LOCK, 0, POLL_TOUT_US);
673*29c8c00dSHaibo Chen 	if (ret)
674*29c8c00dSHaibo Chen 		dev_err(xspi->dev,
675*29c8c00dSHaibo Chen 			"DLL unlock, the DLL status is %x, need to check!\n",
676*29c8c00dSHaibo Chen 			readl(base + XSPI_DLLSR));
677*29c8c00dSHaibo Chen 
678*29c8c00dSHaibo Chen 	ret = readl_poll_timeout(base + XSPI_DLLSR, reg,
679*29c8c00dSHaibo Chen 			      reg & XSPI_DLLSR_SLVA_LOCK, 0, POLL_TOUT_US);
680*29c8c00dSHaibo Chen 	if (ret)
681*29c8c00dSHaibo Chen 		dev_err(xspi->dev,
682*29c8c00dSHaibo Chen 			"DLL SLVA unlock, the DLL status is %x, need to check!\n",
683*29c8c00dSHaibo Chen 			readl(base + XSPI_DLLSR));
684*29c8c00dSHaibo Chen }
685*29c8c00dSHaibo Chen 
686*29c8c00dSHaibo Chen static void nxp_xspi_select_mem(struct nxp_xspi *xspi, struct spi_device *spi,
687*29c8c00dSHaibo Chen 				const struct spi_mem_op *op)
688*29c8c00dSHaibo Chen {
689*29c8c00dSHaibo Chen 	/* xspi only support one DTR mode: 8D-8D-8D */
690*29c8c00dSHaibo Chen 	bool op_is_dtr = op->cmd.dtr && op->addr.dtr && op->dummy.dtr && op->data.dtr;
691*29c8c00dSHaibo Chen 	unsigned long root_clk_rate, rate;
692*29c8c00dSHaibo Chen 	uint64_t cs0_top_address;
693*29c8c00dSHaibo Chen 	uint64_t cs1_top_address;
694*29c8c00dSHaibo Chen 	u32 reg;
695*29c8c00dSHaibo Chen 	int ret;
696*29c8c00dSHaibo Chen 
697*29c8c00dSHaibo Chen 	/*
698*29c8c00dSHaibo Chen 	 * Return when following condition all meet,
699*29c8c00dSHaibo Chen 	 * 1, if previously selected target device is same as current
700*29c8c00dSHaibo Chen 	 *    requested target device.
701*29c8c00dSHaibo Chen 	 * 2, the DTR or STR mode do not change.
702*29c8c00dSHaibo Chen 	 * 3, previous operation max rate equals current one.
703*29c8c00dSHaibo Chen 	 *
704*29c8c00dSHaibo Chen 	 * For other case, need to re-config.
705*29c8c00dSHaibo Chen 	 */
706*29c8c00dSHaibo Chen 	if (xspi->selected == spi_get_chipselect(spi, 0) &&
707*29c8c00dSHaibo Chen 	    (!!(xspi->flags & XSPI_DTR_PROTO) == op_is_dtr) &&
708*29c8c00dSHaibo Chen 	    (xspi->pre_op_rate == op->max_freq))
709*29c8c00dSHaibo Chen 		return;
710*29c8c00dSHaibo Chen 
711*29c8c00dSHaibo Chen 	if (op_is_dtr) {
712*29c8c00dSHaibo Chen 		nxp_xspi_enable_ddr(xspi);
713*29c8c00dSHaibo Chen 		xspi->flags |= XSPI_DTR_PROTO;
714*29c8c00dSHaibo Chen 	} else {
715*29c8c00dSHaibo Chen 		nxp_xspi_disable_ddr(xspi);
716*29c8c00dSHaibo Chen 		xspi->flags &= ~XSPI_DTR_PROTO;
717*29c8c00dSHaibo Chen 	}
718*29c8c00dSHaibo Chen 	rate = min_t(unsigned long, xspi->support_max_rate, op->max_freq);
719*29c8c00dSHaibo Chen 	/*
720*29c8c00dSHaibo Chen 	 * There is two dividers between xspi_clk_root(from SoC CCM) and xspi_sfif.
721*29c8c00dSHaibo Chen 	 * xspi_clk_root ---->divider1 ----> ipg_clk_2xsfif
722*29c8c00dSHaibo Chen 	 *                              |
723*29c8c00dSHaibo Chen 	 *                              |
724*29c8c00dSHaibo Chen 	 *                              |---> divider2 ---> ipg_clk_sfif
725*29c8c00dSHaibo Chen 	 * divider1 is controlled by SOCCR, SOCCR default value is 0.
726*29c8c00dSHaibo Chen 	 * divider2 fix to divide 2.
727*29c8c00dSHaibo Chen 	 * when SOCCR = 0:
728*29c8c00dSHaibo Chen 	 *        ipg_clk_2xsfif = xspi_clk_root
729*29c8c00dSHaibo Chen 	 *        ipg_clk_sfif = ipg_clk_2xsfif / 2 = xspi_clk_root / 2
730*29c8c00dSHaibo Chen 	 * ipg_clk_2xsfif is used for DTR mode.
731*29c8c00dSHaibo Chen 	 * xspi_sck(output to device) is defined based on xspi_sfif clock.
732*29c8c00dSHaibo Chen 	 */
733*29c8c00dSHaibo Chen 	root_clk_rate = rate * 2;
734*29c8c00dSHaibo Chen 
735*29c8c00dSHaibo Chen 	clk_disable_unprepare(xspi->clk);
736*29c8c00dSHaibo Chen 
737*29c8c00dSHaibo Chen 	ret = clk_set_rate(xspi->clk, root_clk_rate);
738*29c8c00dSHaibo Chen 	if (ret)
739*29c8c00dSHaibo Chen 		return;
740*29c8c00dSHaibo Chen 
741*29c8c00dSHaibo Chen 	ret = clk_prepare_enable(xspi->clk);
742*29c8c00dSHaibo Chen 	if (ret)
743*29c8c00dSHaibo Chen 		return;
744*29c8c00dSHaibo Chen 
745*29c8c00dSHaibo Chen 	xspi->pre_op_rate = op->max_freq;
746*29c8c00dSHaibo Chen 	xspi->selected = spi_get_chipselect(spi, 0);
747*29c8c00dSHaibo Chen 
748*29c8c00dSHaibo Chen 	if (xspi->selected) {		/* CS1 select */
749*29c8c00dSHaibo Chen 		cs0_top_address = xspi->memmap_phy;
750*29c8c00dSHaibo Chen 		cs1_top_address = SZ_4G - 1;
751*29c8c00dSHaibo Chen 	} else {			/* CS0 select */
752*29c8c00dSHaibo Chen 		cs0_top_address = SZ_4G - 1;
753*29c8c00dSHaibo Chen 		cs1_top_address = SZ_4G - 1;
754*29c8c00dSHaibo Chen 	}
755*29c8c00dSHaibo Chen 	writel(cs0_top_address, xspi->iobase + XSPI_SFA1AD);
756*29c8c00dSHaibo Chen 	writel(cs1_top_address, xspi->iobase + XSPI_SFA2AD);
757*29c8c00dSHaibo Chen 
758*29c8c00dSHaibo Chen 	reg = readl(xspi->iobase + XSPI_SFACR);
759*29c8c00dSHaibo Chen 	if (op->data.swap16)
760*29c8c00dSHaibo Chen 		reg |= XSPI_SFACR_BYTE_SWAP;
761*29c8c00dSHaibo Chen 	else
762*29c8c00dSHaibo Chen 		reg &= ~XSPI_SFACR_BYTE_SWAP;
763*29c8c00dSHaibo Chen 	writel(reg, xspi->iobase + XSPI_SFACR);
764*29c8c00dSHaibo Chen 
765*29c8c00dSHaibo Chen 	if (!op_is_dtr || rate < 60000000)
766*29c8c00dSHaibo Chen 		nxp_xspi_dll_bypass(xspi);
767*29c8c00dSHaibo Chen 	else
768*29c8c00dSHaibo Chen 		nxp_xspi_dll_auto(xspi, rate);
769*29c8c00dSHaibo Chen }
770*29c8c00dSHaibo Chen 
771*29c8c00dSHaibo Chen static int nxp_xspi_ahb_read(struct nxp_xspi *xspi, const struct spi_mem_op *op)
772*29c8c00dSHaibo Chen {
773*29c8c00dSHaibo Chen 	u32 start = op->addr.val;
774*29c8c00dSHaibo Chen 	u32 len = op->data.nbytes;
775*29c8c00dSHaibo Chen 
776*29c8c00dSHaibo Chen 	/* If necessary, ioremap before AHB read */
777*29c8c00dSHaibo Chen 	if ((!xspi->ahb_addr) || start < xspi->memmap_start ||
778*29c8c00dSHaibo Chen 	     start + len > xspi->memmap_start + xspi->memmap_len) {
779*29c8c00dSHaibo Chen 		if (xspi->ahb_addr)
780*29c8c00dSHaibo Chen 			iounmap(xspi->ahb_addr);
781*29c8c00dSHaibo Chen 
782*29c8c00dSHaibo Chen 		xspi->memmap_start = start;
783*29c8c00dSHaibo Chen 		xspi->memmap_len = len > NXP_XSPI_MIN_IOMAP ?
784*29c8c00dSHaibo Chen 				len : NXP_XSPI_MIN_IOMAP;
785*29c8c00dSHaibo Chen 
786*29c8c00dSHaibo Chen 		xspi->ahb_addr = ioremap(xspi->memmap_phy + xspi->memmap_start,
787*29c8c00dSHaibo Chen 					 xspi->memmap_len);
788*29c8c00dSHaibo Chen 
789*29c8c00dSHaibo Chen 		if (!xspi->ahb_addr) {
790*29c8c00dSHaibo Chen 			dev_err(xspi->dev, "failed to alloc memory\n");
791*29c8c00dSHaibo Chen 			return -ENOMEM;
792*29c8c00dSHaibo Chen 		}
793*29c8c00dSHaibo Chen 	}
794*29c8c00dSHaibo Chen 
795*29c8c00dSHaibo Chen 	/* Read out the data directly from the AHB buffer. */
796*29c8c00dSHaibo Chen 	memcpy_fromio(op->data.buf.in,
797*29c8c00dSHaibo Chen 			xspi->ahb_addr + start - xspi->memmap_start, len);
798*29c8c00dSHaibo Chen 
799*29c8c00dSHaibo Chen 	return 0;
800*29c8c00dSHaibo Chen }
801*29c8c00dSHaibo Chen 
802*29c8c00dSHaibo Chen static int nxp_xspi_fill_txfifo(struct nxp_xspi *xspi,
803*29c8c00dSHaibo Chen 				 const struct spi_mem_op *op)
804*29c8c00dSHaibo Chen {
805*29c8c00dSHaibo Chen 	void __iomem *base = xspi->iobase;
806*29c8c00dSHaibo Chen 	u8 *buf = (u8 *)op->data.buf.out;
807*29c8c00dSHaibo Chen 	u32 reg, left;
808*29c8c00dSHaibo Chen 	int i;
809*29c8c00dSHaibo Chen 
810*29c8c00dSHaibo Chen 	for (i = 0; i < ALIGN(op->data.nbytes, 4); i += 4) {
811*29c8c00dSHaibo Chen 		reg = readl(base + XSPI_FR);
812*29c8c00dSHaibo Chen 		reg |= XSPI_FR_TBFF;
813*29c8c00dSHaibo Chen 		writel(reg, base + XSPI_FR);
814*29c8c00dSHaibo Chen 		/* Read again to check whether the tx fifo has rom */
815*29c8c00dSHaibo Chen 		reg = readl(base + XSPI_FR);
816*29c8c00dSHaibo Chen 		if (!(reg & XSPI_FR_TBFF)) {
817*29c8c00dSHaibo Chen 			WARN_ON(1);
818*29c8c00dSHaibo Chen 			return -EIO;
819*29c8c00dSHaibo Chen 		}
820*29c8c00dSHaibo Chen 
821*29c8c00dSHaibo Chen 		if (i == ALIGN_DOWN(op->data.nbytes, 4)) {
822*29c8c00dSHaibo Chen 			/* Use 0xFF for extra bytes */
823*29c8c00dSHaibo Chen 			left = 0xFFFFFFFF;
824*29c8c00dSHaibo Chen 			/* The last 1 to 3 bytes */
825*29c8c00dSHaibo Chen 			memcpy((u8 *)&left, buf + i, op->data.nbytes - i);
826*29c8c00dSHaibo Chen 			writel(left, base + XSPI_TBDR);
827*29c8c00dSHaibo Chen 		} else {
828*29c8c00dSHaibo Chen 			writel(*(u32 *)(buf + i), base + XSPI_TBDR);
829*29c8c00dSHaibo Chen 		}
830*29c8c00dSHaibo Chen 	}
831*29c8c00dSHaibo Chen 
832*29c8c00dSHaibo Chen 	return 0;
833*29c8c00dSHaibo Chen }
834*29c8c00dSHaibo Chen 
835*29c8c00dSHaibo Chen static int nxp_xspi_read_rxfifo(struct nxp_xspi *xspi,
836*29c8c00dSHaibo Chen 				const struct spi_mem_op *op)
837*29c8c00dSHaibo Chen {
838*29c8c00dSHaibo Chen 	u32 watermark, watermark_bytes, reg;
839*29c8c00dSHaibo Chen 	void __iomem *base = xspi->iobase;
840*29c8c00dSHaibo Chen 	u8 *buf = (u8 *) op->data.buf.in;
841*29c8c00dSHaibo Chen 	int i, ret, len;
842*29c8c00dSHaibo Chen 
843*29c8c00dSHaibo Chen 	/*
844*29c8c00dSHaibo Chen 	 * Config the rx watermark half of the 64 memory-mapped RX data buffer RBDRn
845*29c8c00dSHaibo Chen 	 * refer to the RBCT config in nxp_xspi_do_op()
846*29c8c00dSHaibo Chen 	 */
847*29c8c00dSHaibo Chen 	watermark = 32;
848*29c8c00dSHaibo Chen 	watermark_bytes = watermark * 4;
849*29c8c00dSHaibo Chen 
850*29c8c00dSHaibo Chen 	len = op->data.nbytes;
851*29c8c00dSHaibo Chen 
852*29c8c00dSHaibo Chen 	while (len >= watermark_bytes) {
853*29c8c00dSHaibo Chen 		/* Make sure the RX FIFO contains valid data before read */
854*29c8c00dSHaibo Chen 		ret = readl_poll_timeout(base + XSPI_FR, reg,
855*29c8c00dSHaibo Chen 				      reg & XSPI_FR_RBDF, 0, POLL_TOUT_US);
856*29c8c00dSHaibo Chen 		if (ret) {
857*29c8c00dSHaibo Chen 			WARN_ON(1);
858*29c8c00dSHaibo Chen 			return ret;
859*29c8c00dSHaibo Chen 		}
860*29c8c00dSHaibo Chen 
861*29c8c00dSHaibo Chen 		for (i = 0; i < watermark; i++)
862*29c8c00dSHaibo Chen 			*(u32 *)(buf + i * 4) = readl(base + XSPI_RBDR0 + i * 4);
863*29c8c00dSHaibo Chen 
864*29c8c00dSHaibo Chen 		len = len - watermark_bytes;
865*29c8c00dSHaibo Chen 		buf = buf + watermark_bytes;
866*29c8c00dSHaibo Chen 		/* Pop up data to RXFIFO for next read. */
867*29c8c00dSHaibo Chen 		reg = readl(base + XSPI_FR);
868*29c8c00dSHaibo Chen 		reg |= XSPI_FR_RBDF;
869*29c8c00dSHaibo Chen 		writel(reg, base + XSPI_FR);
870*29c8c00dSHaibo Chen 	}
871*29c8c00dSHaibo Chen 
872*29c8c00dSHaibo Chen 	/* Wait for the total data transfer finished */
873*29c8c00dSHaibo Chen 	ret = readl_poll_timeout(base + XSPI_SR, reg, !(reg & XSPI_SR_BUSY), 0, POLL_TOUT_US);
874*29c8c00dSHaibo Chen 	if (ret) {
875*29c8c00dSHaibo Chen 		WARN_ON(1);
876*29c8c00dSHaibo Chen 		return ret;
877*29c8c00dSHaibo Chen 	}
878*29c8c00dSHaibo Chen 
879*29c8c00dSHaibo Chen 	i = 0;
880*29c8c00dSHaibo Chen 	while (len >= 4) {
881*29c8c00dSHaibo Chen 		*(u32 *)(buf) = readl(base + XSPI_RBDR0 + i);
882*29c8c00dSHaibo Chen 		i += 4;
883*29c8c00dSHaibo Chen 		len -= 4;
884*29c8c00dSHaibo Chen 		buf += 4;
885*29c8c00dSHaibo Chen 	}
886*29c8c00dSHaibo Chen 
887*29c8c00dSHaibo Chen 	if (len > 0) {
888*29c8c00dSHaibo Chen 		reg = readl(base + XSPI_RBDR0 + i);
889*29c8c00dSHaibo Chen 		memcpy(buf, (u8 *)&reg, len);
890*29c8c00dSHaibo Chen 	}
891*29c8c00dSHaibo Chen 
892*29c8c00dSHaibo Chen 	/* Invalid RXFIFO first */
893*29c8c00dSHaibo Chen 	reg = readl(base + XSPI_MCR);
894*29c8c00dSHaibo Chen 	reg |= XSPI_MCR_CLR_RXF;
895*29c8c00dSHaibo Chen 	writel(reg, base + XSPI_MCR);
896*29c8c00dSHaibo Chen 	/* Wait for the CLR_RXF clear */
897*29c8c00dSHaibo Chen 	ret = readl_poll_timeout(base + XSPI_MCR, reg,
898*29c8c00dSHaibo Chen 			      !(reg & XSPI_MCR_CLR_RXF), 1, POLL_TOUT_US);
899*29c8c00dSHaibo Chen 	WARN_ON(ret);
900*29c8c00dSHaibo Chen 
901*29c8c00dSHaibo Chen 	return ret;
902*29c8c00dSHaibo Chen }
903*29c8c00dSHaibo Chen 
904*29c8c00dSHaibo Chen static int nxp_xspi_do_op(struct nxp_xspi *xspi, const struct spi_mem_op *op)
905*29c8c00dSHaibo Chen {
906*29c8c00dSHaibo Chen 	void __iomem *base = xspi->iobase;
907*29c8c00dSHaibo Chen 	int watermark, err = 0;
908*29c8c00dSHaibo Chen 	u32 reg, len;
909*29c8c00dSHaibo Chen 
910*29c8c00dSHaibo Chen 	len = op->data.nbytes;
911*29c8c00dSHaibo Chen 	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT) {
912*29c8c00dSHaibo Chen 		/* Clear the TX FIFO. */
913*29c8c00dSHaibo Chen 		reg = readl(base + XSPI_MCR);
914*29c8c00dSHaibo Chen 		reg |= XSPI_MCR_CLR_TXF;
915*29c8c00dSHaibo Chen 		writel(reg, base + XSPI_MCR);
916*29c8c00dSHaibo Chen 		/* Wait for the CLR_TXF clear */
917*29c8c00dSHaibo Chen 		err = readl_poll_timeout(base + XSPI_MCR, reg,
918*29c8c00dSHaibo Chen 				      !(reg & XSPI_MCR_CLR_TXF), 1, POLL_TOUT_US);
919*29c8c00dSHaibo Chen 		if (err) {
920*29c8c00dSHaibo Chen 			WARN_ON(1);
921*29c8c00dSHaibo Chen 			return err;
922*29c8c00dSHaibo Chen 		}
923*29c8c00dSHaibo Chen 
924*29c8c00dSHaibo Chen 		/* Cover the no 4bytes alignment data length */
925*29c8c00dSHaibo Chen 		watermark = (xspi->devtype_data->txfifo - ALIGN(op->data.nbytes, 4)) / 4 + 1;
926*29c8c00dSHaibo Chen 		reg = FIELD_PREP(XSPI_TBCT_WMRK_MASK, watermark);
927*29c8c00dSHaibo Chen 		writel(reg, base + XSPI_TBCT);
928*29c8c00dSHaibo Chen 		/*
929*29c8c00dSHaibo Chen 		 * According to the RM, for TBDR register, a write transaction on the
930*29c8c00dSHaibo Chen 		 * flash memory with data size of less than 32 bits leads to the removal
931*29c8c00dSHaibo Chen 		 * of one data entry from the TX buffer. The valid bits are used and the
932*29c8c00dSHaibo Chen 		 * rest of the bits are discarded.
933*29c8c00dSHaibo Chen 		 * But for data size large than 32 bits, according to test, for no 4bytes
934*29c8c00dSHaibo Chen 		 * alignment data, the last 1~3 bytes will lost, because TX buffer use
935*29c8c00dSHaibo Chen 		 * 4 bytes entries.
936*29c8c00dSHaibo Chen 		 * So here adjust the transfer data length to make it 4bytes alignment.
937*29c8c00dSHaibo Chen 		 * then will meet the upper watermark setting, trigger the 4bytes entries
938*29c8c00dSHaibo Chen 		 * pop out.
939*29c8c00dSHaibo Chen 		 * Will use extra 0xff to append, refer to nxp_xspi_fill_txfifo().
940*29c8c00dSHaibo Chen 		 */
941*29c8c00dSHaibo Chen 		if (len > 4)
942*29c8c00dSHaibo Chen 			len = ALIGN(op->data.nbytes, 4);
943*29c8c00dSHaibo Chen 
944*29c8c00dSHaibo Chen 	} else if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN) {
945*29c8c00dSHaibo Chen 		/* Invalid RXFIFO first */
946*29c8c00dSHaibo Chen 		reg = readl(base + XSPI_MCR);
947*29c8c00dSHaibo Chen 		reg |= XSPI_MCR_CLR_RXF;
948*29c8c00dSHaibo Chen 		writel(reg, base + XSPI_MCR);
949*29c8c00dSHaibo Chen 		/* Wait for the CLR_RXF clear */
950*29c8c00dSHaibo Chen 		err = readl_poll_timeout(base + XSPI_MCR, reg,
951*29c8c00dSHaibo Chen 				      !(reg & XSPI_MCR_CLR_RXF), 1, POLL_TOUT_US);
952*29c8c00dSHaibo Chen 		if (err) {
953*29c8c00dSHaibo Chen 			WARN_ON(1);
954*29c8c00dSHaibo Chen 			return err;
955*29c8c00dSHaibo Chen 		}
956*29c8c00dSHaibo Chen 
957*29c8c00dSHaibo Chen 		reg = FIELD_PREP(XSPI_RBCT_WMRK_MASK, 31);
958*29c8c00dSHaibo Chen 		writel(reg, base + XSPI_RBCT);
959*29c8c00dSHaibo Chen 	}
960*29c8c00dSHaibo Chen 
961*29c8c00dSHaibo Chen 	init_completion(&xspi->c);
962*29c8c00dSHaibo Chen 
963*29c8c00dSHaibo Chen 	/* Config the data address */
964*29c8c00dSHaibo Chen 	writel(op->addr.val + xspi->memmap_phy, base + XSPI_SFP_TG_SFAR);
965*29c8c00dSHaibo Chen 
966*29c8c00dSHaibo Chen 	/* Config the data size and lut id, trigger the transfer */
967*29c8c00dSHaibo Chen 	reg = FIELD_PREP(XSPI_SFP_TG_IPCR_SEQID_MASK, XSPI_SEQID_LUT) |
968*29c8c00dSHaibo Chen 	      FIELD_PREP(XSPI_SFP_TG_IPCR_IDATSZ_MASK, len);
969*29c8c00dSHaibo Chen 	writel(reg, base + XSPI_SFP_TG_IPCR);
970*29c8c00dSHaibo Chen 
971*29c8c00dSHaibo Chen 	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT) {
972*29c8c00dSHaibo Chen 		err = nxp_xspi_fill_txfifo(xspi, op);
973*29c8c00dSHaibo Chen 		if (err)
974*29c8c00dSHaibo Chen 			return err;
975*29c8c00dSHaibo Chen 	}
976*29c8c00dSHaibo Chen 
977*29c8c00dSHaibo Chen 	/* Wait for the interrupt. */
978*29c8c00dSHaibo Chen 	if (!wait_for_completion_timeout(&xspi->c, msecs_to_jiffies(1000)))
979*29c8c00dSHaibo Chen 		err = -ETIMEDOUT;
980*29c8c00dSHaibo Chen 
981*29c8c00dSHaibo Chen 	/* Invoke IP data read. */
982*29c8c00dSHaibo Chen 	if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
983*29c8c00dSHaibo Chen 		err = nxp_xspi_read_rxfifo(xspi, op);
984*29c8c00dSHaibo Chen 
985*29c8c00dSHaibo Chen 	return err;
986*29c8c00dSHaibo Chen }
987*29c8c00dSHaibo Chen 
988*29c8c00dSHaibo Chen static int nxp_xspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
989*29c8c00dSHaibo Chen {
990*29c8c00dSHaibo Chen 	struct nxp_xspi *xspi = spi_controller_get_devdata(mem->spi->controller);
991*29c8c00dSHaibo Chen 	void __iomem *base = xspi->iobase;
992*29c8c00dSHaibo Chen 	u32 reg;
993*29c8c00dSHaibo Chen 	int err;
994*29c8c00dSHaibo Chen 
995*29c8c00dSHaibo Chen 	guard(mutex)(&xspi->lock);
996*29c8c00dSHaibo Chen 
997*29c8c00dSHaibo Chen 	PM_RUNTIME_ACQUIRE_AUTOSUSPEND(xspi->dev, pm);
998*29c8c00dSHaibo Chen 	err = PM_RUNTIME_ACQUIRE_ERR(&pm);
999*29c8c00dSHaibo Chen 	if (err)
1000*29c8c00dSHaibo Chen 		return err;
1001*29c8c00dSHaibo Chen 
1002*29c8c00dSHaibo Chen 	/* Wait for controller being ready. */
1003*29c8c00dSHaibo Chen 	err = readl_poll_timeout(base + XSPI_SR, reg,
1004*29c8c00dSHaibo Chen 			      !(reg & XSPI_SR_BUSY), 1, POLL_TOUT_US);
1005*29c8c00dSHaibo Chen 	if (err) {
1006*29c8c00dSHaibo Chen 		dev_err(xspi->dev, "SR keeps in BUSY!");
1007*29c8c00dSHaibo Chen 		return err;
1008*29c8c00dSHaibo Chen 	}
1009*29c8c00dSHaibo Chen 
1010*29c8c00dSHaibo Chen 	nxp_xspi_select_mem(xspi, mem->spi, op);
1011*29c8c00dSHaibo Chen 
1012*29c8c00dSHaibo Chen 	nxp_xspi_prepare_lut(xspi, op);
1013*29c8c00dSHaibo Chen 
1014*29c8c00dSHaibo Chen 	/*
1015*29c8c00dSHaibo Chen 	 * For read:
1016*29c8c00dSHaibo Chen 	 *     the address in AHB mapped range will use AHB read.
1017*29c8c00dSHaibo Chen 	 *     the address out of AHB mapped range will use IP read.
1018*29c8c00dSHaibo Chen 	 * For write:
1019*29c8c00dSHaibo Chen 	 *     all use IP write.
1020*29c8c00dSHaibo Chen 	 */
1021*29c8c00dSHaibo Chen 	if ((op->data.dir == SPI_MEM_DATA_IN) && !needs_ip_only(xspi)
1022*29c8c00dSHaibo Chen 		&& ((op->addr.val + op->data.nbytes) <= xspi->memmap_phy_size))
1023*29c8c00dSHaibo Chen 		err = nxp_xspi_ahb_read(xspi, op);
1024*29c8c00dSHaibo Chen 	else
1025*29c8c00dSHaibo Chen 		err = nxp_xspi_do_op(xspi, op);
1026*29c8c00dSHaibo Chen 
1027*29c8c00dSHaibo Chen 	nxp_xspi_sw_reset(xspi);
1028*29c8c00dSHaibo Chen 
1029*29c8c00dSHaibo Chen 	return err;
1030*29c8c00dSHaibo Chen }
1031*29c8c00dSHaibo Chen 
1032*29c8c00dSHaibo Chen static int nxp_xspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
1033*29c8c00dSHaibo Chen {
1034*29c8c00dSHaibo Chen 	struct nxp_xspi *xspi = spi_controller_get_devdata(mem->spi->controller);
1035*29c8c00dSHaibo Chen 
1036*29c8c00dSHaibo Chen 	if (op->data.dir == SPI_MEM_DATA_OUT) {
1037*29c8c00dSHaibo Chen 		if (op->data.nbytes > xspi->devtype_data->txfifo)
1038*29c8c00dSHaibo Chen 			op->data.nbytes = xspi->devtype_data->txfifo;
1039*29c8c00dSHaibo Chen 	} else {
1040*29c8c00dSHaibo Chen 		/* Limit data bytes to RX FIFO in case of IP read only */
1041*29c8c00dSHaibo Chen 		if (needs_ip_only(xspi) && (op->data.nbytes > xspi->devtype_data->rxfifo))
1042*29c8c00dSHaibo Chen 			op->data.nbytes = xspi->devtype_data->rxfifo;
1043*29c8c00dSHaibo Chen 
1044*29c8c00dSHaibo Chen 		/* Address in AHB mapped range prefer to use AHB read. */
1045*29c8c00dSHaibo Chen 		if (!needs_ip_only(xspi) && (op->addr.val < xspi->memmap_phy_size)
1046*29c8c00dSHaibo Chen 			&& ((op->addr.val + op->data.nbytes) > xspi->memmap_phy_size))
1047*29c8c00dSHaibo Chen 			op->data.nbytes = xspi->memmap_phy_size - op->addr.val;
1048*29c8c00dSHaibo Chen 	}
1049*29c8c00dSHaibo Chen 
1050*29c8c00dSHaibo Chen 	return 0;
1051*29c8c00dSHaibo Chen }
1052*29c8c00dSHaibo Chen 
1053*29c8c00dSHaibo Chen static void nxp_xspi_config_ahb_buffer(struct nxp_xspi *xspi)
1054*29c8c00dSHaibo Chen {
1055*29c8c00dSHaibo Chen 	void __iomem *base = xspi->iobase;
1056*29c8c00dSHaibo Chen 	u32 ahb_data_trans_size;
1057*29c8c00dSHaibo Chen 	u32 reg;
1058*29c8c00dSHaibo Chen 
1059*29c8c00dSHaibo Chen 	writel(0xA, base + XSPI_BUF0CR);
1060*29c8c00dSHaibo Chen 	writel(0x2, base + XSPI_BUF1CR);
1061*29c8c00dSHaibo Chen 	writel(0xD, base + XSPI_BUF2CR);
1062*29c8c00dSHaibo Chen 
1063*29c8c00dSHaibo Chen 	/* Configure buffer3 for All Master Access */
1064*29c8c00dSHaibo Chen 	reg = FIELD_PREP(XSPI_BUF3CR_MSTRID_MASK, 0x06) |
1065*29c8c00dSHaibo Chen 	      XSPI_BUF3CR_ALLMST;
1066*29c8c00dSHaibo Chen 
1067*29c8c00dSHaibo Chen 	ahb_data_trans_size = xspi->devtype_data->ahb_buf_size / 8;
1068*29c8c00dSHaibo Chen 	reg |= FIELD_PREP(XSPI_BUF3CR_ADATSZ_MASK, ahb_data_trans_size);
1069*29c8c00dSHaibo Chen 	writel(reg, base + XSPI_BUF3CR);
1070*29c8c00dSHaibo Chen 
1071*29c8c00dSHaibo Chen 	/* Only the buffer3 is used */
1072*29c8c00dSHaibo Chen 	writel(0, base + XSPI_BUF0IND);
1073*29c8c00dSHaibo Chen 	writel(0, base + XSPI_BUF1IND);
1074*29c8c00dSHaibo Chen 	writel(0, base + XSPI_BUF2IND);
1075*29c8c00dSHaibo Chen 
1076*29c8c00dSHaibo Chen 	/* AHB only use ID=15 for read */
1077*29c8c00dSHaibo Chen 	reg = FIELD_PREP(XSPI_BFGENCR_SEQID_MASK, XSPI_SEQID_LUT);
1078*29c8c00dSHaibo Chen 	reg |= XSPI_BFGENCR_WR_FLUSH_EN;
1079*29c8c00dSHaibo Chen 	/* No limit for align */
1080*29c8c00dSHaibo Chen 	reg |= FIELD_PREP(XSPI_BFGENCR_ALIGN_MASK, 0);
1081*29c8c00dSHaibo Chen 	writel(reg, base + XSPI_BFGENCR);
1082*29c8c00dSHaibo Chen }
1083*29c8c00dSHaibo Chen 
1084*29c8c00dSHaibo Chen static int nxp_xspi_default_setup(struct nxp_xspi *xspi)
1085*29c8c00dSHaibo Chen {
1086*29c8c00dSHaibo Chen 	void __iomem *base = xspi->iobase;
1087*29c8c00dSHaibo Chen 	u32 reg;
1088*29c8c00dSHaibo Chen 
1089*29c8c00dSHaibo Chen 	/* Bypass SFP check, clear MGC_GVLD, MGC_GVLDMDAD, MGC_GVLDFRAD */
1090*29c8c00dSHaibo Chen 	writel(0, base + XSPI_MGC);
1091*29c8c00dSHaibo Chen 
1092*29c8c00dSHaibo Chen 	/* Enable the EENV0 SFP check */
1093*29c8c00dSHaibo Chen 	reg = readl(base + XSPI_TG0MDAD);
1094*29c8c00dSHaibo Chen 	reg |= XSPI_TG0MDAD_VLD;
1095*29c8c00dSHaibo Chen 	writel(reg, base + XSPI_TG0MDAD);
1096*29c8c00dSHaibo Chen 
1097*29c8c00dSHaibo Chen 	/* Give read/write access right to EENV0 */
1098*29c8c00dSHaibo Chen 	reg = readl(base + XSPI_FRAD0_WORD2);
1099*29c8c00dSHaibo Chen 	reg &= ~XSPI_FRAD0_WORD2_MD0ACP_MASK;
1100*29c8c00dSHaibo Chen 	reg |= FIELD_PREP(XSPI_FRAD0_WORD2_MD0ACP_MASK, 0x03);
1101*29c8c00dSHaibo Chen 	writel(reg, base + XSPI_FRAD0_WORD2);
1102*29c8c00dSHaibo Chen 
1103*29c8c00dSHaibo Chen 	/* Enable the FRAD check for EENV0 */
1104*29c8c00dSHaibo Chen 	reg = readl(base + XSPI_FRAD0_WORD3);
1105*29c8c00dSHaibo Chen 	reg |= XSPI_FRAD0_WORD3_VLD;
1106*29c8c00dSHaibo Chen 	writel(reg, base + XSPI_FRAD0_WORD3);
1107*29c8c00dSHaibo Chen 
1108*29c8c00dSHaibo Chen 	/*
1109*29c8c00dSHaibo Chen 	 * Config the timeout to max value, this timeout will affect the
1110*29c8c00dSHaibo Chen 	 * TBDR and RBDRn access right after IP cmd triggered.
1111*29c8c00dSHaibo Chen 	 */
1112*29c8c00dSHaibo Chen 	writel(0xFFFFFFFF, base + XSPI_MTO);
1113*29c8c00dSHaibo Chen 
1114*29c8c00dSHaibo Chen 	/* Disable module */
1115*29c8c00dSHaibo Chen 	reg = readl(base + XSPI_MCR);
1116*29c8c00dSHaibo Chen 	reg |= XSPI_MCR_MDIS;
1117*29c8c00dSHaibo Chen 	writel(reg, base + XSPI_MCR);
1118*29c8c00dSHaibo Chen 
1119*29c8c00dSHaibo Chen 	nxp_xspi_sw_reset(xspi);
1120*29c8c00dSHaibo Chen 
1121*29c8c00dSHaibo Chen 	reg = readl(base + XSPI_MCR);
1122*29c8c00dSHaibo Chen 	reg &= ~(XSPI_MCR_CKN_FA_EN | XSPI_MCR_DQS_FA_SEL_MASK |
1123*29c8c00dSHaibo Chen 		 XSPI_MCR_DOZE | XSPI_MCR_VAR_LAT_EN |
1124*29c8c00dSHaibo Chen 		 XSPI_MCR_DDR_EN | XSPI_MCR_DQS_OUT_EN);
1125*29c8c00dSHaibo Chen 	reg |= XSPI_MCR_DQS_EN;
1126*29c8c00dSHaibo Chen 	reg |= XSPI_MCR_ISD3FA | XSPI_MCR_ISD2FA;
1127*29c8c00dSHaibo Chen 	writel(reg, base + XSPI_MCR);
1128*29c8c00dSHaibo Chen 
1129*29c8c00dSHaibo Chen 	reg = readl(base + XSPI_SFACR);
1130*29c8c00dSHaibo Chen 	reg &= ~(XSPI_SFACR_FORCE_A10 | XSPI_SFACR_WA_4B_EN |
1131*29c8c00dSHaibo Chen 		 XSPI_SFACR_BYTE_SWAP | XSPI_SFACR_WA |
1132*29c8c00dSHaibo Chen 		 XSPI_SFACR_CAS_MASK);
1133*29c8c00dSHaibo Chen 	reg |= XSPI_SFACR_FORCE_A10;
1134*29c8c00dSHaibo Chen 	writel(reg, base + XSPI_SFACR);
1135*29c8c00dSHaibo Chen 
1136*29c8c00dSHaibo Chen 	nxp_xspi_config_ahb_buffer(xspi);
1137*29c8c00dSHaibo Chen 
1138*29c8c00dSHaibo Chen 	reg = FIELD_PREP(XSPI_FLSHCR_TCSH_MASK, 0x03) |
1139*29c8c00dSHaibo Chen 	      FIELD_PREP(XSPI_FLSHCR_TCSS_MASK, 0x03);
1140*29c8c00dSHaibo Chen 	writel(reg, base + XSPI_FLSHCR);
1141*29c8c00dSHaibo Chen 
1142*29c8c00dSHaibo Chen 	/* Enable module */
1143*29c8c00dSHaibo Chen 	reg = readl(base + XSPI_MCR);
1144*29c8c00dSHaibo Chen 	reg &= ~XSPI_MCR_MDIS;
1145*29c8c00dSHaibo Chen 	writel(reg, base + XSPI_MCR);
1146*29c8c00dSHaibo Chen 
1147*29c8c00dSHaibo Chen 	xspi->selected = -1;
1148*29c8c00dSHaibo Chen 
1149*29c8c00dSHaibo Chen 	/* Enable the interrupt */
1150*29c8c00dSHaibo Chen 	writel(XSPI_RSER_TFIE, base + XSPI_RSER);
1151*29c8c00dSHaibo Chen 
1152*29c8c00dSHaibo Chen 	return 0;
1153*29c8c00dSHaibo Chen }
1154*29c8c00dSHaibo Chen 
1155*29c8c00dSHaibo Chen static const char *nxp_xspi_get_name(struct spi_mem *mem)
1156*29c8c00dSHaibo Chen {
1157*29c8c00dSHaibo Chen 	struct nxp_xspi *xspi = spi_controller_get_devdata(mem->spi->controller);
1158*29c8c00dSHaibo Chen 	struct device *dev = &mem->spi->dev;
1159*29c8c00dSHaibo Chen 	const char *name;
1160*29c8c00dSHaibo Chen 
1161*29c8c00dSHaibo Chen 	/* Set custom name derived from the platform_device of the controller. */
1162*29c8c00dSHaibo Chen 	if (of_get_available_child_count(xspi->dev->of_node) == 1)
1163*29c8c00dSHaibo Chen 		return dev_name(xspi->dev);
1164*29c8c00dSHaibo Chen 
1165*29c8c00dSHaibo Chen 	name = devm_kasprintf(dev, GFP_KERNEL,
1166*29c8c00dSHaibo Chen 			      "%s-%d", dev_name(xspi->dev),
1167*29c8c00dSHaibo Chen 			      spi_get_chipselect(mem->spi, 0));
1168*29c8c00dSHaibo Chen 
1169*29c8c00dSHaibo Chen 	if (!name) {
1170*29c8c00dSHaibo Chen 		dev_err(dev, "failed to get memory for custom flash name\n");
1171*29c8c00dSHaibo Chen 		return ERR_PTR(-ENOMEM);
1172*29c8c00dSHaibo Chen 	}
1173*29c8c00dSHaibo Chen 
1174*29c8c00dSHaibo Chen 	return name;
1175*29c8c00dSHaibo Chen }
1176*29c8c00dSHaibo Chen 
1177*29c8c00dSHaibo Chen static const struct spi_controller_mem_ops nxp_xspi_mem_ops = {
1178*29c8c00dSHaibo Chen 	.adjust_op_size = nxp_xspi_adjust_op_size,
1179*29c8c00dSHaibo Chen 	.supports_op = nxp_xspi_supports_op,
1180*29c8c00dSHaibo Chen 	.exec_op = nxp_xspi_exec_op,
1181*29c8c00dSHaibo Chen 	.get_name = nxp_xspi_get_name,
1182*29c8c00dSHaibo Chen };
1183*29c8c00dSHaibo Chen 
1184*29c8c00dSHaibo Chen static const struct spi_controller_mem_caps nxp_xspi_mem_caps = {
1185*29c8c00dSHaibo Chen 	.dtr = true,
1186*29c8c00dSHaibo Chen 	.per_op_freq = true,
1187*29c8c00dSHaibo Chen 	.swap16 = true,
1188*29c8c00dSHaibo Chen };
1189*29c8c00dSHaibo Chen 
1190*29c8c00dSHaibo Chen static void nxp_xspi_cleanup(void *data)
1191*29c8c00dSHaibo Chen {
1192*29c8c00dSHaibo Chen 	struct nxp_xspi *xspi = data;
1193*29c8c00dSHaibo Chen 	u32 reg;
1194*29c8c00dSHaibo Chen 
1195*29c8c00dSHaibo Chen 	pm_runtime_get_sync(xspi->dev);
1196*29c8c00dSHaibo Chen 
1197*29c8c00dSHaibo Chen 	/* Disable interrupt */
1198*29c8c00dSHaibo Chen 	writel(0, xspi->iobase + XSPI_RSER);
1199*29c8c00dSHaibo Chen 	/* Clear all the internal logic flags */
1200*29c8c00dSHaibo Chen 	writel(0xFFFFFFFF, xspi->iobase + XSPI_FR);
1201*29c8c00dSHaibo Chen 	/* Disable the hardware */
1202*29c8c00dSHaibo Chen 	reg = readl(xspi->iobase + XSPI_MCR);
1203*29c8c00dSHaibo Chen 	reg |= XSPI_MCR_MDIS;
1204*29c8c00dSHaibo Chen 	writel(reg, xspi->iobase + XSPI_MCR);
1205*29c8c00dSHaibo Chen 
1206*29c8c00dSHaibo Chen 	pm_runtime_put_sync(xspi->dev);
1207*29c8c00dSHaibo Chen 
1208*29c8c00dSHaibo Chen 	if (xspi->ahb_addr)
1209*29c8c00dSHaibo Chen 		iounmap(xspi->ahb_addr);
1210*29c8c00dSHaibo Chen }
1211*29c8c00dSHaibo Chen 
1212*29c8c00dSHaibo Chen static int nxp_xspi_probe(struct platform_device *pdev)
1213*29c8c00dSHaibo Chen {
1214*29c8c00dSHaibo Chen 	struct device *dev = &pdev->dev;
1215*29c8c00dSHaibo Chen 	struct spi_controller *ctlr;
1216*29c8c00dSHaibo Chen 	struct nxp_xspi *xspi;
1217*29c8c00dSHaibo Chen 	struct resource *res;
1218*29c8c00dSHaibo Chen 	int ret, irq;
1219*29c8c00dSHaibo Chen 
1220*29c8c00dSHaibo Chen 	ctlr = devm_spi_alloc_host(dev, sizeof(*xspi));
1221*29c8c00dSHaibo Chen 	if (!ctlr)
1222*29c8c00dSHaibo Chen 		return -ENOMEM;
1223*29c8c00dSHaibo Chen 
1224*29c8c00dSHaibo Chen 	ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL |
1225*29c8c00dSHaibo Chen 			  SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL;
1226*29c8c00dSHaibo Chen 
1227*29c8c00dSHaibo Chen 	xspi = spi_controller_get_devdata(ctlr);
1228*29c8c00dSHaibo Chen 	xspi->dev = dev;
1229*29c8c00dSHaibo Chen 	xspi->devtype_data = device_get_match_data(dev);
1230*29c8c00dSHaibo Chen 	if (!xspi->devtype_data)
1231*29c8c00dSHaibo Chen 		return -ENODEV;
1232*29c8c00dSHaibo Chen 
1233*29c8c00dSHaibo Chen 	platform_set_drvdata(pdev, xspi);
1234*29c8c00dSHaibo Chen 
1235*29c8c00dSHaibo Chen 	/* Find the resources - configuration register address space */
1236*29c8c00dSHaibo Chen 	xspi->iobase = devm_platform_ioremap_resource_byname(pdev, "base");
1237*29c8c00dSHaibo Chen 	if (IS_ERR(xspi->iobase))
1238*29c8c00dSHaibo Chen 		return PTR_ERR(xspi->iobase);
1239*29c8c00dSHaibo Chen 
1240*29c8c00dSHaibo Chen 	/* Find the resources - controller memory mapped space */
1241*29c8c00dSHaibo Chen 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmap");
1242*29c8c00dSHaibo Chen 	if (!res)
1243*29c8c00dSHaibo Chen 		return -ENODEV;
1244*29c8c00dSHaibo Chen 
1245*29c8c00dSHaibo Chen 	/* Assign memory mapped starting address and mapped size. */
1246*29c8c00dSHaibo Chen 	xspi->memmap_phy = res->start;
1247*29c8c00dSHaibo Chen 	xspi->memmap_phy_size = resource_size(res);
1248*29c8c00dSHaibo Chen 
1249*29c8c00dSHaibo Chen 	/* Find the clocks */
1250*29c8c00dSHaibo Chen 	xspi->clk = devm_clk_get(dev, "per");
1251*29c8c00dSHaibo Chen 	if (IS_ERR(xspi->clk))
1252*29c8c00dSHaibo Chen 		return PTR_ERR(xspi->clk);
1253*29c8c00dSHaibo Chen 
1254*29c8c00dSHaibo Chen 	/* Find the irq */
1255*29c8c00dSHaibo Chen 	irq = platform_get_irq(pdev, 0);
1256*29c8c00dSHaibo Chen 	if (irq < 0)
1257*29c8c00dSHaibo Chen 		return dev_err_probe(dev, irq,  "Failed to get irq source");
1258*29c8c00dSHaibo Chen 
1259*29c8c00dSHaibo Chen 	pm_runtime_set_autosuspend_delay(dev, XSPI_RPM_TIMEOUT_MS);
1260*29c8c00dSHaibo Chen 	pm_runtime_use_autosuspend(dev);
1261*29c8c00dSHaibo Chen 	ret = devm_pm_runtime_enable(dev);
1262*29c8c00dSHaibo Chen 	if (ret)
1263*29c8c00dSHaibo Chen 		return ret;
1264*29c8c00dSHaibo Chen 
1265*29c8c00dSHaibo Chen 	PM_RUNTIME_ACQUIRE_AUTOSUSPEND(dev, pm);
1266*29c8c00dSHaibo Chen 	ret = PM_RUNTIME_ACQUIRE_ERR(&pm);
1267*29c8c00dSHaibo Chen 	if (ret)
1268*29c8c00dSHaibo Chen 		return dev_err_probe(dev, ret, "Failed to enable clock");
1269*29c8c00dSHaibo Chen 
1270*29c8c00dSHaibo Chen 	/* Clear potential interrupt by write xspi errstat */
1271*29c8c00dSHaibo Chen 	writel(0xFFFFFFFF, xspi->iobase + XSPI_ERRSTAT);
1272*29c8c00dSHaibo Chen 	writel(0xFFFFFFFF, xspi->iobase + XSPI_FR);
1273*29c8c00dSHaibo Chen 
1274*29c8c00dSHaibo Chen 	nxp_xspi_default_setup(xspi);
1275*29c8c00dSHaibo Chen 
1276*29c8c00dSHaibo Chen 	ret = devm_request_irq(dev, irq,
1277*29c8c00dSHaibo Chen 			nxp_xspi_irq_handler, 0, pdev->name, xspi);
1278*29c8c00dSHaibo Chen 	if (ret)
1279*29c8c00dSHaibo Chen 		return dev_err_probe(dev, ret, "failed to request irq");
1280*29c8c00dSHaibo Chen 
1281*29c8c00dSHaibo Chen 	ret = devm_mutex_init(dev, &xspi->lock);
1282*29c8c00dSHaibo Chen 	if (ret)
1283*29c8c00dSHaibo Chen 		return ret;
1284*29c8c00dSHaibo Chen 
1285*29c8c00dSHaibo Chen 	ret = devm_add_action_or_reset(dev, nxp_xspi_cleanup, xspi);
1286*29c8c00dSHaibo Chen 	if (ret)
1287*29c8c00dSHaibo Chen 		return ret;
1288*29c8c00dSHaibo Chen 
1289*29c8c00dSHaibo Chen 	ctlr->bus_num = -1;
1290*29c8c00dSHaibo Chen 	ctlr->num_chipselect = NXP_XSPI_MAX_CHIPSELECT;
1291*29c8c00dSHaibo Chen 	ctlr->mem_ops = &nxp_xspi_mem_ops;
1292*29c8c00dSHaibo Chen 	ctlr->mem_caps = &nxp_xspi_mem_caps;
1293*29c8c00dSHaibo Chen 
1294*29c8c00dSHaibo Chen 	return devm_spi_register_controller(dev, ctlr);
1295*29c8c00dSHaibo Chen }
1296*29c8c00dSHaibo Chen 
1297*29c8c00dSHaibo Chen static int nxp_xspi_runtime_suspend(struct device *dev)
1298*29c8c00dSHaibo Chen {
1299*29c8c00dSHaibo Chen 	struct nxp_xspi *xspi = dev_get_drvdata(dev);
1300*29c8c00dSHaibo Chen 	u32 reg;
1301*29c8c00dSHaibo Chen 
1302*29c8c00dSHaibo Chen 	reg = readl(xspi->iobase + XSPI_MCR);
1303*29c8c00dSHaibo Chen 	reg |= XSPI_MCR_MDIS;
1304*29c8c00dSHaibo Chen 	writel(reg, xspi->iobase + XSPI_MCR);
1305*29c8c00dSHaibo Chen 
1306*29c8c00dSHaibo Chen 	clk_disable_unprepare(xspi->clk);
1307*29c8c00dSHaibo Chen 
1308*29c8c00dSHaibo Chen 	return 0;
1309*29c8c00dSHaibo Chen }
1310*29c8c00dSHaibo Chen 
1311*29c8c00dSHaibo Chen static int nxp_xspi_runtime_resume(struct device *dev)
1312*29c8c00dSHaibo Chen {
1313*29c8c00dSHaibo Chen 	struct nxp_xspi *xspi = dev_get_drvdata(dev);
1314*29c8c00dSHaibo Chen 	u32 reg;
1315*29c8c00dSHaibo Chen 	int ret;
1316*29c8c00dSHaibo Chen 
1317*29c8c00dSHaibo Chen 	ret = clk_prepare_enable(xspi->clk);
1318*29c8c00dSHaibo Chen 	if (ret)
1319*29c8c00dSHaibo Chen 		return ret;
1320*29c8c00dSHaibo Chen 
1321*29c8c00dSHaibo Chen 	reg = readl(xspi->iobase + XSPI_MCR);
1322*29c8c00dSHaibo Chen 	reg &= ~XSPI_MCR_MDIS;
1323*29c8c00dSHaibo Chen 	writel(reg, xspi->iobase + XSPI_MCR);
1324*29c8c00dSHaibo Chen 
1325*29c8c00dSHaibo Chen 	return 0;
1326*29c8c00dSHaibo Chen }
1327*29c8c00dSHaibo Chen 
/*
 * System-sleep suspend: move the pins into their sleep state, then force
 * a runtime suspend (MDIS set, clock gated) via pm_runtime_force_suspend().
 * Note: the old error message said "flexspi" - leftover from the fspi
 * driver this is based on; fixed to name this (xspi) controller.
 */
static int nxp_xspi_suspend(struct device *dev)
{
	int ret;

	ret = pinctrl_pm_select_sleep_state(dev);
	if (ret) {
		dev_err(dev, "select xspi sleep pinctrl failed!\n");
		return ret;
	}

	return pm_runtime_force_suspend(dev);
}
1340*29c8c00dSHaibo Chen 
/*
 * System-sleep resume: force a runtime resume (clock on, MDIS cleared),
 * reprogram the controller defaults (register state may have been lost
 * across suspend), then restore the default pinctrl state.
 * Note: the old error message said "flexspi" - leftover from the fspi
 * driver this is based on; fixed to name this (xspi) controller.
 */
static int nxp_xspi_resume(struct device *dev)
{
	struct nxp_xspi *xspi = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret)
		return ret;

	nxp_xspi_default_setup(xspi);

	ret = pinctrl_pm_select_default_state(dev);
	if (ret)
		dev_err(dev, "select xspi default pinctrl failed!\n");

	return ret;
}
1358*29c8c00dSHaibo Chen 
1359*29c8c00dSHaibo Chen 
/*
 * Runtime PM gates the controller clock/MDIS; system sleep additionally
 * switches pinctrl states and replays the default controller setup.
 */
static const struct dev_pm_ops nxp_xspi_pm_ops = {
	RUNTIME_PM_OPS(nxp_xspi_runtime_suspend, nxp_xspi_runtime_resume, NULL)
	SYSTEM_SLEEP_PM_OPS(nxp_xspi_suspend, nxp_xspi_resume)
};
1364*29c8c00dSHaibo Chen 
/* OF match table; .data supplies the per-SoC devtype parameters. */
static const struct of_device_id nxp_xspi_dt_ids[] = {
	{ .compatible = "nxp,imx94-xspi", .data = (void *)&imx94_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, nxp_xspi_dt_ids);
1370*29c8c00dSHaibo Chen 
/*
 * No .remove callback: every resource in probe is devm-managed, including
 * the nxp_xspi_cleanup action registered via devm_add_action_or_reset().
 */
static struct platform_driver nxp_xspi_driver = {
	.driver = {
		.name	= "nxp-xspi",
		.of_match_table = nxp_xspi_dt_ids,
		.pm =   pm_ptr(&nxp_xspi_pm_ops),
	},
	.probe          = nxp_xspi_probe,
};
module_platform_driver(nxp_xspi_driver);

MODULE_DESCRIPTION("NXP xSPI Controller Driver");
MODULE_AUTHOR("NXP Semiconductor");
MODULE_AUTHOR("Haibo Chen <haibo.chen@nxp.com>");
MODULE_LICENSE("GPL");
1385