xref: /linux/drivers/spi/atmel-quadspi.c (revision 0262163136de813894cb172aa8ccf762b92e5fd7)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Driver for Atmel QSPI Controller
4  *
5  * Copyright (C) 2015 Atmel Corporation
6  * Copyright (C) 2018 Cryptera A/S
7  *
8  * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
9  * Author: Piotr Bugalski <bugalski.piotr@gmail.com>
10  *
11  * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
12  */
13 
14 #include <linux/bitfield.h>
15 #include <linux/clk.h>
16 #include <linux/delay.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/dmaengine.h>
19 #include <linux/err.h>
20 #include <linux/interrupt.h>
21 #include <linux/io.h>
22 #include <linux/iopoll.h>
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/of.h>
26 #include <linux/of_platform.h>
27 #include <linux/platform_device.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/spi/spi-mem.h>
30 
31 /* QSPI register offsets */
32 #define QSPI_CR      0x0000  /* Control Register */
33 #define QSPI_MR      0x0004  /* Mode Register */
34 #define QSPI_RD      0x0008  /* Receive Data Register */
35 #define QSPI_TD      0x000c  /* Transmit Data Register */
36 #define QSPI_SR      0x0010  /* Status Register */
37 #define QSPI_IER     0x0014  /* Interrupt Enable Register */
38 #define QSPI_IDR     0x0018  /* Interrupt Disable Register */
39 #define QSPI_IMR     0x001c  /* Interrupt Mask Register */
40 #define QSPI_SCR     0x0020  /* Serial Clock Register */
41 #define QSPI_SR2     0x0024  /* SAMA7G5 Status Register */
42 
43 #define QSPI_IAR     0x0030  /* Instruction Address Register */
44 #define QSPI_ICR     0x0034  /* Instruction Code Register */
45 #define QSPI_WICR    0x0034  /* Write Instruction Code Register */
46 #define QSPI_IFR     0x0038  /* Instruction Frame Register */
47 #define QSPI_RICR    0x003C  /* Read Instruction Code Register */
48 
49 #define QSPI_SMR     0x0040  /* Scrambling Mode Register */
50 #define QSPI_SKR     0x0044  /* Scrambling Key Register */
51 
52 #define QSPI_REFRESH	0x0050	/* Refresh Register */
53 #define QSPI_WRACNT	0x0054	/* Write Access Counter Register */
54 #define QSPI_DLLCFG	0x0058	/* DLL Configuration Register */
55 #define QSPI_PCALCFG	0x005C	/* Pad Calibration Configuration Register */
56 #define QSPI_PCALBP	0x0060	/* Pad Calibration Bypass Register */
57 #define QSPI_TOUT	0x0064	/* Timeout Register */
58 
59 #define QSPI_WPMR    0x00E4  /* Write Protection Mode Register */
60 #define QSPI_WPSR    0x00E8  /* Write Protection Status Register */
61 
62 #define QSPI_VERSION 0x00FC  /* Version Register */
63 
64 #define SAMA7G5_QSPI0_MAX_SPEED_HZ	200000000
65 #define SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ	133000000
66 
67 /* Bitfields in QSPI_CR (Control Register) */
68 #define QSPI_CR_QSPIEN                  BIT(0)
69 #define QSPI_CR_QSPIDIS                 BIT(1)
70 #define QSPI_CR_DLLON			BIT(2)
71 #define QSPI_CR_DLLOFF			BIT(3)
72 #define QSPI_CR_STPCAL			BIT(4)
73 #define QSPI_CR_SRFRSH			BIT(5)
74 #define QSPI_CR_SWRST                   BIT(7)
75 #define QSPI_CR_UPDCFG			BIT(8)
76 #define QSPI_CR_STTFR			BIT(9)
77 #define QSPI_CR_RTOUT			BIT(10)
78 #define QSPI_CR_LASTXFER                BIT(24)
79 
80 /* Bitfields in QSPI_MR (Mode Register) */
81 #define QSPI_MR_SMM                     BIT(0)
82 #define QSPI_MR_LLB                     BIT(1)
83 #define QSPI_MR_WDRBT                   BIT(2)
84 #define QSPI_MR_SMRM                    BIT(3)
85 #define QSPI_MR_DQSDLYEN		BIT(3)
86 #define QSPI_MR_CSMODE_MASK             GENMASK(5, 4)
87 #define QSPI_MR_CSMODE_NOT_RELOADED     (0 << 4)
88 #define QSPI_MR_CSMODE_LASTXFER         (1 << 4)
89 #define QSPI_MR_CSMODE_SYSTEMATICALLY   (2 << 4)
90 #define QSPI_MR_NBBITS_MASK             GENMASK(11, 8)
91 #define QSPI_MR_NBBITS(n)               ((((n) - 8) << 8) & QSPI_MR_NBBITS_MASK)
92 #define QSPI_MR_OENSD			BIT(15)
93 #define QSPI_MR_DLYBCT_MASK             GENMASK(23, 16)
94 #define QSPI_MR_DLYBCT(n)               (((n) << 16) & QSPI_MR_DLYBCT_MASK)
95 #define QSPI_MR_DLYCS_MASK              GENMASK(31, 24)
96 #define QSPI_MR_DLYCS(n)                (((n) << 24) & QSPI_MR_DLYCS_MASK)
97 
98 /* Bitfields in QSPI_SR/QSPI_IER/QSPI_IDR/QSPI_IMR  */
99 #define QSPI_SR_RDRF                    BIT(0)
100 #define QSPI_SR_TDRE                    BIT(1)
101 #define QSPI_SR_TXEMPTY                 BIT(2)
102 #define QSPI_SR_OVRES                   BIT(3)
103 #define QSPI_SR_CSR                     BIT(8)
104 #define QSPI_SR_CSS                     BIT(9)
105 #define QSPI_SR_INSTRE                  BIT(10)
106 #define QSPI_SR_LWRA			BIT(11)
107 #define QSPI_SR_QITF			BIT(12)
108 #define QSPI_SR_QITR			BIT(13)
109 #define QSPI_SR_CSFA			BIT(14)
110 #define QSPI_SR_CSRA			BIT(15)
111 #define QSPI_SR_RFRSHD			BIT(16)
112 #define QSPI_SR_TOUT			BIT(17)
113 #define QSPI_SR_QSPIENS                 BIT(24)
114 
115 #define QSPI_SR_CMD_COMPLETED	(QSPI_SR_INSTRE | QSPI_SR_CSR)
116 
117 /* Bitfields in QSPI_SCR (Serial Clock Register) */
118 #define QSPI_SCR_CPOL                   BIT(0)
119 #define QSPI_SCR_CPHA                   BIT(1)
120 #define QSPI_SCR_SCBR_MASK              GENMASK(15, 8)
121 #define QSPI_SCR_SCBR(n)                (((n) << 8) & QSPI_SCR_SCBR_MASK)
122 #define QSPI_SCR_DLYBS_MASK             GENMASK(23, 16)
123 #define QSPI_SCR_DLYBS(n)               (((n) << 16) & QSPI_SCR_DLYBS_MASK)
124 
125 /* Bitfields in QSPI_SR2 (SAMA7G5 Status Register) */
126 #define QSPI_SR2_SYNCBSY		BIT(0)
127 #define QSPI_SR2_QSPIENS		BIT(1)
128 #define QSPI_SR2_CSS			BIT(2)
129 #define QSPI_SR2_RBUSY			BIT(3)
130 #define QSPI_SR2_HIDLE			BIT(4)
131 #define QSPI_SR2_DLOCK			BIT(5)
132 #define QSPI_SR2_CALBSY			BIT(6)
133 
134 /* Bitfields in QSPI_IAR (Instruction Address Register) */
135 #define QSPI_IAR_ADDR			GENMASK(31, 0)
136 
137 /* Bitfields in QSPI_ICR (Read/Write Instruction Code Register) */
138 #define QSPI_ICR_INST_MASK              GENMASK(7, 0)
139 #define QSPI_ICR_INST(inst)             (((inst) << 0) & QSPI_ICR_INST_MASK)
140 #define QSPI_ICR_INST_MASK_SAMA7G5	GENMASK(15, 0)
141 #define QSPI_ICR_OPT_MASK               GENMASK(23, 16)
142 #define QSPI_ICR_OPT(opt)               (((opt) << 16) & QSPI_ICR_OPT_MASK)
143 
144 /* Bitfields in QSPI_IFR (Instruction Frame Register) */
145 #define QSPI_IFR_WIDTH_MASK             GENMASK(2, 0)
146 #define QSPI_IFR_WIDTH_SINGLE_BIT_SPI   (0 << 0)
147 #define QSPI_IFR_WIDTH_DUAL_OUTPUT      (1 << 0)
148 #define QSPI_IFR_WIDTH_QUAD_OUTPUT      (2 << 0)
149 #define QSPI_IFR_WIDTH_DUAL_IO          (3 << 0)
150 #define QSPI_IFR_WIDTH_QUAD_IO          (4 << 0)
151 #define QSPI_IFR_WIDTH_DUAL_CMD         (5 << 0)
152 #define QSPI_IFR_WIDTH_QUAD_CMD         (6 << 0)
153 #define QSPI_IFR_WIDTH_OCT_OUTPUT	(7 << 0)
154 #define QSPI_IFR_WIDTH_OCT_IO		(8 << 0)
155 #define QSPI_IFR_WIDTH_OCT_CMD		(9 << 0)
156 #define QSPI_IFR_INSTEN                 BIT(4)
157 #define QSPI_IFR_ADDREN                 BIT(5)
158 #define QSPI_IFR_OPTEN                  BIT(6)
159 #define QSPI_IFR_DATAEN                 BIT(7)
160 #define QSPI_IFR_OPTL_MASK              GENMASK(9, 8)
161 #define QSPI_IFR_OPTL_1BIT              (0 << 8)
162 #define QSPI_IFR_OPTL_2BIT              (1 << 8)
163 #define QSPI_IFR_OPTL_4BIT              (2 << 8)
164 #define QSPI_IFR_OPTL_8BIT              (3 << 8)
165 #define QSPI_IFR_ADDRL                  BIT(10)
166 #define QSPI_IFR_ADDRL_SAMA7G5		GENMASK(11, 10)
167 #define QSPI_IFR_TFRTYP_MEM		BIT(12)
168 #define QSPI_IFR_SAMA5D2_WRITE_TRSFR	BIT(13)
169 #define QSPI_IFR_CRM                    BIT(14)
170 #define QSPI_IFR_DDREN			BIT(15)
171 #define QSPI_IFR_NBDUM_MASK             GENMASK(20, 16)
172 #define QSPI_IFR_NBDUM(n)               (((n) << 16) & QSPI_IFR_NBDUM_MASK)
173 #define QSPI_IFR_END			BIT(22)
174 #define QSPI_IFR_SMRM			BIT(23)
175 #define QSPI_IFR_APBTFRTYP_READ		BIT(24)	/* Defined in SAM9X60 */
176 #define QSPI_IFR_DQSEN			BIT(25)
177 #define QSPI_IFR_DDRCMDEN		BIT(26)
178 #define QSPI_IFR_HFWBEN			BIT(27)
179 #define QSPI_IFR_PROTTYP		GENMASK(29, 28)
180 #define QSPI_IFR_PROTTYP_STD_SPI	0
181 #define QSPI_IFR_PROTTYP_TWIN_QUAD	1
182 #define QSPI_IFR_PROTTYP_OCTAFLASH	2
183 #define QSPI_IFR_PROTTYP_HYPERFLASH	3
184 
185 /* Bitfields in QSPI_SMR (Scrambling Mode Register) */
186 #define QSPI_SMR_SCREN                  BIT(0)
187 #define QSPI_SMR_RVDIS                  BIT(1)
188 #define QSPI_SMR_SCRKL                  BIT(2)
189 
190 /* Bitfields in QSPI_REFRESH (Refresh Register) */
191 #define QSPI_REFRESH_DELAY_COUNTER	GENMASK(31, 0)
192 
193 /* Bitfields in QSPI_WRACNT (Write Access Counter Register) */
194 #define QSPI_WRACNT_NBWRA		GENMASK(31, 0)
195 
196 /* Bitfields in QSPI_DLLCFG (DLL Configuration Register) */
197 #define QSPI_DLLCFG_RANGE		BIT(0)
198 
199 /* Bitfields in QSPI_PCALCFG (DLL Pad Calibration Configuration Register) */
200 #define QSPI_PCALCFG_AAON		BIT(0)
201 #define QSPI_PCALCFG_DAPCAL		BIT(1)
202 #define QSPI_PCALCFG_DIFFPM		BIT(2)
203 #define QSPI_PCALCFG_CLKDIV		GENMASK(6, 4)
204 #define QSPI_PCALCFG_CALCNT		GENMASK(16, 8)
205 #define QSPI_PCALCFG_CALP		GENMASK(27, 24)
206 #define QSPI_PCALCFG_CALN		GENMASK(31, 28)
207 
208 /* Bitfields in QSPI_PCALBP (DLL Pad Calibration Bypass Register) */
209 #define QSPI_PCALBP_BPEN		BIT(0)
210 #define QSPI_PCALBP_CALPBP		GENMASK(11, 8)
211 #define QSPI_PCALBP_CALNBP		GENMASK(19, 16)
212 
213 /* Bitfields in QSPI_TOUT (Timeout Register) */
214 #define QSPI_TOUT_TCNTM			GENMASK(15, 0)
215 
216 /* Bitfields in QSPI_WPMR (Write Protection Mode Register) */
217 #define QSPI_WPMR_WPEN                  BIT(0)
218 #define QSPI_WPMR_WPITEN		BIT(1)
219 #define QSPI_WPMR_WPCREN		BIT(2)
220 #define QSPI_WPMR_WPKEY_MASK            GENMASK(31, 8)
221 #define QSPI_WPMR_WPKEY(wpkey)          (((wpkey) << 8) & QSPI_WPMR_WPKEY_MASK)
222 
/* Bitfields in QSPI_WPSR (Write Protection Status Register) */
#define QSPI_WPSR_WPVS                  BIT(0)
#define QSPI_WPSR_WPVSRC_MASK           GENMASK(15, 8)
/*
 * Encode the WPVSRC field. The original masked against the function-like
 * macro name itself (QSPI_WPSR_WPVSRC), which would not compile if the
 * macro were ever expanded; the intended mask is QSPI_WPSR_WPVSRC_MASK.
 */
#define QSPI_WPSR_WPVSRC(src)           (((src) << 8) & QSPI_WPSR_WPVSRC_MASK)
227 
228 #define ATMEL_QSPI_TIMEOUT		1000	/* ms */
229 #define ATMEL_QSPI_SYNC_TIMEOUT		300	/* ms */
230 #define QSPI_DLLCFG_THRESHOLD_FREQ	90000000U
231 #define QSPI_CALIB_TIME			2000	/* 2 us */
232 
233 /* Use PIO for small transfers. */
234 #define ATMEL_QSPI_DMA_MIN_BYTES	16
/**
 * struct atmel_qspi_pcal - Pad Calibration Clock Division
 * @pclk_rate: peripheral clock rate.
 * @pclk_div: calibration clock division. The clock applied to the calibration
 *           cell is divided by pclk_div + 1.
 */
struct atmel_qspi_pcal {
	u32 pclk_rate;
	u8 pclk_div;
};

#define ATMEL_QSPI_PCAL_ARRAY_SIZE	8
/*
 * Calibration clock dividers, ordered by ascending peripheral clock rate.
 * atmel_qspi_set_pad_calibration() picks the first entry whose pclk_rate
 * is >= the actual peripheral clock, falling back to the last (biggest)
 * divider above 200 MHz.
 */
static const struct atmel_qspi_pcal pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE] = {
	{25000000, 0},
	{50000000, 1},
	{75000000, 2},
	{100000000, 3},
	{125000000, 4},
	{150000000, 5},
	{175000000, 6},
	{200000000, 7},
};
257 
/*
 * struct atmel_qspi_caps - per-compatible controller capabilities
 * @max_speed_hz: maximum supported clock rate (presumably enforced during
 *		  setup; use not visible in this chunk — confirm).
 * @has_qspick:   controller needs a separate "qspick" system clock
 *		  (see aq->qspick).
 * @has_gclk:     controller is driven by a generic clock and requires the
 *		  UPDCFG/SYNCBSY handshake (see atmel_qspi_update_config()).
 * @has_ricr:     controller has split read/write instruction code registers
 *		  (QSPI_RICR/QSPI_WICR) instead of a single QSPI_ICR.
 * @octal:        controller supports octal modes (SAMA7G5 mode table).
 * @has_dma:      DMA channels may be requested for memory-mapped transfers
 *		  (request path not visible here — confirm).
 */
struct atmel_qspi_caps {
	u32 max_speed_hz;
	bool has_qspick;
	bool has_gclk;
	bool has_ricr;
	bool octal;
	bool has_dma;
};
266 
267 struct atmel_qspi_ops;
268 
/*
 * struct atmel_qspi - driver state for one QSPI controller instance
 * @regs:         APB register window.
 * @mem:          AHB memory-mapped serial flash window.
 * @pclk:         peripheral clock.
 * @qspick:       system clock (controllers with caps->has_qspick).
 * @gclk:         generic clock (controllers with caps->has_gclk).
 * @pdev:         owning platform device (logging, runtime PM).
 * @caps:         per-compatible capabilities.
 * @ops:          generation-specific set_cfg/transfer hooks.
 * @mmap_size:    size of the memory-mapped window; ops reaching beyond it
 *                are rejected in atmel_qspi_exec_op().
 * @pending:      status bits already set when a completion wait starts
 *                (consumed by the IRQ handler — not visible in this chunk).
 * @irq_mask:     status bits the current waiter expects.
 * @mr:           cached Mode Register value (QSPI_MR).
 * @scr:          cached Serial Clock Register value (written outside this
 *                chunk — presumably in the setup path).
 * @target_max_speed_hz: max speed of the flash target (set outside this
 *                chunk — confirm).
 * @cmd_completion: signalled when the awaited QSPI_SR bits fire.
 * @dma_completion: signalled by atmel_qspi_dma_callback().
 * @mmap_phys_base: physical base of the memory-mapped window; used as the
 *                DMA source/destination address.
 * @rx_chan:      DMA channel for reads (NULL when unavailable).
 * @tx_chan:      DMA channel for writes (NULL when unavailable).
 */
struct atmel_qspi {
	void __iomem		*regs;
	void __iomem		*mem;
	struct clk		*pclk;
	struct clk		*qspick;
	struct clk		*gclk;
	struct platform_device	*pdev;
	const struct atmel_qspi_caps *caps;
	const struct atmel_qspi_ops *ops;
	resource_size_t		mmap_size;
	u32			pending;
	u32			irq_mask;
	u32			mr;
	u32			scr;
	u32			target_max_speed_hz;
	struct completion	cmd_completion;
	struct completion	dma_completion;
	dma_addr_t		mmap_phys_base;
	struct dma_chan		*rx_chan;
	struct dma_chan		*tx_chan;
};
290 
/*
 * struct atmel_qspi_ops - generation-specific spi-mem hooks
 * @set_cfg:  program the instruction frame registers for @op and return,
 *	      via @offset, the offset of the data access in the
 *	      memory-mapped window.
 * @transfer: move the op's data and complete the command, using the
 *	      @offset previously computed by @set_cfg.
 */
struct atmel_qspi_ops {
	int (*set_cfg)(struct atmel_qspi *aq, const struct spi_mem_op *op,
		       u32 *offset);
	int (*transfer)(struct spi_mem *mem, const struct spi_mem_op *op,
			u32 offset);
};
297 
/*
 * struct atmel_qspi_mode - one supported buswidth combination
 * @cmd_buswidth:  I/O lines used for the command phase.
 * @addr_buswidth: I/O lines used for the address phase.
 * @data_buswidth: I/O lines used for the data phase.
 * @config:        matching QSPI_IFR_WIDTH_* value.
 */
struct atmel_qspi_mode {
	u8 cmd_buswidth;
	u8 addr_buswidth;
	u8 data_buswidth;
	u32 config;
};
304 
/* Buswidth combinations supported by all controller generations. */
static const struct atmel_qspi_mode atmel_qspi_modes[] = {
	{ 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
	{ 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
	{ 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
	{ 1, 2, 2, QSPI_IFR_WIDTH_DUAL_IO },
	{ 1, 4, 4, QSPI_IFR_WIDTH_QUAD_IO },
	{ 2, 2, 2, QSPI_IFR_WIDTH_DUAL_CMD },
	{ 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
};
314 
/* SAMA7G5 additionally supports the octal buswidth combinations. */
static const struct atmel_qspi_mode atmel_qspi_sama7g5_modes[] = {
	{ 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
	{ 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
	{ 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
	{ 1, 2, 2, QSPI_IFR_WIDTH_DUAL_IO },
	{ 1, 4, 4, QSPI_IFR_WIDTH_QUAD_IO },
	{ 2, 2, 2, QSPI_IFR_WIDTH_DUAL_CMD },
	{ 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
	{ 1, 1, 8, QSPI_IFR_WIDTH_OCT_OUTPUT },
	{ 1, 8, 8, QSPI_IFR_WIDTH_OCT_IO },
	{ 8, 8, 8, QSPI_IFR_WIDTH_OCT_CMD },
};
327 
#ifdef VERBOSE_DEBUG
/*
 * Map a register offset to a human-readable name for verbose tracing.
 * Unknown offsets are formatted as "0x%02x" into @tmp, which is then
 * returned instead of a static name.
 */
static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
{
	static const struct {
		u32 offset;
		const char *name;
	} reg_names[] = {
		{ QSPI_CR,      "CR" },
		{ QSPI_MR,      "MR" },
		{ QSPI_RD,      "RD" },
		{ QSPI_TD,      "TD" },
		{ QSPI_SR,      "SR" },
		{ QSPI_IER,     "IER" },
		{ QSPI_IDR,     "IDR" },
		{ QSPI_IMR,     "IMR" },
		{ QSPI_SCR,     "SCR" },
		{ QSPI_SR2,     "SR2" },
		{ QSPI_IAR,     "IAR" },
		/* QSPI_ICR and QSPI_WICR share the same offset. */
		{ QSPI_ICR,     "ICR/WICR" },
		{ QSPI_IFR,     "IFR" },
		{ QSPI_RICR,    "RICR" },
		{ QSPI_SMR,     "SMR" },
		{ QSPI_SKR,     "SKR" },
		{ QSPI_REFRESH, "REFRESH" },
		{ QSPI_WRACNT,  "WRACNT" },
		{ QSPI_DLLCFG,  "DLLCFG" },
		{ QSPI_PCALCFG, "PCALCFG" },
		{ QSPI_PCALBP,  "PCALBP" },
		{ QSPI_TOUT,    "TOUT" },
		{ QSPI_WPMR,    "WPMR" },
		{ QSPI_WPSR,    "WPSR" },
		{ QSPI_VERSION, "VERSION" },
	};
	size_t i;

	for (i = 0; i < ARRAY_SIZE(reg_names); i++) {
		if (reg_names[i].offset == offset)
			return reg_names[i].name;
	}

	snprintf(tmp, sz, "0x%02x", offset);

	return tmp;
}
#endif /* VERBOSE_DEBUG */
390 
/* Read a QSPI register, tracing the access when VERBOSE_DEBUG is set. */
static u32 atmel_qspi_read(struct atmel_qspi *aq, u32 offset)
{
	u32 value = readl_relaxed(aq->regs + offset);

#ifdef VERBOSE_DEBUG
	char tmp[8];

	dev_vdbg(&aq->pdev->dev, "read 0x%08x from %s\n", value,
		 atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
#endif /* VERBOSE_DEBUG */

	return value;
}
404 
/* Write a QSPI register, tracing the access when VERBOSE_DEBUG is set. */
static void atmel_qspi_write(u32 value, struct atmel_qspi *aq, u32 offset)
{
#ifdef VERBOSE_DEBUG
	char tmp[8];

	dev_vdbg(&aq->pdev->dev, "write 0x%08x into %s\n", value,
		 atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
#endif /* VERBOSE_DEBUG */

	writel_relaxed(value, aq->regs + offset);
}
416 
atmel_qspi_reg_sync(struct atmel_qspi * aq)417 static int atmel_qspi_reg_sync(struct atmel_qspi *aq)
418 {
419 	u32 val;
420 	int ret;
421 
422 	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
423 				 !(val & QSPI_SR2_SYNCBSY), 40,
424 				 ATMEL_QSPI_SYNC_TIMEOUT);
425 	return ret;
426 }
427 
atmel_qspi_update_config(struct atmel_qspi * aq)428 static int atmel_qspi_update_config(struct atmel_qspi *aq)
429 {
430 	int ret;
431 
432 	ret = atmel_qspi_reg_sync(aq);
433 	if (ret)
434 		return ret;
435 	atmel_qspi_write(QSPI_CR_UPDCFG, aq, QSPI_CR);
436 	return atmel_qspi_reg_sync(aq);
437 }
438 
atmel_qspi_is_compatible(const struct spi_mem_op * op,const struct atmel_qspi_mode * mode)439 static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op,
440 					    const struct atmel_qspi_mode *mode)
441 {
442 	if (op->cmd.buswidth != mode->cmd_buswidth)
443 		return false;
444 
445 	if (op->addr.nbytes && op->addr.buswidth != mode->addr_buswidth)
446 		return false;
447 
448 	if (op->data.nbytes && op->data.buswidth != mode->data_buswidth)
449 		return false;
450 
451 	return true;
452 }
453 
atmel_qspi_find_mode(const struct spi_mem_op * op)454 static int atmel_qspi_find_mode(const struct spi_mem_op *op)
455 {
456 	u32 i;
457 
458 	for (i = 0; i < ARRAY_SIZE(atmel_qspi_modes); i++)
459 		if (atmel_qspi_is_compatible(op, &atmel_qspi_modes[i]))
460 			return i;
461 
462 	return -EOPNOTSUPP;
463 }
464 
atmel_qspi_sama7g5_find_mode(const struct spi_mem_op * op)465 static int atmel_qspi_sama7g5_find_mode(const struct spi_mem_op *op)
466 {
467 	u32 i;
468 
469 	for (i = 0; i < ARRAY_SIZE(atmel_qspi_sama7g5_modes); i++)
470 		if (atmel_qspi_is_compatible(op, &atmel_qspi_sama7g5_modes[i]))
471 			return i;
472 
473 	return -EOPNOTSUPP;
474 }
475 
atmel_qspi_supports_op(struct spi_mem * mem,const struct spi_mem_op * op)476 static bool atmel_qspi_supports_op(struct spi_mem *mem,
477 				   const struct spi_mem_op *op)
478 {
479 	struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
480 	if (!spi_mem_default_supports_op(mem, op))
481 		return false;
482 
483 	if (aq->caps->octal) {
484 		if (atmel_qspi_sama7g5_find_mode(op) < 0)
485 			return false;
486 		else
487 			return true;
488 	}
489 
490 	if (atmel_qspi_find_mode(op) < 0)
491 		return false;
492 
493 	/* special case not supported by hardware */
494 	if (op->addr.nbytes == 2 && op->cmd.buswidth != op->addr.buswidth &&
495 	    op->dummy.nbytes == 0)
496 		return false;
497 
498 	return true;
499 }
500 
501 /*
502  * If the QSPI controller is set in regular SPI mode, set it in
503  * Serial Memory Mode (SMM).
504  */
atmel_qspi_set_serial_memory_mode(struct atmel_qspi * aq)505 static int atmel_qspi_set_serial_memory_mode(struct atmel_qspi *aq)
506 {
507 	int ret = 0;
508 
509 	if (!(aq->mr & QSPI_MR_SMM)) {
510 		aq->mr |= QSPI_MR_SMM;
511 		atmel_qspi_write(aq->mr, aq, QSPI_MR);
512 
513 		if (aq->caps->has_gclk)
514 			ret = atmel_qspi_update_config(aq);
515 	}
516 
517 	return ret;
518 }
519 
/*
 * Program the instruction frame registers (SAMA5D2/SAM9X60 style) for @op.
 *
 * On success, *offset holds the offset of the data access in the QSPI
 * memory-mapped window, derived from the address bytes (0 when the op has
 * no address).
 */
static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
			      const struct spi_mem_op *op, u32 *offset)
{
	u32 iar, icr, ifr;
	u32 dummy_cycles = 0;
	int mode;

	iar = 0;
	icr = QSPI_ICR_INST(op->cmd.opcode);
	ifr = QSPI_IFR_INSTEN;

	/* Translate the op's buswidths into a QSPI_IFR_WIDTH_* value. */
	mode = atmel_qspi_find_mode(op);
	if (mode < 0)
		return mode;
	ifr |= atmel_qspi_modes[mode].config;

	if (op->dummy.nbytes)
		dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;

	/*
	 * The controller allows 24 and 32-bit addressing while NAND-flash
	 * requires 16-bit long. Handling 8-bit long addresses is done using
	 * the option field. For the 16-bit addresses, the workaround depends
	 * of the number of requested dummy bits. If there are 8 or more dummy
	 * cycles, the address is shifted and sent with the first dummy byte.
	 * Otherwise opcode is disabled and the first byte of the address
	 * contains the command opcode (works only if the opcode and address
	 * use the same buswidth). The limitation is when the 16-bit address is
	 * used without enough dummy cycles and the opcode is using a different
	 * buswidth than the address.
	 */
	if (op->addr.buswidth) {
		switch (op->addr.nbytes) {
		case 0:
			break;
		case 1:
			/* 1-byte address: carried by the option field. */
			ifr |= QSPI_IFR_OPTEN | QSPI_IFR_OPTL_8BIT;
			icr |= QSPI_ICR_OPT(op->addr.val & 0xff);
			break;
		case 2:
			if (dummy_cycles < 8 / op->addr.buswidth) {
				/* Merge the opcode into the address bytes. */
				ifr &= ~QSPI_IFR_INSTEN;
				ifr |= QSPI_IFR_ADDREN;
				iar = (op->cmd.opcode << 16) |
					(op->addr.val & 0xffff);
			} else {
				/* Shift the address into the first dummy byte. */
				ifr |= QSPI_IFR_ADDREN;
				iar = (op->addr.val << 8) & 0xffffff;
				dummy_cycles -= 8 / op->addr.buswidth;
			}
			break;
		case 3:
			ifr |= QSPI_IFR_ADDREN;
			iar = op->addr.val & 0xffffff;
			break;
		case 4:
			/*
			 * NOTE(review): only 27 address bits are kept even
			 * though QSPI_IFR_ADDRL selects 32-bit addressing —
			 * confirm the mask against the datasheet.
			 */
			ifr |= QSPI_IFR_ADDREN | QSPI_IFR_ADDRL;
			iar = op->addr.val & 0x7ffffff;
			break;
		default:
			/*
			 * NOTE(review): other paths in this file return
			 * -EOPNOTSUPP; -ENOTSUPP is kernel-internal —
			 * consider unifying.
			 */
			return -ENOTSUPP;
		}
	}

	/* offset of the data access in the QSPI memory space */
	*offset = iar;

	/* Set number of dummy cycles */
	if (dummy_cycles)
		ifr |= QSPI_IFR_NBDUM(dummy_cycles);

	/* Set data enable and data transfer type. */
	if (op->data.nbytes) {
		ifr |= QSPI_IFR_DATAEN;

		if (op->addr.nbytes)
			ifr |= QSPI_IFR_TFRTYP_MEM;
	}

	/* Switch to Serial Memory Mode if not already done. */
	mode = atmel_qspi_set_serial_memory_mode(aq);
	if (mode < 0)
		return mode;

	/* Clear pending interrupts */
	(void)atmel_qspi_read(aq, QSPI_SR);

	/* Set QSPI Instruction Frame registers. */
	if (op->addr.nbytes && !op->data.nbytes)
		atmel_qspi_write(iar, aq, QSPI_IAR);

	if (aq->caps->has_ricr) {
		/* Split read/write instruction code registers. */
		if (op->data.dir == SPI_MEM_DATA_IN)
			atmel_qspi_write(icr, aq, QSPI_RICR);
		else
			atmel_qspi_write(icr, aq, QSPI_WICR);
	} else {
		/* Single ICR: mark writes via the SAMA5D2 transfer bit. */
		if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
			ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR;

		atmel_qspi_write(icr, aq, QSPI_ICR);
	}

	atmel_qspi_write(ifr, aq, QSPI_IFR);

	return 0;
}
626 
/*
 * Wait until all bits of @irq_mask are set in QSPI_SR: first poll once,
 * then arm the interrupt (QSPI_IER) and sleep on cmd_completion, which
 * the IRQ handler signals. The interrupt is always disabled again
 * (QSPI_IDR) on exit. Returns 0 or -ETIMEDOUT after ATMEL_QSPI_TIMEOUT ms.
 */
static int atmel_qspi_wait_for_completion(struct atmel_qspi *aq, u32 irq_mask)
{
	int err = 0;
	u32 sr;

	/* Poll INSTRuction End status */
	sr = atmel_qspi_read(aq, QSPI_SR);
	if ((sr & irq_mask) == irq_mask)
		return 0;

	/* Wait for INSTRuction End interrupt */
	reinit_completion(&aq->cmd_completion);
	/* Remember the bits already seen so the IRQ handler can accumulate. */
	aq->pending = sr & irq_mask;
	aq->irq_mask = irq_mask;
	atmel_qspi_write(irq_mask, aq, QSPI_IER);
	if (!wait_for_completion_timeout(&aq->cmd_completion,
					 msecs_to_jiffies(ATMEL_QSPI_TIMEOUT)))
		err = -ETIMEDOUT;
	atmel_qspi_write(irq_mask, aq, QSPI_IDR);

	return err;
}
649 
/*
 * Move the op's data through the memory-mapped window (PIO only) and wait
 * for command completion. @offset is the window offset computed by
 * atmel_qspi_set_cfg(). The rmb()/wmb() barriers order the AHB data
 * accesses against the following APB register accesses.
 */
static int atmel_qspi_transfer(struct spi_mem *mem,
			       const struct spi_mem_op *op, u32 offset)
{
	struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);

	/* Skip to the final steps if there is no data */
	if (!op->data.nbytes)
		return atmel_qspi_wait_for_completion(aq,
						      QSPI_SR_CMD_COMPLETED);

	/* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
	(void)atmel_qspi_read(aq, QSPI_IFR);

	/* Send/Receive data */
	if (op->data.dir == SPI_MEM_DATA_IN) {
		memcpy_fromio(op->data.buf.in, aq->mem + offset,
			      op->data.nbytes);

		/* Synchronize AHB and APB accesses again */
		rmb();
	} else {
		memcpy_toio(aq->mem + offset, op->data.buf.out,
			    op->data.nbytes);

		/* Synchronize AHB and APB accesses again */
		wmb();
	}

	/* Release the chip-select */
	atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);

	return atmel_qspi_wait_for_completion(aq, QSPI_SR_CMD_COMPLETED);
}
683 
/*
 * Program the SAMA7G5 instruction frame registers for @op. On success,
 * *offset holds the offset of the data access in the memory-mapped
 * window. Ends with an UPDCFG handshake to latch the configuration.
 */
static int atmel_qspi_sama7g5_set_cfg(struct atmel_qspi *aq,
				      const struct spi_mem_op *op, u32 *offset)
{
	u32 iar, icr, ifr;
	int mode, ret;

	iar = 0;
	icr = FIELD_PREP(QSPI_ICR_INST_MASK_SAMA7G5, op->cmd.opcode);
	ifr = QSPI_IFR_INSTEN;

	/* Translate the op's buswidths into a QSPI_IFR_WIDTH_* value. */
	mode = atmel_qspi_sama7g5_find_mode(op);
	if (mode < 0)
		return mode;
	ifr |= atmel_qspi_sama7g5_modes[mode].config;

	if (op->dummy.buswidth && op->dummy.nbytes) {
		/* In DTR mode each clock cycle transfers on both edges. */
		if (op->addr.dtr && op->dummy.dtr && op->data.dtr)
			ifr |= QSPI_IFR_NBDUM(op->dummy.nbytes * 8 /
					      (2 * op->dummy.buswidth));
		else
			ifr |= QSPI_IFR_NBDUM(op->dummy.nbytes * 8 /
					      op->dummy.buswidth);
	}

	if (op->addr.buswidth && op->addr.nbytes) {
		/* ADDRL encodes the address length as nbytes - 1. */
		ifr |= FIELD_PREP(QSPI_IFR_ADDRL_SAMA7G5, op->addr.nbytes - 1) |
		       QSPI_IFR_ADDREN;
		iar = FIELD_PREP(QSPI_IAR_ADDR, op->addr.val);
	}

	/* Enable DDR (and DQS sampling) for fully-DTR ops. */
	if (op->addr.dtr && op->dummy.dtr && op->data.dtr) {
		ifr |= QSPI_IFR_DDREN;
		if (op->cmd.dtr)
			ifr |= QSPI_IFR_DDRCMDEN;

		ifr |= QSPI_IFR_DQSEN;
	}

	/* Any octal phase selects the Octaflash protocol type. */
	if (op->cmd.buswidth == 8 || op->addr.buswidth == 8 ||
	    op->data.buswidth == 8)
		ifr |= FIELD_PREP(QSPI_IFR_PROTTYP, QSPI_IFR_PROTTYP_OCTAFLASH);

	/* offset of the data access in the QSPI memory space */
	*offset = iar;

	/* Set data enable */
	if (op->data.nbytes) {
		ifr |= QSPI_IFR_DATAEN;

		if (op->addr.nbytes)
			ifr |= QSPI_IFR_TFRTYP_MEM;
	}

	ret = atmel_qspi_set_serial_memory_mode(aq);
	if (ret < 0)
		return ret;

	/* Clear pending interrupts */
	(void)atmel_qspi_read(aq, QSPI_SR);

	/* Set QSPI Instruction Frame registers */
	if (op->addr.nbytes && !op->data.nbytes)
		atmel_qspi_write(iar, aq, QSPI_IAR);

	if (op->data.dir == SPI_MEM_DATA_IN) {
		atmel_qspi_write(icr, aq, QSPI_RICR);
	} else {
		atmel_qspi_write(icr, aq, QSPI_WICR);
		/* Writes must declare their byte count up front. */
		if (op->data.nbytes)
			atmel_qspi_write(FIELD_PREP(QSPI_WRACNT_NBWRA,
						    op->data.nbytes),
					 aq, QSPI_WRACNT);
	}

	atmel_qspi_write(ifr, aq, QSPI_IFR);

	return atmel_qspi_update_config(aq);
}
762 
atmel_qspi_dma_callback(void * param)763 static void atmel_qspi_dma_callback(void *param)
764 {
765 	struct atmel_qspi *aq = param;
766 
767 	complete(&aq->dma_completion);
768 }
769 
/*
 * Run one DMA memcpy of @len bytes from @dma_src to @dma_dst on @chan and
 * wait for it to finish (completion signalled by atmel_qspi_dma_callback).
 * Returns 0, a submit error, -EIO when no descriptor could be prepared,
 * or -ETIMEDOUT after 20 * ATMEL_QSPI_TIMEOUT ms (channel terminated).
 */
static int atmel_qspi_dma_xfer(struct atmel_qspi *aq, struct dma_chan *chan,
			       dma_addr_t dma_dst, dma_addr_t dma_src,
			       unsigned int len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	int ret;

	tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dev_err(&aq->pdev->dev, "device_prep_dma_memcpy error\n");
		return -EIO;
	}

	reinit_completion(&aq->dma_completion);
	tx->callback = atmel_qspi_dma_callback;
	tx->callback_param = aq;
	/* NOTE(review): dmaengine_submit(tx) is the usual wrapper here. */
	cookie = tx->tx_submit(tx);
	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(&aq->pdev->dev, "dma_submit_error %d\n", cookie);
		return ret;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_timeout(&aq->dma_completion,
					  msecs_to_jiffies(20 * ATMEL_QSPI_TIMEOUT));
	if (ret == 0) {
		/* Timed out: stop the channel before giving up. */
		dmaengine_terminate_sync(chan);
		dev_err(&aq->pdev->dev, "DMA wait_for_completion_timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
806 
/*
 * DMA-read the op's data: copy from the memory-mapped window (starting at
 * physical base + @loff) into each mapped scatterlist segment in turn.
 */
static int atmel_qspi_dma_rx_xfer(struct spi_mem *mem,
				  const struct spi_mem_op *op,
				  struct sg_table *sgt, loff_t loff)
{
	struct atmel_qspi *aq =
		spi_controller_get_devdata(mem->spi->controller);
	struct scatterlist *sg;
	dma_addr_t dma_src;
	unsigned int i, len;
	int ret;

	dma_src = aq->mmap_phys_base + loff;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		len = sg_dma_len(sg);
		ret = atmel_qspi_dma_xfer(aq, aq->rx_chan, sg_dma_address(sg),
					  dma_src, len);
		if (ret)
			return ret;
		/* Advance the window source past the segment just read. */
		dma_src += len;
	}

	return 0;
}
831 
/*
 * DMA-write the op's data: copy each mapped scatterlist segment into the
 * memory-mapped window (starting at physical base + @loff) in turn.
 */
static int atmel_qspi_dma_tx_xfer(struct spi_mem *mem,
				  const struct spi_mem_op *op,
				  struct sg_table *sgt, loff_t loff)
{
	struct atmel_qspi *aq =
		spi_controller_get_devdata(mem->spi->controller);
	struct scatterlist *sg;
	dma_addr_t dma_dst;
	unsigned int i, len;
	int ret;

	dma_dst = aq->mmap_phys_base + loff;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		len = sg_dma_len(sg);
		ret = atmel_qspi_dma_xfer(aq, aq->tx_chan, dma_dst,
					  sg_dma_address(sg), len);
		if (ret)
			return ret;
		/* Advance the window destination past the segment written. */
		dma_dst += len;
	}

	return 0;
}
856 
/*
 * DMA the op's data through the memory-mapped window: map the data buffer
 * for DMA, run the RX or TX scatter-gather copy, then unmap.
 */
static int atmel_qspi_dma_transfer(struct spi_mem *mem,
				   const struct spi_mem_op *op, loff_t loff)
{
	struct spi_controller *ctrl = mem->spi->controller;
	struct sg_table sgt;
	int err;

	err = spi_controller_dma_map_mem_op_data(ctrl, op, &sgt);
	if (err)
		return err;

	err = (op->data.dir == SPI_MEM_DATA_IN) ?
		atmel_qspi_dma_rx_xfer(mem, op, &sgt, loff) :
		atmel_qspi_dma_tx_xfer(mem, op, &sgt, loff);

	spi_controller_dma_unmap_mem_op_data(ctrl, op, &sgt);

	return err;
}
877 
/*
 * SAMA7G5 transfer hook: start the command, move data by DMA (for
 * memory-mapped ops larger than ATMEL_QSPI_DMA_MIN_BYTES when a channel
 * is available) or by PIO, then release the chip-select and wait for
 * QSPI_SR_CSRA (chip-select rise).
 */
static int atmel_qspi_sama7g5_transfer(struct spi_mem *mem,
				       const struct spi_mem_op *op, u32 offset)
{
	struct atmel_qspi *aq =
		spi_controller_get_devdata(mem->spi->controller);
	u32 val;
	int ret;

	if (!op->data.nbytes) {
		/* Start the transfer. */
		ret = atmel_qspi_reg_sync(aq);
		if (ret)
			return ret;
		atmel_qspi_write(QSPI_CR_STTFR, aq, QSPI_CR);

		return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA);
	}

	/* Send/Receive data. */
	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (aq->rx_chan && op->addr.nbytes &&
		    op->data.nbytes > ATMEL_QSPI_DMA_MIN_BYTES) {
			ret = atmel_qspi_dma_transfer(mem, op, offset);
			if (ret)
				return ret;
		} else {
			memcpy_fromio(op->data.buf.in, aq->mem + offset,
				      op->data.nbytes);
		}

		/* Memory-mapped reads: wait until the read logic is idle. */
		if (op->addr.nbytes) {
			ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
						 !(val & QSPI_SR2_RBUSY), 40,
						 ATMEL_QSPI_SYNC_TIMEOUT);
			if (ret)
				return ret;
		}
	} else {
		if (aq->tx_chan && op->addr.nbytes &&
		    op->data.nbytes > ATMEL_QSPI_DMA_MIN_BYTES) {
			ret = atmel_qspi_dma_transfer(mem, op, offset);
			if (ret)
				return ret;
		} else {
			memcpy_toio(aq->mem + offset, op->data.buf.out,
				    op->data.nbytes);
		}

		/* Wait for the Last WRAccess (all NBWRA bytes written). */
		ret = atmel_qspi_wait_for_completion(aq, QSPI_SR_LWRA);
		if (ret)
			return ret;
	}

	/* Release the chip-select. */
	ret = atmel_qspi_reg_sync(aq);
	if (ret)
		return ret;
	atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);

	return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA);
}
939 
/*
 * spi-mem .exec_op hook: validate the op against the memory-mapped window,
 * resume the device, then run the generation-specific set_cfg/transfer
 * pair. Runtime PM is always released on exit.
 */
static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
	u32 offset;
	int err;

	/*
	 * Check if the address exceeds the MMIO window size. An improvement
	 * would be to add support for regular SPI mode and fall back to it
	 * when the flash memories overrun the controller's memory space.
	 */
	if (op->addr.val + op->data.nbytes > aq->mmap_size)
		return -EOPNOTSUPP;

	/* The instruction address register holds at most 4 address bytes. */
	if (op->addr.nbytes > 4)
		return -EOPNOTSUPP;

	err = pm_runtime_resume_and_get(&aq->pdev->dev);
	if (err < 0)
		return err;

	err = aq->ops->set_cfg(aq, op, &offset);
	if (err)
		goto pm_runtime_put;

	err = aq->ops->transfer(mem, op, offset);

pm_runtime_put:
	pm_runtime_put_autosuspend(&aq->pdev->dev);
	return err;
}
971 
/* Name the spi-mem device after its parent (controller) device. */
static const char *atmel_qspi_get_name(struct spi_mem *spimem)
{
	return dev_name(spimem->spi->dev.parent);
}
976 
/* spi-mem operations exposed to the SPI core. */
static const struct spi_controller_mem_ops atmel_qspi_mem_ops = {
	.supports_op = atmel_qspi_supports_op,
	.exec_op = atmel_qspi_exec_op,
	.get_name = atmel_qspi_get_name
};
982 
/*
 * Run the pad calibration sequence (octal-capable controllers).
 *
 * Picks a peripheral-clock divider from the pcal[] table, temporarily
 * disables the controller if it is enabled, programs QSPI_PCALCFG, starts
 * the calibration through QSPI_CR, then polls QSPI_SR2 until the DLL is
 * locked and the calibration engine is idle.
 *
 * Returns 0 on success, -EINVAL if the peripheral clock rate cannot be
 * read, or a negative errno from register sync/polling.
 */
static int atmel_qspi_set_pad_calibration(struct atmel_qspi *aq)
{
	unsigned long pclk_rate;
	u32 status, val;
	int i, ret;
	u8 pclk_div = 0;

	pclk_rate = clk_get_rate(aq->pclk);
	if (!pclk_rate)
		return -EINVAL;

	/* Pick the divider matching the current peripheral clock rate. */
	for (i = 0; i < ATMEL_QSPI_PCAL_ARRAY_SIZE; i++) {
		if (pclk_rate <= pcal[i].pclk_rate) {
			pclk_div = pcal[i].pclk_div;
			break;
		}
	}

	/*
	 * Use the biggest divider in case the peripheral clock exceeds
	 * 200MHZ.
	 */
	if (pclk_rate > pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE - 1].pclk_rate)
		pclk_div = pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE - 1].pclk_div;

	/* Disable QSPI while configuring the pad calibration. */
	status = atmel_qspi_read(aq, QSPI_SR2);
	if (status & QSPI_SR2_QSPIENS) {
		ret = atmel_qspi_reg_sync(aq);
		if (ret)
			return ret;
		atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
	}

	/*
	 * The analog circuitry is not shut down at the end of the calibration
	 * and the start-up time is only required for the first calibration
	 * sequence, thus increasing performance. Set the delay between the Pad
	 * calibration analog circuitry and the calibration request to 2us.
	 */
	atmel_qspi_write(QSPI_PCALCFG_AAON |
			 FIELD_PREP(QSPI_PCALCFG_CLKDIV, pclk_div) |
			 FIELD_PREP(QSPI_PCALCFG_CALCNT,
				    2 * (pclk_rate / 1000000)),
			 aq, QSPI_PCALCFG);

	/* DLL On + start calibration. */
	atmel_qspi_write(QSPI_CR_DLLON | QSPI_CR_STPCAL, aq, QSPI_CR);

	/* Check synchronization status before updating configuration. */
	ret =  readl_poll_timeout(aq->regs + QSPI_SR2, val,
				  (val & QSPI_SR2_DLOCK) &&
				  !(val & QSPI_SR2_CALBSY), 40,
				  ATMEL_QSPI_TIMEOUT);

	/* Refresh analogic blocks every 1 ms.*/
	atmel_qspi_write(FIELD_PREP(QSPI_REFRESH_DELAY_COUNTER,
				    aq->target_max_speed_hz / 1000),
			 aq, QSPI_REFRESH);

	return ret;
}
1045 
/*
 * Configure and enable the QSPI generic clock (gclk).
 *
 * The DLL must be unlocked while the clock is reprogrammed, so it is
 * switched off first if QSPI_SR2 reports it as locked. QSPI_DLLCFG is
 * then set according to the target frequency before the gclk rate is
 * applied and the clock enabled.
 *
 * Returns 0 on success or a negative errno.
 */
static int atmel_qspi_set_gclk(struct atmel_qspi *aq)
{
	u32 status, val;
	int ret;

	/* Disable DLL before setting GCLK */
	status = atmel_qspi_read(aq, QSPI_SR2);
	if (status & QSPI_SR2_DLOCK) {
		atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR);

		/* Wait for the DLL to actually unlock. */
		ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
					 !(val & QSPI_SR2_DLOCK), 40,
					 ATMEL_QSPI_TIMEOUT);
		if (ret)
			return ret;
	}

	/* Select the DLL delay range matching the target frequency. */
	if (aq->target_max_speed_hz > QSPI_DLLCFG_THRESHOLD_FREQ)
		atmel_qspi_write(QSPI_DLLCFG_RANGE, aq, QSPI_DLLCFG);
	else
		atmel_qspi_write(0, aq, QSPI_DLLCFG);

	ret = clk_set_rate(aq->gclk, aq->target_max_speed_hz);
	if (ret) {
		dev_err(&aq->pdev->dev, "Failed to set generic clock rate.\n");
		return ret;
	}

	/* Enable the QSPI generic clock */
	ret = clk_prepare_enable(aq->gclk);
	if (ret)
		dev_err(&aq->pdev->dev, "Failed to enable generic clock.\n");

	return ret;
}
1081 
/*
 * Bring up a SAMA7G5-class controller: program the generic clock, run
 * pad calibration (octal variants) or lock the DLL, switch to Serial
 * Memory Mode and enable the controller.
 *
 * Returns 0 on success or a negative errno.
 */
static int atmel_qspi_sama7g5_init(struct atmel_qspi *aq)
{
	u32 val;
	int ret;

	ret = atmel_qspi_set_gclk(aq);
	if (ret)
		return ret;

	if (aq->caps->octal) {
		ret = atmel_qspi_set_pad_calibration(aq);
		if (ret)
			return ret;
	} else {
		atmel_qspi_write(QSPI_CR_DLLON, aq, QSPI_CR);
		/*
		 * NOTE(review): this poll result is overwritten below before
		 * being checked, so a DLL lock timeout is silently ignored
		 * here — confirm whether that is intentional.
		 */
		ret =  readl_poll_timeout(aq->regs + QSPI_SR2, val,
					  (val & QSPI_SR2_DLOCK), 40,
					  ATMEL_QSPI_TIMEOUT);
	}

	/* Enable the DQS delay line and switch to Serial Memory Mode. */
	aq->mr |= QSPI_MR_DQSDLYEN;
	ret = atmel_qspi_set_serial_memory_mode(aq);
	if (ret < 0)
		return ret;

	/* Enable the QSPI controller. */
	atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
				 val & QSPI_SR2_QSPIENS, 40,
				 ATMEL_QSPI_SYNC_TIMEOUT);
	if (ret)
		return ret;

	/* On octal parts, wait for the first pad refresh to complete. */
	if (aq->caps->octal) {
		ret = readl_poll_timeout(aq->regs + QSPI_SR, val,
					 val & QSPI_SR_RFRSHD, 40,
					 ATMEL_QSPI_TIMEOUT);
	}

	/* Arm the transfer timeout counter. */
	atmel_qspi_write(QSPI_TOUT_TCNTM, aq, QSPI_TOUT);
	return ret;
}
1125 
/*
 * Per-device setup for SAMA7G5 parts: record the target's maximum speed
 * and (re)initialize the controller accordingly.
 */
static int atmel_qspi_sama7g5_setup(struct spi_device *spi)
{
	struct atmel_qspi *aq = spi_controller_get_devdata(spi->controller);

	/* The controller can communicate with a single peripheral device (target). */
	aq->target_max_speed_hz = spi->max_speed_hz;

	return atmel_qspi_sama7g5_init(aq);
}
1135 
atmel_qspi_setup(struct spi_device * spi)1136 static int atmel_qspi_setup(struct spi_device *spi)
1137 {
1138 	struct spi_controller *ctrl = spi->controller;
1139 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1140 	unsigned long src_rate;
1141 	u32 scbr;
1142 	int ret;
1143 
1144 	if (ctrl->busy)
1145 		return -EBUSY;
1146 
1147 	if (!spi->max_speed_hz)
1148 		return -EINVAL;
1149 
1150 	if (aq->caps->has_gclk)
1151 		return atmel_qspi_sama7g5_setup(spi);
1152 
1153 	src_rate = clk_get_rate(aq->pclk);
1154 	if (!src_rate)
1155 		return -EINVAL;
1156 
1157 	/* Compute the QSPI baudrate */
1158 	scbr = DIV_ROUND_UP(src_rate, spi->max_speed_hz);
1159 	if (scbr > 0)
1160 		scbr--;
1161 
1162 	ret = pm_runtime_resume_and_get(ctrl->dev.parent);
1163 	if (ret < 0)
1164 		return ret;
1165 
1166 	aq->scr &= ~QSPI_SCR_SCBR_MASK;
1167 	aq->scr |= QSPI_SCR_SCBR(scbr);
1168 	atmel_qspi_write(aq->scr, aq, QSPI_SCR);
1169 
1170 	pm_runtime_put_autosuspend(ctrl->dev.parent);
1171 
1172 	return 0;
1173 }
1174 
atmel_qspi_set_cs_timing(struct spi_device * spi)1175 static int atmel_qspi_set_cs_timing(struct spi_device *spi)
1176 {
1177 	struct spi_controller *ctrl = spi->controller;
1178 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1179 	unsigned long clk_rate;
1180 	u32 cs_inactive;
1181 	u32 cs_setup;
1182 	u32 cs_hold;
1183 	int delay;
1184 	int ret;
1185 
1186 	clk_rate = clk_get_rate(aq->pclk);
1187 	if (!clk_rate)
1188 		return -EINVAL;
1189 
1190 	/* hold */
1191 	delay = spi_delay_to_ns(&spi->cs_hold, NULL);
1192 	if (aq->mr & QSPI_MR_SMM) {
1193 		if (delay > 0)
1194 			dev_warn(&aq->pdev->dev,
1195 				 "Ignoring cs_hold, must be 0 in Serial Memory Mode.\n");
1196 		cs_hold = 0;
1197 	} else {
1198 		delay = spi_delay_to_ns(&spi->cs_hold, NULL);
1199 		if (delay < 0)
1200 			return delay;
1201 
1202 		cs_hold = DIV_ROUND_UP((delay * DIV_ROUND_UP(clk_rate, 1000000)), 32000);
1203 	}
1204 
1205 	/* setup */
1206 	delay = spi_delay_to_ns(&spi->cs_setup, NULL);
1207 	if (delay < 0)
1208 		return delay;
1209 
1210 	cs_setup = DIV_ROUND_UP((delay * DIV_ROUND_UP(clk_rate, 1000000)),
1211 				1000);
1212 
1213 	/* inactive */
1214 	delay = spi_delay_to_ns(&spi->cs_inactive, NULL);
1215 	if (delay < 0)
1216 		return delay;
1217 	cs_inactive = DIV_ROUND_UP((delay * DIV_ROUND_UP(clk_rate, 1000000)), 1000);
1218 
1219 	ret = pm_runtime_resume_and_get(ctrl->dev.parent);
1220 	if (ret < 0)
1221 		return ret;
1222 
1223 	aq->scr &= ~QSPI_SCR_DLYBS_MASK;
1224 	aq->scr |= QSPI_SCR_DLYBS(cs_setup);
1225 	atmel_qspi_write(aq->scr, aq, QSPI_SCR);
1226 
1227 	aq->mr &= ~(QSPI_MR_DLYBCT_MASK | QSPI_MR_DLYCS_MASK);
1228 	aq->mr |= QSPI_MR_DLYBCT(cs_hold) | QSPI_MR_DLYCS(cs_inactive);
1229 	atmel_qspi_write(aq->mr, aq, QSPI_MR);
1230 
1231 	pm_runtime_put_autosuspend(ctrl->dev.parent);
1232 
1233 	return 0;
1234 }
1235 
/*
 * Reset and enable the QSPI controller.
 *
 * Parts with a generic clock only get a synchronized software reset
 * here; the full bring-up happens in atmel_qspi_sama7g5_init(). Other
 * parts are reset, placed in Serial Memory Mode and enabled right away.
 */
static int atmel_qspi_init(struct atmel_qspi *aq)
{
	int ret;

	if (aq->caps->has_gclk) {
		ret = atmel_qspi_reg_sync(aq);
		if (ret)
			return ret;
		atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
		return 0;
	}

	/* Reset the QSPI controller */
	atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);

	/* Set the QSPI controller by default in Serial Memory Mode */
	ret = atmel_qspi_set_serial_memory_mode(aq);
	if (ret < 0)
		return ret;

	/* Enable the QSPI controller */
	atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
	return 0;
}
1260 
atmel_qspi_interrupt(int irq,void * dev_id)1261 static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
1262 {
1263 	struct atmel_qspi *aq = dev_id;
1264 	u32 status, mask, pending;
1265 
1266 	status = atmel_qspi_read(aq, QSPI_SR);
1267 	mask = atmel_qspi_read(aq, QSPI_IMR);
1268 	pending = status & mask;
1269 
1270 	if (!pending)
1271 		return IRQ_NONE;
1272 
1273 	aq->pending |= pending;
1274 	if ((aq->pending & aq->irq_mask) == aq->irq_mask)
1275 		complete(&aq->cmd_completion);
1276 
1277 	return IRQ_HANDLED;
1278 }
1279 
/*
 * Request the RX and TX DMA channels and publish them on the
 * controller. On failure the channel pointers are cleared so transfers
 * fall back to PIO; the error is still returned so the caller can
 * propagate -EPROBE_DEFER.
 */
static int atmel_qspi_dma_init(struct spi_controller *ctrl)
{
	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
	int ret;

	aq->rx_chan = devm_dma_request_chan(&aq->pdev->dev, "rx");
	if (IS_ERR(aq->rx_chan)) {
		ret = dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->rx_chan),
				    "RX DMA channel is not available\n");
		aq->rx_chan = NULL;
		return ret;
	}

	aq->tx_chan = devm_dma_request_chan(&aq->pdev->dev, "tx");
	if (IS_ERR(aq->tx_chan)) {
		ret = dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->tx_chan),
				    "TX DMA channel is not available\n");
		/* Forget the RX channel too: DMA is all-or-nothing here. */
		aq->rx_chan = NULL;
		aq->tx_chan = NULL;
		return ret;
	}

	ctrl->dma_rx = aq->rx_chan;
	ctrl->dma_tx = aq->tx_chan;
	init_completion(&aq->dma_completion);

	dev_info(&aq->pdev->dev, "Using %s (tx) and %s (rx) for DMA transfers\n",
		 dma_chan_name(aq->tx_chan), dma_chan_name(aq->rx_chan));

	return 0;
}
1311 
/* Callbacks for controllers without a generic clock. */
static const struct atmel_qspi_ops atmel_qspi_ops = {
	.set_cfg = atmel_qspi_set_cfg,
	.transfer = atmel_qspi_transfer,
};

/* Callbacks for SAMA7G5-class controllers (generic clock based). */
static const struct atmel_qspi_ops atmel_qspi_sama7g5_ops = {
	.set_cfg = atmel_qspi_sama7g5_set_cfg,
	.transfer = atmel_qspi_sama7g5_transfer,
};
1321 
/*
 * Probe: allocate the SPI controller, map the register block and the
 * AHB memory window, acquire the clocks required by the part's caps
 * (peripheral clock, plus system or generic clock), optionally set up
 * DMA, request the IRQ, enable runtime PM, initialize the hardware and
 * register the controller.
 *
 * All resources are devm-managed, so error paths can simply return.
 */
static int atmel_qspi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctrl;
	struct atmel_qspi *aq;
	struct resource *res;
	int irq, err = 0;

	ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(*aq));
	if (!ctrl)
		return -ENOMEM;

	aq = spi_controller_get_devdata(ctrl);

	aq->caps = of_device_get_match_data(&pdev->dev);
	if (!aq->caps) {
		dev_err(&pdev->dev, "Could not retrieve QSPI caps\n");
		return -EINVAL;
	}

	init_completion(&aq->cmd_completion);
	aq->pdev = pdev;

	ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
	if (aq->caps->octal)
		ctrl->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;

	if (aq->caps->has_gclk)
		aq->ops = &atmel_qspi_sama7g5_ops;
	else
		aq->ops = &atmel_qspi_ops;

	ctrl->max_speed_hz = aq->caps->max_speed_hz;
	ctrl->setup = atmel_qspi_setup;
	ctrl->set_cs_timing = atmel_qspi_set_cs_timing;
	ctrl->bus_num = -1;
	ctrl->mem_ops = &atmel_qspi_mem_ops;
	ctrl->num_chipselect = 1;
	ctrl->dev.of_node = pdev->dev.of_node;
	platform_set_drvdata(pdev, ctrl);

	/* Map the registers */
	aq->regs = devm_platform_ioremap_resource_byname(pdev, "qspi_base");
	if (IS_ERR(aq->regs))
		return dev_err_probe(&pdev->dev, PTR_ERR(aq->regs),
				     "missing registers\n");

	/* Map the AHB memory */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mmap");
	aq->mem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(aq->mem))
		return dev_err_probe(&pdev->dev, PTR_ERR(aq->mem),
				     "missing AHB memory\n");

	aq->mmap_size = resource_size(res);
	aq->mmap_phys_base = (dma_addr_t)res->start;

	/* Get the peripheral clock; fall back to an unnamed clock. */
	aq->pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
	if (IS_ERR(aq->pclk))
		aq->pclk = devm_clk_get_enabled(&pdev->dev, NULL);

	if (IS_ERR(aq->pclk))
		return dev_err_probe(&pdev->dev, PTR_ERR(aq->pclk),
				     "missing peripheral clock\n");

	if (aq->caps->has_qspick) {
		/* Get the QSPI system clock */
		aq->qspick = devm_clk_get_enabled(&pdev->dev, "qspick");
		if (IS_ERR(aq->qspick)) {
			dev_err(&pdev->dev, "missing system clock\n");
			err = PTR_ERR(aq->qspick);
			return err;
		}

	} else if (aq->caps->has_gclk) {
		/* Get the QSPI generic clock */
		aq->gclk = devm_clk_get(&pdev->dev, "gclk");
		if (IS_ERR(aq->gclk)) {
			dev_err(&pdev->dev, "missing Generic clock\n");
			err = PTR_ERR(aq->gclk);
			return err;
		}
	}

	/* DMA is optional: only -EPROBE_DEFER aborts the probe. */
	if (aq->caps->has_dma) {
		err = atmel_qspi_dma_init(ctrl);
		if (err == -EPROBE_DEFER)
			return err;
	}

	/* Request the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt,
			       0, dev_name(&pdev->dev), aq);
	if (err)
		return err;

	/* Hold a runtime PM reference until registration succeeds. */
	pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
	pm_runtime_use_autosuspend(&pdev->dev);
	devm_pm_runtime_set_active_enabled(&pdev->dev);
	devm_pm_runtime_get_noresume(&pdev->dev);

	err = atmel_qspi_init(aq);
	if (err)
		return err;

	err = spi_register_controller(ctrl);
	if (err)
		return err;

	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;
}
1439 
atmel_qspi_sama7g5_suspend(struct atmel_qspi * aq)1440 static int atmel_qspi_sama7g5_suspend(struct atmel_qspi *aq)
1441 {
1442 	int ret;
1443 	u32 val;
1444 
1445 	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
1446 				 !(val & QSPI_SR2_RBUSY) &&
1447 				 (val & QSPI_SR2_HIDLE), 40,
1448 				 ATMEL_QSPI_SYNC_TIMEOUT);
1449 	if (ret)
1450 		return ret;
1451 
1452 	atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
1453 	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
1454 				 !(val & QSPI_SR2_QSPIENS), 40,
1455 				 ATMEL_QSPI_SYNC_TIMEOUT);
1456 	if (ret)
1457 		return ret;
1458 
1459 	clk_disable_unprepare(aq->gclk);
1460 
1461 	atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR);
1462 	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
1463 				 !(val & QSPI_SR2_DLOCK), 40,
1464 				 ATMEL_QSPI_TIMEOUT);
1465 	if (ret)
1466 		return ret;
1467 
1468 	ret =  readl_poll_timeout(aq->regs + QSPI_SR2, val,
1469 				  !(val & QSPI_SR2_CALBSY), 40,
1470 				  ATMEL_QSPI_TIMEOUT);
1471 	if (ret)
1472 		return ret;
1473 
1474 	return 0;
1475 }
1476 
/*
 * Remove: unregister the controller, then attempt to quiesce the
 * hardware. If runtime resume fails, the clocks are already off and
 * hardware access is skipped entirely.
 */
static void atmel_qspi_remove(struct platform_device *pdev)
{
	struct spi_controller *ctrl = platform_get_drvdata(pdev);
	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
	int ret;

	spi_unregister_controller(ctrl);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret >= 0) {
		if (aq->caps->has_gclk) {
			/* Full shutdown sequence; failure is only logged. */
			ret = atmel_qspi_sama7g5_suspend(aq);
			if (ret)
				dev_warn(&pdev->dev, "Failed to de-init device on remove: %d\n", ret);
			return;
		}

		atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
	} else {
		/*
		 * atmel_qspi_runtime_{suspend,resume} just disable and enable
		 * the two clks respectively. So after resume failed these are
		 * off, and we skip hardware access and disabling these clks again.
		 */
		dev_warn(&pdev->dev, "Failed to resume device on remove\n");
	}
}
1504 
/*
 * System suspend: resume the device if needed, disable the controller,
 * then release the clocks. SAMA7G5 parts require the full shutdown
 * sequence; other parts just disable the controller and let runtime PM
 * force-suspend gate the clocks before they are unprepared.
 */
static int __maybe_unused atmel_qspi_suspend(struct device *dev)
{
	struct spi_controller *ctrl = dev_get_drvdata(dev);
	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	if (aq->caps->has_gclk) {
		ret = atmel_qspi_sama7g5_suspend(aq);
		clk_disable_unprepare(aq->pclk);
		return ret;
	}

	atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_force_suspend(dev);

	clk_unprepare(aq->qspick);
	clk_unprepare(aq->pclk);

	return 0;
}
1531 
/*
 * System resume: re-prepare the clocks and re-initialize the
 * controller, restoring the cached serial clock register. SAMA7G5
 * parts go through their dedicated init sequence instead.
 */
static int __maybe_unused atmel_qspi_resume(struct device *dev)
{
	struct spi_controller *ctrl = dev_get_drvdata(dev);
	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
	int ret;

	ret = clk_prepare(aq->pclk);
	if (ret)
		return ret;

	ret = clk_prepare(aq->qspick);
	if (ret) {
		clk_unprepare(aq->pclk);
		return ret;
	}

	if (aq->caps->has_gclk)
		return atmel_qspi_sama7g5_init(aq);

	ret = pm_runtime_force_resume(dev);
	if (ret < 0)
		return ret;

	/*
	 * NOTE(review): the return value of atmel_qspi_init() is ignored
	 * here, so a failed re-init is not reported to the PM core —
	 * confirm whether that is intentional.
	 */
	atmel_qspi_init(aq);

	/* Restore the cached baud rate / delay configuration. */
	atmel_qspi_write(aq->scr, aq, QSPI_SCR);

	pm_runtime_put_autosuspend(dev);

	return 0;
}
1563 
atmel_qspi_runtime_suspend(struct device * dev)1564 static int __maybe_unused atmel_qspi_runtime_suspend(struct device *dev)
1565 {
1566 	struct spi_controller *ctrl = dev_get_drvdata(dev);
1567 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1568 
1569 	clk_disable(aq->qspick);
1570 	clk_disable(aq->pclk);
1571 
1572 	return 0;
1573 }
1574 
atmel_qspi_runtime_resume(struct device * dev)1575 static int __maybe_unused atmel_qspi_runtime_resume(struct device *dev)
1576 {
1577 	struct spi_controller *ctrl = dev_get_drvdata(dev);
1578 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1579 	int ret;
1580 
1581 	ret = clk_enable(aq->pclk);
1582 	if (ret)
1583 		return ret;
1584 
1585 	ret = clk_enable(aq->qspick);
1586 	if (ret)
1587 		clk_disable(aq->pclk);
1588 
1589 	return ret;
1590 }
1591 
/* System sleep and runtime PM callbacks. */
static const struct dev_pm_ops __maybe_unused atmel_qspi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(atmel_qspi_suspend, atmel_qspi_resume)
	SET_RUNTIME_PM_OPS(atmel_qspi_runtime_suspend,
			   atmel_qspi_runtime_resume, NULL)
};
1597 
/* SAMA5D2: baseline controller, no extra clocks, no DMA. */
static const struct atmel_qspi_caps atmel_sama5d2_qspi_caps = {};

/* SAM9X60: separate system clock and a dedicated read instruction register. */
static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps = {
	.has_qspick = true,
	.has_ricr = true,
};

/* SAMA7G5 OSPI: generic clock, octal mode and DMA support. */
static const struct atmel_qspi_caps atmel_sama7g5_ospi_caps = {
	.max_speed_hz = SAMA7G5_QSPI0_MAX_SPEED_HZ,
	.has_gclk = true,
	.octal = true,
	.has_dma = true,
};

/* SAMA7G5 QSPI: generic clock and DMA support, no octal mode. */
static const struct atmel_qspi_caps atmel_sama7g5_qspi_caps = {
	.max_speed_hz = SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ,
	.has_gclk = true,
	.has_dma = true,
};
1617 
/* Device-tree match table mapping compatibles to capability sets. */
static const struct of_device_id atmel_qspi_dt_ids[] = {
	{
		.compatible = "atmel,sama5d2-qspi",
		.data = &atmel_sama5d2_qspi_caps,
	},
	{
		.compatible = "microchip,sam9x60-qspi",
		.data = &atmel_sam9x60_qspi_caps,
	},
	{
		.compatible = "microchip,sama7g5-ospi",
		.data = &atmel_sama7g5_ospi_caps,
	},
	{
		.compatible = "microchip,sama7g5-qspi",
		.data = &atmel_sama7g5_qspi_caps,
	},

	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_qspi_dt_ids);
1640 
/* Platform driver registration. */
static struct platform_driver atmel_qspi_driver = {
	.driver = {
		.name	= "atmel_qspi",
		.of_match_table	= atmel_qspi_dt_ids,
		.pm	= pm_ptr(&atmel_qspi_pm_ops),
	},
	.probe		= atmel_qspi_probe,
	.remove		= atmel_qspi_remove,
};
module_platform_driver(atmel_qspi_driver);
1651 
1652 MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@atmel.com>");
1653 MODULE_AUTHOR("Piotr Bugalski <bugalski.piotr@gmail.com");
1654 MODULE_DESCRIPTION("Atmel QSPI Controller driver");
1655 MODULE_LICENSE("GPL v2");
1656