xref: /linux/drivers/spi/atmel-quadspi.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Driver for Atmel QSPI Controller
4  *
5  * Copyright (C) 2015 Atmel Corporation
6  * Copyright (C) 2018 Cryptera A/S
7  *
8  * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
9  * Author: Piotr Bugalski <bugalski.piotr@gmail.com>
10  *
11  * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
12  */
13 
14 #include <linux/bitfield.h>
15 #include <linux/clk.h>
16 #include <linux/delay.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/dmaengine.h>
19 #include <linux/err.h>
20 #include <linux/interrupt.h>
21 #include <linux/io.h>
22 #include <linux/iopoll.h>
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/of.h>
26 #include <linux/of_platform.h>
27 #include <linux/platform_device.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/spi/spi-mem.h>
30 
31 /* QSPI register offsets */
32 #define QSPI_CR      0x0000  /* Control Register */
33 #define QSPI_MR      0x0004  /* Mode Register */
34 #define QSPI_RD      0x0008  /* Receive Data Register */
35 #define QSPI_TD      0x000c  /* Transmit Data Register */
36 #define QSPI_SR      0x0010  /* Status Register */
37 #define QSPI_IER     0x0014  /* Interrupt Enable Register */
38 #define QSPI_IDR     0x0018  /* Interrupt Disable Register */
39 #define QSPI_IMR     0x001c  /* Interrupt Mask Register */
40 #define QSPI_SCR     0x0020  /* Serial Clock Register */
41 #define QSPI_SR2     0x0024  /* SAMA7G5 Status Register */
42 
43 #define QSPI_IAR     0x0030  /* Instruction Address Register */
44 #define QSPI_ICR     0x0034  /* Instruction Code Register */
45 #define QSPI_WICR    0x0034  /* Write Instruction Code Register */
46 #define QSPI_IFR     0x0038  /* Instruction Frame Register */
47 #define QSPI_RICR    0x003C  /* Read Instruction Code Register */
48 
49 #define QSPI_SMR     0x0040  /* Scrambling Mode Register */
50 #define QSPI_SKR     0x0044  /* Scrambling Key Register */
51 
52 #define QSPI_REFRESH	0x0050	/* Refresh Register */
53 #define QSPI_WRACNT	0x0054	/* Write Access Counter Register */
54 #define QSPI_DLLCFG	0x0058	/* DLL Configuration Register */
55 #define QSPI_PCALCFG	0x005C	/* Pad Calibration Configuration Register */
56 #define QSPI_PCALBP	0x0060	/* Pad Calibration Bypass Register */
57 #define QSPI_TOUT	0x0064	/* Timeout Register */
58 
59 #define QSPI_WPMR    0x00E4  /* Write Protection Mode Register */
60 #define QSPI_WPSR    0x00E8  /* Write Protection Status Register */
61 
62 #define QSPI_VERSION 0x00FC  /* Version Register */
63 
64 #define SAMA7G5_QSPI0_MAX_SPEED_HZ	200000000
65 #define SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ	133000000
66 #define SAM9X7_QSPI_MAX_SPEED_HZ	100000000
67 
68 /* Bitfields in QSPI_CR (Control Register) */
69 #define QSPI_CR_QSPIEN                  BIT(0)
70 #define QSPI_CR_QSPIDIS                 BIT(1)
71 #define QSPI_CR_DLLON			BIT(2)
72 #define QSPI_CR_DLLOFF			BIT(3)
73 #define QSPI_CR_STPCAL			BIT(4)
74 #define QSPI_CR_SRFRSH			BIT(5)
75 #define QSPI_CR_SWRST                   BIT(7)
76 #define QSPI_CR_UPDCFG			BIT(8)
77 #define QSPI_CR_STTFR			BIT(9)
78 #define QSPI_CR_RTOUT			BIT(10)
79 #define QSPI_CR_LASTXFER                BIT(24)
80 
81 /* Bitfields in QSPI_MR (Mode Register) */
82 #define QSPI_MR_SMM                     BIT(0)
83 #define QSPI_MR_LLB                     BIT(1)
84 #define QSPI_MR_WDRBT                   BIT(2)
85 #define QSPI_MR_SMRM                    BIT(3)
86 #define QSPI_MR_DQSDLYEN		BIT(3)
87 #define QSPI_MR_CSMODE_MASK             GENMASK(5, 4)
88 #define QSPI_MR_CSMODE_NOT_RELOADED     (0 << 4)
89 #define QSPI_MR_CSMODE_LASTXFER         (1 << 4)
90 #define QSPI_MR_CSMODE_SYSTEMATICALLY   (2 << 4)
91 #define QSPI_MR_NBBITS_MASK             GENMASK(11, 8)
92 #define QSPI_MR_NBBITS(n)               ((((n) - 8) << 8) & QSPI_MR_NBBITS_MASK)
93 #define QSPI_MR_OENSD			BIT(15)
94 #define QSPI_MR_DLYBCT_MASK             GENMASK(23, 16)
95 #define QSPI_MR_DLYBCT(n)               (((n) << 16) & QSPI_MR_DLYBCT_MASK)
96 #define QSPI_MR_DLYCS_MASK              GENMASK(31, 24)
97 #define QSPI_MR_DLYCS(n)                (((n) << 24) & QSPI_MR_DLYCS_MASK)
98 
99 /* Bitfields in QSPI_SR/QSPI_IER/QSPI_IDR/QSPI_IMR  */
100 #define QSPI_SR_RDRF                    BIT(0)
101 #define QSPI_SR_TDRE                    BIT(1)
102 #define QSPI_SR_TXEMPTY                 BIT(2)
103 #define QSPI_SR_OVRES                   BIT(3)
104 #define QSPI_SR_CSR                     BIT(8)
105 #define QSPI_SR_CSS                     BIT(9)
106 #define QSPI_SR_INSTRE                  BIT(10)
107 #define QSPI_SR_LWRA			BIT(11)
108 #define QSPI_SR_QITF			BIT(12)
109 #define QSPI_SR_QITR			BIT(13)
110 #define QSPI_SR_CSFA			BIT(14)
111 #define QSPI_SR_CSRA			BIT(15)
112 #define QSPI_SR_RFRSHD			BIT(16)
113 #define QSPI_SR_TOUT			BIT(17)
114 #define QSPI_SR_QSPIENS                 BIT(24)
115 
116 #define QSPI_SR_CMD_COMPLETED	(QSPI_SR_INSTRE | QSPI_SR_CSR)
117 
118 /* Bitfields in QSPI_SCR (Serial Clock Register) */
119 #define QSPI_SCR_CPOL                   BIT(0)
120 #define QSPI_SCR_CPHA                   BIT(1)
121 #define QSPI_SCR_SCBR_MASK              GENMASK(15, 8)
122 #define QSPI_SCR_SCBR(n)                (((n) << 8) & QSPI_SCR_SCBR_MASK)
123 #define QSPI_SCR_DLYBS_MASK             GENMASK(23, 16)
124 #define QSPI_SCR_DLYBS(n)               (((n) << 16) & QSPI_SCR_DLYBS_MASK)
125 
126 /* Bitfields in QSPI_SR2 (SAMA7G5 Status Register) */
127 #define QSPI_SR2_SYNCBSY		BIT(0)
128 #define QSPI_SR2_QSPIENS		BIT(1)
129 #define QSPI_SR2_CSS			BIT(2)
130 #define QSPI_SR2_RBUSY			BIT(3)
131 #define QSPI_SR2_HIDLE			BIT(4)
132 #define QSPI_SR2_DLOCK			BIT(5)
133 #define QSPI_SR2_CALBSY			BIT(6)
134 
135 /* Bitfields in QSPI_IAR (Instruction Address Register) */
136 #define QSPI_IAR_ADDR			GENMASK(31, 0)
137 
138 /* Bitfields in QSPI_ICR (Read/Write Instruction Code Register) */
139 #define QSPI_ICR_INST_MASK              GENMASK(7, 0)
140 #define QSPI_ICR_INST(inst)             (((inst) << 0) & QSPI_ICR_INST_MASK)
141 #define QSPI_ICR_INST_MASK_SAMA7G5	GENMASK(15, 0)
142 #define QSPI_ICR_OPT_MASK               GENMASK(23, 16)
143 #define QSPI_ICR_OPT(opt)               (((opt) << 16) & QSPI_ICR_OPT_MASK)
144 
145 /* Bitfields in QSPI_IFR (Instruction Frame Register) */
146 #define QSPI_IFR_WIDTH_MASK             GENMASK(2, 0)
147 #define QSPI_IFR_WIDTH_SINGLE_BIT_SPI   (0 << 0)
148 #define QSPI_IFR_WIDTH_DUAL_OUTPUT      (1 << 0)
149 #define QSPI_IFR_WIDTH_QUAD_OUTPUT      (2 << 0)
150 #define QSPI_IFR_WIDTH_DUAL_IO          (3 << 0)
151 #define QSPI_IFR_WIDTH_QUAD_IO          (4 << 0)
152 #define QSPI_IFR_WIDTH_DUAL_CMD         (5 << 0)
153 #define QSPI_IFR_WIDTH_QUAD_CMD         (6 << 0)
154 #define QSPI_IFR_WIDTH_OCT_OUTPUT	(7 << 0)
155 #define QSPI_IFR_WIDTH_OCT_IO		(8 << 0)
156 #define QSPI_IFR_WIDTH_OCT_CMD		(9 << 0)
157 #define QSPI_IFR_INSTEN                 BIT(4)
158 #define QSPI_IFR_ADDREN                 BIT(5)
159 #define QSPI_IFR_OPTEN                  BIT(6)
160 #define QSPI_IFR_DATAEN                 BIT(7)
161 #define QSPI_IFR_OPTL_MASK              GENMASK(9, 8)
162 #define QSPI_IFR_OPTL_1BIT              (0 << 8)
163 #define QSPI_IFR_OPTL_2BIT              (1 << 8)
164 #define QSPI_IFR_OPTL_4BIT              (2 << 8)
165 #define QSPI_IFR_OPTL_8BIT              (3 << 8)
166 #define QSPI_IFR_ADDRL                  BIT(10)
167 #define QSPI_IFR_ADDRL_SAMA7G5		GENMASK(11, 10)
168 #define QSPI_IFR_TFRTYP_MEM		BIT(12)
169 #define QSPI_IFR_SAMA5D2_WRITE_TRSFR	BIT(13)
170 #define QSPI_IFR_CRM                    BIT(14)
171 #define QSPI_IFR_DDREN			BIT(15)
172 #define QSPI_IFR_NBDUM_MASK             GENMASK(20, 16)
173 #define QSPI_IFR_NBDUM(n)               (((n) << 16) & QSPI_IFR_NBDUM_MASK)
174 #define QSPI_IFR_END			BIT(22)
175 #define QSPI_IFR_SMRM			BIT(23)
176 #define QSPI_IFR_APBTFRTYP_READ		BIT(24)	/* Defined in SAM9X60 */
177 #define QSPI_IFR_DQSEN			BIT(25)
178 #define QSPI_IFR_DDRCMDEN		BIT(26)
179 #define QSPI_IFR_HFWBEN			BIT(27)
180 #define QSPI_IFR_PROTTYP		GENMASK(29, 28)
181 #define QSPI_IFR_PROTTYP_STD_SPI	0
182 #define QSPI_IFR_PROTTYP_TWIN_QUAD	1
183 #define QSPI_IFR_PROTTYP_OCTAFLASH	2
184 #define QSPI_IFR_PROTTYP_HYPERFLASH	3
185 
186 /* Bitfields in QSPI_SMR (Scrambling Mode Register) */
187 #define QSPI_SMR_SCREN                  BIT(0)
188 #define QSPI_SMR_RVDIS                  BIT(1)
189 #define QSPI_SMR_SCRKL                  BIT(2)
190 
191 /* Bitfields in QSPI_REFRESH (Refresh Register) */
192 #define QSPI_REFRESH_DELAY_COUNTER	GENMASK(31, 0)
193 
194 /* Bitfields in QSPI_WRACNT (Write Access Counter Register) */
195 #define QSPI_WRACNT_NBWRA		GENMASK(31, 0)
196 
197 /* Bitfields in QSPI_DLLCFG (DLL Configuration Register) */
198 #define QSPI_DLLCFG_RANGE		BIT(0)
199 
200 /* Bitfields in QSPI_PCALCFG (DLL Pad Calibration Configuration Register) */
201 #define QSPI_PCALCFG_AAON		BIT(0)
202 #define QSPI_PCALCFG_DAPCAL		BIT(1)
203 #define QSPI_PCALCFG_DIFFPM		BIT(2)
204 #define QSPI_PCALCFG_CLKDIV		GENMASK(6, 4)
205 #define QSPI_PCALCFG_CALCNT		GENMASK(16, 8)
206 #define QSPI_PCALCFG_CALP		GENMASK(27, 24)
207 #define QSPI_PCALCFG_CALN		GENMASK(31, 28)
208 
209 /* Bitfields in QSPI_PCALBP (DLL Pad Calibration Bypass Register) */
210 #define QSPI_PCALBP_BPEN		BIT(0)
211 #define QSPI_PCALBP_CALPBP		GENMASK(11, 8)
212 #define QSPI_PCALBP_CALNBP		GENMASK(19, 16)
213 
214 /* Bitfields in QSPI_TOUT (Timeout Register) */
215 #define QSPI_TOUT_TCNTM			GENMASK(15, 0)
216 
217 /* Bitfields in QSPI_WPMR (Write Protection Mode Register) */
218 #define QSPI_WPMR_WPEN                  BIT(0)
219 #define QSPI_WPMR_WPITEN		BIT(1)
220 #define QSPI_WPMR_WPCREN		BIT(2)
221 #define QSPI_WPMR_WPKEY_MASK            GENMASK(31, 8)
222 #define QSPI_WPMR_WPKEY(wpkey)          (((wpkey) << 8) & QSPI_WPMR_WPKEY_MASK)
223 
224 /* Bitfields in QSPI_WPSR (Write Protection Status Register) */
225 #define QSPI_WPSR_WPVS                  BIT(0)
226 #define QSPI_WPSR_WPVSRC_MASK           GENMASK(15, 8)
227 #define QSPI_WPSR_WPVSRC(src)           (((src) << 8) & QSPI_WPSR_WPVSRC_MASK)
228 
229 #define ATMEL_QSPI_TIMEOUT		1000	/* ms */
230 #define ATMEL_QSPI_SYNC_TIMEOUT		300	/* ms */
231 #define QSPI_DLLCFG_THRESHOLD_FREQ	90000000U
232 #define QSPI_CALIB_TIME			2000	/* 2 us */
233 
234 /* Use PIO for small transfers. */
235 #define ATMEL_QSPI_DMA_MIN_BYTES	16
236 /**
237  * struct atmel_qspi_pcal - Pad Calibration Clock Division
238  * @pclk_rate: peripheral clock rate.
239  * @pclk_div: calibration clock division. The clock applied to the calibration
240  *           cell is divided by pclk_div + 1.
241  */
242 struct atmel_qspi_pcal {
243 	u32 pclk_rate;
244 	u8 pclk_div;
245 };
246 
247 #define ATMEL_QSPI_PCAL_ARRAY_SIZE	8
248 static const struct atmel_qspi_pcal pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE] = {
249 	{25000000, 0},
250 	{50000000, 1},
251 	{75000000, 2},
252 	{100000000, 3},
253 	{125000000, 4},
254 	{150000000, 5},
255 	{175000000, 6},
256 	{200000000, 7},
257 };
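/*
 * Example, per the pclk_div + 1 divider documented above: a 150 MHz
 * peripheral clock selects pclk_div = 5, so the calibration cell is clocked
 * at 150 MHz / (5 + 1) = 25 MHz. Every entry in the table keeps the
 * calibration clock at or below 25 MHz.
 */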
258 
259 struct atmel_qspi_caps {
260 	u32 max_speed_hz;
261 	bool has_qspick;
262 	bool has_gclk;
263 	bool has_ricr;
264 	bool octal;
265 	bool has_dma;
266 	bool has_2xgclk;
267 	bool has_padcalib;
268 	bool has_dllon;
269 };
270 
271 struct atmel_qspi_ops;
272 
273 struct atmel_qspi {
274 	void __iomem		*regs;
275 	void __iomem		*mem;
276 	struct clk		*pclk;
277 	struct clk		*qspick;
278 	struct clk		*gclk;
279 	struct platform_device	*pdev;
280 	const struct atmel_qspi_caps *caps;
281 	const struct atmel_qspi_ops *ops;
282 	resource_size_t		mmap_size;
283 	u32			pending;
284 	u32			irq_mask;
285 	u32			mr;
286 	u32			scr;
287 	u32			target_max_speed_hz;
288 	struct completion	cmd_completion;
289 	struct completion	dma_completion;
290 	dma_addr_t		mmap_phys_base;
291 	struct dma_chan		*rx_chan;
292 	struct dma_chan		*tx_chan;
293 };
294 
295 struct atmel_qspi_ops {
296 	int (*set_cfg)(struct atmel_qspi *aq, const struct spi_mem_op *op,
297 		       u32 *offset);
298 	int (*transfer)(struct spi_mem *mem, const struct spi_mem_op *op,
299 			u32 offset);
300 };
301 
302 struct atmel_qspi_mode {
303 	u8 cmd_buswidth;
304 	u8 addr_buswidth;
305 	u8 data_buswidth;
306 	u32 config;
307 };
308 
309 static const struct atmel_qspi_mode atmel_qspi_modes[] = {
310 	{ 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
311 	{ 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
312 	{ 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
313 	{ 1, 2, 2, QSPI_IFR_WIDTH_DUAL_IO },
314 	{ 1, 4, 4, QSPI_IFR_WIDTH_QUAD_IO },
315 	{ 2, 2, 2, QSPI_IFR_WIDTH_DUAL_CMD },
316 	{ 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
317 };
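/*
 * Each entry maps a (cmd, addr, data) buswidth triplet to the matching
 * QSPI_IFR_WIDTH_* value: e.g. a 1-1-4 fast-read operation resolves to
 * QSPI_IFR_WIDTH_QUAD_OUTPUT and a 4-4-4 operation to QSPI_IFR_WIDTH_QUAD_CMD.
 */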
318 
319 static const struct atmel_qspi_mode atmel_qspi_sama7g5_modes[] = {
320 	{ 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
321 	{ 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
322 	{ 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
323 	{ 1, 2, 2, QSPI_IFR_WIDTH_DUAL_IO },
324 	{ 1, 4, 4, QSPI_IFR_WIDTH_QUAD_IO },
325 	{ 2, 2, 2, QSPI_IFR_WIDTH_DUAL_CMD },
326 	{ 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
327 	{ 1, 1, 8, QSPI_IFR_WIDTH_OCT_OUTPUT },
328 	{ 1, 8, 8, QSPI_IFR_WIDTH_OCT_IO },
329 	{ 8, 8, 8, QSPI_IFR_WIDTH_OCT_CMD },
330 };
331 
332 #ifdef VERBOSE_DEBUG
333 static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
334 {
335 	switch (offset) {
336 	case QSPI_CR:
337 		return "CR";
338 	case QSPI_MR:
339 		return "MR";
340 	case QSPI_RD:
341 		return "RD";
342 	case QSPI_TD:
343 		return "TD";
344 	case QSPI_SR:
345 		return "SR";
346 	case QSPI_IER:
347 		return "IER";
348 	case QSPI_IDR:
349 		return "IDR";
350 	case QSPI_IMR:
351 		return "IMR";
352 	case QSPI_SCR:
353 		return "SCR";
354 	case QSPI_SR2:
355 		return "SR2";
356 	case QSPI_IAR:
357 		return "IAR";
358 	case QSPI_ICR:
359 		return "ICR/WICR";
360 	case QSPI_IFR:
361 		return "IFR";
362 	case QSPI_RICR:
363 		return "RICR";
364 	case QSPI_SMR:
365 		return "SMR";
366 	case QSPI_SKR:
367 		return "SKR";
368 	case QSPI_REFRESH:
369 		return "REFRESH";
370 	case QSPI_WRACNT:
371 		return "WRACNT";
372 	case QSPI_DLLCFG:
373 		return "DLLCFG";
374 	case QSPI_PCALCFG:
375 		return "PCALCFG";
376 	case QSPI_PCALBP:
377 		return "PCALBP";
378 	case QSPI_TOUT:
379 		return "TOUT";
380 	case QSPI_WPMR:
381 		return "WPMR";
382 	case QSPI_WPSR:
383 		return "WPSR";
384 	case QSPI_VERSION:
385 		return "VERSION";
386 	default:
387 		snprintf(tmp, sz, "0x%02x", offset);
388 		break;
389 	}
390 
391 	return tmp;
392 }
393 #endif /* VERBOSE_DEBUG */
394 
395 static u32 atmel_qspi_read(struct atmel_qspi *aq, u32 offset)
396 {
397 	u32 value = readl_relaxed(aq->regs + offset);
398 
399 #ifdef VERBOSE_DEBUG
400 	char tmp[8];
401 
402 	dev_vdbg(&aq->pdev->dev, "read 0x%08x from %s\n", value,
403 		 atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
404 #endif /* VERBOSE_DEBUG */
405 
406 	return value;
407 }
408 
409 static void atmel_qspi_write(u32 value, struct atmel_qspi *aq, u32 offset)
410 {
411 #ifdef VERBOSE_DEBUG
412 	char tmp[8];
413 
414 	dev_vdbg(&aq->pdev->dev, "write 0x%08x into %s\n", value,
415 		 atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
416 #endif /* VERBOSE_DEBUG */
417 
418 	writel_relaxed(value, aq->regs + offset);
419 }
420 
421 static int atmel_qspi_reg_sync(struct atmel_qspi *aq)
422 {
423 	u32 val;
424 	int ret;
425 
426 	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
427 				 !(val & QSPI_SR2_SYNCBSY), 40,
428 				 ATMEL_QSPI_SYNC_TIMEOUT);
429 	return ret;
430 }
431 
432 static int atmel_qspi_update_config(struct atmel_qspi *aq)
433 {
434 	int ret;
435 
436 	ret = atmel_qspi_reg_sync(aq);
437 	if (ret)
438 		return ret;
439 	atmel_qspi_write(QSPI_CR_UPDCFG, aq, QSPI_CR);
440 	return atmel_qspi_reg_sync(aq);
441 }
442 
443 static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op,
444 					    const struct atmel_qspi_mode *mode)
445 {
446 	if (op->cmd.buswidth != mode->cmd_buswidth)
447 		return false;
448 
449 	if (op->addr.nbytes && op->addr.buswidth != mode->addr_buswidth)
450 		return false;
451 
452 	if (op->data.nbytes && op->data.buswidth != mode->data_buswidth)
453 		return false;
454 
455 	return true;
456 }
457 
458 static int atmel_qspi_find_mode(const struct spi_mem_op *op)
459 {
460 	u32 i;
461 
462 	for (i = 0; i < ARRAY_SIZE(atmel_qspi_modes); i++)
463 		if (atmel_qspi_is_compatible(op, &atmel_qspi_modes[i]))
464 			return i;
465 
466 	return -EOPNOTSUPP;
467 }
468 
469 static int atmel_qspi_sama7g5_find_mode(const struct spi_mem_op *op)
470 {
471 	u32 i;
472 
473 	for (i = 0; i < ARRAY_SIZE(atmel_qspi_sama7g5_modes); i++)
474 		if (atmel_qspi_is_compatible(op, &atmel_qspi_sama7g5_modes[i]))
475 			return i;
476 
477 	return -EOPNOTSUPP;
478 }
479 
480 static bool atmel_qspi_supports_op(struct spi_mem *mem,
481 				   const struct spi_mem_op *op)
482 {
483 	struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
484 	if (!spi_mem_default_supports_op(mem, op))
485 		return false;
486 
487 	if (aq->caps->octal) {
488 		if (atmel_qspi_sama7g5_find_mode(op) < 0)
489 			return false;
490 		else
491 			return true;
492 	}
493 
494 	if (atmel_qspi_find_mode(op) < 0)
495 		return false;
496 
497 	/* special case not supported by hardware */
498 	if (op->addr.nbytes == 2 && op->cmd.buswidth != op->addr.buswidth &&
499 	    op->dummy.nbytes == 0)
500 		return false;
501 
502 	return true;
503 }
504 
505 /*
506  * If the QSPI controller is set in regular SPI mode, set it in
507  * Serial Memory Mode (SMM).
508  */
509 static int atmel_qspi_set_serial_memory_mode(struct atmel_qspi *aq)
510 {
511 	int ret = 0;
512 
513 	if (!(aq->mr & QSPI_MR_SMM)) {
514 		aq->mr |= QSPI_MR_SMM;
515 		atmel_qspi_write(aq->mr, aq, QSPI_MR);
516 
517 		if (aq->caps->has_gclk)
518 			ret = atmel_qspi_update_config(aq);
519 	}
520 
521 	return ret;
522 }
523 
524 static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
525 			      const struct spi_mem_op *op, u32 *offset)
526 {
527 	u32 iar, icr, ifr;
528 	u32 dummy_cycles = 0;
529 	int mode;
530 
531 	iar = 0;
532 	icr = QSPI_ICR_INST(op->cmd.opcode);
533 	ifr = QSPI_IFR_INSTEN;
534 
535 	mode = atmel_qspi_find_mode(op);
536 	if (mode < 0)
537 		return mode;
538 	ifr |= atmel_qspi_modes[mode].config;
539 
540 	if (op->dummy.nbytes)
541 		dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;
542 
543 	/*
544 	 * The controller allows 24- and 32-bit addressing, while NAND flash
545 	 * requires 16-bit addresses. 8-bit addresses are handled using the
546 	 * option field. For 16-bit addresses, the workaround depends on the
547 	 * number of requested dummy cycles: if there are 8 or more dummy
548 	 * cycles, the address is shifted and sent with the first dummy byte.
549 	 * Otherwise the opcode is disabled and the first byte of the address
550 	 * carries the command opcode (this works only if the opcode and the
551 	 * address use the same buswidth). The remaining limitation is a
552 	 * 16-bit address used without enough dummy cycles while the opcode
553 	 * uses a different buswidth than the address.
554 	 */
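	/*
	 * Worked example with hypothetical values: a 2-byte address of 0x1234
	 * sent with one dummy byte on a 1-bit bus gives dummy_cycles = 8, so
	 * the second branch of "case 2" below is taken: the opcode stays
	 * enabled, IAR becomes (0x1234 << 8) & 0xffffff = 0x123400 and the
	 * remaining dummy cycles drop to zero. With no dummy cycles at all,
	 * the first branch disables the opcode and packs it into IAR as
	 * (opcode << 16) | 0x1234 instead.
	 */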
555 	if (op->addr.buswidth) {
556 		switch (op->addr.nbytes) {
557 		case 0:
558 			break;
559 		case 1:
560 			ifr |= QSPI_IFR_OPTEN | QSPI_IFR_OPTL_8BIT;
561 			icr |= QSPI_ICR_OPT(op->addr.val & 0xff);
562 			break;
563 		case 2:
564 			if (dummy_cycles < 8 / op->addr.buswidth) {
565 				ifr &= ~QSPI_IFR_INSTEN;
566 				ifr |= QSPI_IFR_ADDREN;
567 				iar = (op->cmd.opcode << 16) |
568 					(op->addr.val & 0xffff);
569 			} else {
570 				ifr |= QSPI_IFR_ADDREN;
571 				iar = (op->addr.val << 8) & 0xffffff;
572 				dummy_cycles -= 8 / op->addr.buswidth;
573 			}
574 			break;
575 		case 3:
576 			ifr |= QSPI_IFR_ADDREN;
577 			iar = op->addr.val & 0xffffff;
578 			break;
579 		case 4:
580 			ifr |= QSPI_IFR_ADDREN | QSPI_IFR_ADDRL;
581 			iar = op->addr.val & 0x7ffffff;
582 			break;
583 		default:
584 			return -EOPNOTSUPP;
585 		}
586 	}
587 
588 	/* offset of the data access in the QSPI memory space */
589 	*offset = iar;
590 
591 	/* Set number of dummy cycles */
592 	if (dummy_cycles)
593 		ifr |= QSPI_IFR_NBDUM(dummy_cycles);
594 
595 	/* Set data enable and data transfer type. */
596 	if (op->data.nbytes) {
597 		ifr |= QSPI_IFR_DATAEN;
598 
599 		if (op->addr.nbytes)
600 			ifr |= QSPI_IFR_TFRTYP_MEM;
601 	}
602 
603 	mode = atmel_qspi_set_serial_memory_mode(aq);
604 	if (mode < 0)
605 		return mode;
606 
607 	/* Clear pending interrupts */
608 	(void)atmel_qspi_read(aq, QSPI_SR);
609 
610 	/* Set QSPI Instruction Frame registers. */
611 	if (op->addr.nbytes && !op->data.nbytes)
612 		atmel_qspi_write(iar, aq, QSPI_IAR);
613 
614 	if (aq->caps->has_ricr) {
615 		if (op->data.dir == SPI_MEM_DATA_IN)
616 			atmel_qspi_write(icr, aq, QSPI_RICR);
617 		else
618 			atmel_qspi_write(icr, aq, QSPI_WICR);
619 	} else {
620 		if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
621 			ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR;
622 
623 		atmel_qspi_write(icr, aq, QSPI_ICR);
624 	}
625 
626 	atmel_qspi_write(ifr, aq, QSPI_IFR);
627 
628 	return 0;
629 }
630 
631 static int atmel_qspi_wait_for_completion(struct atmel_qspi *aq, u32 irq_mask)
632 {
633 	int err = 0;
634 	u32 sr;
635 
636 	/* Poll INSTRuction End status */
637 	sr = atmel_qspi_read(aq, QSPI_SR);
638 	if ((sr & irq_mask) == irq_mask)
639 		return 0;
640 
641 	/* Wait for INSTRuction End interrupt */
642 	reinit_completion(&aq->cmd_completion);
643 	aq->pending = sr & irq_mask;
644 	aq->irq_mask = irq_mask;
645 	atmel_qspi_write(irq_mask, aq, QSPI_IER);
646 	if (!wait_for_completion_timeout(&aq->cmd_completion,
647 					 msecs_to_jiffies(ATMEL_QSPI_TIMEOUT)))
648 		err = -ETIMEDOUT;
649 	atmel_qspi_write(irq_mask, aq, QSPI_IDR);
650 
651 	return err;
652 }
653 
654 static int atmel_qspi_transfer(struct spi_mem *mem,
655 			       const struct spi_mem_op *op, u32 offset)
656 {
657 	struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
658 
659 	/* Skip to the final steps if there is no data */
660 	if (!op->data.nbytes)
661 		return atmel_qspi_wait_for_completion(aq,
662 						      QSPI_SR_CMD_COMPLETED);
663 
664 	/* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
665 	(void)atmel_qspi_read(aq, QSPI_IFR);
666 
667 	/* Send/Receive data */
668 	if (op->data.dir == SPI_MEM_DATA_IN) {
669 		memcpy_fromio(op->data.buf.in, aq->mem + offset,
670 			      op->data.nbytes);
671 
672 		/* Synchronize AHB and APB accesses again */
673 		rmb();
674 	} else {
675 		memcpy_toio(aq->mem + offset, op->data.buf.out,
676 			    op->data.nbytes);
677 
678 		/* Synchronize AHB and APB accesses again */
679 		wmb();
680 	}
681 
682 	/* Release the chip-select */
683 	atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
684 
685 	return atmel_qspi_wait_for_completion(aq, QSPI_SR_CMD_COMPLETED);
686 }
687 
688 static int atmel_qspi_sama7g5_set_cfg(struct atmel_qspi *aq,
689 				      const struct spi_mem_op *op, u32 *offset)
690 {
691 	u32 iar, icr, ifr;
692 	int mode, ret;
693 
694 	iar = 0;
695 	icr = FIELD_PREP(QSPI_ICR_INST_MASK_SAMA7G5, op->cmd.opcode);
696 	ifr = QSPI_IFR_INSTEN;
697 
698 	mode = atmel_qspi_sama7g5_find_mode(op);
699 	if (mode < 0)
700 		return mode;
701 	ifr |= atmel_qspi_sama7g5_modes[mode].config;
702 
703 	if (op->dummy.buswidth && op->dummy.nbytes) {
704 		if (op->addr.dtr && op->dummy.dtr && op->data.dtr)
705 			ifr |= QSPI_IFR_NBDUM(op->dummy.nbytes * 8 /
706 					      (2 * op->dummy.buswidth));
707 		else
708 			ifr |= QSPI_IFR_NBDUM(op->dummy.nbytes * 8 /
709 					      op->dummy.buswidth);
710 	}
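	/*
	 * In full DTR mode (address, dummy and data all DTR), dummy bytes are
	 * clocked on both edges and take half as many cycles, hence the extra
	 * division by two above.
	 */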
711 
712 	if (op->addr.buswidth && op->addr.nbytes) {
713 		ifr |= FIELD_PREP(QSPI_IFR_ADDRL_SAMA7G5, op->addr.nbytes - 1) |
714 		       QSPI_IFR_ADDREN;
715 		iar = FIELD_PREP(QSPI_IAR_ADDR, op->addr.val);
716 	}
717 
718 	if (op->addr.dtr && op->dummy.dtr && op->data.dtr) {
719 		ifr |= QSPI_IFR_DDREN;
720 		if (op->cmd.dtr)
721 			ifr |= QSPI_IFR_DDRCMDEN;
722 
723 		ifr |= QSPI_IFR_DQSEN;
724 	}
725 
726 	if (op->cmd.buswidth == 8 || op->addr.buswidth == 8 ||
727 	    op->data.buswidth == 8)
728 		ifr |= FIELD_PREP(QSPI_IFR_PROTTYP, QSPI_IFR_PROTTYP_OCTAFLASH);
729 
730 	/* offset of the data access in the QSPI memory space */
731 	*offset = iar;
732 
733 	/* Set data enable */
734 	if (op->data.nbytes) {
735 		ifr |= QSPI_IFR_DATAEN;
736 
737 		if (op->addr.nbytes)
738 			ifr |= QSPI_IFR_TFRTYP_MEM;
739 	}
740 
741 	ret = atmel_qspi_set_serial_memory_mode(aq);
742 	if (ret < 0)
743 		return ret;
744 
745 	/* Clear pending interrupts */
746 	(void)atmel_qspi_read(aq, QSPI_SR);
747 
748 	/* Set QSPI Instruction Frame registers */
749 	if (op->addr.nbytes && !op->data.nbytes)
750 		atmel_qspi_write(iar, aq, QSPI_IAR);
751 
752 	if (op->data.dir == SPI_MEM_DATA_IN) {
753 		atmel_qspi_write(icr, aq, QSPI_RICR);
754 	} else {
755 		atmel_qspi_write(icr, aq, QSPI_WICR);
756 		if (op->data.nbytes)
757 			atmel_qspi_write(FIELD_PREP(QSPI_WRACNT_NBWRA,
758 						    op->data.nbytes),
759 					 aq, QSPI_WRACNT);
760 	}
761 
762 	atmel_qspi_write(ifr, aq, QSPI_IFR);
763 
764 	return atmel_qspi_update_config(aq);
765 }
766 
767 static void atmel_qspi_dma_callback(void *param)
768 {
769 	struct atmel_qspi *aq = param;
770 
771 	complete(&aq->dma_completion);
772 }
773 
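/*
 * DMA copies between system memory and the memory-mapped QSPI window
 * (mmap_phys_base + offset), so a plain memcpy descriptor is used rather
 * than a slave/device transfer.
 */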
774 static int atmel_qspi_dma_xfer(struct atmel_qspi *aq, struct dma_chan *chan,
775 			       dma_addr_t dma_dst, dma_addr_t dma_src,
776 			       unsigned int len)
777 {
778 	struct dma_async_tx_descriptor *tx;
779 	dma_cookie_t cookie;
780 	int ret;
781 
782 	tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
783 				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
784 	if (!tx) {
785 		dev_err(&aq->pdev->dev, "device_prep_dma_memcpy error\n");
786 		return -EIO;
787 	}
788 
789 	reinit_completion(&aq->dma_completion);
790 	tx->callback = atmel_qspi_dma_callback;
791 	tx->callback_param = aq;
792 	cookie = tx->tx_submit(tx);
793 	ret = dma_submit_error(cookie);
794 	if (ret) {
795 		dev_err(&aq->pdev->dev, "dma_submit_error %d\n", cookie);
796 		return ret;
797 	}
798 
799 	dma_async_issue_pending(chan);
800 	ret = wait_for_completion_timeout(&aq->dma_completion,
801 					  msecs_to_jiffies(20 * ATMEL_QSPI_TIMEOUT));
802 	if (ret == 0) {
803 		dmaengine_terminate_sync(chan);
804 		dev_err(&aq->pdev->dev, "DMA wait_for_completion_timeout\n");
805 		return -ETIMEDOUT;
806 	}
807 
808 	return 0;
809 }
810 
811 static int atmel_qspi_dma_rx_xfer(struct spi_mem *mem,
812 				  const struct spi_mem_op *op,
813 				  struct sg_table *sgt, loff_t loff)
814 {
815 	struct atmel_qspi *aq =
816 		spi_controller_get_devdata(mem->spi->controller);
817 	struct scatterlist *sg;
818 	dma_addr_t dma_src;
819 	unsigned int i, len;
820 	int ret;
821 
822 	dma_src = aq->mmap_phys_base + loff;
823 
824 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
825 		len = sg_dma_len(sg);
826 		ret = atmel_qspi_dma_xfer(aq, aq->rx_chan, sg_dma_address(sg),
827 					  dma_src, len);
828 		if (ret)
829 			return ret;
830 		dma_src += len;
831 	}
832 
833 	return 0;
834 }
835 
836 static int atmel_qspi_dma_tx_xfer(struct spi_mem *mem,
837 				  const struct spi_mem_op *op,
838 				  struct sg_table *sgt, loff_t loff)
839 {
840 	struct atmel_qspi *aq =
841 		spi_controller_get_devdata(mem->spi->controller);
842 	struct scatterlist *sg;
843 	dma_addr_t dma_dst;
844 	unsigned int i, len;
845 	int ret;
846 
847 	dma_dst = aq->mmap_phys_base + loff;
848 
849 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
850 		len = sg_dma_len(sg);
851 		ret = atmel_qspi_dma_xfer(aq, aq->tx_chan, dma_dst,
852 					  sg_dma_address(sg), len);
853 		if (ret)
854 			return ret;
855 		dma_dst += len;
856 	}
857 
858 	return 0;
859 }
860 
861 static int atmel_qspi_dma_transfer(struct spi_mem *mem,
862 				   const struct spi_mem_op *op, loff_t loff)
863 {
864 	struct sg_table sgt;
865 	int ret;
866 
867 	ret = spi_controller_dma_map_mem_op_data(mem->spi->controller, op,
868 						 &sgt);
869 	if (ret)
870 		return ret;
871 
872 	if (op->data.dir == SPI_MEM_DATA_IN)
873 		ret = atmel_qspi_dma_rx_xfer(mem, op, &sgt, loff);
874 	else
875 		ret = atmel_qspi_dma_tx_xfer(mem, op, &sgt, loff);
876 
877 	spi_controller_dma_unmap_mem_op_data(mem->spi->controller, op, &sgt);
878 
879 	return ret;
880 }
881 
882 static int atmel_qspi_sama7g5_transfer(struct spi_mem *mem,
883 				       const struct spi_mem_op *op, u32 offset)
884 {
885 	struct atmel_qspi *aq =
886 		spi_controller_get_devdata(mem->spi->controller);
887 	u32 val;
888 	int ret;
889 
890 	if (!op->data.nbytes) {
891 		/* Start the transfer. */
892 		ret = atmel_qspi_reg_sync(aq);
893 		if (ret)
894 			return ret;
895 		atmel_qspi_write(QSPI_CR_STTFR, aq, QSPI_CR);
896 
897 		return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA);
898 	}
899 
900 	/* Send/Receive data. */
901 	if (op->data.dir == SPI_MEM_DATA_IN) {
902 		if (aq->rx_chan && op->addr.nbytes &&
903 		    op->data.nbytes > ATMEL_QSPI_DMA_MIN_BYTES) {
904 			ret = atmel_qspi_dma_transfer(mem, op, offset);
905 			if (ret)
906 				return ret;
907 		} else {
908 			memcpy_fromio(op->data.buf.in, aq->mem + offset,
909 				      op->data.nbytes);
910 		}
911 
912 		if (op->addr.nbytes) {
913 			ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
914 						 !(val & QSPI_SR2_RBUSY), 40,
915 						 ATMEL_QSPI_SYNC_TIMEOUT);
916 			if (ret)
917 				return ret;
918 		}
919 	} else {
920 		if (aq->tx_chan && op->addr.nbytes &&
921 		    op->data.nbytes > ATMEL_QSPI_DMA_MIN_BYTES) {
922 			ret = atmel_qspi_dma_transfer(mem, op, offset);
923 			if (ret)
924 				return ret;
925 		} else {
926 			memcpy_toio(aq->mem + offset, op->data.buf.out,
927 				    op->data.nbytes);
928 		}
929 
930 		ret = atmel_qspi_wait_for_completion(aq, QSPI_SR_LWRA);
931 		if (ret)
932 			return ret;
933 	}
934 
935 	/* Release the chip-select. */
936 	ret = atmel_qspi_reg_sync(aq);
937 	if (ret)
938 		return ret;
939 	atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
940 
941 	return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA);
942 }
943 
944 static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
945 {
946 	struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
947 	u32 offset;
948 	int err;
949 
950 	/*
951 	 * Check if the address exceeds the MMIO window size. An improvement
952 	 * would be to add support for regular SPI mode and fall back to it
953 	 * when the flash memories overrun the controller's memory space.
954 	 */
955 	if (op->addr.val + op->data.nbytes > aq->mmap_size)
956 		return -EOPNOTSUPP;
957 
958 	if (op->addr.nbytes > 4)
959 		return -EOPNOTSUPP;
960 
961 	err = pm_runtime_resume_and_get(&aq->pdev->dev);
962 	if (err < 0)
963 		return err;
964 
965 	err = aq->ops->set_cfg(aq, op, &offset);
966 	if (err)
967 		goto pm_runtime_put;
968 
969 	err = aq->ops->transfer(mem, op, offset);
970 
971 pm_runtime_put:
972 	pm_runtime_put_autosuspend(&aq->pdev->dev);
973 	return err;
974 }
975 
976 static const char *atmel_qspi_get_name(struct spi_mem *spimem)
977 {
978 	return dev_name(spimem->spi->dev.parent);
979 }
980 
981 static const struct spi_controller_mem_ops atmel_qspi_mem_ops = {
982 	.supports_op = atmel_qspi_supports_op,
983 	.exec_op = atmel_qspi_exec_op,
984 	.get_name = atmel_qspi_get_name
985 };
986 
987 static int atmel_qspi_set_pad_calibration(struct atmel_qspi *aq)
988 {
989 	unsigned long pclk_rate;
990 	u32 status, val;
991 	int i, ret;
992 	u8 pclk_div = 0;
993 
994 	pclk_rate = clk_get_rate(aq->pclk);
995 	if (!pclk_rate)
996 		return -EINVAL;
997 
998 	for (i = 0; i < ATMEL_QSPI_PCAL_ARRAY_SIZE; i++) {
999 		if (pclk_rate <= pcal[i].pclk_rate) {
1000 			pclk_div = pcal[i].pclk_div;
1001 			break;
1002 		}
1003 	}
1004 
1005 	/*
1006 	 * Use the biggest divider if the peripheral clock exceeds
1007 	 * 200 MHz.
1008 	 */
1009 	if (pclk_rate > pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE - 1].pclk_rate)
1010 		pclk_div = pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE - 1].pclk_div;
1011 
1012 	/* Disable QSPI while configuring the pad calibration. */
1013 	status = atmel_qspi_read(aq, QSPI_SR2);
1014 	if (status & QSPI_SR2_QSPIENS) {
1015 		ret = atmel_qspi_reg_sync(aq);
1016 		if (ret)
1017 			return ret;
1018 		atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
1019 	}
1020 
1021 	/*
1022 	 * The analog circuitry is not shut down at the end of the calibration
1023 	 * and the start-up time is only required for the first calibration
1024 	 * sequence, thus increasing performance. Set the delay between the Pad
1025 	 * calibration analog circuitry and the calibration request to 2us.
1026 	 */
1027 	atmel_qspi_write(QSPI_PCALCFG_AAON |
1028 			 FIELD_PREP(QSPI_PCALCFG_CLKDIV, pclk_div) |
1029 			 FIELD_PREP(QSPI_PCALCFG_CALCNT,
1030 				    2 * (pclk_rate / 1000000)),
1031 			 aq, QSPI_PCALCFG);
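	/*
	 * CALCNT above is 2 * pclk_rate_in_MHz, i.e. the number of peripheral
	 * clock cycles in 2 us: at 200 MHz this programs 400 cycles.
	 */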
1032 
1033 	/* DLL On + start calibration. */
1034 	if (aq->caps->has_dllon)
1035 		atmel_qspi_write(QSPI_CR_DLLON | QSPI_CR_STPCAL, aq, QSPI_CR);
1036 	/* If there is no DLL support, only start the calibration. */
1037 	else
1038 		atmel_qspi_write(QSPI_CR_STPCAL, aq, QSPI_CR);
1039 
1040 	/*
1041 	 * Check DLL clock lock and synchronization status before updating
1042 	 * configuration.
1043 	 */
1044 	if (aq->caps->has_dllon)
1045 		ret =  readl_poll_timeout(aq->regs + QSPI_SR2, val,
1046 					  (val & QSPI_SR2_DLOCK) &&
1047 					  !(val & QSPI_SR2_CALBSY), 40,
1048 					  ATMEL_QSPI_TIMEOUT);
1049 	else
1050 		ret =  readl_poll_timeout(aq->regs + QSPI_SR2, val,
1051 					  !(val & QSPI_SR2_CALBSY), 40,
1052 					  ATMEL_QSPI_TIMEOUT);
1053 
1054 	/* Refresh the analog blocks every 1 ms (max_speed_hz / 1000 clock cycles). */
1055 	atmel_qspi_write(FIELD_PREP(QSPI_REFRESH_DELAY_COUNTER,
1056 				    aq->target_max_speed_hz / 1000),
1057 			 aq, QSPI_REFRESH);
1058 
1059 	return ret;
1060 }
1061 
1062 static int atmel_qspi_set_gclk(struct atmel_qspi *aq)
1063 {
1064 	u32 status, val;
1065 	int ret;
1066 
1067 	/* Disable DLL before setting GCLK */
1068 	if (aq->caps->has_dllon) {
1069 		status = atmel_qspi_read(aq, QSPI_SR2);
1070 		if (status & QSPI_SR2_DLOCK) {
1071 			atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR);
1072 			ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
1073 						 !(val & QSPI_SR2_DLOCK), 40,
1074 						 ATMEL_QSPI_TIMEOUT);
1075 			if (ret)
1076 				return ret;
1077 		}
1078 
1079 		if (aq->target_max_speed_hz > QSPI_DLLCFG_THRESHOLD_FREQ)
1080 			atmel_qspi_write(QSPI_DLLCFG_RANGE, aq, QSPI_DLLCFG);
1081 		else
1082 			atmel_qspi_write(0, aq, QSPI_DLLCFG);
1083 	}
1084 
1085 	if (aq->caps->has_2xgclk)
1086 		ret = clk_set_rate(aq->gclk, 2 * aq->target_max_speed_hz);
1087 	else
1088 		ret = clk_set_rate(aq->gclk, aq->target_max_speed_hz);
1089 
1090 	if (ret) {
1091 		dev_err(&aq->pdev->dev, "Failed to set generic clock rate.\n");
1092 		return ret;
1093 	}
1094 
1095 	/* Enable the QSPI generic clock */
1096 	ret = clk_prepare_enable(aq->gclk);
1097 	if (ret)
1098 		dev_err(&aq->pdev->dev, "Failed to enable generic clock.\n");
1099 
1100 	return ret;
1101 }
1102 
1103 static int atmel_qspi_sama7g5_init(struct atmel_qspi *aq)
1104 {
1105 	u32 val;
1106 	int ret;
1107 
1108 	ret = atmel_qspi_set_gclk(aq);
1109 	if (ret)
1110 		return ret;
1111 
1112 	/*
1113 	 * Check if the SoC supports pad calibration in Octal SPI mode.
1114 	 * Proceed only if both capabilities are set.
1115 	 */
1116 	if (aq->caps->octal && aq->caps->has_padcalib) {
1117 		ret = atmel_qspi_set_pad_calibration(aq);
1118 		if (ret)
1119 			return ret;
1120 	/* Turn the DLL on only if the SoC supports it. */
1121 	} else if (aq->caps->has_dllon) {
1122 		atmel_qspi_write(QSPI_CR_DLLON, aq, QSPI_CR);
1123 		ret =  readl_poll_timeout(aq->regs + QSPI_SR2, val,
1124 					  (val & QSPI_SR2_DLOCK), 40,
1125 					  ATMEL_QSPI_TIMEOUT);
1126 	}
1127 
1128 	/* Set the QSPI controller by default in Serial Memory Mode */
1129 	aq->mr |= QSPI_MR_DQSDLYEN;
1130 	ret = atmel_qspi_set_serial_memory_mode(aq);
1131 	if (ret < 0)
1132 		return ret;
1133 
1134 	/* Enable the QSPI controller. */
1135 	atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
1136 	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
1137 				 val & QSPI_SR2_QSPIENS, 40,
1138 				 ATMEL_QSPI_SYNC_TIMEOUT);
1139 	if (ret)
1140 		return ret;
1141 
1142 	if (aq->caps->octal) {
1143 		ret = readl_poll_timeout(aq->regs + QSPI_SR, val,
1144 					 val & QSPI_SR_RFRSHD, 40,
1145 					 ATMEL_QSPI_TIMEOUT);
1146 	}
1147 
1148 	atmel_qspi_write(QSPI_TOUT_TCNTM, aq, QSPI_TOUT);
1149 	return ret;
1150 }
1151 
1152 static int atmel_qspi_sama7g5_setup(struct spi_device *spi)
1153 {
1154 	struct atmel_qspi *aq = spi_controller_get_devdata(spi->controller);
1155 
1156 	/* The controller can communicate with a single peripheral device (target). */
1157 	aq->target_max_speed_hz = spi->max_speed_hz;
1158 
1159 	return atmel_qspi_sama7g5_init(aq);
1160 }
1161 
1162 static int atmel_qspi_setup(struct spi_device *spi)
1163 {
1164 	struct spi_controller *ctrl = spi->controller;
1165 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1166 	unsigned long src_rate;
1167 	u32 scbr;
1168 	int ret;
1169 
1170 	if (ctrl->busy)
1171 		return -EBUSY;
1172 
1173 	if (!spi->max_speed_hz)
1174 		return -EINVAL;
1175 
1176 	if (aq->caps->has_gclk)
1177 		return atmel_qspi_sama7g5_setup(spi);
1178 
1179 	src_rate = clk_get_rate(aq->pclk);
1180 	if (!src_rate)
1181 		return -EINVAL;
1182 
1183 	/* Compute the QSPI baudrate */
1184 	scbr = DIV_ROUND_UP(src_rate, spi->max_speed_hz);
1185 	if (scbr > 0)
1186 		scbr--;
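	/*
	 * The hardware divides the peripheral clock by SCBR + 1, hence the
	 * decrement above. For example, a 200 MHz peripheral clock and a
	 * 100 MHz device limit give DIV_ROUND_UP(200 MHz, 100 MHz) = 2, so
	 * scbr = 1 and the serial clock runs at exactly 100 MHz.
	 */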
1187 
1188 	ret = pm_runtime_resume_and_get(ctrl->dev.parent);
1189 	if (ret < 0)
1190 		return ret;
1191 
1192 	aq->scr &= ~QSPI_SCR_SCBR_MASK;
1193 	aq->scr |= QSPI_SCR_SCBR(scbr);
1194 	atmel_qspi_write(aq->scr, aq, QSPI_SCR);
1195 
1196 	pm_runtime_put_autosuspend(ctrl->dev.parent);
1197 
1198 	return 0;
1199 }
1200 
1201 static int atmel_qspi_set_cs_timing(struct spi_device *spi)
1202 {
1203 	struct spi_controller *ctrl = spi->controller;
1204 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1205 	unsigned long clk_rate;
1206 	u32 cs_inactive;
1207 	u32 cs_setup;
1208 	u32 cs_hold;
1209 	int delay;
1210 	int ret;
1211 
1212 	clk_rate = clk_get_rate(aq->pclk);
1213 	if (!clk_rate)
1214 		return -EINVAL;
1215 
1216 	/* hold */
1217 	delay = spi_delay_to_ns(&spi->cs_hold, NULL);
1218 	if (aq->mr & QSPI_MR_SMM) {
1219 		if (delay > 0)
1220 			dev_warn(&aq->pdev->dev,
1221 				 "Ignoring cs_hold, must be 0 in Serial Memory Mode.\n");
1222 		cs_hold = 0;
1223 	} else {
1225 		if (delay < 0)
1226 			return delay;
1227 
1228 		cs_hold = DIV_ROUND_UP((delay * DIV_ROUND_UP(clk_rate, 1000000)), 32000);
1229 	}
1230 
1231 	/* setup */
1232 	delay = spi_delay_to_ns(&spi->cs_setup, NULL);
1233 	if (delay < 0)
1234 		return delay;
1235 
1236 	cs_setup = DIV_ROUND_UP((delay * DIV_ROUND_UP(clk_rate, 1000000)),
1237 				1000);
1238 
1239 	/* inactive */
1240 	delay = spi_delay_to_ns(&spi->cs_inactive, NULL);
1241 	if (delay < 0)
1242 		return delay;
1243 	cs_inactive = DIV_ROUND_UP((delay * DIV_ROUND_UP(clk_rate, 1000000)), 1000);
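	/*
	 * The conversions above turn a delay in ns into clock cycles:
	 * ns * (clk_rate in MHz) / 1000, rounded up; for example 100 ns at
	 * 200 MHz is 20 cycles. cs_hold is additionally divided by 32 (the
	 * 32000 divisor) because DLYBCT is expressed in steps of 32 cycles.
	 */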
1244 
1245 	ret = pm_runtime_resume_and_get(ctrl->dev.parent);
1246 	if (ret < 0)
1247 		return ret;
1248 
1249 	aq->scr &= ~QSPI_SCR_DLYBS_MASK;
1250 	aq->scr |= QSPI_SCR_DLYBS(cs_setup);
1251 	atmel_qspi_write(aq->scr, aq, QSPI_SCR);
1252 
1253 	aq->mr &= ~(QSPI_MR_DLYBCT_MASK | QSPI_MR_DLYCS_MASK);
1254 	aq->mr |= QSPI_MR_DLYBCT(cs_hold) | QSPI_MR_DLYCS(cs_inactive);
1255 	atmel_qspi_write(aq->mr, aq, QSPI_MR);
1256 
1257 	pm_runtime_put_autosuspend(ctrl->dev.parent);
1258 
1259 	return 0;
1260 }
1261 
1262 static int atmel_qspi_init(struct atmel_qspi *aq)
1263 {
1264 	int ret;
1265 
1266 	if (aq->caps->has_gclk) {
1267 		ret = atmel_qspi_reg_sync(aq);
1268 		if (ret)
1269 			return ret;
1270 		atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
1271 		return 0;
1272 	}
1273 
1274 	/* Reset the QSPI controller */
1275 	atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
1276 
1277 	/* Set the QSPI controller by default in Serial Memory Mode */
1278 	ret = atmel_qspi_set_serial_memory_mode(aq);
1279 	if (ret < 0)
1280 		return ret;
1281 
1282 	/* Enable the QSPI controller */
1283 	atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
1284 	return 0;
1285 }
1286 
1287 static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
1288 {
1289 	struct atmel_qspi *aq = dev_id;
1290 	u32 status, mask, pending;
1291 
1292 	status = atmel_qspi_read(aq, QSPI_SR);
1293 	mask = atmel_qspi_read(aq, QSPI_IMR);
1294 	pending = status & mask;
1295 
1296 	if (!pending)
1297 		return IRQ_NONE;
1298 
1299 	aq->pending |= pending;
1300 	if ((aq->pending & aq->irq_mask) == aq->irq_mask)
1301 		complete(&aq->cmd_completion);
1302 
1303 	return IRQ_HANDLED;
1304 }
1305 
1306 static int atmel_qspi_dma_init(struct spi_controller *ctrl)
1307 {
1308 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1309 	int ret;
1310 
1311 	aq->rx_chan = devm_dma_request_chan(&aq->pdev->dev, "rx");
1312 	if (IS_ERR(aq->rx_chan)) {
1313 		ret = dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->rx_chan),
1314 				    "RX DMA channel is not available\n");
1315 		aq->rx_chan = NULL;
1316 		return ret;
1317 	}
1318 
1319 	aq->tx_chan = devm_dma_request_chan(&aq->pdev->dev, "tx");
1320 	if (IS_ERR(aq->tx_chan)) {
1321 		ret = dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->tx_chan),
1322 				    "TX DMA channel is not available\n");
1323 		aq->rx_chan = NULL;
1324 		aq->tx_chan = NULL;
1325 		return ret;
1326 	}
1327 
1328 	ctrl->dma_rx = aq->rx_chan;
1329 	ctrl->dma_tx = aq->tx_chan;
1330 	init_completion(&aq->dma_completion);
1331 
1332 	dev_info(&aq->pdev->dev, "Using %s (tx) and %s (rx) for DMA transfers\n",
1333 		 dma_chan_name(aq->tx_chan), dma_chan_name(aq->rx_chan));
1334 
1335 	return 0;
1336 }
1337 
1338 static const struct atmel_qspi_ops atmel_qspi_ops = {
1339 	.set_cfg = atmel_qspi_set_cfg,
1340 	.transfer = atmel_qspi_transfer,
1341 };
1342 
1343 static const struct atmel_qspi_ops atmel_qspi_sama7g5_ops = {
1344 	.set_cfg = atmel_qspi_sama7g5_set_cfg,
1345 	.transfer = atmel_qspi_sama7g5_transfer,
1346 };
1347 
1348 static int atmel_qspi_probe(struct platform_device *pdev)
1349 {
1350 	struct spi_controller *ctrl;
1351 	struct atmel_qspi *aq;
1352 	struct resource *res;
1353 	int irq, err = 0;
1354 
1355 	ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(*aq));
1356 	if (!ctrl)
1357 		return -ENOMEM;
1358 
1359 	aq = spi_controller_get_devdata(ctrl);
1360 
1361 	aq->caps = of_device_get_match_data(&pdev->dev);
1362 	if (!aq->caps) {
1363 		dev_err(&pdev->dev, "Could not retrieve QSPI caps\n");
1364 		return -EINVAL;
1365 	}
1366 
1367 	init_completion(&aq->cmd_completion);
1368 	aq->pdev = pdev;
1369 
1370 	ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
1371 	if (aq->caps->octal)
1372 		ctrl->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
1373 
1374 	if (aq->caps->has_gclk)
1375 		aq->ops = &atmel_qspi_sama7g5_ops;
1376 	else
1377 		aq->ops = &atmel_qspi_ops;
1378 
1379 	ctrl->max_speed_hz = aq->caps->max_speed_hz;
1380 	ctrl->setup = atmel_qspi_setup;
1381 	ctrl->set_cs_timing = atmel_qspi_set_cs_timing;
1382 	ctrl->bus_num = -1;
1383 	ctrl->mem_ops = &atmel_qspi_mem_ops;
1384 	ctrl->num_chipselect = 1;
1385 	ctrl->dev.of_node = pdev->dev.of_node;
1386 	platform_set_drvdata(pdev, ctrl);
1387 
1388 	/* Map the registers */
1389 	aq->regs = devm_platform_ioremap_resource_byname(pdev, "qspi_base");
1390 	if (IS_ERR(aq->regs))
1391 		return dev_err_probe(&pdev->dev, PTR_ERR(aq->regs),
1392 				     "missing registers\n");
1393 
1394 	/* Map the AHB memory */
1395 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mmap");
1396 	aq->mem = devm_ioremap_resource(&pdev->dev, res);
1397 	if (IS_ERR(aq->mem))
1398 		return dev_err_probe(&pdev->dev, PTR_ERR(aq->mem),
1399 				     "missing AHB memory\n");
1400 
1401 	aq->mmap_size = resource_size(res);
1402 	aq->mmap_phys_base = (dma_addr_t)res->start;
1403 
1404 	/* Get the peripheral clock */
1405 	aq->pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
1406 	if (IS_ERR(aq->pclk))
1407 		aq->pclk = devm_clk_get_enabled(&pdev->dev, NULL);
1408 
1409 	if (IS_ERR(aq->pclk))
1410 		return dev_err_probe(&pdev->dev, PTR_ERR(aq->pclk),
1411 				     "missing peripheral clock\n");
1412 
1413 	if (aq->caps->has_qspick) {
1414 		/* Get the QSPI system clock */
1415 		aq->qspick = devm_clk_get_enabled(&pdev->dev, "qspick");
1416 		if (IS_ERR(aq->qspick)) {
1417 			dev_err(&pdev->dev, "missing system clock\n");
1418 			err = PTR_ERR(aq->qspick);
1419 			return err;
1420 		}
1421 
1422 	} else if (aq->caps->has_gclk) {
1423 		/* Get the QSPI generic clock */
1424 		aq->gclk = devm_clk_get(&pdev->dev, "gclk");
1425 		if (IS_ERR(aq->gclk)) {
1426 			dev_err(&pdev->dev, "missing Generic clock\n");
1427 			err = PTR_ERR(aq->gclk);
1428 			return err;
1429 		}
1430 	}
1431 
1432 	if (aq->caps->has_dma) {
1433 		err = atmel_qspi_dma_init(ctrl);
1434 		if (err == -EPROBE_DEFER)
1435 			return err;
1436 	}
1437 
1438 	/* Request the IRQ */
1439 	irq = platform_get_irq(pdev, 0);
1440 	if (irq < 0)
1441 		return irq;
1442 
1443 	err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt,
1444 			       0, dev_name(&pdev->dev), aq);
1445 	if (err)
1446 		return err;
1447 
1448 	pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
1449 	pm_runtime_use_autosuspend(&pdev->dev);
1450 	devm_pm_runtime_set_active_enabled(&pdev->dev);
1451 	devm_pm_runtime_get_noresume(&pdev->dev);
1452 
1453 	err = atmel_qspi_init(aq);
1454 	if (err)
1455 		return err;
1456 
1457 	err = spi_register_controller(ctrl);
1458 	if (err)
1459 		return err;
1460 
1461 	pm_runtime_put_autosuspend(&pdev->dev);
1462 
1463 	return 0;
1464 }
1465 
1466 static int atmel_qspi_sama7g5_suspend(struct atmel_qspi *aq)
1467 {
1468 	int ret;
1469 	u32 val;
1470 
1471 	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
1472 				 !(val & QSPI_SR2_RBUSY) &&
1473 				 (val & QSPI_SR2_HIDLE), 40,
1474 				 ATMEL_QSPI_SYNC_TIMEOUT);
1475 	if (ret)
1476 		return ret;
1477 
1478 	atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
1479 	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
1480 				 !(val & QSPI_SR2_QSPIENS), 40,
1481 				 ATMEL_QSPI_SYNC_TIMEOUT);
1482 	if (ret)
1483 		return ret;
1484 
1485 	clk_disable_unprepare(aq->gclk);
1486 
1487 	if (aq->caps->has_dllon) {
1488 		atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR);
1489 		ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
1490 					 !(val & QSPI_SR2_DLOCK), 40,
1491 					 ATMEL_QSPI_TIMEOUT);
1492 		if (ret)
1493 			return ret;
1494 	}
1495 
1496 	if (aq->caps->has_padcalib)
1497 		return readl_poll_timeout(aq->regs + QSPI_SR2, val,
1498 					  !(val & QSPI_SR2_CALBSY), 40,
1499 					  ATMEL_QSPI_TIMEOUT);
1500 	return 0;
1501 }
1502 
1503 static void atmel_qspi_remove(struct platform_device *pdev)
1504 {
1505 	struct spi_controller *ctrl = platform_get_drvdata(pdev);
1506 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1507 	int ret;
1508 
1509 	spi_unregister_controller(ctrl);
1510 
1511 	ret = pm_runtime_get_sync(&pdev->dev);
1512 	if (ret >= 0) {
1513 		if (aq->caps->has_gclk) {
1514 			ret = atmel_qspi_sama7g5_suspend(aq);
1515 			if (ret)
1516 				dev_warn(&pdev->dev, "Failed to de-init device on remove: %d\n", ret);
1517 			return;
1518 		}
1519 
1520 		atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
1521 	} else {
1522 		/*
1523 		 * atmel_qspi_runtime_{suspend,resume} only disable and enable
1524 		 * the two clocks. If resume failed, the clocks are still off,
1525 		 * so skip the hardware access and don't disable them again.
1526 		 */
1527 		dev_warn(&pdev->dev, "Failed to resume device on remove\n");
1528 	}
1529 }
1530 
1531 static int __maybe_unused atmel_qspi_suspend(struct device *dev)
1532 {
1533 	struct spi_controller *ctrl = dev_get_drvdata(dev);
1534 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1535 	int ret;
1536 
1537 	ret = pm_runtime_resume_and_get(dev);
1538 	if (ret < 0)
1539 		return ret;
1540 
1541 	if (aq->caps->has_gclk) {
1542 		ret = atmel_qspi_sama7g5_suspend(aq);
1543 		clk_disable_unprepare(aq->pclk);
1544 		return ret;
1545 	}
1546 
1547 	atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
1548 
1549 	pm_runtime_mark_last_busy(dev);
1550 	pm_runtime_force_suspend(dev);
1551 
1552 	clk_unprepare(aq->qspick);
1553 	clk_unprepare(aq->pclk);
1554 
1555 	return 0;
1556 }
1557 
1558 static int __maybe_unused atmel_qspi_resume(struct device *dev)
1559 {
1560 	struct spi_controller *ctrl = dev_get_drvdata(dev);
1561 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1562 	int ret;
1563 
1564 	ret = clk_prepare(aq->pclk);
1565 	if (ret)
1566 		return ret;
1567 
1568 	ret = clk_prepare(aq->qspick);
1569 	if (ret) {
1570 		clk_unprepare(aq->pclk);
1571 		return ret;
1572 	}
1573 
1574 	if (aq->caps->has_gclk)
1575 		return atmel_qspi_sama7g5_init(aq);
1576 
1577 	ret = pm_runtime_force_resume(dev);
1578 	if (ret < 0)
1579 		return ret;
1580 
1581 	atmel_qspi_init(aq);
1582 
1583 	atmel_qspi_write(aq->scr, aq, QSPI_SCR);
1584 
1585 	pm_runtime_put_autosuspend(dev);
1586 
1587 	return 0;
1588 }
1589 
1590 static int __maybe_unused atmel_qspi_runtime_suspend(struct device *dev)
1591 {
1592 	struct spi_controller *ctrl = dev_get_drvdata(dev);
1593 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1594 
1595 	clk_disable(aq->qspick);
1596 	clk_disable(aq->pclk);
1597 
1598 	return 0;
1599 }
1600 
1601 static int __maybe_unused atmel_qspi_runtime_resume(struct device *dev)
1602 {
1603 	struct spi_controller *ctrl = dev_get_drvdata(dev);
1604 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1605 	int ret;
1606 
1607 	ret = clk_enable(aq->pclk);
1608 	if (ret)
1609 		return ret;
1610 
1611 	ret = clk_enable(aq->qspick);
1612 	if (ret)
1613 		clk_disable(aq->pclk);
1614 
1615 	return ret;
1616 }
1617 
1618 static const struct dev_pm_ops __maybe_unused atmel_qspi_pm_ops = {
1619 	SET_SYSTEM_SLEEP_PM_OPS(atmel_qspi_suspend, atmel_qspi_resume)
1620 	SET_RUNTIME_PM_OPS(atmel_qspi_runtime_suspend,
1621 			   atmel_qspi_runtime_resume, NULL)
1622 };
1623 
1624 static const struct atmel_qspi_caps atmel_sama5d2_qspi_caps = {};
1625 
1626 static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps = {
1627 	.has_qspick = true,
1628 	.has_ricr = true,
1629 };
1630 
1631 static const struct atmel_qspi_caps atmel_sam9x7_ospi_caps = {
1632 	.max_speed_hz = SAM9X7_QSPI_MAX_SPEED_HZ,
1633 	.has_gclk = true,
1634 	.octal = true,
1635 	.has_dma = true,
1636 	.has_2xgclk = true,
1637 	.has_padcalib = false,
1638 	.has_dllon = false,
1639 };
1640 
1641 static const struct atmel_qspi_caps atmel_sama7d65_ospi_caps = {
1642 	.max_speed_hz = SAMA7G5_QSPI0_MAX_SPEED_HZ,
1643 	.has_gclk = true,
1644 	.octal = true,
1645 	.has_dma = true,
1646 	.has_2xgclk = true,
1647 	.has_padcalib = true,
1648 	.has_dllon = false,
1649 };
1650 
1651 static const struct atmel_qspi_caps atmel_sama7d65_qspi_caps = {
1652 	.max_speed_hz = SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ,
1653 	.has_gclk = true,
1654 	.has_dma = true,
1655 	.has_2xgclk = true,
1656 	.has_dllon = false,
1657 };
1658 
1659 static const struct atmel_qspi_caps atmel_sama7g5_ospi_caps = {
1660 	.max_speed_hz = SAMA7G5_QSPI0_MAX_SPEED_HZ,
1661 	.has_gclk = true,
1662 	.octal = true,
1663 	.has_dma = true,
1664 	.has_padcalib = true,
1665 	.has_dllon = true,
1666 };
1667 
1668 static const struct atmel_qspi_caps atmel_sama7g5_qspi_caps = {
1669 	.max_speed_hz = SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ,
1670 	.has_gclk = true,
1671 	.has_dma = true,
1672 	.has_dllon = true,
1673 };
1674 
1675 static const struct of_device_id atmel_qspi_dt_ids[] = {
1676 	{
1677 		.compatible = "atmel,sama5d2-qspi",
1678 		.data = &atmel_sama5d2_qspi_caps,
1679 	},
1680 	{
1681 		.compatible = "microchip,sam9x60-qspi",
1682 		.data = &atmel_sam9x60_qspi_caps,
1683 	},
1684 	{
1685 		.compatible = "microchip,sama7g5-ospi",
1686 		.data = &atmel_sama7g5_ospi_caps,
1687 	},
1688 	{
1689 		.compatible = "microchip,sama7g5-qspi",
1690 		.data = &atmel_sama7g5_qspi_caps,
1691 	},
1692 	{
1693 		.compatible = "microchip,sam9x7-ospi",
1694 		.data = &atmel_sam9x7_ospi_caps,
1695 	},
1696 	{
1697 		.compatible = "microchip,sama7d65-ospi",
1698 		.data = &atmel_sama7d65_ospi_caps,
1699 	},
1700 	{
1701 		.compatible = "microchip,sama7d65-qspi",
1702 		.data = &atmel_sama7d65_qspi_caps,
1703 	},
1704 
1706 	{ /* sentinel */ }
1707 };
1708 
1709 MODULE_DEVICE_TABLE(of, atmel_qspi_dt_ids);
1710 
1711 static struct platform_driver atmel_qspi_driver = {
1712 	.driver = {
1713 		.name	= "atmel_qspi",
1714 		.of_match_table	= atmel_qspi_dt_ids,
1715 		.pm	= pm_ptr(&atmel_qspi_pm_ops),
1716 	},
1717 	.probe		= atmel_qspi_probe,
1718 	.remove		= atmel_qspi_remove,
1719 };
1720 module_platform_driver(atmel_qspi_driver);
1721 
1722 MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@atmel.com>");
1723 MODULE_AUTHOR("Piotr Bugalski <bugalski.piotr@gmail.com>");
1724 MODULE_DESCRIPTION("Atmel QSPI Controller driver");
1725 MODULE_LICENSE("GPL v2");
1726