1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Driver for Atmel QSPI Controller
4  *
5  * Copyright (C) 2015 Atmel Corporation
6  * Copyright (C) 2018 Cryptera A/S
7  *
8  * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
9  * Author: Piotr Bugalski <bugalski.piotr@gmail.com>
10  *
11  * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
12  */
13 
14 #include <linux/bitfield.h>
15 #include <linux/clk.h>
16 #include <linux/delay.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/dmaengine.h>
19 #include <linux/err.h>
20 #include <linux/interrupt.h>
21 #include <linux/io.h>
22 #include <linux/iopoll.h>
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/of.h>
26 #include <linux/of_platform.h>
27 #include <linux/platform_device.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/spi/spi-mem.h>
30 
31 /* QSPI register offsets */
32 #define QSPI_CR      0x0000  /* Control Register */
33 #define QSPI_MR      0x0004  /* Mode Register */
34 #define QSPI_RD      0x0008  /* Receive Data Register */
35 #define QSPI_TD      0x000c  /* Transmit Data Register */
36 #define QSPI_SR      0x0010  /* Status Register */
37 #define QSPI_IER     0x0014  /* Interrupt Enable Register */
38 #define QSPI_IDR     0x0018  /* Interrupt Disable Register */
39 #define QSPI_IMR     0x001c  /* Interrupt Mask Register */
40 #define QSPI_SCR     0x0020  /* Serial Clock Register */
41 #define QSPI_SR2     0x0024  /* SAMA7G5 Status Register */
42 
43 #define QSPI_IAR     0x0030  /* Instruction Address Register */
44 #define QSPI_ICR     0x0034  /* Instruction Code Register */
45 #define QSPI_WICR    0x0034  /* Write Instruction Code Register */
46 #define QSPI_IFR     0x0038  /* Instruction Frame Register */
47 #define QSPI_RICR    0x003C  /* Read Instruction Code Register */
48 
49 #define QSPI_SMR     0x0040  /* Scrambling Mode Register */
50 #define QSPI_SKR     0x0044  /* Scrambling Key Register */
51 
52 #define QSPI_REFRESH	0x0050	/* Refresh Register */
53 #define QSPI_WRACNT	0x0054	/* Write Access Counter Register */
54 #define QSPI_DLLCFG	0x0058	/* DLL Configuration Register */
55 #define QSPI_PCALCFG	0x005C	/* Pad Calibration Configuration Register */
56 #define QSPI_PCALBP	0x0060	/* Pad Calibration Bypass Register */
57 #define QSPI_TOUT	0x0064	/* Timeout Register */
58 
59 #define QSPI_WPMR    0x00E4  /* Write Protection Mode Register */
60 #define QSPI_WPSR    0x00E8  /* Write Protection Status Register */
61 
62 #define QSPI_VERSION 0x00FC  /* Version Register */
63 
64 #define SAMA7G5_QSPI0_MAX_SPEED_HZ	200000000
65 #define SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ	133000000
66 
67 /* Bitfields in QSPI_CR (Control Register) */
68 #define QSPI_CR_QSPIEN                  BIT(0)
69 #define QSPI_CR_QSPIDIS                 BIT(1)
70 #define QSPI_CR_DLLON			BIT(2)
71 #define QSPI_CR_DLLOFF			BIT(3)
72 #define QSPI_CR_STPCAL			BIT(4)
73 #define QSPI_CR_SRFRSH			BIT(5)
74 #define QSPI_CR_SWRST                   BIT(7)
75 #define QSPI_CR_UPDCFG			BIT(8)
76 #define QSPI_CR_STTFR			BIT(9)
77 #define QSPI_CR_RTOUT			BIT(10)
78 #define QSPI_CR_LASTXFER                BIT(24)
79 
80 /* Bitfields in QSPI_MR (Mode Register) */
81 #define QSPI_MR_SMM                     BIT(0)
82 #define QSPI_MR_LLB                     BIT(1)
83 #define QSPI_MR_WDRBT                   BIT(2)
84 #define QSPI_MR_SMRM                    BIT(3)
85 #define QSPI_MR_DQSDLYEN		BIT(3)
86 #define QSPI_MR_CSMODE_MASK             GENMASK(5, 4)
87 #define QSPI_MR_CSMODE_NOT_RELOADED     (0 << 4)
88 #define QSPI_MR_CSMODE_LASTXFER         (1 << 4)
89 #define QSPI_MR_CSMODE_SYSTEMATICALLY   (2 << 4)
90 #define QSPI_MR_NBBITS_MASK             GENMASK(11, 8)
91 #define QSPI_MR_NBBITS(n)               ((((n) - 8) << 8) & QSPI_MR_NBBITS_MASK)
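/* NBBITS encodes bits-per-transfer minus 8; e.g. QSPI_MR_NBBITS(8) evaluates to 0 (8-bit transfers). */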
92 #define QSPI_MR_OENSD			BIT(15)
93 #define QSPI_MR_DLYBCT_MASK             GENMASK(23, 16)
94 #define QSPI_MR_DLYBCT(n)               (((n) << 16) & QSPI_MR_DLYBCT_MASK)
95 #define QSPI_MR_DLYCS_MASK              GENMASK(31, 24)
96 #define QSPI_MR_DLYCS(n)                (((n) << 24) & QSPI_MR_DLYCS_MASK)
97 
98 /* Bitfields in QSPI_SR/QSPI_IER/QSPI_IDR/QSPI_IMR  */
99 #define QSPI_SR_RDRF                    BIT(0)
100 #define QSPI_SR_TDRE                    BIT(1)
101 #define QSPI_SR_TXEMPTY                 BIT(2)
102 #define QSPI_SR_OVRES                   BIT(3)
103 #define QSPI_SR_CSR                     BIT(8)
104 #define QSPI_SR_CSS                     BIT(9)
105 #define QSPI_SR_INSTRE                  BIT(10)
106 #define QSPI_SR_LWRA			BIT(11)
107 #define QSPI_SR_QITF			BIT(12)
108 #define QSPI_SR_QITR			BIT(13)
109 #define QSPI_SR_CSFA			BIT(14)
110 #define QSPI_SR_CSRA			BIT(15)
111 #define QSPI_SR_RFRSHD			BIT(16)
112 #define QSPI_SR_TOUT			BIT(17)
113 #define QSPI_SR_QSPIENS                 BIT(24)
114 
115 #define QSPI_SR_CMD_COMPLETED	(QSPI_SR_INSTRE | QSPI_SR_CSR)
116 
117 /* Bitfields in QSPI_SCR (Serial Clock Register) */
118 #define QSPI_SCR_CPOL                   BIT(0)
119 #define QSPI_SCR_CPHA                   BIT(1)
120 #define QSPI_SCR_SCBR_MASK              GENMASK(15, 8)
121 #define QSPI_SCR_SCBR(n)                (((n) << 8) & QSPI_SCR_SCBR_MASK)
122 #define QSPI_SCR_DLYBS_MASK             GENMASK(23, 16)
123 #define QSPI_SCR_DLYBS(n)               (((n) << 16) & QSPI_SCR_DLYBS_MASK)
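/*
 * The serial clock is the peripheral clock divided down by the baud rate
 * field: fSCK = fperipheral / (SCBR + 1), which is why atmel_qspi_setup()
 * below programs SCBR = DIV_ROUND_UP(pclk, max_speed) - 1. CPOL/CPHA select
 * the usual SPI clock polarity and phase.
 */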
124 
125 /* Bitfields in QSPI_SR2 (SAMA7G5 Status Register) */
126 #define QSPI_SR2_SYNCBSY		BIT(0)
127 #define QSPI_SR2_QSPIENS		BIT(1)
128 #define QSPI_SR2_CSS			BIT(2)
129 #define QSPI_SR2_RBUSY			BIT(3)
130 #define QSPI_SR2_HIDLE			BIT(4)
131 #define QSPI_SR2_DLOCK			BIT(5)
132 #define QSPI_SR2_CALBSY			BIT(6)
133 
134 /* Bitfields in QSPI_IAR (Instruction Address Register) */
135 #define QSPI_IAR_ADDR			GENMASK(31, 0)
136 
137 /* Bitfields in QSPI_ICR (Read/Write Instruction Code Register) */
138 #define QSPI_ICR_INST_MASK              GENMASK(7, 0)
139 #define QSPI_ICR_INST(inst)             (((inst) << 0) & QSPI_ICR_INST_MASK)
140 #define QSPI_ICR_INST_MASK_SAMA7G5	GENMASK(15, 0)
141 #define QSPI_ICR_OPT_MASK               GENMASK(23, 16)
142 #define QSPI_ICR_OPT(opt)               (((opt) << 16) & QSPI_ICR_OPT_MASK)
143 
144 /* Bitfields in QSPI_IFR (Instruction Frame Register) */
145 #define QSPI_IFR_WIDTH_MASK             GENMASK(2, 0)
146 #define QSPI_IFR_WIDTH_SINGLE_BIT_SPI   (0 << 0)
147 #define QSPI_IFR_WIDTH_DUAL_OUTPUT      (1 << 0)
148 #define QSPI_IFR_WIDTH_QUAD_OUTPUT      (2 << 0)
149 #define QSPI_IFR_WIDTH_DUAL_IO          (3 << 0)
150 #define QSPI_IFR_WIDTH_QUAD_IO          (4 << 0)
151 #define QSPI_IFR_WIDTH_DUAL_CMD         (5 << 0)
152 #define QSPI_IFR_WIDTH_QUAD_CMD         (6 << 0)
153 #define QSPI_IFR_WIDTH_OCT_OUTPUT	(7 << 0)
154 #define QSPI_IFR_WIDTH_OCT_IO		(8 << 0)
155 #define QSPI_IFR_WIDTH_OCT_CMD		(9 << 0)
156 #define QSPI_IFR_INSTEN                 BIT(4)
157 #define QSPI_IFR_ADDREN                 BIT(5)
158 #define QSPI_IFR_OPTEN                  BIT(6)
159 #define QSPI_IFR_DATAEN                 BIT(7)
160 #define QSPI_IFR_OPTL_MASK              GENMASK(9, 8)
161 #define QSPI_IFR_OPTL_1BIT              (0 << 8)
162 #define QSPI_IFR_OPTL_2BIT              (1 << 8)
163 #define QSPI_IFR_OPTL_4BIT              (2 << 8)
164 #define QSPI_IFR_OPTL_8BIT              (3 << 8)
165 #define QSPI_IFR_ADDRL                  BIT(10)
166 #define QSPI_IFR_ADDRL_SAMA7G5		GENMASK(11, 10)
167 #define QSPI_IFR_TFRTYP_MEM		BIT(12)
168 #define QSPI_IFR_SAMA5D2_WRITE_TRSFR	BIT(13)
169 #define QSPI_IFR_CRM                    BIT(14)
170 #define QSPI_IFR_DDREN			BIT(15)
171 #define QSPI_IFR_NBDUM_MASK             GENMASK(20, 16)
172 #define QSPI_IFR_NBDUM(n)               (((n) << 16) & QSPI_IFR_NBDUM_MASK)
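/* NBDUM is the number of dummy cycles (0-31) inserted between the address/option phase and the data phase. */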
173 #define QSPI_IFR_END			BIT(22)
174 #define QSPI_IFR_SMRM			BIT(23)
175 #define QSPI_IFR_APBTFRTYP_READ		BIT(24)	/* Defined in SAM9X60 */
176 #define QSPI_IFR_DQSEN			BIT(25)
177 #define QSPI_IFR_DDRCMDEN		BIT(26)
178 #define QSPI_IFR_HFWBEN			BIT(27)
179 #define QSPI_IFR_PROTTYP		GENMASK(29, 28)
180 #define QSPI_IFR_PROTTYP_STD_SPI	0
181 #define QSPI_IFR_PROTTYP_TWIN_QUAD	1
182 #define QSPI_IFR_PROTTYP_OCTAFLASH	2
183 #define QSPI_IFR_PROTTYP_HYPERFLASH	3
184 
185 /* Bitfields in QSPI_SMR (Scrambling Mode Register) */
186 #define QSPI_SMR_SCREN                  BIT(0)
187 #define QSPI_SMR_RVDIS                  BIT(1)
188 #define QSPI_SMR_SCRKL                  BIT(2)
189 
190 /* Bitfields in QSPI_REFRESH (Refresh Register) */
191 #define QSPI_REFRESH_DELAY_COUNTER	GENMASK(31, 0)
192 
193 /* Bitfields in QSPI_WRACNT (Write Access Counter Register) */
194 #define QSPI_WRACNT_NBWRA		GENMASK(31, 0)
195 
196 /* Bitfields in QSPI_DLLCFG (DLL Configuration Register) */
197 #define QSPI_DLLCFG_RANGE		BIT(0)
198 
199 /* Bitfields in QSPI_PCALCFG (DLL Pad Calibration Configuration Register) */
200 #define QSPI_PCALCFG_AAON		BIT(0)
201 #define QSPI_PCALCFG_DAPCAL		BIT(1)
202 #define QSPI_PCALCFG_DIFFPM		BIT(2)
203 #define QSPI_PCALCFG_CLKDIV		GENMASK(6, 4)
204 #define QSPI_PCALCFG_CALCNT		GENMASK(16, 8)
205 #define QSPI_PCALCFG_CALP		GENMASK(27, 24)
206 #define QSPI_PCALCFG_CALN		GENMASK(31, 28)
207 
208 /* Bitfields in QSPI_PCALBP (DLL Pad Calibration Bypass Register) */
209 #define QSPI_PCALBP_BPEN		BIT(0)
210 #define QSPI_PCALBP_CALPBP		GENMASK(11, 8)
211 #define QSPI_PCALBP_CALNBP		GENMASK(19, 16)
212 
213 /* Bitfields in QSPI_TOUT (Timeout Register) */
214 #define QSPI_TOUT_TCNTM			GENMASK(15, 0)
215 
216 /* Bitfields in QSPI_WPMR (Write Protection Mode Register) */
217 #define QSPI_WPMR_WPEN                  BIT(0)
218 #define QSPI_WPMR_WPITEN		BIT(1)
219 #define QSPI_WPMR_WPCREN		BIT(2)
220 #define QSPI_WPMR_WPKEY_MASK            GENMASK(31, 8)
221 #define QSPI_WPMR_WPKEY(wpkey)          (((wpkey) << 8) & QSPI_WPMR_WPKEY_MASK)
222 
223 /* Bitfields in QSPI_WPSR (Write Protection Status Register) */
224 #define QSPI_WPSR_WPVS                  BIT(0)
225 #define QSPI_WPSR_WPVSRC_MASK           GENMASK(15, 8)
226 #define QSPI_WPSR_WPVSRC(src)           (((src) << 8) & QSPI_WPSR_WPVSRC_MASK)
227 
228 #define ATMEL_QSPI_TIMEOUT		1000	/* ms */
229 #define ATMEL_QSPI_SYNC_TIMEOUT		300	/* ms */
230 #define QSPI_DLLCFG_THRESHOLD_FREQ	90000000U
231 #define QSPI_CALIB_TIME			2000	/* 2 us */
232 
233 /* Use PIO for small transfers. */
234 #define ATMEL_QSPI_DMA_MIN_BYTES	16
235 /**
236  * struct atmel_qspi_pcal - Pad Calibration Clock Division
237  * @pclk_rate: peripheral clock rate.
238  * @pclk_div: calibration clock division. The clock applied to the calibration
239  *           cell is divided by pclk_div + 1.
240  */
241 struct atmel_qspi_pcal {
242 	u32 pclk_rate;
243 	u8 pclk_div;
244 };
245 
246 #define ATMEL_QSPI_PCAL_ARRAY_SIZE	8
247 static const struct atmel_qspi_pcal pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE] = {
248 	{25000000, 0},
249 	{50000000, 1},
250 	{75000000, 2},
251 	{100000000, 3},
252 	{125000000, 4},
253 	{150000000, 5},
254 	{175000000, 6},
255 	{200000000, 7},
256 };
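/*
 * atmel_qspi_set_pad_calibration() picks the first entry whose pclk_rate is
 * not below the actual peripheral clock, so the calibration cell clock
 * (pclk / (pclk_div + 1)) stays at or below 25 MHz for peripheral clocks up
 * to 200 MHz; e.g. a 150 MHz pclk selects pclk_div = 5, i.e. 25 MHz.
 * Faster clocks fall back to the largest divider.
 */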
257 
258 struct atmel_qspi_caps {
259 	u32 max_speed_hz;
260 	bool has_qspick;
261 	bool has_gclk;
262 	bool has_ricr;
263 	bool octal;
264 	bool has_dma;
265 };
266 
267 struct atmel_qspi_ops;
268 
269 struct atmel_qspi {
270 	void __iomem		*regs;
271 	void __iomem		*mem;
272 	struct clk		*pclk;
273 	struct clk		*qspick;
274 	struct clk		*gclk;
275 	struct platform_device	*pdev;
276 	const struct atmel_qspi_caps *caps;
277 	const struct atmel_qspi_ops *ops;
278 	resource_size_t		mmap_size;
279 	u32			pending;
280 	u32			irq_mask;
281 	u32			mr;
282 	u32			scr;
283 	u32			target_max_speed_hz;
284 	struct completion	cmd_completion;
285 	struct completion	dma_completion;
286 	dma_addr_t		mmap_phys_base;
287 	struct dma_chan		*rx_chan;
288 	struct dma_chan		*tx_chan;
289 };
290 
291 struct atmel_qspi_ops {
292 	int (*set_cfg)(struct atmel_qspi *aq, const struct spi_mem_op *op,
293 		       u32 *offset);
294 	int (*transfer)(struct spi_mem *mem, const struct spi_mem_op *op,
295 			u32 offset);
296 };
297 
298 struct atmel_qspi_mode {
299 	u8 cmd_buswidth;
300 	u8 addr_buswidth;
301 	u8 data_buswidth;
302 	u32 config;
303 };
304 
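/*
 * Each entry maps a { command, address, data } buswidth combination to the
 * matching QSPI_IFR_WIDTH_* frame setting; e.g. { 1, 1, 4 } is a single-bit
 * command and address with quad data (Quad Output read/program commands).
 */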
305 static const struct atmel_qspi_mode atmel_qspi_modes[] = {
306 	{ 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
307 	{ 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
308 	{ 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
309 	{ 1, 2, 2, QSPI_IFR_WIDTH_DUAL_IO },
310 	{ 1, 4, 4, QSPI_IFR_WIDTH_QUAD_IO },
311 	{ 2, 2, 2, QSPI_IFR_WIDTH_DUAL_CMD },
312 	{ 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
313 };
314 
315 static const struct atmel_qspi_mode atmel_qspi_sama7g5_modes[] = {
316 	{ 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
317 	{ 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
318 	{ 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
319 	{ 1, 2, 2, QSPI_IFR_WIDTH_DUAL_IO },
320 	{ 1, 4, 4, QSPI_IFR_WIDTH_QUAD_IO },
321 	{ 2, 2, 2, QSPI_IFR_WIDTH_DUAL_CMD },
322 	{ 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
323 	{ 1, 1, 8, QSPI_IFR_WIDTH_OCT_OUTPUT },
324 	{ 1, 8, 8, QSPI_IFR_WIDTH_OCT_IO },
325 	{ 8, 8, 8, QSPI_IFR_WIDTH_OCT_CMD },
326 };
327 
328 #ifdef VERBOSE_DEBUG
329 static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
330 {
331 	switch (offset) {
332 	case QSPI_CR:
333 		return "CR";
334 	case QSPI_MR:
335 		return "MR";
336 	case QSPI_RD:
337 		return "RD";
338 	case QSPI_TD:
339 		return "TD";
340 	case QSPI_SR:
341 		return "SR";
342 	case QSPI_IER:
343 		return "IER";
344 	case QSPI_IDR:
345 		return "IDR";
346 	case QSPI_IMR:
347 		return "IMR";
348 	case QSPI_SCR:
349 		return "SCR";
350 	case QSPI_SR2:
351 		return "SR2";
352 	case QSPI_IAR:
353 		return "IAR";
354 	case QSPI_ICR:
355 		return "ICR/WICR";
356 	case QSPI_IFR:
357 		return "IFR";
358 	case QSPI_RICR:
359 		return "RICR";
360 	case QSPI_SMR:
361 		return "SMR";
362 	case QSPI_SKR:
363 		return "SKR";
364 	case QSPI_REFRESH:
365 		return "REFRESH";
366 	case QSPI_WRACNT:
367 		return "WRACNT";
368 	case QSPI_DLLCFG:
369 		return "DLLCFG";
370 	case QSPI_PCALCFG:
371 		return "PCALCFG";
372 	case QSPI_PCALBP:
373 		return "PCALBP";
374 	case QSPI_TOUT:
375 		return "TOUT";
376 	case QSPI_WPMR:
377 		return "WPMR";
378 	case QSPI_WPSR:
379 		return "WPSR";
380 	case QSPI_VERSION:
381 		return "VERSION";
382 	default:
383 		snprintf(tmp, sz, "0x%02x", offset);
384 		break;
385 	}
386 
387 	return tmp;
388 }
389 #endif /* VERBOSE_DEBUG */
390 
391 static u32 atmel_qspi_read(struct atmel_qspi *aq, u32 offset)
392 {
393 	u32 value = readl_relaxed(aq->regs + offset);
394 
395 #ifdef VERBOSE_DEBUG
396 	char tmp[8];
397 
398 	dev_vdbg(&aq->pdev->dev, "read 0x%08x from %s\n", value,
399 		 atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
400 #endif /* VERBOSE_DEBUG */
401 
402 	return value;
403 }
404 
405 static void atmel_qspi_write(u32 value, struct atmel_qspi *aq, u32 offset)
406 {
407 #ifdef VERBOSE_DEBUG
408 	char tmp[8];
409 
410 	dev_vdbg(&aq->pdev->dev, "write 0x%08x into %s\n", value,
411 		 atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
412 #endif /* VERBOSE_DEBUG */
413 
414 	writel_relaxed(value, aq->regs + offset);
415 }
416 
417 static int atmel_qspi_reg_sync(struct atmel_qspi *aq)
418 {
419 	u32 val;
420 	int ret;
421 
422 	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
423 				 !(val & QSPI_SR2_SYNCBSY), 40,
424 				 ATMEL_QSPI_SYNC_TIMEOUT);
425 	return ret;
426 }
427 
428 static int atmel_qspi_update_config(struct atmel_qspi *aq)
429 {
430 	int ret;
431 
432 	ret = atmel_qspi_reg_sync(aq);
433 	if (ret)
434 		return ret;
435 	atmel_qspi_write(QSPI_CR_UPDCFG, aq, QSPI_CR);
436 	return atmel_qspi_reg_sync(aq);
437 }
438 
439 static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op,
440 					    const struct atmel_qspi_mode *mode)
441 {
442 	if (op->cmd.buswidth != mode->cmd_buswidth)
443 		return false;
444 
445 	if (op->addr.nbytes && op->addr.buswidth != mode->addr_buswidth)
446 		return false;
447 
448 	if (op->data.nbytes && op->data.buswidth != mode->data_buswidth)
449 		return false;
450 
451 	return true;
452 }
453 
454 static int atmel_qspi_find_mode(const struct spi_mem_op *op)
455 {
456 	u32 i;
457 
458 	for (i = 0; i < ARRAY_SIZE(atmel_qspi_modes); i++)
459 		if (atmel_qspi_is_compatible(op, &atmel_qspi_modes[i]))
460 			return i;
461 
462 	return -EOPNOTSUPP;
463 }
464 
465 static int atmel_qspi_sama7g5_find_mode(const struct spi_mem_op *op)
466 {
467 	u32 i;
468 
469 	for (i = 0; i < ARRAY_SIZE(atmel_qspi_sama7g5_modes); i++)
470 		if (atmel_qspi_is_compatible(op, &atmel_qspi_sama7g5_modes[i]))
471 			return i;
472 
473 	return -EOPNOTSUPP;
474 }
475 
476 static bool atmel_qspi_supports_op(struct spi_mem *mem,
477 				   const struct spi_mem_op *op)
478 {
479 	struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
480 	if (!spi_mem_default_supports_op(mem, op))
481 		return false;
482 
483 	if (aq->caps->octal) {
484 		if (atmel_qspi_sama7g5_find_mode(op) < 0)
485 			return false;
486 		else
487 			return true;
488 	}
489 
490 	if (atmel_qspi_find_mode(op) < 0)
491 		return false;
492 
493 	/* special case not supported by hardware */
494 	if (op->addr.nbytes == 2 && op->cmd.buswidth != op->addr.buswidth &&
495 	    op->dummy.nbytes == 0)
496 		return false;
497 
498 	return true;
499 }
500 
501 /*
502  * If the QSPI controller is set in regular SPI mode, set it in
503  * Serial Memory Mode (SMM).
504  */
505 static int atmel_qspi_set_serial_memory_mode(struct atmel_qspi *aq)
506 {
507 	int ret = 0;
508 
509 	if (!(aq->mr & QSPI_MR_SMM)) {
510 		aq->mr |= QSPI_MR_SMM;
511 		atmel_qspi_write(aq->mr, aq, QSPI_MR);
512 
513 		if (aq->caps->has_gclk)
514 			ret = atmel_qspi_update_config(aq);
515 	}
516 
517 	return ret;
518 }
519 
520 static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
521 			      const struct spi_mem_op *op, u32 *offset)
522 {
523 	u32 iar, icr, ifr;
524 	u32 dummy_cycles = 0;
525 	int mode;
526 
527 	iar = 0;
528 	icr = QSPI_ICR_INST(op->cmd.opcode);
529 	ifr = QSPI_IFR_INSTEN;
530 
531 	mode = atmel_qspi_find_mode(op);
532 	if (mode < 0)
533 		return mode;
534 	ifr |= atmel_qspi_modes[mode].config;
535 
536 	if (op->dummy.nbytes)
537 		dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;
538 
539 	/*
540 	 * The controller allows 24 and 32-bit addressing while NAND-flash
541 	 * requires 16-bit addresses. Handling 8-bit addresses is done using
542 	 * the option field. For 16-bit addresses, the workaround depends on
543 	 * the number of requested dummy cycles. If there are 8 or more dummy
544 	 * cycles, the address is shifted and sent with the first dummy byte.
545 	 * Otherwise the opcode is disabled and the first byte of the address
546 	 * contains the command opcode (works only if the opcode and address
547 	 * use the same buswidth). The limitation is when the 16-bit address is
548 	 * used without enough dummy cycles and the opcode is using a different
549 	 * buswidth than the address.
550 	 */
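	/*
	 * Illustrative example: a 2-byte address with at least 8 dummy cycles
	 * on a 1-bit address bus takes the second branch of case 2 below; the
	 * 16-bit address is shifted up one byte (iar = addr << 8) so the first
	 * dummy byte goes out as the low address byte, and dummy_cycles is
	 * reduced by 8 / op->addr.buswidth.
	 */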
551 	if (op->addr.buswidth) {
552 		switch (op->addr.nbytes) {
553 		case 0:
554 			break;
555 		case 1:
556 			ifr |= QSPI_IFR_OPTEN | QSPI_IFR_OPTL_8BIT;
557 			icr |= QSPI_ICR_OPT(op->addr.val & 0xff);
558 			break;
559 		case 2:
560 			if (dummy_cycles < 8 / op->addr.buswidth) {
561 				ifr &= ~QSPI_IFR_INSTEN;
562 				ifr |= QSPI_IFR_ADDREN;
563 				iar = (op->cmd.opcode << 16) |
564 					(op->addr.val & 0xffff);
565 			} else {
566 				ifr |= QSPI_IFR_ADDREN;
567 				iar = (op->addr.val << 8) & 0xffffff;
568 				dummy_cycles -= 8 / op->addr.buswidth;
569 			}
570 			break;
571 		case 3:
572 			ifr |= QSPI_IFR_ADDREN;
573 			iar = op->addr.val & 0xffffff;
574 			break;
575 		case 4:
576 			ifr |= QSPI_IFR_ADDREN | QSPI_IFR_ADDRL;
577 			iar = op->addr.val & 0x7ffffff;
578 			break;
579 		default:
580 			return -ENOTSUPP;
581 		}
582 	}
583 
584 	/* offset of the data access in the QSPI memory space */
585 	*offset = iar;
586 
587 	/* Set number of dummy cycles */
588 	if (dummy_cycles)
589 		ifr |= QSPI_IFR_NBDUM(dummy_cycles);
590 
591 	/* Set data enable and data transfer type. */
592 	if (op->data.nbytes) {
593 		ifr |= QSPI_IFR_DATAEN;
594 
595 		if (op->addr.nbytes)
596 			ifr |= QSPI_IFR_TFRTYP_MEM;
597 	}
598 
599 	mode = atmel_qspi_set_serial_memory_mode(aq);
600 	if (mode < 0)
601 		return mode;
602 
603 	/* Clear pending interrupts */
604 	(void)atmel_qspi_read(aq, QSPI_SR);
605 
606 	/* Set QSPI Instruction Frame registers. */
607 	if (op->addr.nbytes && !op->data.nbytes)
608 		atmel_qspi_write(iar, aq, QSPI_IAR);
609 
610 	if (aq->caps->has_ricr) {
611 		if (op->data.dir == SPI_MEM_DATA_IN)
612 			atmel_qspi_write(icr, aq, QSPI_RICR);
613 		else
614 			atmel_qspi_write(icr, aq, QSPI_WICR);
615 	} else {
616 		if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
617 			ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR;
618 
619 		atmel_qspi_write(icr, aq, QSPI_ICR);
620 	}
621 
622 	atmel_qspi_write(ifr, aq, QSPI_IFR);
623 
624 	return 0;
625 }
626 
627 static int atmel_qspi_wait_for_completion(struct atmel_qspi *aq, u32 irq_mask)
628 {
629 	int err = 0;
630 	u32 sr;
631 
632 	/* Poll INSTRuction End status */
633 	sr = atmel_qspi_read(aq, QSPI_SR);
634 	if ((sr & irq_mask) == irq_mask)
635 		return 0;
636 
637 	/* Wait for INSTRuction End interrupt */
638 	reinit_completion(&aq->cmd_completion);
639 	aq->pending = sr & irq_mask;
640 	aq->irq_mask = irq_mask;
641 	atmel_qspi_write(irq_mask, aq, QSPI_IER);
642 	if (!wait_for_completion_timeout(&aq->cmd_completion,
643 					 msecs_to_jiffies(ATMEL_QSPI_TIMEOUT)))
644 		err = -ETIMEDOUT;
645 	atmel_qspi_write(irq_mask, aq, QSPI_IDR);
646 
647 	return err;
648 }
649 
650 static int atmel_qspi_transfer(struct spi_mem *mem,
651 			       const struct spi_mem_op *op, u32 offset)
652 {
653 	struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
654 
655 	/* Skip to the final steps if there is no data */
656 	if (!op->data.nbytes)
657 		return atmel_qspi_wait_for_completion(aq,
658 						      QSPI_SR_CMD_COMPLETED);
659 
660 	/* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
661 	(void)atmel_qspi_read(aq, QSPI_IFR);
662 
663 	/* Send/Receive data */
664 	if (op->data.dir == SPI_MEM_DATA_IN) {
665 		memcpy_fromio(op->data.buf.in, aq->mem + offset,
666 			      op->data.nbytes);
667 
668 		/* Synchronize AHB and APB accesses again */
669 		rmb();
670 	} else {
671 		memcpy_toio(aq->mem + offset, op->data.buf.out,
672 			    op->data.nbytes);
673 
674 		/* Synchronize AHB and APB accesses again */
675 		wmb();
676 	}
677 
678 	/* Release the chip-select */
679 	atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
680 
681 	return atmel_qspi_wait_for_completion(aq, QSPI_SR_CMD_COMPLETED);
682 }
683 
684 static int atmel_qspi_sama7g5_set_cfg(struct atmel_qspi *aq,
685 				      const struct spi_mem_op *op, u32 *offset)
686 {
687 	u32 iar, icr, ifr;
688 	int mode, ret;
689 
690 	iar = 0;
691 	icr = FIELD_PREP(QSPI_ICR_INST_MASK_SAMA7G5, op->cmd.opcode);
692 	ifr = QSPI_IFR_INSTEN;
693 
694 	mode = atmel_qspi_sama7g5_find_mode(op);
695 	if (mode < 0)
696 		return mode;
697 	ifr |= atmel_qspi_sama7g5_modes[mode].config;
698 
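	/*
	 * In full DTR mode both clock edges carry data, so a given number of
	 * dummy bytes takes half as many clock cycles; hence the extra
	 * division by 2 below.
	 */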
699 	if (op->dummy.buswidth && op->dummy.nbytes) {
700 		if (op->addr.dtr && op->dummy.dtr && op->data.dtr)
701 			ifr |= QSPI_IFR_NBDUM(op->dummy.nbytes * 8 /
702 					      (2 * op->dummy.buswidth));
703 		else
704 			ifr |= QSPI_IFR_NBDUM(op->dummy.nbytes * 8 /
705 					      op->dummy.buswidth);
706 	}
707 
708 	if (op->addr.buswidth && op->addr.nbytes) {
709 		ifr |= FIELD_PREP(QSPI_IFR_ADDRL_SAMA7G5, op->addr.nbytes - 1) |
710 		       QSPI_IFR_ADDREN;
711 		iar = FIELD_PREP(QSPI_IAR_ADDR, op->addr.val);
712 	}
713 
714 	if (op->addr.dtr && op->dummy.dtr && op->data.dtr) {
715 		ifr |= QSPI_IFR_DDREN;
716 		if (op->cmd.dtr)
717 			ifr |= QSPI_IFR_DDRCMDEN;
718 
719 		ifr |= QSPI_IFR_DQSEN;
720 	}
721 
722 	if (op->cmd.buswidth == 8 || op->addr.buswidth == 8 ||
723 	    op->data.buswidth == 8)
724 		ifr |= FIELD_PREP(QSPI_IFR_PROTTYP, QSPI_IFR_PROTTYP_OCTAFLASH);
725 
726 	/* offset of the data access in the QSPI memory space */
727 	*offset = iar;
728 
729 	/* Set data enable */
730 	if (op->data.nbytes) {
731 		ifr |= QSPI_IFR_DATAEN;
732 
733 		if (op->addr.nbytes)
734 			ifr |= QSPI_IFR_TFRTYP_MEM;
735 	}
736 
737 	ret = atmel_qspi_set_serial_memory_mode(aq);
738 	if (ret < 0)
739 		return ret;
740 
741 	/* Clear pending interrupts */
742 	(void)atmel_qspi_read(aq, QSPI_SR);
743 
744 	/* Set QSPI Instruction Frame registers */
745 	if (op->addr.nbytes && !op->data.nbytes)
746 		atmel_qspi_write(iar, aq, QSPI_IAR);
747 
748 	if (op->data.dir == SPI_MEM_DATA_IN) {
749 		atmel_qspi_write(icr, aq, QSPI_RICR);
750 	} else {
751 		atmel_qspi_write(icr, aq, QSPI_WICR);
752 		if (op->data.nbytes)
753 			atmel_qspi_write(FIELD_PREP(QSPI_WRACNT_NBWRA,
754 						    op->data.nbytes),
755 					 aq, QSPI_WRACNT);
756 	}
757 
758 	atmel_qspi_write(ifr, aq, QSPI_IFR);
759 
760 	return atmel_qspi_update_config(aq);
761 }
762 
763 static void atmel_qspi_dma_callback(void *param)
764 {
765 	struct atmel_qspi *aq = param;
766 
767 	complete(&aq->dma_completion);
768 }
769 
770 static int atmel_qspi_dma_xfer(struct atmel_qspi *aq, struct dma_chan *chan,
771 			       dma_addr_t dma_dst, dma_addr_t dma_src,
772 			       unsigned int len)
773 {
774 	struct dma_async_tx_descriptor *tx;
775 	dma_cookie_t cookie;
776 	int ret;
777 
778 	tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
779 				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
780 	if (!tx) {
781 		dev_err(&aq->pdev->dev, "device_prep_dma_memcpy error\n");
782 		return -EIO;
783 	}
784 
785 	reinit_completion(&aq->dma_completion);
786 	tx->callback = atmel_qspi_dma_callback;
787 	tx->callback_param = aq;
788 	cookie = tx->tx_submit(tx);
789 	ret = dma_submit_error(cookie);
790 	if (ret) {
791 		dev_err(&aq->pdev->dev, "dma_submit_error %d\n", cookie);
792 		return ret;
793 	}
794 
795 	dma_async_issue_pending(chan);
796 	ret = wait_for_completion_timeout(&aq->dma_completion,
797 					  msecs_to_jiffies(20 * ATMEL_QSPI_TIMEOUT));
798 	if (ret == 0) {
799 		dmaengine_terminate_sync(chan);
800 		dev_err(&aq->pdev->dev, "DMA wait_for_completion_timeout\n");
801 		return -ETIMEDOUT;
802 	}
803 
804 	return 0;
805 }
806 
807 static int atmel_qspi_dma_rx_xfer(struct spi_mem *mem,
808 				  const struct spi_mem_op *op,
809 				  struct sg_table *sgt, loff_t loff)
810 {
811 	struct atmel_qspi *aq =
812 		spi_controller_get_devdata(mem->spi->controller);
813 	struct scatterlist *sg;
814 	dma_addr_t dma_src;
815 	unsigned int i, len;
816 	int ret;
817 
818 	dma_src = aq->mmap_phys_base + loff;
819 
820 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
821 		len = sg_dma_len(sg);
822 		ret = atmel_qspi_dma_xfer(aq, aq->rx_chan, sg_dma_address(sg),
823 					  dma_src, len);
824 		if (ret)
825 			return ret;
826 		dma_src += len;
827 	}
828 
829 	return 0;
830 }
831 
832 static int atmel_qspi_dma_tx_xfer(struct spi_mem *mem,
833 				  const struct spi_mem_op *op,
834 				  struct sg_table *sgt, loff_t loff)
835 {
836 	struct atmel_qspi *aq =
837 		spi_controller_get_devdata(mem->spi->controller);
838 	struct scatterlist *sg;
839 	dma_addr_t dma_dst;
840 	unsigned int i, len;
841 	int ret;
842 
843 	dma_dst = aq->mmap_phys_base + loff;
844 
845 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
846 		len = sg_dma_len(sg);
847 		ret = atmel_qspi_dma_xfer(aq, aq->tx_chan, dma_dst,
848 					  sg_dma_address(sg), len);
849 		if (ret)
850 			return ret;
851 		dma_dst += len;
852 	}
853 
854 	return 0;
855 }
856 
857 static int atmel_qspi_dma_transfer(struct spi_mem *mem,
858 				   const struct spi_mem_op *op, loff_t loff)
859 {
860 	struct sg_table sgt;
861 	int ret;
862 
863 	ret = spi_controller_dma_map_mem_op_data(mem->spi->controller, op,
864 						 &sgt);
865 	if (ret)
866 		return ret;
867 
868 	if (op->data.dir == SPI_MEM_DATA_IN)
869 		ret = atmel_qspi_dma_rx_xfer(mem, op, &sgt, loff);
870 	else
871 		ret = atmel_qspi_dma_tx_xfer(mem, op, &sgt, loff);
872 
873 	spi_controller_dma_unmap_mem_op_data(mem->spi->controller, op, &sgt);
874 
875 	return ret;
876 }
877 
878 static int atmel_qspi_sama7g5_transfer(struct spi_mem *mem,
879 				       const struct spi_mem_op *op, u32 offset)
880 {
881 	struct atmel_qspi *aq =
882 		spi_controller_get_devdata(mem->spi->controller);
883 	u32 val;
884 	int ret;
885 
886 	if (!op->data.nbytes) {
887 		/* Start the transfer. */
888 		ret = atmel_qspi_reg_sync(aq);
889 		if (ret)
890 			return ret;
891 		atmel_qspi_write(QSPI_CR_STTFR, aq, QSPI_CR);
892 
893 		return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA);
894 	}
895 
896 	/* Send/Receive data. */
897 	if (op->data.dir == SPI_MEM_DATA_IN) {
898 		if (aq->rx_chan && op->addr.nbytes &&
899 		    op->data.nbytes > ATMEL_QSPI_DMA_MIN_BYTES) {
900 			ret = atmel_qspi_dma_transfer(mem, op, offset);
901 			if (ret)
902 				return ret;
903 		} else {
904 			memcpy_fromio(op->data.buf.in, aq->mem + offset,
905 				      op->data.nbytes);
906 		}
907 
908 		if (op->addr.nbytes) {
909 			ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
910 						 !(val & QSPI_SR2_RBUSY), 40,
911 						 ATMEL_QSPI_SYNC_TIMEOUT);
912 			if (ret)
913 				return ret;
914 		}
915 	} else {
916 		if (aq->tx_chan && op->addr.nbytes &&
917 		    op->data.nbytes > ATMEL_QSPI_DMA_MIN_BYTES) {
918 			ret = atmel_qspi_dma_transfer(mem, op, offset);
919 			if (ret)
920 				return ret;
921 		} else {
922 			memcpy_toio(aq->mem + offset, op->data.buf.out,
923 				    op->data.nbytes);
924 		}
925 
926 		ret = atmel_qspi_wait_for_completion(aq, QSPI_SR_LWRA);
927 		if (ret)
928 			return ret;
929 	}
930 
931 	/* Release the chip-select. */
932 	ret = atmel_qspi_reg_sync(aq);
933 	if (ret)
934 		return ret;
935 	atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
936 
937 	return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA);
938 }
939 
940 static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
941 {
942 	struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
943 	u32 offset;
944 	int err;
945 
946 	/*
947 	 * Check if the address exceeds the MMIO window size. An improvement
948 	 * would be to add support for regular SPI mode and fall back to it
949 	 * when the flash memories overrun the controller's memory space.
950 	 */
951 	if (op->addr.val + op->data.nbytes > aq->mmap_size)
952 		return -EOPNOTSUPP;
953 
954 	if (op->addr.nbytes > 4)
955 		return -EOPNOTSUPP;
956 
957 	err = pm_runtime_resume_and_get(&aq->pdev->dev);
958 	if (err < 0)
959 		return err;
960 
961 	err = aq->ops->set_cfg(aq, op, &offset);
962 	if (err)
963 		goto pm_runtime_put;
964 
965 	err = aq->ops->transfer(mem, op, offset);
966 
967 pm_runtime_put:
968 	pm_runtime_mark_last_busy(&aq->pdev->dev);
969 	pm_runtime_put_autosuspend(&aq->pdev->dev);
970 	return err;
971 }
972 
973 static const char *atmel_qspi_get_name(struct spi_mem *spimem)
974 {
975 	return dev_name(spimem->spi->dev.parent);
976 }
977 
978 static const struct spi_controller_mem_ops atmel_qspi_mem_ops = {
979 	.supports_op = atmel_qspi_supports_op,
980 	.exec_op = atmel_qspi_exec_op,
981 	.get_name = atmel_qspi_get_name
982 };
983 
984 static int atmel_qspi_set_pad_calibration(struct atmel_qspi *aq)
985 {
986 	unsigned long pclk_rate;
987 	u32 status, val;
988 	int i, ret;
989 	u8 pclk_div = 0;
990 
991 	pclk_rate = clk_get_rate(aq->pclk);
992 	if (!pclk_rate)
993 		return -EINVAL;
994 
995 	for (i = 0; i < ATMEL_QSPI_PCAL_ARRAY_SIZE; i++) {
996 		if (pclk_rate <= pcal[i].pclk_rate) {
997 			pclk_div = pcal[i].pclk_div;
998 			break;
999 		}
1000 	}
1001 
1002 	/*
1003 	 * Use the biggest divider in case the peripheral clock exceeds
1004 	 * 200 MHz.
1005 	 */
1006 	if (pclk_rate > pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE - 1].pclk_rate)
1007 		pclk_div = pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE - 1].pclk_div;
1008 
1009 	/* Disable QSPI while configuring the pad calibration. */
1010 	status = atmel_qspi_read(aq, QSPI_SR2);
1011 	if (status & QSPI_SR2_QSPIENS) {
1012 		ret = atmel_qspi_reg_sync(aq);
1013 		if (ret)
1014 			return ret;
1015 		atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
1016 	}
1017 
1018 	/*
1019 	 * The analog circuitry is not shut down at the end of the calibration
1020 	 * and the start-up time is only required for the first calibration
1021 	 * sequence, thus increasing performance. Set the delay between the Pad
1022 	 * calibration analog circuitry and the calibration request to 2us.
1023 	 */
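	/*
	 * CALCNT is programmed with 2 * (pclk_rate / 1 MHz), i.e. the 2 us
	 * delay mentioned above expressed in peripheral clock cycles.
	 */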
1024 	atmel_qspi_write(QSPI_PCALCFG_AAON |
1025 			 FIELD_PREP(QSPI_PCALCFG_CLKDIV, pclk_div) |
1026 			 FIELD_PREP(QSPI_PCALCFG_CALCNT,
1027 				    2 * (pclk_rate / 1000000)),
1028 			 aq, QSPI_PCALCFG);
1029 
1030 	/* DLL On + start calibration. */
1031 	atmel_qspi_write(QSPI_CR_DLLON | QSPI_CR_STPCAL, aq, QSPI_CR);
1032 
1033 	/* Check synchronization status before updating configuration. */
1034 	ret =  readl_poll_timeout(aq->regs + QSPI_SR2, val,
1035 				  (val & QSPI_SR2_DLOCK) &&
1036 				  !(val & QSPI_SR2_CALBSY), 40,
1037 				  ATMEL_QSPI_TIMEOUT);
1038 
1039 	/* Refresh the analog blocks every 1 ms. */
1040 	atmel_qspi_write(FIELD_PREP(QSPI_REFRESH_DELAY_COUNTER,
1041 				    aq->target_max_speed_hz / 1000),
1042 			 aq, QSPI_REFRESH);
1043 
1044 	return ret;
1045 }
1046 
1047 static int atmel_qspi_set_gclk(struct atmel_qspi *aq)
1048 {
1049 	u32 status, val;
1050 	int ret;
1051 
1052 	/* Disable DLL before setting GCLK */
1053 	status = atmel_qspi_read(aq, QSPI_SR2);
1054 	if (status & QSPI_SR2_DLOCK) {
1055 		atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR);
1056 
1057 		ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
1058 					 !(val & QSPI_SR2_DLOCK), 40,
1059 					 ATMEL_QSPI_TIMEOUT);
1060 		if (ret)
1061 			return ret;
1062 	}
1063 
1064 	if (aq->target_max_speed_hz > QSPI_DLLCFG_THRESHOLD_FREQ)
1065 		atmel_qspi_write(QSPI_DLLCFG_RANGE, aq, QSPI_DLLCFG);
1066 	else
1067 		atmel_qspi_write(0, aq, QSPI_DLLCFG);
1068 
1069 	ret = clk_set_rate(aq->gclk, aq->target_max_speed_hz);
1070 	if (ret) {
1071 		dev_err(&aq->pdev->dev, "Failed to set generic clock rate.\n");
1072 		return ret;
1073 	}
1074 
1075 	/* Enable the QSPI generic clock */
1076 	ret = clk_prepare_enable(aq->gclk);
1077 	if (ret)
1078 		dev_err(&aq->pdev->dev, "Failed to enable generic clock.\n");
1079 
1080 	return ret;
1081 }
1082 
1083 static int atmel_qspi_sama7g5_init(struct atmel_qspi *aq)
1084 {
1085 	u32 val;
1086 	int ret;
1087 
1088 	ret = atmel_qspi_set_gclk(aq);
1089 	if (ret)
1090 		return ret;
1091 
1092 	if (aq->caps->octal) {
1093 		ret = atmel_qspi_set_pad_calibration(aq);
1094 		if (ret)
1095 			return ret;
1096 	} else {
1097 		atmel_qspi_write(QSPI_CR_DLLON, aq, QSPI_CR);
1098 		ret =  readl_poll_timeout(aq->regs + QSPI_SR2, val,
1099 					  (val & QSPI_SR2_DLOCK), 40,
1100 					  ATMEL_QSPI_TIMEOUT);
1101 	}
1102 
1103 	/* Set the QSPI controller by default in Serial Memory Mode */
1104 	aq->mr |= QSPI_MR_DQSDLYEN;
1105 	ret = atmel_qspi_set_serial_memory_mode(aq);
1106 	if (ret < 0)
1107 		return ret;
1108 
1109 	/* Enable the QSPI controller. */
1110 	atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
1111 	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
1112 				 val & QSPI_SR2_QSPIENS, 40,
1113 				 ATMEL_QSPI_SYNC_TIMEOUT);
1114 	if (ret)
1115 		return ret;
1116 
1117 	if (aq->caps->octal) {
1118 		ret = readl_poll_timeout(aq->regs + QSPI_SR, val,
1119 					 val & QSPI_SR_RFRSHD, 40,
1120 					 ATMEL_QSPI_TIMEOUT);
1121 	}
1122 
1123 	atmel_qspi_write(QSPI_TOUT_TCNTM, aq, QSPI_TOUT);
1124 	return ret;
1125 }
1126 
1127 static int atmel_qspi_sama7g5_setup(struct spi_device *spi)
1128 {
1129 	struct atmel_qspi *aq = spi_controller_get_devdata(spi->controller);
1130 
1131 	/* The controller can communicate with a single peripheral device (target). */
1132 	aq->target_max_speed_hz = spi->max_speed_hz;
1133 
1134 	return atmel_qspi_sama7g5_init(aq);
1135 }
1136 
1137 static int atmel_qspi_setup(struct spi_device *spi)
1138 {
1139 	struct spi_controller *ctrl = spi->controller;
1140 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1141 	unsigned long src_rate;
1142 	u32 scbr;
1143 	int ret;
1144 
1145 	if (ctrl->busy)
1146 		return -EBUSY;
1147 
1148 	if (!spi->max_speed_hz)
1149 		return -EINVAL;
1150 
1151 	if (aq->caps->has_gclk)
1152 		return atmel_qspi_sama7g5_setup(spi);
1153 
1154 	src_rate = clk_get_rate(aq->pclk);
1155 	if (!src_rate)
1156 		return -EINVAL;
1157 
1158 	/* Compute the QSPI baudrate */
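	/*
	 * fSCK = src_rate / (scbr + 1); rounding the divider up keeps the
	 * clock at or below the device limit. E.g. a 133 MHz peripheral clock
	 * with a 50 MHz device yields scbr = 2 and an actual ~44.3 MHz clock.
	 */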
1159 	scbr = DIV_ROUND_UP(src_rate, spi->max_speed_hz);
1160 	if (scbr > 0)
1161 		scbr--;
1162 
1163 	ret = pm_runtime_resume_and_get(ctrl->dev.parent);
1164 	if (ret < 0)
1165 		return ret;
1166 
1167 	aq->scr &= ~QSPI_SCR_SCBR_MASK;
1168 	aq->scr |= QSPI_SCR_SCBR(scbr);
1169 	atmel_qspi_write(aq->scr, aq, QSPI_SCR);
1170 
1171 	pm_runtime_mark_last_busy(ctrl->dev.parent);
1172 	pm_runtime_put_autosuspend(ctrl->dev.parent);
1173 
1174 	return 0;
1175 }
1176 
1177 static int atmel_qspi_set_cs_timing(struct spi_device *spi)
1178 {
1179 	struct spi_controller *ctrl = spi->controller;
1180 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1181 	unsigned long clk_rate;
1182 	u32 cs_inactive;
1183 	u32 cs_setup;
1184 	u32 cs_hold;
1185 	int delay;
1186 	int ret;
1187 
1188 	clk_rate = clk_get_rate(aq->pclk);
1189 	if (!clk_rate)
1190 		return -EINVAL;
1191 
1192 	/* hold */
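	/*
	 * The conversions below turn nanoseconds into register units:
	 * DLYBS (cs_setup) and DLYCS (cs_inactive) are treated as counts of
	 * single peripheral clock cycles (divide by 1000 after multiplying ns
	 * by the clock rate in MHz), while DLYBCT (cs_hold) counts units of
	 * 32 cycles (divide by 32000).
	 */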
1193 	delay = spi_delay_to_ns(&spi->cs_hold, NULL);
1194 	if (aq->mr & QSPI_MR_SMM) {
1195 		if (delay > 0)
1196 			dev_warn(&aq->pdev->dev,
1197 				 "Ignoring cs_hold, must be 0 in Serial Memory Mode.\n");
1198 		cs_hold = 0;
1199 	} else {
1200 		delay = spi_delay_to_ns(&spi->cs_hold, NULL);
1201 		if (delay < 0)
1202 			return delay;
1203 
1204 		cs_hold = DIV_ROUND_UP((delay * DIV_ROUND_UP(clk_rate, 1000000)), 32000);
1205 	}
1206 
1207 	/* setup */
1208 	delay = spi_delay_to_ns(&spi->cs_setup, NULL);
1209 	if (delay < 0)
1210 		return delay;
1211 
1212 	cs_setup = DIV_ROUND_UP((delay * DIV_ROUND_UP(clk_rate, 1000000)),
1213 				1000);
1214 
1215 	/* inactive */
1216 	delay = spi_delay_to_ns(&spi->cs_inactive, NULL);
1217 	if (delay < 0)
1218 		return delay;
1219 	cs_inactive = DIV_ROUND_UP((delay * DIV_ROUND_UP(clk_rate, 1000000)), 1000);
1220 
1221 	ret = pm_runtime_resume_and_get(ctrl->dev.parent);
1222 	if (ret < 0)
1223 		return ret;
1224 
1225 	aq->scr &= ~QSPI_SCR_DLYBS_MASK;
1226 	aq->scr |= QSPI_SCR_DLYBS(cs_setup);
1227 	atmel_qspi_write(aq->scr, aq, QSPI_SCR);
1228 
1229 	aq->mr &= ~(QSPI_MR_DLYBCT_MASK | QSPI_MR_DLYCS_MASK);
1230 	aq->mr |= QSPI_MR_DLYBCT(cs_hold) | QSPI_MR_DLYCS(cs_inactive);
1231 	atmel_qspi_write(aq->mr, aq, QSPI_MR);
1232 
1233 	pm_runtime_mark_last_busy(ctrl->dev.parent);
1234 	pm_runtime_put_autosuspend(ctrl->dev.parent);
1235 
1236 	return 0;
1237 }
1238 
1239 static int atmel_qspi_init(struct atmel_qspi *aq)
1240 {
1241 	int ret;
1242 
1243 	if (aq->caps->has_gclk) {
1244 		ret = atmel_qspi_reg_sync(aq);
1245 		if (ret)
1246 			return ret;
1247 		atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
1248 		return 0;
1249 	}
1250 
1251 	/* Reset the QSPI controller */
1252 	atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
1253 
1254 	/* Set the QSPI controller by default in Serial Memory Mode */
1255 	ret = atmel_qspi_set_serial_memory_mode(aq);
1256 	if (ret < 0)
1257 		return ret;
1258 
1259 	/* Enable the QSPI controller */
1260 	atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
1261 	return 0;
1262 }
1263 
1264 static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
1265 {
1266 	struct atmel_qspi *aq = dev_id;
1267 	u32 status, mask, pending;
1268 
1269 	status = atmel_qspi_read(aq, QSPI_SR);
1270 	mask = atmel_qspi_read(aq, QSPI_IMR);
1271 	pending = status & mask;
1272 
1273 	if (!pending)
1274 		return IRQ_NONE;
1275 
1276 	aq->pending |= pending;
1277 	if ((aq->pending & aq->irq_mask) == aq->irq_mask)
1278 		complete(&aq->cmd_completion);
1279 
1280 	return IRQ_HANDLED;
1281 }
1282 
1283 static int atmel_qspi_dma_init(struct spi_controller *ctrl)
1284 {
1285 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1286 	int ret;
1287 
1288 	aq->rx_chan = dma_request_chan(&aq->pdev->dev, "rx");
1289 	if (IS_ERR(aq->rx_chan)) {
1290 		aq->rx_chan = NULL;
1291 		return dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->rx_chan),
1292 				     "RX DMA channel is not available\n");
1293 	}
1294 
1295 	aq->tx_chan = dma_request_chan(&aq->pdev->dev, "tx");
1296 	if (IS_ERR(aq->tx_chan)) {
1297 		ret = dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->tx_chan),
1298 				    "TX DMA channel is not available\n");
1299 		goto release_rx_chan;
1300 	}
1301 
1302 	ctrl->dma_rx = aq->rx_chan;
1303 	ctrl->dma_tx = aq->tx_chan;
1304 	init_completion(&aq->dma_completion);
1305 
1306 	dev_info(&aq->pdev->dev, "Using %s (tx) and %s (rx) for DMA transfers\n",
1307 		 dma_chan_name(aq->tx_chan), dma_chan_name(aq->rx_chan));
1308 
1309 	return 0;
1310 
1311 release_rx_chan:
1312 	dma_release_channel(aq->rx_chan);
1313 	aq->rx_chan = NULL;
1314 	aq->tx_chan = NULL;
1315 	return ret;
1316 }
1317 
1318 static void atmel_qspi_dma_release(struct atmel_qspi *aq)
1319 {
1320 	if (aq->rx_chan)
1321 		dma_release_channel(aq->rx_chan);
1322 	if (aq->tx_chan)
1323 		dma_release_channel(aq->tx_chan);
1324 }
1325 
1326 static const struct atmel_qspi_ops atmel_qspi_ops = {
1327 	.set_cfg = atmel_qspi_set_cfg,
1328 	.transfer = atmel_qspi_transfer,
1329 };
1330 
1331 static const struct atmel_qspi_ops atmel_qspi_sama7g5_ops = {
1332 	.set_cfg = atmel_qspi_sama7g5_set_cfg,
1333 	.transfer = atmel_qspi_sama7g5_transfer,
1334 };
1335 
1336 static int atmel_qspi_probe(struct platform_device *pdev)
1337 {
1338 	struct spi_controller *ctrl;
1339 	struct atmel_qspi *aq;
1340 	struct resource *res;
1341 	int irq, err = 0;
1342 
1343 	ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(*aq));
1344 	if (!ctrl)
1345 		return -ENOMEM;
1346 
1347 	aq = spi_controller_get_devdata(ctrl);
1348 
1349 	aq->caps = of_device_get_match_data(&pdev->dev);
1350 	if (!aq->caps) {
1351 		dev_err(&pdev->dev, "Could not retrieve QSPI caps\n");
1352 		return -EINVAL;
1353 	}
1354 
1355 	init_completion(&aq->cmd_completion);
1356 	aq->pdev = pdev;
1357 
1358 	ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
1359 	if (aq->caps->octal)
1360 		ctrl->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
1361 
1362 	if (aq->caps->has_gclk)
1363 		aq->ops = &atmel_qspi_sama7g5_ops;
1364 	else
1365 		aq->ops = &atmel_qspi_ops;
1366 
1367 	ctrl->max_speed_hz = aq->caps->max_speed_hz;
1368 	ctrl->setup = atmel_qspi_setup;
1369 	ctrl->set_cs_timing = atmel_qspi_set_cs_timing;
1370 	ctrl->bus_num = -1;
1371 	ctrl->mem_ops = &atmel_qspi_mem_ops;
1372 	ctrl->num_chipselect = 1;
1373 	ctrl->dev.of_node = pdev->dev.of_node;
1374 	platform_set_drvdata(pdev, ctrl);
1375 
1376 	/* Map the registers */
1377 	aq->regs = devm_platform_ioremap_resource_byname(pdev, "qspi_base");
1378 	if (IS_ERR(aq->regs))
1379 		return dev_err_probe(&pdev->dev, PTR_ERR(aq->regs),
1380 				     "missing registers\n");
1381 
1382 	/* Map the AHB memory */
1383 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mmap");
1384 	aq->mem = devm_ioremap_resource(&pdev->dev, res);
1385 	if (IS_ERR(aq->mem))
1386 		return dev_err_probe(&pdev->dev, PTR_ERR(aq->mem),
1387 				     "missing AHB memory\n");
1388 
1389 	aq->mmap_size = resource_size(res);
1390 	aq->mmap_phys_base = (dma_addr_t)res->start;
1391 
1392 	/* Get the peripheral clock */
1393 	aq->pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
1394 	if (IS_ERR(aq->pclk))
1395 		aq->pclk = devm_clk_get_enabled(&pdev->dev, NULL);
1396 
1397 	if (IS_ERR(aq->pclk))
1398 		return dev_err_probe(&pdev->dev, PTR_ERR(aq->pclk),
1399 				     "missing peripheral clock\n");
1400 
1401 	if (aq->caps->has_qspick) {
1402 		/* Get the QSPI system clock */
1403 		aq->qspick = devm_clk_get_enabled(&pdev->dev, "qspick");
1404 		if (IS_ERR(aq->qspick)) {
1405 			dev_err(&pdev->dev, "missing system clock\n");
1406 			err = PTR_ERR(aq->qspick);
1407 			return err;
1408 		}
1409 
1410 	} else if (aq->caps->has_gclk) {
1411 		/* Get the QSPI generic clock */
1412 		aq->gclk = devm_clk_get(&pdev->dev, "gclk");
1413 		if (IS_ERR(aq->gclk)) {
1414 			dev_err(&pdev->dev, "missing Generic clock\n");
1415 			err = PTR_ERR(aq->gclk);
1416 			return err;
1417 		}
1418 	}
1419 
1420 	if (aq->caps->has_dma) {
1421 		err = atmel_qspi_dma_init(ctrl);
1422 		if (err == -EPROBE_DEFER)
1423 			return err;
1424 	}
1425 
1426 	/* Request the IRQ */
1427 	irq = platform_get_irq(pdev, 0);
1428 	if (irq < 0) {
1429 		err = irq;
1430 		goto dma_release;
1431 	}
1432 	err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt,
1433 			       0, dev_name(&pdev->dev), aq);
1434 	if (err)
1435 		goto dma_release;
1436 
1437 	pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
1438 	pm_runtime_use_autosuspend(&pdev->dev);
1439 	pm_runtime_set_active(&pdev->dev);
1440 	pm_runtime_enable(&pdev->dev);
1441 	pm_runtime_get_noresume(&pdev->dev);
1442 
1443 	err = atmel_qspi_init(aq);
1444 	if (err)
1445 		goto dma_release;
1446 
1447 	err = spi_register_controller(ctrl);
1448 	if (err) {
1449 		pm_runtime_put_noidle(&pdev->dev);
1450 		pm_runtime_disable(&pdev->dev);
1451 		pm_runtime_set_suspended(&pdev->dev);
1452 		pm_runtime_dont_use_autosuspend(&pdev->dev);
1453 		goto dma_release;
1454 	}
1455 	pm_runtime_mark_last_busy(&pdev->dev);
1456 	pm_runtime_put_autosuspend(&pdev->dev);
1457 
1458 	return 0;
1459 
1460 dma_release:
1461 	if (aq->caps->has_dma)
1462 		atmel_qspi_dma_release(aq);
1463 
1464 	return err;
1465 }
1466 
1467 static int atmel_qspi_sama7g5_suspend(struct atmel_qspi *aq)
1468 {
1469 	int ret;
1470 	u32 val;
1471 
1472 	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
1473 				 !(val & QSPI_SR2_RBUSY) &&
1474 				 (val & QSPI_SR2_HIDLE), 40,
1475 				 ATMEL_QSPI_SYNC_TIMEOUT);
1476 	if (ret)
1477 		return ret;
1478 
1479 	atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
1480 	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
1481 				 !(val & QSPI_SR2_QSPIENS), 40,
1482 				 ATMEL_QSPI_SYNC_TIMEOUT);
1483 	if (ret)
1484 		return ret;
1485 
1486 	clk_disable_unprepare(aq->gclk);
1487 
1488 	atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR);
1489 	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
1490 				 !(val & QSPI_SR2_DLOCK), 40,
1491 				 ATMEL_QSPI_TIMEOUT);
1492 	if (ret)
1493 		return ret;
1494 
1495 	ret =  readl_poll_timeout(aq->regs + QSPI_SR2, val,
1496 				  !(val & QSPI_SR2_CALBSY), 40,
1497 				  ATMEL_QSPI_TIMEOUT);
1498 	if (ret)
1499 		return ret;
1500 
1501 	return 0;
1502 }
1503 
1504 static void atmel_qspi_remove(struct platform_device *pdev)
1505 {
1506 	struct spi_controller *ctrl = platform_get_drvdata(pdev);
1507 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1508 	int ret;
1509 
1510 	spi_unregister_controller(ctrl);
1511 
1512 	ret = pm_runtime_get_sync(&pdev->dev);
1513 	if (ret >= 0) {
1514 		if (aq->caps->has_dma)
1515 			atmel_qspi_dma_release(aq);
1516 
1517 		if (aq->caps->has_gclk) {
1518 			ret = atmel_qspi_sama7g5_suspend(aq);
1519 			if (ret)
1520 				dev_warn(&pdev->dev, "Failed to de-init device on remove: %d\n", ret);
1521 			return;
1522 		}
1523 
1524 		atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
1525 	} else {
1526 		/*
1527 		 * atmel_qspi_runtime_{suspend,resume} just disable and enable
1528 		 * the two clks respectively. So after resume failed these are
1529 		 * off, and we skip hardware access and disabling these clks again.
1530 		 */
1531 		dev_warn(&pdev->dev, "Failed to resume device on remove\n");
1532 	}
1533 
1534 	pm_runtime_disable(&pdev->dev);
1535 	pm_runtime_dont_use_autosuspend(&pdev->dev);
1536 	pm_runtime_put_noidle(&pdev->dev);
1537 }
1538 
1539 static int __maybe_unused atmel_qspi_suspend(struct device *dev)
1540 {
1541 	struct spi_controller *ctrl = dev_get_drvdata(dev);
1542 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1543 	int ret;
1544 
1545 	ret = pm_runtime_resume_and_get(dev);
1546 	if (ret < 0)
1547 		return ret;
1548 
1549 	if (aq->caps->has_gclk) {
1550 		ret = atmel_qspi_sama7g5_suspend(aq);
1551 		clk_disable_unprepare(aq->pclk);
1552 		return ret;
1553 	}
1554 
1555 	atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
1556 
1557 	pm_runtime_mark_last_busy(dev);
1558 	pm_runtime_force_suspend(dev);
1559 
1560 	clk_unprepare(aq->qspick);
1561 	clk_unprepare(aq->pclk);
1562 
1563 	return 0;
1564 }
1565 
1566 static int __maybe_unused atmel_qspi_resume(struct device *dev)
1567 {
1568 	struct spi_controller *ctrl = dev_get_drvdata(dev);
1569 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1570 	int ret;
1571 
1572 	ret = clk_prepare(aq->pclk);
1573 	if (ret)
1574 		return ret;
1575 
1576 	ret = clk_prepare(aq->qspick);
1577 	if (ret) {
1578 		clk_unprepare(aq->pclk);
1579 		return ret;
1580 	}
1581 
1582 	if (aq->caps->has_gclk)
1583 		return atmel_qspi_sama7g5_init(aq);
1584 
1585 	ret = pm_runtime_force_resume(dev);
1586 	if (ret < 0)
1587 		return ret;
1588 
1589 	atmel_qspi_init(aq);
1590 
1591 	atmel_qspi_write(aq->scr, aq, QSPI_SCR);
1592 
1593 	pm_runtime_mark_last_busy(dev);
1594 	pm_runtime_put_autosuspend(dev);
1595 
1596 	return 0;
1597 }
1598 
1599 static int __maybe_unused atmel_qspi_runtime_suspend(struct device *dev)
1600 {
1601 	struct spi_controller *ctrl = dev_get_drvdata(dev);
1602 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1603 
1604 	clk_disable(aq->qspick);
1605 	clk_disable(aq->pclk);
1606 
1607 	return 0;
1608 }
1609 
1610 static int __maybe_unused atmel_qspi_runtime_resume(struct device *dev)
1611 {
1612 	struct spi_controller *ctrl = dev_get_drvdata(dev);
1613 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1614 	int ret;
1615 
1616 	ret = clk_enable(aq->pclk);
1617 	if (ret)
1618 		return ret;
1619 
1620 	ret = clk_enable(aq->qspick);
1621 	if (ret)
1622 		clk_disable(aq->pclk);
1623 
1624 	return ret;
1625 }
1626 
1627 static const struct dev_pm_ops __maybe_unused atmel_qspi_pm_ops = {
1628 	SET_SYSTEM_SLEEP_PM_OPS(atmel_qspi_suspend, atmel_qspi_resume)
1629 	SET_RUNTIME_PM_OPS(atmel_qspi_runtime_suspend,
1630 			   atmel_qspi_runtime_resume, NULL)
1631 };
1632 
1633 static const struct atmel_qspi_caps atmel_sama5d2_qspi_caps = {};
1634 
1635 static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps = {
1636 	.has_qspick = true,
1637 	.has_ricr = true,
1638 };
1639 
1640 static const struct atmel_qspi_caps atmel_sama7g5_ospi_caps = {
1641 	.max_speed_hz = SAMA7G5_QSPI0_MAX_SPEED_HZ,
1642 	.has_gclk = true,
1643 	.octal = true,
1644 	.has_dma = true,
1645 };
1646 
1647 static const struct atmel_qspi_caps atmel_sama7g5_qspi_caps = {
1648 	.max_speed_hz = SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ,
1649 	.has_gclk = true,
1650 	.has_dma = true,
1651 };
1652 
1653 static const struct of_device_id atmel_qspi_dt_ids[] = {
1654 	{
1655 		.compatible = "atmel,sama5d2-qspi",
1656 		.data = &atmel_sama5d2_qspi_caps,
1657 	},
1658 	{
1659 		.compatible = "microchip,sam9x60-qspi",
1660 		.data = &atmel_sam9x60_qspi_caps,
1661 	},
1662 	{
1663 		.compatible = "microchip,sama7g5-ospi",
1664 		.data = &atmel_sama7g5_ospi_caps,
1665 	},
1666 	{
1667 		.compatible = "microchip,sama7g5-qspi",
1668 		.data = &atmel_sama7g5_qspi_caps,
1669 	},
1670 
1671 	{ /* sentinel */ }
1672 };
1673 
1674 MODULE_DEVICE_TABLE(of, atmel_qspi_dt_ids);
1675 
1676 static struct platform_driver atmel_qspi_driver = {
1677 	.driver = {
1678 		.name	= "atmel_qspi",
1679 		.of_match_table	= atmel_qspi_dt_ids,
1680 		.pm	= pm_ptr(&atmel_qspi_pm_ops),
1681 	},
1682 	.probe		= atmel_qspi_probe,
1683 	.remove		= atmel_qspi_remove,
1684 };
1685 module_platform_driver(atmel_qspi_driver);
1686 
1687 MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@atmel.com>");
1688 MODULE_AUTHOR("Piotr Bugalski <bugalski.piotr@gmail.com");
1689 MODULE_DESCRIPTION("Atmel QSPI Controller driver");
1690 MODULE_LICENSE("GPL v2");
1691