xref: /linux/drivers/spi/atmel-quadspi.c (revision 86f5536004a61a0c797c14a248fc976f03f55cd5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Driver for Atmel QSPI Controller
4  *
5  * Copyright (C) 2015 Atmel Corporation
6  * Copyright (C) 2018 Cryptera A/S
7  *
8  * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
9  * Author: Piotr Bugalski <bugalski.piotr@gmail.com>
10  *
11  * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
12  */
13 
14 #include <linux/bitfield.h>
15 #include <linux/clk.h>
16 #include <linux/delay.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/dmaengine.h>
19 #include <linux/err.h>
20 #include <linux/interrupt.h>
21 #include <linux/io.h>
22 #include <linux/iopoll.h>
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/of.h>
26 #include <linux/of_platform.h>
27 #include <linux/platform_device.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/spi/spi-mem.h>
30 
31 /* QSPI register offsets */
32 #define QSPI_CR      0x0000  /* Control Register */
33 #define QSPI_MR      0x0004  /* Mode Register */
34 #define QSPI_RD      0x0008  /* Receive Data Register */
35 #define QSPI_TD      0x000c  /* Transmit Data Register */
36 #define QSPI_SR      0x0010  /* Status Register */
37 #define QSPI_IER     0x0014  /* Interrupt Enable Register */
38 #define QSPI_IDR     0x0018  /* Interrupt Disable Register */
39 #define QSPI_IMR     0x001c  /* Interrupt Mask Register */
40 #define QSPI_SCR     0x0020  /* Serial Clock Register */
41 #define QSPI_SR2     0x0024  /* SAMA7G5 Status Register */
42 
43 #define QSPI_IAR     0x0030  /* Instruction Address Register */
44 #define QSPI_ICR     0x0034  /* Instruction Code Register */
45 #define QSPI_WICR    0x0034  /* Write Instruction Code Register */
46 #define QSPI_IFR     0x0038  /* Instruction Frame Register */
47 #define QSPI_RICR    0x003C  /* Read Instruction Code Register */
48 
49 #define QSPI_SMR     0x0040  /* Scrambling Mode Register */
50 #define QSPI_SKR     0x0044  /* Scrambling Key Register */
51 
52 #define QSPI_REFRESH	0x0050	/* Refresh Register */
53 #define QSPI_WRACNT	0x0054	/* Write Access Counter Register */
54 #define QSPI_DLLCFG	0x0058	/* DLL Configuration Register */
55 #define QSPI_PCALCFG	0x005C	/* Pad Calibration Configuration Register */
56 #define QSPI_PCALBP	0x0060	/* Pad Calibration Bypass Register */
57 #define QSPI_TOUT	0x0064	/* Timeout Register */
58 
59 #define QSPI_WPMR    0x00E4  /* Write Protection Mode Register */
60 #define QSPI_WPSR    0x00E8  /* Write Protection Status Register */
61 
62 #define QSPI_VERSION 0x00FC  /* Version Register */
63 
64 #define SAMA7G5_QSPI0_MAX_SPEED_HZ	200000000
65 #define SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ	133000000
66 
67 /* Bitfields in QSPI_CR (Control Register) */
68 #define QSPI_CR_QSPIEN                  BIT(0)
69 #define QSPI_CR_QSPIDIS                 BIT(1)
70 #define QSPI_CR_DLLON			BIT(2)
71 #define QSPI_CR_DLLOFF			BIT(3)
72 #define QSPI_CR_STPCAL			BIT(4)
73 #define QSPI_CR_SRFRSH			BIT(5)
74 #define QSPI_CR_SWRST                   BIT(7)
75 #define QSPI_CR_UPDCFG			BIT(8)
76 #define QSPI_CR_STTFR			BIT(9)
77 #define QSPI_CR_RTOUT			BIT(10)
78 #define QSPI_CR_LASTXFER                BIT(24)
79 
80 /* Bitfields in QSPI_MR (Mode Register) */
81 #define QSPI_MR_SMM                     BIT(0)
82 #define QSPI_MR_LLB                     BIT(1)
83 #define QSPI_MR_WDRBT                   BIT(2)
84 #define QSPI_MR_SMRM                    BIT(3)
85 #define QSPI_MR_DQSDLYEN		BIT(3)
86 #define QSPI_MR_CSMODE_MASK             GENMASK(5, 4)
87 #define QSPI_MR_CSMODE_NOT_RELOADED     (0 << 4)
88 #define QSPI_MR_CSMODE_LASTXFER         (1 << 4)
89 #define QSPI_MR_CSMODE_SYSTEMATICALLY   (2 << 4)
90 #define QSPI_MR_NBBITS_MASK             GENMASK(11, 8)
91 #define QSPI_MR_NBBITS(n)               ((((n) - 8) << 8) & QSPI_MR_NBBITS_MASK)
92 #define QSPI_MR_OENSD			BIT(15)
93 #define QSPI_MR_DLYBCT_MASK             GENMASK(23, 16)
94 #define QSPI_MR_DLYBCT(n)               (((n) << 16) & QSPI_MR_DLYBCT_MASK)
95 #define QSPI_MR_DLYCS_MASK              GENMASK(31, 24)
96 #define QSPI_MR_DLYCS(n)                (((n) << 24) & QSPI_MR_DLYCS_MASK)
97 
98 /* Bitfields in QSPI_SR/QSPI_IER/QSPI_IDR/QSPI_IMR  */
99 #define QSPI_SR_RDRF                    BIT(0)
100 #define QSPI_SR_TDRE                    BIT(1)
101 #define QSPI_SR_TXEMPTY                 BIT(2)
102 #define QSPI_SR_OVRES                   BIT(3)
103 #define QSPI_SR_CSR                     BIT(8)
104 #define QSPI_SR_CSS                     BIT(9)
105 #define QSPI_SR_INSTRE                  BIT(10)
106 #define QSPI_SR_LWRA			BIT(11)
107 #define QSPI_SR_QITF			BIT(12)
108 #define QSPI_SR_QITR			BIT(13)
109 #define QSPI_SR_CSFA			BIT(14)
110 #define QSPI_SR_CSRA			BIT(15)
111 #define QSPI_SR_RFRSHD			BIT(16)
112 #define QSPI_SR_TOUT			BIT(17)
113 #define QSPI_SR_QSPIENS                 BIT(24)
114 
115 #define QSPI_SR_CMD_COMPLETED	(QSPI_SR_INSTRE | QSPI_SR_CSR)
116 
117 /* Bitfields in QSPI_SCR (Serial Clock Register) */
118 #define QSPI_SCR_CPOL                   BIT(0)
119 #define QSPI_SCR_CPHA                   BIT(1)
120 #define QSPI_SCR_SCBR_MASK              GENMASK(15, 8)
121 #define QSPI_SCR_SCBR(n)                (((n) << 8) & QSPI_SCR_SCBR_MASK)
122 #define QSPI_SCR_DLYBS_MASK             GENMASK(23, 16)
123 #define QSPI_SCR_DLYBS(n)               (((n) << 16) & QSPI_SCR_DLYBS_MASK)
124 
125 /* Bitfields in QSPI_SR2 (SAMA7G5 Status Register) */
126 #define QSPI_SR2_SYNCBSY		BIT(0)
127 #define QSPI_SR2_QSPIENS		BIT(1)
128 #define QSPI_SR2_CSS			BIT(2)
129 #define QSPI_SR2_RBUSY			BIT(3)
130 #define QSPI_SR2_HIDLE			BIT(4)
131 #define QSPI_SR2_DLOCK			BIT(5)
132 #define QSPI_SR2_CALBSY			BIT(6)
133 
134 /* Bitfields in QSPI_IAR (Instruction Address Register) */
135 #define QSPI_IAR_ADDR			GENMASK(31, 0)
136 
137 /* Bitfields in QSPI_ICR (Read/Write Instruction Code Register) */
138 #define QSPI_ICR_INST_MASK              GENMASK(7, 0)
139 #define QSPI_ICR_INST(inst)             (((inst) << 0) & QSPI_ICR_INST_MASK)
140 #define QSPI_ICR_INST_MASK_SAMA7G5	GENMASK(15, 0)
141 #define QSPI_ICR_OPT_MASK               GENMASK(23, 16)
142 #define QSPI_ICR_OPT(opt)               (((opt) << 16) & QSPI_ICR_OPT_MASK)
143 
144 /* Bitfields in QSPI_IFR (Instruction Frame Register) */
145 #define QSPI_IFR_WIDTH_MASK             GENMASK(2, 0)
146 #define QSPI_IFR_WIDTH_SINGLE_BIT_SPI   (0 << 0)
147 #define QSPI_IFR_WIDTH_DUAL_OUTPUT      (1 << 0)
148 #define QSPI_IFR_WIDTH_QUAD_OUTPUT      (2 << 0)
149 #define QSPI_IFR_WIDTH_DUAL_IO          (3 << 0)
150 #define QSPI_IFR_WIDTH_QUAD_IO          (4 << 0)
151 #define QSPI_IFR_WIDTH_DUAL_CMD         (5 << 0)
152 #define QSPI_IFR_WIDTH_QUAD_CMD         (6 << 0)
153 #define QSPI_IFR_WIDTH_OCT_OUTPUT	(7 << 0)
154 #define QSPI_IFR_WIDTH_OCT_IO		(8 << 0)
155 #define QSPI_IFR_WIDTH_OCT_CMD		(9 << 0)
156 #define QSPI_IFR_INSTEN                 BIT(4)
157 #define QSPI_IFR_ADDREN                 BIT(5)
158 #define QSPI_IFR_OPTEN                  BIT(6)
159 #define QSPI_IFR_DATAEN                 BIT(7)
160 #define QSPI_IFR_OPTL_MASK              GENMASK(9, 8)
161 #define QSPI_IFR_OPTL_1BIT              (0 << 8)
162 #define QSPI_IFR_OPTL_2BIT              (1 << 8)
163 #define QSPI_IFR_OPTL_4BIT              (2 << 8)
164 #define QSPI_IFR_OPTL_8BIT              (3 << 8)
165 #define QSPI_IFR_ADDRL                  BIT(10)
166 #define QSPI_IFR_ADDRL_SAMA7G5		GENMASK(11, 10)
167 #define QSPI_IFR_TFRTYP_MEM		BIT(12)
168 #define QSPI_IFR_SAMA5D2_WRITE_TRSFR	BIT(13)
169 #define QSPI_IFR_CRM                    BIT(14)
170 #define QSPI_IFR_DDREN			BIT(15)
171 #define QSPI_IFR_NBDUM_MASK             GENMASK(20, 16)
172 #define QSPI_IFR_NBDUM(n)               (((n) << 16) & QSPI_IFR_NBDUM_MASK)
173 #define QSPI_IFR_END			BIT(22)
174 #define QSPI_IFR_SMRM			BIT(23)
175 #define QSPI_IFR_APBTFRTYP_READ		BIT(24)	/* Defined in SAM9X60 */
176 #define QSPI_IFR_DQSEN			BIT(25)
177 #define QSPI_IFR_DDRCMDEN		BIT(26)
178 #define QSPI_IFR_HFWBEN			BIT(27)
179 #define QSPI_IFR_PROTTYP		GENMASK(29, 28)
180 #define QSPI_IFR_PROTTYP_STD_SPI	0
181 #define QSPI_IFR_PROTTYP_TWIN_QUAD	1
182 #define QSPI_IFR_PROTTYP_OCTAFLASH	2
183 #define QSPI_IFR_PROTTYP_HYPERFLASH	3
184 
185 /* Bitfields in QSPI_SMR (Scrambling Mode Register) */
186 #define QSPI_SMR_SCREN                  BIT(0)
187 #define QSPI_SMR_RVDIS                  BIT(1)
188 #define QSPI_SMR_SCRKL                  BIT(2)
189 
190 /* Bitfields in QSPI_REFRESH (Refresh Register) */
191 #define QSPI_REFRESH_DELAY_COUNTER	GENMASK(31, 0)
192 
193 /* Bitfields in QSPI_WRACNT (Write Access Counter Register) */
194 #define QSPI_WRACNT_NBWRA		GENMASK(31, 0)
195 
196 /* Bitfields in QSPI_DLLCFG (DLL Configuration Register) */
197 #define QSPI_DLLCFG_RANGE		BIT(0)
198 
199 /* Bitfields in QSPI_PCALCFG (DLL Pad Calibration Configuration Register) */
200 #define QSPI_PCALCFG_AAON		BIT(0)
201 #define QSPI_PCALCFG_DAPCAL		BIT(1)
202 #define QSPI_PCALCFG_DIFFPM		BIT(2)
203 #define QSPI_PCALCFG_CLKDIV		GENMASK(6, 4)
204 #define QSPI_PCALCFG_CALCNT		GENMASK(16, 8)
205 #define QSPI_PCALCFG_CALP		GENMASK(27, 24)
206 #define QSPI_PCALCFG_CALN		GENMASK(31, 28)
207 
208 /* Bitfields in QSPI_PCALBP (DLL Pad Calibration Bypass Register) */
209 #define QSPI_PCALBP_BPEN		BIT(0)
210 #define QSPI_PCALBP_CALPBP		GENMASK(11, 8)
211 #define QSPI_PCALBP_CALNBP		GENMASK(19, 16)
212 
213 /* Bitfields in QSPI_TOUT (Timeout Register) */
214 #define QSPI_TOUT_TCNTM			GENMASK(15, 0)
215 
216 /* Bitfields in QSPI_WPMR (Write Protection Mode Register) */
217 #define QSPI_WPMR_WPEN                  BIT(0)
218 #define QSPI_WPMR_WPITEN		BIT(1)
219 #define QSPI_WPMR_WPCREN		BIT(2)
220 #define QSPI_WPMR_WPKEY_MASK            GENMASK(31, 8)
221 #define QSPI_WPMR_WPKEY(wpkey)          (((wpkey) << 8) & QSPI_WPMR_WPKEY_MASK)
222 
223 /* Bitfields in QSPI_WPSR (Write Protection Status Register) */
224 #define QSPI_WPSR_WPVS                  BIT(0)
225 #define QSPI_WPSR_WPVSRC_MASK           GENMASK(15, 8)
226 #define QSPI_WPSR_WPVSRC(src)           (((src) << 8) & QSPI_WPSR_WPVSRC_MASK)
227 
228 #define ATMEL_QSPI_TIMEOUT		1000	/* ms */
229 #define ATMEL_QSPI_SYNC_TIMEOUT		300	/* ms */
230 #define QSPI_DLLCFG_THRESHOLD_FREQ	90000000U
231 #define QSPI_CALIB_TIME			2000	/* 2 us */
232 
233 /* Use PIO for small transfers. */
234 #define ATMEL_QSPI_DMA_MIN_BYTES	16
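/*
 * Transfers of at most ATMEL_QSPI_DMA_MIN_BYTES are done with
 * memcpy_{to,from}io(); larger ones may be handed to the memcpy DMA channels
 * when they are available (see atmel_qspi_sama7g5_transfer() below).
 */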
235 /**
236  * struct atmel_qspi_pcal - Pad Calibration Clock Division
237  * @pclk_rate: peripheral clock rate.
238  * @pclk_div: calibration clock division. The clock applied to the calibration
239  *            cell is divided by pclk_div + 1.
240  */
241 struct atmel_qspi_pcal {
242 	u32 pclk_rate;
243 	u8 pclk_div;
244 };
245 
246 #define ATMEL_QSPI_PCAL_ARRAY_SIZE	8
247 static const struct atmel_qspi_pcal pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE] = {
248 	{25000000, 0},
249 	{50000000, 1},
250 	{75000000, 2},
251 	{100000000, 3},
252 	{125000000, 4},
253 	{150000000, 5},
254 	{175000000, 6},
255 	{200000000, 7},
256 };
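
/*
 * Illustrative sketch only, not used by the driver: it mirrors the divider
 * selection done later in atmel_qspi_set_pad_calibration() and shows the
 * resulting calibration cell clock, i.e. pclk_rate / (pclk_div + 1). For
 * example a 150 MHz peripheral clock selects pclk_div = 5, so the cell is
 * clocked at 150 MHz / 6 = 25 MHz; rates above 200 MHz fall back to the
 * largest divider.
 */
static inline u32 atmel_qspi_pcal_cell_rate(u32 pclk_rate)
{
	u8 pclk_div = pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE - 1].pclk_div;
	int i;

	/* Pick the first table entry able to cover the peripheral clock rate. */
	for (i = 0; i < ATMEL_QSPI_PCAL_ARRAY_SIZE; i++) {
		if (pclk_rate <= pcal[i].pclk_rate) {
			pclk_div = pcal[i].pclk_div;
			break;
		}
	}

	return pclk_rate / (pclk_div + 1);
}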
257 
258 struct atmel_qspi_caps {
259 	u32 max_speed_hz;
260 	bool has_qspick;
261 	bool has_gclk;
262 	bool has_ricr;
263 	bool octal;
264 	bool has_dma;
265 };
266 
267 struct atmel_qspi_ops;
268 
269 struct atmel_qspi {
270 	void __iomem		*regs;
271 	void __iomem		*mem;
272 	struct clk		*pclk;
273 	struct clk		*qspick;
274 	struct clk		*gclk;
275 	struct platform_device	*pdev;
276 	const struct atmel_qspi_caps *caps;
277 	const struct atmel_qspi_ops *ops;
278 	resource_size_t		mmap_size;
279 	u32			pending;
280 	u32			irq_mask;
281 	u32			mr;
282 	u32			scr;
283 	u32			target_max_speed_hz;
284 	struct completion	cmd_completion;
285 	struct completion	dma_completion;
286 	dma_addr_t		mmap_phys_base;
287 	struct dma_chan		*rx_chan;
288 	struct dma_chan		*tx_chan;
289 };
290 
291 struct atmel_qspi_ops {
292 	int (*set_cfg)(struct atmel_qspi *aq, const struct spi_mem_op *op,
293 		       u32 *offset);
294 	int (*transfer)(struct spi_mem *mem, const struct spi_mem_op *op,
295 			u32 offset);
296 };
297 
298 struct atmel_qspi_mode {
299 	u8 cmd_buswidth;
300 	u8 addr_buswidth;
301 	u8 data_buswidth;
302 	u32 config;
303 };
304 
305 static const struct atmel_qspi_mode atmel_qspi_modes[] = {
306 	{ 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
307 	{ 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
308 	{ 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
309 	{ 1, 2, 2, QSPI_IFR_WIDTH_DUAL_IO },
310 	{ 1, 4, 4, QSPI_IFR_WIDTH_QUAD_IO },
311 	{ 2, 2, 2, QSPI_IFR_WIDTH_DUAL_CMD },
312 	{ 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
313 };
314 
315 static const struct atmel_qspi_mode atmel_qspi_sama7g5_modes[] = {
316 	{ 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
317 	{ 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
318 	{ 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
319 	{ 1, 2, 2, QSPI_IFR_WIDTH_DUAL_IO },
320 	{ 1, 4, 4, QSPI_IFR_WIDTH_QUAD_IO },
321 	{ 2, 2, 2, QSPI_IFR_WIDTH_DUAL_CMD },
322 	{ 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
323 	{ 1, 1, 8, QSPI_IFR_WIDTH_OCT_OUTPUT },
324 	{ 1, 8, 8, QSPI_IFR_WIDTH_OCT_IO },
325 	{ 8, 8, 8, QSPI_IFR_WIDTH_OCT_CMD },
326 };
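
/*
 * Example of how the mode tables above are matched: a 1-1-4 Quad Output Fast
 * Read (opcode 0x6b on most SPI NOR flashes) hits the { 1, 1, 4 } entry and
 * selects QSPI_IFR_WIDTH_QUAD_OUTPUT, while a fully quad (4-4-4) operation
 * maps to { 4, 4, 4 } and QSPI_IFR_WIDTH_QUAD_CMD.
 */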
327 
328 #ifdef VERBOSE_DEBUG
329 static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
330 {
331 	switch (offset) {
332 	case QSPI_CR:
333 		return "CR";
334 	case QSPI_MR:
335 		return "MR";
336 	case QSPI_RD:
337 		return "RD";
338 	case QSPI_TD:
339 		return "TD";
340 	case QSPI_SR:
341 		return "SR";
342 	case QSPI_IER:
343 		return "IER";
344 	case QSPI_IDR:
345 		return "IDR";
346 	case QSPI_IMR:
347 		return "IMR";
348 	case QSPI_SCR:
349 		return "SCR";
350 	case QSPI_SR2:
351 		return "SR2";
352 	case QSPI_IAR:
353 		return "IAR";
354 	case QSPI_ICR:
355 		return "ICR/WICR";
356 	case QSPI_IFR:
357 		return "IFR";
358 	case QSPI_RICR:
359 		return "RICR";
360 	case QSPI_SMR:
361 		return "SMR";
362 	case QSPI_SKR:
363 		return "SKR";
364 	case QSPI_REFRESH:
365 		return "REFRESH";
366 	case QSPI_WRACNT:
367 		return "WRACNT";
368 	case QSPI_DLLCFG:
369 		return "DLLCFG";
370 	case QSPI_PCALCFG:
371 		return "PCALCFG";
372 	case QSPI_PCALBP:
373 		return "PCALBP";
374 	case QSPI_TOUT:
375 		return "TOUT";
376 	case QSPI_WPMR:
377 		return "WPMR";
378 	case QSPI_WPSR:
379 		return "WPSR";
380 	case QSPI_VERSION:
381 		return "VERSION";
382 	default:
383 		snprintf(tmp, sz, "0x%02x", offset);
384 		break;
385 	}
386 
387 	return tmp;
388 }
389 #endif /* VERBOSE_DEBUG */
390 
391 static u32 atmel_qspi_read(struct atmel_qspi *aq, u32 offset)
392 {
393 	u32 value = readl_relaxed(aq->regs + offset);
394 
395 #ifdef VERBOSE_DEBUG
396 	char tmp[8];
397 
398 	dev_vdbg(&aq->pdev->dev, "read 0x%08x from %s\n", value,
399 		 atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
400 #endif /* VERBOSE_DEBUG */
401 
402 	return value;
403 }
404 
405 static void atmel_qspi_write(u32 value, struct atmel_qspi *aq, u32 offset)
406 {
407 #ifdef VERBOSE_DEBUG
408 	char tmp[8];
409 
410 	dev_vdbg(&aq->pdev->dev, "write 0x%08x into %s\n", value,
411 		 atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
412 #endif /* VERBOSE_DEBUG */
413 
414 	writel_relaxed(value, aq->regs + offset);
415 }
416 
417 static int atmel_qspi_reg_sync(struct atmel_qspi *aq)
418 {
419 	u32 val;
420 	int ret;
421 
422 	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
423 				 !(val & QSPI_SR2_SYNCBSY), 40,
424 				 ATMEL_QSPI_SYNC_TIMEOUT);
425 	return ret;
426 }
427 
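/*
 * On the gclk-based controllers (SAMA7G5 class) configuration writes only
 * take effect once they have been resynchronized to the QSPI core clock
 * domain: wait for SYNCBSY to clear, request the update with QSPI_CR_UPDCFG,
 * then wait for SYNCBSY again before relying on the new configuration.
 */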
428 static int atmel_qspi_update_config(struct atmel_qspi *aq)
429 {
430 	int ret;
431 
432 	ret = atmel_qspi_reg_sync(aq);
433 	if (ret)
434 		return ret;
435 	atmel_qspi_write(QSPI_CR_UPDCFG, aq, QSPI_CR);
436 	return atmel_qspi_reg_sync(aq);
437 }
438 
439 static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op,
440 					    const struct atmel_qspi_mode *mode)
441 {
442 	if (op->cmd.buswidth != mode->cmd_buswidth)
443 		return false;
444 
445 	if (op->addr.nbytes && op->addr.buswidth != mode->addr_buswidth)
446 		return false;
447 
448 	if (op->data.nbytes && op->data.buswidth != mode->data_buswidth)
449 		return false;
450 
451 	return true;
452 }
453 
454 static int atmel_qspi_find_mode(const struct spi_mem_op *op)
455 {
456 	u32 i;
457 
458 	for (i = 0; i < ARRAY_SIZE(atmel_qspi_modes); i++)
459 		if (atmel_qspi_is_compatible(op, &atmel_qspi_modes[i]))
460 			return i;
461 
462 	return -EOPNOTSUPP;
463 }
464 
465 static int atmel_qspi_sama7g5_find_mode(const struct spi_mem_op *op)
466 {
467 	u32 i;
468 
469 	for (i = 0; i < ARRAY_SIZE(atmel_qspi_sama7g5_modes); i++)
470 		if (atmel_qspi_is_compatible(op, &atmel_qspi_sama7g5_modes[i]))
471 			return i;
472 
473 	return -EOPNOTSUPP;
474 }
475 
476 static bool atmel_qspi_supports_op(struct spi_mem *mem,
477 				   const struct spi_mem_op *op)
478 {
479 	struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
480 	if (!spi_mem_default_supports_op(mem, op))
481 		return false;
482 
483 	if (aq->caps->octal) {
484 		if (atmel_qspi_sama7g5_find_mode(op) < 0)
485 			return false;
486 		else
487 			return true;
488 	}
489 
490 	if (atmel_qspi_find_mode(op) < 0)
491 		return false;
492 
493 	/* special case not supported by hardware */
494 	if (op->addr.nbytes == 2 && op->cmd.buswidth != op->addr.buswidth &&
495 	    op->dummy.nbytes == 0)
496 		return false;
497 
498 	return true;
499 }
500 
501 /*
502  * If the QSPI controller is set in regular SPI mode, set it in
503  * Serial Memory Mode (SMM).
504  */
505 static int atmel_qspi_set_serial_memory_mode(struct atmel_qspi *aq)
506 {
507 	int ret = 0;
508 
509 	if (!(aq->mr & QSPI_MR_SMM)) {
510 		aq->mr |= QSPI_MR_SMM;
511 		atmel_qspi_write(aq->mr, aq, QSPI_MR);
512 
513 		if (aq->caps->has_gclk)
514 			ret = atmel_qspi_update_config(aq);
515 	}
516 
517 	return ret;
518 }
519 
520 static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
521 			      const struct spi_mem_op *op, u32 *offset)
522 {
523 	u32 iar, icr, ifr;
524 	u32 dummy_cycles = 0;
525 	int mode;
526 
527 	iar = 0;
528 	icr = QSPI_ICR_INST(op->cmd.opcode);
529 	ifr = QSPI_IFR_INSTEN;
530 
531 	mode = atmel_qspi_find_mode(op);
532 	if (mode < 0)
533 		return mode;
534 	ifr |= atmel_qspi_modes[mode].config;
535 
536 	if (op->dummy.nbytes)
537 		dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;
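	/* e.g. 3 dummy bytes on a quad (4-bit) bus translate into 3 * 8 / 4 = 6 cycles. */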
538 
539 	/*
540 	 * The controller supports 24 and 32-bit addressing, while NAND flashes
541 	 * require 16-bit addresses. 8-bit addresses are handled through the
542 	 * option field. For 16-bit addresses, the workaround depends on the
543 	 * number of requested dummy cycles. If there are 8 or more dummy
544 	 * cycles, the address is shifted and sent with the first dummy byte.
545 	 * Otherwise the opcode is disabled and the first byte of the address
546 	 * carries the command opcode (this only works if the opcode and the
547 	 * address use the same buswidth). The remaining limitation is a 16-bit
548 	 * address used without enough dummy cycles while the opcode uses a
549 	 * different buswidth than the address.
550 	 */
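	/*
	 * Worked example with illustrative values: a 2-byte address 0x0123
	 * with opcode 0x13 and 8 dummy cycles on a 1-bit bus takes the second
	 * branch below: IAR = 0x0123 << 8 = 0x012300, the opcode stays in ICR
	 * and the remaining dummy count drops to 0. With fewer than 8 dummy
	 * cycles the first branch clears INSTEN and packs
	 * IAR = (0x13 << 16) | 0x0123 instead.
	 */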
551 	if (op->addr.buswidth) {
552 		switch (op->addr.nbytes) {
553 		case 0:
554 			break;
555 		case 1:
556 			ifr |= QSPI_IFR_OPTEN | QSPI_IFR_OPTL_8BIT;
557 			icr |= QSPI_ICR_OPT(op->addr.val & 0xff);
558 			break;
559 		case 2:
560 			if (dummy_cycles < 8 / op->addr.buswidth) {
561 				ifr &= ~QSPI_IFR_INSTEN;
562 				ifr |= QSPI_IFR_ADDREN;
563 				iar = (op->cmd.opcode << 16) |
564 					(op->addr.val & 0xffff);
565 			} else {
566 				ifr |= QSPI_IFR_ADDREN;
567 				iar = (op->addr.val << 8) & 0xffffff;
568 				dummy_cycles -= 8 / op->addr.buswidth;
569 			}
570 			break;
571 		case 3:
572 			ifr |= QSPI_IFR_ADDREN;
573 			iar = op->addr.val & 0xffffff;
574 			break;
575 		case 4:
576 			ifr |= QSPI_IFR_ADDREN | QSPI_IFR_ADDRL;
577 			iar = op->addr.val & 0x7ffffff;
578 			break;
579 		default:
580 			return -ENOTSUPP;
581 		}
582 	}
583 
584 	/* offset of the data access in the QSPI memory space */
585 	*offset = iar;
586 
587 	/* Set number of dummy cycles */
588 	if (dummy_cycles)
589 		ifr |= QSPI_IFR_NBDUM(dummy_cycles);
590 
591 	/* Set data enable and data transfer type. */
592 	if (op->data.nbytes) {
593 		ifr |= QSPI_IFR_DATAEN;
594 
595 		if (op->addr.nbytes)
596 			ifr |= QSPI_IFR_TFRTYP_MEM;
597 	}
598 
599 	mode = atmel_qspi_set_serial_memory_mode(aq);
600 	if (mode < 0)
601 		return mode;
602 
603 	/* Clear pending interrupts */
604 	(void)atmel_qspi_read(aq, QSPI_SR);
605 
606 	/* Set QSPI Instruction Frame registers. */
607 	if (op->addr.nbytes && !op->data.nbytes)
608 		atmel_qspi_write(iar, aq, QSPI_IAR);
609 
610 	if (aq->caps->has_ricr) {
611 		if (op->data.dir == SPI_MEM_DATA_IN)
612 			atmel_qspi_write(icr, aq, QSPI_RICR);
613 		else
614 			atmel_qspi_write(icr, aq, QSPI_WICR);
615 	} else {
616 		if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
617 			ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR;
618 
619 		atmel_qspi_write(icr, aq, QSPI_ICR);
620 	}
621 
622 	atmel_qspi_write(ifr, aq, QSPI_IFR);
623 
624 	return 0;
625 }
626 
627 static int atmel_qspi_wait_for_completion(struct atmel_qspi *aq, u32 irq_mask)
628 {
629 	int err = 0;
630 	u32 sr;
631 
632 	/* Poll INSTRuction End status */
633 	sr = atmel_qspi_read(aq, QSPI_SR);
634 	if ((sr & irq_mask) == irq_mask)
635 		return 0;
636 
637 	/* Wait for INSTRuction End interrupt */
638 	reinit_completion(&aq->cmd_completion);
639 	aq->pending = sr & irq_mask;
640 	aq->irq_mask = irq_mask;
641 	atmel_qspi_write(irq_mask, aq, QSPI_IER);
642 	if (!wait_for_completion_timeout(&aq->cmd_completion,
643 					 msecs_to_jiffies(ATMEL_QSPI_TIMEOUT)))
644 		err = -ETIMEDOUT;
645 	atmel_qspi_write(irq_mask, aq, QSPI_IDR);
646 
647 	return err;
648 }
649 
650 static int atmel_qspi_transfer(struct spi_mem *mem,
651 			       const struct spi_mem_op *op, u32 offset)
652 {
653 	struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
654 
655 	/* Skip to the final steps if there is no data */
656 	if (!op->data.nbytes)
657 		return atmel_qspi_wait_for_completion(aq,
658 						      QSPI_SR_CMD_COMPLETED);
659 
660 	/* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
661 	(void)atmel_qspi_read(aq, QSPI_IFR);
662 
663 	/* Send/Receive data */
664 	if (op->data.dir == SPI_MEM_DATA_IN) {
665 		memcpy_fromio(op->data.buf.in, aq->mem + offset,
666 			      op->data.nbytes);
667 
668 		/* Synchronize AHB and APB accesses again */
669 		rmb();
670 	} else {
671 		memcpy_toio(aq->mem + offset, op->data.buf.out,
672 			    op->data.nbytes);
673 
674 		/* Synchronize AHB and APB accesses again */
675 		wmb();
676 	}
677 
678 	/* Release the chip-select */
679 	atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
680 
681 	return atmel_qspi_wait_for_completion(aq, QSPI_SR_CMD_COMPLETED);
682 }
683 
684 static int atmel_qspi_sama7g5_set_cfg(struct atmel_qspi *aq,
685 				      const struct spi_mem_op *op, u32 *offset)
686 {
687 	u32 iar, icr, ifr;
688 	int mode, ret;
689 
690 	iar = 0;
691 	icr = FIELD_PREP(QSPI_ICR_INST_MASK_SAMA7G5, op->cmd.opcode);
692 	ifr = QSPI_IFR_INSTEN;
693 
694 	mode = atmel_qspi_sama7g5_find_mode(op);
695 	if (mode < 0)
696 		return mode;
697 	ifr |= atmel_qspi_sama7g5_modes[mode].config;
698 
699 	if (op->dummy.buswidth && op->dummy.nbytes) {
700 		if (op->addr.dtr && op->dummy.dtr && op->data.dtr)
701 			ifr |= QSPI_IFR_NBDUM(op->dummy.nbytes * 8 /
702 					      (2 * op->dummy.buswidth));
703 		else
704 			ifr |= QSPI_IFR_NBDUM(op->dummy.nbytes * 8 /
705 					      op->dummy.buswidth);
706 	}
707 
708 	if (op->addr.buswidth && op->addr.nbytes) {
709 		ifr |= FIELD_PREP(QSPI_IFR_ADDRL_SAMA7G5, op->addr.nbytes - 1) |
710 		       QSPI_IFR_ADDREN;
711 		iar = FIELD_PREP(QSPI_IAR_ADDR, op->addr.val);
712 	}
713 
714 	if (op->addr.dtr && op->dummy.dtr && op->data.dtr) {
715 		ifr |= QSPI_IFR_DDREN;
716 		if (op->cmd.dtr)
717 			ifr |= QSPI_IFR_DDRCMDEN;
718 
719 		ifr |= QSPI_IFR_DQSEN;
720 	}
721 
722 	if (op->cmd.buswidth == 8 || op->addr.buswidth == 8 ||
723 	    op->data.buswidth == 8)
724 		ifr |= FIELD_PREP(QSPI_IFR_PROTTYP, QSPI_IFR_PROTTYP_OCTAFLASH);
725 
726 	/* offset of the data access in the QSPI memory space */
727 	*offset = iar;
728 
729 	/* Set data enable */
730 	if (op->data.nbytes) {
731 		ifr |= QSPI_IFR_DATAEN;
732 
733 		if (op->addr.nbytes)
734 			ifr |= QSPI_IFR_TFRTYP_MEM;
735 	}
736 
737 	ret = atmel_qspi_set_serial_memory_mode(aq);
738 	if (ret < 0)
739 		return ret;
740 
741 	/* Clear pending interrupts */
742 	(void)atmel_qspi_read(aq, QSPI_SR);
743 
744 	/* Set QSPI Instruction Frame registers */
745 	if (op->addr.nbytes && !op->data.nbytes)
746 		atmel_qspi_write(iar, aq, QSPI_IAR);
747 
748 	if (op->data.dir == SPI_MEM_DATA_IN) {
749 		atmel_qspi_write(icr, aq, QSPI_RICR);
750 	} else {
751 		atmel_qspi_write(icr, aq, QSPI_WICR);
752 		if (op->data.nbytes)
753 			atmel_qspi_write(FIELD_PREP(QSPI_WRACNT_NBWRA,
754 						    op->data.nbytes),
755 					 aq, QSPI_WRACNT);
756 	}
757 
758 	atmel_qspi_write(ifr, aq, QSPI_IFR);
759 
760 	return atmel_qspi_update_config(aq);
761 }
762 
763 static void atmel_qspi_dma_callback(void *param)
764 {
765 	struct atmel_qspi *aq = param;
766 
767 	complete(&aq->dma_completion);
768 }
769 
770 static int atmel_qspi_dma_xfer(struct atmel_qspi *aq, struct dma_chan *chan,
771 			       dma_addr_t dma_dst, dma_addr_t dma_src,
772 			       unsigned int len)
773 {
774 	struct dma_async_tx_descriptor *tx;
775 	dma_cookie_t cookie;
776 	int ret;
777 
778 	tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
779 				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
780 	if (!tx) {
781 		dev_err(&aq->pdev->dev, "device_prep_dma_memcpy error\n");
782 		return -EIO;
783 	}
784 
785 	reinit_completion(&aq->dma_completion);
786 	tx->callback = atmel_qspi_dma_callback;
787 	tx->callback_param = aq;
788 	cookie = tx->tx_submit(tx);
789 	ret = dma_submit_error(cookie);
790 	if (ret) {
791 		dev_err(&aq->pdev->dev, "dma_submit_error %d\n", cookie);
792 		return ret;
793 	}
794 
795 	dma_async_issue_pending(chan);
796 	ret = wait_for_completion_timeout(&aq->dma_completion,
797 					  msecs_to_jiffies(20 * ATMEL_QSPI_TIMEOUT));
798 	if (ret == 0) {
799 		dmaengine_terminate_sync(chan);
800 		dev_err(&aq->pdev->dev, "DMA wait_for_completion_timeout\n");
801 		return -ETIMEDOUT;
802 	}
803 
804 	return 0;
805 }
806 
807 static int atmel_qspi_dma_rx_xfer(struct spi_mem *mem,
808 				  const struct spi_mem_op *op,
809 				  struct sg_table *sgt, loff_t loff)
810 {
811 	struct atmel_qspi *aq =
812 		spi_controller_get_devdata(mem->spi->controller);
813 	struct scatterlist *sg;
814 	dma_addr_t dma_src;
815 	unsigned int i, len;
816 	int ret;
817 
818 	dma_src = aq->mmap_phys_base + loff;
819 
820 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
821 		len = sg_dma_len(sg);
822 		ret = atmel_qspi_dma_xfer(aq, aq->rx_chan, sg_dma_address(sg),
823 					  dma_src, len);
824 		if (ret)
825 			return ret;
826 		dma_src += len;
827 	}
828 
829 	return 0;
830 }
831 
832 static int atmel_qspi_dma_tx_xfer(struct spi_mem *mem,
833 				  const struct spi_mem_op *op,
834 				  struct sg_table *sgt, loff_t loff)
835 {
836 	struct atmel_qspi *aq =
837 		spi_controller_get_devdata(mem->spi->controller);
838 	struct scatterlist *sg;
839 	dma_addr_t dma_dst;
840 	unsigned int i, len;
841 	int ret;
842 
843 	dma_dst = aq->mmap_phys_base + loff;
844 
845 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
846 		len = sg_dma_len(sg);
847 		ret = atmel_qspi_dma_xfer(aq, aq->tx_chan, dma_dst,
848 					  sg_dma_address(sg), len);
849 		if (ret)
850 			return ret;
851 		dma_dst += len;
852 	}
853 
854 	return 0;
855 }
856 
857 static int atmel_qspi_dma_transfer(struct spi_mem *mem,
858 				   const struct spi_mem_op *op, loff_t loff)
859 {
860 	struct sg_table sgt;
861 	int ret;
862 
863 	ret = spi_controller_dma_map_mem_op_data(mem->spi->controller, op,
864 						 &sgt);
865 	if (ret)
866 		return ret;
867 
868 	if (op->data.dir == SPI_MEM_DATA_IN)
869 		ret = atmel_qspi_dma_rx_xfer(mem, op, &sgt, loff);
870 	else
871 		ret = atmel_qspi_dma_tx_xfer(mem, op, &sgt, loff);
872 
873 	spi_controller_dma_unmap_mem_op_data(mem->spi->controller, op, &sgt);
874 
875 	return ret;
876 }
877 
878 static int atmel_qspi_sama7g5_transfer(struct spi_mem *mem,
879 				       const struct spi_mem_op *op, u32 offset)
880 {
881 	struct atmel_qspi *aq =
882 		spi_controller_get_devdata(mem->spi->controller);
883 	u32 val;
884 	int ret;
885 
886 	if (!op->data.nbytes) {
887 		/* Start the transfer. */
888 		ret = atmel_qspi_reg_sync(aq);
889 		if (ret)
890 			return ret;
891 		atmel_qspi_write(QSPI_CR_STTFR, aq, QSPI_CR);
892 
893 		return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA);
894 	}
895 
896 	/* Send/Receive data. */
897 	if (op->data.dir == SPI_MEM_DATA_IN) {
898 		if (aq->rx_chan && op->addr.nbytes &&
899 		    op->data.nbytes > ATMEL_QSPI_DMA_MIN_BYTES) {
900 			ret = atmel_qspi_dma_transfer(mem, op, offset);
901 			if (ret)
902 				return ret;
903 		} else {
904 			memcpy_fromio(op->data.buf.in, aq->mem + offset,
905 				      op->data.nbytes);
906 		}
907 
908 		if (op->addr.nbytes) {
909 			ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
910 						 !(val & QSPI_SR2_RBUSY), 40,
911 						 ATMEL_QSPI_SYNC_TIMEOUT);
912 			if (ret)
913 				return ret;
914 		}
915 	} else {
916 		if (aq->tx_chan && op->addr.nbytes &&
917 		    op->data.nbytes > ATMEL_QSPI_DMA_MIN_BYTES) {
918 			ret = atmel_qspi_dma_transfer(mem, op, offset);
919 			if (ret)
920 				return ret;
921 		} else {
922 			memcpy_toio(aq->mem + offset, op->data.buf.out,
923 				    op->data.nbytes);
924 		}
925 
926 		ret = atmel_qspi_wait_for_completion(aq, QSPI_SR_LWRA);
927 		if (ret)
928 			return ret;
929 	}
930 
931 	/* Release the chip-select. */
932 	ret = atmel_qspi_reg_sync(aq);
933 	if (ret) {
934 		pm_runtime_mark_last_busy(&aq->pdev->dev);
935 		pm_runtime_put_autosuspend(&aq->pdev->dev);
936 		return ret;
937 	}
938 	atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
939 
940 	return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA);
941 }
942 
943 static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
944 {
945 	struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
946 	u32 offset;
947 	int err;
948 
949 	/*
950 	 * Check if the address exceeds the MMIO window size. An improvement
951 	 * would be to add support for regular SPI mode and fall back to it
952 	 * when the flash memories overrun the controller's memory space.
953 	 */
954 	if (op->addr.val + op->data.nbytes > aq->mmap_size)
955 		return -EOPNOTSUPP;
956 
957 	if (op->addr.nbytes > 4)
958 		return -EOPNOTSUPP;
959 
960 	err = pm_runtime_resume_and_get(&aq->pdev->dev);
961 	if (err < 0)
962 		return err;
963 
964 	err = aq->ops->set_cfg(aq, op, &offset);
965 	if (err)
966 		goto pm_runtime_put;
967 
968 	err = aq->ops->transfer(mem, op, offset);
969 
970 pm_runtime_put:
971 	pm_runtime_mark_last_busy(&aq->pdev->dev);
972 	pm_runtime_put_autosuspend(&aq->pdev->dev);
973 	return err;
974 }
975 
976 static const char *atmel_qspi_get_name(struct spi_mem *spimem)
977 {
978 	return dev_name(spimem->spi->dev.parent);
979 }
980 
981 static const struct spi_controller_mem_ops atmel_qspi_mem_ops = {
982 	.supports_op = atmel_qspi_supports_op,
983 	.exec_op = atmel_qspi_exec_op,
984 	.get_name = atmel_qspi_get_name
985 };
986 
987 static int atmel_qspi_set_pad_calibration(struct atmel_qspi *aq)
988 {
989 	unsigned long pclk_rate;
990 	u32 status, val;
991 	int i, ret;
992 	u8 pclk_div = 0;
993 
994 	pclk_rate = clk_get_rate(aq->pclk);
995 	if (!pclk_rate)
996 		return -EINVAL;
997 
998 	for (i = 0; i < ATMEL_QSPI_PCAL_ARRAY_SIZE; i++) {
999 		if (pclk_rate <= pcal[i].pclk_rate) {
1000 			pclk_div = pcal[i].pclk_div;
1001 			break;
1002 		}
1003 	}
1004 
1005 	/*
1006 	 * Use the largest divider if the peripheral clock exceeds
1007 	 * 200 MHz.
1008 	 */
1009 	if (pclk_rate > pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE - 1].pclk_rate)
1010 		pclk_div = pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE - 1].pclk_div;
1011 
1012 	/* Disable QSPI while configuring the pad calibration. */
1013 	status = atmel_qspi_read(aq, QSPI_SR2);
1014 	if (status & QSPI_SR2_QSPIENS) {
1015 		ret = atmel_qspi_reg_sync(aq);
1016 		if (ret)
1017 			return ret;
1018 		atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
1019 	}
1020 
1021 	/*
1022 	 * The analog circuitry is not shut down at the end of the calibration
1023 	 * and the start-up time is only required for the first calibration
1024 	 * sequence, which improves performance. Set the delay between enabling
1025 	 * the pad calibration analog circuitry and the calibration request to 2 us.
1026 	 */
1027 	atmel_qspi_write(QSPI_PCALCFG_AAON |
1028 			 FIELD_PREP(QSPI_PCALCFG_CLKDIV, pclk_div) |
1029 			 FIELD_PREP(QSPI_PCALCFG_CALCNT,
1030 				    2 * (pclk_rate / 1000000)),
1031 			 aq, QSPI_PCALCFG);
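	/*
	 * e.g. with a 200 MHz peripheral clock this programs
	 * CALCNT = 2 * 200 = 400, i.e. 2 us worth of pclk cycles.
	 */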
1032 
1033 	/* DLL On + start calibration. */
1034 	atmel_qspi_write(QSPI_CR_DLLON | QSPI_CR_STPCAL, aq, QSPI_CR);
1035 
1036 	/* Check synchronization status before updating configuration. */
1037 	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
1038 				  (val & QSPI_SR2_DLOCK) &&
1039 				  !(val & QSPI_SR2_CALBSY), 40,
1040 				  ATMEL_QSPI_TIMEOUT);
1041 
1042 	/* Refresh the analog blocks every 1 ms. */
1043 	atmel_qspi_write(FIELD_PREP(QSPI_REFRESH_DELAY_COUNTER,
1044 				    aq->target_max_speed_hz / 1000),
1045 			 aq, QSPI_REFRESH);
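	/*
	 * e.g. a 200 MHz serial clock programs 200000 counter ticks, i.e.
	 * roughly one refresh per millisecond, assuming the counter runs at
	 * the serial clock rate.
	 */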
1046 
1047 	return ret;
1048 }
1049 
1050 static int atmel_qspi_set_gclk(struct atmel_qspi *aq)
1051 {
1052 	u32 status, val;
1053 	int ret;
1054 
1055 	/* Disable DLL before setting GCLK */
1056 	status = atmel_qspi_read(aq, QSPI_SR2);
1057 	if (status & QSPI_SR2_DLOCK) {
1058 		atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR);
1059 
1060 		ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
1061 					 !(val & QSPI_SR2_DLOCK), 40,
1062 					 ATMEL_QSPI_TIMEOUT);
1063 		if (ret)
1064 			return ret;
1065 	}
1066 
1067 	if (aq->target_max_speed_hz > QSPI_DLLCFG_THRESHOLD_FREQ)
1068 		atmel_qspi_write(QSPI_DLLCFG_RANGE, aq, QSPI_DLLCFG);
1069 	else
1070 		atmel_qspi_write(0, aq, QSPI_DLLCFG);
1071 
1072 	ret = clk_set_rate(aq->gclk, aq->target_max_speed_hz);
1073 	if (ret) {
1074 		dev_err(&aq->pdev->dev, "Failed to set generic clock rate.\n");
1075 		return ret;
1076 	}
1077 
1078 	/* Enable the QSPI generic clock */
1079 	ret = clk_prepare_enable(aq->gclk);
1080 	if (ret)
1081 		dev_err(&aq->pdev->dev, "Failed to enable generic clock.\n");
1082 
1083 	return ret;
1084 }
1085 
1086 static int atmel_qspi_sama7g5_init(struct atmel_qspi *aq)
1087 {
1088 	u32 val;
1089 	int ret;
1090 
1091 	ret = atmel_qspi_set_gclk(aq);
1092 	if (ret)
1093 		return ret;
1094 
1095 	if (aq->caps->octal) {
1096 		ret = atmel_qspi_set_pad_calibration(aq);
1097 		if (ret)
1098 			return ret;
1099 	} else {
1100 		atmel_qspi_write(QSPI_CR_DLLON, aq, QSPI_CR);
1101 		ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
1102 					  (val & QSPI_SR2_DLOCK), 40,
1103 					  ATMEL_QSPI_TIMEOUT);
1104 	}
1105 
1106 	/* Set the QSPI controller by default in Serial Memory Mode */
1107 	aq->mr |= QSPI_MR_DQSDLYEN;
1108 	ret = atmel_qspi_set_serial_memory_mode(aq);
1109 	if (ret < 0)
1110 		return ret;
1111 
1112 	/* Enable the QSPI controller. */
1113 	atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
1114 	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
1115 				 val & QSPI_SR2_QSPIENS, 40,
1116 				 ATMEL_QSPI_SYNC_TIMEOUT);
1117 	if (ret)
1118 		return ret;
1119 
1120 	if (aq->caps->octal) {
1121 		ret = readl_poll_timeout(aq->regs + QSPI_SR, val,
1122 					 val & QSPI_SR_RFRSHD, 40,
1123 					 ATMEL_QSPI_TIMEOUT);
1124 	}
1125 
1126 	atmel_qspi_write(QSPI_TOUT_TCNTM, aq, QSPI_TOUT);
1127 	return ret;
1128 }
1129 
1130 static int atmel_qspi_sama7g5_setup(struct spi_device *spi)
1131 {
1132 	struct atmel_qspi *aq = spi_controller_get_devdata(spi->controller);
1133 
1134 	/* The controller can communicate with a single peripheral device (target). */
1135 	aq->target_max_speed_hz = spi->max_speed_hz;
1136 
1137 	return atmel_qspi_sama7g5_init(aq);
1138 }
1139 
1140 static int atmel_qspi_setup(struct spi_device *spi)
1141 {
1142 	struct spi_controller *ctrl = spi->controller;
1143 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1144 	unsigned long src_rate;
1145 	u32 scbr;
1146 	int ret;
1147 
1148 	if (ctrl->busy)
1149 		return -EBUSY;
1150 
1151 	if (!spi->max_speed_hz)
1152 		return -EINVAL;
1153 
1154 	if (aq->caps->has_gclk)
1155 		return atmel_qspi_sama7g5_setup(spi);
1156 
1157 	src_rate = clk_get_rate(aq->pclk);
1158 	if (!src_rate)
1159 		return -EINVAL;
1160 
1161 	/* Compute the QSPI baudrate */
1162 	scbr = DIV_ROUND_UP(src_rate, spi->max_speed_hz);
1163 	if (scbr > 0)
1164 		scbr--;
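	/*
	 * The serial clock is pclk / (SCBR + 1) (per the SAM QSPI datasheets):
	 * e.g. a 100 MHz pclk and a 33 MHz device limit give
	 * DIV_ROUND_UP(100 MHz, 33 MHz) = 4, hence SCBR = 3 and an effective
	 * 25 MHz serial clock.
	 */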
1165 
1166 	ret = pm_runtime_resume_and_get(ctrl->dev.parent);
1167 	if (ret < 0)
1168 		return ret;
1169 
1170 	aq->scr &= ~QSPI_SCR_SCBR_MASK;
1171 	aq->scr |= QSPI_SCR_SCBR(scbr);
1172 	atmel_qspi_write(aq->scr, aq, QSPI_SCR);
1173 
1174 	pm_runtime_mark_last_busy(ctrl->dev.parent);
1175 	pm_runtime_put_autosuspend(ctrl->dev.parent);
1176 
1177 	return 0;
1178 }
1179 
1180 static int atmel_qspi_set_cs_timing(struct spi_device *spi)
1181 {
1182 	struct spi_controller *ctrl = spi->controller;
1183 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1184 	unsigned long clk_rate;
1185 	u32 cs_inactive;
1186 	u32 cs_setup;
1187 	u32 cs_hold;
1188 	int delay;
1189 	int ret;
1190 
1191 	clk_rate = clk_get_rate(aq->pclk);
1192 	if (!clk_rate)
1193 		return -EINVAL;
1194 
1195 	/* hold */
1196 	delay = spi_delay_to_ns(&spi->cs_hold, NULL);
1197 	if (aq->mr & QSPI_MR_SMM) {
1198 		if (delay > 0)
1199 			dev_warn(&aq->pdev->dev,
1200 				 "Ignoring cs_hold, must be 0 in Serial Memory Mode.\n");
1201 		cs_hold = 0;
1202 	} else {
1203 		delay = spi_delay_to_ns(&spi->cs_hold, NULL);
1204 		if (delay < 0)
1205 			return delay;
1206 
1207 		cs_hold = DIV_ROUND_UP((delay * DIV_ROUND_UP(clk_rate, 1000000)), 32000);
1208 	}
1209 
1210 	/* setup */
1211 	delay = spi_delay_to_ns(&spi->cs_setup, NULL);
1212 	if (delay < 0)
1213 		return delay;
1214 
1215 	cs_setup = DIV_ROUND_UP((delay * DIV_ROUND_UP(clk_rate, 1000000)),
1216 				1000);
1217 
1218 	/* inactive */
1219 	delay = spi_delay_to_ns(&spi->cs_inactive, NULL);
1220 	if (delay < 0)
1221 		return delay;
1222 	cs_inactive = DIV_ROUND_UP((delay * DIV_ROUND_UP(clk_rate, 1000000)), 1000);
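	/*
	 * The divisors above reflect the register granularity (per the SAM
	 * QSPI datasheets): DLYBCT counts in steps of 32 peripheral-clock
	 * cycles (hence the 32000 divisor with a MHz rate), while DLYBS and
	 * DLYCS count single cycles. E.g. a 100 ns cs_setup at 200 MHz becomes
	 * DIV_ROUND_UP(100 * 200, 1000) = 20 cycles.
	 */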
1223 
1224 	ret = pm_runtime_resume_and_get(ctrl->dev.parent);
1225 	if (ret < 0)
1226 		return ret;
1227 
1228 	aq->scr &= ~QSPI_SCR_DLYBS_MASK;
1229 	aq->scr |= QSPI_SCR_DLYBS(cs_setup);
1230 	atmel_qspi_write(aq->scr, aq, QSPI_SCR);
1231 
1232 	aq->mr &= ~(QSPI_MR_DLYBCT_MASK | QSPI_MR_DLYCS_MASK);
1233 	aq->mr |= QSPI_MR_DLYBCT(cs_hold) | QSPI_MR_DLYCS(cs_inactive);
1234 	atmel_qspi_write(aq->mr, aq, QSPI_MR);
1235 
1236 	pm_runtime_mark_last_busy(ctrl->dev.parent);
1237 	pm_runtime_put_autosuspend(ctrl->dev.parent);
1238 
1239 	return 0;
1240 }
1241 
1242 static int atmel_qspi_init(struct atmel_qspi *aq)
1243 {
1244 	int ret;
1245 
1246 	if (aq->caps->has_gclk) {
1247 		ret = atmel_qspi_reg_sync(aq);
1248 		if (ret)
1249 			return ret;
1250 		atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
1251 		return 0;
1252 	}
1253 
1254 	/* Reset the QSPI controller */
1255 	atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
1256 
1257 	/* Set the QSPI controller by default in Serial Memory Mode */
1258 	ret = atmel_qspi_set_serial_memory_mode(aq);
1259 	if (ret < 0)
1260 		return ret;
1261 
1262 	/* Enable the QSPI controller */
1263 	atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
1264 	return 0;
1265 }
1266 
1267 static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
1268 {
1269 	struct atmel_qspi *aq = dev_id;
1270 	u32 status, mask, pending;
1271 
1272 	status = atmel_qspi_read(aq, QSPI_SR);
1273 	mask = atmel_qspi_read(aq, QSPI_IMR);
1274 	pending = status & mask;
1275 
1276 	if (!pending)
1277 		return IRQ_NONE;
1278 
1279 	aq->pending |= pending;
1280 	if ((aq->pending & aq->irq_mask) == aq->irq_mask)
1281 		complete(&aq->cmd_completion);
1282 
1283 	return IRQ_HANDLED;
1284 }
1285 
1286 static int atmel_qspi_dma_init(struct spi_controller *ctrl)
1287 {
1288 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1289 	int ret;
1290 
1291 	aq->rx_chan = dma_request_chan(&aq->pdev->dev, "rx");
1292 	if (IS_ERR(aq->rx_chan)) {
1293 		aq->rx_chan = NULL;
1294 		return dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->rx_chan),
1295 				     "RX DMA channel is not available\n");
1296 	}
1297 
1298 	aq->tx_chan = dma_request_chan(&aq->pdev->dev, "tx");
1299 	if (IS_ERR(aq->tx_chan)) {
1300 		ret = dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->tx_chan),
1301 				    "TX DMA channel is not available\n");
1302 		goto release_rx_chan;
1303 	}
1304 
1305 	ctrl->dma_rx = aq->rx_chan;
1306 	ctrl->dma_tx = aq->tx_chan;
1307 	init_completion(&aq->dma_completion);
1308 
1309 	dev_info(&aq->pdev->dev, "Using %s (tx) and %s (rx) for DMA transfers\n",
1310 		 dma_chan_name(aq->tx_chan), dma_chan_name(aq->rx_chan));
1311 
1312 	return 0;
1313 
1314 release_rx_chan:
1315 	dma_release_channel(aq->rx_chan);
1316 	aq->rx_chan = NULL;
1317 	aq->tx_chan = NULL;
1318 	return ret;
1319 }
1320 
1321 static void atmel_qspi_dma_release(struct atmel_qspi *aq)
1322 {
1323 	if (aq->rx_chan)
1324 		dma_release_channel(aq->rx_chan);
1325 	if (aq->tx_chan)
1326 		dma_release_channel(aq->tx_chan);
1327 }
1328 
1329 static const struct atmel_qspi_ops atmel_qspi_ops = {
1330 	.set_cfg = atmel_qspi_set_cfg,
1331 	.transfer = atmel_qspi_transfer,
1332 };
1333 
1334 static const struct atmel_qspi_ops atmel_qspi_sama7g5_ops = {
1335 	.set_cfg = atmel_qspi_sama7g5_set_cfg,
1336 	.transfer = atmel_qspi_sama7g5_transfer,
1337 };
1338 
1339 static int atmel_qspi_probe(struct platform_device *pdev)
1340 {
1341 	struct spi_controller *ctrl;
1342 	struct atmel_qspi *aq;
1343 	struct resource *res;
1344 	int irq, err = 0;
1345 
1346 	ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(*aq));
1347 	if (!ctrl)
1348 		return -ENOMEM;
1349 
1350 	aq = spi_controller_get_devdata(ctrl);
1351 
1352 	aq->caps = of_device_get_match_data(&pdev->dev);
1353 	if (!aq->caps) {
1354 		dev_err(&pdev->dev, "Could not retrieve QSPI caps\n");
1355 		return -EINVAL;
1356 	}
1357 
1358 	init_completion(&aq->cmd_completion);
1359 	aq->pdev = pdev;
1360 
1361 	ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
1362 	if (aq->caps->octal)
1363 		ctrl->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
1364 
1365 	if (aq->caps->has_gclk)
1366 		aq->ops = &atmel_qspi_sama7g5_ops;
1367 	else
1368 		aq->ops = &atmel_qspi_ops;
1369 
1370 	ctrl->max_speed_hz = aq->caps->max_speed_hz;
1371 	ctrl->setup = atmel_qspi_setup;
1372 	ctrl->set_cs_timing = atmel_qspi_set_cs_timing;
1373 	ctrl->bus_num = -1;
1374 	ctrl->mem_ops = &atmel_qspi_mem_ops;
1375 	ctrl->num_chipselect = 1;
1376 	ctrl->dev.of_node = pdev->dev.of_node;
1377 	platform_set_drvdata(pdev, ctrl);
1378 
1379 	/* Map the registers */
1380 	aq->regs = devm_platform_ioremap_resource_byname(pdev, "qspi_base");
1381 	if (IS_ERR(aq->regs))
1382 		return dev_err_probe(&pdev->dev, PTR_ERR(aq->regs),
1383 				     "missing registers\n");
1384 
1385 	/* Map the AHB memory */
1386 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mmap");
1387 	aq->mem = devm_ioremap_resource(&pdev->dev, res);
1388 	if (IS_ERR(aq->mem))
1389 		return dev_err_probe(&pdev->dev, PTR_ERR(aq->mem),
1390 				     "missing AHB memory\n");
1391 
1392 	aq->mmap_size = resource_size(res);
1393 	aq->mmap_phys_base = (dma_addr_t)res->start;
1394 
1395 	/* Get the peripheral clock */
1396 	aq->pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
1397 	if (IS_ERR(aq->pclk))
1398 		aq->pclk = devm_clk_get_enabled(&pdev->dev, NULL);
1399 
1400 	if (IS_ERR(aq->pclk))
1401 		return dev_err_probe(&pdev->dev, PTR_ERR(aq->pclk),
1402 				     "missing peripheral clock\n");
1403 
1404 	if (aq->caps->has_qspick) {
1405 		/* Get the QSPI system clock */
1406 		aq->qspick = devm_clk_get_enabled(&pdev->dev, "qspick");
1407 		if (IS_ERR(aq->qspick)) {
1408 			dev_err(&pdev->dev, "missing system clock\n");
1409 			err = PTR_ERR(aq->qspick);
1410 			return err;
1411 		}
1412 
1413 	} else if (aq->caps->has_gclk) {
1414 		/* Get the QSPI generic clock */
1415 		aq->gclk = devm_clk_get(&pdev->dev, "gclk");
1416 		if (IS_ERR(aq->gclk)) {
1417 			dev_err(&pdev->dev, "missing Generic clock\n");
1418 			err = PTR_ERR(aq->gclk);
1419 			return err;
1420 		}
1421 	}
1422 
1423 	if (aq->caps->has_dma) {
1424 		err = atmel_qspi_dma_init(ctrl);
1425 		if (err == -EPROBE_DEFER)
1426 			return err;
1427 	}
1428 
1429 	/* Request the IRQ */
1430 	irq = platform_get_irq(pdev, 0);
1431 	if (irq < 0) {
1432 		err = irq;
1433 		goto dma_release;
1434 	}
1435 	err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt,
1436 			       0, dev_name(&pdev->dev), aq);
1437 	if (err)
1438 		goto dma_release;
1439 
1440 	pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
1441 	pm_runtime_use_autosuspend(&pdev->dev);
1442 	pm_runtime_set_active(&pdev->dev);
1443 	pm_runtime_enable(&pdev->dev);
1444 	pm_runtime_get_noresume(&pdev->dev);
1445 
1446 	err = atmel_qspi_init(aq);
1447 	if (err)
1448 		goto dma_release;
1449 
1450 	err = spi_register_controller(ctrl);
1451 	if (err) {
1452 		pm_runtime_put_noidle(&pdev->dev);
1453 		pm_runtime_disable(&pdev->dev);
1454 		pm_runtime_set_suspended(&pdev->dev);
1455 		pm_runtime_dont_use_autosuspend(&pdev->dev);
1456 		goto dma_release;
1457 	}
1458 	pm_runtime_mark_last_busy(&pdev->dev);
1459 	pm_runtime_put_autosuspend(&pdev->dev);
1460 
1461 	return 0;
1462 
1463 dma_release:
1464 	if (aq->caps->has_dma)
1465 		atmel_qspi_dma_release(aq);
1466 
1467 	return err;
1468 }
1469 
1470 static int atmel_qspi_sama7g5_suspend(struct atmel_qspi *aq)
1471 {
1472 	int ret;
1473 	u32 val;
1474 
1475 	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
1476 				 !(val & QSPI_SR2_RBUSY) &&
1477 				 (val & QSPI_SR2_HIDLE), 40,
1478 				 ATMEL_QSPI_SYNC_TIMEOUT);
1479 	if (ret)
1480 		return ret;
1481 
1482 	atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
1483 	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
1484 				 !(val & QSPI_SR2_QSPIENS), 40,
1485 				 ATMEL_QSPI_SYNC_TIMEOUT);
1486 	if (ret)
1487 		return ret;
1488 
1489 	clk_disable_unprepare(aq->gclk);
1490 
1491 	atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR);
1492 	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
1493 				 !(val & QSPI_SR2_DLOCK), 40,
1494 				 ATMEL_QSPI_TIMEOUT);
1495 	if (ret)
1496 		return ret;
1497 
1498 	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
1499 				  !(val & QSPI_SR2_CALBSY), 40,
1500 				  ATMEL_QSPI_TIMEOUT);
1501 	if (ret)
1502 		return ret;
1503 
1504 	return 0;
1505 }
1506 
1507 static void atmel_qspi_remove(struct platform_device *pdev)
1508 {
1509 	struct spi_controller *ctrl = platform_get_drvdata(pdev);
1510 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1511 	int ret;
1512 
1513 	spi_unregister_controller(ctrl);
1514 
1515 	ret = pm_runtime_get_sync(&pdev->dev);
1516 	if (ret >= 0) {
1517 		if (aq->caps->has_dma)
1518 			atmel_qspi_dma_release(aq);
1519 
1520 		if (aq->caps->has_gclk) {
1521 			ret = atmel_qspi_sama7g5_suspend(aq);
1522 			if (ret)
1523 				dev_warn(&pdev->dev, "Failed to de-init device on remove: %d\n", ret);
1524 			return;
1525 		}
1526 
1527 		atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
1528 	} else {
1529 		/*
1530 		 * atmel_qspi_runtime_{suspend,resume} just disable and enable
1531 		 * the two clks respectively. So if resume failed, the clks are
1532 		 * already off and we skip both the hardware access and disabling them again.
1533 		 */
1534 		dev_warn(&pdev->dev, "Failed to resume device on remove\n");
1535 	}
1536 
1537 	pm_runtime_disable(&pdev->dev);
1538 	pm_runtime_dont_use_autosuspend(&pdev->dev);
1539 	pm_runtime_put_noidle(&pdev->dev);
1540 }
1541 
1542 static int __maybe_unused atmel_qspi_suspend(struct device *dev)
1543 {
1544 	struct spi_controller *ctrl = dev_get_drvdata(dev);
1545 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1546 	int ret;
1547 
1548 	ret = pm_runtime_resume_and_get(dev);
1549 	if (ret < 0)
1550 		return ret;
1551 
1552 	if (aq->caps->has_gclk) {
1553 		ret = atmel_qspi_sama7g5_suspend(aq);
1554 		clk_disable_unprepare(aq->pclk);
1555 		return ret;
1556 	}
1557 
1558 	atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
1559 
1560 	pm_runtime_mark_last_busy(dev);
1561 	pm_runtime_force_suspend(dev);
1562 
1563 	clk_unprepare(aq->qspick);
1564 	clk_unprepare(aq->pclk);
1565 
1566 	return 0;
1567 }
1568 
1569 static int __maybe_unused atmel_qspi_resume(struct device *dev)
1570 {
1571 	struct spi_controller *ctrl = dev_get_drvdata(dev);
1572 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1573 	int ret;
1574 
1575 	ret = clk_prepare(aq->pclk);
1576 	if (ret)
1577 		return ret;
1578 
1579 	ret = clk_prepare(aq->qspick);
1580 	if (ret) {
1581 		clk_unprepare(aq->pclk);
1582 		return ret;
1583 	}
1584 
1585 	if (aq->caps->has_gclk)
1586 		return atmel_qspi_sama7g5_init(aq);
1587 
1588 	ret = pm_runtime_force_resume(dev);
1589 	if (ret < 0)
1590 		return ret;
1591 
1592 	atmel_qspi_init(aq);
1593 
1594 	atmel_qspi_write(aq->scr, aq, QSPI_SCR);
1595 
1596 	pm_runtime_mark_last_busy(dev);
1597 	pm_runtime_put_autosuspend(dev);
1598 
1599 	return 0;
1600 }
1601 
1602 static int __maybe_unused atmel_qspi_runtime_suspend(struct device *dev)
1603 {
1604 	struct spi_controller *ctrl = dev_get_drvdata(dev);
1605 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1606 
1607 	clk_disable(aq->qspick);
1608 	clk_disable(aq->pclk);
1609 
1610 	return 0;
1611 }
1612 
1613 static int __maybe_unused atmel_qspi_runtime_resume(struct device *dev)
1614 {
1615 	struct spi_controller *ctrl = dev_get_drvdata(dev);
1616 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
1617 	int ret;
1618 
1619 	ret = clk_enable(aq->pclk);
1620 	if (ret)
1621 		return ret;
1622 
1623 	ret = clk_enable(aq->qspick);
1624 	if (ret)
1625 		clk_disable(aq->pclk);
1626 
1627 	return ret;
1628 }
1629 
1630 static const struct dev_pm_ops __maybe_unused atmel_qspi_pm_ops = {
1631 	SET_SYSTEM_SLEEP_PM_OPS(atmel_qspi_suspend, atmel_qspi_resume)
1632 	SET_RUNTIME_PM_OPS(atmel_qspi_runtime_suspend,
1633 			   atmel_qspi_runtime_resume, NULL)
1634 };
1635 
1636 static const struct atmel_qspi_caps atmel_sama5d2_qspi_caps = {};
1637 
1638 static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps = {
1639 	.has_qspick = true,
1640 	.has_ricr = true,
1641 };
1642 
1643 static const struct atmel_qspi_caps atmel_sama7g5_ospi_caps = {
1644 	.max_speed_hz = SAMA7G5_QSPI0_MAX_SPEED_HZ,
1645 	.has_gclk = true,
1646 	.octal = true,
1647 	.has_dma = true,
1648 };
1649 
1650 static const struct atmel_qspi_caps atmel_sama7g5_qspi_caps = {
1651 	.max_speed_hz = SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ,
1652 	.has_gclk = true,
1653 	.has_dma = true,
1654 };
1655 
1656 static const struct of_device_id atmel_qspi_dt_ids[] = {
1657 	{
1658 		.compatible = "atmel,sama5d2-qspi",
1659 		.data = &atmel_sama5d2_qspi_caps,
1660 	},
1661 	{
1662 		.compatible = "microchip,sam9x60-qspi",
1663 		.data = &atmel_sam9x60_qspi_caps,
1664 	},
1665 	{
1666 		.compatible = "microchip,sama7g5-ospi",
1667 		.data = &atmel_sama7g5_ospi_caps,
1668 	},
1669 	{
1670 		.compatible = "microchip,sama7g5-qspi",
1671 		.data = &atmel_sama7g5_qspi_caps,
1672 	},
1673 
1674 	{ /* sentinel */ }
1675 };
1676 
1677 MODULE_DEVICE_TABLE(of, atmel_qspi_dt_ids);
1678 
1679 static struct platform_driver atmel_qspi_driver = {
1680 	.driver = {
1681 		.name	= "atmel_qspi",
1682 		.of_match_table	= atmel_qspi_dt_ids,
1683 		.pm	= pm_ptr(&atmel_qspi_pm_ops),
1684 	},
1685 	.probe		= atmel_qspi_probe,
1686 	.remove		= atmel_qspi_remove,
1687 };
1688 module_platform_driver(atmel_qspi_driver);
1689 
1690 MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@atmel.com>");
1691 MODULE_AUTHOR("Piotr Bugalski <bugalski.piotr@gmail.com");
1692 MODULE_DESCRIPTION("Atmel QSPI Controller driver");
1693 MODULE_LICENSE("GPL v2");
1694