// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Atmel QSPI Controller
 *
 * Copyright (C) 2015 Atmel Corporation
 * Copyright (C) 2018 Cryptera A/S
 *
 * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
 * Author: Piotr Bugalski <bugalski.piotr@gmail.com>
 *
 * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi-mem.h>

/* QSPI register offsets */
#define QSPI_CR 0x0000 /* Control Register */
#define QSPI_MR 0x0004 /* Mode Register */
#define QSPI_RD 0x0008 /* Receive Data Register */
#define QSPI_TD 0x000c /* Transmit Data Register */
#define QSPI_SR 0x0010 /* Status Register */
#define QSPI_IER 0x0014 /* Interrupt Enable Register */
#define QSPI_IDR 0x0018 /* Interrupt Disable Register */
#define QSPI_IMR 0x001c /* Interrupt Mask Register */
#define QSPI_SCR 0x0020 /* Serial Clock Register */

#define QSPI_IAR 0x0030 /* Instruction Address Register */
#define QSPI_ICR 0x0034 /* Instruction Code Register */
#define QSPI_WICR 0x0034 /* Write Instruction Code Register */
#define QSPI_IFR 0x0038 /* Instruction Frame Register */
#define QSPI_RICR 0x003C /* Read Instruction Code Register */

#define QSPI_SMR 0x0040 /* Scrambling Mode Register */
#define QSPI_SKR 0x0044 /* Scrambling Key Register */

#define QSPI_WPMR 0x00E4 /* Write Protection Mode Register */
#define QSPI_WPSR 0x00E8 /* Write Protection Status Register */

#define QSPI_VERSION 0x00FC /* Version Register */


/* Bitfields in QSPI_CR (Control Register) */
#define QSPI_CR_QSPIEN BIT(0)
#define QSPI_CR_QSPIDIS BIT(1)
#define QSPI_CR_SWRST BIT(7)
#define QSPI_CR_LASTXFER BIT(24)

/* Bitfields in QSPI_MR (Mode Register) */
#define QSPI_MR_SMM BIT(0)
#define QSPI_MR_LLB BIT(1)
#define QSPI_MR_WDRBT BIT(2)
#define QSPI_MR_SMRM BIT(3)
#define QSPI_MR_CSMODE_MASK GENMASK(5, 4)
#define QSPI_MR_CSMODE_NOT_RELOADED (0 << 4)
#define QSPI_MR_CSMODE_LASTXFER (1 << 4)
#define QSPI_MR_CSMODE_SYSTEMATICALLY (2 << 4)
#define QSPI_MR_NBBITS_MASK GENMASK(11, 8)
#define QSPI_MR_NBBITS(n) ((((n) - 8) << 8) & QSPI_MR_NBBITS_MASK)
#define QSPI_MR_DLYBCT_MASK GENMASK(23, 16)
#define QSPI_MR_DLYBCT(n) (((n) << 16) & QSPI_MR_DLYBCT_MASK)
#define QSPI_MR_DLYCS_MASK GENMASK(31, 24)
#define QSPI_MR_DLYCS(n) (((n) << 24) & QSPI_MR_DLYCS_MASK)

/* Bitfields in QSPI_SR/QSPI_IER/QSPI_IDR/QSPI_IMR */
#define QSPI_SR_RDRF BIT(0)
#define QSPI_SR_TDRE BIT(1)
#define QSPI_SR_TXEMPTY BIT(2)
#define QSPI_SR_OVRES BIT(3)
#define QSPI_SR_CSR BIT(8)
#define QSPI_SR_CSS BIT(9)
#define QSPI_SR_INSTRE BIT(10)
#define QSPI_SR_QSPIENS BIT(24)

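/*
 * A command is considered complete once both the Instruction End (INSTRE)
 * and Chip Select Rise (CSR) status bits have been observed.
 */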
#define QSPI_SR_CMD_COMPLETED (QSPI_SR_INSTRE | QSPI_SR_CSR)

/* Bitfields in QSPI_SCR (Serial Clock Register) */
#define QSPI_SCR_CPOL BIT(0)
#define QSPI_SCR_CPHA BIT(1)
#define QSPI_SCR_SCBR_MASK GENMASK(15, 8)
#define QSPI_SCR_SCBR(n) (((n) << 8) & QSPI_SCR_SCBR_MASK)
#define QSPI_SCR_DLYBS_MASK GENMASK(23, 16)
#define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK)

/* Bitfields in QSPI_ICR (Read/Write Instruction Code Register) */
#define QSPI_ICR_INST_MASK GENMASK(7, 0)
#define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK)
#define QSPI_ICR_OPT_MASK GENMASK(23, 16)
#define QSPI_ICR_OPT(opt) (((opt) << 16) & QSPI_ICR_OPT_MASK)

/* Bitfields in QSPI_IFR (Instruction Frame Register) */
#define QSPI_IFR_WIDTH_MASK GENMASK(2, 0)
#define QSPI_IFR_WIDTH_SINGLE_BIT_SPI (0 << 0)
#define QSPI_IFR_WIDTH_DUAL_OUTPUT (1 << 0)
#define QSPI_IFR_WIDTH_QUAD_OUTPUT (2 << 0)
#define QSPI_IFR_WIDTH_DUAL_IO (3 << 0)
#define QSPI_IFR_WIDTH_QUAD_IO (4 << 0)
#define QSPI_IFR_WIDTH_DUAL_CMD (5 << 0)
#define QSPI_IFR_WIDTH_QUAD_CMD (6 << 0)
#define QSPI_IFR_INSTEN BIT(4)
#define QSPI_IFR_ADDREN BIT(5)
#define QSPI_IFR_OPTEN BIT(6)
#define QSPI_IFR_DATAEN BIT(7)
#define QSPI_IFR_OPTL_MASK GENMASK(9, 8)
#define QSPI_IFR_OPTL_1BIT (0 << 8)
#define QSPI_IFR_OPTL_2BIT (1 << 8)
#define QSPI_IFR_OPTL_4BIT (2 << 8)
#define QSPI_IFR_OPTL_8BIT (3 << 8)
#define QSPI_IFR_ADDRL BIT(10)
#define QSPI_IFR_TFRTYP_MEM BIT(12)
#define QSPI_IFR_SAMA5D2_WRITE_TRSFR BIT(13)
#define QSPI_IFR_CRM BIT(14)
#define QSPI_IFR_NBDUM_MASK GENMASK(20, 16)
#define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK)
#define QSPI_IFR_APBTFRTYP_READ BIT(24) /* Defined in SAM9X60 */

/* Bitfields in QSPI_SMR (Scrambling Mode Register) */
#define QSPI_SMR_SCREN BIT(0)
#define QSPI_SMR_RVDIS BIT(1)

/* Bitfields in QSPI_WPMR (Write Protection Mode Register) */
#define QSPI_WPMR_WPEN BIT(0)
#define QSPI_WPMR_WPKEY_MASK GENMASK(31, 8)
#define QSPI_WPMR_WPKEY(wpkey) (((wpkey) << 8) & QSPI_WPMR_WPKEY_MASK)

/* Bitfields in QSPI_WPSR (Write Protection Status Register) */
#define QSPI_WPSR_WPVS BIT(0)
#define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8)
#define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC_MASK)

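/*
 * Per-SoC capabilities: SAM9X60-class controllers have a dedicated QSPI
 * system clock (qspick) and separate read/write instruction code registers
 * (RICR/WICR), while the SAMA5D2 uses a single ICR and no extra clock.
 */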
struct atmel_qspi_caps {
	bool has_qspick;
	bool has_ricr;
};

struct atmel_qspi {
	void __iomem *regs;
	void __iomem *mem;
	struct clk *pclk;
	struct clk *qspick;
	struct platform_device *pdev;
	const struct atmel_qspi_caps *caps;
	resource_size_t mmap_size;
	u32 pending;
	u32 mr;
	u32 scr;
	struct completion cmd_completion;
};

struct atmel_qspi_mode {
	u8 cmd_buswidth;
	u8 addr_buswidth;
	u8 data_buswidth;
	u32 config;
};

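/*
 * Supported (cmd, addr, data) buswidth combinations and the matching
 * QSPI_IFR WIDTH encoding; atmel_qspi_find_mode() scans this table in order.
 */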
static const struct atmel_qspi_mode atmel_qspi_modes[] = {
	{ 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
	{ 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
	{ 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
	{ 1, 2, 2, QSPI_IFR_WIDTH_DUAL_IO },
	{ 1, 4, 4, QSPI_IFR_WIDTH_QUAD_IO },
	{ 2, 2, 2, QSPI_IFR_WIDTH_DUAL_CMD },
	{ 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
};

#ifdef VERBOSE_DEBUG
static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
{
	switch (offset) {
	case QSPI_CR:
		return "CR";
	case QSPI_MR:
		return "MR";
	case QSPI_RD:
		return "RD";
	case QSPI_TD:
		return "TD";
	case QSPI_SR:
		return "SR";
	case QSPI_IER:
		return "IER";
	case QSPI_IDR:
		return "IDR";
	case QSPI_IMR:
		return "IMR";
	case QSPI_SCR:
		return "SCR";
	case QSPI_IAR:
		return "IAR";
	case QSPI_ICR:
		return "ICR/WICR";
	case QSPI_IFR:
		return "IFR";
	case QSPI_RICR:
		return "RICR";
	case QSPI_SMR:
		return "SMR";
	case QSPI_SKR:
		return "SKR";
	case QSPI_WPMR:
		return "WPMR";
	case QSPI_WPSR:
		return "WPSR";
	case QSPI_VERSION:
		return "VERSION";
	default:
		snprintf(tmp, sz, "0x%02x", offset);
		break;
	}

	return tmp;
}
#endif /* VERBOSE_DEBUG */

static u32 atmel_qspi_read(struct atmel_qspi *aq, u32 offset)
{
	u32 value = readl_relaxed(aq->regs + offset);

#ifdef VERBOSE_DEBUG
	char tmp[8];

	dev_vdbg(&aq->pdev->dev, "read 0x%08x from %s\n", value,
		 atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
#endif /* VERBOSE_DEBUG */

	return value;
}

static void atmel_qspi_write(u32 value, struct atmel_qspi *aq, u32 offset)
{
#ifdef VERBOSE_DEBUG
	char tmp[8];

	dev_vdbg(&aq->pdev->dev, "write 0x%08x into %s\n", value,
		 atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
#endif /* VERBOSE_DEBUG */

	writel_relaxed(value, aq->regs + offset);
}

static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op,
					    const struct atmel_qspi_mode *mode)
{
	if (op->cmd.buswidth != mode->cmd_buswidth)
		return false;

	if (op->addr.nbytes && op->addr.buswidth != mode->addr_buswidth)
		return false;

	if (op->data.nbytes && op->data.buswidth != mode->data_buswidth)
		return false;

	return true;
}

static int atmel_qspi_find_mode(const struct spi_mem_op *op)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(atmel_qspi_modes); i++)
		if (atmel_qspi_is_compatible(op, &atmel_qspi_modes[i]))
			return i;

	return -EOPNOTSUPP;
}

static bool atmel_qspi_supports_op(struct spi_mem *mem,
				   const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	if (atmel_qspi_find_mode(op) < 0)
		return false;

	/* special case not supported by hardware */
	if (op->addr.nbytes == 2 && op->cmd.buswidth != op->addr.buswidth &&
	    op->dummy.nbytes == 0)
		return false;

	return true;
}

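/*
 * Translate a spi_mem operation into the QSPI instruction frame: build the
 * IAR/ICR/IFR register values, program them, and return in @offset the
 * address at which the data phase must be accessed in the AHB memory window.
 */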
static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
			      const struct spi_mem_op *op, u32 *offset)
{
	u32 iar, icr, ifr;
	u32 dummy_cycles = 0;
	int mode;

	iar = 0;
	icr = QSPI_ICR_INST(op->cmd.opcode);
	ifr = QSPI_IFR_INSTEN;

	mode = atmel_qspi_find_mode(op);
	if (mode < 0)
		return mode;
	ifr |= atmel_qspi_modes[mode].config;

	if (op->dummy.nbytes)
		dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;

	/*
	 * The controller allows 24- and 32-bit addressing, while NAND flash
	 * requires 16-bit addresses. 8-bit addresses are handled with the
	 * option field. For 16-bit addresses, the workaround depends on the
	 * number of requested dummy cycles: if there are 8 or more dummy
	 * cycles, the address is shifted and sent with the first dummy byte;
	 * otherwise the opcode is disabled and the first byte of the address
	 * carries the command opcode (which only works if the opcode and
	 * address use the same buswidth). The unsupported case is a 16-bit
	 * address without enough dummy cycles when the opcode uses a
	 * different buswidth than the address.
	 */
	if (op->addr.buswidth) {
		switch (op->addr.nbytes) {
		case 0:
			break;
		case 1:
			ifr |= QSPI_IFR_OPTEN | QSPI_IFR_OPTL_8BIT;
			icr |= QSPI_ICR_OPT(op->addr.val & 0xff);
			break;
		case 2:
			if (dummy_cycles < 8 / op->addr.buswidth) {
				ifr &= ~QSPI_IFR_INSTEN;
				ifr |= QSPI_IFR_ADDREN;
				iar = (op->cmd.opcode << 16) |
					(op->addr.val & 0xffff);
			} else {
				ifr |= QSPI_IFR_ADDREN;
				iar = (op->addr.val << 8) & 0xffffff;
				dummy_cycles -= 8 / op->addr.buswidth;
			}
			break;
		case 3:
			ifr |= QSPI_IFR_ADDREN;
			iar = op->addr.val & 0xffffff;
			break;
		case 4:
			ifr |= QSPI_IFR_ADDREN | QSPI_IFR_ADDRL;
			iar = op->addr.val & 0x7ffffff;
			break;
		default:
			return -ENOTSUPP;
		}
	}

	/* Offset of the data access in the QSPI memory space */
	*offset = iar;

	/* Set number of dummy cycles */
	if (dummy_cycles)
		ifr |= QSPI_IFR_NBDUM(dummy_cycles);

	/* Set data enable and data transfer type. */
	if (op->data.nbytes) {
		ifr |= QSPI_IFR_DATAEN;

		if (op->addr.nbytes)
			ifr |= QSPI_IFR_TFRTYP_MEM;
	}

	/*
	 * If the QSPI controller is set in regular SPI mode, set it in
	 * Serial Memory Mode (SMM).
	 */
	if (!(aq->mr & QSPI_MR_SMM)) {
		aq->mr |= QSPI_MR_SMM;
		atmel_qspi_write(aq->mr, aq, QSPI_MR);
	}

	/* Clear pending interrupts */
	(void)atmel_qspi_read(aq, QSPI_SR);

	/* Set QSPI Instruction Frame registers. */
	if (op->addr.nbytes && !op->data.nbytes)
		atmel_qspi_write(iar, aq, QSPI_IAR);

	if (aq->caps->has_ricr) {
		if (op->data.dir == SPI_MEM_DATA_IN)
			atmel_qspi_write(icr, aq, QSPI_RICR);
		else
			atmel_qspi_write(icr, aq, QSPI_WICR);
	} else {
		if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
			ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR;

		atmel_qspi_write(icr, aq, QSPI_ICR);
	}

	atmel_qspi_write(ifr, aq, QSPI_IFR);

	return 0;
}

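/*
 * Execute a spi_mem operation. Data is not pushed through a FIFO: it is
 * copied to/from the memory-mapped AHB window with memcpy_toio/fromio().
 * Completion is detected by polling QSPI_SR once and, if the command has not
 * finished yet, by waiting for the INSTRE/CSR interrupts.
 */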
static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
	u32 sr, offset;
	int err;

	/*
	 * Check if the address exceeds the MMIO window size. An improvement
	 * would be to add support for regular SPI mode and fall back to it
	 * when the flash memories overrun the controller's memory space.
	 */
	if (op->addr.val + op->data.nbytes > aq->mmap_size)
		return -ENOTSUPP;

	err = pm_runtime_resume_and_get(&aq->pdev->dev);
	if (err < 0)
		return err;

	err = atmel_qspi_set_cfg(aq, op, &offset);
	if (err)
		goto pm_runtime_put;

	/* Skip to the final steps if there is no data */
	if (op->data.nbytes) {
		/* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
		(void)atmel_qspi_read(aq, QSPI_IFR);

		/* Send/Receive data */
		if (op->data.dir == SPI_MEM_DATA_IN)
			memcpy_fromio(op->data.buf.in, aq->mem + offset,
				      op->data.nbytes);
		else
			memcpy_toio(aq->mem + offset, op->data.buf.out,
				    op->data.nbytes);

		/* Release the chip-select */
		atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
	}

	/* Poll INSTRuction End status */
	sr = atmel_qspi_read(aq, QSPI_SR);
	if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
		goto pm_runtime_put;

	/* Wait for INSTRuction End interrupt */
	reinit_completion(&aq->cmd_completion);
	aq->pending = sr & QSPI_SR_CMD_COMPLETED;
	atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IER);
	if (!wait_for_completion_timeout(&aq->cmd_completion,
					 msecs_to_jiffies(1000)))
		err = -ETIMEDOUT;
	atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IDR);

pm_runtime_put:
	pm_runtime_mark_last_busy(&aq->pdev->dev);
	pm_runtime_put_autosuspend(&aq->pdev->dev);
	return err;
}

static const char *atmel_qspi_get_name(struct spi_mem *spimem)
{
	return dev_name(spimem->spi->dev.parent);
}

static const struct spi_controller_mem_ops atmel_qspi_mem_ops = {
	.supports_op = atmel_qspi_supports_op,
	.exec_op = atmel_qspi_exec_op,
	.get_name = atmel_qspi_get_name
};

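/*
 * Program the serial clock baud rate divider so that the QSPI clock,
 * pclk / (SCBR + 1), does not exceed the device's max_speed_hz.
 */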
static int atmel_qspi_setup(struct spi_device *spi)
{
	struct spi_controller *ctrl = spi->controller;
	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
	unsigned long src_rate;
	u32 scbr;
	int ret;

	if (ctrl->busy)
		return -EBUSY;

	if (!spi->max_speed_hz)
		return -EINVAL;

	src_rate = clk_get_rate(aq->pclk);
	if (!src_rate)
		return -EINVAL;

	/* Compute the QSPI baudrate */
	scbr = DIV_ROUND_UP(src_rate, spi->max_speed_hz);
	if (scbr > 0)
		scbr--;

	ret = pm_runtime_resume_and_get(ctrl->dev.parent);
	if (ret < 0)
		return ret;

	aq->scr &= ~QSPI_SCR_SCBR_MASK;
	aq->scr |= QSPI_SCR_SCBR(scbr);
	atmel_qspi_write(aq->scr, aq, QSPI_SCR);

	pm_runtime_mark_last_busy(ctrl->dev.parent);
	pm_runtime_put_autosuspend(ctrl->dev.parent);

	return 0;
}

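/*
 * Convert the requested chip-select delays (in ns) into peripheral clock
 * cycles: DLYBS (setup) and DLYCS (inactive) count single pclk periods,
 * while DLYBCT (hold) counts periods of 32 pclk cycles, hence the extra
 * factor of 32 in its divisor.
 */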
static int atmel_qspi_set_cs_timing(struct spi_device *spi)
{
	struct spi_controller *ctrl = spi->controller;
	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
	unsigned long clk_rate;
	u32 cs_inactive;
	u32 cs_setup;
	u32 cs_hold;
	int delay;
	int ret;

	clk_rate = clk_get_rate(aq->pclk);
	if (!clk_rate)
		return -EINVAL;

	/* hold */
	delay = spi_delay_to_ns(&spi->cs_hold, NULL);
	if (aq->mr & QSPI_MR_SMM) {
		if (delay > 0)
			dev_warn(&aq->pdev->dev,
				 "Ignoring cs_hold, must be 0 in Serial Memory Mode.\n");
		cs_hold = 0;
	} else {
		delay = spi_delay_to_ns(&spi->cs_hold, NULL);
		if (delay < 0)
			return delay;

		cs_hold = DIV_ROUND_UP((delay * DIV_ROUND_UP(clk_rate, 1000000)), 32000);
	}

	/* setup */
	delay = spi_delay_to_ns(&spi->cs_setup, NULL);
	if (delay < 0)
		return delay;

	cs_setup = DIV_ROUND_UP((delay * DIV_ROUND_UP(clk_rate, 1000000)),
				1000);

	/* inactive */
	delay = spi_delay_to_ns(&spi->cs_inactive, NULL);
	if (delay < 0)
		return delay;
	cs_inactive = DIV_ROUND_UP((delay * DIV_ROUND_UP(clk_rate, 1000000)), 1000);

	ret = pm_runtime_resume_and_get(ctrl->dev.parent);
	if (ret < 0)
		return ret;

	aq->scr &= ~QSPI_SCR_DLYBS_MASK;
	aq->scr |= QSPI_SCR_DLYBS(cs_setup);
	atmel_qspi_write(aq->scr, aq, QSPI_SCR);

	aq->mr &= ~(QSPI_MR_DLYBCT_MASK | QSPI_MR_DLYCS_MASK);
	aq->mr |= QSPI_MR_DLYBCT(cs_hold) | QSPI_MR_DLYCS(cs_inactive);
	atmel_qspi_write(aq->mr, aq, QSPI_MR);

	pm_runtime_mark_last_busy(ctrl->dev.parent);
	pm_runtime_put_autosuspend(ctrl->dev.parent);

	return 0;
}

static void atmel_qspi_init(struct atmel_qspi *aq)
{
	/* Reset the QSPI controller */
	atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);

	/* Set the QSPI controller by default in Serial Memory Mode */
	aq->mr |= QSPI_MR_SMM;
	atmel_qspi_write(aq->mr, aq, QSPI_MR);

	/* Enable the QSPI controller */
	atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
}

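/*
 * Completion status may arrive in more than one interrupt, so accumulate the
 * pending bits and only complete the command once both INSTRE and CSR have
 * been seen.
 */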
static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
{
	struct atmel_qspi *aq = dev_id;
	u32 status, mask, pending;

	status = atmel_qspi_read(aq, QSPI_SR);
	mask = atmel_qspi_read(aq, QSPI_IMR);
	pending = status & mask;

	if (!pending)
		return IRQ_NONE;

	aq->pending |= pending;
	if ((aq->pending & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
		complete(&aq->cmd_completion);

	return IRQ_HANDLED;
}

static int atmel_qspi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctrl;
	struct atmel_qspi *aq;
	struct resource *res;
	int irq, err = 0;

	ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(*aq));
	if (!ctrl)
		return -ENOMEM;

	ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
	ctrl->setup = atmel_qspi_setup;
	ctrl->set_cs_timing = atmel_qspi_set_cs_timing;
	ctrl->bus_num = -1;
	ctrl->mem_ops = &atmel_qspi_mem_ops;
	ctrl->num_chipselect = 1;
	ctrl->dev.of_node = pdev->dev.of_node;
	platform_set_drvdata(pdev, ctrl);

	aq = spi_controller_get_devdata(ctrl);

	init_completion(&aq->cmd_completion);
	aq->pdev = pdev;

	/* Map the registers */
	aq->regs = devm_platform_ioremap_resource_byname(pdev, "qspi_base");
	if (IS_ERR(aq->regs))
		return dev_err_probe(&pdev->dev, PTR_ERR(aq->regs),
				     "missing registers\n");

	/* Map the AHB memory */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mmap");
	aq->mem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(aq->mem))
		return dev_err_probe(&pdev->dev, PTR_ERR(aq->mem),
				     "missing AHB memory\n");

	aq->mmap_size = resource_size(res);

	/* Get the peripheral clock */
	aq->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(aq->pclk))
		aq->pclk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(aq->pclk))
		return dev_err_probe(&pdev->dev, PTR_ERR(aq->pclk),
				     "missing peripheral clock\n");

	/* Enable the peripheral clock */
	err = clk_prepare_enable(aq->pclk);
	if (err)
		return dev_err_probe(&pdev->dev, err,
				     "failed to enable the peripheral clock\n");

	aq->caps = of_device_get_match_data(&pdev->dev);
	if (!aq->caps) {
		dev_err(&pdev->dev, "Could not retrieve QSPI caps\n");
		err = -EINVAL;
		goto disable_pclk;
	}

	if (aq->caps->has_qspick) {
		/* Get the QSPI system clock */
		aq->qspick = devm_clk_get(&pdev->dev, "qspick");
		if (IS_ERR(aq->qspick)) {
			dev_err(&pdev->dev, "missing system clock\n");
			err = PTR_ERR(aq->qspick);
			goto disable_pclk;
		}

		/* Enable the QSPI system clock */
		err = clk_prepare_enable(aq->qspick);
		if (err) {
			dev_err(&pdev->dev,
				"failed to enable the QSPI system clock\n");
			goto disable_pclk;
		}
	}

	/* Request the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = irq;
		goto disable_qspick;
	}
	err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt,
			       0, dev_name(&pdev->dev), aq);
	if (err)
		goto disable_qspick;

	pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);

	atmel_qspi_init(aq);

	err = spi_register_controller(ctrl);
	if (err) {
		pm_runtime_put_noidle(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		pm_runtime_set_suspended(&pdev->dev);
		pm_runtime_dont_use_autosuspend(&pdev->dev);
		goto disable_qspick;
	}
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

disable_qspick:
	clk_disable_unprepare(aq->qspick);
disable_pclk:
	clk_disable_unprepare(aq->pclk);

	return err;
}

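/*
 * Unregister the controller first so no new transfers can be issued, then
 * disable the QSPI block and release the clocks, tolerating a failed runtime
 * resume (in which case the clocks are already off).
 */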
static void atmel_qspi_remove(struct platform_device *pdev)
{
	struct spi_controller *ctrl = platform_get_drvdata(pdev);
	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
	int ret;

	spi_unregister_controller(ctrl);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret >= 0) {
		atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
		clk_disable(aq->qspick);
		clk_disable(aq->pclk);
	} else {
		/*
		 * atmel_qspi_runtime_{suspend,resume} just disable and enable
		 * the two clocks respectively, so if the resume failed both
		 * clocks are already off: skip the hardware access and do not
		 * disable the clocks again.
		 */
		dev_warn(&pdev->dev, "Failed to resume device on remove\n");
	}

	clk_unprepare(aq->qspick);
	clk_unprepare(aq->pclk);

	pm_runtime_disable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
}

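/*
 * System sleep: suspend disables the controller and fully releases both
 * clocks; resume re-prepares the clocks and reprograms the controller
 * (mode and serial clock registers) since its state is lost across suspend.
 */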
static int __maybe_unused atmel_qspi_suspend(struct device *dev)
{
	struct spi_controller *ctrl = dev_get_drvdata(dev);
	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_force_suspend(dev);

	clk_unprepare(aq->qspick);
	clk_unprepare(aq->pclk);

	return 0;
}

static int __maybe_unused atmel_qspi_resume(struct device *dev)
{
	struct spi_controller *ctrl = dev_get_drvdata(dev);
	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
	int ret;

	ret = clk_prepare(aq->pclk);
	if (ret)
		return ret;

	ret = clk_prepare(aq->qspick);
	if (ret) {
		clk_unprepare(aq->pclk);
		return ret;
	}

	ret = pm_runtime_force_resume(dev);
	if (ret < 0)
		return ret;

	atmel_qspi_init(aq);

	atmel_qspi_write(aq->scr, aq, QSPI_SCR);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
}

static int __maybe_unused atmel_qspi_runtime_suspend(struct device *dev)
{
	struct spi_controller *ctrl = dev_get_drvdata(dev);
	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);

	clk_disable(aq->qspick);
	clk_disable(aq->pclk);

	return 0;
}

static int __maybe_unused atmel_qspi_runtime_resume(struct device *dev)
{
	struct spi_controller *ctrl = dev_get_drvdata(dev);
	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
	int ret;

	ret = clk_enable(aq->pclk);
	if (ret)
		return ret;

	ret = clk_enable(aq->qspick);
	if (ret)
		clk_disable(aq->pclk);

	return ret;
}

static const struct dev_pm_ops __maybe_unused atmel_qspi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(atmel_qspi_suspend, atmel_qspi_resume)
	SET_RUNTIME_PM_OPS(atmel_qspi_runtime_suspend,
			   atmel_qspi_runtime_resume, NULL)
};

static const struct atmel_qspi_caps atmel_sama5d2_qspi_caps = {};

static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps = {
	.has_qspick = true,
	.has_ricr = true,
};

static const struct of_device_id atmel_qspi_dt_ids[] = {
	{
		.compatible = "atmel,sama5d2-qspi",
		.data = &atmel_sama5d2_qspi_caps,
	},
	{
		.compatible = "microchip,sam9x60-qspi",
		.data = &atmel_sam9x60_qspi_caps,
	},
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_qspi_dt_ids);

static struct platform_driver atmel_qspi_driver = {
	.driver = {
		.name = "atmel_qspi",
		.of_match_table = atmel_qspi_dt_ids,
		.pm = pm_ptr(&atmel_qspi_pm_ops),
	},
	.probe = atmel_qspi_probe,
	.remove = atmel_qspi_remove,
};
module_platform_driver(atmel_qspi_driver);

MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@atmel.com>");
MODULE_AUTHOR("Piotr Bugalski <bugalski.piotr@gmail.com>");
MODULE_DESCRIPTION("Atmel QSPI Controller driver");
MODULE_LICENSE("GPL v2");