// SPDX-License-Identifier: GPL-2.0
2 /*
3 * Microchip coreQSPI QSPI controller driver
4 *
5 * Copyright (C) 2018-2022 Microchip Technology Inc. and its subsidiaries
6 *
7 * Author: Naga Sureshkumar Relli <nagasuresh.relli@microchip.com>
8 *
9 */
10
11 #include <linux/clk.h>
12 #include <linux/err.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/iopoll.h>
17 #include <linux/module.h>
18 #include <linux/of.h>
19 #include <linux/of_irq.h>
20 #include <linux/platform_device.h>
21 #include <linux/spi/spi.h>
22 #include <linux/spi/spi-mem.h>
23
24 /*
25 * QSPI Control register mask defines
26 */
27 #define CONTROL_ENABLE BIT(0)
28 #define CONTROL_MASTER BIT(1)
29 #define CONTROL_XIP BIT(2)
30 #define CONTROL_XIPADDR BIT(3)
31 #define CONTROL_CLKIDLE BIT(10)
32 #define CONTROL_SAMPLE_MASK GENMASK(12, 11)
33 #define CONTROL_MODE0 BIT(13)
34 #define CONTROL_MODE12_MASK GENMASK(15, 14)
35 #define CONTROL_MODE12_EX_RO BIT(14)
36 #define CONTROL_MODE12_EX_RW BIT(15)
37 #define CONTROL_MODE12_FULL GENMASK(15, 14)
38 #define CONTROL_FLAGSX4 BIT(16)
39 #define CONTROL_CLKRATE_MASK GENMASK(27, 24)
40 #define CONTROL_CLKRATE_SHIFT 24
41
42 /*
43 * QSPI Frames register mask defines
44 */
45 #define FRAMES_TOTALBYTES_MASK GENMASK(15, 0)
46 #define FRAMES_CMDBYTES_MASK GENMASK(24, 16)
47 #define FRAMES_CMDBYTES_SHIFT 16
48 #define FRAMES_SHIFT 25
49 #define FRAMES_IDLE_MASK GENMASK(29, 26)
50 #define FRAMES_IDLE_SHIFT 26
51 #define FRAMES_FLAGBYTE BIT(30)
52 #define FRAMES_FLAGWORD BIT(31)
53
54 /*
55 * QSPI Interrupt Enable register mask defines
56 */
57 #define IEN_TXDONE BIT(0)
58 #define IEN_RXDONE BIT(1)
59 #define IEN_RXAVAILABLE BIT(2)
60 #define IEN_TXAVAILABLE BIT(3)
61 #define IEN_RXFIFOEMPTY BIT(4)
62 #define IEN_TXFIFOFULL BIT(5)
63
64 /*
65 * QSPI Status register mask defines
66 */
67 #define STATUS_TXDONE BIT(0)
68 #define STATUS_RXDONE BIT(1)
69 #define STATUS_RXAVAILABLE BIT(2)
70 #define STATUS_TXAVAILABLE BIT(3)
71 #define STATUS_RXFIFOEMPTY BIT(4)
72 #define STATUS_TXFIFOFULL BIT(5)
73 #define STATUS_READY BIT(7)
74 #define STATUS_FLAGSX4 BIT(8)
75 #define STATUS_MASK GENMASK(8, 0)
76
77 #define BYTESUPPER_MASK GENMASK(31, 16)
78 #define BYTESLOWER_MASK GENMASK(15, 0)
79
80 #define MAX_DIVIDER 16
81 #define MIN_DIVIDER 0
82 #define MAX_DATA_CMD_LEN 256
83
84 /* QSPI ready time out value */
85 #define TIMEOUT_MS 500
86
87 /*
88 * QSPI Register offsets.
89 */
90 #define REG_CONTROL (0x00)
91 #define REG_FRAMES (0x04)
92 #define REG_IEN (0x0c)
93 #define REG_STATUS (0x10)
94 #define REG_DIRECT_ACCESS (0x14)
95 #define REG_UPPER_ACCESS (0x18)
96 #define REG_RX_DATA (0x40)
97 #define REG_TX_DATA (0x44)
98 #define REG_X4_RX_DATA (0x48)
99 #define REG_X4_TX_DATA (0x4c)
100 #define REG_FRAMESUP (0x50)
101
/**
 * struct mchp_coreqspi - Defines qspi driver instance
 * @regs: Virtual address of the QSPI controller registers
 * @clk: QSPI Operating clock
 * @data_completion: Completion signalled by the ISR when a sequence finishes
 * @op_lock: Serialises access to the controller registers and the
 *           buffer/length state below
 * @txbuf: Current TX buffer position (NULL when 0xaa filler bytes are sent)
 * @rxbuf: Current RX buffer position
 * @irq: IRQ number
 * @tx_len: Number of bytes left to transfer
 * @rx_len: Number of bytes left to receive
 */
struct mchp_coreqspi {
	void __iomem *regs;
	struct clk *clk;
	struct completion data_completion;
	struct mutex op_lock; /* lock access to the device */
	u8 *txbuf;
	u8 *rxbuf;
	int irq;
	int tx_len;
	int rx_len;
};
125
mchp_coreqspi_set_mode(struct mchp_coreqspi * qspi,const struct spi_mem_op * op)126 static int mchp_coreqspi_set_mode(struct mchp_coreqspi *qspi, const struct spi_mem_op *op)
127 {
128 u32 control = readl_relaxed(qspi->regs + REG_CONTROL);
129
130 /*
131 * The operating mode can be configured based on the command that needs to be send.
132 * bits[15:14]: Sets whether multiple bit SPI operates in normal, extended or full modes.
133 * 00: Normal (single DQ0 TX and single DQ1 RX lines)
134 * 01: Extended RO (command and address bytes on DQ0 only)
135 * 10: Extended RW (command byte on DQ0 only)
136 * 11: Full. (command and address are on all DQ lines)
137 * bit[13]: Sets whether multiple bit SPI uses 2 or 4 bits of data
138 * 0: 2-bits (BSPI)
139 * 1: 4-bits (QSPI)
140 */
141 if (op->data.buswidth == 4 || op->data.buswidth == 2) {
142 control &= ~CONTROL_MODE12_MASK;
143 if (op->cmd.buswidth == 1 && (op->addr.buswidth == 1 || op->addr.buswidth == 0))
144 control |= CONTROL_MODE12_EX_RO;
145 else if (op->cmd.buswidth == 1)
146 control |= CONTROL_MODE12_EX_RW;
147 else
148 control |= CONTROL_MODE12_FULL;
149
150 control |= CONTROL_MODE0;
151 } else {
152 control &= ~(CONTROL_MODE12_MASK |
153 CONTROL_MODE0);
154 }
155
156 writel_relaxed(control, qspi->regs + REG_CONTROL);
157
158 return 0;
159 }
160
mchp_coreqspi_read_op(struct mchp_coreqspi * qspi)161 static inline void mchp_coreqspi_read_op(struct mchp_coreqspi *qspi)
162 {
163 u32 control, data;
164
165 if (!qspi->rx_len)
166 return;
167
168 control = readl_relaxed(qspi->regs + REG_CONTROL);
169
170 /*
171 * Read 4-bytes from the SPI FIFO in single transaction and then read
172 * the reamaining data byte wise.
173 */
174 control |= CONTROL_FLAGSX4;
175 writel_relaxed(control, qspi->regs + REG_CONTROL);
176
177 while (qspi->rx_len >= 4) {
178 while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
179 ;
180 data = readl_relaxed(qspi->regs + REG_X4_RX_DATA);
181 *(u32 *)qspi->rxbuf = data;
182 qspi->rxbuf += 4;
183 qspi->rx_len -= 4;
184 }
185
186 control &= ~CONTROL_FLAGSX4;
187 writel_relaxed(control, qspi->regs + REG_CONTROL);
188
189 while (qspi->rx_len--) {
190 while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
191 ;
192 data = readl_relaxed(qspi->regs + REG_RX_DATA);
193 *qspi->rxbuf++ = (data & 0xFF);
194 }
195 }
196
mchp_coreqspi_write_op(struct mchp_coreqspi * qspi)197 static inline void mchp_coreqspi_write_op(struct mchp_coreqspi *qspi)
198 {
199 u32 control, data;
200
201 control = readl_relaxed(qspi->regs + REG_CONTROL);
202 control |= CONTROL_FLAGSX4;
203 writel_relaxed(control, qspi->regs + REG_CONTROL);
204
205 while (qspi->tx_len >= 4) {
206 while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
207 ;
208 data = *(u32 *)qspi->txbuf;
209 qspi->txbuf += 4;
210 qspi->tx_len -= 4;
211 writel_relaxed(data, qspi->regs + REG_X4_TX_DATA);
212 }
213
214 control &= ~CONTROL_FLAGSX4;
215 writel_relaxed(control, qspi->regs + REG_CONTROL);
216
217 while (qspi->tx_len--) {
218 while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
219 ;
220 data = *qspi->txbuf++;
221 writel_relaxed(data, qspi->regs + REG_TX_DATA);
222 }
223 }
224
/*
 * Full-duplex transfer: clock out qspi->tx_len bytes while capturing the
 * same number of bytes into qspi->rxbuf. A NULL txbuf sends 0xaa filler
 * bytes instead of real data.
 */
static inline void mchp_coreqspi_write_read_op(struct mchp_coreqspi *qspi)
{
	u32 control, data;

	/* Full duplex: every transmitted byte produces a received byte. */
	qspi->rx_len = qspi->tx_len;

	/* Use the 4-byte wide FIFO ports for the bulk of the data. */
	control = readl_relaxed(qspi->regs + REG_CONTROL);
	control |= CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->tx_len >= 4) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
			;

		data = qspi->txbuf ? *((u32 *)qspi->txbuf) : 0xaa;
		if (qspi->txbuf)
			qspi->txbuf += 4;
		qspi->tx_len -= 4;
		writel_relaxed(data, qspi->regs + REG_X4_TX_DATA);

		/*
		 * The rx FIFO is twice the size of the tx FIFO, so there is
		 * no requirement to block transmission if receive data is not
		 * ready, and it is fine to let the tx FIFO completely fill
		 * without reading anything from the rx FIFO. Once the tx FIFO
		 * has been filled and becomes non-full due to a transmission
		 * occurring there will always be something to receive.
		 * IOW, this is safe as TX_FIFO_SIZE + 4 < 2 * TX_FIFO_SIZE
		 */
		if (qspi->rx_len >= 4) {
			if (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXAVAILABLE) {
				data = readl_relaxed(qspi->regs + REG_X4_RX_DATA);
				*(u32 *)qspi->rxbuf = data;
				qspi->rxbuf += 4;
				qspi->rx_len -= 4;
			}
		}
	}

	/*
	 * Since transmission is not being blocked by clearing the rx FIFO,
	 * loop here until all received data "leaked" by the loop above has
	 * been dealt with.
	 */
	while (qspi->rx_len >= 4) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
			;
		data = readl_relaxed(qspi->regs + REG_X4_RX_DATA);
		*(u32 *)qspi->rxbuf = data;
		qspi->rxbuf += 4;
		qspi->rx_len -= 4;
	}

	/*
	 * Since rx_len and tx_len must be < 4 bytes at this point, there's no
	 * concern about overflowing the rx or tx FIFOs any longer. It's
	 * therefore safe to loop over the remainder of the transmit data before
	 * handling the remaining receive data.
	 */
	if (!qspi->tx_len)
		return;

	/* Drop to byte-wide FIFO accesses for the trailing bytes. */
	control &= ~CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->tx_len--) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
			;
		data = qspi->txbuf ? *qspi->txbuf : 0xaa;
		/*
		 * NOTE(review): txbuf is advanced even when it is NULL;
		 * harmless in practice but technically undefined - consider
		 * guarding the increment like the 4-byte loop above does.
		 */
		qspi->txbuf++;
		writel_relaxed(data, qspi->regs + REG_TX_DATA);
	}

	while (qspi->rx_len--) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
			;
		data = readl_relaxed(qspi->regs + REG_RX_DATA);
		*qspi->rxbuf++ = (data & 0xFF);
	}
}
305
mchp_coreqspi_enable_ints(struct mchp_coreqspi * qspi)306 static void mchp_coreqspi_enable_ints(struct mchp_coreqspi *qspi)
307 {
308 u32 mask = IEN_TXDONE |
309 IEN_RXDONE |
310 IEN_RXAVAILABLE;
311
312 writel_relaxed(mask, qspi->regs + REG_IEN);
313 }
314
/* Mask all QSPI interrupt sources. */
static void mchp_coreqspi_disable_ints(struct mchp_coreqspi *qspi)
{
	writel_relaxed(0, qspi->regs + REG_IEN);
}
319
/*
 * Interrupt handler: acknowledge pending events, drain received data and
 * complete the waiting operation once the whole sequence has finished.
 */
static irqreturn_t mchp_coreqspi_isr(int irq, void *dev_id)
{
	struct mchp_coreqspi *qspi = (struct mchp_coreqspi *)dev_id;
	irqreturn_t ret = IRQ_NONE;
	int intfield = readl_relaxed(qspi->regs + REG_STATUS) & STATUS_MASK;

	/* Nothing pending for us - the IRQ line is shared (IRQF_SHARED). */
	if (intfield == 0)
		return ret;

	/*
	 * The IEN_* bits occupy the same positions as the STATUS_* flags
	 * (bits 0-5), so they are used interchangeably below; writing a bit
	 * back to the status register acknowledges that event.
	 */
	if (intfield & IEN_TXDONE) {
		writel_relaxed(IEN_TXDONE, qspi->regs + REG_STATUS);
		ret = IRQ_HANDLED;
	}

	/* Data has arrived in the RX FIFO: drain it into the rx buffer. */
	if (intfield & IEN_RXAVAILABLE) {
		writel_relaxed(IEN_RXAVAILABLE, qspi->regs + REG_STATUS);
		mchp_coreqspi_read_op(qspi);
		ret = IRQ_HANDLED;
	}

	/* RX done marks the end of the sequence: wake the submitter. */
	if (intfield & IEN_RXDONE) {
		writel_relaxed(IEN_RXDONE, qspi->regs + REG_STATUS);
		complete(&qspi->data_completion);
		ret = IRQ_HANDLED;
	}

	return ret;
}
348
/*
 * Program the clock divider and clock-idle polarity for @spi.
 *
 * The SPI clock is clk_hz / (2 * divider); the smallest divider that does
 * not exceed @max_freq is chosen. Returns 0 on success, -EINVAL when the
 * system clock rate is unavailable or the required divider is out of range.
 */
static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_device *spi,
				     u32 max_freq)
{
	unsigned long clk_hz;
	u32 control, baud_rate_val = 0;

	clk_hz = clk_get_rate(qspi->clk);
	if (!clk_hz)
		return -EINVAL;

	/*
	 * baud_rate_val is unsigned and DIV_ROUND_UP of a non-zero clk_hz is
	 * at least 1, so it can never fall below MIN_DIVIDER (0); the old
	 * "baud_rate_val < MIN_DIVIDER" test was always false and has been
	 * dropped - only the upper bound needs checking.
	 */
	baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * max_freq);
	if (baud_rate_val > MAX_DIVIDER) {
		dev_err(&spi->dev,
			"could not configure the clock for spi clock %d Hz & system clock %ld Hz\n",
			max_freq, clk_hz);
		return -EINVAL;
	}

	control = readl_relaxed(qspi->regs + REG_CONTROL);
	control &= ~CONTROL_CLKRATE_MASK;
	control |= baud_rate_val << CONTROL_CLKRATE_SHIFT;
	writel_relaxed(control, qspi->regs + REG_CONTROL);
	/* Re-read so the polarity update below is applied to the live value. */
	control = readl_relaxed(qspi->regs + REG_CONTROL);

	/* Mode 3 (CPOL=1, CPHA=1) idles the clock high; all others idle low. */
	if ((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA))
		control |= CONTROL_CLKIDLE;
	else
		control &= ~CONTROL_CLKIDLE;

	writel_relaxed(control, qspi->regs + REG_CONTROL);

	return 0;
}
382
mchp_coreqspi_setup_op(struct spi_device * spi_dev)383 static int mchp_coreqspi_setup_op(struct spi_device *spi_dev)
384 {
385 struct spi_controller *ctlr = spi_dev->controller;
386 struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr);
387 u32 control = readl_relaxed(qspi->regs + REG_CONTROL);
388
389 control |= (CONTROL_MASTER | CONTROL_ENABLE);
390 control &= ~CONTROL_CLKIDLE;
391 writel_relaxed(control, qspi->regs + REG_CONTROL);
392
393 return 0;
394 }
395
/*
 * Program the FRAMES/FRAMESUP registers with the command, address, data and
 * dummy-cycle layout for a single spi-mem operation.
 */
static inline void mchp_coreqspi_config_op(struct mchp_coreqspi *qspi, const struct spi_mem_op *op)
{
	u32 idle_cycles = 0;
	int total_bytes, cmd_bytes, frames, ctrl;

	cmd_bytes = op->cmd.nbytes + op->addr.nbytes;
	total_bytes = cmd_bytes + op->data.nbytes;

	/*
	 * As per the coreQSPI IP spec,the number of command and data bytes are
	 * controlled by the frames register for each SPI sequence. This supports
	 * the SPI flash memory read and writes sequences as below. so configure
	 * the cmd and total bytes accordingly.
	 * ---------------------------------------------------------------------
	 * TOTAL BYTES  |  CMD BYTES  |  What happens                           |
	 * ______________________________________________________________________
	 *              |             |                                         |
	 *     1        |   1         |  The SPI core will transmit a single    |
	 *              |             |  byte and receive data is discarded     |
	 *              |             |                                         |
	 *     1        |   0         |  The SPI core will transmit a single    |
	 *              |             |  byte and return a single byte          |
	 *              |             |                                         |
	 *     10       |   4         |  The SPI core will transmit 4 command   |
	 *              |             |  bytes discarding the receive data and  |
	 *              |             |  transmits 6 dummy bytes returning the  |
	 *              |             |  6 received bytes and return a single   |
	 *              |             |  byte                                   |
	 *              |             |                                         |
	 *     10       |   10        |  The SPI core will transmit 10 command  |
	 *              |             |                                         |
	 *     10       |    0        |  The SPI core will transmit 10 command  |
	 *              |             |  bytes and returning 10 received bytes  |
	 * ______________________________________________________________________
	 */
	/* Only reads keep a separate cmd-byte count; writes send everything. */
	if (!(op->data.dir == SPI_MEM_DATA_IN))
		cmd_bytes = total_bytes;

	/*
	 * NOTE(review): the upper length bits are written unshifted (masked
	 * with GENMASK(31, 16)) - presumably REG_FRAMESUP expects them in
	 * that position; confirm against the coreQSPI register map.
	 */
	frames = total_bytes & BYTESUPPER_MASK;
	writel_relaxed(frames, qspi->regs + REG_FRAMESUP);
	frames = total_bytes & BYTESLOWER_MASK;
	frames |= cmd_bytes << FRAMES_CMDBYTES_SHIFT;

	/* Convert dummy bytes into clock cycles for the configured width. */
	if (op->dummy.buswidth)
		idle_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;

	frames |= idle_cycles << FRAMES_IDLE_SHIFT;
	ctrl = readl_relaxed(qspi->regs + REG_CONTROL);

	/* Bit 25 must be set whenever a multi-line (mode12) mode is active. */
	if (ctrl & CONTROL_MODE12_MASK)
		frames |= (1 << FRAMES_SHIFT);

	frames |= FRAMES_FLAGWORD;
	writel_relaxed(frames, qspi->regs + REG_FRAMES);
}
450
mchp_coreqspi_wait_for_ready(struct mchp_coreqspi * qspi)451 static int mchp_coreqspi_wait_for_ready(struct mchp_coreqspi *qspi)
452 {
453 u32 status;
454
455 return readl_poll_timeout(qspi->regs + REG_STATUS, status,
456 (status & STATUS_READY), 0,
457 TIMEOUT_MS);
458 }
459
mchp_coreqspi_exec_op(struct spi_mem * mem,const struct spi_mem_op * op)460 static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
461 {
462 struct mchp_coreqspi *qspi = spi_controller_get_devdata
463 (mem->spi->controller);
464 u32 address = op->addr.val;
465 u8 opcode = op->cmd.opcode;
466 u8 opaddr[5];
467 int err, i;
468
469 mutex_lock(&qspi->op_lock);
470 err = mchp_coreqspi_wait_for_ready(qspi);
471 if (err) {
472 dev_err(&mem->spi->dev, "Timeout waiting on QSPI ready.\n");
473 goto error;
474 }
475
476 err = mchp_coreqspi_setup_clock(qspi, mem->spi, op->max_freq);
477 if (err)
478 goto error;
479
480 err = mchp_coreqspi_set_mode(qspi, op);
481 if (err)
482 goto error;
483
484 reinit_completion(&qspi->data_completion);
485 mchp_coreqspi_config_op(qspi, op);
486 if (op->cmd.opcode) {
487 qspi->txbuf = &opcode;
488 qspi->rxbuf = NULL;
489 qspi->tx_len = op->cmd.nbytes;
490 qspi->rx_len = 0;
491 mchp_coreqspi_write_op(qspi);
492 }
493
494 qspi->txbuf = &opaddr[0];
495 if (op->addr.nbytes) {
496 for (i = 0; i < op->addr.nbytes; i++)
497 qspi->txbuf[i] = address >> (8 * (op->addr.nbytes - i - 1));
498
499 qspi->rxbuf = NULL;
500 qspi->tx_len = op->addr.nbytes;
501 qspi->rx_len = 0;
502 mchp_coreqspi_write_op(qspi);
503 }
504
505 if (op->data.nbytes) {
506 if (op->data.dir == SPI_MEM_DATA_OUT) {
507 qspi->txbuf = (u8 *)op->data.buf.out;
508 qspi->rxbuf = NULL;
509 qspi->rx_len = 0;
510 qspi->tx_len = op->data.nbytes;
511 mchp_coreqspi_write_op(qspi);
512 } else {
513 qspi->txbuf = NULL;
514 qspi->rxbuf = (u8 *)op->data.buf.in;
515 qspi->rx_len = op->data.nbytes;
516 qspi->tx_len = 0;
517 }
518 }
519
520 mchp_coreqspi_enable_ints(qspi);
521
522 if (!wait_for_completion_timeout(&qspi->data_completion, msecs_to_jiffies(1000)))
523 err = -ETIMEDOUT;
524
525 error:
526 mutex_unlock(&qspi->op_lock);
527 mchp_coreqspi_disable_ints(qspi);
528
529 return err;
530 }
531
mchp_coreqspi_supports_op(struct spi_mem * mem,const struct spi_mem_op * op)532 static bool mchp_coreqspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
533 {
534 struct mchp_coreqspi *qspi = spi_controller_get_devdata(mem->spi->controller);
535 unsigned long clk_hz;
536 u32 baud_rate_val;
537
538 if (!spi_mem_default_supports_op(mem, op))
539 return false;
540
541 if ((op->data.buswidth == 4 || op->data.buswidth == 2) &&
542 (op->cmd.buswidth == 1 && (op->addr.buswidth == 1 || op->addr.buswidth == 0))) {
543 /*
544 * If the command and address are on DQ0 only, then this
545 * controller doesn't support sending data on dual and
546 * quad lines. but it supports reading data on dual and
547 * quad lines with same configuration as command and
548 * address on DQ0.
549 * i.e. The control register[15:13] :EX_RO(read only) is
550 * meant only for the command and address are on DQ0 but
551 * not to write data, it is just to read.
552 * Ex: 0x34h is Quad Load Program Data which is not
553 * supported. Then the spi-mem layer will iterate over
554 * each command and it will chose the supported one.
555 */
556 if (op->data.dir == SPI_MEM_DATA_OUT)
557 return false;
558 }
559
560 clk_hz = clk_get_rate(qspi->clk);
561 if (!clk_hz)
562 return false;
563
564 baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * op->max_freq);
565 if (baud_rate_val > MAX_DIVIDER || baud_rate_val < MIN_DIVIDER)
566 return false;
567
568 return true;
569 }
570
mchp_coreqspi_adjust_op_size(struct spi_mem * mem,struct spi_mem_op * op)571 static int mchp_coreqspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
572 {
573 if (op->data.dir == SPI_MEM_DATA_OUT || op->data.dir == SPI_MEM_DATA_IN) {
574 if (op->data.nbytes > MAX_DATA_CMD_LEN)
575 op->data.nbytes = MAX_DATA_CMD_LEN;
576 }
577
578 return 0;
579 }
580
/* spi-mem callbacks: size clamping, capability filtering and op execution. */
static const struct spi_controller_mem_ops mchp_coreqspi_mem_ops = {
	.adjust_op_size = mchp_coreqspi_adjust_op_size,
	.supports_op = mchp_coreqspi_supports_op,
	.exec_op = mchp_coreqspi_exec_op,
};
586
/* Let the spi-mem core request a different clock frequency per operation. */
static const struct spi_controller_mem_caps mchp_coreqspi_mem_caps = {
	.per_op_freq = true,
};
590
/*
 * Release the controller after a message. Pairs with
 * mchp_coreqspi_prepare_message(), which returns with op_lock held.
 * Always returns 0.
 */
static int mchp_coreqspi_unprepare_message(struct spi_controller *ctlr, struct spi_message *m)
{
	struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr);

	/*
	 * This delay is required for the driver to function correctly,
	 * but no explanation has been determined for why it is required.
	 */
	udelay(750);

	mutex_unlock(&qspi->op_lock);

	return 0;
}
605
mchp_coreqspi_prepare_message(struct spi_controller * ctlr,struct spi_message * m)606 static int mchp_coreqspi_prepare_message(struct spi_controller *ctlr, struct spi_message *m)
607 {
608 struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr);
609 struct spi_transfer *t = NULL;
610 u32 control, frames;
611 u32 total_bytes = 0, cmd_bytes = 0, idle_cycles = 0;
612 int ret;
613 bool quad = false, dual = false;
614
615 mutex_lock(&qspi->op_lock);
616 ret = mchp_coreqspi_wait_for_ready(qspi);
617 if (ret) {
618 mutex_unlock(&qspi->op_lock);
619 dev_err(&ctlr->dev, "Timeout waiting on QSPI ready.\n");
620 return ret;
621 }
622
623 ret = mchp_coreqspi_setup_clock(qspi, m->spi, m->spi->max_speed_hz);
624 if (ret) {
625 mutex_unlock(&qspi->op_lock);
626 return ret;
627 }
628
629 control = readl_relaxed(qspi->regs + REG_CONTROL);
630 control &= ~(CONTROL_MODE12_MASK | CONTROL_MODE0);
631 writel_relaxed(control, qspi->regs + REG_CONTROL);
632
633 reinit_completion(&qspi->data_completion);
634
635 list_for_each_entry(t, &m->transfers, transfer_list) {
636 total_bytes += t->len;
637 if (!cmd_bytes && !(t->tx_buf && t->rx_buf))
638 cmd_bytes = t->len;
639 if (!t->rx_buf)
640 cmd_bytes = total_bytes;
641 if (t->tx_nbits == SPI_NBITS_QUAD || t->rx_nbits == SPI_NBITS_QUAD)
642 quad = true;
643 else if (t->tx_nbits == SPI_NBITS_DUAL || t->rx_nbits == SPI_NBITS_DUAL)
644 dual = true;
645 }
646
647 control = readl_relaxed(qspi->regs + REG_CONTROL);
648 if (quad) {
649 control |= (CONTROL_MODE0 | CONTROL_MODE12_EX_RW);
650 } else if (dual) {
651 control &= ~CONTROL_MODE0;
652 control |= CONTROL_MODE12_FULL;
653 } else {
654 control &= ~(CONTROL_MODE12_MASK | CONTROL_MODE0);
655 }
656 writel_relaxed(control, qspi->regs + REG_CONTROL);
657
658 frames = total_bytes & BYTESUPPER_MASK;
659 writel_relaxed(frames, qspi->regs + REG_FRAMESUP);
660 frames = total_bytes & BYTESLOWER_MASK;
661 frames |= cmd_bytes << FRAMES_CMDBYTES_SHIFT;
662 frames |= idle_cycles << FRAMES_IDLE_SHIFT;
663 control = readl_relaxed(qspi->regs + REG_CONTROL);
664 if (control & CONTROL_MODE12_MASK)
665 frames |= (1 << FRAMES_SHIFT);
666
667 frames |= FRAMES_FLAGWORD;
668 writel_relaxed(frames, qspi->regs + REG_FRAMES);
669
670 return 0;
671 };
672
mchp_coreqspi_transfer_one(struct spi_controller * ctlr,struct spi_device * spi,struct spi_transfer * t)673 static int mchp_coreqspi_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
674 struct spi_transfer *t)
675 {
676 struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr);
677
678 qspi->tx_len = t->len;
679
680 if (t->tx_buf)
681 qspi->txbuf = (u8 *)t->tx_buf;
682
683 if (!t->rx_buf) {
684 mchp_coreqspi_write_op(qspi);
685 } else {
686 qspi->rxbuf = (u8 *)t->rx_buf;
687 qspi->rx_len = t->len;
688 mchp_coreqspi_write_read_op(qspi);
689 }
690
691 return 0;
692 }
693
/*
 * Probe: map registers, grab the clock and IRQ, then register a spi-mem
 * capable host controller. All resources are devm-managed, so there is no
 * explicit error unwinding or release path here.
 */
static int mchp_coreqspi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct mchp_coreqspi *qspi;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	int ret;

	ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*qspi));
	if (!ctlr)
		return dev_err_probe(&pdev->dev, -ENOMEM,
				     "unable to allocate host for QSPI controller\n");

	qspi = spi_controller_get_devdata(ctlr);
	platform_set_drvdata(pdev, qspi);

	qspi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qspi->regs))
		return dev_err_probe(&pdev->dev, PTR_ERR(qspi->regs),
				     "failed to map registers\n");

	/* Clock is enabled for the whole device lifetime (devm-managed). */
	qspi->clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(qspi->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(qspi->clk),
				     "could not get clock\n");

	/* Must be initialised before the IRQ is requested below. */
	init_completion(&qspi->data_completion);
	mutex_init(&qspi->op_lock);

	qspi->irq = platform_get_irq(pdev, 0);
	if (qspi->irq < 0)
		return qspi->irq;

	ret = devm_request_irq(&pdev->dev, qspi->irq, mchp_coreqspi_isr,
			       IRQF_SHARED, pdev->name, qspi);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed %d\n", ret);
		return ret;
	}

	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
	ctlr->mem_ops = &mchp_coreqspi_mem_ops;
	ctlr->mem_caps = &mchp_coreqspi_mem_caps;
	ctlr->setup = mchp_coreqspi_setup_op;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
			  SPI_TX_DUAL | SPI_TX_QUAD;
	ctlr->dev.of_node = np;
	/* NOTE(review): divisor 30 presumably reflects the largest usable
	 * clock divider - confirm against the MAX_DIVIDER (16) formula. */
	ctlr->min_speed_hz = clk_get_rate(qspi->clk) / 30;
	ctlr->prepare_message = mchp_coreqspi_prepare_message;
	ctlr->unprepare_message = mchp_coreqspi_unprepare_message;
	ctlr->transfer_one = mchp_coreqspi_transfer_one;
	ctlr->num_chipselect = 2;
	ctlr->use_gpio_descriptors = true;

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "spi_register_controller failed\n");

	return 0;
}
755
mchp_coreqspi_remove(struct platform_device * pdev)756 static void mchp_coreqspi_remove(struct platform_device *pdev)
757 {
758 struct mchp_coreqspi *qspi = platform_get_drvdata(pdev);
759 u32 control = readl_relaxed(qspi->regs + REG_CONTROL);
760
761 mchp_coreqspi_disable_ints(qspi);
762 control &= ~CONTROL_ENABLE;
763 writel_relaxed(control, qspi->regs + REG_CONTROL);
764 }
765
/* Devicetree match table. */
static const struct of_device_id mchp_coreqspi_of_match[] = {
	{ .compatible = "microchip,coreqspi-rtl-v2" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mchp_coreqspi_of_match);
771
/* Platform driver glue: probe/remove handle controller bring-up/teardown. */
static struct platform_driver mchp_coreqspi_driver = {
	.probe = mchp_coreqspi_probe,
	.driver = {
		.name = "microchip,coreqspi",
		.of_match_table = mchp_coreqspi_of_match,
	},
	.remove = mchp_coreqspi_remove,
};
module_platform_driver(mchp_coreqspi_driver);
781
782 MODULE_AUTHOR("Naga Sureshkumar Relli <nagasuresh.relli@microchip.com");
783 MODULE_DESCRIPTION("Microchip coreQSPI QSPI controller driver");
784 MODULE_LICENSE("GPL");
785