1 // SPDX-License-Identifier: (GPL-2.0)
2 /*
3 * Microchip coreQSPI QSPI controller driver
4 *
5 * Copyright (C) 2018-2022 Microchip Technology Inc. and its subsidiaries
6 *
7 * Author: Naga Sureshkumar Relli <nagasuresh.relli@microchip.com>
8 *
9 */
10
11 #include <linux/clk.h>
12 #include <linux/err.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/iopoll.h>
17 #include <linux/module.h>
18 #include <linux/of.h>
19 #include <linux/of_irq.h>
20 #include <linux/platform_device.h>
21 #include <linux/spi/spi.h>
22 #include <linux/spi/spi-mem.h>
23
24 /*
25 * QSPI Control register mask defines
26 */
27 #define CONTROL_ENABLE BIT(0)
28 #define CONTROL_MASTER BIT(1)
29 #define CONTROL_XIP BIT(2)
30 #define CONTROL_XIPADDR BIT(3)
31 #define CONTROL_CLKIDLE BIT(10)
32 #define CONTROL_SAMPLE_MASK GENMASK(12, 11)
33 #define CONTROL_MODE0 BIT(13)
34 #define CONTROL_MODE12_MASK GENMASK(15, 14)
35 #define CONTROL_MODE12_EX_RO BIT(14)
36 #define CONTROL_MODE12_EX_RW BIT(15)
37 #define CONTROL_MODE12_FULL GENMASK(15, 14)
38 #define CONTROL_FLAGSX4 BIT(16)
39 #define CONTROL_CLKRATE_MASK GENMASK(27, 24)
40 #define CONTROL_CLKRATE_SHIFT 24
41
42 /*
43 * QSPI Frames register mask defines
44 */
45 #define FRAMES_TOTALBYTES_MASK GENMASK(15, 0)
46 #define FRAMES_CMDBYTES_MASK GENMASK(24, 16)
47 #define FRAMES_CMDBYTES_SHIFT 16
48 #define FRAMES_SHIFT 25
49 #define FRAMES_IDLE_MASK GENMASK(29, 26)
50 #define FRAMES_IDLE_SHIFT 26
51 #define FRAMES_FLAGBYTE BIT(30)
52 #define FRAMES_FLAGWORD BIT(31)
53
54 /*
55 * QSPI Interrupt Enable register mask defines
56 */
57 #define IEN_TXDONE BIT(0)
58 #define IEN_RXDONE BIT(1)
59 #define IEN_RXAVAILABLE BIT(2)
60 #define IEN_TXAVAILABLE BIT(3)
61 #define IEN_RXFIFOEMPTY BIT(4)
62 #define IEN_TXFIFOFULL BIT(5)
63
64 /*
65 * QSPI Status register mask defines
66 */
67 #define STATUS_TXDONE BIT(0)
68 #define STATUS_RXDONE BIT(1)
69 #define STATUS_RXAVAILABLE BIT(2)
70 #define STATUS_TXAVAILABLE BIT(3)
71 #define STATUS_RXFIFOEMPTY BIT(4)
72 #define STATUS_TXFIFOFULL BIT(5)
73 #define STATUS_READY BIT(7)
74 #define STATUS_FLAGSX4 BIT(8)
75 #define STATUS_MASK GENMASK(8, 0)
76
77 /*
78 * QSPI Direct Access register defines
79 */
80 #define DIRECT_ACCESS_EN_SSEL BIT(0)
81 #define DIRECT_ACCESS_OP_SSEL BIT(1)
82 #define DIRECT_ACCESS_OP_SSEL_SHIFT 1
83
84 #define BYTESUPPER_MASK GENMASK(31, 16)
85 #define BYTESLOWER_MASK GENMASK(15, 0)
86
87 #define MAX_DIVIDER 16
88 #define MIN_DIVIDER 0
89 #define MAX_DATA_CMD_LEN 256
90
91 /* QSPI ready time out value */
92 #define TIMEOUT_MS 500
93
94 /*
95 * QSPI Register offsets.
96 */
97 #define REG_CONTROL (0x00)
98 #define REG_FRAMES (0x04)
99 #define REG_IEN (0x0c)
100 #define REG_STATUS (0x10)
101 #define REG_DIRECT_ACCESS (0x14)
102 #define REG_UPPER_ACCESS (0x18)
103 #define REG_RX_DATA (0x40)
104 #define REG_TX_DATA (0x44)
105 #define REG_X4_RX_DATA (0x48)
106 #define REG_X4_TX_DATA (0x4c)
107 #define REG_FRAMESUP (0x50)
108
/**
 * struct mchp_coreqspi - Defines qspi driver instance
 * @regs: Virtual address of the QSPI controller registers
 * @clk: QSPI Operating clock
 * @data_completion: completion structure, signalled by the ISR on RXDONE
 * @op_lock: lock access to the device
 * @txbuf: TX buffer (NULL means "send dummy bytes" in the duplex path)
 * @rxbuf: RX buffer
 * @irq: IRQ number
 * @tx_len: Number of bytes left to transfer
 * @rx_len: Number of bytes left to receive
 */
struct mchp_coreqspi {
	void __iomem *regs;
	struct clk *clk;
	struct completion data_completion;
	struct mutex op_lock; /* lock access to the device */
	u8 *txbuf;
	u8 *rxbuf;
	int irq;
	int tx_len;
	int rx_len;
};
132
mchp_coreqspi_set_mode(struct mchp_coreqspi * qspi,const struct spi_mem_op * op)133 static int mchp_coreqspi_set_mode(struct mchp_coreqspi *qspi, const struct spi_mem_op *op)
134 {
135 u32 control = readl_relaxed(qspi->regs + REG_CONTROL);
136
137 /*
138 * The operating mode can be configured based on the command that needs to be send.
139 * bits[15:14]: Sets whether multiple bit SPI operates in normal, extended or full modes.
140 * 00: Normal (single DQ0 TX and single DQ1 RX lines)
141 * 01: Extended RO (command and address bytes on DQ0 only)
142 * 10: Extended RW (command byte on DQ0 only)
143 * 11: Full. (command and address are on all DQ lines)
144 * bit[13]: Sets whether multiple bit SPI uses 2 or 4 bits of data
145 * 0: 2-bits (BSPI)
146 * 1: 4-bits (QSPI)
147 */
148 if (op->data.buswidth == 4 || op->data.buswidth == 2) {
149 control &= ~CONTROL_MODE12_MASK;
150 if (op->cmd.buswidth == 1 && (op->addr.buswidth == 1 || op->addr.buswidth == 0))
151 control |= CONTROL_MODE12_EX_RO;
152 else if (op->cmd.buswidth == 1)
153 control |= CONTROL_MODE12_EX_RW;
154 else
155 control |= CONTROL_MODE12_FULL;
156
157 control |= CONTROL_MODE0;
158 } else {
159 control &= ~(CONTROL_MODE12_MASK |
160 CONTROL_MODE0);
161 }
162
163 writel_relaxed(control, qspi->regs + REG_CONTROL);
164
165 return 0;
166 }
167
/*
 * Drive the software-controlled select line. OP_SSEL is active low in the
 * DIRECT_ACCESS register: cleared to assert the chip, set to release it.
 */
static void mchp_coreqspi_set_cs(struct spi_device *spi, bool enable)
{
	struct mchp_coreqspi *qspi = spi_controller_get_devdata(spi->controller);
	u32 reg;

	reg = readl(qspi->regs + REG_DIRECT_ACCESS) & ~DIRECT_ACCESS_OP_SSEL;
	if (!enable)
		reg |= DIRECT_ACCESS_OP_SSEL;

	writel(reg, qspi->regs + REG_DIRECT_ACCESS);
}
180
mchp_coreqspi_setup(struct spi_device * spi)181 static int mchp_coreqspi_setup(struct spi_device *spi)
182 {
183 struct mchp_coreqspi *qspi = spi_controller_get_devdata(spi->controller);
184 u32 val;
185
186 /*
187 * Active low devices need to be specifically set to their inactive
188 * states during probe.
189 */
190 if (spi->mode & SPI_CS_HIGH)
191 return 0;
192
193 val = readl(qspi->regs + REG_DIRECT_ACCESS);
194 val |= DIRECT_ACCESS_OP_SSEL;
195 writel(val, qspi->regs + REG_DIRECT_ACCESS);
196
197 return 0;
198 }
199
/*
 * Drain the RX FIFO into qspi->rxbuf: whole 32-bit words through the X4
 * data register first, then the remaining tail one byte at a time.
 * NOTE(review): the word loop stores via a u32 cast — assumes rxbuf is
 * 4-byte aligned for lengths >= 4; confirm against callers.
 */
static void mchp_coreqspi_read_op(struct mchp_coreqspi *qspi)
{
	u32 control, data;

	if (!qspi->rx_len)
		return;

	control = readl_relaxed(qspi->regs + REG_CONTROL);

	/*
	 * Read 4-bytes from the SPI FIFO in single transaction and then read
	 * the remaining data byte wise. FLAGSX4 selects 4-bytes-per-access
	 * FIFO mode.
	 */
	control |= CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->rx_len >= 4) {
		/* Busy-wait for data; no timeout in this polled path. */
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
			;
		data = readl_relaxed(qspi->regs + REG_X4_RX_DATA);
		*(u32 *)qspi->rxbuf = data;
		qspi->rxbuf += 4;
		qspi->rx_len -= 4;
	}

	/* Back to byte-wide FIFO accesses for the tail. */
	control &= ~CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->rx_len--) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
			;
		data = readl_relaxed(qspi->regs + REG_RX_DATA);
		*qspi->rxbuf++ = (data & 0xFF);
	}
}
235
/*
 * Push qspi->txbuf into the TX FIFO: whole 32-bit words through the X4
 * data register first, then the remaining tail one byte at a time.
 * NOTE(review): the word loop loads via a u32 cast — assumes txbuf is
 * 4-byte aligned for lengths >= 4; confirm against callers.
 */
static void mchp_coreqspi_write_op(struct mchp_coreqspi *qspi)
{
	u32 control, data;

	/* FLAGSX4 selects 4-bytes-per-access FIFO mode. */
	control = readl_relaxed(qspi->regs + REG_CONTROL);
	control |= CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->tx_len >= 4) {
		/* Busy-wait for FIFO space; no timeout in this polled path. */
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
			;
		data = *(u32 *)qspi->txbuf;
		qspi->txbuf += 4;
		qspi->tx_len -= 4;
		writel_relaxed(data, qspi->regs + REG_X4_TX_DATA);
	}

	/* Back to byte-wide FIFO accesses for the tail. */
	control &= ~CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->tx_len--) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
			;
		data = *qspi->txbuf++;
		writel_relaxed(data, qspi->regs + REG_TX_DATA);
	}
}
263
/*
 * Full-duplex transfer: clock out tx_len bytes while capturing the same
 * number of received bytes. A NULL txbuf means "send 0xaa dummy bytes"
 * (the RX-driven case). Word-wide FIFO accesses are used while at least
 * 4 bytes remain, then the tail is handled byte-wise.
 */
static void mchp_coreqspi_write_read_op(struct mchp_coreqspi *qspi)
{
	u32 control, data;

	/* Duplex: every transmitted byte produces one received byte. */
	qspi->rx_len = qspi->tx_len;

	control = readl_relaxed(qspi->regs + REG_CONTROL);
	control |= CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->tx_len >= 4) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
			;

		data = qspi->txbuf ? *((u32 *)qspi->txbuf) : 0xaa;
		if (qspi->txbuf)
			qspi->txbuf += 4;
		qspi->tx_len -= 4;
		writel_relaxed(data, qspi->regs + REG_X4_TX_DATA);

		/*
		 * The rx FIFO is twice the size of the tx FIFO, so there is
		 * no requirement to block transmission if receive data is not
		 * ready, and it is fine to let the tx FIFO completely fill
		 * without reading anything from the rx FIFO. Once the tx FIFO
		 * has been filled and becomes non-full due to a transmission
		 * occurring there will always be something to receive.
		 * IOW, this is safe as TX_FIFO_SIZE + 4 < 2 * TX_FIFO_SIZE
		 */
		if (qspi->rx_len >= 4) {
			if (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXAVAILABLE) {
				data = readl_relaxed(qspi->regs + REG_X4_RX_DATA);
				*(u32 *)qspi->rxbuf = data;
				qspi->rxbuf += 4;
				qspi->rx_len -= 4;
			}
		}
	}

	/*
	 * Since transmission is not being blocked by clearing the rx FIFO,
	 * loop here until all received data "leaked" by the loop above has
	 * been dealt with.
	 */
	while (qspi->rx_len >= 4) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
			;
		data = readl_relaxed(qspi->regs + REG_X4_RX_DATA);
		*(u32 *)qspi->rxbuf = data;
		qspi->rxbuf += 4;
		qspi->rx_len -= 4;
	}

	/*
	 * Since rx_len and tx_len must be < 4 bytes at this point, there's no
	 * concern about overflowing the rx or tx FIFOs any longer. It's
	 * therefore safe to loop over the remainder of the transmit data before
	 * handling the remaining receive data.
	 */
	if (!qspi->tx_len)
		return;

	/* Switch to byte-wide FIFO accesses for the sub-word tails. */
	control &= ~CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->tx_len--) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
			;
		data = qspi->txbuf ? *qspi->txbuf : 0xaa;
		/* NOTE(review): txbuf is advanced even when NULL — harmless
		 * as it is never dereferenced then, but technically UB. */
		qspi->txbuf++;
		writel_relaxed(data, qspi->regs + REG_TX_DATA);
	}

	while (qspi->rx_len--) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
			;
		data = readl_relaxed(qspi->regs + REG_RX_DATA);
		*qspi->rxbuf++ = (data & 0xFF);
	}
}
344
mchp_coreqspi_enable_ints(struct mchp_coreqspi * qspi)345 static void mchp_coreqspi_enable_ints(struct mchp_coreqspi *qspi)
346 {
347 u32 mask = IEN_TXDONE |
348 IEN_RXDONE |
349 IEN_RXAVAILABLE;
350
351 writel_relaxed(mask, qspi->regs + REG_IEN);
352 }
353
/* Mask all QSPI interrupt sources. */
static void mchp_coreqspi_disable_ints(struct mchp_coreqspi *qspi)
{
	writel_relaxed(0, qspi->regs + REG_IEN);
}
358
/*
 * Interrupt handler: each serviced condition is acknowledged by writing its
 * bit back to the status register. The IEN_* masks share bit positions with
 * the STATUS_* bits (both BIT(0)..BIT(2)), so they are used interchangeably
 * against REG_STATUS here. RXAVAILABLE drains the RX FIFO; RXDONE signals
 * completion of the whole operation to the waiting thread.
 */
static irqreturn_t mchp_coreqspi_isr(int irq, void *dev_id)
{
	struct mchp_coreqspi *qspi = (struct mchp_coreqspi *)dev_id;
	irqreturn_t ret = IRQ_NONE;
	int intfield = readl_relaxed(qspi->regs + REG_STATUS) & STATUS_MASK;

	/* Shared IRQ line: nothing pending means it was not ours. */
	if (intfield == 0)
		return ret;

	if (intfield & IEN_TXDONE) {
		writel_relaxed(IEN_TXDONE, qspi->regs + REG_STATUS);
		ret = IRQ_HANDLED;
	}

	if (intfield & IEN_RXAVAILABLE) {
		writel_relaxed(IEN_RXAVAILABLE, qspi->regs + REG_STATUS);
		mchp_coreqspi_read_op(qspi);
		ret = IRQ_HANDLED;
	}

	if (intfield & IEN_RXDONE) {
		writel_relaxed(IEN_RXDONE, qspi->regs + REG_STATUS);
		complete(&qspi->data_completion);
		ret = IRQ_HANDLED;
	}

	return ret;
}
387
/*
 * Program the clock divider and idle polarity for the requested SPI
 * frequency. Returns -EINVAL if the input clock is unavailable or the
 * required divider exceeds the hardware maximum.
 */
static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_device *spi,
				     u32 max_freq)
{
	unsigned long clk_hz;
	u32 control, baud_rate_val = 0;

	clk_hz = clk_get_rate(qspi->clk);
	if (!clk_hz)
		return -EINVAL;

	/*
	 * SPI clock = clk_hz / (2 * baud_rate_val). With clk_hz non-zero the
	 * divider is always >= 1, so only an excessive divider needs
	 * rejecting. (The previous "baud_rate_val < MIN_DIVIDER" check
	 * compared an unsigned value against 0 and was always false.)
	 */
	baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * max_freq);
	if (baud_rate_val > MAX_DIVIDER) {
		dev_err(&spi->dev,
			"could not configure the clock for spi clock %d Hz & system clock %ld Hz\n",
			max_freq, clk_hz);
		return -EINVAL;
	}

	control = readl_relaxed(qspi->regs + REG_CONTROL);
	control &= ~CONTROL_CLKRATE_MASK;
	control |= baud_rate_val << CONTROL_CLKRATE_SHIFT;
	writel_relaxed(control, qspi->regs + REG_CONTROL);
	/* Read back before updating CLKIDLE, as the original sequence did. */
	control = readl_relaxed(qspi->regs + REG_CONTROL);

	/* CLKIDLE reflects SPI mode 3 (CPOL=1, CPHA=1); all others clear it. */
	if ((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA))
		control |= CONTROL_CLKIDLE;
	else
		control &= ~CONTROL_CLKIDLE;

	writel_relaxed(control, qspi->regs + REG_CONTROL);

	return 0;
}
421
/*
 * Program the FRAMES/FRAMESUP registers for a spi-mem operation: total byte
 * count, command-phase byte count, dummy idle cycles and word/byte flags.
 */
static void mchp_coreqspi_config_op(struct mchp_coreqspi *qspi, const struct spi_mem_op *op)
{
	u32 idle_cycles = 0;
	int total_bytes, cmd_bytes, frames, ctrl;

	cmd_bytes = op->cmd.nbytes + op->addr.nbytes;
	total_bytes = cmd_bytes + op->data.nbytes;

	/*
	 * As per the coreQSPI IP spec, the number of command and data bytes
	 * are controlled by the frames register for each SPI sequence. This
	 * supports the SPI flash memory read and write sequences as below,
	 * so configure the cmd and total bytes accordingly.
	 * ---------------------------------------------------------------------
	 * TOTAL BYTES  |  CMD BYTES  |  What happens                          |
	 * ______________________________________________________________________
	 *              |             |                                        |
	 *     1        |   1         |  The SPI core will transmit a single   |
	 *              |             |  byte and receive data is discarded    |
	 *              |             |                                        |
	 *     1        |   0         |  The SPI core will transmit a single   |
	 *              |             |  byte and return a single byte         |
	 *              |             |                                        |
	 *     10       |   4         |  The SPI core will transmit 4 command  |
	 *              |             |  bytes discarding the receive data and |
	 *              |             |  transmits 6 dummy bytes returning the |
	 *              |             |  6 received bytes and return a single  |
	 *              |             |  byte                                  |
	 *              |             |                                        |
	 *     10       |   10        |  The SPI core will transmit 10 command |
	 *              |             |                                        |
	 *     10       |    0        |  The SPI core will transmit 10 command |
	 *              |             |  bytes and returning 10 received bytes |
	 * ______________________________________________________________________
	 */
	if (!(op->data.dir == SPI_MEM_DATA_IN))
		cmd_bytes = total_bytes;

	/*
	 * NOTE(review): FRAMESUP receives the upper half of the count
	 * unshifted (bits 31:16 kept in place) — presumably the register is
	 * laid out that way; confirm against the coreQSPI register spec.
	 */
	frames = total_bytes & BYTESUPPER_MASK;
	writel_relaxed(frames, qspi->regs + REG_FRAMESUP);
	frames = total_bytes & BYTESLOWER_MASK;
	frames |= cmd_bytes << FRAMES_CMDBYTES_SHIFT;

	/* Convert dummy bytes to clock cycles for the configured bus width. */
	if (op->dummy.buswidth)
		idle_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;

	frames |= idle_cycles << FRAMES_IDLE_SHIFT;
	ctrl = readl_relaxed(qspi->regs + REG_CONTROL);

	if (ctrl & CONTROL_MODE12_MASK)
		frames |= (1 << FRAMES_SHIFT);

	frames |= FRAMES_FLAGWORD;
	writel_relaxed(frames, qspi->regs + REG_FRAMES);
}
476
mchp_coreqspi_wait_for_ready(struct mchp_coreqspi * qspi)477 static int mchp_coreqspi_wait_for_ready(struct mchp_coreqspi *qspi)
478 {
479 u32 status;
480
481 return readl_poll_timeout(qspi->regs + REG_STATUS, status,
482 (status & STATUS_READY), 0,
483 TIMEOUT_MS);
484 }
485
mchp_coreqspi_exec_op(struct spi_mem * mem,const struct spi_mem_op * op)486 static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
487 {
488 struct mchp_coreqspi *qspi = spi_controller_get_devdata
489 (mem->spi->controller);
490 u32 address = op->addr.val;
491 u8 opcode = op->cmd.opcode;
492 u8 opaddr[5];
493 int err, i;
494
495 mutex_lock(&qspi->op_lock);
496 err = mchp_coreqspi_wait_for_ready(qspi);
497 if (err) {
498 dev_err(&mem->spi->dev, "Timeout waiting on QSPI ready.\n");
499 goto error;
500 }
501
502 err = mchp_coreqspi_setup_clock(qspi, mem->spi, op->max_freq);
503 if (err)
504 goto error;
505
506 err = mchp_coreqspi_set_mode(qspi, op);
507 if (err)
508 goto error;
509
510 reinit_completion(&qspi->data_completion);
511 mchp_coreqspi_config_op(qspi, op);
512 mchp_coreqspi_set_cs(mem->spi, true);
513 if (op->cmd.opcode) {
514 qspi->txbuf = &opcode;
515 qspi->rxbuf = NULL;
516 qspi->tx_len = op->cmd.nbytes;
517 qspi->rx_len = 0;
518 mchp_coreqspi_write_op(qspi);
519 }
520
521 qspi->txbuf = &opaddr[0];
522 if (op->addr.nbytes) {
523 for (i = 0; i < op->addr.nbytes; i++)
524 qspi->txbuf[i] = address >> (8 * (op->addr.nbytes - i - 1));
525
526 qspi->rxbuf = NULL;
527 qspi->tx_len = op->addr.nbytes;
528 qspi->rx_len = 0;
529 mchp_coreqspi_write_op(qspi);
530 }
531
532 if (op->data.nbytes) {
533 if (op->data.dir == SPI_MEM_DATA_OUT) {
534 qspi->txbuf = (u8 *)op->data.buf.out;
535 qspi->rxbuf = NULL;
536 qspi->rx_len = 0;
537 qspi->tx_len = op->data.nbytes;
538 mchp_coreqspi_write_op(qspi);
539 } else {
540 qspi->txbuf = NULL;
541 qspi->rxbuf = (u8 *)op->data.buf.in;
542 qspi->rx_len = op->data.nbytes;
543 qspi->tx_len = 0;
544 }
545 }
546
547 mchp_coreqspi_enable_ints(qspi);
548
549 if (!wait_for_completion_timeout(&qspi->data_completion, msecs_to_jiffies(1000)))
550 err = -ETIMEDOUT;
551
552 error:
553 mchp_coreqspi_set_cs(mem->spi, false);
554 mutex_unlock(&qspi->op_lock);
555 mchp_coreqspi_disable_ints(qspi);
556
557 return err;
558 }
559
mchp_coreqspi_supports_op(struct spi_mem * mem,const struct spi_mem_op * op)560 static bool mchp_coreqspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
561 {
562 if (!spi_mem_default_supports_op(mem, op))
563 return false;
564
565 if ((op->data.buswidth == 4 || op->data.buswidth == 2) &&
566 (op->cmd.buswidth == 1 && (op->addr.buswidth == 1 || op->addr.buswidth == 0))) {
567 /*
568 * If the command and address are on DQ0 only, then this
569 * controller doesn't support sending data on dual and
570 * quad lines. but it supports reading data on dual and
571 * quad lines with same configuration as command and
572 * address on DQ0.
573 * i.e. The control register[15:13] :EX_RO(read only) is
574 * meant only for the command and address are on DQ0 but
575 * not to write data, it is just to read.
576 * Ex: 0x34h is Quad Load Program Data which is not
577 * supported. Then the spi-mem layer will iterate over
578 * each command and it will chose the supported one.
579 */
580 if (op->data.dir == SPI_MEM_DATA_OUT)
581 return false;
582 }
583
584 return true;
585 }
586
mchp_coreqspi_adjust_op_size(struct spi_mem * mem,struct spi_mem_op * op)587 static int mchp_coreqspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
588 {
589 if (op->data.dir == SPI_MEM_DATA_OUT || op->data.dir == SPI_MEM_DATA_IN) {
590 if (op->data.nbytes > MAX_DATA_CMD_LEN)
591 op->data.nbytes = MAX_DATA_CMD_LEN;
592 }
593
594 return 0;
595 }
596
/* spi-mem operations implemented by this controller. */
static const struct spi_controller_mem_ops mchp_coreqspi_mem_ops = {
	.adjust_op_size = mchp_coreqspi_adjust_op_size,
	.supports_op = mchp_coreqspi_supports_op,
	.exec_op = mchp_coreqspi_exec_op,
};

/* Let the spi-mem core pass a per-operation clock frequency (op->max_freq). */
static const struct spi_controller_mem_caps mchp_coreqspi_mem_caps = {
	.per_op_freq = true,
};
606
/*
 * Message teardown: releases op_lock, which was taken (and intentionally
 * held across the whole message) by mchp_coreqspi_prepare_message().
 */
static int mchp_coreqspi_unprepare_message(struct spi_controller *ctlr, struct spi_message *m)
{
	struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr);

	/*
	 * This delay is required for the driver to function correctly,
	 * but no explanation has been determined for why it is required.
	 */
	udelay(750);

	mutex_unlock(&qspi->op_lock);

	return 0;
}
621
mchp_coreqspi_prepare_message(struct spi_controller * ctlr,struct spi_message * m)622 static int mchp_coreqspi_prepare_message(struct spi_controller *ctlr, struct spi_message *m)
623 {
624 struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr);
625 struct spi_transfer *t = NULL;
626 u32 control, frames;
627 u32 total_bytes = 0, cmd_bytes = 0, idle_cycles = 0;
628 int ret;
629 bool quad = false, dual = false;
630
631 mutex_lock(&qspi->op_lock);
632 ret = mchp_coreqspi_wait_for_ready(qspi);
633 if (ret) {
634 mutex_unlock(&qspi->op_lock);
635 dev_err(&ctlr->dev, "Timeout waiting on QSPI ready.\n");
636 return ret;
637 }
638
639 ret = mchp_coreqspi_setup_clock(qspi, m->spi, m->spi->max_speed_hz);
640 if (ret) {
641 mutex_unlock(&qspi->op_lock);
642 return ret;
643 }
644
645 control = readl_relaxed(qspi->regs + REG_CONTROL);
646 control &= ~(CONTROL_MODE12_MASK | CONTROL_MODE0);
647 writel_relaxed(control, qspi->regs + REG_CONTROL);
648
649 reinit_completion(&qspi->data_completion);
650
651 list_for_each_entry(t, &m->transfers, transfer_list) {
652 total_bytes += t->len;
653 if (!cmd_bytes && !(t->tx_buf && t->rx_buf))
654 cmd_bytes = t->len;
655 if (!t->rx_buf)
656 cmd_bytes = total_bytes;
657 if (t->tx_nbits == SPI_NBITS_QUAD || t->rx_nbits == SPI_NBITS_QUAD)
658 quad = true;
659 else if (t->tx_nbits == SPI_NBITS_DUAL || t->rx_nbits == SPI_NBITS_DUAL)
660 dual = true;
661 }
662
663 control = readl_relaxed(qspi->regs + REG_CONTROL);
664 if (quad) {
665 control |= (CONTROL_MODE0 | CONTROL_MODE12_EX_RW);
666 } else if (dual) {
667 control &= ~CONTROL_MODE0;
668 control |= CONTROL_MODE12_FULL;
669 } else {
670 control &= ~(CONTROL_MODE12_MASK | CONTROL_MODE0);
671 }
672 writel_relaxed(control, qspi->regs + REG_CONTROL);
673
674 frames = total_bytes & BYTESUPPER_MASK;
675 writel_relaxed(frames, qspi->regs + REG_FRAMESUP);
676 frames = total_bytes & BYTESLOWER_MASK;
677 frames |= cmd_bytes << FRAMES_CMDBYTES_SHIFT;
678 frames |= idle_cycles << FRAMES_IDLE_SHIFT;
679 control = readl_relaxed(qspi->regs + REG_CONTROL);
680 if (control & CONTROL_MODE12_MASK)
681 frames |= (1 << FRAMES_SHIFT);
682
683 frames |= FRAMES_FLAGWORD;
684 writel_relaxed(frames, qspi->regs + REG_FRAMES);
685
686 return 0;
687 };
688
mchp_coreqspi_transfer_one(struct spi_controller * ctlr,struct spi_device * spi,struct spi_transfer * t)689 static int mchp_coreqspi_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
690 struct spi_transfer *t)
691 {
692 struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr);
693 bool dual_quad = false;
694
695 qspi->tx_len = t->len;
696
697 if (t->tx_nbits == SPI_NBITS_QUAD || t->rx_nbits == SPI_NBITS_QUAD ||
698 t->tx_nbits == SPI_NBITS_DUAL ||
699 t->rx_nbits == SPI_NBITS_DUAL)
700 dual_quad = true;
701
702 if (t->tx_buf)
703 qspi->txbuf = (u8 *)t->tx_buf;
704
705 if (!t->rx_buf) {
706 mchp_coreqspi_write_op(qspi);
707 } else if (!dual_quad) {
708 qspi->rxbuf = (u8 *)t->rx_buf;
709 qspi->rx_len = t->len;
710 mchp_coreqspi_write_read_op(qspi);
711 } else {
712 qspi->rxbuf = (u8 *)t->rx_buf;
713 qspi->rx_len = t->len;
714 mchp_coreqspi_read_op(qspi);
715 }
716
717 return 0;
718 }
719
/*
 * Probe: map registers, enable the clock, request the (shared) IRQ, fill in
 * the spi_controller callbacks, enable the controller in master mode, switch
 * the chip select to software control and register the controller.
 * All resources are devm-managed except the registered controller, which is
 * torn down in mchp_coreqspi_remove().
 */
static int mchp_coreqspi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct mchp_coreqspi *qspi;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	int ret;
	u32 num_cs, val;

	ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*qspi));
	if (!ctlr)
		return -ENOMEM;

	qspi = spi_controller_get_devdata(ctlr);
	platform_set_drvdata(pdev, ctlr);

	qspi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qspi->regs))
		return dev_err_probe(&pdev->dev, PTR_ERR(qspi->regs),
				     "failed to map registers\n");

	qspi->clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(qspi->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(qspi->clk),
				     "could not get clock\n");

	init_completion(&qspi->data_completion);
	mutex_init(&qspi->op_lock);

	qspi->irq = platform_get_irq(pdev, 0);
	if (qspi->irq < 0)
		return qspi->irq;

	ret = devm_request_irq(&pdev->dev, qspi->irq, mchp_coreqspi_isr,
			       IRQF_SHARED, pdev->name, qspi);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed %d\n", ret);
		return ret;
	}

	/*
	 * The IP core only has a single CS, any more have to be provided via
	 * gpios
	 */
	if (of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs))
		num_cs = 1;

	ctlr->num_chipselect = num_cs;

	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
	ctlr->mem_ops = &mchp_coreqspi_mem_ops;
	ctlr->mem_caps = &mchp_coreqspi_mem_caps;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
			  SPI_TX_DUAL | SPI_TX_QUAD;
	ctlr->dev.of_node = np;
	/* NOTE(review): /30 presumably reflects the largest usable divider
	 * (2 * (MAX_DIVIDER - 1)); confirm against the coreQSPI spec. */
	ctlr->min_speed_hz = clk_get_rate(qspi->clk) / 30;
	ctlr->prepare_message = mchp_coreqspi_prepare_message;
	ctlr->unprepare_message = mchp_coreqspi_unprepare_message;
	ctlr->transfer_one = mchp_coreqspi_transfer_one;
	ctlr->setup = mchp_coreqspi_setup;
	ctlr->set_cs = mchp_coreqspi_set_cs;
	ctlr->use_gpio_descriptors = true;

	/* Enable the controller in master mode. */
	val = readl_relaxed(qspi->regs + REG_CONTROL);
	val |= (CONTROL_MASTER | CONTROL_ENABLE);
	writel_relaxed(val, qspi->regs + REG_CONTROL);

	/*
	 * Put cs into software controlled mode
	 */
	val = readl_relaxed(qspi->regs + REG_DIRECT_ACCESS);
	val |= DIRECT_ACCESS_EN_SSEL;
	writel(val, qspi->regs + REG_DIRECT_ACCESS);

	ret = spi_register_controller(ctlr);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "spi_register_controller failed\n");

	return 0;
}
801
/*
 * Remove: unregister the controller first so no new operations can start,
 * then mask all interrupts and clear the controller enable bit.
 */
static void mchp_coreqspi_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = platform_get_drvdata(pdev);
	struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr);
	u32 control;

	spi_unregister_controller(ctlr);

	control = readl_relaxed(qspi->regs + REG_CONTROL);
	mchp_coreqspi_disable_ints(qspi);
	control &= ~CONTROL_ENABLE;
	writel_relaxed(control, qspi->regs + REG_CONTROL);
}
815
/* Device-tree match table for the coreQSPI soft IP. */
static const struct of_device_id mchp_coreqspi_of_match[] = {
	{ .compatible = "microchip,coreqspi-rtl-v2" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mchp_coreqspi_of_match);

static struct platform_driver mchp_coreqspi_driver = {
	.probe = mchp_coreqspi_probe,
	.driver = {
		.name = "microchip,coreqspi",
		.of_match_table = mchp_coreqspi_of_match,
	},
	.remove = mchp_coreqspi_remove,
};
830 module_platform_driver(mchp_coreqspi_driver);
831
832 MODULE_AUTHOR("Naga Sureshkumar Relli <nagasuresh.relli@microchip.com");
833 MODULE_DESCRIPTION("Microchip coreQSPI QSPI controller driver");
834 MODULE_LICENSE("GPL");
835