// SPDX-License-Identifier: (GPL-2.0)
/*
 * Microchip coreQSPI QSPI controller driver
 *
 * Copyright (C) 2018-2022 Microchip Technology Inc. and its subsidiaries
 *
 * Author: Naga Sureshkumar Relli <nagasuresh.relli@microchip.com>
 *
 */

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

/*
 * QSPI Control register mask defines
 */
#define CONTROL_ENABLE		BIT(0)
#define CONTROL_MASTER		BIT(1)
#define CONTROL_XIP		BIT(2)
#define CONTROL_XIPADDR		BIT(3)
#define CONTROL_CLKIDLE		BIT(10)
#define CONTROL_SAMPLE_MASK	GENMASK(12, 11)
#define CONTROL_MODE0		BIT(13)
#define CONTROL_MODE12_MASK	GENMASK(15, 14)
#define CONTROL_MODE12_EX_RO	BIT(14)
#define CONTROL_MODE12_EX_RW	BIT(15)
#define CONTROL_MODE12_FULL	GENMASK(15, 14)
#define CONTROL_FLAGSX4		BIT(16)
#define CONTROL_CLKRATE_MASK	GENMASK(27, 24)
#define CONTROL_CLKRATE_SHIFT	24

/*
 * QSPI Frames register mask defines
 */
#define FRAMES_TOTALBYTES_MASK	GENMASK(15, 0)
#define FRAMES_CMDBYTES_MASK	GENMASK(24, 16)
#define FRAMES_CMDBYTES_SHIFT	16
#define FRAMES_SHIFT		25
#define FRAMES_IDLE_MASK	GENMASK(29, 26)
#define FRAMES_IDLE_SHIFT	26
#define FRAMES_FLAGBYTE		BIT(30)
#define FRAMES_FLAGWORD		BIT(31)

/*
 * QSPI Interrupt Enable register mask defines
 */
#define IEN_TXDONE		BIT(0)
#define IEN_RXDONE		BIT(1)
#define IEN_RXAVAILABLE		BIT(2)
#define IEN_TXAVAILABLE		BIT(3)
#define IEN_RXFIFOEMPTY		BIT(4)
#define IEN_TXFIFOFULL		BIT(5)

/*
 * QSPI Status register mask defines
 */
#define STATUS_TXDONE		BIT(0)
#define STATUS_RXDONE		BIT(1)
#define STATUS_RXAVAILABLE	BIT(2)
#define STATUS_TXAVAILABLE	BIT(3)
#define STATUS_RXFIFOEMPTY	BIT(4)
#define STATUS_TXFIFOFULL	BIT(5)
#define STATUS_READY		BIT(7)
#define STATUS_FLAGSX4		BIT(8)
#define STATUS_MASK		GENMASK(8, 0)

#define BYTESUPPER_MASK		GENMASK(31, 16)
#define BYTESLOWER_MASK		GENMASK(15, 0)

#define MAX_DIVIDER		16
#define MIN_DIVIDER		0
#define MAX_DATA_CMD_LEN	256

/* QSPI ready time out value */
#define TIMEOUT_MS		500
/*
 * QSPI Register offsets.
 */
#define REG_CONTROL		(0x00)
#define REG_FRAMES		(0x04)
#define REG_IEN			(0x0c)
#define REG_STATUS		(0x10)
#define REG_DIRECT_ACCESS	(0x14)
#define REG_UPPER_ACCESS	(0x18)
#define REG_RX_DATA		(0x40)
#define REG_TX_DATA		(0x44)
#define REG_X4_RX_DATA		(0x48)
#define REG_X4_TX_DATA		(0x4c)
#define REG_FRAMESUP		(0x50)

/**
 * struct mchp_coreqspi - Defines qspi driver instance
 * @regs:            Virtual address of the QSPI controller registers
 * @clk:             QSPI Operating clock
 * @data_completion: completion structure
 * @op_lock:         lock access to the device
 * @txbuf:           TX buffer
 * @rxbuf:           RX buffer
 * @irq:             IRQ number
 * @tx_len:          Number of bytes left to transfer
 * @rx_len:          Number of bytes left to receive
 */
struct mchp_coreqspi {
	void __iomem *regs;
	struct clk *clk;
	struct completion data_completion;
	struct mutex op_lock; /* lock access to the device */
	u8 *txbuf;
	u8 *rxbuf;
	int irq;
	int tx_len;
	int rx_len;
};

static int mchp_coreqspi_set_mode(struct mchp_coreqspi *qspi, const struct spi_mem_op *op)
{
	u32 control = readl_relaxed(qspi->regs + REG_CONTROL);

	/*
	 * The operating mode can be configured based on the command that needs to be sent.
	 * bits[15:14]: Sets whether multiple bit SPI operates in normal, extended or full modes.
	 *		00: Normal (single DQ0 TX and single DQ1 RX lines)
	 *		01: Extended RO (command and address bytes on DQ0 only)
	 *		10: Extended RW (command byte on DQ0 only)
	 *		11: Full (command and address are on all DQ lines)
	 * bit[13]:	Sets whether multiple bit SPI uses 2 or 4 bits of data
	 *		0: 2-bits (BSPI)
	 *		1: 4-bits (QSPI)
	 */
	if (op->data.buswidth == 4 || op->data.buswidth == 2) {
		control &= ~CONTROL_MODE12_MASK;
		if (op->cmd.buswidth == 1 && (op->addr.buswidth == 1 || op->addr.buswidth == 0))
			control |= CONTROL_MODE12_EX_RO;
		else if (op->cmd.buswidth == 1)
			control |= CONTROL_MODE12_EX_RW;
		else
			control |= CONTROL_MODE12_FULL;

		control |= CONTROL_MODE0;
	} else {
		control &= ~(CONTROL_MODE12_MASK |
			     CONTROL_MODE0);
	}

	writel_relaxed(control, qspi->regs + REG_CONTROL);

	return 0;
}
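/*
 * Example mapping (for illustration): a 1-1-4 read such as SPINOR_OP_READ_1_1_4
 * (opcode 0x6b, command and address on DQ0, data on four lines) has
 * cmd.buswidth == 1, addr.buswidth == 1 and data.buswidth == 4, so the code
 * above selects CONTROL_MODE12_EX_RO and sets CONTROL_MODE0 for 4-bit data.
 * A plain single-line op (all buswidths == 1) takes the else branch and clears
 * both fields, leaving the controller in normal SPI mode.
 */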
static inline void mchp_coreqspi_read_op(struct mchp_coreqspi *qspi)
{
	u32 control, data;

	if (!qspi->rx_len)
		return;

	control = readl_relaxed(qspi->regs + REG_CONTROL);

	/*
	 * Read 4-bytes from the SPI FIFO in a single transaction and then read
	 * the remaining data byte wise.
	 */
	control |= CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->rx_len >= 4) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
			;
		data = readl_relaxed(qspi->regs + REG_X4_RX_DATA);
		*(u32 *)qspi->rxbuf = data;
		qspi->rxbuf += 4;
		qspi->rx_len -= 4;
	}

	control &= ~CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->rx_len--) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
			;
		data = readl_relaxed(qspi->regs + REG_RX_DATA);
		*qspi->rxbuf++ = (data & 0xFF);
	}
}

static inline void mchp_coreqspi_write_op(struct mchp_coreqspi *qspi, bool word)
{
	u32 control, data;

	control = readl_relaxed(qspi->regs + REG_CONTROL);
	control |= CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->tx_len >= 4) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
			;
		data = *(u32 *)qspi->txbuf;
		qspi->txbuf += 4;
		qspi->tx_len -= 4;
		writel_relaxed(data, qspi->regs + REG_X4_TX_DATA);
	}

	control &= ~CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->tx_len--) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
			;
		data = *qspi->txbuf++;
		writel_relaxed(data, qspi->regs + REG_TX_DATA);
	}
}

static void mchp_coreqspi_enable_ints(struct mchp_coreqspi *qspi)
{
	u32 mask = IEN_TXDONE |
		   IEN_RXDONE |
		   IEN_RXAVAILABLE;

	writel_relaxed(mask, qspi->regs + REG_IEN);
}

static void mchp_coreqspi_disable_ints(struct mchp_coreqspi *qspi)
{
	writel_relaxed(0, qspi->regs + REG_IEN);
}

static irqreturn_t mchp_coreqspi_isr(int irq, void *dev_id)
{
	struct mchp_coreqspi *qspi = (struct mchp_coreqspi *)dev_id;
	irqreturn_t ret = IRQ_NONE;
	int intfield = readl_relaxed(qspi->regs + REG_STATUS) & STATUS_MASK;

	if (intfield == 0)
		return ret;

	if (intfield & IEN_TXDONE) {
		writel_relaxed(IEN_TXDONE, qspi->regs + REG_STATUS);
		ret = IRQ_HANDLED;
	}

	if (intfield & IEN_RXAVAILABLE) {
		writel_relaxed(IEN_RXAVAILABLE, qspi->regs + REG_STATUS);
		mchp_coreqspi_read_op(qspi);
		ret = IRQ_HANDLED;
	}

	if (intfield & IEN_RXDONE) {
		writel_relaxed(IEN_RXDONE, qspi->regs + REG_STATUS);
		complete(&qspi->data_completion);
		ret = IRQ_HANDLED;
	}

	return ret;
}

static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_device *spi,
				     const struct spi_mem_op *op)
{
	unsigned long clk_hz;
	u32 control, baud_rate_val = 0;

	clk_hz = clk_get_rate(qspi->clk);
	if (!clk_hz)
		return -EINVAL;

	baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * op->max_freq);
	if (baud_rate_val > MAX_DIVIDER || baud_rate_val < MIN_DIVIDER) {
		dev_err(&spi->dev,
			"could not configure the clock for spi clock %d Hz & system clock %ld Hz\n",
			op->max_freq, clk_hz);
		return -EINVAL;
	}

	control = readl_relaxed(qspi->regs + REG_CONTROL);
	control &= ~CONTROL_CLKRATE_MASK;
	control |= baud_rate_val << CONTROL_CLKRATE_SHIFT;
	writel_relaxed(control, qspi->regs + REG_CONTROL);
	control = readl_relaxed(qspi->regs + REG_CONTROL);

	if ((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA))
		control |= CONTROL_CLKIDLE;
	else
		control &= ~CONTROL_CLKIDLE;

	writel_relaxed(control, qspi->regs + REG_CONTROL);

	return 0;
}
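/*
 * Worked example (illustrative values, not from the coreQSPI spec): with a
 * 150 MHz input clock and an op->max_freq of 20 MHz, the code above computes
 * DIV_ROUND_UP(150000000, 2 * 20000000) = 4, which fits in the CLKRATE field
 * (MAX_DIVIDER is 16). Assuming the controller derives SCLK as
 * clk / (2 * CLKRATE), this yields 150 MHz / 8 = 18.75 MHz, the fastest rate
 * not exceeding the requested frequency. An op->max_freq below
 * clk_hz / (2 * MAX_DIVIDER) (about 4.7 MHz here) would need a divider larger
 * than 16 and is rejected with -EINVAL.
 */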
static int mchp_coreqspi_setup_op(struct spi_device *spi_dev)
{
	struct spi_controller *ctlr = spi_dev->controller;
	struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr);
	u32 control = readl_relaxed(qspi->regs + REG_CONTROL);

	control |= (CONTROL_MASTER | CONTROL_ENABLE);
	control &= ~CONTROL_CLKIDLE;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	return 0;
}

static inline void mchp_coreqspi_config_op(struct mchp_coreqspi *qspi, const struct spi_mem_op *op)
{
	u32 idle_cycles = 0;
	int total_bytes, cmd_bytes, frames, ctrl;

	cmd_bytes = op->cmd.nbytes + op->addr.nbytes;
	total_bytes = cmd_bytes + op->data.nbytes;

	/*
	 * As per the coreQSPI IP spec, the number of command and data bytes is
	 * controlled by the frames register for each SPI sequence. This supports
	 * the SPI flash memory read and write sequences below, so configure
	 * the cmd and total bytes accordingly.
	 * ---------------------------------------------------------------------
	 * TOTAL BYTES  |  CMD BYTES | What happens                             |
	 * ______________________________________________________________________
	 *              |            |                                          |
	 *     1        |   1        | The SPI core will transmit a single byte |
	 *              |            | and the received data is discarded       |
	 *              |            |                                          |
	 *     1        |   0        | The SPI core will transmit a single byte |
	 *              |            | and return a single byte                 |
	 *              |            |                                          |
	 *     10       |   4        | The SPI core will transmit 4 command     |
	 *              |            | bytes, discard the received data,        |
	 *              |            | transmit 6 dummy bytes and return the 6  |
	 *              |            | received bytes                           |
	 *              |            |                                          |
	 *     10       |   10       | The SPI core will transmit 10 command    |
	 *              |            | bytes and discard the received data      |
	 *              |            |                                          |
	 *     10       |   0        | The SPI core will transmit 10 command    |
	 *              |            | bytes and return 10 received bytes       |
	 * ______________________________________________________________________
	 */
	if (!(op->data.dir == SPI_MEM_DATA_IN))
		cmd_bytes = total_bytes;

	frames = total_bytes & BYTESUPPER_MASK;
	writel_relaxed(frames, qspi->regs + REG_FRAMESUP);
	frames = total_bytes & BYTESLOWER_MASK;
	frames |= cmd_bytes << FRAMES_CMDBYTES_SHIFT;

	if (op->dummy.buswidth)
		idle_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;

	frames |= idle_cycles << FRAMES_IDLE_SHIFT;
	ctrl = readl_relaxed(qspi->regs + REG_CONTROL);

	if (ctrl & CONTROL_MODE12_MASK)
		frames |= (1 << FRAMES_SHIFT);

	frames |= FRAMES_FLAGWORD;
	writel_relaxed(frames, qspi->regs + REG_FRAMES);
}
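/*
 * Worked example (illustrative): a 256-byte SPINOR_OP_READ_1_1_4 (0x6b) read
 * with a 3-byte address and 8 dummy cycles (dummy.nbytes = 1, dummy.buswidth = 1)
 * gives cmd_bytes = 1 + 3 = 4 and total_bytes = 4 + 256 = 260. The data
 * direction is SPI_MEM_DATA_IN, so cmd_bytes stays at 4 (opcode plus address
 * are transmitted and the remaining 256 bytes are returned), idle_cycles works
 * out to 8, and REG_FRAMESUP is written with 0 because total_bytes fits in the
 * lower 16 bits. FRAMES_FLAGWORD is set here for every op.
 */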
static int mchp_qspi_wait_for_ready(struct spi_mem *mem)
{
	struct mchp_coreqspi *qspi = spi_controller_get_devdata
				    (mem->spi->controller);
	u32 status;
	int ret;

	ret = readl_poll_timeout(qspi->regs + REG_STATUS, status,
				 (status & STATUS_READY), 0,
				 TIMEOUT_MS);
	if (ret) {
		dev_err(&mem->spi->dev,
			"Timeout waiting on QSPI ready.\n");
		return -ETIMEDOUT;
	}

	return ret;
}

static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct mchp_coreqspi *qspi = spi_controller_get_devdata
				    (mem->spi->controller);
	u32 address = op->addr.val;
	u8 opcode = op->cmd.opcode;
	u8 opaddr[5];
	int err, i;

	mutex_lock(&qspi->op_lock);
	err = mchp_qspi_wait_for_ready(mem);
	if (err)
		goto error;

	err = mchp_coreqspi_setup_clock(qspi, mem->spi, op);
	if (err)
		goto error;

	err = mchp_coreqspi_set_mode(qspi, op);
	if (err)
		goto error;

	reinit_completion(&qspi->data_completion);
	mchp_coreqspi_config_op(qspi, op);
	if (op->cmd.opcode) {
		qspi->txbuf = &opcode;
		qspi->rxbuf = NULL;
		qspi->tx_len = op->cmd.nbytes;
		qspi->rx_len = 0;
		mchp_coreqspi_write_op(qspi, false);
	}

	qspi->txbuf = &opaddr[0];
	if (op->addr.nbytes) {
		for (i = 0; i < op->addr.nbytes; i++)
			qspi->txbuf[i] = address >> (8 * (op->addr.nbytes - i - 1));

		qspi->rxbuf = NULL;
		qspi->tx_len = op->addr.nbytes;
		qspi->rx_len = 0;
		mchp_coreqspi_write_op(qspi, false);
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_OUT) {
			qspi->txbuf = (u8 *)op->data.buf.out;
			qspi->rxbuf = NULL;
			qspi->rx_len = 0;
			qspi->tx_len = op->data.nbytes;
			mchp_coreqspi_write_op(qspi, true);
		} else {
			qspi->txbuf = NULL;
			qspi->rxbuf = (u8 *)op->data.buf.in;
			qspi->rx_len = op->data.nbytes;
			qspi->tx_len = 0;
		}
	}

	mchp_coreqspi_enable_ints(qspi);

	if (!wait_for_completion_timeout(&qspi->data_completion, msecs_to_jiffies(1000)))
		err = -ETIMEDOUT;

error:
	mutex_unlock(&qspi->op_lock);
	mchp_coreqspi_disable_ints(qspi);

	return err;
}

static bool mchp_coreqspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct mchp_coreqspi *qspi = spi_controller_get_devdata(mem->spi->controller);
	unsigned long clk_hz;
	u32 baud_rate_val;

	if (!spi_mem_default_supports_op(mem, op))
		return false;

	if ((op->data.buswidth == 4 || op->data.buswidth == 2) &&
	    (op->cmd.buswidth == 1 && (op->addr.buswidth == 1 || op->addr.buswidth == 0))) {
		/*
		 * If the command and address are on DQ0 only, then this
		 * controller doesn't support sending data on dual and
		 * quad lines, but it does support reading data on dual and
		 * quad lines with the same configuration as command and
		 * address on DQ0.
		 * i.e. the control register[15:13] EX_RO (read only) mode
		 * is meant only for ops whose command and address are on
		 * DQ0 and that read data; it cannot be used to write data.
		 * E.g. 0x34 (Quad Load Program Data) is not supported.
		 * The spi-mem layer will then iterate over each candidate
		 * op and choose a supported one.
		 */
		if (op->data.dir == SPI_MEM_DATA_OUT)
			return false;
	}

	clk_hz = clk_get_rate(qspi->clk);
	if (!clk_hz)
		return false;

	baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * op->max_freq);
	if (baud_rate_val > MAX_DIVIDER || baud_rate_val < MIN_DIVIDER)
		return false;

	return true;
}
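/*
 * Note on sizing (illustrative): MAX_DATA_CMD_LEN (256) is the most data this
 * driver moves in a single op, so mchp_coreqspi_adjust_op_size() below trims
 * op->data.nbytes to 256 bytes. Callers such as spi-nor query the adjusted
 * size via spi_mem_adjust_op_size() and issue multiple ops; a 4 KiB read, for
 * example, becomes sixteen 256-byte reads back to back.
 */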
static int mchp_coreqspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	if (op->data.dir == SPI_MEM_DATA_OUT || op->data.dir == SPI_MEM_DATA_IN) {
		if (op->data.nbytes > MAX_DATA_CMD_LEN)
			op->data.nbytes = MAX_DATA_CMD_LEN;
	}

	return 0;
}

static const struct spi_controller_mem_ops mchp_coreqspi_mem_ops = {
	.adjust_op_size = mchp_coreqspi_adjust_op_size,
	.supports_op = mchp_coreqspi_supports_op,
	.exec_op = mchp_coreqspi_exec_op,
};

static const struct spi_controller_mem_caps mchp_coreqspi_mem_caps = {
	.per_op_freq = true,
};

static int mchp_coreqspi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct mchp_coreqspi *qspi;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	int ret;

	ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*qspi));
	if (!ctlr)
		return dev_err_probe(&pdev->dev, -ENOMEM,
				     "unable to allocate host for QSPI controller\n");

	qspi = spi_controller_get_devdata(ctlr);
	platform_set_drvdata(pdev, qspi);

	qspi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qspi->regs))
		return dev_err_probe(&pdev->dev, PTR_ERR(qspi->regs),
				     "failed to map registers\n");

	qspi->clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(qspi->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(qspi->clk),
				     "could not get clock\n");

	init_completion(&qspi->data_completion);
	mutex_init(&qspi->op_lock);

	qspi->irq = platform_get_irq(pdev, 0);
	if (qspi->irq < 0)
		return qspi->irq;

	ret = devm_request_irq(&pdev->dev, qspi->irq, mchp_coreqspi_isr,
			       IRQF_SHARED, pdev->name, qspi);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed %d\n", ret);
		return ret;
	}

	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
	ctlr->mem_ops = &mchp_coreqspi_mem_ops;
	ctlr->mem_caps = &mchp_coreqspi_mem_caps;
	ctlr->setup = mchp_coreqspi_setup_op;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
			  SPI_TX_DUAL | SPI_TX_QUAD;
	ctlr->dev.of_node = np;

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "spi_register_controller failed\n");

	return 0;
}

static void mchp_coreqspi_remove(struct platform_device *pdev)
{
	struct mchp_coreqspi *qspi = platform_get_drvdata(pdev);
	u32 control = readl_relaxed(qspi->regs + REG_CONTROL);

	mchp_coreqspi_disable_ints(qspi);
	control &= ~CONTROL_ENABLE;
	writel_relaxed(control, qspi->regs + REG_CONTROL);
}

static const struct of_device_id mchp_coreqspi_of_match[] = {
	{ .compatible = "microchip,coreqspi-rtl-v2" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mchp_coreqspi_of_match);

static struct platform_driver mchp_coreqspi_driver = {
	.probe = mchp_coreqspi_probe,
	.driver = {
		.name = "microchip,coreqspi",
		.of_match_table = mchp_coreqspi_of_match,
	},
	.remove = mchp_coreqspi_remove,
};
module_platform_driver(mchp_coreqspi_driver);
MODULE_AUTHOR("Naga Sureshkumar Relli <nagasuresh.relli@microchip.com>");
MODULE_DESCRIPTION("Microchip coreQSPI QSPI controller driver");
MODULE_LICENSE("GPL");
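/*
 * Example device tree node (hypothetical address, interrupt and clock values,
 * shown only to illustrate the resources consumed in probe: one register
 * window, one interrupt and one clock):
 *
 *	qspi@20111000 {
 *		compatible = "microchip,coreqspi-rtl-v2";
 *		reg = <0x20111000 0x1000>;
 *		interrupts = <6>;
 *		clocks = <&clkcfg 0>;
 *	};
 */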