// SPDX-License-Identifier: (GPL-2.0)
/*
 * Microchip coreQSPI QSPI controller driver
 *
 * Copyright (C) 2018-2022 Microchip Technology Inc. and its subsidiaries
 *
 * Author: Naga Sureshkumar Relli <nagasuresh.relli@microchip.com>
 *
 */

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

/*
 * QSPI Control register mask defines
 */
#define CONTROL_ENABLE		BIT(0)
#define CONTROL_MASTER		BIT(1)
#define CONTROL_XIP		BIT(2)
#define CONTROL_XIPADDR		BIT(3)
#define CONTROL_CLKIDLE		BIT(10)
#define CONTROL_SAMPLE_MASK	GENMASK(12, 11)
#define CONTROL_MODE0		BIT(13)
#define CONTROL_MODE12_MASK	GENMASK(15, 14)
#define CONTROL_MODE12_EX_RO	BIT(14)
#define CONTROL_MODE12_EX_RW	BIT(15)
#define CONTROL_MODE12_FULL	GENMASK(15, 14)
#define CONTROL_FLAGSX4		BIT(16)
#define CONTROL_CLKRATE_MASK	GENMASK(27, 24)
#define CONTROL_CLKRATE_SHIFT	24
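
/*
 * CONTROL_FLAGSX4 selects 32-bit (X4) FIFO access: when set, the
 * read/write helpers below move data four bytes at a time through
 * REG_X4_RX_DATA/REG_X4_TX_DATA; when clear, the trailing bytes are
 * transferred one at a time through REG_RX_DATA/REG_TX_DATA.
 */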

/*
 * QSPI Frames register mask defines
 */
#define FRAMES_TOTALBYTES_MASK	GENMASK(15, 0)
#define FRAMES_CMDBYTES_MASK	GENMASK(24, 16)
#define FRAMES_CMDBYTES_SHIFT	16
#define FRAMES_SHIFT		25
#define FRAMES_IDLE_MASK	GENMASK(29, 26)
#define FRAMES_IDLE_SHIFT	26
#define FRAMES_FLAGBYTE		BIT(30)
#define FRAMES_FLAGWORD		BIT(31)

/*
 * QSPI Interrupt Enable register mask defines
 */
#define IEN_TXDONE		BIT(0)
#define IEN_RXDONE		BIT(1)
#define IEN_RXAVAILABLE		BIT(2)
#define IEN_TXAVAILABLE		BIT(3)
#define IEN_RXFIFOEMPTY		BIT(4)
#define IEN_TXFIFOFULL		BIT(5)

/*
 * QSPI Status register mask defines
 */
#define STATUS_TXDONE		BIT(0)
#define STATUS_RXDONE		BIT(1)
#define STATUS_RXAVAILABLE	BIT(2)
#define STATUS_TXAVAILABLE	BIT(3)
#define STATUS_RXFIFOEMPTY	BIT(4)
#define STATUS_TXFIFOFULL	BIT(5)
#define STATUS_READY		BIT(7)
#define STATUS_FLAGSX4		BIT(8)
#define STATUS_MASK		GENMASK(8, 0)

#define BYTESUPPER_MASK		GENMASK(31, 16)
#define BYTESLOWER_MASK		GENMASK(15, 0)

#define MAX_DIVIDER		16
#define MIN_DIVIDER		0
#define MAX_DATA_CMD_LEN	256

/* QSPI ready time out value */
#define TIMEOUT_MS		500

/*
 * QSPI Register offsets.
 */
#define REG_CONTROL		(0x00)
#define REG_FRAMES		(0x04)
#define REG_IEN			(0x0c)
#define REG_STATUS		(0x10)
#define REG_DIRECT_ACCESS	(0x14)
#define REG_UPPER_ACCESS	(0x18)
#define REG_RX_DATA		(0x40)
#define REG_TX_DATA		(0x44)
#define REG_X4_RX_DATA		(0x48)
#define REG_X4_TX_DATA		(0x4c)
#define REG_FRAMESUP		(0x50)

/**
 * struct mchp_coreqspi - Defines qspi driver instance
 * @regs: Virtual address of the QSPI controller registers
 * @clk: QSPI Operating clock
 * @data_completion: completion structure
 * @op_lock: lock access to the device
 * @txbuf: TX buffer
 * @rxbuf: RX buffer
 * @irq: IRQ number
 * @tx_len: Number of bytes left to transfer
 * @rx_len: Number of bytes left to receive
 */
struct mchp_coreqspi {
	void __iomem *regs;
	struct clk *clk;
	struct completion data_completion;
	struct mutex op_lock; /* lock access to the device */
	u8 *txbuf;
	u8 *rxbuf;
	int irq;
	int tx_len;
	int rx_len;
};

static int mchp_coreqspi_set_mode(struct mchp_coreqspi *qspi, const struct spi_mem_op *op)
{
	u32 control = readl_relaxed(qspi->regs + REG_CONTROL);

	/*
	 * The operating mode can be configured based on the command that needs to be sent.
	 * bits[15:14]: Sets whether multiple bit SPI operates in normal, extended or full modes.
	 *		00: Normal (single DQ0 TX and single DQ1 RX lines)
	 *		01: Extended RO (command and address bytes on DQ0 only)
	 *		10: Extended RW (command byte on DQ0 only)
	 *		11: Full (command and address are on all DQ lines)
	 * bit[13]:	Sets whether multiple bit SPI uses 2 or 4 bits of data
	 *		0: 2-bits (BSPI)
	 *		1: 4-bits (QSPI)
	 */
	if (op->data.buswidth == 4 || op->data.buswidth == 2) {
		control &= ~CONTROL_MODE12_MASK;
		if (op->cmd.buswidth == 1 && (op->addr.buswidth == 1 || op->addr.buswidth == 0))
			control |= CONTROL_MODE12_EX_RO;
		else if (op->cmd.buswidth == 1)
			control |= CONTROL_MODE12_EX_RW;
		else
			control |= CONTROL_MODE12_FULL;

		control |= CONTROL_MODE0;
	} else {
		control &= ~(CONTROL_MODE12_MASK | CONTROL_MODE0);
	}

	writel_relaxed(control, qspi->regs + REG_CONTROL);

	return 0;
}
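
/*
 * The mapping above means that, for example (illustrative op formats and
 * opcode), a 1-1-4 read such as Quad Output Fast Read (0x6b) runs in
 * extended read-only mode, a 1-4-4 read runs in extended read-write mode,
 * and a 4-4-4 op runs in full mode; CONTROL_MODE0 is set for any dual or
 * quad data phase.
 */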

static inline void mchp_coreqspi_read_op(struct mchp_coreqspi *qspi)
{
	u32 control, data;

	if (!qspi->rx_len)
		return;

	control = readl_relaxed(qspi->regs + REG_CONTROL);

	/*
	 * Read 4 bytes from the SPI FIFO in a single transaction and then
	 * read the remaining data byte wise.
	 */
	control |= CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->rx_len >= 4) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
			;
		data = readl_relaxed(qspi->regs + REG_X4_RX_DATA);
		*(u32 *)qspi->rxbuf = data;
		qspi->rxbuf += 4;
		qspi->rx_len -= 4;
	}

	control &= ~CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->rx_len--) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
			;
		data = readl_relaxed(qspi->regs + REG_RX_DATA);
		*qspi->rxbuf++ = (data & 0xFF);
	}
}

static inline void mchp_coreqspi_write_op(struct mchp_coreqspi *qspi, bool word)
{
	u32 control, data;

	control = readl_relaxed(qspi->regs + REG_CONTROL);
	control |= CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->tx_len >= 4) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
			;
		data = *(u32 *)qspi->txbuf;
		qspi->txbuf += 4;
		qspi->tx_len -= 4;
		writel_relaxed(data, qspi->regs + REG_X4_TX_DATA);
	}

	control &= ~CONTROL_FLAGSX4;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	while (qspi->tx_len--) {
		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
			;
		data = *qspi->txbuf++;
		writel_relaxed(data, qspi->regs + REG_TX_DATA);
	}
}

static void mchp_coreqspi_enable_ints(struct mchp_coreqspi *qspi)
{
	u32 mask = IEN_TXDONE |
		   IEN_RXDONE |
		   IEN_RXAVAILABLE;

	writel_relaxed(mask, qspi->regs + REG_IEN);
}

static void mchp_coreqspi_disable_ints(struct mchp_coreqspi *qspi)
{
	writel_relaxed(0, qspi->regs + REG_IEN);
}

static irqreturn_t mchp_coreqspi_isr(int irq, void *dev_id)
{
	struct mchp_coreqspi *qspi = (struct mchp_coreqspi *)dev_id;
	irqreturn_t ret = IRQ_NONE;
	int intfield = readl_relaxed(qspi->regs + REG_STATUS) & STATUS_MASK;

	if (intfield == 0)
		return ret;

	if (intfield & IEN_TXDONE) {
		writel_relaxed(IEN_TXDONE, qspi->regs + REG_STATUS);
		ret = IRQ_HANDLED;
	}

	if (intfield & IEN_RXAVAILABLE) {
		writel_relaxed(IEN_RXAVAILABLE, qspi->regs + REG_STATUS);
		mchp_coreqspi_read_op(qspi);
		ret = IRQ_HANDLED;
	}

	if (intfield & IEN_RXDONE) {
		writel_relaxed(IEN_RXDONE, qspi->regs + REG_STATUS);
		complete(&qspi->data_completion);
		ret = IRQ_HANDLED;
	}

	return ret;
}

static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_device *spi)
{
	unsigned long clk_hz;
	u32 control, baud_rate_val = 0;

	clk_hz = clk_get_rate(qspi->clk);
	if (!clk_hz)
		return -EINVAL;

	baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * spi->max_speed_hz);
	if (baud_rate_val > MAX_DIVIDER || baud_rate_val < MIN_DIVIDER) {
		dev_err(&spi->dev,
			"could not configure the clock for spi clock %d Hz & system clock %ld Hz\n",
			spi->max_speed_hz, clk_hz);
		return -EINVAL;
	}

	control = readl_relaxed(qspi->regs + REG_CONTROL);
	control &= ~CONTROL_CLKRATE_MASK;
	control |= baud_rate_val << CONTROL_CLKRATE_SHIFT;
	writel_relaxed(control, qspi->regs + REG_CONTROL);
	control = readl_relaxed(qspi->regs + REG_CONTROL);

	if ((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA))
		control |= CONTROL_CLKIDLE;
	else
		control &= ~CONTROL_CLKIDLE;

	writel_relaxed(control, qspi->regs + REG_CONTROL);

	return 0;
}
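
/*
 * Worked example for the divider above, with illustrative clock rates: a
 * 150 MHz system clock and a 20 MHz max_speed_hz give baud_rate_val =
 * DIV_ROUND_UP(150000000, 2 * 20000000) = 4, i.e. SCLK = 150 MHz / (2 * 4)
 * = 18.75 MHz, the fastest rate that does not exceed the requested speed.
 */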

static int mchp_coreqspi_setup_op(struct spi_device *spi_dev)
{
	struct spi_controller *ctlr = spi_dev->controller;
	struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr);
	u32 control = readl_relaxed(qspi->regs + REG_CONTROL);

	control |= (CONTROL_MASTER | CONTROL_ENABLE);
	control &= ~CONTROL_CLKIDLE;
	writel_relaxed(control, qspi->regs + REG_CONTROL);

	return 0;
}

static inline void mchp_coreqspi_config_op(struct mchp_coreqspi *qspi, const struct spi_mem_op *op)
{
	u32 idle_cycles = 0;
	int total_bytes, cmd_bytes, frames, ctrl;

	cmd_bytes = op->cmd.nbytes + op->addr.nbytes;
	total_bytes = cmd_bytes + op->data.nbytes;

	/*
	 * As per the coreQSPI IP spec, the number of command and data bytes is
	 * controlled by the frames register for each SPI sequence. This supports
	 * the SPI flash memory read and write sequences below, so configure
	 * the cmd and total bytes accordingly.
	 * ---------------------------------------------------------------------
	 * TOTAL BYTES  |  CMD BYTES | What happens                             |
	 * ______________________________________________________________________
	 *              |            |                                          |
	 *     1        |   1        | The SPI core will transmit a single byte |
	 *              |            | and the received data is discarded       |
	 *              |            |                                          |
	 *     1        |   0        | The SPI core will transmit a single byte |
	 *              |            | and return a single byte                 |
	 *              |            |                                          |
	 *     10       |   4        | The SPI core will transmit 4 command     |
	 *              |            | bytes discarding the received data and   |
	 *              |            | transmits 6 dummy bytes returning the 6  |
	 *              |            | received bytes                           |
	 *              |            |                                          |
	 *     10       |   10       | The SPI core will transmit 10 command    |
	 *              |            | bytes and the received data is discarded |
	 *              |            |                                          |
	 *     10       |    0       | The SPI core will transmit 10 command    |
	 *              |            | bytes and return 10 received bytes       |
	 * ______________________________________________________________________
	 */
	if (!(op->data.dir == SPI_MEM_DATA_IN))
		cmd_bytes = total_bytes;

	frames = total_bytes & BYTESUPPER_MASK;
	writel_relaxed(frames, qspi->regs + REG_FRAMESUP);
	frames = total_bytes & BYTESLOWER_MASK;
	frames |= cmd_bytes << FRAMES_CMDBYTES_SHIFT;

	if (op->dummy.buswidth)
		idle_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;

	frames |= idle_cycles << FRAMES_IDLE_SHIFT;
	ctrl = readl_relaxed(qspi->regs + REG_CONTROL);

	if (ctrl & CONTROL_MODE12_MASK)
		frames |= (1 << FRAMES_SHIFT);

	frames |= FRAMES_FLAGWORD;
	writel_relaxed(frames, qspi->regs + REG_FRAMES);
}
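
/*
 * Worked example for the frames encoding above, with an illustrative op: a
 * quad read with a 1-byte command, a 3-byte address, 4 dummy bytes on 4
 * lines and 64 data bytes in gives cmd_bytes = 4, total_bytes = 68 and
 * idle_cycles = 8; the dummy phase is expressed purely as idle cycles and
 * is not counted in the byte totals.
 */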

static int mchp_qspi_wait_for_ready(struct spi_mem *mem)
{
	struct mchp_coreqspi *qspi = spi_controller_get_devdata(mem->spi->controller);
	u32 status;
	int ret;

	ret = readl_poll_timeout(qspi->regs + REG_STATUS, status,
				 (status & STATUS_READY), 0,
				 TIMEOUT_MS);
	if (ret) {
		dev_err(&mem->spi->dev,
			"Timeout waiting on QSPI ready.\n");
		return -ETIMEDOUT;
	}

	return ret;
}

static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct mchp_coreqspi *qspi = spi_controller_get_devdata(mem->spi->controller);
	u32 address = op->addr.val;
	u8 opcode = op->cmd.opcode;
	u8 opaddr[5];
	int err, i;

	mutex_lock(&qspi->op_lock);
	err = mchp_qspi_wait_for_ready(mem);
	if (err)
		goto error;

	err = mchp_coreqspi_setup_clock(qspi, mem->spi);
	if (err)
		goto error;

	err = mchp_coreqspi_set_mode(qspi, op);
	if (err)
		goto error;

	reinit_completion(&qspi->data_completion);
	mchp_coreqspi_config_op(qspi, op);
	if (op->cmd.opcode) {
		qspi->txbuf = &opcode;
		qspi->rxbuf = NULL;
		qspi->tx_len = op->cmd.nbytes;
		qspi->rx_len = 0;
		mchp_coreqspi_write_op(qspi, false);
	}

	qspi->txbuf = &opaddr[0];
	if (op->addr.nbytes) {
		for (i = 0; i < op->addr.nbytes; i++)
			qspi->txbuf[i] = address >> (8 * (op->addr.nbytes - i - 1));

		qspi->rxbuf = NULL;
		qspi->tx_len = op->addr.nbytes;
		qspi->rx_len = 0;
		mchp_coreqspi_write_op(qspi, false);
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_OUT) {
			qspi->txbuf = (u8 *)op->data.buf.out;
			qspi->rxbuf = NULL;
			qspi->rx_len = 0;
			qspi->tx_len = op->data.nbytes;
			mchp_coreqspi_write_op(qspi, true);
		} else {
			qspi->txbuf = NULL;
			qspi->rxbuf = (u8 *)op->data.buf.in;
			qspi->rx_len = op->data.nbytes;
			qspi->tx_len = 0;
		}
	}

	mchp_coreqspi_enable_ints(qspi);

	if (!wait_for_completion_timeout(&qspi->data_completion, msecs_to_jiffies(1000)))
		err = -ETIMEDOUT;

error:
	mutex_unlock(&qspi->op_lock);
	mchp_coreqspi_disable_ints(qspi);

	return err;
}
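
/*
 * spi_mem_default_supports_op() performs the generic buswidth checks; the
 * hook below only adds the controller-specific restriction that dual or
 * quad data can be read but not written when the command and address are
 * on DQ0.
 */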

static bool mchp_coreqspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	if ((op->data.buswidth == 4 || op->data.buswidth == 2) &&
	    (op->cmd.buswidth == 1 && (op->addr.buswidth == 1 || op->addr.buswidth == 0))) {
		/*
		 * If the command and address are on DQ0 only, this controller
		 * does not support sending data on dual or quad lines, but it
		 * does support reading data on dual or quad lines with the
		 * same command/address configuration, i.e. the EX_RO (read
		 * only) setting of control register bits [15:13] covers
		 * command and address on DQ0 for reads only, not for writing
		 * data. For example, 0x34 (Quad Load Program Data) is not
		 * supported; the spi-mem layer will then iterate over the op
		 * variants and choose a supported one.
		 */
		if (op->data.dir == SPI_MEM_DATA_OUT)
			return false;
	}

	return true;
}

static int mchp_coreqspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	if (op->data.dir == SPI_MEM_DATA_OUT || op->data.dir == SPI_MEM_DATA_IN) {
		if (op->data.nbytes > MAX_DATA_CMD_LEN)
			op->data.nbytes = MAX_DATA_CMD_LEN;
	}

	return 0;
}

static const struct spi_controller_mem_ops mchp_coreqspi_mem_ops = {
	.adjust_op_size = mchp_coreqspi_adjust_op_size,
	.supports_op = mchp_coreqspi_supports_op,
	.exec_op = mchp_coreqspi_exec_op,
};

static int mchp_coreqspi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct mchp_coreqspi *qspi;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	int ret;

	ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*qspi));
	if (!ctlr)
		return dev_err_probe(&pdev->dev, -ENOMEM,
				     "unable to allocate host for QSPI controller\n");

	qspi = spi_controller_get_devdata(ctlr);
	platform_set_drvdata(pdev, qspi);

	qspi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qspi->regs))
		return dev_err_probe(&pdev->dev, PTR_ERR(qspi->regs),
				     "failed to map registers\n");

	qspi->clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(qspi->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(qspi->clk),
				     "could not get clock\n");

	init_completion(&qspi->data_completion);
	mutex_init(&qspi->op_lock);

	qspi->irq = platform_get_irq(pdev, 0);
	if (qspi->irq < 0)
		return qspi->irq;

	ret = devm_request_irq(&pdev->dev, qspi->irq, mchp_coreqspi_isr,
			       IRQF_SHARED, pdev->name, qspi);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed %d\n", ret);
		return ret;
	}

	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
	ctlr->mem_ops = &mchp_coreqspi_mem_ops;
	ctlr->setup = mchp_coreqspi_setup_op;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
			  SPI_TX_DUAL | SPI_TX_QUAD;
	ctlr->dev.of_node = np;

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "spi_register_controller failed\n");

	return 0;
}

static void mchp_coreqspi_remove(struct platform_device *pdev)
{
	struct mchp_coreqspi *qspi = platform_get_drvdata(pdev);
	u32 control = readl_relaxed(qspi->regs + REG_CONTROL);

	mchp_coreqspi_disable_ints(qspi);
	control &= ~CONTROL_ENABLE;
	writel_relaxed(control, qspi->regs + REG_CONTROL);
}

static const struct of_device_id mchp_coreqspi_of_match[] = {
	{ .compatible = "microchip,coreqspi-rtl-v2" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mchp_coreqspi_of_match);

static struct platform_driver mchp_coreqspi_driver = {
	.probe = mchp_coreqspi_probe,
	.driver = {
		.name = "microchip,coreqspi",
		.of_match_table = mchp_coreqspi_of_match,
	},
	.remove_new = mchp_coreqspi_remove,
};
module_platform_driver(mchp_coreqspi_driver);

MODULE_AUTHOR("Naga Sureshkumar Relli <nagasuresh.relli@microchip.com>");
MODULE_DESCRIPTION("Microchip coreQSPI QSPI controller driver");
MODULE_LICENSE("GPL");
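
/*
 * Illustrative devicetree usage only; the values below are placeholders
 * and the microchip,coreqspi-rtl-v2 binding document is authoritative:
 *
 *	qspi: spi@... {
 *		compatible = "microchip,coreqspi-rtl-v2";
 *		reg = <...>;
 *		interrupts = <...>;
 *		clocks = <...>;
 *	};
 *
 * The driver maps register resource 0, requests IRQ 0 and enables the
 * single (unnamed) peripheral clock, matching mchp_coreqspi_probe() above.
 */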