// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.

#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_opp.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>


#define QSPI_NUM_CS		2
#define QSPI_BYTES_PER_WORD	4

#define MSTR_CONFIG		0x0000
#define FULL_CYCLE_MODE		BIT(3)
#define FB_CLK_EN		BIT(4)
#define PIN_HOLDN		BIT(6)
#define PIN_WPN			BIT(7)
#define DMA_ENABLE		BIT(8)
#define BIG_ENDIAN_MODE		BIT(9)
#define SPI_MODE_MSK		0xc00
#define SPI_MODE_SHFT		10
#define CHIP_SELECT_NUM		BIT(12)
#define SBL_EN			BIT(13)
#define LPA_BASE_MSK		0x3c000
#define LPA_BASE_SHFT		14
#define TX_DATA_DELAY_MSK	0xc0000
#define TX_DATA_DELAY_SHFT	18
#define TX_CLK_DELAY_MSK	0x300000
#define TX_CLK_DELAY_SHFT	20
#define TX_CS_N_DELAY_MSK	0xc00000
#define TX_CS_N_DELAY_SHFT	22
#define TX_DATA_OE_DELAY_MSK	0x3000000
#define TX_DATA_OE_DELAY_SHFT	24

#define AHB_MASTER_CFG				0x0004
#define HMEM_TYPE_START_MID_TRANS_MSK		0x7
#define HMEM_TYPE_START_MID_TRANS_SHFT		0
#define HMEM_TYPE_LAST_TRANS_MSK		0x38
#define HMEM_TYPE_LAST_TRANS_SHFT		3
#define USE_HMEMTYPE_LAST_ON_DESC_OR_CHAIN_MSK	0xc0
#define USE_HMEMTYPE_LAST_ON_DESC_OR_CHAIN_SHFT	6
#define HMEMTYPE_READ_TRANS_MSK			0x700
#define HMEMTYPE_READ_TRANS_SHFT		8
#define HSHARED					BIT(11)
#define HINNERSHARED				BIT(12)

#define MSTR_INT_EN		0x000C
#define MSTR_INT_STATUS		0x0010
#define RESP_FIFO_UNDERRUN	BIT(0)
#define RESP_FIFO_NOT_EMPTY	BIT(1)
#define RESP_FIFO_RDY		BIT(2)
#define HRESP_FROM_NOC_ERR	BIT(3)
#define WR_FIFO_EMPTY		BIT(9)
#define WR_FIFO_FULL		BIT(10)
#define WR_FIFO_OVERRUN		BIT(11)
#define TRANSACTION_DONE	BIT(16)
#define DMA_CHAIN_DONE		BIT(31)
#define QSPI_ERR_IRQS		(RESP_FIFO_UNDERRUN | HRESP_FROM_NOC_ERR | \
				 WR_FIFO_OVERRUN)
#define QSPI_ALL_IRQS		(QSPI_ERR_IRQS | RESP_FIFO_RDY | \
				 WR_FIFO_EMPTY | WR_FIFO_FULL | \
				 TRANSACTION_DONE)

#define PIO_XFER_CTRL		0x0014
#define REQUEST_COUNT_MSK	0xffff

#define PIO_XFER_CFG		0x0018
#define TRANSFER_DIRECTION	BIT(0)
#define MULTI_IO_MODE_MSK	0xe
#define MULTI_IO_MODE_SHFT	1
#define TRANSFER_FRAGMENT	BIT(8)
#define SDR_1BIT		1
#define SDR_2BIT		2
#define SDR_4BIT		3
#define DDR_1BIT		5
#define DDR_2BIT		6
#define DDR_4BIT		7
#define DMA_DESC_SINGLE_SPI	1
#define DMA_DESC_DUAL_SPI	2
#define DMA_DESC_QUAD_SPI	3

#define PIO_XFER_STATUS		0x001c
#define WR_FIFO_BYTES_MSK	0xffff0000
#define WR_FIFO_BYTES_SHFT	16

#define PIO_DATAOUT_1B		0x0020
#define PIO_DATAOUT_4B		0x0024

#define RD_FIFO_CFG		0x0028
#define CONTINUOUS_MODE		BIT(0)

#define RD_FIFO_STATUS		0x002c
#define FIFO_EMPTY		BIT(11)
#define WR_CNTS_MSK		0x7f0
#define WR_CNTS_SHFT		4
#define RDY_64BYTE		BIT(3)
#define RDY_32BYTE		BIT(2)
#define RDY_16BYTE		BIT(1)
#define FIFO_RDY		BIT(0)

#define RD_FIFO_RESET		0x0030
#define RESET_FIFO		BIT(0)

#define NEXT_DMA_DESC_ADDR	0x0040
#define CURRENT_DMA_DESC_ADDR	0x0044
#define CURRENT_MEM_ADDR	0x0048

#define CUR_MEM_ADDR		0x0048
#define HW_VERSION		0x004c
#define RD_FIFO			0x0050
#define SAMPLING_CLK_CFG	0x0090
#define SAMPLING_CLK_STATUS	0x0094

#define QSPI_ALIGN_REQ	32

enum qspi_dir {
	QSPI_READ,
	QSPI_WRITE,
};

struct qspi_cmd_desc {
	u32 data_address;
	u32 next_descriptor;
	u32 direction:1;
	u32 multi_io_mode:3;
	u32 reserved1:4;
	u32 fragment:1;
	u32 reserved2:7;
	u32 length:16;
};

struct qspi_xfer {
	union {
		const void *tx_buf;
		void *rx_buf;
	};
	unsigned int rem_bytes;
	unsigned int buswidth;
	enum qspi_dir dir;
	bool is_last;
};

enum qspi_clocks {
	QSPI_CLK_CORE,
	QSPI_CLK_IFACE,
	QSPI_NUM_CLKS
};

/*
 * Number of entries in the sgt returned from the spi framework that
 * will be supported. Can be modified as required.
 * In practice, given max_dma_len is 64KB, the number of
 * entries is not expected to exceed 1.
 */
#define QSPI_MAX_SG 5

struct qcom_qspi {
	void __iomem *base;
	struct device *dev;
	struct clk_bulk_data *clks;
	struct qspi_xfer xfer;
	struct dma_pool *dma_cmd_pool;
	dma_addr_t dma_cmd_desc[QSPI_MAX_SG];
	void *virt_cmd_desc[QSPI_MAX_SG];
	unsigned int n_cmd_desc;
	struct icc_path *icc_path_cpu_to_qspi;
	unsigned long last_speed;
	/* Lock to protect data accessed by IRQs */
	spinlock_t lock;
};

static u32 qspi_buswidth_to_iomode(struct qcom_qspi *ctrl,
				   unsigned int buswidth)
{
	switch (buswidth) {
	case 1:
		return SDR_1BIT;
	case 2:
		return SDR_2BIT;
	case 4:
		return SDR_4BIT;
	default:
		dev_warn_once(ctrl->dev,
			      "Unexpected bus width: %u\n", buswidth);
		return SDR_1BIT;
	}
}

static void qcom_qspi_pio_xfer_cfg(struct qcom_qspi *ctrl)
{
	u32 pio_xfer_cfg;
	u32 iomode;
	const struct qspi_xfer *xfer;

	xfer = &ctrl->xfer;
	pio_xfer_cfg = readl(ctrl->base + PIO_XFER_CFG);
	pio_xfer_cfg &= ~TRANSFER_DIRECTION;
	pio_xfer_cfg |= xfer->dir;
	if (xfer->is_last)
		pio_xfer_cfg &= ~TRANSFER_FRAGMENT;
	else
		pio_xfer_cfg |= TRANSFER_FRAGMENT;
	pio_xfer_cfg &= ~MULTI_IO_MODE_MSK;
	iomode = qspi_buswidth_to_iomode(ctrl, xfer->buswidth);
	pio_xfer_cfg |= iomode << MULTI_IO_MODE_SHFT;

	writel(pio_xfer_cfg, ctrl->base + PIO_XFER_CFG);
}

static void qcom_qspi_pio_xfer_ctrl(struct qcom_qspi *ctrl)
{
	u32 pio_xfer_ctrl;

	pio_xfer_ctrl = readl(ctrl->base + PIO_XFER_CTRL);
	pio_xfer_ctrl &= ~REQUEST_COUNT_MSK;
	pio_xfer_ctrl |= ctrl->xfer.rem_bytes;
	writel(pio_xfer_ctrl, ctrl->base + PIO_XFER_CTRL);
}

static void qcom_qspi_pio_xfer(struct qcom_qspi *ctrl)
{
	u32 ints;

	qcom_qspi_pio_xfer_cfg(ctrl);

	/* Ack any previous interrupts that might be hanging around */
	writel(QSPI_ALL_IRQS, ctrl->base + MSTR_INT_STATUS);

	/* Setup new interrupts */
	if (ctrl->xfer.dir == QSPI_WRITE)
		ints = QSPI_ERR_IRQS | WR_FIFO_EMPTY;
	else
		ints = QSPI_ERR_IRQS | RESP_FIFO_RDY;
	writel(ints, ctrl->base + MSTR_INT_EN);

	/* Kick off the transfer */
	qcom_qspi_pio_xfer_ctrl(ctrl);
}

static void qcom_qspi_handle_err(struct spi_master *master,
				 struct spi_message *msg)
{
	u32 int_status;
	struct qcom_qspi *ctrl = spi_master_get_devdata(master);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctrl->lock, flags);
	writel(0, ctrl->base + MSTR_INT_EN);
	int_status = readl(ctrl->base + MSTR_INT_STATUS);
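	/* Ack anything still pending; status bits appear to be write-to-clear */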
	writel(int_status, ctrl->base + MSTR_INT_STATUS);
	ctrl->xfer.rem_bytes = 0;

	/* free cmd descriptors if they are around (DMA mode) */
	for (i = 0; i < ctrl->n_cmd_desc; i++)
		dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
			      ctrl->dma_cmd_desc[i]);
	ctrl->n_cmd_desc = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);
}

static int qcom_qspi_set_speed(struct qcom_qspi *ctrl, unsigned long speed_hz)
{
	int ret;
	unsigned int avg_bw_cpu;

	if (speed_hz == ctrl->last_speed)
		return 0;

	/* In regular operation (SBL_EN=1) core must be 4x transfer clock */
	ret = dev_pm_opp_set_rate(ctrl->dev, speed_hz * 4);
	if (ret) {
		dev_err(ctrl->dev, "Failed to set core clk %d\n", ret);
		return ret;
	}

	/*
	 * Set BW quota for CPU.
	 * We don't have explicit peak requirement so keep it equal to avg_bw.
	 */
	avg_bw_cpu = Bps_to_icc(speed_hz);
	ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, avg_bw_cpu, avg_bw_cpu);
	if (ret) {
		dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
			__func__, ret);
		return ret;
	}

	ctrl->last_speed = speed_hz;

	return 0;
}

static int qcom_qspi_alloc_desc(struct qcom_qspi *ctrl, dma_addr_t dma_ptr,
				uint32_t n_bytes)
{
	struct qspi_cmd_desc *virt_cmd_desc, *prev;
	dma_addr_t dma_cmd_desc;

	/* allocate for dma cmd descriptor */
	virt_cmd_desc = dma_pool_alloc(ctrl->dma_cmd_pool, GFP_KERNEL | __GFP_ZERO, &dma_cmd_desc);
	if (!virt_cmd_desc)
		return -ENOMEM;

	ctrl->virt_cmd_desc[ctrl->n_cmd_desc] = virt_cmd_desc;
	ctrl->dma_cmd_desc[ctrl->n_cmd_desc] = dma_cmd_desc;
	ctrl->n_cmd_desc++;

	/* setup cmd descriptor */
	virt_cmd_desc->data_address = dma_ptr;
	virt_cmd_desc->direction = ctrl->xfer.dir;
	virt_cmd_desc->multi_io_mode = qspi_buswidth_to_iomode(ctrl, ctrl->xfer.buswidth);
	virt_cmd_desc->fragment = !ctrl->xfer.is_last;
	virt_cmd_desc->length = n_bytes;

	/* update previous descriptor */
	if (ctrl->n_cmd_desc >= 2) {
		prev = (ctrl->virt_cmd_desc)[ctrl->n_cmd_desc - 2];
		prev->next_descriptor = dma_cmd_desc;
		prev->fragment = 1;
	}

	return 0;
}

static int qcom_qspi_setup_dma_desc(struct qcom_qspi *ctrl,
				    struct spi_transfer *xfer)
{
	int ret;
	struct sg_table *sgt;
	dma_addr_t dma_ptr_sg;
	unsigned int dma_len_sg;
	int i;

	if (ctrl->n_cmd_desc) {
		dev_err(ctrl->dev, "Remnant dma buffers n_cmd_desc-%d\n", ctrl->n_cmd_desc);
		return -EIO;
	}

	sgt = (ctrl->xfer.dir == QSPI_READ) ? &xfer->rx_sg : &xfer->tx_sg;
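	/* Returning -EAGAIN below makes transfer_one() fall back to PIO */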
	if (!sgt->nents || sgt->nents > QSPI_MAX_SG) {
		dev_warn_once(ctrl->dev, "Cannot handle %d entries in scatter list\n", sgt->nents);
		return -EAGAIN;
	}

	for (i = 0; i < sgt->nents; i++) {
		dma_ptr_sg = sg_dma_address(sgt->sgl + i);
		if (!IS_ALIGNED(dma_ptr_sg, QSPI_ALIGN_REQ)) {
			dev_warn_once(ctrl->dev, "dma_address not aligned to %d\n", QSPI_ALIGN_REQ);
			return -EAGAIN;
		}
	}

	for (i = 0; i < sgt->nents; i++) {
		dma_ptr_sg = sg_dma_address(sgt->sgl + i);
		dma_len_sg = sg_dma_len(sgt->sgl + i);

		ret = qcom_qspi_alloc_desc(ctrl, dma_ptr_sg, dma_len_sg);
		if (ret)
			goto cleanup;
	}
	return 0;

cleanup:
	for (i = 0; i < ctrl->n_cmd_desc; i++)
		dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
			      ctrl->dma_cmd_desc[i]);
	ctrl->n_cmd_desc = 0;
	return ret;
}

static void qcom_qspi_dma_xfer(struct qcom_qspi *ctrl)
{
	/* Setup new interrupts */
	writel(DMA_CHAIN_DONE, ctrl->base + MSTR_INT_EN);

	/* kick off transfer */
	writel((u32)((ctrl->dma_cmd_desc)[0]), ctrl->base + NEXT_DMA_DESC_ADDR);
}

/* Switch to DMA if transfer length exceeds this */
#define QSPI_MAX_BYTES_FIFO 64

static bool qcom_qspi_can_dma(struct spi_controller *ctlr,
			      struct spi_device *slv, struct spi_transfer *xfer)
{
	return xfer->len > QSPI_MAX_BYTES_FIFO;
}

static int qcom_qspi_transfer_one(struct spi_master *master,
				  struct spi_device *slv,
				  struct spi_transfer *xfer)
{
	struct qcom_qspi *ctrl = spi_master_get_devdata(master);
	int ret;
	unsigned long speed_hz;
	unsigned long flags;
	u32 mstr_cfg;

	speed_hz = slv->max_speed_hz;
	if (xfer->speed_hz)
		speed_hz = xfer->speed_hz;

	ret = qcom_qspi_set_speed(ctrl, speed_hz);
	if (ret)
		return ret;

	spin_lock_irqsave(&ctrl->lock, flags);
	mstr_cfg = readl(ctrl->base + MSTR_CONFIG);

	/* We are half duplex, so either rx or tx will be set */
	if (xfer->rx_buf) {
		ctrl->xfer.dir = QSPI_READ;
		ctrl->xfer.buswidth = xfer->rx_nbits;
		ctrl->xfer.rx_buf = xfer->rx_buf;
	} else {
		ctrl->xfer.dir = QSPI_WRITE;
		ctrl->xfer.buswidth = xfer->tx_nbits;
		ctrl->xfer.tx_buf = xfer->tx_buf;
	}
	ctrl->xfer.is_last = list_is_last(&xfer->transfer_list,
					  &master->cur_msg->transfers);
	ctrl->xfer.rem_bytes = xfer->len;

	if (xfer->rx_sg.nents || xfer->tx_sg.nents) {
		/* do DMA transfer */
		if (!(mstr_cfg & DMA_ENABLE)) {
			mstr_cfg |= DMA_ENABLE;
			writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
		}

		ret = qcom_qspi_setup_dma_desc(ctrl, xfer);
		if (ret != -EAGAIN) {
			if (!ret)
				qcom_qspi_dma_xfer(ctrl);
			goto exit;
		}
		dev_warn_once(ctrl->dev, "DMA failure, falling back to PIO\n");
		ret = 0; /* We'll retry w/ PIO */
	}

	if (mstr_cfg & DMA_ENABLE) {
		mstr_cfg &= ~DMA_ENABLE;
		writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
	}
	qcom_qspi_pio_xfer(ctrl);

exit:
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (ret)
		return ret;

	/* We'll call spi_finalize_current_transfer() when done */
	return 1;
}

static int qcom_qspi_prepare_message(struct spi_master *master,
				     struct spi_message *message)
{
	u32 mstr_cfg;
	struct qcom_qspi *ctrl;
	int tx_data_oe_delay = 1;
	int tx_data_delay = 1;
	unsigned long flags;

	ctrl = spi_master_get_devdata(master);
	spin_lock_irqsave(&ctrl->lock, flags);

	mstr_cfg = readl(ctrl->base + MSTR_CONFIG);
	mstr_cfg &= ~CHIP_SELECT_NUM;
	if (spi_get_chipselect(message->spi, 0))
		mstr_cfg |= CHIP_SELECT_NUM;

	mstr_cfg |= FB_CLK_EN | PIN_WPN | PIN_HOLDN | SBL_EN | FULL_CYCLE_MODE;
	mstr_cfg &= ~(SPI_MODE_MSK | TX_DATA_OE_DELAY_MSK | TX_DATA_DELAY_MSK);
	mstr_cfg |= message->spi->mode << SPI_MODE_SHFT;
	mstr_cfg |= tx_data_oe_delay << TX_DATA_OE_DELAY_SHFT;
	mstr_cfg |= tx_data_delay << TX_DATA_DELAY_SHFT;
	mstr_cfg &= ~DMA_ENABLE;

	writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return 0;
}

static int qcom_qspi_alloc_dma(struct qcom_qspi *ctrl)
{
	ctrl->dma_cmd_pool = dmam_pool_create("qspi cmd desc pool",
		ctrl->dev, sizeof(struct qspi_cmd_desc), 0, 0);
	if (!ctrl->dma_cmd_pool)
		return -ENOMEM;

	return 0;
}

static irqreturn_t pio_read(struct qcom_qspi *ctrl)
{
	u32 rd_fifo_status;
	u32 rd_fifo;
	unsigned int wr_cnts;
	unsigned int bytes_to_read;
	unsigned int words_to_read;
	u32 *word_buf;
	u8 *byte_buf;
	int i;

	rd_fifo_status = readl(ctrl->base + RD_FIFO_STATUS);

	if (!(rd_fifo_status & FIFO_RDY)) {
		dev_dbg(ctrl->dev, "Spurious IRQ %#x\n", rd_fifo_status);
		return IRQ_NONE;
	}

	wr_cnts = (rd_fifo_status & WR_CNTS_MSK) >> WR_CNTS_SHFT;
	wr_cnts = min(wr_cnts, ctrl->xfer.rem_bytes);

	words_to_read = wr_cnts / QSPI_BYTES_PER_WORD;
	bytes_to_read = wr_cnts % QSPI_BYTES_PER_WORD;

	if (words_to_read) {
		word_buf = ctrl->xfer.rx_buf;
		ctrl->xfer.rem_bytes -= words_to_read * QSPI_BYTES_PER_WORD;
		ioread32_rep(ctrl->base + RD_FIFO, word_buf, words_to_read);
		ctrl->xfer.rx_buf = word_buf + words_to_read;
	}

	if (bytes_to_read) {
		byte_buf = ctrl->xfer.rx_buf;
		rd_fifo = readl(ctrl->base + RD_FIFO);
		ctrl->xfer.rem_bytes -= bytes_to_read;
		for (i = 0; i < bytes_to_read; i++)
			*byte_buf++ = rd_fifo >> (i * BITS_PER_BYTE);
		ctrl->xfer.rx_buf = byte_buf;
	}

	return IRQ_HANDLED;
}

static irqreturn_t pio_write(struct qcom_qspi *ctrl)
{
	const void *xfer_buf = ctrl->xfer.tx_buf;
	const int *word_buf;
	const char *byte_buf;
	unsigned int wr_fifo_bytes;
	unsigned int wr_fifo_words;
	unsigned int wr_size;
	unsigned int rem_words;

	wr_fifo_bytes = readl(ctrl->base + PIO_XFER_STATUS);
	wr_fifo_bytes >>= WR_FIFO_BYTES_SHFT;

	if (ctrl->xfer.rem_bytes < QSPI_BYTES_PER_WORD) {
		/* Process the last 1-3 bytes */
		wr_size = min(wr_fifo_bytes, ctrl->xfer.rem_bytes);
		ctrl->xfer.rem_bytes -= wr_size;

		byte_buf = xfer_buf;
		while (wr_size--)
			writel(*byte_buf++,
			       ctrl->base + PIO_DATAOUT_1B);
		ctrl->xfer.tx_buf = byte_buf;
	} else {
		/*
		 * Process all the whole words; to keep things simple we'll
		 * just wait for the next interrupt to handle the last 1-3
		 * bytes if we don't have an even number of words.
		 */
		rem_words = ctrl->xfer.rem_bytes / QSPI_BYTES_PER_WORD;
		wr_fifo_words = wr_fifo_bytes / QSPI_BYTES_PER_WORD;

		wr_size = min(rem_words, wr_fifo_words);
		ctrl->xfer.rem_bytes -= wr_size * QSPI_BYTES_PER_WORD;

		word_buf = xfer_buf;
		iowrite32_rep(ctrl->base + PIO_DATAOUT_4B, word_buf, wr_size);
		ctrl->xfer.tx_buf = word_buf + wr_size;

	}

	return IRQ_HANDLED;
}

static irqreturn_t qcom_qspi_irq(int irq, void *dev_id)
{
	u32 int_status;
	struct qcom_qspi *ctrl = dev_id;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&ctrl->lock);

	int_status = readl(ctrl->base + MSTR_INT_STATUS);
	writel(int_status, ctrl->base + MSTR_INT_STATUS);

	/* PIO mode handling */
	if (ctrl->xfer.dir == QSPI_WRITE) {
		if (int_status & WR_FIFO_EMPTY)
			ret = pio_write(ctrl);
	} else {
		if (int_status & RESP_FIFO_RDY)
			ret = pio_read(ctrl);
	}

	if (int_status & QSPI_ERR_IRQS) {
		if (int_status & RESP_FIFO_UNDERRUN)
			dev_err(ctrl->dev, "IRQ error: FIFO underrun\n");
		if (int_status & WR_FIFO_OVERRUN)
			dev_err(ctrl->dev, "IRQ error: FIFO overrun\n");
		if (int_status & HRESP_FROM_NOC_ERR)
			dev_err(ctrl->dev, "IRQ error: NOC response error\n");
		ret = IRQ_HANDLED;
	}

	if (!ctrl->xfer.rem_bytes) {
		writel(0, ctrl->base + MSTR_INT_EN);
		spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
	}

	/* DMA mode handling */
	if (int_status & DMA_CHAIN_DONE) {
		int i;

		writel(0, ctrl->base + MSTR_INT_EN);
		ctrl->xfer.rem_bytes = 0;

		for (i = 0; i < ctrl->n_cmd_desc; i++)
			dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
				      ctrl->dma_cmd_desc[i]);
		ctrl->n_cmd_desc = 0;

		ret = IRQ_HANDLED;
		spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
	}

	spin_unlock(&ctrl->lock);
	return ret;
}

static int qcom_qspi_probe(struct platform_device *pdev)
{
	int ret;
	struct device *dev;
	struct spi_master *master;
	struct qcom_qspi *ctrl;

	dev = &pdev->dev;

	master = devm_spi_alloc_master(dev, sizeof(*ctrl));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	ctrl = spi_master_get_devdata(master);

	spin_lock_init(&ctrl->lock);
	ctrl->dev = dev;
	ctrl->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ctrl->base))
		return PTR_ERR(ctrl->base);

	ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS,
				  sizeof(*ctrl->clks), GFP_KERNEL);
	if (!ctrl->clks)
		return -ENOMEM;

	ctrl->clks[QSPI_CLK_CORE].id = "core";
	ctrl->clks[QSPI_CLK_IFACE].id = "iface";
	ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks);
	if (ret)
		return ret;

	ctrl->icc_path_cpu_to_qspi = devm_of_icc_get(dev, "qspi-config");
	if (IS_ERR(ctrl->icc_path_cpu_to_qspi))
		return dev_err_probe(dev, PTR_ERR(ctrl->icc_path_cpu_to_qspi),
				     "Failed to get cpu path\n");

	/* Set BW vote for register access */
	ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, Bps_to_icc(1000),
			 Bps_to_icc(1000));
	if (ret) {
		dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
			__func__, ret);
		return ret;
	}

	ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
	if (ret) {
		dev_err(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
			__func__, ret);
		return ret;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	ret = devm_request_irq(dev, ret, qcom_qspi_irq, 0, dev_name(dev), ctrl);
	if (ret) {
		dev_err(dev, "Failed to request irq %d\n", ret);
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return dev_err_probe(dev, ret, "could not set DMA mask\n");

	master->max_speed_hz = 300000000;
	master->max_dma_len = 65536; /* as per HPG */
	master->dma_alignment = QSPI_ALIGN_REQ;
	master->num_chipselect = QSPI_NUM_CS;
	master->bus_num = -1;
	master->dev.of_node = pdev->dev.of_node;
	master->mode_bits = SPI_MODE_0 |
			    SPI_TX_DUAL | SPI_RX_DUAL |
			    SPI_TX_QUAD | SPI_RX_QUAD;
	master->flags = SPI_MASTER_HALF_DUPLEX;
	master->prepare_message = qcom_qspi_prepare_message;
	master->transfer_one = qcom_qspi_transfer_one;
	master->handle_err = qcom_qspi_handle_err;
	if (of_property_read_bool(pdev->dev.of_node, "iommus"))
		master->can_dma = qcom_qspi_can_dma;
	master->auto_runtime_pm = true;

	ret = devm_pm_opp_set_clkname(&pdev->dev, "core");
	if (ret)
		return ret;
	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(&pdev->dev);
	if (ret && ret != -ENODEV) {
		dev_err(&pdev->dev, "invalid OPP table in device tree\n");
		return ret;
	}

	ret = qcom_qspi_alloc_dma(ctrl);
	if (ret)
		return ret;

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 250);
	pm_runtime_enable(dev);

	ret = spi_register_master(master);
	if (!ret)
		return 0;

	pm_runtime_disable(dev);

	return ret;
}

static void qcom_qspi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);

	/* Unregister _before_ disabling pm_runtime() so we stop transfers */
	spi_unregister_master(master);

	pm_runtime_disable(&pdev->dev);
}

static int __maybe_unused qcom_qspi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct qcom_qspi *ctrl = spi_master_get_devdata(master);
	int ret;

	/* Drop the performance state vote */
	dev_pm_opp_set_rate(dev, 0);
	clk_bulk_disable_unprepare(QSPI_NUM_CLKS, ctrl->clks);

	ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
	if (ret) {
		dev_err_ratelimited(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
				    __func__, ret);
		return ret;
	}

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int __maybe_unused qcom_qspi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct qcom_qspi *ctrl = spi_master_get_devdata(master);
	int ret;

	pinctrl_pm_select_default_state(dev);

	ret = icc_enable(ctrl->icc_path_cpu_to_qspi);
	if (ret) {
		dev_err_ratelimited(ctrl->dev, "%s: ICC enable failed for cpu: %d\n",
				    __func__, ret);
		return ret;
	}

	ret = clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
	if (ret)
		return ret;

	return dev_pm_opp_set_rate(dev, ctrl->last_speed * 4);
}

static int __maybe_unused qcom_qspi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	ret = pm_runtime_force_suspend(dev);
	if (ret)
		spi_master_resume(master);

	return ret;
}

static int __maybe_unused qcom_qspi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret)
		return ret;

	ret = spi_master_resume(master);
	if (ret)
		pm_runtime_force_suspend(dev);

	return ret;
}

static const struct dev_pm_ops qcom_qspi_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(qcom_qspi_runtime_suspend,
			   qcom_qspi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(qcom_qspi_suspend, qcom_qspi_resume)
};

static const struct of_device_id qcom_qspi_dt_match[] = {
	{ .compatible = "qcom,qspi-v1", },
	{ }
};
MODULE_DEVICE_TABLE(of, qcom_qspi_dt_match);

static struct platform_driver qcom_qspi_driver = {
	.driver = {
		.name		= "qcom_qspi",
		.pm		= &qcom_qspi_dev_pm_ops,
		.of_match_table = qcom_qspi_dt_match,
	},
	.probe = qcom_qspi_probe,
	.remove_new = qcom_qspi_remove,
};
module_platform_driver(qcom_qspi_driver);

MODULE_DESCRIPTION("SPI driver for QSPI cores");
MODULE_LICENSE("GPL v2");