1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (c) 2017-2021, Rockchip Inc.
6 * Author: Shawn Lin <shawn.lin@rock-chips.com>
8 * Jon Lin <Jon.lin@rock-chips.com>
14 #include <linux/dma-mapping.h>
24 #include <linux/spi/spi-mem.h>
164 /* DMA is only enabled for large data transfers */
168 * 150MHz. No minimum or average value is suggested.
190 static int rockchip_sfc_reset(struct rockchip_sfc *sfc)
195 writel_relaxed(SFC_RCVR_RESET, sfc->regbase + SFC_RCVR);
197 err = readl_poll_timeout(sfc->regbase + SFC_RCVR, status,
201 dev_err(sfc->dev, "SFC reset never finished\n");
204 writel_relaxed(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
206 dev_dbg(sfc->dev, "reset\n");
211 static u16 rockchip_sfc_get_version(struct rockchip_sfc *sfc)
213 return (u16)(readl(sfc->regbase + SFC_VER) & 0xffff);
216 static u32 rockchip_sfc_get_max_iosize(struct rockchip_sfc *sfc)
221 static int rockchip_sfc_clk_set_rate(struct rockchip_sfc *sfc, unsigned long speed)
223 if (sfc->version >= SFC_VER_8)
224 return clk_set_rate(sfc->clk, speed * 2);
226 return clk_set_rate(sfc->clk, speed);
229 static unsigned long rockchip_sfc_clk_get_rate(struct rockchip_sfc *sfc)
231 if (sfc->version >= SFC_VER_8)
232 return clk_get_rate(sfc->clk) / 2;
234 return clk_get_rate(sfc->clk);
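/*
 * Note on the two helpers above: on controller versions >= SFC_VER_8 the
 * SPI bus clock appears to be the input clock divided by two, hence the
 * requested rate is doubled on set and the reported rate halved on get.
 */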
237 static void rockchip_sfc_irq_unmask(struct rockchip_sfc *sfc, u32 mask)
242 reg = readl(sfc->regbase + SFC_IMR);
244 writel(reg, sfc->regbase + SFC_IMR);
247 static void rockchip_sfc_irq_mask(struct rockchip_sfc *sfc, u32 mask)
252 reg = readl(sfc->regbase + SFC_IMR);
254 writel(reg, sfc->regbase + SFC_IMR);
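/*
 * SFC_IMR carries one bit per maskable interrupt source; the unmask/mask
 * helpers read-modify-write the register to enable or disable the sources
 * selected by 'mask'.
 */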
257 static int rockchip_sfc_init(struct rockchip_sfc *sfc)
259 writel(0, sfc->regbase + SFC_CTRL);
260 writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
261 rockchip_sfc_irq_mask(sfc, 0xFFFFFFFF);
262 if (rockchip_sfc_get_version(sfc) >= SFC_VER_4)
263 writel(SFC_LEN_CTRL_TRB_SEL, sfc->regbase + SFC_LEN_CTRL);
268 static int rockchip_sfc_wait_txfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
273 ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
277 dev_dbg(sfc->dev, "sfc wait tx fifo timeout\n");
279 return -ETIMEDOUT;
285 static int rockchip_sfc_wait_rxfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
290 ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
294 dev_dbg(sfc->dev, "sfc wait rx fifo timeout\n");
296 return -ETIMEDOUT;
304 if (unlikely(op->dummy.nbytes && !op->addr.nbytes)) {
306 * The SFC cannot output DUMMY cycles right after CMD cycles, so
309 op->addr.nbytes = op->dummy.nbytes;
310 op->addr.buswidth = op->dummy.buswidth;
311 op->addr.val = 0xFFFFFFFF;
313 op->dummy.nbytes = 0;
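/*
 * Example of the conversion above (a sketch): an op issued as CMD plus one
 * dummy byte and no address is rewritten as CMD plus a one-byte all-ones
 * address, so the bus still sees the same number of padding cycles after
 * the opcode.
 */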
317 static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
323 u8 cs = spi_get_chipselect(mem->spi, 0);
326 cmd = op->cmd.opcode;
327 ctrl |= ((op->cmd.buswidth >> 1) << SFC_CTRL_CMD_BITS_SHIFT);
330 if (op->addr.nbytes) {
331 if (op->addr.nbytes == 4) {
333 } else if (op->addr.nbytes == 3) {
337 writel(op->addr.nbytes * 8 - 1,
338 sfc->regbase + cs * SFC_CS1_REG_OFFSET + SFC_ABIT);
341 ctrl |= ((op->addr.buswidth >> 1) << SFC_CTRL_ADDR_BITS_SHIFT);
345 if (op->dummy.nbytes) {
346 if (op->dummy.buswidth == 4)
347 cmd |= op->dummy.nbytes * 2 << SFC_CMD_DUMMY_SHIFT;
348 else if (op->dummy.buswidth == 2)
349 cmd |= op->dummy.nbytes * 4 << SFC_CMD_DUMMY_SHIFT;
351 cmd |= op->dummy.nbytes * 8 << SFC_CMD_DUMMY_SHIFT;
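/*
 * The DUMMY field of SFC_CMD counts clock cycles, not bytes: one dummy byte
 * takes 8 / buswidth cycles, which is why nbytes is scaled by 2, 4 or 8 for
 * x4, x2 and x1 buses above.
 */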
355 if (sfc->version >= SFC_VER_4) /* Clear it if no data to transfer */
356 writel(len, sfc->regbase + SFC_LEN_EXT);
360 if (op->data.dir == SPI_MEM_DATA_OUT)
363 ctrl |= ((op->data.buswidth >> 1) << SFC_CTRL_DATA_BITS_SHIFT);
365 if (!len && op->addr.nbytes)
372 dev_dbg(sfc->dev, "sfc addr.nbytes=%x(x%d) dummy.nbytes=%x(x%d)\n",
373 op->addr.nbytes, op->addr.buswidth,
374 op->dummy.nbytes, op->dummy.buswidth);
375 dev_dbg(sfc->dev, "sfc ctrl=%x cmd=%x addr=%llx len=%x\n",
376 ctrl, cmd, op->addr.val, len);
378 writel(ctrl, sfc->regbase + cs * SFC_CS1_REG_OFFSET + SFC_CTRL);
379 writel(cmd, sfc->regbase + SFC_CMD);
380 if (op->addr.nbytes)
381 writel(op->addr.val, sfc->regbase + SFC_ADDR);
386 static int rockchip_sfc_write_fifo(struct rockchip_sfc *sfc, const u8 *buf, int len)
396 tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
400 iowrite32_rep(sfc->regbase + SFC_DATA, buf, write_words);
402 dwords -= write_words;
407 tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
411 writel(tmp, sfc->regbase + SFC_DATA);
417 static int rockchip_sfc_read_fifo(struct rockchip_sfc *sfc, u8 *buf, int len)
428 rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
432 ioread32_rep(sfc->regbase + SFC_DATA, buf, read_words);
434 dwords -= read_words;
439 rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
442 tmp = readl(sfc->regbase + SFC_DATA);
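/*
 * Both FIFO helpers move data in 32-bit units via ioread32_rep()/
 * iowrite32_rep(); a trailing partial word (len & 3 bytes) is staged
 * through 'tmp' so accesses to SFC_DATA stay 32 bits wide.
 */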
449 static int rockchip_sfc_fifo_transfer_dma(struct rockchip_sfc *sfc, dma_addr_t dma_buf, size_t len)
451 writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
452 writel((u32)dma_buf, sfc->regbase + SFC_DMA_ADDR);
453 writel(SFC_DMA_TRIGGER_START, sfc->regbase + SFC_DMA_TRIGGER);
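/*
 * The DMA kick-off above is fire-and-forget: pending interrupts are
 * cleared, the bounce buffer's bus address is programmed into SFC_DMA_ADDR
 * and the transfer is started. Completion is signalled by the DMA interrupt,
 * whose handler completes &sfc->cp (see the irq handler further down).
 */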
458 static int rockchip_sfc_xfer_data_poll(struct rockchip_sfc *sfc,
461 dev_dbg(sfc->dev, "sfc xfer_poll len=%x\n", len);
463 if (op->data.dir == SPI_MEM_DATA_OUT)
464 return rockchip_sfc_write_fifo(sfc, op->data.buf.out, len);
466 return rockchip_sfc_read_fifo(sfc, op->data.buf.in, len);
469 static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc,
474 dev_dbg(sfc->dev, "sfc xfer_dma len=%x\n", len);
476 if (op->data.dir == SPI_MEM_DATA_OUT) {
477 memcpy(sfc->buffer, op->data.buf.out, len);
478 dma_sync_single_for_device(sfc->dev, sfc->dma_buffer, len, DMA_TO_DEVICE);
481 ret = rockchip_sfc_fifo_transfer_dma(sfc, sfc->dma_buffer, len);
482 if (!wait_for_completion_timeout(&sfc->cp, msecs_to_jiffies(2000))) {
483 dev_err(sfc->dev, "DMA wait for transfer finish timeout\n");
484 ret = -ETIMEDOUT;
486 rockchip_sfc_irq_mask(sfc, SFC_IMR_DMA);
488 if (op->data.dir == SPI_MEM_DATA_IN) {
489 dma_sync_single_for_cpu(sfc->dev, sfc->dma_buffer, len, DMA_FROM_DEVICE);
490 memcpy(op->data.buf.in, sfc->buffer, len);
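/*
 * DMA always goes through the driver's private bounce buffer: TX data is
 * copied in and synced for the device before the transfer, RX data is
 * synced for the CPU and copied out afterwards, so spi-mem callers never
 * have to supply DMA-able buffers themselves.
 */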
496 static int rockchip_sfc_xfer_done(struct rockchip_sfc *sfc, u32 timeout_us)
505 ret = readl_poll_timeout(sfc->regbase + SFC_SR, status,
511 ret = readl_poll_timeout(sfc->regbase + SFC_SR, status,
515 dev_err(sfc->dev, "wait sfc idle timeout\n");
516 rockchip_sfc_reset(sfc);
518 ret = -EIO;
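/*
 * A transfer that does not reach the idle state within timeout_us is
 * abandoned: the controller is reset, presumably to bring it back to a
 * known state for the next op, and -EIO is returned.
 */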
526 struct rockchip_sfc *sfc = spi_controller_get_devdata(mem->spi->controller);
527 u32 len = op->data.nbytes;
529 u8 cs = spi_get_chipselect(mem->spi, 0);
531 ret = pm_runtime_get_sync(sfc->dev);
533 pm_runtime_put_noidle(sfc->dev);
537 if (unlikely(op->max_freq != sfc->speed[cs]) &&
538 !has_acpi_companion(sfc->dev)) {
539 ret = rockchip_sfc_clk_set_rate(sfc, op->max_freq);
542 sfc->speed[cs] = op->max_freq;
543 dev_dbg(sfc->dev, "set_freq=%dHz real_freq=%ldHz\n",
544 sfc->speed[cs], rockchip_sfc_clk_get_rate(sfc));
548 rockchip_sfc_xfer_setup(sfc, mem, op, len);
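/*
 * Large, 4-byte-aligned payloads take the IRQ-driven DMA path below;
 * everything else (including all transfers when DMA is disabled via
 * "rockchip,sfc-no-dma") is handled by polling the FIFOs.
 */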
550 if (likely(sfc->use_dma) && len >= SFC_DMA_TRANS_THRETHOLD && !(len & 0x3)) {
551 init_completion(&sfc->cp);
552 rockchip_sfc_irq_unmask(sfc, SFC_IMR_DMA);
553 ret = rockchip_sfc_xfer_data_dma(sfc, op, len);
555 ret = rockchip_sfc_xfer_data_poll(sfc, op, len);
559 dev_err(sfc->dev, "xfer data failed ret %d dir %d\n", ret, op->data.dir);
561 ret = -EIO;
566 ret = rockchip_sfc_xfer_done(sfc, 100000);
568 pm_runtime_mark_last_busy(sfc->dev);
569 pm_runtime_put_autosuspend(sfc->dev);
576 struct rockchip_sfc *sfc = spi_controller_get_devdata(mem->spi->controller);
578 op->data.nbytes = min(op->data.nbytes, sfc->max_iosize);
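/*
 * Clamping op->data.nbytes here lets the spi-mem core split requests larger
 * than the controller's maximum I/O size into several smaller operations.
 */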
594 struct rockchip_sfc *sfc = dev_id;
597 reg = readl(sfc->regbase + SFC_RISR);
600 writel_relaxed(reg, sfc->regbase + SFC_ICLR);
603 complete(&sfc->cp);
613 struct device *dev = &pdev->dev;
615 struct rockchip_sfc *sfc;
619 host = devm_spi_alloc_host(&pdev->dev, sizeof(*sfc));
621 return -ENOMEM;
623 host->flags = SPI_CONTROLLER_HALF_DUPLEX;
624 host->mem_ops = &rockchip_sfc_mem_ops;
625 host->mem_caps = &rockchip_sfc_mem_caps;
626 host->dev.of_node = pdev->dev.of_node;
627 host->mode_bits = SPI_TX_QUAD | SPI_TX_DUAL | SPI_RX_QUAD | SPI_RX_DUAL;
628 host->max_speed_hz = SFC_MAX_SPEED;
629 host->num_chipselect = SFC_MAX_CHIPSELECT_NUM;
631 sfc = spi_controller_get_devdata(host);
632 sfc->dev = dev;
633 sfc->host = host;
635 sfc->regbase = devm_platform_ioremap_resource(pdev, 0);
636 if (IS_ERR(sfc->regbase))
637 return PTR_ERR(sfc->regbase);
639 if (!has_acpi_companion(&pdev->dev))
640 sfc->clk = devm_clk_get(&pdev->dev, "clk_sfc");
641 if (IS_ERR(sfc->clk))
642 return dev_err_probe(&pdev->dev, PTR_ERR(sfc->clk),
643 "Failed to get sfc interface clk\n");
645 if (!has_acpi_companion(&pdev->dev))
646 sfc->hclk = devm_clk_get(&pdev->dev, "hclk_sfc");
647 if (IS_ERR(sfc->hclk))
648 return dev_err_probe(&pdev->dev, PTR_ERR(sfc->hclk),
649 "Failed to get sfc ahb clk\n");
651 if (has_acpi_companion(&pdev->dev)) {
652 ret = device_property_read_u32(&pdev->dev, "clock-frequency", &val);
654 return dev_err_probe(&pdev->dev, ret,
655 "Failed to find clock-frequency in ACPI\n");
657 sfc->speed[i] = val;
660 sfc->use_dma = !of_property_read_bool(sfc->dev->of_node, "rockchip,sfc-no-dma");
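/*
 * DMA is used by default; a board can opt out with the boolean devicetree
 * property "rockchip,sfc-no-dma". A minimal sketch (node label assumed):
 *
 *	&sfc {
 *		rockchip,sfc-no-dma;
 *	};
 */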
662 ret = clk_prepare_enable(sfc->hclk);
664 dev_err(&pdev->dev, "Failed to enable ahb clk\n");
668 ret = clk_prepare_enable(sfc->clk);
670 dev_err(&pdev->dev, "Failed to enable interface clk\n");
680 0, pdev->name, sfc);
686 platform_set_drvdata(pdev, sfc);
688 ret = rockchip_sfc_init(sfc);
692 sfc->version = rockchip_sfc_get_version(sfc);
693 sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc);
701 if (sfc->use_dma) {
702 sfc->buffer = (u8 *)__get_free_pages(GFP_KERNEL | GFP_DMA32,
703 get_order(sfc->max_iosize));
704 if (!sfc->buffer) {
705 ret = -ENOMEM;
708 sfc->dma_buffer = virt_to_phys(sfc->buffer);
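/*
 * The bounce buffer is allocated from the DMA32 zone so that its physical
 * address fits the 32-bit SFC_DMA_ADDR register; virt_to_phys() relies on
 * the assumption that the device sees CPU physical addresses directly
 * (no IOMMU translation in between).
 */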
720 free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize));
728 clk_disable_unprepare(sfc->clk);
730 clk_disable_unprepare(sfc->hclk);
737 struct rockchip_sfc *sfc = platform_get_drvdata(pdev);
738 struct spi_controller *host = sfc->host;
741 free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize));
743 clk_disable_unprepare(sfc->clk);
744 clk_disable_unprepare(sfc->hclk);
750 struct rockchip_sfc *sfc = dev_get_drvdata(dev);
752 clk_disable_unprepare(sfc->clk);
753 clk_disable_unprepare(sfc->hclk);
760 struct rockchip_sfc *sfc = dev_get_drvdata(dev);
763 ret = clk_prepare_enable(sfc->hclk);
767 ret = clk_prepare_enable(sfc->clk);
769 clk_disable_unprepare(sfc->hclk);
785 struct rockchip_sfc *sfc = dev_get_drvdata(dev);
800 rockchip_sfc_init(sfc);
816 { .compatible = "rockchip,sfc"},
823 .name = "rockchip-sfc",
834 MODULE_AUTHOR("Shawn Lin <shawn.lin@rock-chips.com>");
836 MODULE_AUTHOR("Jon Lin <Jon.lin@rock-chips.com>");