// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author: Leilk Liu <leilk.liu@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/dma-mapping.h>

/* Controller register map */
#define SPI_CFG0_REG			0x0000
#define SPI_CFG1_REG			0x0004
#define SPI_TX_SRC_REG			0x0008
#define SPI_RX_DST_REG			0x000c
#define SPI_TX_DATA_REG			0x0010
#define SPI_RX_DATA_REG			0x0014
#define SPI_CMD_REG			0x0018
#define SPI_STATUS0_REG			0x001c
#define SPI_PAD_SEL_REG			0x0024
#define SPI_CFG2_REG			0x0028
/* Upper DMA address bits, only present on ICs with dev_comp->dma_ext */
#define SPI_TX_SRC_REG_64		0x002c
#define SPI_RX_DST_REG_64		0x0030

/* SPI_CFG0_REG field offsets (legacy layout, 8-bit timing fields) */
#define SPI_CFG0_SCK_HIGH_OFFSET	0
#define SPI_CFG0_SCK_LOW_OFFSET		8
#define SPI_CFG0_CS_HOLD_OFFSET		16
#define SPI_CFG0_CS_SETUP_OFFSET	24
/* SPI_CFG0_REG field offsets on enhance_timing ICs (16-bit CS fields) */
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET	0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET	16

/* SPI_CFG1_REG field offsets */
#define SPI_CFG1_CS_IDLE_OFFSET		0
#define SPI_CFG1_PACKET_LOOP_OFFSET	8
#define SPI_CFG1_PACKET_LENGTH_OFFSET	16
#define SPI_CFG1_GET_TICK_DLY_OFFSET	30

#define SPI_CFG1_CS_IDLE_MASK		0xff
#define SPI_CFG1_PACKET_LOOP_MASK	0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK	0x3ff0000
/* SPI_CFG2_REG field offsets (sck timing on enhance_timing ICs) */
#define SPI_CFG2_SCK_HIGH_OFFSET	0
#define SPI_CFG2_SCK_LOW_OFFSET		16

/* SPI_CMD_REG bits */
#define SPI_CMD_ACT			BIT(0)
#define SPI_CMD_RESUME			BIT(1)
#define SPI_CMD_RST			BIT(2)
#define SPI_CMD_PAUSE_EN		BIT(4)
#define SPI_CMD_DEASSERT		BIT(5)
#define SPI_CMD_SAMPLE_SEL		BIT(6)
#define SPI_CMD_CS_POL			BIT(7)
#define SPI_CMD_CPHA			BIT(8)
#define SPI_CMD_CPOL			BIT(9)
#define SPI_CMD_RX_DMA			BIT(10)
#define SPI_CMD_TX_DMA			BIT(11)
#define SPI_CMD_TXMSBF			BIT(12)
#define SPI_CMD_RXMSBF			BIT(13)
#define SPI_CMD_RX_ENDIAN		BIT(14)
#define SPI_CMD_TX_ENDIAN		BIT(15)
#define SPI_CMD_FINISH_IE		BIT(16)
#define SPI_CMD_PAUSE_IE		BIT(17)

#define MT8173_SPI_MAX_PAD_SEL		3

#define MTK_SPI_PAUSE_INT_STATUS	0x2

/* Values of mtk_spi::state, tracked from the interrupt handler */
#define MTK_SPI_IDLE			0
#define MTK_SPI_PAUSED			1

#define MTK_SPI_MAX_FIFO_SIZE		32U
#define MTK_SPI_PACKET_SIZE		1024
#define MTK_SPI_32BITS_MASK		(0xffffffff)

#define DMA_ADDR_EXT_BITS		(36)
#define DMA_ADDR_DEF_BITS		(32)

struct mtk_spi_compatible {
	/* IC needs a pad-select value programmed per chip-select */
	bool need_pad_sel;
	/* Must explicitly send dummy Tx bytes to do Rx only transfer */
	bool must_tx;
	/* some IC design adjust cfg register to enhance time accuracy */
	bool enhance_timing;
	/* some IC support DMA addr extension */
	bool dma_ext;
};

/* Per-controller driver state, stored as spi_master devdata */
struct mtk_spi {
	void __iomem *base;		/* mapped register base */
	u32 state;			/* MTK_SPI_IDLE or MTK_SPI_PAUSED */
	int pad_num;			/* number of entries in pad_sel */
	u32 *pad_sel;			/* pad-select value per chip-select */
	struct clk *parent_clk, *sel_clk, *spi_clk;
	struct spi_transfer *cur_transfer;
	u32 xfer_len;			/* bytes handled by the current HW round */
	u32 num_xfered;			/* bytes completed so far (FIFO mode) */
	struct scatterlist *tx_sgl, *rx_sgl;	/* current DMA sg entries */
	u32 tx_sgl_len, rx_sgl_len;	/* bytes remaining in current sg entries */
	const struct mtk_spi_compatible *dev_comp;
};

static const struct mtk_spi_compatible mtk_common_compat;

static const struct mtk_spi_compatible mt2712_compat = {
	.must_tx = true,
};

static const struct mtk_spi_compatible mt6765_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
};

static const struct mtk_spi_compatible mt7622_compat = {
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt8173_compat = {
	.need_pad_sel = true,
	.must_tx = true,
};

static const struct mtk_spi_compatible mt8183_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
};

/*
 * A piece of default chip info unless the platform
 * supplies it.
 */
static const struct mtk_chip_config mtk_default_chip_info = {
	.sample_sel = 0,
};

static const struct of_device_id mtk_spi_of_match[] = {
	{ .compatible = "mediatek,mt2701-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt2712-spi",
		.data = (void *)&mt2712_compat,
	},
	{ .compatible = "mediatek,mt6589-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt6765-spi",
		.data = (void *)&mt6765_compat,
	},
	{ .compatible = "mediatek,mt7622-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt7629-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt8135-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt8173-spi",
		.data = (void *)&mt8173_compat,
	},
	{ .compatible = "mediatek,mt8183-spi",
		.data = (void *)&mt8183_compat,
	},
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);

/* Pulse the software-reset bit to return the controller to a clean state. */
static void mtk_spi_reset(struct mtk_spi *mdata)
{
	u32 reg_val;

	/* set the software reset bit in SPI_CMD_REG. */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);
}

/*
 * Program SPI_CMD_REG for the whole message: clock mode (CPOL/CPHA),
 * bit order, TX/RX endianness, CS polarity and sample select (on
 * enhance_timing ICs), interrupt enables, and DMA/deassert defaults.
 * Also writes the per-device pad select on ICs that need it.
 */
static int mtk_spi_prepare_message(struct spi_master *master,
				   struct spi_message *msg)
{
	u16 cpha, cpol;
	u32 reg_val;
	struct spi_device *spi = msg->spi;
	struct mtk_chip_config *chip_config = spi->controller_data;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (cpha)
		reg_val |= SPI_CMD_CPHA;
	else
		reg_val &= ~SPI_CMD_CPHA;
	if (cpol)
		reg_val |= SPI_CMD_CPOL;
	else
		reg_val &= ~SPI_CMD_CPOL;

	/* set the mlsbx and mlsbtx */
	if (spi->mode & SPI_LSB_FIRST) {
		reg_val &= ~SPI_CMD_TXMSBF;
		reg_val &= ~SPI_CMD_RXMSBF;
	} else {
		reg_val |= SPI_CMD_TXMSBF;
		reg_val |= SPI_CMD_RXMSBF;
	}

	/* set the tx/rx endian */
#ifdef __LITTLE_ENDIAN
	reg_val &= ~SPI_CMD_TX_ENDIAN;
	reg_val &= ~SPI_CMD_RX_ENDIAN;
#else
	reg_val |= SPI_CMD_TX_ENDIAN;
	reg_val |= SPI_CMD_RX_ENDIAN;
#endif

	if (mdata->dev_comp->enhance_timing) {
		/* set CS polarity */
		if (spi->mode & SPI_CS_HIGH)
			reg_val |= SPI_CMD_CS_POL;
		else
			reg_val &= ~SPI_CMD_CS_POL;

		if (chip_config->sample_sel)
			reg_val |= SPI_CMD_SAMPLE_SEL;
		else
			reg_val &= ~SPI_CMD_SAMPLE_SEL;
	}

	/* set finish and pause interrupt always enable */
	reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;

	/* disable dma mode */
	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);

	/* disable deassert mode */
	reg_val &= ~SPI_CMD_DEASSERT;

	writel(reg_val, mdata->base + SPI_CMD_REG);

	/* pad select */
	if (mdata->dev_comp->need_pad_sel)
		writel(mdata->pad_sel[spi->chip_select],
		       mdata->base + SPI_PAD_SEL_REG);

	return 0;
}

/*
 * Chip-select control.  This controller keeps CS asserted while pause
 * mode (SPI_CMD_PAUSE_EN) is set, so "assert CS" maps to enabling pause
 * mode and "deassert" to clearing it plus resetting controller state.
 * The core calls this with enable inverted for active-low CS, hence the
 * extra flip for SPI_CS_HIGH.
 */
static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
	u32 reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (!enable) {
		reg_val |= SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
	} else {
		reg_val &= ~SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
		mdata->state = MTK_SPI_IDLE;
		mtk_spi_reset(mdata);
	}
}

static void mtk_spi_prepare_transfer(struct spi_master *master, 285 struct spi_transfer *xfer) 286 { 287 u32 spi_clk_hz, div, sck_time, cs_time, reg_val; 288 struct mtk_spi *mdata = spi_master_get_devdata(master); 289 290 spi_clk_hz = clk_get_rate(mdata->spi_clk); 291 if (xfer->speed_hz < spi_clk_hz / 2) 292 div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz); 293 else 294 div = 1; 295 296 sck_time = (div + 1) / 2; 297 cs_time = sck_time * 2; 298 299 if (mdata->dev_comp->enhance_timing) { 300 reg_val = (((sck_time - 1) & 0xffff) 301 << SPI_CFG2_SCK_HIGH_OFFSET); 302 reg_val |= (((sck_time - 1) & 0xffff) 303 << SPI_CFG2_SCK_LOW_OFFSET); 304 writel(reg_val, mdata->base + SPI_CFG2_REG); 305 reg_val = (((cs_time - 1) & 0xffff) 306 << SPI_ADJUST_CFG0_CS_HOLD_OFFSET); 307 reg_val |= (((cs_time - 1) & 0xffff) 308 << SPI_ADJUST_CFG0_CS_SETUP_OFFSET); 309 writel(reg_val, mdata->base + SPI_CFG0_REG); 310 } else { 311 reg_val = (((sck_time - 1) & 0xff) 312 << SPI_CFG0_SCK_HIGH_OFFSET); 313 reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET); 314 reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET); 315 reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET); 316 writel(reg_val, mdata->base + SPI_CFG0_REG); 317 } 318 319 reg_val = readl(mdata->base + SPI_CFG1_REG); 320 reg_val &= ~SPI_CFG1_CS_IDLE_MASK; 321 reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET); 322 writel(reg_val, mdata->base + SPI_CFG1_REG); 323 } 324 325 static void mtk_spi_setup_packet(struct spi_master *master) 326 { 327 u32 packet_size, packet_loop, reg_val; 328 struct mtk_spi *mdata = spi_master_get_devdata(master); 329 330 packet_size = min_t(u32, mdata->xfer_len, MTK_SPI_PACKET_SIZE); 331 packet_loop = mdata->xfer_len / packet_size; 332 333 reg_val = readl(mdata->base + SPI_CFG1_REG); 334 reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK); 335 reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET; 336 reg_val |= (packet_loop - 1) << 
SPI_CFG1_PACKET_LOOP_OFFSET; 337 writel(reg_val, mdata->base + SPI_CFG1_REG); 338 } 339 340 static void mtk_spi_enable_transfer(struct spi_master *master) 341 { 342 u32 cmd; 343 struct mtk_spi *mdata = spi_master_get_devdata(master); 344 345 cmd = readl(mdata->base + SPI_CMD_REG); 346 if (mdata->state == MTK_SPI_IDLE) 347 cmd |= SPI_CMD_ACT; 348 else 349 cmd |= SPI_CMD_RESUME; 350 writel(cmd, mdata->base + SPI_CMD_REG); 351 } 352 353 static int mtk_spi_get_mult_delta(u32 xfer_len) 354 { 355 u32 mult_delta; 356 357 if (xfer_len > MTK_SPI_PACKET_SIZE) 358 mult_delta = xfer_len % MTK_SPI_PACKET_SIZE; 359 else 360 mult_delta = 0; 361 362 return mult_delta; 363 } 364 365 static void mtk_spi_update_mdata_len(struct spi_master *master) 366 { 367 int mult_delta; 368 struct mtk_spi *mdata = spi_master_get_devdata(master); 369 370 if (mdata->tx_sgl_len && mdata->rx_sgl_len) { 371 if (mdata->tx_sgl_len > mdata->rx_sgl_len) { 372 mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len); 373 mdata->xfer_len = mdata->rx_sgl_len - mult_delta; 374 mdata->rx_sgl_len = mult_delta; 375 mdata->tx_sgl_len -= mdata->xfer_len; 376 } else { 377 mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len); 378 mdata->xfer_len = mdata->tx_sgl_len - mult_delta; 379 mdata->tx_sgl_len = mult_delta; 380 mdata->rx_sgl_len -= mdata->xfer_len; 381 } 382 } else if (mdata->tx_sgl_len) { 383 mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len); 384 mdata->xfer_len = mdata->tx_sgl_len - mult_delta; 385 mdata->tx_sgl_len = mult_delta; 386 } else if (mdata->rx_sgl_len) { 387 mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len); 388 mdata->xfer_len = mdata->rx_sgl_len - mult_delta; 389 mdata->rx_sgl_len = mult_delta; 390 } 391 } 392 393 static void mtk_spi_setup_dma_addr(struct spi_master *master, 394 struct spi_transfer *xfer) 395 { 396 struct mtk_spi *mdata = spi_master_get_devdata(master); 397 398 if (mdata->tx_sgl) { 399 writel((u32)(xfer->tx_dma & MTK_SPI_32BITS_MASK), 400 mdata->base + SPI_TX_SRC_REG); 
401 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 402 if (mdata->dev_comp->dma_ext) 403 writel((u32)(xfer->tx_dma >> 32), 404 mdata->base + SPI_TX_SRC_REG_64); 405 #endif 406 } 407 408 if (mdata->rx_sgl) { 409 writel((u32)(xfer->rx_dma & MTK_SPI_32BITS_MASK), 410 mdata->base + SPI_RX_DST_REG); 411 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 412 if (mdata->dev_comp->dma_ext) 413 writel((u32)(xfer->rx_dma >> 32), 414 mdata->base + SPI_RX_DST_REG_64); 415 #endif 416 } 417 } 418 419 static int mtk_spi_fifo_transfer(struct spi_master *master, 420 struct spi_device *spi, 421 struct spi_transfer *xfer) 422 { 423 int cnt, remainder; 424 u32 reg_val; 425 struct mtk_spi *mdata = spi_master_get_devdata(master); 426 427 mdata->cur_transfer = xfer; 428 mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len); 429 mdata->num_xfered = 0; 430 mtk_spi_prepare_transfer(master, xfer); 431 mtk_spi_setup_packet(master); 432 433 cnt = xfer->len / 4; 434 iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt); 435 436 remainder = xfer->len % 4; 437 if (remainder > 0) { 438 reg_val = 0; 439 memcpy(®_val, xfer->tx_buf + (cnt * 4), remainder); 440 writel(reg_val, mdata->base + SPI_TX_DATA_REG); 441 } 442 443 mtk_spi_enable_transfer(master); 444 445 return 1; 446 } 447 448 static int mtk_spi_dma_transfer(struct spi_master *master, 449 struct spi_device *spi, 450 struct spi_transfer *xfer) 451 { 452 int cmd; 453 struct mtk_spi *mdata = spi_master_get_devdata(master); 454 455 mdata->tx_sgl = NULL; 456 mdata->rx_sgl = NULL; 457 mdata->tx_sgl_len = 0; 458 mdata->rx_sgl_len = 0; 459 mdata->cur_transfer = xfer; 460 mdata->num_xfered = 0; 461 462 mtk_spi_prepare_transfer(master, xfer); 463 464 cmd = readl(mdata->base + SPI_CMD_REG); 465 if (xfer->tx_buf) 466 cmd |= SPI_CMD_TX_DMA; 467 if (xfer->rx_buf) 468 cmd |= SPI_CMD_RX_DMA; 469 writel(cmd, mdata->base + SPI_CMD_REG); 470 471 if (xfer->tx_buf) 472 mdata->tx_sgl = xfer->tx_sg.sgl; 473 if (xfer->rx_buf) 474 mdata->rx_sgl = xfer->rx_sg.sgl; 475 476 if 
(mdata->tx_sgl) { 477 xfer->tx_dma = sg_dma_address(mdata->tx_sgl); 478 mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl); 479 } 480 if (mdata->rx_sgl) { 481 xfer->rx_dma = sg_dma_address(mdata->rx_sgl); 482 mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl); 483 } 484 485 mtk_spi_update_mdata_len(master); 486 mtk_spi_setup_packet(master); 487 mtk_spi_setup_dma_addr(master, xfer); 488 mtk_spi_enable_transfer(master); 489 490 return 1; 491 } 492 493 static int mtk_spi_transfer_one(struct spi_master *master, 494 struct spi_device *spi, 495 struct spi_transfer *xfer) 496 { 497 if (master->can_dma(master, spi, xfer)) 498 return mtk_spi_dma_transfer(master, spi, xfer); 499 else 500 return mtk_spi_fifo_transfer(master, spi, xfer); 501 } 502 503 static bool mtk_spi_can_dma(struct spi_master *master, 504 struct spi_device *spi, 505 struct spi_transfer *xfer) 506 { 507 /* Buffers for DMA transactions must be 4-byte aligned */ 508 return (xfer->len > MTK_SPI_MAX_FIFO_SIZE && 509 (unsigned long)xfer->tx_buf % 4 == 0 && 510 (unsigned long)xfer->rx_buf % 4 == 0); 511 } 512 513 static int mtk_spi_setup(struct spi_device *spi) 514 { 515 struct mtk_spi *mdata = spi_master_get_devdata(spi->master); 516 517 if (!spi->controller_data) 518 spi->controller_data = (void *)&mtk_default_chip_info; 519 520 if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio)) 521 gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); 522 523 return 0; 524 } 525 526 static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id) 527 { 528 u32 cmd, reg_val, cnt, remainder, len; 529 struct spi_master *master = dev_id; 530 struct mtk_spi *mdata = spi_master_get_devdata(master); 531 struct spi_transfer *trans = mdata->cur_transfer; 532 533 reg_val = readl(mdata->base + SPI_STATUS0_REG); 534 if (reg_val & MTK_SPI_PAUSE_INT_STATUS) 535 mdata->state = MTK_SPI_PAUSED; 536 else 537 mdata->state = MTK_SPI_IDLE; 538 539 if (!master->can_dma(master, master->cur_msg->spi, trans)) { 540 if (trans->rx_buf) { 541 
cnt = mdata->xfer_len / 4; 542 ioread32_rep(mdata->base + SPI_RX_DATA_REG, 543 trans->rx_buf + mdata->num_xfered, cnt); 544 remainder = mdata->xfer_len % 4; 545 if (remainder > 0) { 546 reg_val = readl(mdata->base + SPI_RX_DATA_REG); 547 memcpy(trans->rx_buf + 548 mdata->num_xfered + 549 (cnt * 4), 550 ®_val, 551 remainder); 552 } 553 } 554 555 mdata->num_xfered += mdata->xfer_len; 556 if (mdata->num_xfered == trans->len) { 557 spi_finalize_current_transfer(master); 558 return IRQ_HANDLED; 559 } 560 561 len = trans->len - mdata->num_xfered; 562 mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len); 563 mtk_spi_setup_packet(master); 564 565 cnt = mdata->xfer_len / 4; 566 iowrite32_rep(mdata->base + SPI_TX_DATA_REG, 567 trans->tx_buf + mdata->num_xfered, cnt); 568 569 remainder = mdata->xfer_len % 4; 570 if (remainder > 0) { 571 reg_val = 0; 572 memcpy(®_val, 573 trans->tx_buf + (cnt * 4) + mdata->num_xfered, 574 remainder); 575 writel(reg_val, mdata->base + SPI_TX_DATA_REG); 576 } 577 578 mtk_spi_enable_transfer(master); 579 580 return IRQ_HANDLED; 581 } 582 583 if (mdata->tx_sgl) 584 trans->tx_dma += mdata->xfer_len; 585 if (mdata->rx_sgl) 586 trans->rx_dma += mdata->xfer_len; 587 588 if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) { 589 mdata->tx_sgl = sg_next(mdata->tx_sgl); 590 if (mdata->tx_sgl) { 591 trans->tx_dma = sg_dma_address(mdata->tx_sgl); 592 mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl); 593 } 594 } 595 if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) { 596 mdata->rx_sgl = sg_next(mdata->rx_sgl); 597 if (mdata->rx_sgl) { 598 trans->rx_dma = sg_dma_address(mdata->rx_sgl); 599 mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl); 600 } 601 } 602 603 if (!mdata->tx_sgl && !mdata->rx_sgl) { 604 /* spi disable dma */ 605 cmd = readl(mdata->base + SPI_CMD_REG); 606 cmd &= ~SPI_CMD_TX_DMA; 607 cmd &= ~SPI_CMD_RX_DMA; 608 writel(cmd, mdata->base + SPI_CMD_REG); 609 610 spi_finalize_current_transfer(master); 611 return IRQ_HANDLED; 612 } 613 614 
mtk_spi_update_mdata_len(master); 615 mtk_spi_setup_packet(master); 616 mtk_spi_setup_dma_addr(master, trans); 617 mtk_spi_enable_transfer(master); 618 619 return IRQ_HANDLED; 620 } 621 622 static int mtk_spi_probe(struct platform_device *pdev) 623 { 624 struct spi_master *master; 625 struct mtk_spi *mdata; 626 const struct of_device_id *of_id; 627 int i, irq, ret, addr_bits; 628 629 master = spi_alloc_master(&pdev->dev, sizeof(*mdata)); 630 if (!master) { 631 dev_err(&pdev->dev, "failed to alloc spi master\n"); 632 return -ENOMEM; 633 } 634 635 master->auto_runtime_pm = true; 636 master->dev.of_node = pdev->dev.of_node; 637 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; 638 639 master->set_cs = mtk_spi_set_cs; 640 master->prepare_message = mtk_spi_prepare_message; 641 master->transfer_one = mtk_spi_transfer_one; 642 master->can_dma = mtk_spi_can_dma; 643 master->setup = mtk_spi_setup; 644 645 of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node); 646 if (!of_id) { 647 dev_err(&pdev->dev, "failed to probe of_node\n"); 648 ret = -EINVAL; 649 goto err_put_master; 650 } 651 652 mdata = spi_master_get_devdata(master); 653 mdata->dev_comp = of_id->data; 654 655 if (mdata->dev_comp->enhance_timing) 656 master->mode_bits |= SPI_CS_HIGH; 657 658 if (mdata->dev_comp->must_tx) 659 master->flags = SPI_MASTER_MUST_TX; 660 661 if (mdata->dev_comp->need_pad_sel) { 662 mdata->pad_num = of_property_count_u32_elems( 663 pdev->dev.of_node, 664 "mediatek,pad-select"); 665 if (mdata->pad_num < 0) { 666 dev_err(&pdev->dev, 667 "No 'mediatek,pad-select' property\n"); 668 ret = -EINVAL; 669 goto err_put_master; 670 } 671 672 mdata->pad_sel = devm_kmalloc_array(&pdev->dev, mdata->pad_num, 673 sizeof(u32), GFP_KERNEL); 674 if (!mdata->pad_sel) { 675 ret = -ENOMEM; 676 goto err_put_master; 677 } 678 679 for (i = 0; i < mdata->pad_num; i++) { 680 of_property_read_u32_index(pdev->dev.of_node, 681 "mediatek,pad-select", 682 i, &mdata->pad_sel[i]); 683 if (mdata->pad_sel[i] > 
MT8173_SPI_MAX_PAD_SEL) { 684 dev_err(&pdev->dev, "wrong pad-sel[%d]: %u\n", 685 i, mdata->pad_sel[i]); 686 ret = -EINVAL; 687 goto err_put_master; 688 } 689 } 690 } 691 692 platform_set_drvdata(pdev, master); 693 mdata->base = devm_platform_ioremap_resource(pdev, 0); 694 if (IS_ERR(mdata->base)) { 695 ret = PTR_ERR(mdata->base); 696 goto err_put_master; 697 } 698 699 irq = platform_get_irq(pdev, 0); 700 if (irq < 0) { 701 ret = irq; 702 goto err_put_master; 703 } 704 705 if (!pdev->dev.dma_mask) 706 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 707 708 ret = devm_request_irq(&pdev->dev, irq, mtk_spi_interrupt, 709 IRQF_TRIGGER_NONE, dev_name(&pdev->dev), master); 710 if (ret) { 711 dev_err(&pdev->dev, "failed to register irq (%d)\n", ret); 712 goto err_put_master; 713 } 714 715 mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk"); 716 if (IS_ERR(mdata->parent_clk)) { 717 ret = PTR_ERR(mdata->parent_clk); 718 dev_err(&pdev->dev, "failed to get parent-clk: %d\n", ret); 719 goto err_put_master; 720 } 721 722 mdata->sel_clk = devm_clk_get(&pdev->dev, "sel-clk"); 723 if (IS_ERR(mdata->sel_clk)) { 724 ret = PTR_ERR(mdata->sel_clk); 725 dev_err(&pdev->dev, "failed to get sel-clk: %d\n", ret); 726 goto err_put_master; 727 } 728 729 mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk"); 730 if (IS_ERR(mdata->spi_clk)) { 731 ret = PTR_ERR(mdata->spi_clk); 732 dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret); 733 goto err_put_master; 734 } 735 736 ret = clk_prepare_enable(mdata->spi_clk); 737 if (ret < 0) { 738 dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret); 739 goto err_put_master; 740 } 741 742 ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk); 743 if (ret < 0) { 744 dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret); 745 clk_disable_unprepare(mdata->spi_clk); 746 goto err_put_master; 747 } 748 749 clk_disable_unprepare(mdata->spi_clk); 750 751 pm_runtime_enable(&pdev->dev); 752 753 ret = devm_spi_register_master(&pdev->dev, 
master); 754 if (ret) { 755 dev_err(&pdev->dev, "failed to register master (%d)\n", ret); 756 goto err_disable_runtime_pm; 757 } 758 759 if (mdata->dev_comp->need_pad_sel) { 760 if (mdata->pad_num != master->num_chipselect) { 761 dev_err(&pdev->dev, 762 "pad_num does not match num_chipselect(%d != %d)\n", 763 mdata->pad_num, master->num_chipselect); 764 ret = -EINVAL; 765 goto err_disable_runtime_pm; 766 } 767 768 if (!master->cs_gpios && master->num_chipselect > 1) { 769 dev_err(&pdev->dev, 770 "cs_gpios not specified and num_chipselect > 1\n"); 771 ret = -EINVAL; 772 goto err_disable_runtime_pm; 773 } 774 775 if (master->cs_gpios) { 776 for (i = 0; i < master->num_chipselect; i++) { 777 ret = devm_gpio_request(&pdev->dev, 778 master->cs_gpios[i], 779 dev_name(&pdev->dev)); 780 if (ret) { 781 dev_err(&pdev->dev, 782 "can't get CS GPIO %i\n", i); 783 goto err_disable_runtime_pm; 784 } 785 } 786 } 787 } 788 789 if (mdata->dev_comp->dma_ext) 790 addr_bits = DMA_ADDR_EXT_BITS; 791 else 792 addr_bits = DMA_ADDR_DEF_BITS; 793 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(addr_bits)); 794 if (ret) 795 dev_notice(&pdev->dev, "SPI dma_set_mask(%d) failed, ret:%d\n", 796 addr_bits, ret); 797 798 return 0; 799 800 err_disable_runtime_pm: 801 pm_runtime_disable(&pdev->dev); 802 err_put_master: 803 spi_master_put(master); 804 805 return ret; 806 } 807 808 static int mtk_spi_remove(struct platform_device *pdev) 809 { 810 struct spi_master *master = platform_get_drvdata(pdev); 811 struct mtk_spi *mdata = spi_master_get_devdata(master); 812 813 pm_runtime_disable(&pdev->dev); 814 815 mtk_spi_reset(mdata); 816 817 return 0; 818 } 819 820 #ifdef CONFIG_PM_SLEEP 821 static int mtk_spi_suspend(struct device *dev) 822 { 823 int ret; 824 struct spi_master *master = dev_get_drvdata(dev); 825 struct mtk_spi *mdata = spi_master_get_devdata(master); 826 827 ret = spi_master_suspend(master); 828 if (ret) 829 return ret; 830 831 if (!pm_runtime_suspended(dev)) 832 
clk_disable_unprepare(mdata->spi_clk); 833 834 return ret; 835 } 836 837 static int mtk_spi_resume(struct device *dev) 838 { 839 int ret; 840 struct spi_master *master = dev_get_drvdata(dev); 841 struct mtk_spi *mdata = spi_master_get_devdata(master); 842 843 if (!pm_runtime_suspended(dev)) { 844 ret = clk_prepare_enable(mdata->spi_clk); 845 if (ret < 0) { 846 dev_err(dev, "failed to enable spi_clk (%d)\n", ret); 847 return ret; 848 } 849 } 850 851 ret = spi_master_resume(master); 852 if (ret < 0) 853 clk_disable_unprepare(mdata->spi_clk); 854 855 return ret; 856 } 857 #endif /* CONFIG_PM_SLEEP */ 858 859 #ifdef CONFIG_PM 860 static int mtk_spi_runtime_suspend(struct device *dev) 861 { 862 struct spi_master *master = dev_get_drvdata(dev); 863 struct mtk_spi *mdata = spi_master_get_devdata(master); 864 865 clk_disable_unprepare(mdata->spi_clk); 866 867 return 0; 868 } 869 870 static int mtk_spi_runtime_resume(struct device *dev) 871 { 872 struct spi_master *master = dev_get_drvdata(dev); 873 struct mtk_spi *mdata = spi_master_get_devdata(master); 874 int ret; 875 876 ret = clk_prepare_enable(mdata->spi_clk); 877 if (ret < 0) { 878 dev_err(dev, "failed to enable spi_clk (%d)\n", ret); 879 return ret; 880 } 881 882 return 0; 883 } 884 #endif /* CONFIG_PM */ 885 886 static const struct dev_pm_ops mtk_spi_pm = { 887 SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume) 888 SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend, 889 mtk_spi_runtime_resume, NULL) 890 }; 891 892 static struct platform_driver mtk_spi_driver = { 893 .driver = { 894 .name = "mtk-spi", 895 .pm = &mtk_spi_pm, 896 .of_match_table = mtk_spi_of_match, 897 }, 898 .probe = mtk_spi_probe, 899 .remove = mtk_spi_remove, 900 }; 901 902 module_platform_driver(mtk_spi_driver); 903 904 MODULE_DESCRIPTION("MTK SPI Controller driver"); 905 MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>"); 906 MODULE_LICENSE("GPL v2"); 907 MODULE_ALIAS("platform:mtk-spi"); 908