// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author: Leilk Liu <leilk.liu@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

/* Controller register map: byte offsets from mdata->base. */
#define SPI_CFG0_REG                      0x0000
#define SPI_CFG1_REG                      0x0004
#define SPI_TX_SRC_REG                    0x0008
#define SPI_RX_DST_REG                    0x000c
#define SPI_TX_DATA_REG                   0x0010
#define SPI_RX_DATA_REG                   0x0014
#define SPI_CMD_REG                       0x0018
#define SPI_STATUS0_REG                   0x001c
#define SPI_PAD_SEL_REG                   0x0024
#define SPI_CFG2_REG                      0x0028

/* SPI_CFG0 field offsets (classic layout: four 8-bit timing fields). */
#define SPI_CFG0_SCK_HIGH_OFFSET          0
#define SPI_CFG0_SCK_LOW_OFFSET           8
#define SPI_CFG0_CS_HOLD_OFFSET           16
#define SPI_CFG0_CS_SETUP_OFFSET          24
/*
 * Field offsets used instead on enhance_timing ICs: sck high/low go to
 * SPI_CFG2 (high at 0, low at 16) and cs hold/setup to SPI_CFG0
 * (hold at 0, setup at 16) as 16-bit fields.
 */
#define SPI_ADJUST_CFG0_SCK_LOW_OFFSET    16
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET    0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET   16

/* SPI_CFG1 field offsets and masks. */
#define SPI_CFG1_CS_IDLE_OFFSET           0
#define SPI_CFG1_PACKET_LOOP_OFFSET       8
#define SPI_CFG1_PACKET_LENGTH_OFFSET     16
#define SPI_CFG1_GET_TICK_DLY_OFFSET      30

#define SPI_CFG1_CS_IDLE_MASK             0xff
#define SPI_CFG1_PACKET_LOOP_MASK         0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK       0x3ff0000

/* SPI_CMD control/configuration bits. */
#define SPI_CMD_ACT                  BIT(0)
#define SPI_CMD_RESUME               BIT(1)
#define SPI_CMD_RST                  BIT(2)
#define SPI_CMD_PAUSE_EN             BIT(4)
#define SPI_CMD_DEASSERT             BIT(5)
#define SPI_CMD_SAMPLE_SEL           BIT(6)
#define SPI_CMD_CS_POL               BIT(7)
#define SPI_CMD_CPHA                 BIT(8)
#define SPI_CMD_CPOL                 BIT(9)
#define SPI_CMD_RX_DMA               BIT(10)
#define SPI_CMD_TX_DMA               BIT(11)
#define SPI_CMD_TXMSBF               BIT(12)
#define SPI_CMD_RXMSBF               BIT(13)
#define SPI_CMD_RX_ENDIAN            BIT(14)
#define SPI_CMD_TX_ENDIAN            BIT(15)
#define SPI_CMD_FINISH_IE            BIT(16)
#define SPI_CMD_PAUSE_IE             BIT(17)

#define MT8173_SPI_MAX_PAD_SEL 3

/* Bit in SPI_STATUS0 indicating a pause (not finish) interrupt. */
#define MTK_SPI_PAUSE_INT_STATUS 0x2

/* Software transfer-engine state, kept in mtk_spi::state. */
#define MTK_SPI_IDLE 0
#define MTK_SPI_PAUSED 1

#define MTK_SPI_MAX_FIFO_SIZE 32U
#define MTK_SPI_PACKET_SIZE 1024

/* Per-SoC capability flags, selected via the of_match table. */
struct mtk_spi_compatible {
	bool need_pad_sel;
	/* Must explicitly send dummy Tx bytes to do Rx only transfer */
	bool must_tx;
	/* some IC design adjust cfg register to enhance time accuracy */
	bool enhance_timing;
};

/* Driver-private state, stored in the spi_master's devdata. */
struct mtk_spi {
	void __iomem *base;			/* mapped register window */
	u32 state;				/* MTK_SPI_IDLE or MTK_SPI_PAUSED */
	int pad_num;				/* entries in pad_sel[] */
	u32 *pad_sel;				/* per-chipselect pad routing */
	struct clk *parent_clk, *sel_clk, *spi_clk;
	struct spi_transfer *cur_transfer;	/* transfer currently in flight */
	u32 xfer_len;				/* bytes in the current chunk */
	u32 num_xfered;				/* bytes completed so far (FIFO mode) */
	struct scatterlist *tx_sgl, *rx_sgl;	/* current DMA scatterlist entries */
	u32 tx_sgl_len, rx_sgl_len;		/* bytes remaining in each entry */
	const struct mtk_spi_compatible *dev_comp;
};

static const struct mtk_spi_compatible mtk_common_compat;

static const struct mtk_spi_compatible mt2712_compat = {
	.must_tx = true,
};

static const struct mtk_spi_compatible mt7622_compat = {
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt8173_compat = {
	.need_pad_sel = true,
	.must_tx = true,
};

static const struct mtk_spi_compatible mt8183_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
};

/*
 * A piece of default chip info unless the platform
 * supplies it.
124 */ 125 static const struct mtk_chip_config mtk_default_chip_info = { 126 .rx_mlsb = 1, 127 .tx_mlsb = 1, 128 .cs_pol = 0, 129 .sample_sel = 0, 130 }; 131 132 static const struct of_device_id mtk_spi_of_match[] = { 133 { .compatible = "mediatek,mt2701-spi", 134 .data = (void *)&mtk_common_compat, 135 }, 136 { .compatible = "mediatek,mt2712-spi", 137 .data = (void *)&mt2712_compat, 138 }, 139 { .compatible = "mediatek,mt6589-spi", 140 .data = (void *)&mtk_common_compat, 141 }, 142 { .compatible = "mediatek,mt7622-spi", 143 .data = (void *)&mt7622_compat, 144 }, 145 { .compatible = "mediatek,mt7629-spi", 146 .data = (void *)&mt7622_compat, 147 }, 148 { .compatible = "mediatek,mt8135-spi", 149 .data = (void *)&mtk_common_compat, 150 }, 151 { .compatible = "mediatek,mt8173-spi", 152 .data = (void *)&mt8173_compat, 153 }, 154 { .compatible = "mediatek,mt8183-spi", 155 .data = (void *)&mt8183_compat, 156 }, 157 {} 158 }; 159 MODULE_DEVICE_TABLE(of, mtk_spi_of_match); 160 161 static void mtk_spi_reset(struct mtk_spi *mdata) 162 { 163 u32 reg_val; 164 165 /* set the software reset bit in SPI_CMD_REG. */ 166 reg_val = readl(mdata->base + SPI_CMD_REG); 167 reg_val |= SPI_CMD_RST; 168 writel(reg_val, mdata->base + SPI_CMD_REG); 169 170 reg_val = readl(mdata->base + SPI_CMD_REG); 171 reg_val &= ~SPI_CMD_RST; 172 writel(reg_val, mdata->base + SPI_CMD_REG); 173 } 174 175 static int mtk_spi_prepare_message(struct spi_master *master, 176 struct spi_message *msg) 177 { 178 u16 cpha, cpol; 179 u32 reg_val; 180 struct spi_device *spi = msg->spi; 181 struct mtk_chip_config *chip_config = spi->controller_data; 182 struct mtk_spi *mdata = spi_master_get_devdata(master); 183 184 cpha = spi->mode & SPI_CPHA ? 1 : 0; 185 cpol = spi->mode & SPI_CPOL ? 
1 : 0; 186 187 reg_val = readl(mdata->base + SPI_CMD_REG); 188 if (cpha) 189 reg_val |= SPI_CMD_CPHA; 190 else 191 reg_val &= ~SPI_CMD_CPHA; 192 if (cpol) 193 reg_val |= SPI_CMD_CPOL; 194 else 195 reg_val &= ~SPI_CMD_CPOL; 196 197 /* set the mlsbx and mlsbtx */ 198 if (chip_config->tx_mlsb) 199 reg_val |= SPI_CMD_TXMSBF; 200 else 201 reg_val &= ~SPI_CMD_TXMSBF; 202 if (chip_config->rx_mlsb) 203 reg_val |= SPI_CMD_RXMSBF; 204 else 205 reg_val &= ~SPI_CMD_RXMSBF; 206 207 /* set the tx/rx endian */ 208 #ifdef __LITTLE_ENDIAN 209 reg_val &= ~SPI_CMD_TX_ENDIAN; 210 reg_val &= ~SPI_CMD_RX_ENDIAN; 211 #else 212 reg_val |= SPI_CMD_TX_ENDIAN; 213 reg_val |= SPI_CMD_RX_ENDIAN; 214 #endif 215 216 if (mdata->dev_comp->enhance_timing) { 217 if (chip_config->cs_pol) 218 reg_val |= SPI_CMD_CS_POL; 219 else 220 reg_val &= ~SPI_CMD_CS_POL; 221 if (chip_config->sample_sel) 222 reg_val |= SPI_CMD_SAMPLE_SEL; 223 else 224 reg_val &= ~SPI_CMD_SAMPLE_SEL; 225 } 226 227 /* set finish and pause interrupt always enable */ 228 reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE; 229 230 /* disable dma mode */ 231 reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA); 232 233 /* disable deassert mode */ 234 reg_val &= ~SPI_CMD_DEASSERT; 235 236 writel(reg_val, mdata->base + SPI_CMD_REG); 237 238 /* pad select */ 239 if (mdata->dev_comp->need_pad_sel) 240 writel(mdata->pad_sel[spi->chip_select], 241 mdata->base + SPI_PAD_SEL_REG); 242 243 return 0; 244 } 245 246 static void mtk_spi_set_cs(struct spi_device *spi, bool enable) 247 { 248 u32 reg_val; 249 struct mtk_spi *mdata = spi_master_get_devdata(spi->master); 250 251 reg_val = readl(mdata->base + SPI_CMD_REG); 252 if (!enable) { 253 reg_val |= SPI_CMD_PAUSE_EN; 254 writel(reg_val, mdata->base + SPI_CMD_REG); 255 } else { 256 reg_val &= ~SPI_CMD_PAUSE_EN; 257 writel(reg_val, mdata->base + SPI_CMD_REG); 258 mdata->state = MTK_SPI_IDLE; 259 mtk_spi_reset(mdata); 260 } 261 } 262 263 static void mtk_spi_prepare_transfer(struct spi_master *master, 264 struct 
spi_transfer *xfer) 265 { 266 u32 spi_clk_hz, div, sck_time, cs_time, reg_val = 0; 267 struct mtk_spi *mdata = spi_master_get_devdata(master); 268 269 spi_clk_hz = clk_get_rate(mdata->spi_clk); 270 if (xfer->speed_hz < spi_clk_hz / 2) 271 div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz); 272 else 273 div = 1; 274 275 sck_time = (div + 1) / 2; 276 cs_time = sck_time * 2; 277 278 if (mdata->dev_comp->enhance_timing) { 279 reg_val |= (((sck_time - 1) & 0xffff) 280 << SPI_CFG0_SCK_HIGH_OFFSET); 281 reg_val |= (((sck_time - 1) & 0xffff) 282 << SPI_ADJUST_CFG0_SCK_LOW_OFFSET); 283 writel(reg_val, mdata->base + SPI_CFG2_REG); 284 reg_val |= (((cs_time - 1) & 0xffff) 285 << SPI_ADJUST_CFG0_CS_HOLD_OFFSET); 286 reg_val |= (((cs_time - 1) & 0xffff) 287 << SPI_ADJUST_CFG0_CS_SETUP_OFFSET); 288 writel(reg_val, mdata->base + SPI_CFG0_REG); 289 } else { 290 reg_val |= (((sck_time - 1) & 0xff) 291 << SPI_CFG0_SCK_HIGH_OFFSET); 292 reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET); 293 reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET); 294 reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET); 295 writel(reg_val, mdata->base + SPI_CFG0_REG); 296 } 297 298 reg_val = readl(mdata->base + SPI_CFG1_REG); 299 reg_val &= ~SPI_CFG1_CS_IDLE_MASK; 300 reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET); 301 writel(reg_val, mdata->base + SPI_CFG1_REG); 302 } 303 304 static void mtk_spi_setup_packet(struct spi_master *master) 305 { 306 u32 packet_size, packet_loop, reg_val; 307 struct mtk_spi *mdata = spi_master_get_devdata(master); 308 309 packet_size = min_t(u32, mdata->xfer_len, MTK_SPI_PACKET_SIZE); 310 packet_loop = mdata->xfer_len / packet_size; 311 312 reg_val = readl(mdata->base + SPI_CFG1_REG); 313 reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK); 314 reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET; 315 reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET; 316 writel(reg_val, mdata->base + 
SPI_CFG1_REG); 317 } 318 319 static void mtk_spi_enable_transfer(struct spi_master *master) 320 { 321 u32 cmd; 322 struct mtk_spi *mdata = spi_master_get_devdata(master); 323 324 cmd = readl(mdata->base + SPI_CMD_REG); 325 if (mdata->state == MTK_SPI_IDLE) 326 cmd |= SPI_CMD_ACT; 327 else 328 cmd |= SPI_CMD_RESUME; 329 writel(cmd, mdata->base + SPI_CMD_REG); 330 } 331 332 static int mtk_spi_get_mult_delta(u32 xfer_len) 333 { 334 u32 mult_delta; 335 336 if (xfer_len > MTK_SPI_PACKET_SIZE) 337 mult_delta = xfer_len % MTK_SPI_PACKET_SIZE; 338 else 339 mult_delta = 0; 340 341 return mult_delta; 342 } 343 344 static void mtk_spi_update_mdata_len(struct spi_master *master) 345 { 346 int mult_delta; 347 struct mtk_spi *mdata = spi_master_get_devdata(master); 348 349 if (mdata->tx_sgl_len && mdata->rx_sgl_len) { 350 if (mdata->tx_sgl_len > mdata->rx_sgl_len) { 351 mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len); 352 mdata->xfer_len = mdata->rx_sgl_len - mult_delta; 353 mdata->rx_sgl_len = mult_delta; 354 mdata->tx_sgl_len -= mdata->xfer_len; 355 } else { 356 mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len); 357 mdata->xfer_len = mdata->tx_sgl_len - mult_delta; 358 mdata->tx_sgl_len = mult_delta; 359 mdata->rx_sgl_len -= mdata->xfer_len; 360 } 361 } else if (mdata->tx_sgl_len) { 362 mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len); 363 mdata->xfer_len = mdata->tx_sgl_len - mult_delta; 364 mdata->tx_sgl_len = mult_delta; 365 } else if (mdata->rx_sgl_len) { 366 mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len); 367 mdata->xfer_len = mdata->rx_sgl_len - mult_delta; 368 mdata->rx_sgl_len = mult_delta; 369 } 370 } 371 372 static void mtk_spi_setup_dma_addr(struct spi_master *master, 373 struct spi_transfer *xfer) 374 { 375 struct mtk_spi *mdata = spi_master_get_devdata(master); 376 377 if (mdata->tx_sgl) 378 writel(xfer->tx_dma, mdata->base + SPI_TX_SRC_REG); 379 if (mdata->rx_sgl) 380 writel(xfer->rx_dma, mdata->base + SPI_RX_DST_REG); 381 } 382 383 
static int mtk_spi_fifo_transfer(struct spi_master *master, 384 struct spi_device *spi, 385 struct spi_transfer *xfer) 386 { 387 int cnt, remainder; 388 u32 reg_val; 389 struct mtk_spi *mdata = spi_master_get_devdata(master); 390 391 mdata->cur_transfer = xfer; 392 mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len); 393 mdata->num_xfered = 0; 394 mtk_spi_prepare_transfer(master, xfer); 395 mtk_spi_setup_packet(master); 396 397 cnt = xfer->len / 4; 398 iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt); 399 400 remainder = xfer->len % 4; 401 if (remainder > 0) { 402 reg_val = 0; 403 memcpy(®_val, xfer->tx_buf + (cnt * 4), remainder); 404 writel(reg_val, mdata->base + SPI_TX_DATA_REG); 405 } 406 407 mtk_spi_enable_transfer(master); 408 409 return 1; 410 } 411 412 static int mtk_spi_dma_transfer(struct spi_master *master, 413 struct spi_device *spi, 414 struct spi_transfer *xfer) 415 { 416 int cmd; 417 struct mtk_spi *mdata = spi_master_get_devdata(master); 418 419 mdata->tx_sgl = NULL; 420 mdata->rx_sgl = NULL; 421 mdata->tx_sgl_len = 0; 422 mdata->rx_sgl_len = 0; 423 mdata->cur_transfer = xfer; 424 mdata->num_xfered = 0; 425 426 mtk_spi_prepare_transfer(master, xfer); 427 428 cmd = readl(mdata->base + SPI_CMD_REG); 429 if (xfer->tx_buf) 430 cmd |= SPI_CMD_TX_DMA; 431 if (xfer->rx_buf) 432 cmd |= SPI_CMD_RX_DMA; 433 writel(cmd, mdata->base + SPI_CMD_REG); 434 435 if (xfer->tx_buf) 436 mdata->tx_sgl = xfer->tx_sg.sgl; 437 if (xfer->rx_buf) 438 mdata->rx_sgl = xfer->rx_sg.sgl; 439 440 if (mdata->tx_sgl) { 441 xfer->tx_dma = sg_dma_address(mdata->tx_sgl); 442 mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl); 443 } 444 if (mdata->rx_sgl) { 445 xfer->rx_dma = sg_dma_address(mdata->rx_sgl); 446 mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl); 447 } 448 449 mtk_spi_update_mdata_len(master); 450 mtk_spi_setup_packet(master); 451 mtk_spi_setup_dma_addr(master, xfer); 452 mtk_spi_enable_transfer(master); 453 454 return 1; 455 } 456 457 static int 
mtk_spi_transfer_one(struct spi_master *master, 458 struct spi_device *spi, 459 struct spi_transfer *xfer) 460 { 461 if (master->can_dma(master, spi, xfer)) 462 return mtk_spi_dma_transfer(master, spi, xfer); 463 else 464 return mtk_spi_fifo_transfer(master, spi, xfer); 465 } 466 467 static bool mtk_spi_can_dma(struct spi_master *master, 468 struct spi_device *spi, 469 struct spi_transfer *xfer) 470 { 471 /* Buffers for DMA transactions must be 4-byte aligned */ 472 return (xfer->len > MTK_SPI_MAX_FIFO_SIZE && 473 (unsigned long)xfer->tx_buf % 4 == 0 && 474 (unsigned long)xfer->rx_buf % 4 == 0); 475 } 476 477 static int mtk_spi_setup(struct spi_device *spi) 478 { 479 struct mtk_spi *mdata = spi_master_get_devdata(spi->master); 480 481 if (!spi->controller_data) 482 spi->controller_data = (void *)&mtk_default_chip_info; 483 484 if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio)) 485 gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); 486 487 return 0; 488 } 489 490 static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id) 491 { 492 u32 cmd, reg_val, cnt, remainder, len; 493 struct spi_master *master = dev_id; 494 struct mtk_spi *mdata = spi_master_get_devdata(master); 495 struct spi_transfer *trans = mdata->cur_transfer; 496 497 reg_val = readl(mdata->base + SPI_STATUS0_REG); 498 if (reg_val & MTK_SPI_PAUSE_INT_STATUS) 499 mdata->state = MTK_SPI_PAUSED; 500 else 501 mdata->state = MTK_SPI_IDLE; 502 503 if (!master->can_dma(master, master->cur_msg->spi, trans)) { 504 if (trans->rx_buf) { 505 cnt = mdata->xfer_len / 4; 506 ioread32_rep(mdata->base + SPI_RX_DATA_REG, 507 trans->rx_buf + mdata->num_xfered, cnt); 508 remainder = mdata->xfer_len % 4; 509 if (remainder > 0) { 510 reg_val = readl(mdata->base + SPI_RX_DATA_REG); 511 memcpy(trans->rx_buf + 512 mdata->num_xfered + 513 (cnt * 4), 514 ®_val, 515 remainder); 516 } 517 } 518 519 mdata->num_xfered += mdata->xfer_len; 520 if (mdata->num_xfered == trans->len) { 521 
spi_finalize_current_transfer(master); 522 return IRQ_HANDLED; 523 } 524 525 len = trans->len - mdata->num_xfered; 526 mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len); 527 mtk_spi_setup_packet(master); 528 529 cnt = mdata->xfer_len / 4; 530 iowrite32_rep(mdata->base + SPI_TX_DATA_REG, 531 trans->tx_buf + mdata->num_xfered, cnt); 532 533 remainder = mdata->xfer_len % 4; 534 if (remainder > 0) { 535 reg_val = 0; 536 memcpy(®_val, 537 trans->tx_buf + (cnt * 4) + mdata->num_xfered, 538 remainder); 539 writel(reg_val, mdata->base + SPI_TX_DATA_REG); 540 } 541 542 mtk_spi_enable_transfer(master); 543 544 return IRQ_HANDLED; 545 } 546 547 if (mdata->tx_sgl) 548 trans->tx_dma += mdata->xfer_len; 549 if (mdata->rx_sgl) 550 trans->rx_dma += mdata->xfer_len; 551 552 if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) { 553 mdata->tx_sgl = sg_next(mdata->tx_sgl); 554 if (mdata->tx_sgl) { 555 trans->tx_dma = sg_dma_address(mdata->tx_sgl); 556 mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl); 557 } 558 } 559 if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) { 560 mdata->rx_sgl = sg_next(mdata->rx_sgl); 561 if (mdata->rx_sgl) { 562 trans->rx_dma = sg_dma_address(mdata->rx_sgl); 563 mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl); 564 } 565 } 566 567 if (!mdata->tx_sgl && !mdata->rx_sgl) { 568 /* spi disable dma */ 569 cmd = readl(mdata->base + SPI_CMD_REG); 570 cmd &= ~SPI_CMD_TX_DMA; 571 cmd &= ~SPI_CMD_RX_DMA; 572 writel(cmd, mdata->base + SPI_CMD_REG); 573 574 spi_finalize_current_transfer(master); 575 return IRQ_HANDLED; 576 } 577 578 mtk_spi_update_mdata_len(master); 579 mtk_spi_setup_packet(master); 580 mtk_spi_setup_dma_addr(master, trans); 581 mtk_spi_enable_transfer(master); 582 583 return IRQ_HANDLED; 584 } 585 586 static int mtk_spi_probe(struct platform_device *pdev) 587 { 588 struct spi_master *master; 589 struct mtk_spi *mdata; 590 const struct of_device_id *of_id; 591 struct resource *res; 592 int i, irq, ret; 593 594 master = spi_alloc_master(&pdev->dev, sizeof(*mdata)); 595 
if (!master) { 596 dev_err(&pdev->dev, "failed to alloc spi master\n"); 597 return -ENOMEM; 598 } 599 600 master->auto_runtime_pm = true; 601 master->dev.of_node = pdev->dev.of_node; 602 master->mode_bits = SPI_CPOL | SPI_CPHA; 603 604 master->set_cs = mtk_spi_set_cs; 605 master->prepare_message = mtk_spi_prepare_message; 606 master->transfer_one = mtk_spi_transfer_one; 607 master->can_dma = mtk_spi_can_dma; 608 master->setup = mtk_spi_setup; 609 610 of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node); 611 if (!of_id) { 612 dev_err(&pdev->dev, "failed to probe of_node\n"); 613 ret = -EINVAL; 614 goto err_put_master; 615 } 616 617 mdata = spi_master_get_devdata(master); 618 mdata->dev_comp = of_id->data; 619 if (mdata->dev_comp->must_tx) 620 master->flags = SPI_MASTER_MUST_TX; 621 622 if (mdata->dev_comp->need_pad_sel) { 623 mdata->pad_num = of_property_count_u32_elems( 624 pdev->dev.of_node, 625 "mediatek,pad-select"); 626 if (mdata->pad_num < 0) { 627 dev_err(&pdev->dev, 628 "No 'mediatek,pad-select' property\n"); 629 ret = -EINVAL; 630 goto err_put_master; 631 } 632 633 mdata->pad_sel = devm_kmalloc_array(&pdev->dev, mdata->pad_num, 634 sizeof(u32), GFP_KERNEL); 635 if (!mdata->pad_sel) { 636 ret = -ENOMEM; 637 goto err_put_master; 638 } 639 640 for (i = 0; i < mdata->pad_num; i++) { 641 of_property_read_u32_index(pdev->dev.of_node, 642 "mediatek,pad-select", 643 i, &mdata->pad_sel[i]); 644 if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL) { 645 dev_err(&pdev->dev, "wrong pad-sel[%d]: %u\n", 646 i, mdata->pad_sel[i]); 647 ret = -EINVAL; 648 goto err_put_master; 649 } 650 } 651 } 652 653 platform_set_drvdata(pdev, master); 654 655 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 656 if (!res) { 657 ret = -ENODEV; 658 dev_err(&pdev->dev, "failed to determine base address\n"); 659 goto err_put_master; 660 } 661 662 mdata->base = devm_ioremap_resource(&pdev->dev, res); 663 if (IS_ERR(mdata->base)) { 664 ret = PTR_ERR(mdata->base); 665 goto err_put_master; 
666 } 667 668 irq = platform_get_irq(pdev, 0); 669 if (irq < 0) { 670 dev_err(&pdev->dev, "failed to get irq (%d)\n", irq); 671 ret = irq; 672 goto err_put_master; 673 } 674 675 if (!pdev->dev.dma_mask) 676 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 677 678 ret = devm_request_irq(&pdev->dev, irq, mtk_spi_interrupt, 679 IRQF_TRIGGER_NONE, dev_name(&pdev->dev), master); 680 if (ret) { 681 dev_err(&pdev->dev, "failed to register irq (%d)\n", ret); 682 goto err_put_master; 683 } 684 685 mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk"); 686 if (IS_ERR(mdata->parent_clk)) { 687 ret = PTR_ERR(mdata->parent_clk); 688 dev_err(&pdev->dev, "failed to get parent-clk: %d\n", ret); 689 goto err_put_master; 690 } 691 692 mdata->sel_clk = devm_clk_get(&pdev->dev, "sel-clk"); 693 if (IS_ERR(mdata->sel_clk)) { 694 ret = PTR_ERR(mdata->sel_clk); 695 dev_err(&pdev->dev, "failed to get sel-clk: %d\n", ret); 696 goto err_put_master; 697 } 698 699 mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk"); 700 if (IS_ERR(mdata->spi_clk)) { 701 ret = PTR_ERR(mdata->spi_clk); 702 dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret); 703 goto err_put_master; 704 } 705 706 ret = clk_prepare_enable(mdata->spi_clk); 707 if (ret < 0) { 708 dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret); 709 goto err_put_master; 710 } 711 712 ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk); 713 if (ret < 0) { 714 dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret); 715 clk_disable_unprepare(mdata->spi_clk); 716 goto err_put_master; 717 } 718 719 clk_disable_unprepare(mdata->spi_clk); 720 721 pm_runtime_enable(&pdev->dev); 722 723 ret = devm_spi_register_master(&pdev->dev, master); 724 if (ret) { 725 dev_err(&pdev->dev, "failed to register master (%d)\n", ret); 726 goto err_disable_runtime_pm; 727 } 728 729 if (mdata->dev_comp->need_pad_sel) { 730 if (mdata->pad_num != master->num_chipselect) { 731 dev_err(&pdev->dev, 732 "pad_num does not match num_chipselect(%d != 
%d)\n", 733 mdata->pad_num, master->num_chipselect); 734 ret = -EINVAL; 735 goto err_disable_runtime_pm; 736 } 737 738 if (!master->cs_gpios && master->num_chipselect > 1) { 739 dev_err(&pdev->dev, 740 "cs_gpios not specified and num_chipselect > 1\n"); 741 ret = -EINVAL; 742 goto err_disable_runtime_pm; 743 } 744 745 if (master->cs_gpios) { 746 for (i = 0; i < master->num_chipselect; i++) { 747 ret = devm_gpio_request(&pdev->dev, 748 master->cs_gpios[i], 749 dev_name(&pdev->dev)); 750 if (ret) { 751 dev_err(&pdev->dev, 752 "can't get CS GPIO %i\n", i); 753 goto err_disable_runtime_pm; 754 } 755 } 756 } 757 } 758 759 return 0; 760 761 err_disable_runtime_pm: 762 pm_runtime_disable(&pdev->dev); 763 err_put_master: 764 spi_master_put(master); 765 766 return ret; 767 } 768 769 static int mtk_spi_remove(struct platform_device *pdev) 770 { 771 struct spi_master *master = platform_get_drvdata(pdev); 772 struct mtk_spi *mdata = spi_master_get_devdata(master); 773 774 pm_runtime_disable(&pdev->dev); 775 776 mtk_spi_reset(mdata); 777 778 return 0; 779 } 780 781 #ifdef CONFIG_PM_SLEEP 782 static int mtk_spi_suspend(struct device *dev) 783 { 784 int ret; 785 struct spi_master *master = dev_get_drvdata(dev); 786 struct mtk_spi *mdata = spi_master_get_devdata(master); 787 788 ret = spi_master_suspend(master); 789 if (ret) 790 return ret; 791 792 if (!pm_runtime_suspended(dev)) 793 clk_disable_unprepare(mdata->spi_clk); 794 795 return ret; 796 } 797 798 static int mtk_spi_resume(struct device *dev) 799 { 800 int ret; 801 struct spi_master *master = dev_get_drvdata(dev); 802 struct mtk_spi *mdata = spi_master_get_devdata(master); 803 804 if (!pm_runtime_suspended(dev)) { 805 ret = clk_prepare_enable(mdata->spi_clk); 806 if (ret < 0) { 807 dev_err(dev, "failed to enable spi_clk (%d)\n", ret); 808 return ret; 809 } 810 } 811 812 ret = spi_master_resume(master); 813 if (ret < 0) 814 clk_disable_unprepare(mdata->spi_clk); 815 816 return ret; 817 } 818 #endif /* CONFIG_PM_SLEEP */ 819 
820 #ifdef CONFIG_PM 821 static int mtk_spi_runtime_suspend(struct device *dev) 822 { 823 struct spi_master *master = dev_get_drvdata(dev); 824 struct mtk_spi *mdata = spi_master_get_devdata(master); 825 826 clk_disable_unprepare(mdata->spi_clk); 827 828 return 0; 829 } 830 831 static int mtk_spi_runtime_resume(struct device *dev) 832 { 833 struct spi_master *master = dev_get_drvdata(dev); 834 struct mtk_spi *mdata = spi_master_get_devdata(master); 835 int ret; 836 837 ret = clk_prepare_enable(mdata->spi_clk); 838 if (ret < 0) { 839 dev_err(dev, "failed to enable spi_clk (%d)\n", ret); 840 return ret; 841 } 842 843 return 0; 844 } 845 #endif /* CONFIG_PM */ 846 847 static const struct dev_pm_ops mtk_spi_pm = { 848 SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume) 849 SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend, 850 mtk_spi_runtime_resume, NULL) 851 }; 852 853 static struct platform_driver mtk_spi_driver = { 854 .driver = { 855 .name = "mtk-spi", 856 .pm = &mtk_spi_pm, 857 .of_match_table = mtk_spi_of_match, 858 }, 859 .probe = mtk_spi_probe, 860 .remove = mtk_spi_remove, 861 }; 862 863 module_platform_driver(mtk_spi_driver); 864 865 MODULE_DESCRIPTION("MTK SPI Controller driver"); 866 MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>"); 867 MODULE_LICENSE("GPL v2"); 868 MODULE_ALIAS("platform:mtk-spi"); 869