/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author: Leilk Liu <leilk.liu@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

#define SPI_CFG0_REG 0x0000
#define SPI_CFG1_REG 0x0004
#define SPI_TX_SRC_REG 0x0008
#define SPI_RX_DST_REG 0x000c
#define SPI_TX_DATA_REG 0x0010
#define SPI_RX_DATA_REG 0x0014
#define SPI_CMD_REG 0x0018
#define SPI_STATUS0_REG 0x001c
#define SPI_PAD_SEL_REG 0x0024

#define SPI_CFG0_SCK_HIGH_OFFSET 0
#define SPI_CFG0_SCK_LOW_OFFSET 8
#define SPI_CFG0_CS_HOLD_OFFSET 16
#define SPI_CFG0_CS_SETUP_OFFSET 24

#define SPI_CFG1_CS_IDLE_OFFSET 0
#define SPI_CFG1_PACKET_LOOP_OFFSET 8
#define SPI_CFG1_PACKET_LENGTH_OFFSET 16
#define SPI_CFG1_GET_TICK_DLY_OFFSET 30

#define SPI_CFG1_CS_IDLE_MASK 0xff
#define SPI_CFG1_PACKET_LOOP_MASK 0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000

#define SPI_CMD_ACT BIT(0)
#define SPI_CMD_RESUME BIT(1)
#define SPI_CMD_RST BIT(2)
#define SPI_CMD_PAUSE_EN BIT(4)
#define SPI_CMD_DEASSERT BIT(5)
#define SPI_CMD_CPHA BIT(8)
#define SPI_CMD_CPOL BIT(9)
#define SPI_CMD_RX_DMA BIT(10)
#define SPI_CMD_TX_DMA BIT(11)
#define SPI_CMD_TXMSBF BIT(12)
#define SPI_CMD_RXMSBF BIT(13)
#define SPI_CMD_RX_ENDIAN BIT(14)
#define SPI_CMD_TX_ENDIAN BIT(15)
#define SPI_CMD_FINISH_IE BIT(16)
#define SPI_CMD_PAUSE_IE BIT(17)

#define MT8173_SPI_MAX_PAD_SEL 3

#define MTK_SPI_PAUSE_INT_STATUS 0x2

#define MTK_SPI_IDLE 0
#define MTK_SPI_PAUSED 1

#define MTK_SPI_MAX_FIFO_SIZE 32
#define MTK_SPI_PACKET_SIZE 1024
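/*
 * Transfers of at most MTK_SPI_MAX_FIFO_SIZE bytes go through the TX/RX
 * data FIFOs (PIO); anything longer is handed to the controller's DMA
 * engine. In DMA mode a hardware "packet" carries at most
 * MTK_SPI_PACKET_SIZE bytes, so long transfers are programmed as a packet
 * length plus a loop count, and any remainder that is not a multiple of
 * the packet size is issued as an extra step (see
 * mtk_spi_update_mdata_len()).
 */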
struct mtk_spi_compatible {
	bool need_pad_sel;
	/* Must explicitly send dummy Tx bytes to do Rx only transfer */
	bool must_tx;
};

struct mtk_spi {
	void __iomem *base;
	u32 state;
	int pad_num;
	u32 *pad_sel;
	struct clk *parent_clk, *sel_clk, *spi_clk;
	struct spi_transfer *cur_transfer;
	u32 xfer_len;
	struct scatterlist *tx_sgl, *rx_sgl;
	u32 tx_sgl_len, rx_sgl_len;
	const struct mtk_spi_compatible *dev_comp;
};

static const struct mtk_spi_compatible mtk_common_compat;
static const struct mtk_spi_compatible mt8173_compat = {
	.need_pad_sel = true,
	.must_tx = true,
};

/*
 * A piece of default chip info unless the platform
 * supplies it.
 */
static const struct mtk_chip_config mtk_default_chip_info = {
	.rx_mlsb = 1,
	.tx_mlsb = 1,
};

static const struct of_device_id mtk_spi_of_match[] = {
	{ .compatible = "mediatek,mt2701-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt6589-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt8135-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt8173-spi",
		.data = (void *)&mt8173_compat,
	},
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);

static void mtk_spi_reset(struct mtk_spi *mdata)
{
	u32 reg_val;

	/* set the software reset bit in SPI_CMD_REG. */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);
}

static int mtk_spi_prepare_message(struct spi_master *master,
				   struct spi_message *msg)
{
	u16 cpha, cpol;
	u32 reg_val;
	struct spi_device *spi = msg->spi;
	struct mtk_chip_config *chip_config = spi->controller_data;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (cpha)
		reg_val |= SPI_CMD_CPHA;
	else
		reg_val &= ~SPI_CMD_CPHA;
	if (cpol)
		reg_val |= SPI_CMD_CPOL;
	else
		reg_val &= ~SPI_CMD_CPOL;

	/* set the tx/rx msb-first bits */
	if (chip_config->tx_mlsb)
		reg_val |= SPI_CMD_TXMSBF;
	else
		reg_val &= ~SPI_CMD_TXMSBF;
	if (chip_config->rx_mlsb)
		reg_val |= SPI_CMD_RXMSBF;
	else
		reg_val &= ~SPI_CMD_RXMSBF;

	/* set the tx/rx endian */
#ifdef __LITTLE_ENDIAN
	reg_val &= ~SPI_CMD_TX_ENDIAN;
	reg_val &= ~SPI_CMD_RX_ENDIAN;
#else
	reg_val |= SPI_CMD_TX_ENDIAN;
	reg_val |= SPI_CMD_RX_ENDIAN;
#endif

	/* always enable the finish and pause interrupts */
	reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;

	/* disable dma mode */
	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);

	/* disable deassert mode */
	reg_val &= ~SPI_CMD_DEASSERT;

	writel(reg_val, mdata->base + SPI_CMD_REG);

	/* pad select */
	if (mdata->dev_comp->need_pad_sel)
		writel(mdata->pad_sel[spi->chip_select],
		       mdata->base + SPI_PAD_SEL_REG);

	return 0;
}

static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
	u32 reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (!enable) {
		reg_val |= SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
	} else {
		reg_val &= ~SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
		mdata->state = MTK_SPI_IDLE;
		mtk_spi_reset(mdata);
	}
}
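/*
 * SCK and CS timings are derived from the source clock: div is the
 * rounded-up ratio of the spi-clk rate to the requested speed, the SCK
 * high and low phases each take roughly div/2 source-clock ticks, and the
 * CS setup/hold/idle times are programmed as twice that. The hardware
 * fields hold "ticks - 1".
 */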
static void mtk_spi_prepare_transfer(struct spi_master *master,
				     struct spi_transfer *xfer)
{
	u32 spi_clk_hz, div, sck_time, cs_time, reg_val = 0;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	spi_clk_hz = clk_get_rate(mdata->spi_clk);
	if (xfer->speed_hz < spi_clk_hz / 2)
		div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
	else
		div = 1;

	sck_time = (div + 1) / 2;
	cs_time = sck_time * 2;

	reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_HIGH_OFFSET);
	reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
	reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
	reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET);
	writel(reg_val, mdata->base + SPI_CFG0_REG);

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
	reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

static void mtk_spi_setup_packet(struct spi_master *master)
{
	u32 packet_size, packet_loop, reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	packet_size = min_t(u32, mdata->xfer_len, MTK_SPI_PACKET_SIZE);
	packet_loop = mdata->xfer_len / packet_size;

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK);
	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

static void mtk_spi_enable_transfer(struct spi_master *master)
{
	u32 cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (mdata->state == MTK_SPI_IDLE)
		cmd |= SPI_CMD_ACT;
	else
		cmd |= SPI_CMD_RESUME;
	writel(cmd, mdata->base + SPI_CMD_REG);
}

static int mtk_spi_get_mult_delta(u32 xfer_len)
{
	u32 mult_delta;

	if (xfer_len > MTK_SPI_PACKET_SIZE)
		mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
	else
		mult_delta = 0;

	return mult_delta;
}

static void mtk_spi_update_mdata_len(struct spi_master *master)
{
	int mult_delta;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
		if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
			mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
			mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
			mdata->rx_sgl_len = mult_delta;
			mdata->tx_sgl_len -= mdata->xfer_len;
		} else {
			mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
			mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
			mdata->tx_sgl_len = mult_delta;
			mdata->rx_sgl_len -= mdata->xfer_len;
		}
	} else if (mdata->tx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
		mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
		mdata->tx_sgl_len = mult_delta;
	} else if (mdata->rx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
		mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
		mdata->rx_sgl_len = mult_delta;
	}
}

static void mtk_spi_setup_dma_addr(struct spi_master *master,
				   struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl)
		writel(xfer->tx_dma, mdata->base + SPI_TX_SRC_REG);
	if (mdata->rx_sgl)
		writel(xfer->rx_dma, mdata->base + SPI_RX_DST_REG);
}
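/*
 * PIO path: the transfer fits in the 32-byte FIFO. The TX payload is
 * pushed into SPI_TX_DATA_REG one 32-bit word at a time, with any
 * trailing 1-3 bytes packed into a final word; the RX side is drained in
 * the interrupt handler. Returning 1 tells the SPI core that the transfer
 * completes asynchronously via spi_finalize_current_transfer().
 */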
static int mtk_spi_fifo_transfer(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	int cnt, remainder;
	u32 reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->cur_transfer = xfer;
	mdata->xfer_len = xfer->len;
	mtk_spi_prepare_transfer(master, xfer);
	mtk_spi_setup_packet(master);

	cnt = xfer->len / 4;
	iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);

	remainder = xfer->len % 4;
	if (remainder > 0) {
		reg_val = 0;
		memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
		writel(reg_val, mdata->base + SPI_TX_DATA_REG);
	}

	mtk_spi_enable_transfer(master);

	return 1;
}

static int mtk_spi_dma_transfer(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	int cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->tx_sgl = NULL;
	mdata->rx_sgl = NULL;
	mdata->tx_sgl_len = 0;
	mdata->rx_sgl_len = 0;
	mdata->cur_transfer = xfer;

	mtk_spi_prepare_transfer(master, xfer);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (xfer->tx_buf)
		cmd |= SPI_CMD_TX_DMA;
	if (xfer->rx_buf)
		cmd |= SPI_CMD_RX_DMA;
	writel(cmd, mdata->base + SPI_CMD_REG);

	if (xfer->tx_buf)
		mdata->tx_sgl = xfer->tx_sg.sgl;
	if (xfer->rx_buf)
		mdata->rx_sgl = xfer->rx_sg.sgl;

	if (mdata->tx_sgl) {
		xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
		mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
	}
	if (mdata->rx_sgl) {
		xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
		mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, xfer);
	mtk_spi_enable_transfer(master);

	return 1;
}

static int mtk_spi_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	if (master->can_dma(master, spi, xfer))
		return mtk_spi_dma_transfer(master, spi, xfer);
	else
		return mtk_spi_fifo_transfer(master, spi, xfer);
}

static bool mtk_spi_can_dma(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	return xfer->len > MTK_SPI_MAX_FIFO_SIZE;
}

static int mtk_spi_setup(struct spi_device *spi)
{
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

	if (!spi->controller_data)
		spi->controller_data = (void *)&mtk_default_chip_info;

	if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio))
		gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));

	return 0;
}
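/*
 * Completion/pause interrupt handler. In FIFO mode it drains the RX FIFO
 * into the current buffer and finalizes the transfer. In DMA mode it
 * walks the TX/RX scatterlists: once the programmed chunk is done it
 * advances the DMA addresses (moving to the next sg entry when one is
 * exhausted), reprograms the packet length/loop count and resumes the
 * controller, finalizing only when both lists are consumed.
 */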
static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
	u32 cmd, reg_val, cnt, remainder;
	struct spi_master *master = dev_id;
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	struct spi_transfer *trans = mdata->cur_transfer;

	reg_val = readl(mdata->base + SPI_STATUS0_REG);
	if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
		mdata->state = MTK_SPI_PAUSED;
	else
		mdata->state = MTK_SPI_IDLE;

	if (!master->can_dma(master, master->cur_msg->spi, trans)) {
		if (trans->rx_buf) {
			cnt = mdata->xfer_len / 4;
			ioread32_rep(mdata->base + SPI_RX_DATA_REG,
				     trans->rx_buf, cnt);
			remainder = mdata->xfer_len % 4;
			if (remainder > 0) {
				reg_val = readl(mdata->base + SPI_RX_DATA_REG);
				memcpy(trans->rx_buf + (cnt * 4),
				       &reg_val, remainder);
			}
		}
		spi_finalize_current_transfer(master);
		return IRQ_HANDLED;
	}

	if (mdata->tx_sgl)
		trans->tx_dma += mdata->xfer_len;
	if (mdata->rx_sgl)
		trans->rx_dma += mdata->xfer_len;

	if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
		mdata->tx_sgl = sg_next(mdata->tx_sgl);
		if (mdata->tx_sgl) {
			trans->tx_dma = sg_dma_address(mdata->tx_sgl);
			mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
		}
	}
	if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
		mdata->rx_sgl = sg_next(mdata->rx_sgl);
		if (mdata->rx_sgl) {
			trans->rx_dma = sg_dma_address(mdata->rx_sgl);
			mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
		}
	}

	if (!mdata->tx_sgl && !mdata->rx_sgl) {
		/* spi disable dma */
		cmd = readl(mdata->base + SPI_CMD_REG);
		cmd &= ~SPI_CMD_TX_DMA;
		cmd &= ~SPI_CMD_RX_DMA;
		writel(cmd, mdata->base + SPI_CMD_REG);

		spi_finalize_current_transfer(master);
		return IRQ_HANDLED;
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, trans);
	mtk_spi_enable_transfer(master);

	return IRQ_HANDLED;
}
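/*
 * Probe: look up the SoC-specific compatible data, map the registers,
 * request the completion IRQ and acquire the clock chain (parent-clk
 * feeding the sel-clk mux, with spi-clk as the clock the driver actually
 * gates). The sel-clk mux is reparented once here; spi-clk itself is only
 * kept enabled while runtime PM says the device is active. On SoCs that
 * need pad selection, "mediatek,pad-select" must provide one entry per
 * chip select, and CS lines are expected as GPIOs when more than one is
 * used.
 */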
static int mtk_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct mtk_spi *mdata;
	const struct of_device_id *of_id;
	struct resource *res;
	int i, irq, ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
	if (!master) {
		dev_err(&pdev->dev, "failed to alloc spi master\n");
		return -ENOMEM;
	}

	master->auto_runtime_pm = true;
	master->dev.of_node = pdev->dev.of_node;
	master->mode_bits = SPI_CPOL | SPI_CPHA;

	master->set_cs = mtk_spi_set_cs;
	master->prepare_message = mtk_spi_prepare_message;
	master->transfer_one = mtk_spi_transfer_one;
	master->can_dma = mtk_spi_can_dma;
	master->setup = mtk_spi_setup;

	of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
	if (!of_id) {
		dev_err(&pdev->dev, "failed to probe of_node\n");
		ret = -EINVAL;
		goto err_put_master;
	}

	mdata = spi_master_get_devdata(master);
	mdata->dev_comp = of_id->data;
	if (mdata->dev_comp->must_tx)
		master->flags = SPI_MASTER_MUST_TX;

	if (mdata->dev_comp->need_pad_sel) {
		mdata->pad_num = of_property_count_u32_elems(
						pdev->dev.of_node,
						"mediatek,pad-select");
		if (mdata->pad_num < 0) {
			dev_err(&pdev->dev,
				"No 'mediatek,pad-select' property\n");
			ret = -EINVAL;
			goto err_put_master;
		}

		mdata->pad_sel = devm_kmalloc_array(&pdev->dev, mdata->pad_num,
						    sizeof(u32), GFP_KERNEL);
		if (!mdata->pad_sel) {
			ret = -ENOMEM;
			goto err_put_master;
		}

		for (i = 0; i < mdata->pad_num; i++) {
			of_property_read_u32_index(pdev->dev.of_node,
						   "mediatek,pad-select",
						   i, &mdata->pad_sel[i]);
			if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL) {
				dev_err(&pdev->dev, "wrong pad-sel[%d]: %u\n",
					i, mdata->pad_sel[i]);
				ret = -EINVAL;
				goto err_put_master;
			}
		}
	}

	platform_set_drvdata(pdev, master);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "failed to determine base address\n");
		goto err_put_master;
	}

	mdata->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdata->base)) {
		ret = PTR_ERR(mdata->base);
		goto err_put_master;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
		ret = irq;
		goto err_put_master;
	}

	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	ret = devm_request_irq(&pdev->dev, irq, mtk_spi_interrupt,
			       IRQF_TRIGGER_NONE, dev_name(&pdev->dev), master);
	if (ret) {
		dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
		goto err_put_master;
	}

	mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
	if (IS_ERR(mdata->parent_clk)) {
		ret = PTR_ERR(mdata->parent_clk);
		dev_err(&pdev->dev, "failed to get parent-clk: %d\n", ret);
		goto err_put_master;
	}

	mdata->sel_clk = devm_clk_get(&pdev->dev, "sel-clk");
	if (IS_ERR(mdata->sel_clk)) {
		ret = PTR_ERR(mdata->sel_clk);
		dev_err(&pdev->dev, "failed to get sel-clk: %d\n", ret);
		goto err_put_master;
	}

	mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
	if (IS_ERR(mdata->spi_clk)) {
		ret = PTR_ERR(mdata->spi_clk);
		dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
		goto err_put_master;
	}

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
		goto err_put_master;
	}

	ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
		clk_disable_unprepare(mdata->spi_clk);
		goto err_put_master;
	}

	clk_disable_unprepare(mdata->spi_clk);

	pm_runtime_enable(&pdev->dev);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret) {
		dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
		goto err_disable_runtime_pm;
	}

	if (mdata->dev_comp->need_pad_sel) {
		if (mdata->pad_num != master->num_chipselect) {
			dev_err(&pdev->dev,
				"pad_num does not match num_chipselect(%d != %d)\n",
				mdata->pad_num, master->num_chipselect);
			ret = -EINVAL;
			goto err_disable_runtime_pm;
		}

		if (!master->cs_gpios && master->num_chipselect > 1) {
			dev_err(&pdev->dev,
				"cs_gpios not specified and num_chipselect > 1\n");
			ret = -EINVAL;
			goto err_disable_runtime_pm;
		}

		if (master->cs_gpios) {
			for (i = 0; i < master->num_chipselect; i++) {
				ret = devm_gpio_request(&pdev->dev,
							master->cs_gpios[i],
							dev_name(&pdev->dev));
				if (ret) {
					dev_err(&pdev->dev,
						"can't get CS GPIO %i\n", i);
					goto err_disable_runtime_pm;
				}
			}
		}
	}

	return 0;

err_disable_runtime_pm:
	pm_runtime_disable(&pdev->dev);
err_put_master:
	spi_master_put(master);

	return ret;
}

static int mtk_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	pm_runtime_disable(&pdev->dev);

	mtk_spi_reset(mdata);
	spi_master_put(master);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_spi_suspend(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}

static int mtk_spi_resume(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}
	}

	ret = spi_master_resume(master);
	if (ret < 0)
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}
#endif /* CONFIG_PM_SLEEP */
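/*
 * Runtime PM simply gates spi-clk around bus activity (auto_runtime_pm is
 * set on the master in probe). The system sleep hooks above check
 * pm_runtime_suspended() so the clock is not disabled or enabled a second
 * time when the device is already runtime-suspended.
 */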
#ifdef CONFIG_PM
static int mtk_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	clk_disable_unprepare(mdata->spi_clk);

	return 0;
}

static int mtk_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops mtk_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
			   mtk_spi_runtime_resume, NULL)
};

static struct platform_driver mtk_spi_driver = {
	.driver = {
		.name = "mtk-spi",
		.pm = &mtk_spi_pm,
		.of_match_table = mtk_spi_of_match,
	},
	.probe = mtk_spi_probe,
	.remove = mtk_spi_remove,
};

module_platform_driver(mtk_spi_driver);

MODULE_DESCRIPTION("MTK SPI Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi");