/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author: Leilk Liu <leilk.liu@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

#define SPI_CFG0_REG			0x0000
#define SPI_CFG1_REG			0x0004
#define SPI_TX_SRC_REG			0x0008
#define SPI_RX_DST_REG			0x000c
#define SPI_TX_DATA_REG			0x0010
#define SPI_RX_DATA_REG			0x0014
#define SPI_CMD_REG			0x0018
#define SPI_STATUS0_REG			0x001c
#define SPI_PAD_SEL_REG			0x0024

#define SPI_CFG0_SCK_HIGH_OFFSET	0
#define SPI_CFG0_SCK_LOW_OFFSET		8
#define SPI_CFG0_CS_HOLD_OFFSET		16
#define SPI_CFG0_CS_SETUP_OFFSET	24

#define SPI_CFG1_CS_IDLE_OFFSET		0
#define SPI_CFG1_PACKET_LOOP_OFFSET	8
#define SPI_CFG1_PACKET_LENGTH_OFFSET	16
#define SPI_CFG1_GET_TICK_DLY_OFFSET	30

#define SPI_CFG1_CS_IDLE_MASK		0xff
#define SPI_CFG1_PACKET_LOOP_MASK	0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK	0x3ff0000

#define SPI_CMD_ACT			BIT(0)
#define SPI_CMD_RESUME			BIT(1)
#define SPI_CMD_RST			BIT(2)
#define SPI_CMD_PAUSE_EN		BIT(4)
#define SPI_CMD_DEASSERT		BIT(5)
#define SPI_CMD_CPHA			BIT(8)
#define SPI_CMD_CPOL			BIT(9)
#define SPI_CMD_RX_DMA			BIT(10)
#define SPI_CMD_TX_DMA			BIT(11)
#define SPI_CMD_TXMSBF			BIT(12)
#define SPI_CMD_RXMSBF			BIT(13)
#define SPI_CMD_RX_ENDIAN		BIT(14)
#define SPI_CMD_TX_ENDIAN		BIT(15)
#define SPI_CMD_FINISH_IE		BIT(16)
#define SPI_CMD_PAUSE_IE		BIT(17)

#define MT8173_SPI_MAX_PAD_SEL		3

#define MTK_SPI_PAUSE_INT_STATUS	0x2

#define MTK_SPI_IDLE			0
#define MTK_SPI_PAUSED			1

#define MTK_SPI_MAX_FIFO_SIZE		32
#define MTK_SPI_PACKET_SIZE		1024

struct mtk_spi_compatible {
	bool need_pad_sel;
	/* Must explicitly send dummy Tx bytes to do Rx only transfer */
	bool must_tx;
};

struct mtk_spi {
	void __iomem *base;
	u32 state;
	u32 pad_sel;
	struct clk *parent_clk, *sel_clk, *spi_clk;
	struct spi_transfer *cur_transfer;
	u32 xfer_len;
	struct scatterlist *tx_sgl, *rx_sgl;
	u32 tx_sgl_len, rx_sgl_len;
	const struct mtk_spi_compatible *dev_comp;
};

static const struct mtk_spi_compatible mt6589_compat;
static const struct mtk_spi_compatible mt8135_compat;
static const struct mtk_spi_compatible mt8173_compat = {
	.need_pad_sel = true,
	.must_tx = true,
};

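/*
 * mt6589 and mt8135 rely on the all-zero compatible data above:
 * neither needs pad select, and neither must shift dummy Tx bytes
 * for an Rx-only transfer.
 */
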
/*
 * A piece of default chip info unless the platform
 * supplies it.
 */
static const struct mtk_chip_config mtk_default_chip_info = {
	.rx_mlsb = 1,
	.tx_mlsb = 1,
};

static const struct of_device_id mtk_spi_of_match[] = {
	{ .compatible = "mediatek,mt6589-spi", .data = (void *)&mt6589_compat },
	{ .compatible = "mediatek,mt8135-spi", .data = (void *)&mt8135_compat },
	{ .compatible = "mediatek,mt8173-spi", .data = (void *)&mt8173_compat },
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);

static void mtk_spi_reset(struct mtk_spi *mdata)
{
	u32 reg_val;

	/* set the software reset bit in SPI_CMD_REG. */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);
}

static void mtk_spi_config(struct mtk_spi *mdata,
			   struct mtk_chip_config *chip_config)
{
	u32 reg_val;

	reg_val = readl(mdata->base + SPI_CMD_REG);

	/* set the tx/rx MSB-first bits */
	if (chip_config->tx_mlsb)
		reg_val |= SPI_CMD_TXMSBF;
	else
		reg_val &= ~SPI_CMD_TXMSBF;
	if (chip_config->rx_mlsb)
		reg_val |= SPI_CMD_RXMSBF;
	else
		reg_val &= ~SPI_CMD_RXMSBF;

	/* set the tx/rx endian */
#ifdef __LITTLE_ENDIAN
	reg_val &= ~SPI_CMD_TX_ENDIAN;
	reg_val &= ~SPI_CMD_RX_ENDIAN;
#else
	reg_val |= SPI_CMD_TX_ENDIAN;
	reg_val |= SPI_CMD_RX_ENDIAN;
#endif

	/* set finish and pause interrupt always enable */
	reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;

	/* disable dma mode */
	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);

	/* disable deassert mode */
	reg_val &= ~SPI_CMD_DEASSERT;

	writel(reg_val, mdata->base + SPI_CMD_REG);

	/* pad select */
	if (mdata->dev_comp->need_pad_sel)
		writel(mdata->pad_sel, mdata->base + SPI_PAD_SEL_REG);
}

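/*
 * Called once per message: latch CPOL/CPHA from spi->mode into
 * SPI_CMD_REG, then apply the per-chip config, falling back to
 * mtk_default_chip_info when the slave supplies none.
 */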
static int mtk_spi_prepare_message(struct spi_master *master,
				   struct spi_message *msg)
{
	u32 reg_val;
	u8 cpha, cpol;
	struct mtk_chip_config *chip_config;
	struct spi_device *spi = msg->spi;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (cpha)
		reg_val |= SPI_CMD_CPHA;
	else
		reg_val &= ~SPI_CMD_CPHA;
	if (cpol)
		reg_val |= SPI_CMD_CPOL;
	else
		reg_val &= ~SPI_CMD_CPOL;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	chip_config = spi->controller_data;
	if (!chip_config) {
		chip_config = (void *)&mtk_default_chip_info;
		spi->controller_data = chip_config;
	}
	mtk_spi_config(mdata, chip_config);

	return 0;
}

static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
	u32 reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (!enable) {
		reg_val |= SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
	} else {
		reg_val &= ~SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
		mdata->state = MTK_SPI_IDLE;
		mtk_spi_reset(mdata);
	}
}

static void mtk_spi_prepare_transfer(struct spi_master *master,
				     struct spi_transfer *xfer)
{
	u32 spi_clk_hz, div, sck_time, cs_time, reg_val = 0;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	spi_clk_hz = clk_get_rate(mdata->spi_clk);
	if (xfer->speed_hz < spi_clk_hz / 2)
		div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
	else
		div = 1;

	sck_time = (div + 1) / 2;
	cs_time = sck_time * 2;

	reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_HIGH_OFFSET);
	reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
	reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
	reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET);
	writel(reg_val, mdata->base + SPI_CFG0_REG);

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
	reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

static void mtk_spi_setup_packet(struct spi_master *master)
{
	u32 packet_size, packet_loop, reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	packet_size = min_t(u32, mdata->xfer_len, MTK_SPI_PACKET_SIZE);
	packet_loop = mdata->xfer_len / packet_size;

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK);
	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

static void mtk_spi_enable_transfer(struct spi_master *master)
{
	u32 cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (mdata->state == MTK_SPI_IDLE)
		cmd |= SPI_CMD_ACT;
	else
		cmd |= SPI_CMD_RESUME;
	writel(cmd, mdata->base + SPI_CMD_REG);
}

static int mtk_spi_get_mult_delta(u32 xfer_len)
{
	u32 mult_delta;

	if (xfer_len > MTK_SPI_PACKET_SIZE)
		mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
	else
		mult_delta = 0;

	return mult_delta;
}

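/*
 * The controller moves data in packets of at most MTK_SPI_PACKET_SIZE
 * bytes, so each DMA round is trimmed to a whole number of packets;
 * the remainder returned by mtk_spi_get_mult_delta() is left in the
 * sgl length counters and picked up by the next round.
 */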
static void mtk_spi_update_mdata_len(struct spi_master *master)
{
	int mult_delta;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
		if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
			mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
			mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
			mdata->rx_sgl_len = mult_delta;
			mdata->tx_sgl_len -= mdata->xfer_len;
		} else {
			mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
			mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
			mdata->tx_sgl_len = mult_delta;
			mdata->rx_sgl_len -= mdata->xfer_len;
		}
	} else if (mdata->tx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
		mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
		mdata->tx_sgl_len = mult_delta;
	} else if (mdata->rx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
		mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
		mdata->rx_sgl_len = mult_delta;
	}
}

static void mtk_spi_setup_dma_addr(struct spi_master *master,
				   struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl)
		writel(xfer->tx_dma, mdata->base + SPI_TX_SRC_REG);
	if (mdata->rx_sgl)
		writel(xfer->rx_dma, mdata->base + SPI_RX_DST_REG);
}

static int mtk_spi_fifo_transfer(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	int cnt;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->cur_transfer = xfer;
	mdata->xfer_len = xfer->len;
	mtk_spi_prepare_transfer(master, xfer);
	mtk_spi_setup_packet(master);

	if (xfer->len % 4)
		cnt = xfer->len / 4 + 1;
	else
		cnt = xfer->len / 4;

	/*
	 * Rx-only transfers on SoCs without must_tx have no tx_buf;
	 * don't read from a NULL buffer in that case.
	 */
	if (xfer->tx_buf)
		iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
			      xfer->tx_buf, cnt);

	mtk_spi_enable_transfer(master);

	return 1;
}

static int mtk_spi_dma_transfer(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	int cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->tx_sgl = NULL;
	mdata->rx_sgl = NULL;
	mdata->tx_sgl_len = 0;
	mdata->rx_sgl_len = 0;
	mdata->cur_transfer = xfer;

	mtk_spi_prepare_transfer(master, xfer);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (xfer->tx_buf)
		cmd |= SPI_CMD_TX_DMA;
	if (xfer->rx_buf)
		cmd |= SPI_CMD_RX_DMA;
	writel(cmd, mdata->base + SPI_CMD_REG);

	if (xfer->tx_buf)
		mdata->tx_sgl = xfer->tx_sg.sgl;
	if (xfer->rx_buf)
		mdata->rx_sgl = xfer->rx_sg.sgl;

	if (mdata->tx_sgl) {
		xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
		mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
	}
	if (mdata->rx_sgl) {
		xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
		mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, xfer);
	mtk_spi_enable_transfer(master);

	return 1;
}

static int mtk_spi_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	if (master->can_dma(master, spi, xfer))
		return mtk_spi_dma_transfer(master, spi, xfer);
	else
		return mtk_spi_fifo_transfer(master, spi, xfer);
}

static bool mtk_spi_can_dma(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	return xfer->len > MTK_SPI_MAX_FIFO_SIZE;
}

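/*
 * One interrupt fires per FIFO transfer or per DMA round: record
 * whether the controller paused (CS still asserted) or went idle,
 * drain the Rx FIFO for PIO transfers, and otherwise walk the
 * scatterlists and kick off the next DMA round until both lists
 * are exhausted.
 */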
static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
	u32 cmd, reg_val, cnt;
	struct spi_master *master = dev_id;
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	struct spi_transfer *trans = mdata->cur_transfer;

	reg_val = readl(mdata->base + SPI_STATUS0_REG);
	if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
		mdata->state = MTK_SPI_PAUSED;
	else
		mdata->state = MTK_SPI_IDLE;

	if (!master->can_dma(master, master->cur_msg->spi, trans)) {
		if (trans->rx_buf) {
			if (mdata->xfer_len % 4)
				cnt = mdata->xfer_len / 4 + 1;
			else
				cnt = mdata->xfer_len / 4;
			ioread32_rep(mdata->base + SPI_RX_DATA_REG,
				     trans->rx_buf, cnt);
		}
		spi_finalize_current_transfer(master);
		return IRQ_HANDLED;
	}

	if (mdata->tx_sgl)
		trans->tx_dma += mdata->xfer_len;
	if (mdata->rx_sgl)
		trans->rx_dma += mdata->xfer_len;

	if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
		mdata->tx_sgl = sg_next(mdata->tx_sgl);
		if (mdata->tx_sgl) {
			trans->tx_dma = sg_dma_address(mdata->tx_sgl);
			mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
		}
	}
	if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
		mdata->rx_sgl = sg_next(mdata->rx_sgl);
		if (mdata->rx_sgl) {
			trans->rx_dma = sg_dma_address(mdata->rx_sgl);
			mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
		}
	}

	if (!mdata->tx_sgl && !mdata->rx_sgl) {
		/* spi disable dma */
		cmd = readl(mdata->base + SPI_CMD_REG);
		cmd &= ~SPI_CMD_TX_DMA;
		cmd &= ~SPI_CMD_RX_DMA;
		writel(cmd, mdata->base + SPI_CMD_REG);

		spi_finalize_current_transfer(master);
		return IRQ_HANDLED;
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, trans);
	mtk_spi_enable_transfer(master);

	return IRQ_HANDLED;
}

static int mtk_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct mtk_spi *mdata;
	const struct of_device_id *of_id;
	struct resource *res;
	int irq, ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
	if (!master) {
		dev_err(&pdev->dev, "failed to alloc spi master\n");
		return -ENOMEM;
	}

	master->auto_runtime_pm = true;
	master->dev.of_node = pdev->dev.of_node;
	master->mode_bits = SPI_CPOL | SPI_CPHA;

	master->set_cs = mtk_spi_set_cs;
	master->prepare_message = mtk_spi_prepare_message;
	master->transfer_one = mtk_spi_transfer_one;
	master->can_dma = mtk_spi_can_dma;

	of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
	if (!of_id) {
		dev_err(&pdev->dev, "failed to probe of_node\n");
		ret = -EINVAL;
		goto err_put_master;
	}

	mdata = spi_master_get_devdata(master);
	mdata->dev_comp = of_id->data;
	if (mdata->dev_comp->must_tx)
		master->flags = SPI_MASTER_MUST_TX;

	if (mdata->dev_comp->need_pad_sel) {
		ret = of_property_read_u32(pdev->dev.of_node,
					   "mediatek,pad-select",
					   &mdata->pad_sel);
		if (ret) {
			dev_err(&pdev->dev, "failed to read pad select: %d\n",
				ret);
			goto err_put_master;
		}

		if (mdata->pad_sel > MT8173_SPI_MAX_PAD_SEL) {
			dev_err(&pdev->dev, "wrong pad-select: %u\n",
				mdata->pad_sel);
			ret = -EINVAL;
			goto err_put_master;
		}
	}

	platform_set_drvdata(pdev, master);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "failed to determine base address\n");
		goto err_put_master;
	}

	mdata->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdata->base)) {
		ret = PTR_ERR(mdata->base);
		goto err_put_master;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
		ret = irq;
		goto err_put_master;
	}

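	/*
	 * Some platform code does not set up a DMA mask; fall back to
	 * the coherent mask so the SPI core can map DMA buffers.
	 */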
	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	ret = devm_request_irq(&pdev->dev, irq, mtk_spi_interrupt,
			       IRQF_TRIGGER_NONE, dev_name(&pdev->dev), master);
	if (ret) {
		dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
		goto err_put_master;
	}

	mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
	if (IS_ERR(mdata->parent_clk)) {
		ret = PTR_ERR(mdata->parent_clk);
		dev_err(&pdev->dev, "failed to get parent-clk: %d\n", ret);
		goto err_put_master;
	}

	mdata->sel_clk = devm_clk_get(&pdev->dev, "sel-clk");
	if (IS_ERR(mdata->sel_clk)) {
		ret = PTR_ERR(mdata->sel_clk);
		dev_err(&pdev->dev, "failed to get sel-clk: %d\n", ret);
		goto err_put_master;
	}

	mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
	if (IS_ERR(mdata->spi_clk)) {
		ret = PTR_ERR(mdata->spi_clk);
		dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
		goto err_put_master;
	}

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
		goto err_put_master;
	}

	ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
		goto err_disable_clk;
	}

	clk_disable_unprepare(mdata->spi_clk);

	pm_runtime_enable(&pdev->dev);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret) {
		dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
		/* undo pm_runtime_enable() before dropping the master */
		pm_runtime_disable(&pdev->dev);
		goto err_put_master;
	}

	return 0;

err_disable_clk:
	clk_disable_unprepare(mdata->spi_clk);
err_put_master:
	spi_master_put(master);

	return ret;
}

static int mtk_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	pm_runtime_disable(&pdev->dev);

	mtk_spi_reset(mdata);
	spi_master_put(master);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_spi_suspend(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}

static int mtk_spi_resume(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}
	}

	ret = spi_master_resume(master);
	if (ret < 0)
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}
#endif /* CONFIG_PM_SLEEP */

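/*
 * Runtime PM hooks: they only gate spi_clk, since the registers are
 * reprogrammed for every message anyway.
 */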
#ifdef CONFIG_PM
static int mtk_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	clk_disable_unprepare(mdata->spi_clk);

	return 0;
}

static int mtk_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops mtk_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
			   mtk_spi_runtime_resume, NULL)
};

static struct platform_driver mtk_spi_driver = {
	.driver = {
		.name = "mtk-spi",
		.pm = &mtk_spi_pm,
		.of_match_table = mtk_spi_of_match,
	},
	.probe = mtk_spi_probe,
	.remove = mtk_spi_remove,
};

module_platform_driver(mtk_spi_driver);

MODULE_DESCRIPTION("MTK SPI Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi");