// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author: Leilk Liu <leilk.liu@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/dma-mapping.h>
#include <linux/pm_qos.h>

#define SPI_CFG0_REG			0x0000
#define SPI_CFG1_REG			0x0004
#define SPI_TX_SRC_REG			0x0008
#define SPI_RX_DST_REG			0x000c
#define SPI_TX_DATA_REG			0x0010
#define SPI_RX_DATA_REG			0x0014
#define SPI_CMD_REG			0x0018
#define SPI_STATUS0_REG			0x001c
#define SPI_PAD_SEL_REG			0x0024
#define SPI_CFG2_REG			0x0028
#define SPI_TX_SRC_REG_64		0x002c
#define SPI_RX_DST_REG_64		0x0030
#define SPI_CFG3_IPM_REG		0x0040

#define SPI_CFG0_SCK_HIGH_OFFSET	0
#define SPI_CFG0_SCK_LOW_OFFSET		8
#define SPI_CFG0_CS_HOLD_OFFSET		16
#define SPI_CFG0_CS_SETUP_OFFSET	24
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET	0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET	16

#define SPI_CFG1_CS_IDLE_OFFSET		0
#define SPI_CFG1_PACKET_LOOP_OFFSET	8
#define SPI_CFG1_PACKET_LENGTH_OFFSET	16
#define SPI_CFG1_GET_TICK_DLY_OFFSET	29
#define SPI_CFG1_GET_TICK_DLY_OFFSET_V1	30

#define SPI_CFG1_GET_TICK_DLY_MASK	0xe0000000
#define SPI_CFG1_GET_TICK_DLY_MASK_V1	0xc0000000

#define SPI_CFG1_CS_IDLE_MASK		0xff
#define SPI_CFG1_PACKET_LOOP_MASK	0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK	0x3ff0000
#define SPI_CFG1_IPM_PACKET_LENGTH_MASK	GENMASK(31, 16)
#define SPI_CFG2_SCK_HIGH_OFFSET	0
#define SPI_CFG2_SCK_LOW_OFFSET		16

#define SPI_CMD_ACT			BIT(0)
#define SPI_CMD_RESUME			BIT(1)
#define SPI_CMD_RST			BIT(2)
#define SPI_CMD_PAUSE_EN		BIT(4)
#define SPI_CMD_DEASSERT		BIT(5)
#define SPI_CMD_SAMPLE_SEL		BIT(6)
#define SPI_CMD_CS_POL			BIT(7)
#define SPI_CMD_CPHA			BIT(8)
#define SPI_CMD_CPOL			BIT(9)
#define SPI_CMD_RX_DMA			BIT(10)
#define SPI_CMD_TX_DMA			BIT(11)
#define SPI_CMD_TXMSBF			BIT(12)
#define SPI_CMD_RXMSBF			BIT(13)
#define SPI_CMD_RX_ENDIAN		BIT(14)
#define SPI_CMD_TX_ENDIAN		BIT(15)
#define SPI_CMD_FINISH_IE		BIT(16)
#define SPI_CMD_PAUSE_IE		BIT(17)
#define SPI_CMD_IPM_NONIDLE_MODE	BIT(19)
#define SPI_CMD_IPM_SPIM_LOOP		BIT(21)
#define SPI_CMD_IPM_GET_TICKDLY_OFFSET	22

#define SPI_CMD_IPM_GET_TICKDLY_MASK	GENMASK(24, 22)

#define PIN_MODE_CFG(x)			((x) / 2)

#define SPI_CFG3_IPM_HALF_DUPLEX_DIR	BIT(2)
#define SPI_CFG3_IPM_HALF_DUPLEX_EN	BIT(3)
#define SPI_CFG3_IPM_XMODE_EN		BIT(4)
#define SPI_CFG3_IPM_NODATA_FLAG	BIT(5)
#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET	8
#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12

#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK	GENMASK(1, 0)
#define SPI_CFG3_IPM_CMD_BYTELEN_MASK	GENMASK(11, 8)
#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK	GENMASK(15, 12)

#define MT8173_SPI_MAX_PAD_SEL		3

#define MTK_SPI_PAUSE_INT_STATUS	0x2

#define MTK_SPI_MAX_FIFO_SIZE		32U
#define MTK_SPI_PACKET_SIZE		1024
#define MTK_SPI_IPM_PACKET_SIZE		SZ_64K
#define MTK_SPI_IPM_PACKET_LOOP		SZ_256

#define MTK_SPI_IDLE			0
#define MTK_SPI_PAUSED			1

#define MTK_SPI_32BITS_MASK		(0xffffffff)
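
/*
 * DMA address width: controllers with the dma_ext capability can take
 * 36-bit DMA addresses, with the upper bits written through the
 * SPI_TX_SRC_REG_64/SPI_RX_DST_REG_64 registers; the others are limited
 * to 32-bit addresses.
 */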
#define DMA_ADDR_EXT_BITS		(36)
#define DMA_ADDR_DEF_BITS		(32)

/**
 * struct mtk_spi_compatible - device data structure
 * @need_pad_sel:	Enable pad (pins) selection in SPI controller
 * @must_tx:		Must explicitly send dummy TX bytes to do RX only transfer
 * @enhance_timing:	Enable adjusting cfg register to enhance time accuracy
 * @dma_ext:		DMA address extension supported
 * @no_need_unprepare:	Don't unprepare the SPI clk during runtime
 * @ipm_design:		Adjust/extend registers to support IPM design IP features
 */
struct mtk_spi_compatible {
	bool need_pad_sel;
	bool must_tx;
	bool enhance_timing;
	bool dma_ext;
	bool no_need_unprepare;
	bool ipm_design;
};

/**
 * struct mtk_spi - SPI driver instance
 * @base:		Start address of the SPI controller registers
 * @state:		SPI controller state
 * @pad_num:		Number of pad_sel entries
 * @pad_sel:		Groups of pins to select
 * @parent_clk:		Parent of sel_clk
 * @sel_clk:		SPI host mux clock
 * @spi_clk:		Peripheral clock
 * @spi_hclk:		AHB bus clock
 * @cur_transfer:	Currently processed SPI transfer
 * @xfer_len:		Number of bytes to transfer
 * @num_xfered:		Number of transferred bytes
 * @tx_sgl:		TX transfer scatterlist
 * @rx_sgl:		RX transfer scatterlist
 * @tx_sgl_len:		Size of TX DMA transfer
 * @rx_sgl_len:		Size of RX DMA transfer
 * @dev_comp:		Device data structure
 * @qos_request:	QoS request
 * @spi_clk_hz:		Current SPI clock in Hz
 * @spimem_done:	SPI-MEM operation completion
 * @use_spimem:		Enables SPI-MEM
 * @dev:		Device pointer
 * @tx_dma:		DMA start for SPI-MEM TX
 * @rx_dma:		DMA start for SPI-MEM RX
 */
struct mtk_spi {
	void __iomem *base;
	u32 state;
	int pad_num;
	u32 *pad_sel;
	struct clk *parent_clk, *sel_clk, *spi_clk, *spi_hclk;
	struct spi_transfer *cur_transfer;
	u32 xfer_len;
	u32 num_xfered;
	struct scatterlist *tx_sgl, *rx_sgl;
	u32 tx_sgl_len, rx_sgl_len;
	const struct mtk_spi_compatible *dev_comp;
	struct pm_qos_request qos_request;
	u32 spi_clk_hz;
	struct completion spimem_done;
	bool use_spimem;
	struct device *dev;
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;
};

static const struct mtk_spi_compatible mtk_common_compat;

static const struct mtk_spi_compatible mt2712_compat = {
	.must_tx = true,
};

static const struct mtk_spi_compatible mtk_ipm_compat = {
	.enhance_timing = true,
	.dma_ext = true,
	.ipm_design = true,
};

static const struct mtk_spi_compatible mt6765_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
};

static const struct mtk_spi_compatible mt7622_compat = {
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt8173_compat = {
	.need_pad_sel = true,
	.must_tx = true,
};

static const struct mtk_spi_compatible mt8183_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt6893_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
	.no_need_unprepare = true,
};

static const struct mtk_spi_compatible mt6991_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
	.ipm_design = true,
};

/*
 * A piece of default chip info unless the platform
 * supplies it.
 */
static const struct mtk_chip_config mtk_default_chip_info = {
	.sample_sel = 0,
	.tick_delay = 0,
};
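
/*
 * The .data of each entry below points at the per-SoC capability structure
 * defined above; probe looks it up with device_get_match_data() and uses it
 * to gate register-layout and feature differences throughout the driver.
 */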
static const struct of_device_id mtk_spi_of_match[] = {
	{ .compatible = "mediatek,spi-ipm",
		.data = (void *)&mtk_ipm_compat,
	},
	{ .compatible = "mediatek,mt2701-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt2712-spi",
		.data = (void *)&mt2712_compat,
	},
	{ .compatible = "mediatek,mt6589-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt6765-spi",
		.data = (void *)&mt6765_compat,
	},
	{ .compatible = "mediatek,mt6991-spi",
		.data = (void *)&mt6991_compat,
	},
	{ .compatible = "mediatek,mt7622-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt7629-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt8135-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt8173-spi",
		.data = (void *)&mt8173_compat,
	},
	{ .compatible = "mediatek,mt8183-spi",
		.data = (void *)&mt8183_compat,
	},
	{ .compatible = "mediatek,mt8192-spi",
		.data = (void *)&mt6765_compat,
	},
	{ .compatible = "mediatek,mt6893-spi",
		.data = (void *)&mt6893_compat,
	},
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);

static void mtk_spi_reset(struct mtk_spi *mdata)
{
	u32 reg_val;

	/* set the software reset bit in SPI_CMD_REG. */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);
}

static int mtk_spi_set_hw_cs_timing(struct spi_device *spi)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);
	struct spi_delay *cs_setup = &spi->cs_setup;
	struct spi_delay *cs_hold = &spi->cs_hold;
	struct spi_delay *cs_inactive = &spi->cs_inactive;
	u32 setup, hold, inactive;
	u32 reg_val;
	int delay;

	delay = spi_delay_to_ns(cs_setup, NULL);
	if (delay < 0)
		return delay;
	setup = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	delay = spi_delay_to_ns(cs_hold, NULL);
	if (delay < 0)
		return delay;
	hold = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	delay = spi_delay_to_ns(cs_inactive, NULL);
	if (delay < 0)
		return delay;
	inactive = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	if (hold || setup) {
		reg_val = readl(mdata->base + SPI_CFG0_REG);
		if (mdata->dev_comp->enhance_timing) {
			if (hold) {
				hold = min_t(u32, hold, 0x10000);
				reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
				reg_val |= (((hold - 1) & 0xffff)
					    << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
			}
			if (setup) {
				setup = min_t(u32, setup, 0x10000);
				reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
				reg_val |= (((setup - 1) & 0xffff)
					    << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
			}
		} else {
			if (hold) {
				hold = min_t(u32, hold, 0x100);
				reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
				reg_val |= (((hold - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
			}
			if (setup) {
				setup = min_t(u32, setup, 0x100);
				reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
				reg_val |= (((setup - 1) & 0xff)
					    << SPI_CFG0_CS_SETUP_OFFSET);
			}
		}
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	}

	if (inactive) {
		inactive = min_t(u32, inactive, 0x100);
		reg_val = readl(mdata->base + SPI_CFG1_REG);
		reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
		reg_val |= (((inactive - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG1_REG);
	}

	return 0;
}

static int mtk_spi_hw_init(struct spi_controller *host,
			   struct spi_device *spi)
{
	u16 cpha, cpol;
	u32 reg_val;
	struct mtk_chip_config *chip_config = spi->controller_data;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	cpu_latency_qos_update_request(&mdata->qos_request, 500);
	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (mdata->dev_comp->ipm_design) {
		/* SPI transfer without idle time until packet length done */
		reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
		if (spi->mode & SPI_LOOP)
			reg_val |= SPI_CMD_IPM_SPIM_LOOP;
		else
			reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
	}

	if (cpha)
		reg_val |= SPI_CMD_CPHA;
	else
		reg_val &= ~SPI_CMD_CPHA;
	if (cpol)
		reg_val |= SPI_CMD_CPOL;
	else
		reg_val &= ~SPI_CMD_CPOL;

	/* set the mlsbx and mlsbtx */
	if (spi->mode & SPI_LSB_FIRST) {
		reg_val &= ~SPI_CMD_TXMSBF;
		reg_val &= ~SPI_CMD_RXMSBF;
	} else {
		reg_val |= SPI_CMD_TXMSBF;
		reg_val |= SPI_CMD_RXMSBF;
	}

	/* set the tx/rx endian */
#ifdef __LITTLE_ENDIAN
	reg_val &= ~SPI_CMD_TX_ENDIAN;
	reg_val &= ~SPI_CMD_RX_ENDIAN;
#else
	reg_val |= SPI_CMD_TX_ENDIAN;
	reg_val |= SPI_CMD_RX_ENDIAN;
#endif

	if (mdata->dev_comp->enhance_timing) {
		/* set CS polarity */
		if (spi->mode & SPI_CS_HIGH)
			reg_val |= SPI_CMD_CS_POL;
		else
			reg_val &= ~SPI_CMD_CS_POL;

		if (chip_config->sample_sel)
			reg_val |= SPI_CMD_SAMPLE_SEL;
		else
			reg_val &= ~SPI_CMD_SAMPLE_SEL;
	}

	/* set finish and pause interrupt always enable */
	reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;

	/* disable dma mode */
	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);

	/* disable deassert mode */
	reg_val &= ~SPI_CMD_DEASSERT;

	writel(reg_val, mdata->base + SPI_CMD_REG);

	/* pad select */
	if (mdata->dev_comp->need_pad_sel)
		writel(mdata->pad_sel[spi_get_chipselect(spi, 0)],
		       mdata->base + SPI_PAD_SEL_REG);

	/* tick delay */
	if (mdata->dev_comp->enhance_timing) {
		if (mdata->dev_comp->ipm_design) {
			reg_val = readl(mdata->base + SPI_CMD_REG);
			reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
			reg_val |= ((chip_config->tick_delay & 0x7)
				    << SPI_CMD_IPM_GET_TICKDLY_OFFSET);
			writel(reg_val, mdata->base + SPI_CMD_REG);
		} else {
			reg_val = readl(mdata->base + SPI_CFG1_REG);
			reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
			reg_val |= ((chip_config->tick_delay & 0x7)
				    << SPI_CFG1_GET_TICK_DLY_OFFSET);
			writel(reg_val, mdata->base + SPI_CFG1_REG);
		}
	} else {
		reg_val = readl(mdata->base + SPI_CFG1_REG);
		reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK_V1;
		reg_val |= ((chip_config->tick_delay & 0x3)
			    << SPI_CFG1_GET_TICK_DLY_OFFSET_V1);
		writel(reg_val, mdata->base + SPI_CFG1_REG);
	}

	/* set hw cs timing */
	mtk_spi_set_hw_cs_timing(spi);
	return 0;
}

static int mtk_spi_prepare_message(struct spi_controller *host,
				   struct spi_message *msg)
{
	return mtk_spi_hw_init(host, msg->spi);
}

static int mtk_spi_unprepare_message(struct spi_controller *host,
				     struct spi_message *message)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	cpu_latency_qos_update_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE);
	return 0;
}
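
/*
 * mtk_spi_set_cs() drives chip select through the controller's pause mode:
 * keeping SPI_CMD_PAUSE_EN set holds CS asserted between transfers, while
 * clearing it and issuing a software reset releases CS and returns the
 * controller to the idle state.
 */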
static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
	u32 reg_val;
	struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (!enable) {
		reg_val |= SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
	} else {
		reg_val &= ~SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
		mdata->state = MTK_SPI_IDLE;
		mtk_spi_reset(mdata);
	}
}

static void mtk_spi_prepare_transfer(struct spi_controller *host,
				     u32 speed_hz)
{
	u32 div, sck_time, reg_val;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (speed_hz < mdata->spi_clk_hz / 2)
		div = DIV_ROUND_UP(mdata->spi_clk_hz, speed_hz);
	else
		div = 1;

	sck_time = (div + 1) / 2;

	if (mdata->dev_comp->enhance_timing) {
		reg_val = readl(mdata->base + SPI_CFG2_REG);
		reg_val &= ~(0xffff << SPI_CFG2_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xffff)
			    << SPI_CFG2_SCK_HIGH_OFFSET);
		reg_val &= ~(0xffff << SPI_CFG2_SCK_LOW_OFFSET);
		reg_val |= (((sck_time - 1) & 0xffff)
			    << SPI_CFG2_SCK_LOW_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG2_REG);
	} else {
		reg_val = readl(mdata->base + SPI_CFG0_REG);
		reg_val &= ~(0xff << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xff)
			    << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val &= ~(0xff << SPI_CFG0_SCK_LOW_OFFSET);
		reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	}
}

static void mtk_spi_setup_packet(struct spi_controller *host)
{
	u32 packet_size, packet_loop, reg_val;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (mdata->dev_comp->ipm_design)
		packet_size = min_t(u32,
				    mdata->xfer_len,
				    MTK_SPI_IPM_PACKET_SIZE);
	else
		packet_size = min_t(u32,
				    mdata->xfer_len,
				    MTK_SPI_PACKET_SIZE);

	packet_loop = mdata->xfer_len / packet_size;

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	if (mdata->dev_comp->ipm_design)
		reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
	else
		reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
	reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

static void mtk_spi_enable_transfer(struct spi_controller *host)
{
	u32 cmd;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (mdata->state == MTK_SPI_IDLE)
		cmd |= SPI_CMD_ACT;
	else
		cmd |= SPI_CMD_RESUME;
	writel(cmd, mdata->base + SPI_CMD_REG);
}

static int mtk_spi_get_mult_delta(struct mtk_spi *mdata, u32 xfer_len)
{
	u32 mult_delta = 0;

	if (mdata->dev_comp->ipm_design) {
		if (xfer_len > MTK_SPI_IPM_PACKET_SIZE)
			mult_delta = xfer_len % MTK_SPI_IPM_PACKET_SIZE;
	} else {
		if (xfer_len > MTK_SPI_PACKET_SIZE)
			mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
	}

	return mult_delta;
}
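
/*
 * The hardware moves data in whole packets, so each DMA round covers the
 * largest packet-aligned chunk of the current TX/RX scatterlist segments;
 * the remainder (mult_delta) is carried over and programmed in a later
 * round from the interrupt handler.
 */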
static void mtk_spi_update_mdata_len(struct spi_controller *host)
{
	int mult_delta;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
		if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
			mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
			mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
			mdata->rx_sgl_len = mult_delta;
			mdata->tx_sgl_len -= mdata->xfer_len;
		} else {
			mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
			mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
			mdata->tx_sgl_len = mult_delta;
			mdata->rx_sgl_len -= mdata->xfer_len;
		}
	} else if (mdata->tx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
		mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
		mdata->tx_sgl_len = mult_delta;
	} else if (mdata->rx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
		mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
		mdata->rx_sgl_len = mult_delta;
	}
}

static void mtk_spi_setup_dma_addr(struct spi_controller *host,
				   struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (mdata->tx_sgl) {
		writel((u32)(xfer->tx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->tx_dma >> 32),
			       mdata->base + SPI_TX_SRC_REG_64);
#endif
	}

	if (mdata->rx_sgl) {
		writel((u32)(xfer->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
#endif
	}
}

static int mtk_spi_fifo_transfer(struct spi_controller *host,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	int cnt, remainder;
	u32 reg_val;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	mdata->cur_transfer = xfer;
	mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
	mdata->num_xfered = 0;
	mtk_spi_prepare_transfer(host, xfer->speed_hz);
	mtk_spi_setup_packet(host);

	if (xfer->tx_buf) {
		cnt = xfer->len / 4;
		iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
		remainder = xfer->len % 4;
		if (remainder > 0) {
			reg_val = 0;
			memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
			writel(reg_val, mdata->base + SPI_TX_DATA_REG);
		}
	}

	mtk_spi_enable_transfer(host);

	return 1;
}

static int mtk_spi_dma_transfer(struct spi_controller *host,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	int cmd;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	mdata->tx_sgl = NULL;
	mdata->rx_sgl = NULL;
	mdata->tx_sgl_len = 0;
	mdata->rx_sgl_len = 0;
	mdata->cur_transfer = xfer;
	mdata->num_xfered = 0;

	mtk_spi_prepare_transfer(host, xfer->speed_hz);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (xfer->tx_buf)
		cmd |= SPI_CMD_TX_DMA;
	if (xfer->rx_buf)
		cmd |= SPI_CMD_RX_DMA;
	writel(cmd, mdata->base + SPI_CMD_REG);
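
	/*
	 * Only the first TX/RX scatterlist segment is programmed here; the
	 * threaded interrupt handler advances through the remaining segments
	 * with sg_next() as each chunk completes.
	 */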
	if (xfer->tx_buf)
		mdata->tx_sgl = xfer->tx_sg.sgl;
	if (xfer->rx_buf)
		mdata->rx_sgl = xfer->rx_sg.sgl;

	if (mdata->tx_sgl) {
		xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
		mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
	}
	if (mdata->rx_sgl) {
		xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
		mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
	}

	mtk_spi_update_mdata_len(host);
	mtk_spi_setup_packet(host);
	mtk_spi_setup_dma_addr(host, xfer);
	mtk_spi_enable_transfer(host);

	return 1;
}

static int mtk_spi_transfer_one(struct spi_controller *host,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);
	u32 reg_val = 0;

	/* prepare xfer direction and duplex mode */
	if (mdata->dev_comp->ipm_design) {
		if (!xfer->tx_buf || !xfer->rx_buf) {
			reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
			if (xfer->rx_buf)
				reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
		}
		writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
	}

	if (host->can_dma(host, spi, xfer))
		return mtk_spi_dma_transfer(host, spi, xfer);
	else
		return mtk_spi_fifo_transfer(host, spi, xfer);
}

static bool mtk_spi_can_dma(struct spi_controller *host,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* Buffers for DMA transactions must be 4-byte aligned */
	return (xfer->len > MTK_SPI_MAX_FIFO_SIZE &&
		(unsigned long)xfer->tx_buf % 4 == 0 &&
		(unsigned long)xfer->rx_buf % 4 == 0);
}

static int mtk_spi_setup(struct spi_device *spi)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);

	if (!spi->controller_data)
		spi->controller_data = (void *)&mtk_default_chip_info;

	if (mdata->dev_comp->need_pad_sel && spi_get_csgpiod(spi, 0))
		/* CS de-asserted, gpiolib will handle inversion */
		gpiod_direction_output(spi_get_csgpiod(spi, 0), 0);

	return 0;
}

static irqreturn_t mtk_spi_interrupt_thread(int irq, void *dev_id)
{
	u32 cmd, reg_val, cnt, remainder, len;
	struct spi_controller *host = dev_id;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	struct spi_transfer *xfer = mdata->cur_transfer;

	if (!host->can_dma(host, NULL, xfer)) {
		if (xfer->rx_buf) {
			cnt = mdata->xfer_len / 4;
			ioread32_rep(mdata->base + SPI_RX_DATA_REG,
				     xfer->rx_buf + mdata->num_xfered, cnt);
			remainder = mdata->xfer_len % 4;
			if (remainder > 0) {
				reg_val = readl(mdata->base + SPI_RX_DATA_REG);
				memcpy(xfer->rx_buf + (cnt * 4) + mdata->num_xfered,
				       &reg_val,
				       remainder);
			}
		}

		mdata->num_xfered += mdata->xfer_len;
		if (mdata->num_xfered == xfer->len) {
			spi_finalize_current_transfer(host);
			return IRQ_HANDLED;
		}

		len = xfer->len - mdata->num_xfered;
		mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
		mtk_spi_setup_packet(host);

		if (xfer->tx_buf) {
			cnt = mdata->xfer_len / 4;
			iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
				      xfer->tx_buf + mdata->num_xfered, cnt);

			remainder = mdata->xfer_len % 4;
			if (remainder > 0) {
				reg_val = 0;
				memcpy(&reg_val,
				       xfer->tx_buf + (cnt * 4) + mdata->num_xfered,
				       remainder);
				writel(reg_val, mdata->base + SPI_TX_DATA_REG);
			}
		}

		mtk_spi_enable_transfer(host);

		return IRQ_HANDLED;
	}

	if (mdata->tx_sgl)
		xfer->tx_dma += mdata->xfer_len;
	if (mdata->rx_sgl)
		xfer->rx_dma += mdata->xfer_len;

	if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
		mdata->tx_sgl = sg_next(mdata->tx_sgl);
		if (mdata->tx_sgl) {
			xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
			mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
		}
	}
	if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
		mdata->rx_sgl = sg_next(mdata->rx_sgl);
		if (mdata->rx_sgl) {
			xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
			mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
		}
	}

	if (!mdata->tx_sgl && !mdata->rx_sgl) {
		/* spi disable dma */
		cmd = readl(mdata->base + SPI_CMD_REG);
		cmd &= ~SPI_CMD_TX_DMA;
		cmd &= ~SPI_CMD_RX_DMA;
		writel(cmd, mdata->base + SPI_CMD_REG);

		spi_finalize_current_transfer(host);
		return IRQ_HANDLED;
	}

	mtk_spi_update_mdata_len(host);
	mtk_spi_setup_packet(host);
	mtk_spi_setup_dma_addr(host, xfer);
	mtk_spi_enable_transfer(host);

	return IRQ_HANDLED;
}

static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
	struct spi_controller *host = dev_id;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	u32 reg_val;

	reg_val = readl(mdata->base + SPI_STATUS0_REG);
	if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
		mdata->state = MTK_SPI_PAUSED;
	else
		mdata->state = MTK_SPI_IDLE;

	/* SPI-MEM ops */
	if (mdata->use_spimem) {
		complete(&mdata->spimem_done);
		return IRQ_HANDLED;
	}

	return IRQ_WAKE_THREAD;
}

static int mtk_spi_mem_adjust_op_size(struct spi_mem *mem,
				      struct spi_mem_op *op)
{
	int opcode_len;

	if (op->data.dir != SPI_MEM_NO_DATA) {
		opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
		if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
			op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
			/* force data buffer dma-aligned. */
			op->data.nbytes -= op->data.nbytes % 4;
		}
	}

	return 0;
}

static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
				    const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	if (op->addr.nbytes && op->dummy.nbytes &&
	    op->addr.buswidth != op->dummy.buswidth)
		return false;

	if (op->addr.nbytes + op->dummy.nbytes > 16)
		return false;

	if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
		if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
		    MTK_SPI_IPM_PACKET_LOOP ||
		    op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
			return false;
	}

	return true;
}

static void mtk_spi_mem_setup_dma_xfer(struct spi_controller *host,
				       const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	writel((u32)(mdata->tx_dma & MTK_SPI_32BITS_MASK),
	       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (mdata->dev_comp->dma_ext)
		writel((u32)(mdata->tx_dma >> 32),
		       mdata->base + SPI_TX_SRC_REG_64);
#endif

	if (op->data.dir == SPI_MEM_DATA_IN) {
		writel((u32)(mdata->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(mdata->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
#endif
	}
}

static int mtk_spi_transfer_wait(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(mem->spi->controller);
	/*
	 * Each byte takes 8 cycles of the SPI clock. The clock rate is in Hz
	 * and the timeout is computed in milliseconds, hence the factor of
	 * 8 * 1000.
	 */
	u64 ms = 8000LL;

	if (op->data.dir == SPI_MEM_NO_DATA)
		ms *= 32; /* avoid a zero timeout for short transfers */
	else
		ms *= op->data.nbytes;
	ms = div_u64(ms, mem->spi->max_speed_hz);
	ms += ms + 1000; /* 1s tolerance */
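
	/*
	 * For example, a 1024-byte read at 1 MHz gives
	 * ms = 8000 * 1024 / 1000000 = 8, which after doubling and adding
	 * the 1000 ms tolerance yields a timeout of 1016 ms.
	 */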

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	if (!wait_for_completion_timeout(&mdata->spimem_done,
					 msecs_to_jiffies(ms))) {
		dev_err(mdata->dev, "spi-mem transfer timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int mtk_spi_mem_exec_op(struct spi_mem *mem,
			       const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(mem->spi->controller);
	u32 reg_val, nio, tx_size;
	char *tx_tmp_buf, *rx_tmp_buf;
	int ret = 0;

	mdata->use_spimem = true;
	reinit_completion(&mdata->spimem_done);

	mtk_spi_reset(mdata);
	mtk_spi_hw_init(mem->spi->controller, mem->spi);
	mtk_spi_prepare_transfer(mem->spi->controller, op->max_freq);

	reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
	/* opcode byte len */
	reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
	reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;

	/* addr & dummy byte len */
	reg_val &= ~SPI_CFG3_IPM_ADDR_BYTELEN_MASK;
	if (op->addr.nbytes || op->dummy.nbytes)
		reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
			SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;

	/* data byte len */
	if (op->data.dir == SPI_MEM_NO_DATA) {
		reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
		writel(0, mdata->base + SPI_CFG1_REG);
	} else {
		reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
		mdata->xfer_len = op->data.nbytes;
		mtk_spi_setup_packet(mem->spi->controller);
	}

	if (op->addr.nbytes || op->dummy.nbytes) {
		if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
			reg_val |= SPI_CFG3_IPM_XMODE_EN;
		else
			reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
	}

	if (op->addr.buswidth == 2 ||
	    op->dummy.buswidth == 2 ||
	    op->data.buswidth == 2)
		nio = 2;
	else if (op->addr.buswidth == 4 ||
		 op->dummy.buswidth == 4 ||
		 op->data.buswidth == 4)
		nio = 4;
	else
		nio = 1;

	reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
	reg_val |= PIN_MODE_CFG(nio);

	reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	else
		reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);

	tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		tx_size += op->data.nbytes;

	tx_size = max_t(u32, tx_size, 32);

	tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
	if (!tx_tmp_buf) {
		mdata->use_spimem = false;
		return -ENOMEM;
	}

	tx_tmp_buf[0] = op->cmd.opcode;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tx_tmp_buf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));
	}

	if (op->dummy.nbytes)
		memset(tx_tmp_buf + op->addr.nbytes + 1,
		       0xff,
		       op->dummy.nbytes);

	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
		memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
		       op->data.buf.out,
		       op->data.nbytes);

	mdata->tx_dma = dma_map_single(mdata->dev, tx_tmp_buf,
				       tx_size, DMA_TO_DEVICE);
	if (dma_mapping_error(mdata->dev, mdata->tx_dma)) {
		ret = -ENOMEM;
		goto err_exit;
	}

	if (op->data.dir == SPI_MEM_DATA_IN) {
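		/*
		 * The DMA engine requires 4-byte aligned buffers, so an
		 * unaligned read goes through a bounce buffer that is copied
		 * back to the caller's buffer once the transfer completes.
		 */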
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
			rx_tmp_buf = kzalloc(op->data.nbytes,
					     GFP_KERNEL | GFP_DMA);
			if (!rx_tmp_buf) {
				ret = -ENOMEM;
				goto unmap_tx_dma;
			}
		} else {
			rx_tmp_buf = op->data.buf.in;
		}

		mdata->rx_dma = dma_map_single(mdata->dev,
					       rx_tmp_buf,
					       op->data.nbytes,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
			ret = -ENOMEM;
			goto kfree_rx_tmp_buf;
		}
	}

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CMD_RX_DMA;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	mtk_spi_mem_setup_dma_xfer(mem->spi->controller, op);

	mtk_spi_enable_transfer(mem->spi->controller);

	/* Wait for the interrupt. */
	ret = mtk_spi_transfer_wait(mem, op);
	if (ret)
		goto unmap_rx_dma;

	/* spi disable dma */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val &= ~SPI_CMD_RX_DMA;
	writel(reg_val, mdata->base + SPI_CMD_REG);

unmap_rx_dma:
	if (op->data.dir == SPI_MEM_DATA_IN) {
		dma_unmap_single(mdata->dev, mdata->rx_dma,
				 op->data.nbytes, DMA_FROM_DEVICE);
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4))
			memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);
	}
kfree_rx_tmp_buf:
	if (op->data.dir == SPI_MEM_DATA_IN &&
	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
		kfree(rx_tmp_buf);
unmap_tx_dma:
	dma_unmap_single(mdata->dev, mdata->tx_dma,
			 tx_size, DMA_TO_DEVICE);
err_exit:
	kfree(tx_tmp_buf);
	mdata->use_spimem = false;

	return ret;
}

static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
	.adjust_op_size = mtk_spi_mem_adjust_op_size,
	.supports_op = mtk_spi_mem_supports_op,
	.exec_op = mtk_spi_mem_exec_op,
};

static const struct spi_controller_mem_caps mtk_spi_mem_caps = {
	.per_op_freq = true,
};

static int mtk_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_controller *host;
	struct mtk_spi *mdata;
	int i, irq, ret, addr_bits;

	host = devm_spi_alloc_host(dev, sizeof(*mdata));
	if (!host)
		return dev_err_probe(dev, -ENOMEM, "failed to alloc spi host\n");

	host->auto_runtime_pm = true;
	host->dev.of_node = dev->of_node;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;

	host->set_cs = mtk_spi_set_cs;
	host->prepare_message = mtk_spi_prepare_message;
	host->unprepare_message = mtk_spi_unprepare_message;
	host->transfer_one = mtk_spi_transfer_one;
	host->can_dma = mtk_spi_can_dma;
	host->setup = mtk_spi_setup;
	host->set_cs_timing = mtk_spi_set_hw_cs_timing;
	host->use_gpio_descriptors = true;

	mdata = spi_controller_get_devdata(host);
	mdata->dev_comp = device_get_match_data(dev);

	if (mdata->dev_comp->enhance_timing)
		host->mode_bits |= SPI_CS_HIGH;

	if (mdata->dev_comp->must_tx)
		host->flags = SPI_CONTROLLER_MUST_TX;
	if (mdata->dev_comp->ipm_design)
		host->mode_bits |= SPI_LOOP | SPI_RX_DUAL | SPI_TX_DUAL |
				   SPI_RX_QUAD | SPI_TX_QUAD;

	if (mdata->dev_comp->ipm_design) {
		mdata->dev = dev;
		host->mem_ops = &mtk_spi_mem_ops;
		host->mem_caps = &mtk_spi_mem_caps;
		init_completion(&mdata->spimem_done);
	}

	if (mdata->dev_comp->need_pad_sel) {
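		/*
		 * Each chip select maps to one "mediatek,pad-select" entry,
		 * which chooses the group of pins the controller drives for
		 * that device.
		 */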
		mdata->pad_num = of_property_count_u32_elems(dev->of_node,
							     "mediatek,pad-select");
		if (mdata->pad_num < 0)
			return dev_err_probe(dev, -EINVAL,
					     "No 'mediatek,pad-select' property\n");

		mdata->pad_sel = devm_kmalloc_array(dev, mdata->pad_num,
						    sizeof(u32), GFP_KERNEL);
		if (!mdata->pad_sel)
			return -ENOMEM;

		for (i = 0; i < mdata->pad_num; i++) {
			of_property_read_u32_index(dev->of_node,
						   "mediatek,pad-select",
						   i, &mdata->pad_sel[i]);
			if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL)
				return dev_err_probe(dev, -EINVAL,
						     "wrong pad-sel[%d]: %u\n",
						     i, mdata->pad_sel[i]);
		}
	}

	platform_set_drvdata(pdev, host);
	mdata->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdata->base))
		return PTR_ERR(mdata->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;

	if (mdata->dev_comp->ipm_design)
		dma_set_max_seg_size(dev, SZ_16M);
	else
		dma_set_max_seg_size(dev, SZ_256K);

	mdata->parent_clk = devm_clk_get(dev, "parent-clk");
	if (IS_ERR(mdata->parent_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->parent_clk),
				     "failed to get parent-clk\n");

	mdata->sel_clk = devm_clk_get(dev, "sel-clk");
	if (IS_ERR(mdata->sel_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->sel_clk), "failed to get sel-clk\n");

	mdata->spi_clk = devm_clk_get(dev, "spi-clk");
	if (IS_ERR(mdata->spi_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->spi_clk), "failed to get spi-clk\n");

	mdata->spi_hclk = devm_clk_get_optional(dev, "hclk");
	if (IS_ERR(mdata->spi_hclk))
		return dev_err_probe(dev, PTR_ERR(mdata->spi_hclk), "failed to get hclk\n");

	ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to clk_set_parent\n");

	ret = clk_prepare_enable(mdata->spi_hclk);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to enable hclk\n");

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		clk_disable_unprepare(mdata->spi_hclk);
		return dev_err_probe(dev, ret, "failed to enable spi_clk\n");
	}

	mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_disable(mdata->spi_clk);
		clk_disable(mdata->spi_hclk);
	} else {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	cpu_latency_qos_add_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE);

	if (mdata->dev_comp->need_pad_sel) {
		if (mdata->pad_num != host->num_chipselect)
			return dev_err_probe(dev, -EINVAL,
					     "pad_num does not match num_chipselect(%d != %d)\n",
					     mdata->pad_num, host->num_chipselect);

		if (!host->cs_gpiods && host->num_chipselect > 1)
			return dev_err_probe(dev, -EINVAL,
					     "cs_gpios not specified and num_chipselect > 1\n");
	}

	if (mdata->dev_comp->dma_ext)
		addr_bits = DMA_ADDR_EXT_BITS;
	else
		addr_bits = DMA_ADDR_DEF_BITS;
	ret = dma_set_mask(dev, DMA_BIT_MASK(addr_bits));
	if (ret)
		dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
			   addr_bits, ret);

	ret = devm_request_threaded_irq(dev, irq, mtk_spi_interrupt,
					mtk_spi_interrupt_thread,
					IRQF_TRIGGER_NONE, dev_name(dev), host);
	if (ret)
		return dev_err_probe(dev, ret, "failed to register irq\n");

	pm_runtime_enable(dev);

	ret = devm_spi_register_controller(dev, host);
	if (ret) {
		pm_runtime_disable(dev);
		return dev_err_probe(dev, ret, "failed to register host\n");
	}

	return 0;
}

static void mtk_spi_remove(struct platform_device *pdev)
{
	struct spi_controller *host = platform_get_drvdata(pdev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	int ret;

	cpu_latency_qos_remove_request(&mdata->qos_request);
	if (mdata->use_spimem && !completion_done(&mdata->spimem_done))
		complete(&mdata->spimem_done);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_warn(&pdev->dev, "Failed to resume hardware (%pe)\n", ERR_PTR(ret));
	} else {
		/*
		 * If pm runtime resume failed, clks are disabled and
		 * unprepared. So don't access the hardware and skip clk
		 * unpreparing.
		 */
		mtk_spi_reset(mdata);

		if (mdata->dev_comp->no_need_unprepare) {
			clk_unprepare(mdata->spi_clk);
			clk_unprepare(mdata->spi_hclk);
		}
	}

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

#ifdef CONFIG_PM_SLEEP
static int mtk_spi_suspend(struct device *dev)
{
	int ret;
	struct spi_controller *host = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	ret = spi_controller_suspend(host);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev)) {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int mtk_spi_resume(struct device *dev)
{
	int ret;
	struct spi_controller *host = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	pinctrl_pm_select_default_state(dev);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}

		ret = clk_prepare_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
			clk_disable_unprepare(mdata->spi_clk);
			return ret;
		}
	}

	ret = spi_controller_resume(host);
	if (ret < 0) {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	return ret;
}
#endif /* CONFIG_PM_SLEEP */
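
/*
 * Runtime PM: SoCs flagged no_need_unprepare keep the clocks prepared and
 * only enable/disable them here, presumably to avoid the overhead of a full
 * prepare/unprepare cycle on every runtime transition; other SoCs do the
 * complete clk_prepare_enable()/clk_disable_unprepare() sequence.
 */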
#ifdef CONFIG_PM
static int mtk_spi_runtime_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_disable(mdata->spi_clk);
		clk_disable(mdata->spi_hclk);
	} else {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	return 0;
}

static int mtk_spi_runtime_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	int ret;

	if (mdata->dev_comp->no_need_unprepare) {
		ret = clk_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}
		ret = clk_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
			clk_disable(mdata->spi_clk);
			return ret;
		}
	} else {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to prepare_enable spi_clk (%d)\n", ret);
			return ret;
		}

		ret = clk_prepare_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to prepare_enable spi_hclk (%d)\n", ret);
			clk_disable_unprepare(mdata->spi_clk);
			return ret;
		}
	}

	return 0;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops mtk_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
			   mtk_spi_runtime_resume, NULL)
};

static struct platform_driver mtk_spi_driver = {
	.driver = {
		.name = "mtk-spi",
		.pm = &mtk_spi_pm,
		.of_match_table = mtk_spi_of_match,
	},
	.probe = mtk_spi_probe,
	.remove = mtk_spi_remove,
};

module_platform_driver(mtk_spi_driver);

MODULE_DESCRIPTION("MTK SPI Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi");