// SPDX-License-Identifier: GPL-2.0
//
// Synquacer HSSPI controller driver
//
// Copyright (c) 2015-2018 Socionext Inc.
// Copyright (c) 2018-2019 Linaro Ltd.
//

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>
#include <linux/clk.h>

/* HSSPI register address definitions */
#define SYNQUACER_HSSPI_REG_MCTRL	0x00	/* module control */
#define SYNQUACER_HSSPI_REG_PCC0	0x04	/* peripheral comm. config 0 */
/* One PCC register per chipselect, 4 bytes apart starting at PCC0 */
#define SYNQUACER_HSSPI_REG_PCC(n)	(SYNQUACER_HSSPI_REG_PCC0 + (n) * 4)
#define SYNQUACER_HSSPI_REG_TXF		0x14	/* TX interrupt flags */
#define SYNQUACER_HSSPI_REG_TXE		0x18	/* TX interrupt enables */
#define SYNQUACER_HSSPI_REG_TXC		0x1C	/* TX interrupt clear */
#define SYNQUACER_HSSPI_REG_RXF		0x20	/* RX interrupt flags */
#define SYNQUACER_HSSPI_REG_RXE		0x24	/* RX interrupt enables */
#define SYNQUACER_HSSPI_REG_RXC		0x28	/* RX interrupt clear */
#define SYNQUACER_HSSPI_REG_FAULTF	0x2C	/* fault flags */
#define SYNQUACER_HSSPI_REG_FAULTC	0x30	/* fault clear */
#define SYNQUACER_HSSPI_REG_DMCFG	0x34	/* direct-mode config */
#define SYNQUACER_HSSPI_REG_DMSTART	0x38	/* direct-mode start/control */
#define SYNQUACER_HSSPI_REG_DMBCC	0x3C
#define SYNQUACER_HSSPI_REG_DMSTATUS	0x40	/* direct-mode FIFO status */
#define SYNQUACER_HSSPI_REG_FIFOCFG	0x4C	/* FIFO width/thresholds */
#define SYNQUACER_HSSPI_REG_TX_FIFO	0x50
#define SYNQUACER_HSSPI_REG_RX_FIFO	0x90
#define SYNQUACER_HSSPI_REG_MID		0xFC

/* HSSPI register bit definitions */
#define SYNQUACER_HSSPI_MCTRL_MEN			BIT(0)	/* module enable */
#define SYNQUACER_HSSPI_MCTRL_COMMAND_SEQUENCE_EN	BIT(1)
#define SYNQUACER_HSSPI_MCTRL_CDSS			BIT(3)	/* clk src: iPCLK */
#define SYNQUACER_HSSPI_MCTRL_MES			BIT(4)	/* module enable status */
#define SYNQUACER_HSSPI_MCTRL_SYNCON			BIT(5)

#define SYNQUACER_HSSPI_PCC_CPHA		BIT(0)
#define SYNQUACER_HSSPI_PCC_CPOL		BIT(1)
#define SYNQUACER_HSSPI_PCC_ACES		BIT(2)
#define SYNQUACER_HSSPI_PCC_RTM			BIT(3)
#define SYNQUACER_HSSPI_PCC_SSPOL		BIT(4)	/* chipselect polarity */
#define SYNQUACER_HSSPI_PCC_SDIR		BIT(7)	/* shift direction (LSB first) */
#define SYNQUACER_HSSPI_PCC_SENDIAN		BIT(8)
#define SYNQUACER_HSSPI_PCC_SAFESYNC		BIT(16)
#define SYNQUACER_HSSPI_PCC_SS2CD_SHIFT		5U
#define SYNQUACER_HSSPI_PCC_CDRS_MASK		0x7f	/* clock divide ratio */
#define SYNQUACER_HSSPI_PCC_CDRS_SHIFT		9U

#define SYNQUACER_HSSPI_TXF_FIFO_FULL		BIT(0)
#define SYNQUACER_HSSPI_TXF_FIFO_EMPTY		BIT(1)
#define SYNQUACER_HSSPI_TXF_SLAVE_RELEASED	BIT(6)

#define SYNQUACER_HSSPI_TXE_FIFO_FULL		BIT(0)
#define SYNQUACER_HSSPI_TXE_FIFO_EMPTY		BIT(1)
#define SYNQUACER_HSSPI_TXE_SLAVE_RELEASED	BIT(6)

#define SYNQUACER_HSSPI_RXF_FIFO_MORE_THAN_THRESHOLD	BIT(5)
#define SYNQUACER_HSSPI_RXF_SLAVE_RELEASED		BIT(6)

#define SYNQUACER_HSSPI_RXE_FIFO_MORE_THAN_THRESHOLD	BIT(5)
#define SYNQUACER_HSSPI_RXE_SLAVE_RELEASED		BIT(6)

#define SYNQUACER_HSSPI_DMCFG_SSDC		BIT(1)
#define SYNQUACER_HSSPI_DMCFG_MSTARTEN		BIT(2)

#define SYNQUACER_HSSPI_DMSTART_START		BIT(0)	/* kick transfer */
#define SYNQUACER_HSSPI_DMSTOP_STOP		BIT(8)	/* stop / deassert CS */
#define SYNQUACER_HSSPI_DMPSEL_CS_MASK		0x3	/* chipselect select */
#define SYNQUACER_HSSPI_DMPSEL_CS_SHIFT		16U
#define SYNQUACER_HSSPI_DMTRP_BUS_WIDTH_SHIFT	24U
#define SYNQUACER_HSSPI_DMTRP_DATA_MASK		0x3
#define SYNQUACER_HSSPI_DMTRP_DATA_SHIFT	26U
#define SYNQUACER_HSSPI_DMTRP_DATA_TXRX		0
#define SYNQUACER_HSSPI_DMTRP_DATA_RX		1
#define SYNQUACER_HSSPI_DMTRP_DATA_TX		2

/* Word counts currently held in the RX/TX FIFOs */
#define SYNQUACER_HSSPI_DMSTATUS_RX_DATA_MASK	0x1f
#define SYNQUACER_HSSPI_DMSTATUS_RX_DATA_SHIFT	8U
#define SYNQUACER_HSSPI_DMSTATUS_TX_DATA_MASK	0x1f
#define SYNQUACER_HSSPI_DMSTATUS_TX_DATA_SHIFT	16U

#define SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_MASK	0xf
#define SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_SHIFT	0U
#define SYNQUACER_HSSPI_FIFOCFG_TX_THRESHOLD_MASK	0xf
#define SYNQUACER_HSSPI_FIFOCFG_TX_THRESHOLD_SHIFT	4U
#define SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_MASK		0x3
#define SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_SHIFT	8U
#define SYNQUACER_HSSPI_FIFOCFG_RX_FLUSH		BIT(11)
#define SYNQUACER_HSSPI_FIFOCFG_TX_FLUSH		BIT(12)
/*
 * FIFO geometry: both FIFOs are 16 words deep. TX is refilled once it
 * drains to 4 words; RX is drained once it fills past
 * DEPTH - TX_THRESHOLD (= 12) words.
 */
#define SYNQUACER_HSSPI_FIFO_DEPTH		16U
#define SYNQUACER_HSSPI_FIFO_TX_THRESHOLD	4U
#define SYNQUACER_HSSPI_FIFO_RX_THRESHOLD \
	(SYNQUACER_HSSPI_FIFO_DEPTH - SYNQUACER_HSSPI_FIFO_TX_THRESHOLD)

#define SYNQUACER_HSSPI_TRANSFER_MODE_TX	BIT(1)
#define SYNQUACER_HSSPI_TRANSFER_MODE_RX	BIT(2)
#define SYNQUACER_HSSPI_TRANSFER_TMOUT_MSEC	2000U
#define SYNQUACER_HSSPI_ENABLE_TMOUT_MSEC	1000U

/* Module clock source; selects MCTRL.CDSS when iPCLK is used */
#define SYNQUACER_HSSPI_CLOCK_SRC_IHCLK	0
#define SYNQUACER_HSSPI_CLOCK_SRC_IPCLK	1

#define SYNQUACER_HSSPI_NUM_CHIP_SELECT	4U
#define SYNQUACER_HSSPI_IRQ_NAME_MAX	32U

/*
 * Per-controller driver state.
 *
 * The cs/bpw/mode/speed/bus_width/transfer_mode members cache the last
 * configuration written to the hardware so synquacer_spi_config() can
 * skip reprogramming when nothing changed.
 */
struct synquacer_spi {
	struct device *dev;
	struct completion transfer_done;	/* completed by the IRQ handlers */
	unsigned int cs;		/* cached chipselect */
	unsigned int bpw;		/* cached bits-per-word (8/16/24/32) */
	unsigned int mode;		/* cached SPI mode flags */
	unsigned int speed;		/* cached transfer speed (Hz) */
	bool aces, rtm;			/* PCC_ACES / PCC_RTM from DT/ACPI props */
	void *rx_buf;			/* current RX cursor into xfer buffer */
	const void *tx_buf;		/* current TX cursor into xfer buffer */
	struct clk *clk;
	int clk_src_type;		/* SYNQUACER_HSSPI_CLOCK_SRC_* */
	void __iomem *regs;
	u32 tx_words, rx_words;		/* words still to move in this xfer */
	unsigned int bus_width;		/* 1/2/4-bit bus */
	unsigned int transfer_mode;	/* SYNQUACER_HSSPI_TRANSFER_MODE_* */
	char rx_irq_name[SYNQUACER_HSSPI_IRQ_NAME_MAX];
	char tx_irq_name[SYNQUACER_HSSPI_IRQ_NAME_MAX];
};

/*
 * Drain the RX FIFO into sspi->rx_buf, moving at most rx_words words.
 * The number of words available is read from DMSTATUS; the FIFO access
 * width follows the configured bits-per-word (24 bpw uses 32-bit
 * accesses). Advances rx_buf and decrements rx_words accordingly.
 * Returns 0 on success, -EINVAL for an unsupported bpw.
 */
static int read_fifo(struct synquacer_spi *sspi)
{
	u32 len = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTATUS);

	len = (len >> SYNQUACER_HSSPI_DMSTATUS_RX_DATA_SHIFT) &
	      SYNQUACER_HSSPI_DMSTATUS_RX_DATA_MASK;
	len = min(len, sspi->rx_words);

	switch (sspi->bpw) {
	case 8: {
		u8 *buf = sspi->rx_buf;

		ioread8_rep(sspi->regs + SYNQUACER_HSSPI_REG_RX_FIFO,
			    buf, len);
		sspi->rx_buf = buf + len;
		break;
	}
	case 16: {
		u16 *buf = sspi->rx_buf;

		ioread16_rep(sspi->regs + SYNQUACER_HSSPI_REG_RX_FIFO,
			     buf, len);
		sspi->rx_buf = buf + len;
		break;
	}
	case 24:
		/* fallthrough, should use 32-bits access */
	case 32: {
		u32 *buf = sspi->rx_buf;

		ioread32_rep(sspi->regs + SYNQUACER_HSSPI_REG_RX_FIFO,
			     buf, len);
		sspi->rx_buf = buf + len;
		break;
	}
	default:
		return -EINVAL;
	}

	sspi->rx_words -= len;
	return 0;
}

/*
 * Fill the TX FIFO from sspi->tx_buf, writing as many words as fit in
 * the free space reported by DMSTATUS (DEPTH minus queued words), capped
 * at tx_words. Advances tx_buf and decrements tx_words accordingly.
 * Returns 0 on success, -EINVAL for an unsupported bpw.
 */
static int write_fifo(struct synquacer_spi *sspi)
{
	u32 len = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTATUS);

	len = (len >> SYNQUACER_HSSPI_DMSTATUS_TX_DATA_SHIFT) &
	      SYNQUACER_HSSPI_DMSTATUS_TX_DATA_MASK;
	len = min(SYNQUACER_HSSPI_FIFO_DEPTH - len,
		  sspi->tx_words);

	switch (sspi->bpw) {
	case 8: {
		const u8 *buf = sspi->tx_buf;

		iowrite8_rep(sspi->regs + SYNQUACER_HSSPI_REG_TX_FIFO,
			     buf, len);
		sspi->tx_buf = buf + len;
		break;
	}
	case 16: {
		const u16 *buf = sspi->tx_buf;

		iowrite16_rep(sspi->regs + SYNQUACER_HSSPI_REG_TX_FIFO,
			      buf, len);
		sspi->tx_buf = buf + len;
		break;
	}
	case 24:
		/* fallthrough, should use 32-bits access */
	case 32: {
		const u32 *buf = sspi->tx_buf;

		iowrite32_rep(sspi->regs + SYNQUACER_HSSPI_REG_TX_FIFO,
			      buf, len);
		sspi->tx_buf = buf + len;
		break;
	}
	default:
		return -EINVAL;
	}

	sspi->tx_words -= len;
	return 0;
}

/*
 * Program the controller for the given transfer: clock divider, SPI
 * mode bits and SAFESYNC workaround in PCC(cs), FIFO access width in
 * FIFOCFG, and data direction / bus width in DMSTART. Skipped entirely
 * when every cached parameter already matches the request.
 * Returns 0 on success or -EINVAL for invalid duplex/speed requests.
 */
static int synquacer_spi_config(struct spi_controller *host,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct synquacer_spi *sspi = spi_controller_get_devdata(host);
	unsigned int speed, mode, bpw, cs, bus_width, transfer_mode;
	u32 rate, val, div;

	/* Full Duplex only on 1-bit wide bus */
	if (xfer->rx_buf && xfer->tx_buf &&
	    (xfer->rx_nbits != 1 || xfer->tx_nbits != 1)) {
		dev_err(sspi->dev,
			"RX and TX bus widths must be 1-bit for Full-Duplex!\n");
		return -EINVAL;
	}

	if (xfer->tx_buf) {
		bus_width = xfer->tx_nbits;
		transfer_mode = SYNQUACER_HSSPI_TRANSFER_MODE_TX;
	} else {
		bus_width = xfer->rx_nbits;
		transfer_mode = SYNQUACER_HSSPI_TRANSFER_MODE_RX;
	}

	mode = spi->mode;
	cs = spi_get_chipselect(spi, 0);
	speed = xfer->speed_hz;
	bpw = xfer->bits_per_word;

	/* return if nothing to change */
	if (speed == sspi->speed &&
	    bus_width == sspi->bus_width && bpw == sspi->bpw &&
	    mode == sspi->mode && cs == sspi->cs &&
	    transfer_mode == sspi->transfer_mode) {
		return 0;
	}

	sspi->transfer_mode = transfer_mode;
	rate = host->max_speed_hz;

	/* CDRS holds div/2, a 7-bit field, so div must stay below 255 */
	div = DIV_ROUND_UP(rate, speed);
	if (div > 254) {
		/*
		 * NOTE(review): this prints the previously cached speed
		 * (sspi->speed), not the rejected request in 'speed' —
		 * confirm whether that is intentional.
		 */
		dev_err(sspi->dev, "Requested rate too low (%u)\n",
			sspi->speed);
		return -EINVAL;
	}

	/* SAFESYNC is needed for fast multi-bit transfers at low dividers */
	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_PCC(cs));
	val &= ~SYNQUACER_HSSPI_PCC_SAFESYNC;
	if (bpw == 8 && (mode & (SPI_TX_DUAL | SPI_RX_DUAL)) && div < 3)
		val |= SYNQUACER_HSSPI_PCC_SAFESYNC;
	if (bpw == 8 && (mode & (SPI_TX_QUAD | SPI_RX_QUAD)) && div < 6)
		val |= SYNQUACER_HSSPI_PCC_SAFESYNC;
	if (bpw == 16 && (mode & (SPI_TX_QUAD | SPI_RX_QUAD)) && div < 3)
		val |= SYNQUACER_HSSPI_PCC_SAFESYNC;

	if (mode & SPI_CPHA)
		val |= SYNQUACER_HSSPI_PCC_CPHA;
	else
		val &= ~SYNQUACER_HSSPI_PCC_CPHA;

	if (mode & SPI_CPOL)
		val |= SYNQUACER_HSSPI_PCC_CPOL;
	else
		val &= ~SYNQUACER_HSSPI_PCC_CPOL;

	if (mode & SPI_CS_HIGH)
		val |= SYNQUACER_HSSPI_PCC_SSPOL;
	else
		val &= ~SYNQUACER_HSSPI_PCC_SSPOL;

	if (mode & SPI_LSB_FIRST)
		val |= SYNQUACER_HSSPI_PCC_SDIR;
	else
		val &= ~SYNQUACER_HSSPI_PCC_SDIR;

	if (sspi->aces)
		val |= SYNQUACER_HSSPI_PCC_ACES;
	else
		val &= ~SYNQUACER_HSSPI_PCC_ACES;

	if (sspi->rtm)
		val |= SYNQUACER_HSSPI_PCC_RTM;
	else
		val &= ~SYNQUACER_HSSPI_PCC_RTM;

	val |= (3 << SYNQUACER_HSSPI_PCC_SS2CD_SHIFT);
	val |= SYNQUACER_HSSPI_PCC_SENDIAN;

	/* program the clock divide ratio as div/2 */
	val &= ~(SYNQUACER_HSSPI_PCC_CDRS_MASK <<
		 SYNQUACER_HSSPI_PCC_CDRS_SHIFT);
	val |= ((div >> 1) << SYNQUACER_HSSPI_PCC_CDRS_SHIFT);

	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_PCC(cs));

	/* FIFO access width in bytes-1 (bpw 24 is handled as 32-bit) */
	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
	val &= ~(SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_MASK <<
		 SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_SHIFT);
	val |= ((bpw / 8 - 1) << SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_SHIFT);
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);

	/* data direction and bus width (1/2/4-bit encoded as width>>1) */
	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
	val &= ~(SYNQUACER_HSSPI_DMTRP_DATA_MASK <<
		 SYNQUACER_HSSPI_DMTRP_DATA_SHIFT);

	if (xfer->rx_buf)
		val |= (SYNQUACER_HSSPI_DMTRP_DATA_RX <<
			SYNQUACER_HSSPI_DMTRP_DATA_SHIFT);
	else
		val |= (SYNQUACER_HSSPI_DMTRP_DATA_TX <<
			SYNQUACER_HSSPI_DMTRP_DATA_SHIFT);

	val &= ~(3 << SYNQUACER_HSSPI_DMTRP_BUS_WIDTH_SHIFT);
	val |= ((bus_width >> 1) << SYNQUACER_HSSPI_DMTRP_BUS_WIDTH_SHIFT);
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);

	/* remember what we programmed for the fast-path check above */
	sspi->bpw = bpw;
	sspi->mode = mode;
	sspi->speed = speed;
	sspi->cs = spi_get_chipselect(spi, 0);
	sspi->bus_width = bus_width;

	return 0;
}

/*
 * Execute one SPI transfer in interrupt-driven PIO mode.
 *
 * Flushes both FIFOs, configures the controller, primes the TX FIFO,
 * triggers the transfer, then waits (with timeout) for the IRQ handlers
 * to signal transfer_done for the TX and/or RX side. Returns 0 on
 * success, a negative errno on config/FIFO failure or -ETIMEDOUT.
 */
static int synquacer_spi_transfer_one(struct spi_controller *host,
				      struct spi_device *spi,
				      struct spi_transfer *xfer)
{
	struct synquacer_spi *sspi = spi_controller_get_devdata(host);
	int ret;
	int status = 0;
	u32 words;
	u8 bpw;
	u32 val;

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
	val &= ~SYNQUACER_HSSPI_DMSTOP_STOP;
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);

	/* discard any stale FIFO contents from a previous transfer */
	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
	val |= SYNQUACER_HSSPI_FIFOCFG_RX_FLUSH;
	val |= SYNQUACER_HSSPI_FIFOCFG_TX_FLUSH;
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);

	/*
	 * See if we can transfer 4-bytes as 1 word
	 * to maximize the FIFO buffer efficiency.
	 */
	bpw = xfer->bits_per_word;
	if (bpw == 8 && !(xfer->len % 4) && !(spi->mode & SPI_LSB_FIRST))
		xfer->bits_per_word = 32;

	ret = synquacer_spi_config(host, spi, xfer);

	/* restore */
	xfer->bits_per_word = bpw;

	if (ret)
		return ret;

	reinit_completion(&sspi->transfer_done);

	sspi->tx_buf = xfer->tx_buf;
	sspi->rx_buf = xfer->rx_buf;

	/* xfer->len is in bytes; convert to FIFO words */
	switch (sspi->bpw) {
	case 8:
		words = xfer->len;
		break;
	case 16:
		words = xfer->len / 2;
		break;
	case 24:
		/* fallthrough, should use 32-bits access */
	case 32:
		words = xfer->len / 4;
		break;
	default:
		dev_err(sspi->dev, "unsupported bpw: %d\n", sspi->bpw);
		return -EINVAL;
	}

	if (xfer->tx_buf)
		sspi->tx_words = words;
	else
		sspi->tx_words = 0;

	if (xfer->rx_buf)
		sspi->rx_words = words;
	else
		sspi->rx_words = 0;

	/* prime the TX FIFO before triggering */
	if (xfer->tx_buf) {
		status = write_fifo(sspi);
		if (status < 0) {
			dev_err(sspi->dev, "failed write_fifo. status: 0x%x\n",
				status);
			return status;
		}
	}

	/*
	 * Set the RX threshold: interrupt when the whole (short) transfer
	 * has arrived, or at the standing threshold for longer ones.
	 */
	if (xfer->rx_buf) {
		val = readl(sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
		val &= ~(SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_MASK <<
			 SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_SHIFT);
		val |= ((sspi->rx_words > SYNQUACER_HSSPI_FIFO_DEPTH ?
			 SYNQUACER_HSSPI_FIFO_RX_THRESHOLD : sspi->rx_words) <<
			SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_SHIFT);
		writel(val, sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
	}

	/* clear any pending TX/RX interrupt status */
	writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_TXC);
	writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_RXC);

	/* Trigger */
	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
	val |= SYNQUACER_HSSPI_DMSTART_START;
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);

	if (xfer->tx_buf) {
		val = SYNQUACER_HSSPI_TXE_FIFO_EMPTY;
		writel(val, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
		status = wait_for_completion_timeout(&sspi->transfer_done,
			msecs_to_jiffies(SYNQUACER_HSSPI_TRANSFER_TMOUT_MSEC));
		writel(0, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
	}

	if (xfer->rx_buf) {
		u32 buf[SYNQUACER_HSSPI_FIFO_DEPTH];

		val = SYNQUACER_HSSPI_RXE_FIFO_MORE_THAN_THRESHOLD |
		      SYNQUACER_HSSPI_RXE_SLAVE_RELEASED;
		writel(val, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
		status = wait_for_completion_timeout(&sspi->transfer_done,
			msecs_to_jiffies(SYNQUACER_HSSPI_TRANSFER_TMOUT_MSEC));
		writel(0, sspi->regs + SYNQUACER_HSSPI_REG_RXE);

		/* stop RX and clean RXFIFO */
		val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
		val |= SYNQUACER_HSSPI_DMSTOP_STOP;
		writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
		/* drain leftovers into a scratch buffer; rx_buf is done */
		sspi->rx_buf = buf;
		sspi->rx_words = SYNQUACER_HSSPI_FIFO_DEPTH;
		read_fifo(sspi);
	}

	/* wait_for_completion_timeout() returns 0 only on timeout */
	if (status == 0) {
		dev_err(sspi->dev, "failed to transfer. Timeout.\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * Select the chipselect line in DMPSEL and, when deasserting, set the
 * DMSTOP bit which releases the slave.
 */
static void synquacer_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct synquacer_spi *sspi = spi_controller_get_devdata(spi->controller);
	u32 val;

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
	val &= ~(SYNQUACER_HSSPI_DMPSEL_CS_MASK <<
		 SYNQUACER_HSSPI_DMPSEL_CS_SHIFT);
	val |= spi_get_chipselect(spi, 0) << SYNQUACER_HSSPI_DMPSEL_CS_SHIFT;

	if (!enable)
		val |= SYNQUACER_HSSPI_DMSTOP_STOP;

	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
}

/*
 * Busy-poll until MCTRL.MES (Module Enable Status) matches the requested
 * 'enable' state, with a 1 s deadline. Returns 0 on success, -EBUSY on
 * timeout.
 */
static int synquacer_spi_wait_status_update(struct synquacer_spi *sspi,
					    bool enable)
{
	u32 val;
	unsigned long timeout = jiffies +
		msecs_to_jiffies(SYNQUACER_HSSPI_ENABLE_TMOUT_MSEC);

	/* wait MES(Module Enable Status) is updated */
	do {
		val = readl(sspi->regs + SYNQUACER_HSSPI_REG_MCTRL) &
		      SYNQUACER_HSSPI_MCTRL_MES;
		if (enable && val)
			return 0;
		if (!enable && !val)
			return 0;
	} while (time_before(jiffies, timeout));

	dev_err(sspi->dev, "timeout occurs in updating Module Enable Status\n");
	return -EBUSY;
}

/*
 * Reset and enable the HSSPI module: disable it, clear interrupt
 * enables/status and faults, put direct-mode under software control
 * (no SSDC/MSTARTEN), select the clock source, then re-enable and wait
 * for the enable status to latch. Returns 0 or a negative errno.
 */
static int synquacer_spi_enable(struct spi_controller *host)
{
	u32 val;
	int status;
	struct synquacer_spi *sspi = spi_controller_get_devdata(host);

	/* Disable module */
	writel(0, sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
	status = synquacer_spi_wait_status_update(sspi, false);
	if (status < 0)
		return status;

	/* mask all interrupts and clear any pending status/faults */
	writel(0, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
	writel(0, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
	writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_TXC);
	writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_RXC);
	writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_FAULTC);

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMCFG);
	val &= ~SYNQUACER_HSSPI_DMCFG_SSDC;
	val &= ~SYNQUACER_HSSPI_DMCFG_MSTARTEN;
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMCFG);

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
	if (sspi->clk_src_type == SYNQUACER_HSSPI_CLOCK_SRC_IPCLK)
		val |= SYNQUACER_HSSPI_MCTRL_CDSS;
	else
		val &= ~SYNQUACER_HSSPI_MCTRL_CDSS;

	val &= ~SYNQUACER_HSSPI_MCTRL_COMMAND_SEQUENCE_EN;
	val |= SYNQUACER_HSSPI_MCTRL_MEN;
	val |= SYNQUACER_HSSPI_MCTRL_SYNCON;

	/* Enable module */
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
	status = synquacer_spi_wait_status_update(sspi, true);
	if (status < 0)
		return status;

	return 0;
}

/*
 * RX interrupt: drain the FIFO on threshold or slave-release, and
 * complete the transfer once all expected words have been read.
 */
static irqreturn_t sq_spi_rx_handler(int irq, void *priv)
{
	uint32_t val;
	struct synquacer_spi *sspi = priv;

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_RXF);
	if ((val & SYNQUACER_HSSPI_RXF_SLAVE_RELEASED) ||
	    (val & SYNQUACER_HSSPI_RXF_FIFO_MORE_THAN_THRESHOLD)) {
		read_fifo(sspi);

		if (sspi->rx_words == 0) {
			/* all data received; mask RX irq and wake waiter */
			writel(0, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
			complete(&sspi->transfer_done);
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

/*
 * TX interrupt: on FIFO-empty either refill from the remaining words or,
 * when everything has been queued, mask the irq and complete the
 * transfer.
 */
static irqreturn_t sq_spi_tx_handler(int irq, void *priv)
{
	uint32_t val;
	struct synquacer_spi *sspi = priv;

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_TXF);
	if (val & SYNQUACER_HSSPI_TXF_FIFO_EMPTY) {
		if (sspi->tx_words == 0) {
			writel(0, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
			complete(&sspi->transfer_done);
		} else {
			write_fifo(sspi);
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

/*
 * Probe: map registers, resolve the module clock (iHCLK or iPCLK from
 * DT "clock-names", or the "socionext,ihclk-rate" property on ACPI),
 * request the RX/TX interrupts, enable the module and register the SPI
 * controller.
 */
static int synquacer_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct synquacer_spi *sspi;
	int ret;
	int rx_irq, tx_irq;

	host = spi_alloc_host(&pdev->dev, sizeof(*sspi));
	if (!host)
		return -ENOMEM;

	platform_set_drvdata(pdev, host);

	sspi = spi_controller_get_devdata(host);
	sspi->dev = &pdev->dev;

	init_completion(&sspi->transfer_done);

	sspi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sspi->regs)) {
		ret = PTR_ERR(sspi->regs);
		goto put_spi;
	}

	sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IHCLK; /* Default */
	device_property_read_u32(&pdev->dev, "socionext,ihclk-rate",
				 &host->max_speed_hz); /* for ACPI */

	if (dev_of_node(&pdev->dev)) {
		if (device_property_match_string(&pdev->dev,
						 "clock-names", "iHCLK") >= 0) {
			sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IHCLK;
			sspi->clk = devm_clk_get(sspi->dev, "iHCLK");
		} else if (device_property_match_string(&pdev->dev,
							"clock-names", "iPCLK") >= 0) {
			sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IPCLK;
			sspi->clk = devm_clk_get(sspi->dev, "iPCLK");
		} else {
			dev_err(&pdev->dev, "specified wrong clock source\n");
			ret = -EINVAL;
			goto put_spi;
		}

		if (IS_ERR(sspi->clk)) {
			ret = dev_err_probe(&pdev->dev, PTR_ERR(sspi->clk),
					    "clock not found\n");
			goto put_spi;
		}

		ret = clk_prepare_enable(sspi->clk);
		if (ret) {
			dev_err(&pdev->dev, "failed to enable clock (%d)\n",
				ret);
			goto put_spi;
		}

		/* OF: the real clock rate overrides any ACPI property */
		host->max_speed_hz = clk_get_rate(sspi->clk);
	}

	if (!host->max_speed_hz) {
		dev_err(&pdev->dev, "missing clock source\n");
		ret = -EINVAL;
		goto disable_clk;
	}
	/* 254 is the largest divider synquacer_spi_config() accepts */
	host->min_speed_hz = host->max_speed_hz / 254;

	sspi->aces = device_property_read_bool(&pdev->dev,
					       "socionext,set-aces");
	sspi->rtm = device_property_read_bool(&pdev->dev, "socionext,use-rtm");

	host->num_chipselect = SYNQUACER_HSSPI_NUM_CHIP_SELECT;

	/*
	 * NOTE(review): if platform_get_irq() ever returned 0, ret would
	 * be 0 and probe would report success after bailing out; current
	 * kernels never return 0 here, but confirm before relying on it.
	 */
	rx_irq = platform_get_irq(pdev, 0);
	if (rx_irq <= 0) {
		ret = rx_irq;
		goto disable_clk;
	}
	snprintf(sspi->rx_irq_name, SYNQUACER_HSSPI_IRQ_NAME_MAX, "%s-rx",
		 dev_name(&pdev->dev));
	ret = devm_request_irq(&pdev->dev, rx_irq, sq_spi_rx_handler,
			       0, sspi->rx_irq_name, sspi);
	if (ret) {
		dev_err(&pdev->dev, "request rx_irq failed (%d)\n", ret);
		goto disable_clk;
	}

	tx_irq = platform_get_irq(pdev, 1);
	if (tx_irq <= 0) {
		ret = tx_irq;
		goto disable_clk;
	}
	snprintf(sspi->tx_irq_name, SYNQUACER_HSSPI_IRQ_NAME_MAX, "%s-tx",
		 dev_name(&pdev->dev));
	ret = devm_request_irq(&pdev->dev, tx_irq, sq_spi_tx_handler,
			       0, sspi->tx_irq_name, sspi);
	if (ret) {
		dev_err(&pdev->dev, "request tx_irq failed (%d)\n", ret);
		goto disable_clk;
	}

	host->auto_runtime_pm = true;
	host->bus_num = pdev->id;

	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL |
			  SPI_TX_QUAD | SPI_RX_QUAD;
	host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(24) |
				   SPI_BPW_MASK(16) | SPI_BPW_MASK(8);

	host->set_cs = synquacer_spi_set_cs;
	host->transfer_one = synquacer_spi_transfer_one;

	ret = synquacer_spi_enable(host);
	if (ret)
		goto disable_clk;

	pm_runtime_set_active(sspi->dev);
	pm_runtime_enable(sspi->dev);

	ret = devm_spi_register_controller(sspi->dev, host);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(sspi->dev);
disable_clk:
	clk_disable_unprepare(sspi->clk);
put_spi:
	spi_controller_put(host);

	return ret;
}

/* Remove: undo runtime PM and release the module clock. */
static void synquacer_spi_remove(struct platform_device *pdev)
{
	struct spi_controller *host = platform_get_drvdata(pdev);
	struct synquacer_spi *sspi = spi_controller_get_devdata(host);

	pm_runtime_disable(sspi->dev);

	clk_disable_unprepare(sspi->clk);
}

/*
 * System suspend: quiesce the SPI core, then gate the clock unless the
 * device is already runtime-suspended (clock already off).
 */
static int __maybe_unused synquacer_spi_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct synquacer_spi *sspi = spi_controller_get_devdata(host);
	int ret;

	ret = spi_controller_suspend(host);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(sspi->clk);

	return ret;
}

/*
 * System resume: re-enable clock and hardware (unless runtime-suspended,
 * in which case runtime PM will do it on next use), then resume the SPI
 * core queue.
 */
static int __maybe_unused synquacer_spi_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct synquacer_spi *sspi = spi_controller_get_devdata(host);
	int ret;

	if (!pm_runtime_suspended(dev)) {
		/* Ensure reconfigure during next xfer */
		sspi->speed = 0;

		ret = clk_prepare_enable(sspi->clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable clk (%d)\n",
				ret);
			return ret;
		}

		ret = synquacer_spi_enable(host);
		if (ret) {
			clk_disable_unprepare(sspi->clk);
			dev_err(dev, "failed to enable spi (%d)\n", ret);
			return ret;
		}
	}

	ret = spi_controller_resume(host);
	if (ret < 0)
		clk_disable_unprepare(sspi->clk);

	return ret;
}

static SIMPLE_DEV_PM_OPS(synquacer_spi_pm_ops, synquacer_spi_suspend,
			 synquacer_spi_resume);

static const struct of_device_id synquacer_spi_of_match[] = {
	{.compatible = "socionext,synquacer-spi"},
	{}
};
MODULE_DEVICE_TABLE(of, synquacer_spi_of_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id synquacer_hsspi_acpi_ids[] = {
	{ "SCX0004" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(acpi, synquacer_hsspi_acpi_ids);
#endif

static struct platform_driver synquacer_spi_driver = {
	.driver = {
		.name = "synquacer-spi",
		.pm = &synquacer_spi_pm_ops,
		.of_match_table = synquacer_spi_of_match,
		.acpi_match_table = ACPI_PTR(synquacer_hsspi_acpi_ids),
	},
	.probe = synquacer_spi_probe,
	.remove = synquacer_spi_remove,
};
module_platform_driver(synquacer_spi_driver);

MODULE_DESCRIPTION("Socionext Synquacer HS-SPI controller driver");
MODULE_AUTHOR("Masahisa Kojima <masahisa.kojima@linaro.org>");
MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
MODULE_LICENSE("GPL v2");