// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2015 Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
 * Copyright (C) 2016 Hauke Mehrtens <hauke@hauke-m.de>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

#ifdef CONFIG_LANTIQ
#include <lantiq_soc.h>
#endif

#define LTQ_SPI_RX_IRQ_NAME	"spi_rx"
#define LTQ_SPI_TX_IRQ_NAME	"spi_tx"
#define LTQ_SPI_ERR_IRQ_NAME	"spi_err"
#define LTQ_SPI_FRM_IRQ_NAME	"spi_frm"

#define LTQ_SPI_CLC		0x00
#define LTQ_SPI_PISEL		0x04
#define LTQ_SPI_ID		0x08
#define LTQ_SPI_CON		0x10
#define LTQ_SPI_STAT		0x14
#define LTQ_SPI_WHBSTATE	0x18
#define LTQ_SPI_TB		0x20
#define LTQ_SPI_RB		0x24
#define LTQ_SPI_RXFCON		0x30
#define LTQ_SPI_TXFCON		0x34
#define LTQ_SPI_FSTAT		0x38
#define LTQ_SPI_BRT		0x40
#define LTQ_SPI_BRSTAT		0x44
#define LTQ_SPI_SFCON		0x60
#define LTQ_SPI_SFSTAT		0x64
#define LTQ_SPI_GPOCON		0x70
#define LTQ_SPI_GPOSTAT		0x74
#define LTQ_SPI_FPGO		0x78
#define LTQ_SPI_RXREQ		0x80
#define LTQ_SPI_RXCNT		0x84
#define LTQ_SPI_DMACON		0xec
#define LTQ_SPI_IRNEN		0xf4

#define LTQ_SPI_CLC_SMC_S	16	/* Clock divider for sleep mode */
#define LTQ_SPI_CLC_SMC_M	(0xFF << LTQ_SPI_CLC_SMC_S)
#define LTQ_SPI_CLC_RMC_S	8	/* Clock divider for normal run mode */
#define LTQ_SPI_CLC_RMC_M	(0xFF << LTQ_SPI_CLC_RMC_S)
#define LTQ_SPI_CLC_DISS	BIT(1)	/* Disable status bit */
#define LTQ_SPI_CLC_DISR	BIT(0)	/* Disable request bit */

#define LTQ_SPI_ID_TXFS_S	24	/* Implemented TX FIFO size */
#define LTQ_SPI_ID_RXFS_S	16	/* Implemented RX FIFO size */
#define LTQ_SPI_ID_MOD_S	8	/* Module ID */
#define LTQ_SPI_ID_MOD_M	(0xff << LTQ_SPI_ID_MOD_S)
#define LTQ_SPI_ID_CFG_S	5	/* DMA interface support */
#define LTQ_SPI_ID_CFG_M	(1 << LTQ_SPI_ID_CFG_S)
#define LTQ_SPI_ID_REV_M	0x1F	/* Hardware revision number */

#define LTQ_SPI_CON_BM_S	16	/* Data width selection */
#define LTQ_SPI_CON_BM_M	(0x1F << LTQ_SPI_CON_BM_S)
#define LTQ_SPI_CON_EM		BIT(24)	/* Echo mode */
#define LTQ_SPI_CON_IDLE	BIT(23)	/* Idle bit value */
#define LTQ_SPI_CON_ENBV	BIT(22)	/* Enable byte valid control */
#define LTQ_SPI_CON_RUEN	BIT(12)	/* Receive underflow error enable */
#define LTQ_SPI_CON_TUEN	BIT(11)	/* Transmit underflow error enable */
#define LTQ_SPI_CON_AEN		BIT(10)	/* Abort error enable */
#define LTQ_SPI_CON_REN		BIT(9)	/* Receive overflow error enable */
#define LTQ_SPI_CON_TEN		BIT(8)	/* Transmit overflow error enable */
#define LTQ_SPI_CON_LB		BIT(7)	/* Loopback control */
#define LTQ_SPI_CON_PO		BIT(6)	/* Clock polarity control */
#define LTQ_SPI_CON_PH		BIT(5)	/* Clock phase control */
#define LTQ_SPI_CON_HB		BIT(4)	/* Heading control */
#define LTQ_SPI_CON_RXOFF	BIT(1)	/* Switch receiver off */
#define LTQ_SPI_CON_TXOFF	BIT(0)	/* Switch transmitter off */

#define LTQ_SPI_STAT_RXBV_S	28
#define LTQ_SPI_STAT_RXBV_M	(0x7 << LTQ_SPI_STAT_RXBV_S)
#define LTQ_SPI_STAT_BSY	BIT(13)	/* Busy flag */
#define LTQ_SPI_STAT_RUE	BIT(12)	/* Receive underflow error flag */
#define LTQ_SPI_STAT_TUE	BIT(11)	/* Transmit underflow error flag */
#define LTQ_SPI_STAT_AE		BIT(10)	/* Abort error flag */
#define LTQ_SPI_STAT_RE		BIT(9)	/* Receive error flag */
#define LTQ_SPI_STAT_TE		BIT(8)	/* Transmit error flag */
#define LTQ_SPI_STAT_ME		BIT(7)	/* Mode error flag */
#define LTQ_SPI_STAT_MS		BIT(1)	/* Host/target select bit */
#define LTQ_SPI_STAT_EN		BIT(0)	/* Enable bit */
#define LTQ_SPI_STAT_ERRORS	(LTQ_SPI_STAT_ME | LTQ_SPI_STAT_TE | \
				 LTQ_SPI_STAT_RE | LTQ_SPI_STAT_AE | \
				 LTQ_SPI_STAT_TUE | LTQ_SPI_STAT_RUE)

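/*
 * Editor's note (inferred from how the driver uses these bits):
 * WHBSTATE is the write-one-to-set/clear companion of STAT. Writing a
 * 1 to a SET* bit sets the corresponding STAT flag, writing a 1 to a
 * CLR* bit clears it. This driver uses it to switch between config
 * and active mode and to acknowledge error flags without a
 * read-modify-write of STAT.
 */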
#define LTQ_SPI_WHBSTATE_SETTUE	BIT(15)	/* Set transmit underflow error flag */
#define LTQ_SPI_WHBSTATE_SETAE	BIT(14)	/* Set abort error flag */
#define LTQ_SPI_WHBSTATE_SETRE	BIT(13)	/* Set receive error flag */
#define LTQ_SPI_WHBSTATE_SETTE	BIT(12)	/* Set transmit error flag */
#define LTQ_SPI_WHBSTATE_CLRTUE	BIT(11)	/* Clear transmit underflow error flag */
#define LTQ_SPI_WHBSTATE_CLRAE	BIT(10)	/* Clear abort error flag */
#define LTQ_SPI_WHBSTATE_CLRRE	BIT(9)	/* Clear receive error flag */
#define LTQ_SPI_WHBSTATE_CLRTE	BIT(8)	/* Clear transmit error flag */
#define LTQ_SPI_WHBSTATE_SETME	BIT(7)	/* Set mode error flag */
#define LTQ_SPI_WHBSTATE_CLRME	BIT(6)	/* Clear mode error flag */
#define LTQ_SPI_WHBSTATE_SETRUE	BIT(5)	/* Set receive underflow error flag */
#define LTQ_SPI_WHBSTATE_CLRRUE	BIT(4)	/* Clear receive underflow error flag */
#define LTQ_SPI_WHBSTATE_SETMS	BIT(3)	/* Set host select bit */
#define LTQ_SPI_WHBSTATE_CLRMS	BIT(2)	/* Clear host select bit */
#define LTQ_SPI_WHBSTATE_SETEN	BIT(1)	/* Set enable bit (operational mode) */
#define LTQ_SPI_WHBSTATE_CLREN	BIT(0)	/* Clear enable bit (config mode) */
#define LTQ_SPI_WHBSTATE_CLR_ERRORS	(LTQ_SPI_WHBSTATE_CLRRUE | \
					 LTQ_SPI_WHBSTATE_CLRME | \
					 LTQ_SPI_WHBSTATE_CLRTE | \
					 LTQ_SPI_WHBSTATE_CLRRE | \
					 LTQ_SPI_WHBSTATE_CLRAE | \
					 LTQ_SPI_WHBSTATE_CLRTUE)

#define LTQ_SPI_RXFCON_RXFITL_S	8	/* FIFO interrupt trigger level */
#define LTQ_SPI_RXFCON_RXFLU	BIT(1)	/* FIFO flush */
#define LTQ_SPI_RXFCON_RXFEN	BIT(0)	/* FIFO enable */

#define LTQ_SPI_TXFCON_TXFITL_S	8	/* FIFO interrupt trigger level */
#define LTQ_SPI_TXFCON_TXFLU	BIT(1)	/* FIFO flush */
#define LTQ_SPI_TXFCON_TXFEN	BIT(0)	/* FIFO enable */

#define LTQ_SPI_FSTAT_RXFFL_S	0
#define LTQ_SPI_FSTAT_TXFFL_S	8

#define LTQ_SPI_GPOCON_ISCSBN_S	8
#define LTQ_SPI_GPOCON_INVOUTN_S	0

#define LTQ_SPI_FGPO_SETOUTN_S	8
#define LTQ_SPI_FGPO_CLROUTN_S	0

#define LTQ_SPI_RXREQ_RXCNT_M	0xFFFF	/* Receive count value */
#define LTQ_SPI_RXCNT_TODO_M	0xFFFF	/* Receive to-do value */

#define LTQ_SPI_IRNEN_TFI	BIT(4)	/* TX finished interrupt */
#define LTQ_SPI_IRNEN_F		BIT(3)	/* Frame end interrupt request */
#define LTQ_SPI_IRNEN_E		BIT(2)	/* Error end interrupt request */
#define LTQ_SPI_IRNEN_T_XWAY	BIT(1)	/* Transmit end interrupt request */
#define LTQ_SPI_IRNEN_R_XWAY	BIT(0)	/* Receive end interrupt request */
#define LTQ_SPI_IRNEN_R_XRX	BIT(1)	/* Receive end interrupt request */
#define LTQ_SPI_IRNEN_T_XRX	BIT(0)	/* Transmit end interrupt request */
#define LTQ_SPI_IRNEN_ALL	0x1F

struct lantiq_ssc_spi;

struct lantiq_ssc_hwcfg {
	int (*cfg_irq)(struct platform_device *pdev, struct lantiq_ssc_spi *spi);
	unsigned int irnen_r;
	unsigned int irnen_t;
	unsigned int irncr;
	unsigned int irnicr;
	bool irq_ack;
	u32 fifo_size_mask;
};

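/*
 * Driver state: the tx/rx pointers and the *_todo byte counters track
 * the progress of the transfer currently on the wire; fdx_tx_level
 * counts the words pushed into the TX FIFO by the last fill so the
 * full-duplex read path knows how many words to expect back.
 */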
struct lantiq_ssc_spi {
	struct spi_controller *host;
	struct device *dev;
	void __iomem *regbase;
	struct clk *spi_clk;
	struct clk *fpi_clk;
	const struct lantiq_ssc_hwcfg *hwcfg;

	spinlock_t lock;
	struct workqueue_struct *wq;
	struct work_struct work;

	const u8 *tx;
	u8 *rx;
	unsigned int tx_todo;
	unsigned int rx_todo;
	unsigned int bits_per_word;
	unsigned int speed_hz;
	unsigned int tx_fifo_size;
	unsigned int rx_fifo_size;
	unsigned int base_cs;
	unsigned int fdx_tx_level;
};

static u32 lantiq_ssc_readl(const struct lantiq_ssc_spi *spi, u32 reg)
{
	return __raw_readl(spi->regbase + reg);
}

static void lantiq_ssc_writel(const struct lantiq_ssc_spi *spi, u32 val,
			      u32 reg)
{
	__raw_writel(val, spi->regbase + reg);
}

static void lantiq_ssc_maskl(const struct lantiq_ssc_spi *spi, u32 clr,
			     u32 set, u32 reg)
{
	u32 val = __raw_readl(spi->regbase + reg);

	val &= ~clr;
	val |= set;
	__raw_writel(val, spi->regbase + reg);
}

static unsigned int tx_fifo_level(const struct lantiq_ssc_spi *spi)
{
	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
	u32 fstat = lantiq_ssc_readl(spi, LTQ_SPI_FSTAT);

	return (fstat >> LTQ_SPI_FSTAT_TXFFL_S) & hwcfg->fifo_size_mask;
}

static unsigned int rx_fifo_level(const struct lantiq_ssc_spi *spi)
{
	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
	u32 fstat = lantiq_ssc_readl(spi, LTQ_SPI_FSTAT);

	return (fstat >> LTQ_SPI_FSTAT_RXFFL_S) & hwcfg->fifo_size_mask;
}

static unsigned int tx_fifo_free(const struct lantiq_ssc_spi *spi)
{
	return spi->tx_fifo_size - tx_fifo_level(spi);
}

static void rx_fifo_reset(const struct lantiq_ssc_spi *spi)
{
	u32 val = spi->rx_fifo_size << LTQ_SPI_RXFCON_RXFITL_S;

	val |= LTQ_SPI_RXFCON_RXFEN | LTQ_SPI_RXFCON_RXFLU;
	lantiq_ssc_writel(spi, val, LTQ_SPI_RXFCON);
}

static void tx_fifo_reset(const struct lantiq_ssc_spi *spi)
{
	u32 val = 1 << LTQ_SPI_TXFCON_TXFITL_S;

	val |= LTQ_SPI_TXFCON_TXFEN | LTQ_SPI_TXFCON_TXFLU;
	lantiq_ssc_writel(spi, val, LTQ_SPI_TXFCON);
}

static void rx_fifo_flush(const struct lantiq_ssc_spi *spi)
{
	lantiq_ssc_maskl(spi, 0, LTQ_SPI_RXFCON_RXFLU, LTQ_SPI_RXFCON);
}

static void tx_fifo_flush(const struct lantiq_ssc_spi *spi)
{
	lantiq_ssc_maskl(spi, 0, LTQ_SPI_TXFCON_TXFLU, LTQ_SPI_TXFCON);
}

static void hw_enter_config_mode(const struct lantiq_ssc_spi *spi)
{
	lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_CLREN, LTQ_SPI_WHBSTATE);
}

static void hw_enter_active_mode(const struct lantiq_ssc_spi *spi)
{
	lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_SETEN, LTQ_SPI_WHBSTATE);
}

static void hw_setup_speed_hz(const struct lantiq_ssc_spi *spi,
			      unsigned int max_speed_hz)
{
	u32 spi_clk, brt;

	/*
	 * SPI module clock is derived from FPI bus clock dependent on
	 * divider value in CLC.RMC which is always set to 1.
	 *
	 *                 f_SPI
	 * baudrate = --------------
	 *             2 * (BR + 1)
	 */
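	/*
	 * Worked example (illustrative figures, not a fixed platform
	 * value): with a 100 MHz FPI clock, spi_clk below is 50 MHz.
	 * Requesting max_speed_hz = 2 MHz yields BR = 50/2 - 1 = 24,
	 * i.e. an exact 2 MHz bit clock. Requests above spi_clk clamp
	 * BR to 0 (fastest), and BR saturates at 0xFFFF (slowest).
	 */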
	spi_clk = clk_get_rate(spi->fpi_clk) / 2;

	if (max_speed_hz > spi_clk)
		brt = 0;
	else
		brt = spi_clk / max_speed_hz - 1;

	if (brt > 0xFFFF)
		brt = 0xFFFF;

	dev_dbg(spi->dev, "spi_clk %u, max_speed_hz %u, brt %u\n",
		spi_clk, max_speed_hz, brt);

	lantiq_ssc_writel(spi, brt, LTQ_SPI_BRT);
}

static void hw_setup_bits_per_word(const struct lantiq_ssc_spi *spi,
				   unsigned int bits_per_word)
{
	u32 bm;

	/* CON.BM value = bits_per_word - 1 */
	bm = (bits_per_word - 1) << LTQ_SPI_CON_BM_S;

	lantiq_ssc_maskl(spi, LTQ_SPI_CON_BM_M, bm, LTQ_SPI_CON);
}

static void hw_setup_clock_mode(const struct lantiq_ssc_spi *spi,
				unsigned int mode)
{
	u32 con_set = 0, con_clr = 0;

	/*
	 * SPI mode mapping in CON register:
	 * Mode CPOL CPHA CON.PO CON.PH
	 *  0    0    0     0      1
	 *  1    0    1     0      0
	 *  2    1    0     1      1
	 *  3    1    1     1      0
	 */
	if (mode & SPI_CPHA)
		con_clr |= LTQ_SPI_CON_PH;
	else
		con_set |= LTQ_SPI_CON_PH;

	if (mode & SPI_CPOL)
		con_set |= LTQ_SPI_CON_PO | LTQ_SPI_CON_IDLE;
	else
		con_clr |= LTQ_SPI_CON_PO | LTQ_SPI_CON_IDLE;

	/* Set heading control */
	if (mode & SPI_LSB_FIRST)
		con_clr |= LTQ_SPI_CON_HB;
	else
		con_set |= LTQ_SPI_CON_HB;

	/* Set loopback mode */
	if (mode & SPI_LOOP)
		con_set |= LTQ_SPI_CON_LB;
	else
		con_clr |= LTQ_SPI_CON_LB;

	lantiq_ssc_maskl(spi, con_clr, con_set, LTQ_SPI_CON);
}

static void lantiq_ssc_hw_init(const struct lantiq_ssc_spi *spi)
{
	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;

	/*
	 * Set clock divider for run mode to 1 to
	 * run at same frequency as FPI bus
	 */
	lantiq_ssc_writel(spi, 1 << LTQ_SPI_CLC_RMC_S, LTQ_SPI_CLC);

	/* Put controller into config mode */
	hw_enter_config_mode(spi);

	/* Clear error flags */
	lantiq_ssc_maskl(spi, 0, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);

	/* Enable error checking, disable TX/RX */
	lantiq_ssc_writel(spi, LTQ_SPI_CON_RUEN | LTQ_SPI_CON_AEN |
		LTQ_SPI_CON_TEN | LTQ_SPI_CON_REN | LTQ_SPI_CON_TXOFF |
		LTQ_SPI_CON_RXOFF, LTQ_SPI_CON);

	/* Setup default SPI mode */
	hw_setup_bits_per_word(spi, spi->bits_per_word);
	hw_setup_clock_mode(spi, SPI_MODE_0);

	/* Enable host mode and clear error flags */
	lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_SETMS |
			       LTQ_SPI_WHBSTATE_CLR_ERRORS,
			       LTQ_SPI_WHBSTATE);

	/* Reset GPIO/CS registers */
	lantiq_ssc_writel(spi, 0, LTQ_SPI_GPOCON);
	lantiq_ssc_writel(spi, 0xFF00, LTQ_SPI_FPGO);

	/* Enable and flush FIFOs */
	rx_fifo_reset(spi);
	tx_fifo_reset(spi);

	/* Enable interrupts */
	lantiq_ssc_writel(spi, hwcfg->irnen_t | hwcfg->irnen_r |
			  LTQ_SPI_IRNEN_E, LTQ_SPI_IRNEN);
}

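/*
 * Chip selects not backed by a GPIO descriptor are driven by the
 * controller's general purpose outputs: GPOCON.ISCSB switches the pin
 * into chip-select mode, and GPOCON.INVOUT inverts it for active-high
 * (SPI_CS_HIGH) devices. base_cs maps the logical chip select to the
 * first usable output pin.
 */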
static int lantiq_ssc_setup(struct spi_device *spidev)
{
	struct spi_controller *host = spidev->controller;
	struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);
	unsigned int cs = spi_get_chipselect(spidev, 0);
	u32 gpocon;

	/* GPIOs are used for CS */
	if (spi_get_csgpiod(spidev, 0))
		return 0;

	dev_dbg(spi->dev, "using internal chipselect %u\n", cs);

	if (cs < spi->base_cs) {
		dev_err(spi->dev,
			"chipselect %i too small (min %i)\n", cs, spi->base_cs);
		return -EINVAL;
	}

	/* set GPO pin to CS mode */
	gpocon = 1 << ((cs - spi->base_cs) + LTQ_SPI_GPOCON_ISCSBN_S);

	/* invert GPO pin */
	if (spidev->mode & SPI_CS_HIGH)
		gpocon |= 1 << (cs - spi->base_cs);

	lantiq_ssc_maskl(spi, 0, gpocon, LTQ_SPI_GPOCON);

	return 0;
}

static int lantiq_ssc_prepare_message(struct spi_controller *host,
				      struct spi_message *message)
{
	struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);

	hw_enter_config_mode(spi);
	hw_setup_clock_mode(spi, message->spi->mode);
	hw_enter_active_mode(spi);

	return 0;
}

static void hw_setup_transfer(struct lantiq_ssc_spi *spi,
			      struct spi_device *spidev, struct spi_transfer *t)
{
	unsigned int speed_hz = t->speed_hz;
	unsigned int bits_per_word = t->bits_per_word;
	u32 con;

	if (bits_per_word != spi->bits_per_word ||
	    speed_hz != spi->speed_hz) {
		hw_enter_config_mode(spi);
		hw_setup_speed_hz(spi, speed_hz);
		hw_setup_bits_per_word(spi, bits_per_word);
		hw_enter_active_mode(spi);

		spi->speed_hz = speed_hz;
		spi->bits_per_word = bits_per_word;
	}

	/* Configure transmitter and receiver */
	con = lantiq_ssc_readl(spi, LTQ_SPI_CON);
	if (t->tx_buf)
		con &= ~LTQ_SPI_CON_TXOFF;
	else
		con |= LTQ_SPI_CON_TXOFF;

	if (t->rx_buf)
		con &= ~LTQ_SPI_CON_RXOFF;
	else
		con |= LTQ_SPI_CON_RXOFF;

	lantiq_ssc_writel(spi, con, LTQ_SPI_CON);
}

static int lantiq_ssc_unprepare_message(struct spi_controller *host,
					struct spi_message *message)
{
	struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);

	flush_workqueue(spi->wq);

	/* Disable transmitter and receiver while idle */
	lantiq_ssc_maskl(spi, 0, LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF,
			 LTQ_SPI_CON);

	return 0;
}

static void tx_fifo_write(struct lantiq_ssc_spi *spi)
{
	const u8 *tx8;
	const u16 *tx16;
	const u32 *tx32;
	u32 data;
	unsigned int tx_free = tx_fifo_free(spi);

	spi->fdx_tx_level = 0;
	while (spi->tx_todo && tx_free) {
		switch (spi->bits_per_word) {
		case 2 ... 8:
			tx8 = spi->tx;
			data = *tx8;
			spi->tx_todo--;
			spi->tx++;
			break;
		case 16:
			tx16 = (u16 *) spi->tx;
			data = *tx16;
			spi->tx_todo -= 2;
			spi->tx += 2;
			break;
		case 32:
			tx32 = (u32 *) spi->tx;
			data = *tx32;
			spi->tx_todo -= 4;
			spi->tx += 4;
			break;
		default:
			WARN_ON(1);
			data = 0;
			break;
		}

		lantiq_ssc_writel(spi, data, LTQ_SPI_TB);
		tx_free--;
		spi->fdx_tx_level++;
	}
}

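/*
 * In full-duplex mode every word written to TB produces exactly one
 * entry in the RX FIFO, so the number of words pushed by the previous
 * tx_fifo_write() (fdx_tx_level) is also the number of words that
 * must be drained here before the TX FIFO is refilled.
 */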
static void rx_fifo_read_full_duplex(struct lantiq_ssc_spi *spi)
{
	u8 *rx8;
	u16 *rx16;
	u32 *rx32;
	u32 data;
	unsigned int rx_fill = rx_fifo_level(spi);

	/*
	 * Wait until all expected data has been shifted in.
	 * Otherwise, an RX overrun may occur.
	 */
	while (rx_fill != spi->fdx_tx_level)
		rx_fill = rx_fifo_level(spi);

	while (rx_fill) {
		data = lantiq_ssc_readl(spi, LTQ_SPI_RB);

		switch (spi->bits_per_word) {
		case 2 ... 8:
			rx8 = spi->rx;
			*rx8 = data;
			spi->rx_todo--;
			spi->rx++;
			break;
		case 16:
			rx16 = (u16 *) spi->rx;
			*rx16 = data;
			spi->rx_todo -= 2;
			spi->rx += 2;
			break;
		case 32:
			rx32 = (u32 *) spi->rx;
			*rx32 = data;
			spi->rx_todo -= 4;
			spi->rx += 4;
			break;
		default:
			WARN_ON(1);
			break;
		}

		rx_fill--;
	}
}

static void rx_fifo_read_half_duplex(struct lantiq_ssc_spi *spi)
{
	u32 data, *rx32;
	u8 *rx8;
	unsigned int rxbv, shift;
	unsigned int rx_fill = rx_fifo_level(spi);

	/*
	 * In RX-only mode the bits per word value is ignored by HW. A value
	 * of 32 is used instead. Thus all 4 bytes per FIFO entry must be
	 * read. If the remaining RX bytes are less than 4, the FIFO must be
	 * read differently. The amount of received and valid bytes is
	 * indicated by the STAT.RXBV register value.
	 */
	while (rx_fill) {
		if (spi->rx_todo < 4) {
			rxbv = (lantiq_ssc_readl(spi, LTQ_SPI_STAT) &
				LTQ_SPI_STAT_RXBV_M) >> LTQ_SPI_STAT_RXBV_S;
			data = lantiq_ssc_readl(spi, LTQ_SPI_RB);

			shift = (rxbv - 1) * 8;
			rx8 = spi->rx;

			while (rxbv) {
				*rx8++ = (data >> shift) & 0xFF;
				rxbv--;
				shift -= 8;
				spi->rx_todo--;
				spi->rx++;
			}
		} else {
			data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
			rx32 = (u32 *) spi->rx;

			*rx32++ = data;
			spi->rx_todo -= 4;
			spi->rx += 4;
		}
		rx_fill--;
	}
}

static void rx_request(struct lantiq_ssc_spi *spi)
{
	unsigned int rxreq, rxreq_max;

	/*
	 * To avoid receive overflows at high clocks it is better to request
	 * only the amount of bytes that fits into all FIFOs. This value
	 * depends on the FIFO size implemented in hardware.
	 */
	rxreq = spi->rx_todo;
	rxreq_max = spi->rx_fifo_size * 4;
	if (rxreq > rxreq_max)
		rxreq = rxreq_max;

	lantiq_ssc_writel(spi, rxreq, LTQ_SPI_RXREQ);
}

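/*
 * Transfer state machine, driven from interrupt context: on each
 * TX/RX interrupt the handler drains the RX FIFO (full duplex) or
 * refills the TX FIFO / requests more RX data, until both todo
 * counters reach zero. Completion is then handed off to the
 * workqueue, which busy-waits for the shift register to drain (see
 * lantiq_ssc_bussy_work() below).
 */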
static irqreturn_t lantiq_ssc_xmit_interrupt(int irq, void *data)
{
	struct lantiq_ssc_spi *spi = data;
	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
	u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);

	spin_lock(&spi->lock);
	if (hwcfg->irq_ack)
		lantiq_ssc_writel(spi, val, hwcfg->irncr);

	if (spi->tx) {
		if (spi->rx && spi->rx_todo)
			rx_fifo_read_full_duplex(spi);

		if (spi->tx_todo)
			tx_fifo_write(spi);
		else if (!tx_fifo_level(spi))
			goto completed;
	} else if (spi->rx) {
		if (spi->rx_todo) {
			rx_fifo_read_half_duplex(spi);

			if (spi->rx_todo)
				rx_request(spi);
			else
				goto completed;
		} else {
			goto completed;
		}
	}

	spin_unlock(&spi->lock);
	return IRQ_HANDLED;

completed:
	queue_work(spi->wq, &spi->work);
	spin_unlock(&spi->lock);

	return IRQ_HANDLED;
}

static irqreturn_t lantiq_ssc_err_interrupt(int irq, void *data)
{
	struct lantiq_ssc_spi *spi = data;
	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
	u32 stat = lantiq_ssc_readl(spi, LTQ_SPI_STAT);
	u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);

	if (!(stat & LTQ_SPI_STAT_ERRORS))
		return IRQ_NONE;

	spin_lock(&spi->lock);
	if (hwcfg->irq_ack)
		lantiq_ssc_writel(spi, val, hwcfg->irncr);

	if (stat & LTQ_SPI_STAT_RUE)
		dev_err(spi->dev, "receive underflow error\n");
	if (stat & LTQ_SPI_STAT_TUE)
		dev_err(spi->dev, "transmit underflow error\n");
	if (stat & LTQ_SPI_STAT_AE)
		dev_err(spi->dev, "abort error\n");
	if (stat & LTQ_SPI_STAT_RE)
		dev_err(spi->dev, "receive overflow error\n");
	if (stat & LTQ_SPI_STAT_TE)
		dev_err(spi->dev, "transmit overflow error\n");
	if (stat & LTQ_SPI_STAT_ME)
		dev_err(spi->dev, "mode error\n");

	/* Clear error flags */
	lantiq_ssc_maskl(spi, 0, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);

	/* set bad status so it can be retried */
	if (spi->host->cur_msg)
		spi->host->cur_msg->status = -EIO;
	queue_work(spi->wq, &spi->work);
	spin_unlock(&spi->lock);

	return IRQ_HANDLED;
}

static irqreturn_t intel_lgm_ssc_isr(int irq, void *data)
{
	struct lantiq_ssc_spi *spi = data;
	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
	u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);

	if (!(val & LTQ_SPI_IRNEN_ALL))
		return IRQ_NONE;

	if (val & LTQ_SPI_IRNEN_E)
		return lantiq_ssc_err_interrupt(irq, data);

	if ((val & hwcfg->irnen_t) || (val & hwcfg->irnen_r))
		return lantiq_ssc_xmit_interrupt(irq, data);

	return IRQ_HANDLED;
}

static int transfer_start(struct lantiq_ssc_spi *spi, struct spi_device *spidev,
			  struct spi_transfer *t)
{
	unsigned long flags;

	spin_lock_irqsave(&spi->lock, flags);

	spi->tx = t->tx_buf;
	spi->rx = t->rx_buf;

	if (t->tx_buf) {
		spi->tx_todo = t->len;

		/* initially fill TX FIFO */
		tx_fifo_write(spi);
	}

	if (spi->rx) {
		spi->rx_todo = t->len;

		/* start shift clock in RX-only mode */
		if (!spi->tx)
			rx_request(spi);
	}

	spin_unlock_irqrestore(&spi->lock, flags);

	return t->len;
}

/*
 * The driver only gets an interrupt when the FIFO is empty, but there
 * is an additional shift register from which the data is written to
 * the wire. We get the last interrupt when the controller starts to
 * write the last word to the wire, not when it is finished. Do busy
 * waiting till it finishes.
 */
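/*
 * The timeout below is derived from the transfer speed: 8000/speed_hz
 * is roughly the time in milliseconds for one 8-bit word on the wire;
 * it is doubled and padded with 100 ms of tolerance, so even at the
 * lowest speeds the wait gives up well after the shift register must
 * have drained.
 */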
static void lantiq_ssc_bussy_work(struct work_struct *work)
{
	struct lantiq_ssc_spi *spi;
	unsigned long long timeout = 8LL * 1000LL;
	unsigned long end;

	spi = container_of(work, typeof(*spi), work);

	do_div(timeout, spi->speed_hz);
	timeout += timeout + 100; /* some tolerance */

	end = jiffies + msecs_to_jiffies(timeout);
	do {
		u32 stat = lantiq_ssc_readl(spi, LTQ_SPI_STAT);

		if (!(stat & LTQ_SPI_STAT_BSY)) {
			spi_finalize_current_transfer(spi->host);
			return;
		}

		cond_resched();
	} while (!time_after_eq(jiffies, end));

	if (spi->host->cur_msg)
		spi->host->cur_msg->status = -EIO;
	spi_finalize_current_transfer(spi->host);
}

static void lantiq_ssc_handle_err(struct spi_controller *host,
				  struct spi_message *message)
{
	struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);

	/* flush FIFOs on timeout */
	rx_fifo_flush(spi);
	tx_fifo_flush(spi);
}

static void lantiq_ssc_set_cs(struct spi_device *spidev, bool enable)
{
	struct lantiq_ssc_spi *spi = spi_controller_get_devdata(spidev->controller);
	unsigned int cs = spi_get_chipselect(spidev, 0);
	u32 fgpo;

	if (!!(spidev->mode & SPI_CS_HIGH) == enable)
		fgpo = (1 << (cs - spi->base_cs));
	else
		fgpo = (1 << (cs - spi->base_cs + LTQ_SPI_FGPO_SETOUTN_S));

	lantiq_ssc_writel(spi, fgpo, LTQ_SPI_FPGO);
}

static int lantiq_ssc_transfer_one(struct spi_controller *host,
				   struct spi_device *spidev,
				   struct spi_transfer *t)
{
	struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);

	hw_setup_transfer(spi, spidev, t);

	return transfer_start(spi, spidev, t);
}

static int intel_lgm_cfg_irq(struct platform_device *pdev, struct lantiq_ssc_spi *spi)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	return devm_request_irq(&pdev->dev, irq, intel_lgm_ssc_isr, 0, "spi", spi);
}

static int lantiq_cfg_irq(struct platform_device *pdev, struct lantiq_ssc_spi *spi)
{
	int irq, err;

	irq = platform_get_irq_byname(pdev, LTQ_SPI_RX_IRQ_NAME);
	if (irq < 0)
		return irq;

	err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_xmit_interrupt,
			       0, LTQ_SPI_RX_IRQ_NAME, spi);
	if (err)
		return err;

	irq = platform_get_irq_byname(pdev, LTQ_SPI_TX_IRQ_NAME);
	if (irq < 0)
		return irq;

	err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_xmit_interrupt,
			       0, LTQ_SPI_TX_IRQ_NAME, spi);
	if (err)
		return err;

	irq = platform_get_irq_byname(pdev, LTQ_SPI_ERR_IRQ_NAME);
	if (irq < 0)
		return irq;

	err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_err_interrupt,
			       0, LTQ_SPI_ERR_IRQ_NAME, spi);
	return err;
}

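/*
 * Per-SoC configuration: XWAY and xRX use separate RX/TX/error IRQ
 * lines and swap the R/T bit positions in IRNEN, while LGM muxes
 * everything onto a single IRQ that must be acknowledged explicitly
 * (irq_ack) and uses a wider FIFO fill-level mask. Note that the
 * IRNICR/IRNCR register offsets also differ between the Lantiq and
 * LGM variants.
 */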
static const struct lantiq_ssc_hwcfg lantiq_ssc_xway = {
	.cfg_irq	= lantiq_cfg_irq,
	.irnen_r	= LTQ_SPI_IRNEN_R_XWAY,
	.irnen_t	= LTQ_SPI_IRNEN_T_XWAY,
	.irnicr		= 0xF8,
	.irncr		= 0xFC,
	.fifo_size_mask	= GENMASK(5, 0),
	.irq_ack	= false,
};

static const struct lantiq_ssc_hwcfg lantiq_ssc_xrx = {
	.cfg_irq	= lantiq_cfg_irq,
	.irnen_r	= LTQ_SPI_IRNEN_R_XRX,
	.irnen_t	= LTQ_SPI_IRNEN_T_XRX,
	.irnicr		= 0xF8,
	.irncr		= 0xFC,
	.fifo_size_mask	= GENMASK(5, 0),
	.irq_ack	= false,
};

static const struct lantiq_ssc_hwcfg intel_ssc_lgm = {
	.cfg_irq	= intel_lgm_cfg_irq,
	.irnen_r	= LTQ_SPI_IRNEN_R_XRX,
	.irnen_t	= LTQ_SPI_IRNEN_T_XRX,
	.irnicr		= 0xFC,
	.irncr		= 0xF8,
	.fifo_size_mask	= GENMASK(7, 0),
	.irq_ack	= true,
};

static const struct of_device_id lantiq_ssc_match[] = {
	{ .compatible = "lantiq,ase-spi", .data = &lantiq_ssc_xway, },
	{ .compatible = "lantiq,falcon-spi", .data = &lantiq_ssc_xrx, },
	{ .compatible = "lantiq,xrx100-spi", .data = &lantiq_ssc_xrx, },
	{ .compatible = "intel,lgm-spi", .data = &intel_ssc_lgm, },
	{},
};
MODULE_DEVICE_TABLE(of, lantiq_ssc_match);

static int lantiq_ssc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_controller *host;
	struct lantiq_ssc_spi *spi;
	const struct lantiq_ssc_hwcfg *hwcfg;
	u32 id, supports_dma, revision;
	unsigned int num_cs;
	int err;

	hwcfg = of_device_get_match_data(dev);

	host = spi_alloc_host(dev, sizeof(struct lantiq_ssc_spi));
	if (!host)
		return -ENOMEM;

	spi = spi_controller_get_devdata(host);
	spi->host = host;
	spi->dev = dev;
	spi->hwcfg = hwcfg;
	platform_set_drvdata(pdev, spi);
	spi->regbase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(spi->regbase)) {
		err = PTR_ERR(spi->regbase);
		goto err_host_put;
	}

	err = hwcfg->cfg_irq(pdev, spi);
	if (err)
		goto err_host_put;

	spi->spi_clk = devm_clk_get_enabled(dev, "gate");
	if (IS_ERR(spi->spi_clk)) {
		err = PTR_ERR(spi->spi_clk);
		goto err_host_put;
	}

	/*
	 * Use the old clk_get_fpi() function on Lantiq platforms until
	 * they support the common clk framework.
	 */
#if defined(CONFIG_LANTIQ) && !defined(CONFIG_COMMON_CLK)
	spi->fpi_clk = clk_get_fpi();
#else
	spi->fpi_clk = clk_get(dev, "freq");
#endif
	if (IS_ERR(spi->fpi_clk)) {
		err = PTR_ERR(spi->fpi_clk);
		goto err_host_put;
	}

	num_cs = 8;
	of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);

	spi->base_cs = 1;
	of_property_read_u32(pdev->dev.of_node, "base-cs", &spi->base_cs);

	spin_lock_init(&spi->lock);
	spi->bits_per_word = 8;
	spi->speed_hz = 0;

	host->dev.of_node = pdev->dev.of_node;
	host->num_chipselect = num_cs;
	host->use_gpio_descriptors = true;
	host->setup = lantiq_ssc_setup;
	host->set_cs = lantiq_ssc_set_cs;
	host->handle_err = lantiq_ssc_handle_err;
	host->prepare_message = lantiq_ssc_prepare_message;
	host->unprepare_message = lantiq_ssc_unprepare_message;
	host->transfer_one = lantiq_ssc_transfer_one;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH |
			  SPI_LOOP;
	host->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 8) |
				   SPI_BPW_MASK(16) | SPI_BPW_MASK(32);

	spi->wq = alloc_ordered_workqueue(dev_name(dev), WQ_MEM_RECLAIM);
	if (!spi->wq) {
		err = -ENOMEM;
		goto err_clk_put;
	}
	INIT_WORK(&spi->work, lantiq_ssc_bussy_work);

	id = lantiq_ssc_readl(spi, LTQ_SPI_ID);
	spi->tx_fifo_size = (id >> LTQ_SPI_ID_TXFS_S) & hwcfg->fifo_size_mask;
	spi->rx_fifo_size = (id >> LTQ_SPI_ID_RXFS_S) & hwcfg->fifo_size_mask;
	supports_dma = (id & LTQ_SPI_ID_CFG_M) >> LTQ_SPI_ID_CFG_S;
	revision = id & LTQ_SPI_ID_REV_M;

	lantiq_ssc_hw_init(spi);

	dev_info(dev,
		 "Lantiq SSC SPI controller (Rev %i, TXFS %u, RXFS %u, DMA %u)\n",
		 revision, spi->tx_fifo_size, spi->rx_fifo_size, supports_dma);

	err = devm_spi_register_controller(dev, host);
	if (err) {
		dev_err(dev, "failed to register spi host\n");
		goto err_wq_destroy;
	}

	return 0;

err_wq_destroy:
	destroy_workqueue(spi->wq);
err_clk_put:
	clk_put(spi->fpi_clk);
err_host_put:
	spi_controller_put(host);

	return err;
}

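/*
 * Teardown mirrors lantiq_ssc_hw_init(): interrupts are masked, the
 * module clock control register is cleared, both FIFOs are flushed
 * and the controller is parked in config mode before the workqueue
 * and the FPI clock reference are released.
 */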
static void lantiq_ssc_remove(struct platform_device *pdev)
{
	struct lantiq_ssc_spi *spi = platform_get_drvdata(pdev);

	lantiq_ssc_writel(spi, 0, LTQ_SPI_IRNEN);
	lantiq_ssc_writel(spi, 0, LTQ_SPI_CLC);
	rx_fifo_flush(spi);
	tx_fifo_flush(spi);
	hw_enter_config_mode(spi);

	destroy_workqueue(spi->wq);
	clk_put(spi->fpi_clk);
}

static struct platform_driver lantiq_ssc_driver = {
	.probe = lantiq_ssc_probe,
	.remove = lantiq_ssc_remove,
	.driver = {
		.name = "spi-lantiq-ssc",
		.of_match_table = lantiq_ssc_match,
	},
};
module_platform_driver(lantiq_ssc_driver);

MODULE_DESCRIPTION("Lantiq SSC SPI controller driver");
MODULE_AUTHOR("Daniel Schwierzeck <daniel.schwierzeck@gmail.com>");
MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:spi-lantiq-ssc");