// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH MSIOF SPI Controller Interface
 *
 * Copyright (c) 2009 Magnus Damm
 * Copyright (C) 2014 Renesas Electronics Corporation
 * Copyright (C) 2014-2017 Glider bvba
 */

#include <linux/bitmap.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>

#include <linux/spi/sh_msiof.h>
#include <linux/spi/spi.h>

#include <asm/unaligned.h>

#define SH_MSIOF_FLAG_FIXED_DTDL_200	BIT(0)

struct sh_msiof_chipdata {
	u32 bits_per_word_mask;
	u16 tx_fifo_size;
	u16 rx_fifo_size;
	u16 ctlr_flags;
	u16 min_div_pow;
	u32 flags;
};

struct sh_msiof_spi_priv {
	struct spi_controller *ctlr;
	void __iomem *mapbase;
	struct clk *clk;
	struct platform_device *pdev;
	struct sh_msiof_spi_info *info;
	struct completion done;
	struct completion done_txdma;
	unsigned int tx_fifo_size;
	unsigned int rx_fifo_size;
	unsigned int min_div_pow;
	void *tx_dma_page;
	void *rx_dma_page;
	dma_addr_t tx_dma_addr;
	dma_addr_t rx_dma_addr;
	bool native_cs_inited;
	bool native_cs_high;
	bool target_aborted;
};

#define MAX_SS	3	/* Maximum number of native chip selects */

#define SITMDR1	0x00	/* Transmit Mode Register 1 */
#define SITMDR2	0x04	/* Transmit Mode Register 2 */
#define SITMDR3	0x08	/* Transmit Mode Register 3 */
#define SIRMDR1	0x10	/* Receive Mode Register 1 */
#define SIRMDR2	0x14	/* Receive Mode Register 2 */
#define SIRMDR3	0x18	/* Receive Mode Register 3 */
#define SITSCR	0x20	/* Transmit Clock Select Register */
#define SIRSCR	0x22	/* Receive Clock Select Register (SH, A1, APE6) */
#define SICTR	0x28	/* Control Register */
#define SIFCTR	0x30	/* FIFO Control Register */
#define SISTR	0x40	/* Status Register */
#define SIIER	0x44	/* Interrupt Enable Register */
#define SITDR1	0x48	/* Transmit Control Data Register 1 (SH, A1) */
#define SITDR2	0x4c	/* Transmit Control Data Register 2 (SH, A1) */
#define SITFDR	0x50	/* Transmit FIFO Data Register */
#define SIRDR1	0x58	/* Receive Control Data Register 1 (SH, A1) */
#define SIRDR2	0x5c	/* Receive Control Data Register 2 (SH, A1) */
#define SIRFDR	0x60	/* Receive FIFO Data Register */

/* SITMDR1 and SIRMDR1 */
#define SIMDR1_TRMD		BIT(31)		/* Transfer Mode (1 = Master mode) */
#define SIMDR1_SYNCMD_MASK	GENMASK(29, 28)	/* SYNC Mode */
#define SIMDR1_SYNCMD_SPI	(2 << 28)	/* Level mode/SPI */
#define SIMDR1_SYNCMD_LR	(3 << 28)	/* L/R mode */
#define SIMDR1_SYNCAC_SHIFT	25		/* Sync Polarity (1 = Active-low) */
#define SIMDR1_BITLSB_SHIFT	24		/* MSB/LSB First (1 = LSB first) */
#define SIMDR1_DTDL_SHIFT	20		/* Data Pin Bit Delay for MSIOF_SYNC */
#define SIMDR1_SYNCDL_SHIFT	16		/* Frame Sync Signal Timing Delay */
#define SIMDR1_FLD_MASK		GENMASK(3, 2)	/* Frame Sync Signal Interval (0-3) */
#define SIMDR1_FLD_SHIFT	2
#define SIMDR1_XXSTP		BIT(0)		/* Transmission/Reception Stop on FIFO */
/* SITMDR1 */
#define SITMDR1_PCON		BIT(30)		/* Transfer Signal Connection */
#define SITMDR1_SYNCCH_MASK	GENMASK(27, 26)	/* Sync Signal Channel Select */
#define SITMDR1_SYNCCH_SHIFT	26		/* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */

/* SITMDR2 and SIRMDR2 */
#define SIMDR2_BITLEN1(i)	(((i) - 1) << 24) /* Data Size (8-32 bits) */
#define SIMDR2_WDLEN1(i)	(((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1)) */
#define SIMDR2_GRPMASK1		BIT(0)		/* Group Output Mask 1 (SH, A1) */

/* SITSCR and SIRSCR */
#define SISCR_BRPS_MASK		GENMASK(12, 8)	/* Prescaler Setting (1-32) */
#define SISCR_BRPS(i)		(((i) - 1) << 8)
#define SISCR_BRDV_MASK		GENMASK(2, 0)	/* Baud Rate Generator's Division Ratio */
#define SISCR_BRDV_DIV_2	0
#define SISCR_BRDV_DIV_4	1
#define SISCR_BRDV_DIV_8	2
#define SISCR_BRDV_DIV_16	3
#define SISCR_BRDV_DIV_32	4
#define SISCR_BRDV_DIV_1	7
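
/*
 * Illustrative: the serial clock is the MSIOF parent clock divided by
 * BRPS * BRDV.  E.g. SISCR_BRPS(7) | SISCR_BRDV_DIV_2 divides the parent
 * clock by 7 * 2 = 14.  With BRPS spanning 1-32 and BRDV spanning /1 to
 * /32, the composite divisor ranges from 1 to 1024.
 */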

/* SICTR */
#define SICTR_TSCKIZ_MASK	GENMASK(31, 30)	/* Transmit Clock I/O Polarity Select */
#define SICTR_TSCKIZ_SCK	BIT(31)		/*  Disable SCK when TX disabled */
#define SICTR_TSCKIZ_POL_SHIFT	30		/*  Transmit Clock Polarity */
#define SICTR_RSCKIZ_MASK	GENMASK(29, 28)	/* Receive Clock Polarity Select */
#define SICTR_RSCKIZ_SCK	BIT(29)		/*  Must match CTR_TSCKIZ_SCK */
#define SICTR_RSCKIZ_POL_SHIFT	28		/*  Receive Clock Polarity */
#define SICTR_TEDG_SHIFT	27		/* Transmit Timing (1 = falling edge) */
#define SICTR_REDG_SHIFT	26		/* Receive Timing (1 = falling edge) */
#define SICTR_TXDIZ_MASK	GENMASK(23, 22)	/* Pin Output When TX is Disabled */
#define SICTR_TXDIZ_LOW		(0 << 22)	/*  0 */
#define SICTR_TXDIZ_HIGH	(1 << 22)	/*  1 */
#define SICTR_TXDIZ_HIZ		(2 << 22)	/*  High-impedance */
#define SICTR_TSCKE		BIT(15)		/* Transmit Serial Clock Output Enable */
#define SICTR_TFSE		BIT(14)		/* Transmit Frame Sync Signal Output Enable */
#define SICTR_TXE		BIT(9)		/* Transmit Enable */
#define SICTR_RXE		BIT(8)		/* Receive Enable */
#define SICTR_TXRST		BIT(1)		/* Transmit Reset */
#define SICTR_RXRST		BIT(0)		/* Receive Reset */

/* SIFCTR */
#define SIFCTR_TFWM_MASK	GENMASK(31, 29)	/* Transmit FIFO Watermark */
#define SIFCTR_TFWM_64		(0UL << 29)	/*  Transfer Request when 64 empty stages */
#define SIFCTR_TFWM_32		(1UL << 29)	/*  Transfer Request when 32 empty stages */
#define SIFCTR_TFWM_24		(2UL << 29)	/*  Transfer Request when 24 empty stages */
#define SIFCTR_TFWM_16		(3UL << 29)	/*  Transfer Request when 16 empty stages */
#define SIFCTR_TFWM_12		(4UL << 29)	/*  Transfer Request when 12 empty stages */
#define SIFCTR_TFWM_8		(5UL << 29)	/*  Transfer Request when 8 empty stages */
#define SIFCTR_TFWM_4		(6UL << 29)	/*  Transfer Request when 4 empty stages */
#define SIFCTR_TFWM_1		(7UL << 29)	/*  Transfer Request when 1 empty stage */
#define SIFCTR_TFUA_MASK	GENMASK(26, 20)	/* Transmit FIFO Usable Area */
#define SIFCTR_TFUA_SHIFT	20
#define SIFCTR_TFUA(i)		((i) << SIFCTR_TFUA_SHIFT)
#define SIFCTR_RFWM_MASK	GENMASK(15, 13)	/* Receive FIFO Watermark */
#define SIFCTR_RFWM_1		(0 << 13)	/*  Transfer Request when 1 valid stage */
#define SIFCTR_RFWM_4		(1 << 13)	/*  Transfer Request when 4 valid stages */
#define SIFCTR_RFWM_8		(2 << 13)	/*  Transfer Request when 8 valid stages */
#define SIFCTR_RFWM_16		(3 << 13)	/*  Transfer Request when 16 valid stages */
#define SIFCTR_RFWM_32		(4 << 13)	/*  Transfer Request when 32 valid stages */
#define SIFCTR_RFWM_64		(5 << 13)	/*  Transfer Request when 64 valid stages */
#define SIFCTR_RFWM_128		(6 << 13)	/*  Transfer Request when 128 valid stages */
#define SIFCTR_RFWM_256		(7 << 13)	/*  Transfer Request when 256 valid stages */
#define SIFCTR_RFUA_MASK	GENMASK(12, 4)	/* Receive FIFO Usable Area (0x40 = full) */
#define SIFCTR_RFUA_SHIFT	4
#define SIFCTR_RFUA(i)		((i) << SIFCTR_RFUA_SHIFT)

/* SISTR */
#define SISTR_TFEMP		BIT(29)	/* Transmit FIFO Empty */
#define SISTR_TDREQ		BIT(28)	/* Transmit Data Transfer Request */
#define SISTR_TEOF		BIT(23)	/* Frame Transmission End */
#define SISTR_TFSERR		BIT(21)	/* Transmit Frame Synchronization Error */
#define SISTR_TFOVF		BIT(20)	/* Transmit FIFO Overflow */
#define SISTR_TFUDF		BIT(19)	/* Transmit FIFO Underflow */
#define SISTR_RFFUL		BIT(13)	/* Receive FIFO Full */
#define SISTR_RDREQ		BIT(12)	/* Receive Data Transfer Request */
#define SISTR_REOF		BIT(7)	/* Frame Reception End */
#define SISTR_RFSERR		BIT(5)	/* Receive Frame Synchronization Error */
#define SISTR_RFUDF		BIT(4)	/* Receive FIFO Underflow */
#define SISTR_RFOVF		BIT(3)	/* Receive FIFO Overflow */

/* SIIER */
#define SIIER_TDMAE		BIT(31)	/* Transmit Data DMA Transfer Req. Enable */
#define SIIER_TFEMPE		BIT(29)	/* Transmit FIFO Empty Enable */
#define SIIER_TDREQE		BIT(28)	/* Transmit Data Transfer Request Enable */
#define SIIER_TEOFE		BIT(23)	/* Frame Transmission End Enable */
#define SIIER_TFSERRE		BIT(21)	/* Transmit Frame Sync Error Enable */
#define SIIER_TFOVFE		BIT(20)	/* Transmit FIFO Overflow Enable */
#define SIIER_TFUDFE		BIT(19)	/* Transmit FIFO Underflow Enable */
#define SIIER_RDMAE		BIT(15)	/* Receive Data DMA Transfer Req. Enable */
#define SIIER_RFFULE		BIT(13)	/* Receive FIFO Full Enable */
#define SIIER_RDREQE		BIT(12)	/* Receive Data Transfer Request Enable */
#define SIIER_REOFE		BIT(7)	/* Frame Reception End Enable */
#define SIIER_RFSERRE		BIT(5)	/* Receive Frame Sync Error Enable */
#define SIIER_RFUDFE		BIT(4)	/* Receive FIFO Underflow Enable */
#define SIIER_RFOVFE		BIT(3)	/* Receive FIFO Overflow Enable */
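
/*
 * Note: each SISTR status bit has a matching SIIER enable bit at the same
 * bit position (e.g. SISTR_TEOF and SIIER_TEOFE at bit 23); only the DMA
 * request enables SIIER_TDMAE and SIIER_RDMAE lack SISTR counterparts.
 */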

static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
{
	switch (reg_offs) {
	case SITSCR:
	case SIRSCR:
		return ioread16(p->mapbase + reg_offs);
	default:
		return ioread32(p->mapbase + reg_offs);
	}
}

static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
			   u32 value)
{
	switch (reg_offs) {
	case SITSCR:
	case SIRSCR:
		iowrite16(value, p->mapbase + reg_offs);
		break;
	default:
		iowrite32(value, p->mapbase + reg_offs);
		break;
	}
}

static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
				    u32 clr, u32 set)
{
	u32 mask = clr | set;
	u32 data;

	data = sh_msiof_read(p, SICTR);
	data &= ~clr;
	data |= set;
	sh_msiof_write(p, SICTR, data);

	return readl_poll_timeout_atomic(p->mapbase + SICTR, data,
					 (data & mask) == set, 1, 100);
}

static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
{
	struct sh_msiof_spi_priv *p = data;

	/* just disable the interrupt and wake up */
	sh_msiof_write(p, SIIER, 0);
	complete(&p->done);

	return IRQ_HANDLED;
}

static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p)
{
	u32 mask = SICTR_TXRST | SICTR_RXRST;
	u32 data;

	data = sh_msiof_read(p, SICTR);
	data |= mask;
	sh_msiof_write(p, SICTR, data);

	readl_poll_timeout_atomic(p->mapbase + SICTR, data, !(data & mask), 1,
				  100);
}

static const u32 sh_msiof_spi_div_array[] = {
	SISCR_BRDV_DIV_1, SISCR_BRDV_DIV_2, SISCR_BRDV_DIV_4,
	SISCR_BRDV_DIV_8, SISCR_BRDV_DIV_16, SISCR_BRDV_DIV_32,
};

static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
				      struct spi_transfer *t)
{
	unsigned long parent_rate = clk_get_rate(p->clk);
	unsigned int div_pow = p->min_div_pow;
	u32 spi_hz = t->speed_hz;
	unsigned long div;
	u32 brps, scr;

	if (!spi_hz || !parent_rate) {
		WARN(1, "Invalid clock rate parameters %lu and %u\n",
		     parent_rate, spi_hz);
		return;
	}

	div = DIV_ROUND_UP(parent_rate, spi_hz);
	if (div <= 1024) {
		/* SISCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
		if (!div_pow && div <= 32 && div > 2)
			div_pow = 1;

		if (div_pow)
			brps = (div + 1) >> div_pow;
		else
			brps = div;

		for (; brps > 32; div_pow++)
			brps = (brps + 1) >> 1;
	} else {
		/* Set transfer rate composite divisor to 2^5 * 32 = 1024 */
		dev_err(&p->pdev->dev,
			"Requested SPI transfer rate %d is too low\n", spi_hz);
		div_pow = 5;
		brps = 32;
	}

	t->effective_speed_hz = parent_rate / (brps << div_pow);

	scr = sh_msiof_spi_div_array[div_pow] | SISCR_BRPS(brps);
	sh_msiof_write(p, SITSCR, scr);
	if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
		sh_msiof_write(p, SIRSCR, scr);
}
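
/*
 * Worked example (illustrative values): with parent_rate = 133 MHz,
 * min_div_pow = 0 and a requested speed of 10 MHz, div = 14.  As
 * 2 < div <= 32, div_pow becomes 1 and brps = (14 + 1) >> 1 = 7, so the
 * hardware is programmed with BRDV = /2 and BRPS = 7, giving an effective
 * speed of 133 MHz / 14 = 9.5 MHz.
 */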

static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
{
	/*
	 * DTDL/SYNCDL bit	: p->info->dtdl or p->info->syncdl
	 * b'000		: 0
	 * b'001		: 100
	 * b'010		: 200
	 * b'011 (SYNCDL only)	: 300
	 * b'101		: 50
	 * b'110		: 150
	 */
	if (dtdl_or_syncdl % 100)
		return dtdl_or_syncdl / 100 + 5;
	else
		return dtdl_or_syncdl / 100;
}

static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
{
	u32 val;

	if (!p->info)
		return 0;

	/* check if DTDL and SYNCDL are allowed values */
	if (p->info->dtdl > 200 || p->info->syncdl > 300) {
		dev_warn(&p->pdev->dev, "DTDL or SYNCDL is too large\n");
		return 0;
	}

	/* check if the sum of DTDL and SYNCDL is a multiple of 100 */
	if ((p->info->dtdl + p->info->syncdl) % 100) {
		dev_warn(&p->pdev->dev,
			 "the sum of DTDL and SYNCDL must be a multiple of 100\n");
		return 0;
	}

	val = sh_msiof_get_delay_bit(p->info->dtdl) << SIMDR1_DTDL_SHIFT;
	val |= sh_msiof_get_delay_bit(p->info->syncdl) << SIMDR1_SYNCDL_SHIFT;

	return val;
}
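
/*
 * Example (illustrative): dtdl = 150 and syncdl = 50 pass both checks above
 * (sum = 200) and encode as b'110 << SIMDR1_DTDL_SHIFT and
 * b'101 << SIMDR1_SYNCDL_SHIFT, i.e. 1.5 and 0.5 clock-cycle delays.
 */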

static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
				      u32 cpol, u32 cpha,
				      u32 tx_hi_z, u32 lsb_first, u32 cs_high)
{
	u32 tmp;
	int edge;

	/*
	 * CPOL CPHA     TSCKIZ RSCKIZ TEDG REDG
	 *    0    0         10     10    1    1
	 *    0    1         10     10    0    0
	 *    1    0         11     11    0    0
	 *    1    1         11     11    1    1
	 */
	tmp = SIMDR1_SYNCMD_SPI | 1 << SIMDR1_FLD_SHIFT | SIMDR1_XXSTP;
	tmp |= !cs_high << SIMDR1_SYNCAC_SHIFT;
	tmp |= lsb_first << SIMDR1_BITLSB_SHIFT;
	tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
	if (spi_controller_is_target(p->ctlr)) {
		sh_msiof_write(p, SITMDR1, tmp | SITMDR1_PCON);
	} else {
		sh_msiof_write(p, SITMDR1,
			       tmp | SIMDR1_TRMD | SITMDR1_PCON |
			       (ss < MAX_SS ? ss : 0) << SITMDR1_SYNCCH_SHIFT);
	}
	if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) {
		/* These bits are reserved if RX needs TX */
		tmp &= ~0x0000ffff;
	}
	sh_msiof_write(p, SIRMDR1, tmp);

	tmp = 0;
	tmp |= SICTR_TSCKIZ_SCK | cpol << SICTR_TSCKIZ_POL_SHIFT;
	tmp |= SICTR_RSCKIZ_SCK | cpol << SICTR_RSCKIZ_POL_SHIFT;

	edge = cpol ^ !cpha;

	tmp |= edge << SICTR_TEDG_SHIFT;
	tmp |= edge << SICTR_REDG_SHIFT;
	tmp |= tx_hi_z ? SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW;
	sh_msiof_write(p, SICTR, tmp);
}
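
/*
 * Cross-check with the CPOL/CPHA table above: for SPI mode 0 (CPOL = 0,
 * CPHA = 0), edge = 0 ^ !0 = 1, so TEDG = REDG = 1; for mode 1 (CPOL = 0,
 * CPHA = 1), edge = 0 ^ !1 = 0, matching the second row.
 */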

static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
				       const void *tx_buf, void *rx_buf,
				       u32 bits, u32 words)
{
	u32 dr2 = SIMDR2_BITLEN1(bits) | SIMDR2_WDLEN1(words);

	if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
		sh_msiof_write(p, SITMDR2, dr2);
	else
		sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK1);

	if (rx_buf)
		sh_msiof_write(p, SIRMDR2, dr2);
}

static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
{
	sh_msiof_write(p, SISTR,
		       sh_msiof_read(p, SISTR) & ~(SISTR_TDREQ | SISTR_RDREQ));
}

static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
				      const void *tx_buf, int words, int fs)
{
	const u8 *buf_8 = tx_buf;
	int k;

	for (k = 0; k < words; k++)
		sh_msiof_write(p, SITFDR, buf_8[k] << fs);
}

static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
				       const void *tx_buf, int words, int fs)
{
	const u16 *buf_16 = tx_buf;
	int k;

	for (k = 0; k < words; k++)
		sh_msiof_write(p, SITFDR, buf_16[k] << fs);
}

static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
					const void *tx_buf, int words, int fs)
{
	const u16 *buf_16 = tx_buf;
	int k;

	for (k = 0; k < words; k++)
		sh_msiof_write(p, SITFDR, get_unaligned(&buf_16[k]) << fs);
}

static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
				       const void *tx_buf, int words, int fs)
{
	const u32 *buf_32 = tx_buf;
	int k;

	for (k = 0; k < words; k++)
		sh_msiof_write(p, SITFDR, buf_32[k] << fs);
}

static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
					const void *tx_buf, int words, int fs)
{
	const u32 *buf_32 = tx_buf;
	int k;

	for (k = 0; k < words; k++)
		sh_msiof_write(p, SITFDR, get_unaligned(&buf_32[k]) << fs);
}

static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
					const void *tx_buf, int words, int fs)
{
	const u32 *buf_32 = tx_buf;
	int k;

	for (k = 0; k < words; k++)
		sh_msiof_write(p, SITFDR, swab32(buf_32[k] << fs));
}

static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
					 const void *tx_buf, int words, int fs)
{
	const u32 *buf_32 = tx_buf;
	int k;

	for (k = 0; k < words; k++)
		sh_msiof_write(p, SITFDR, swab32(get_unaligned(&buf_32[k]) << fs));
}

static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
				     void *rx_buf, int words, int fs)
{
	u8 *buf_8 = rx_buf;
	int k;

	for (k = 0; k < words; k++)
		buf_8[k] = sh_msiof_read(p, SIRFDR) >> fs;
}

static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
				      void *rx_buf, int words, int fs)
{
	u16 *buf_16 = rx_buf;
	int k;

	for (k = 0; k < words; k++)
		buf_16[k] = sh_msiof_read(p, SIRFDR) >> fs;
}

static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
				       void *rx_buf, int words, int fs)
{
	u16 *buf_16 = rx_buf;
	int k;

	for (k = 0; k < words; k++)
		put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_16[k]);
}

static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
				      void *rx_buf, int words, int fs)
{
	u32 *buf_32 = rx_buf;
	int k;

	for (k = 0; k < words; k++)
		buf_32[k] = sh_msiof_read(p, SIRFDR) >> fs;
}

static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
				       void *rx_buf, int words, int fs)
{
	u32 *buf_32 = rx_buf;
	int k;

	for (k = 0; k < words; k++)
		put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_32[k]);
}

static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
				       void *rx_buf, int words, int fs)
{
	u32 *buf_32 = rx_buf;
	int k;

	for (k = 0; k < words; k++)
		buf_32[k] = swab32(sh_msiof_read(p, SIRFDR) >> fs);
}

static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
					void *rx_buf, int words, int fs)
{
	u32 *buf_32 = rx_buf;
	int k;

	for (k = 0; k < words; k++)
		put_unaligned(swab32(sh_msiof_read(p, SIRFDR) >> fs), &buf_32[k]);
}
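
/*
 * Note on the "fs" argument of the FIFO helpers above: FIFO words are
 * left-justified in the 32-bit SITFDR/SIRFDR registers, and callers pass
 * fs = 32 - bits_per_word.  E.g. with 8-bit words, fs = 24 and the byte
 * 0xa5 is written as 0xa5000000 and read back from bits 31:24.
 */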

static int sh_msiof_spi_setup(struct spi_device *spi)
{
	struct sh_msiof_spi_priv *p =
		spi_controller_get_devdata(spi->controller);
	u32 clr, set, tmp;

	if (spi_get_csgpiod(spi, 0) || spi_controller_is_target(p->ctlr))
		return 0;

	if (p->native_cs_inited &&
	    (p->native_cs_high == !!(spi->mode & SPI_CS_HIGH)))
		return 0;

	/* Configure native chip select mode/polarity early */
	clr = SIMDR1_SYNCMD_MASK;
	set = SIMDR1_SYNCMD_SPI;
	if (spi->mode & SPI_CS_HIGH)
		clr |= BIT(SIMDR1_SYNCAC_SHIFT);
	else
		set |= BIT(SIMDR1_SYNCAC_SHIFT);
	pm_runtime_get_sync(&p->pdev->dev);
	tmp = sh_msiof_read(p, SITMDR1) & ~clr;
	sh_msiof_write(p, SITMDR1, tmp | set | SIMDR1_TRMD | SITMDR1_PCON);
	tmp = sh_msiof_read(p, SIRMDR1) & ~clr;
	sh_msiof_write(p, SIRMDR1, tmp | set);
	pm_runtime_put(&p->pdev->dev);
	p->native_cs_high = spi->mode & SPI_CS_HIGH;
	p->native_cs_inited = true;
	return 0;
}

static int sh_msiof_prepare_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
	const struct spi_device *spi = msg->spi;
	u32 ss, cs_high;

	/* Configure pins before asserting CS */
	if (spi_get_csgpiod(spi, 0)) {
		ss = ctlr->unused_native_cs;
		cs_high = p->native_cs_high;
	} else {
		ss = spi_get_chipselect(spi, 0);
		cs_high = !!(spi->mode & SPI_CS_HIGH);
	}
	sh_msiof_spi_set_pin_regs(p, ss, !!(spi->mode & SPI_CPOL),
				  !!(spi->mode & SPI_CPHA),
				  !!(spi->mode & SPI_3WIRE),
				  !!(spi->mode & SPI_LSB_FIRST), cs_high);
	return 0;
}

static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
{
	bool target = spi_controller_is_target(p->ctlr);
	int ret = 0;

	/* setup clock and rx/tx signals */
	if (!target)
		ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TSCKE);
	if (rx_buf && !ret)
		ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_RXE);
	if (!ret)
		ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TXE);

	/* start by setting frame bit */
	if (!ret && !target)
		ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TFSE);

	return ret;
}

static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
{
	bool target = spi_controller_is_target(p->ctlr);
	int ret = 0;

	/* shut down frame, rx/tx and clock signals */
	if (!target)
		ret = sh_msiof_modify_ctr_wait(p, SICTR_TFSE, 0);
	if (!ret)
		ret = sh_msiof_modify_ctr_wait(p, SICTR_TXE, 0);
	if (rx_buf && !ret)
		ret = sh_msiof_modify_ctr_wait(p, SICTR_RXE, 0);
	if (!ret && !target)
		ret = sh_msiof_modify_ctr_wait(p, SICTR_TSCKE, 0);

	return ret;
}

static int sh_msiof_target_abort(struct spi_controller *ctlr)
{
	struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);

	p->target_aborted = true;
	complete(&p->done);
	complete(&p->done_txdma);
	return 0;
}

static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p,
					struct completion *x)
{
	if (spi_controller_is_target(p->ctlr)) {
		if (wait_for_completion_interruptible(x) ||
		    p->target_aborted) {
			dev_dbg(&p->pdev->dev, "interrupted\n");
			return -EINTR;
		}
	} else {
		if (!wait_for_completion_timeout(x, HZ)) {
			dev_err(&p->pdev->dev, "timeout\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
				  void (*tx_fifo)(struct sh_msiof_spi_priv *,
						  const void *, int, int),
				  void (*rx_fifo)(struct sh_msiof_spi_priv *,
						  void *, int, int),
				  const void *tx_buf, void *rx_buf,
				  int words, int bits)
{
	int fifo_shift;
	int ret;

	/* limit maximum word transfer to rx/tx fifo size */
	if (tx_buf)
		words = min_t(int, words, p->tx_fifo_size);
	if (rx_buf)
		words = min_t(int, words, p->rx_fifo_size);

	/* the fifo contents need shifting */
	fifo_shift = 32 - bits;

	/* default FIFO watermarks for PIO */
	sh_msiof_write(p, SIFCTR, 0);

	/* setup msiof transfer mode registers */
	sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
	sh_msiof_write(p, SIIER, SIIER_TEOFE | SIIER_REOFE);

	/* write tx fifo */
	if (tx_buf)
		tx_fifo(p, tx_buf, words, fifo_shift);

	reinit_completion(&p->done);
	p->target_aborted = false;

	ret = sh_msiof_spi_start(p, rx_buf);
	if (ret) {
		dev_err(&p->pdev->dev, "failed to start hardware\n");
		goto stop_ier;
	}

	/* wait for tx fifo to be emptied / rx fifo to be filled */
	ret = sh_msiof_wait_for_completion(p, &p->done);
	if (ret)
		goto stop_reset;

	/* read rx fifo */
	if (rx_buf)
		rx_fifo(p, rx_buf, words, fifo_shift);

	/* clear status bits */
	sh_msiof_reset_str(p);

	ret = sh_msiof_spi_stop(p, rx_buf);
	if (ret) {
		dev_err(&p->pdev->dev, "failed to shut down hardware\n");
		return ret;
	}

	return words;

stop_reset:
	sh_msiof_reset_str(p);
	sh_msiof_spi_stop(p, rx_buf);
stop_ier:
	sh_msiof_write(p, SIIER, 0);
	return ret;
}

static void sh_msiof_dma_complete(void *arg)
{
	complete(arg);
}

static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
			     void *rx, unsigned int len)
{
	u32 ier_bits = 0;
	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
	dma_cookie_t cookie;
	int ret;

	/* First prepare and submit the DMA request(s), as this may fail */
	if (rx) {
		ier_bits |= SIIER_RDREQE | SIIER_RDMAE;
		desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx,
					p->rx_dma_addr, len, DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_rx)
			return -EAGAIN;

		desc_rx->callback = sh_msiof_dma_complete;
		desc_rx->callback_param = &p->done;
		cookie = dmaengine_submit(desc_rx);
		if (dma_submit_error(cookie))
			return cookie;
	}

	if (tx) {
		ier_bits |= SIIER_TDREQE | SIIER_TDMAE;
		dma_sync_single_for_device(p->ctlr->dma_tx->device->dev,
					   p->tx_dma_addr, len, DMA_TO_DEVICE);
		desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx,
					p->tx_dma_addr, len, DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc_tx) {
			ret = -EAGAIN;
			goto no_dma_tx;
		}

		desc_tx->callback = sh_msiof_dma_complete;
		desc_tx->callback_param = &p->done_txdma;
		cookie = dmaengine_submit(desc_tx);
		if (dma_submit_error(cookie)) {
			ret = cookie;
			goto no_dma_tx;
		}
	}

	/* 1 stage FIFO watermarks for DMA */
	sh_msiof_write(p, SIFCTR, SIFCTR_TFWM_1 | SIFCTR_RFWM_1);

	/* setup msiof transfer mode registers (32-bit words) */
	sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);

	sh_msiof_write(p, SIIER, ier_bits);

	reinit_completion(&p->done);
	if (tx)
		reinit_completion(&p->done_txdma);
	p->target_aborted = false;

	/* Now start DMA */
	if (rx)
		dma_async_issue_pending(p->ctlr->dma_rx);
	if (tx)
		dma_async_issue_pending(p->ctlr->dma_tx);

	ret = sh_msiof_spi_start(p, rx);
	if (ret) {
		dev_err(&p->pdev->dev, "failed to start hardware\n");
		goto stop_dma;
	}

	if (tx) {
		/* wait for tx DMA completion */
		ret = sh_msiof_wait_for_completion(p, &p->done_txdma);
		if (ret)
			goto stop_reset;
	}

	if (rx) {
		/* wait for rx DMA completion */
		ret = sh_msiof_wait_for_completion(p, &p->done);
		if (ret)
			goto stop_reset;

		sh_msiof_write(p, SIIER, 0);
	} else {
		/* wait for tx fifo to be emptied */
		sh_msiof_write(p, SIIER, SIIER_TEOFE);
		ret = sh_msiof_wait_for_completion(p, &p->done);
		if (ret)
			goto stop_reset;
	}

	/* clear status bits */
	sh_msiof_reset_str(p);

	ret = sh_msiof_spi_stop(p, rx);
	if (ret) {
		dev_err(&p->pdev->dev, "failed to shut down hardware\n");
		return ret;
	}

	if (rx)
		dma_sync_single_for_cpu(p->ctlr->dma_rx->device->dev,
					p->rx_dma_addr, len, DMA_FROM_DEVICE);

	return 0;

stop_reset:
	sh_msiof_reset_str(p);
	sh_msiof_spi_stop(p, rx);
stop_dma:
	if (tx)
		dmaengine_terminate_sync(p->ctlr->dma_tx);
no_dma_tx:
	if (rx)
		dmaengine_terminate_sync(p->ctlr->dma_rx);
	sh_msiof_write(p, SIIER, 0);
	return ret;
}

static void copy_bswap32(u32 *dst, const u32 *src, unsigned int words)
{
	/* src or dst can be unaligned, but not both */
	if ((unsigned long)src & 3) {
		while (words--) {
			*dst++ = swab32(get_unaligned(src));
			src++;
		}
	} else if ((unsigned long)dst & 3) {
		while (words--) {
			put_unaligned(swab32(*src++), dst);
			dst++;
		}
	} else {
		while (words--)
			*dst++ = swab32(*src++);
	}
}

static void copy_wswap32(u32 *dst, const u32 *src, unsigned int words)
{
	/* src or dst can be unaligned, but not both */
	if ((unsigned long)src & 3) {
		while (words--) {
			*dst++ = swahw32(get_unaligned(src));
			src++;
		}
	} else if ((unsigned long)dst & 3) {
		while (words--) {
			put_unaligned(swahw32(*src++), dst);
			dst++;
		}
	} else {
		while (words--)
			*dst++ = swahw32(*src++);
	}
}

static void copy_plain32(u32 *dst, const u32 *src, unsigned int words)
{
	memcpy(dst, src, words * 4);
}
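
/*
 * Illustrative: on a little-endian CPU, copy_bswap32() turns the byte
 * stream b0 b1 b2 b3 (in-memory u32 0xb3b2b1b0) into 0xb0b1b2b3, placing b0
 * in bits 31:24 of the FIFO word so it is shifted out first; copy_wswap32()
 * does the same for pairs of 16-bit words via swahw32().
 */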

static int sh_msiof_transfer_one(struct spi_controller *ctlr,
				 struct spi_device *spi,
				 struct spi_transfer *t)
{
	struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
	void (*copy32)(u32 *, const u32 *, unsigned int);
	void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
	void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
	const void *tx_buf = t->tx_buf;
	void *rx_buf = t->rx_buf;
	unsigned int len = t->len;
	unsigned int bits = t->bits_per_word;
	unsigned int bytes_per_word;
	unsigned int words;
	int n;
	bool swab;
	int ret;

	/* reset registers */
	sh_msiof_spi_reset_regs(p);

	/* setup clocks (clock already enabled in chipselect()) */
	if (!spi_controller_is_target(p->ctlr))
		sh_msiof_spi_set_clk_regs(p, t);

	while (ctlr->dma_tx && len > 15) {
		/*
		 * DMA supports 32-bit words only, hence pack 8-bit and 16-bit
		 * words, with byte resp. word swapping.
		 */
		unsigned int l = 0;

		if (tx_buf)
			l = min(round_down(len, 4), p->tx_fifo_size * 4);
		if (rx_buf)
			l = min(round_down(len, 4), p->rx_fifo_size * 4);

		if (bits <= 8) {
			copy32 = copy_bswap32;
		} else if (bits <= 16) {
			copy32 = copy_wswap32;
		} else {
			copy32 = copy_plain32;
		}

		if (tx_buf)
			copy32(p->tx_dma_page, tx_buf, l / 4);

		ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l);
		if (ret == -EAGAIN) {
			dev_warn_once(&p->pdev->dev,
				"DMA not available, falling back to PIO\n");
			break;
		}
		if (ret)
			return ret;

		if (rx_buf) {
			copy32(rx_buf, p->rx_dma_page, l / 4);
			rx_buf += l;
		}
		if (tx_buf)
			tx_buf += l;

		len -= l;
		if (!len)
			return 0;
	}

	if (bits <= 8 && len > 15) {
		bits = 32;
		swab = true;
	} else {
		swab = false;
	}

	/* setup bytes per word and fifo read/write functions */
	if (bits <= 8) {
		bytes_per_word = 1;
		tx_fifo = sh_msiof_spi_write_fifo_8;
		rx_fifo = sh_msiof_spi_read_fifo_8;
	} else if (bits <= 16) {
		bytes_per_word = 2;
		if ((unsigned long)tx_buf & 0x01)
			tx_fifo = sh_msiof_spi_write_fifo_16u;
		else
			tx_fifo = sh_msiof_spi_write_fifo_16;

		if ((unsigned long)rx_buf & 0x01)
			rx_fifo = sh_msiof_spi_read_fifo_16u;
		else
			rx_fifo = sh_msiof_spi_read_fifo_16;
	} else if (swab) {
		bytes_per_word = 4;
		if ((unsigned long)tx_buf & 0x03)
			tx_fifo = sh_msiof_spi_write_fifo_s32u;
		else
			tx_fifo = sh_msiof_spi_write_fifo_s32;

		if ((unsigned long)rx_buf & 0x03)
			rx_fifo = sh_msiof_spi_read_fifo_s32u;
		else
			rx_fifo = sh_msiof_spi_read_fifo_s32;
	} else {
		bytes_per_word = 4;
		if ((unsigned long)tx_buf & 0x03)
			tx_fifo = sh_msiof_spi_write_fifo_32u;
		else
			tx_fifo = sh_msiof_spi_write_fifo_32;

		if ((unsigned long)rx_buf & 0x03)
			rx_fifo = sh_msiof_spi_read_fifo_32u;
		else
			rx_fifo = sh_msiof_spi_read_fifo_32;
	}

	/* transfer in fifo sized chunks */
	words = len / bytes_per_word;

	while (words > 0) {
		n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, tx_buf, rx_buf,
					   words, bits);
		if (n < 0)
			return n;

		if (tx_buf)
			tx_buf += n * bytes_per_word;
		if (rx_buf)
			rx_buf += n * bytes_per_word;
		words -= n;

		if (words == 0 && (len % bytes_per_word)) {
			words = len % bytes_per_word;
			bits = t->bits_per_word;
			bytes_per_word = 1;
			tx_fifo = sh_msiof_spi_write_fifo_8;
			rx_fifo = sh_msiof_spi_read_fifo_8;
		}
	}

	return 0;
}
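
/*
 * Note: the "bits <= 8 && len > 15" case in sh_msiof_transfer_one() widens
 * longer byte streams to 32-bit FIFO words with byte swapping (the *_s32
 * FIFO helpers), quartering the number of FIFO accesses; any tail shorter
 * than a full word still goes out byte by byte.
 */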

static const struct sh_msiof_chipdata sh_data = {
	.bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32),
	.tx_fifo_size = 64,
	.rx_fifo_size = 64,
	.ctlr_flags = 0,
	.min_div_pow = 0,
};

static const struct sh_msiof_chipdata rcar_gen2_data = {
	.bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
			      SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
	.tx_fifo_size = 64,
	.rx_fifo_size = 64,
	.ctlr_flags = SPI_CONTROLLER_MUST_TX,
	.min_div_pow = 0,
};

static const struct sh_msiof_chipdata rcar_gen3_data = {
	.bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
			      SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
	.tx_fifo_size = 64,
	.rx_fifo_size = 64,
	.ctlr_flags = SPI_CONTROLLER_MUST_TX,
	.min_div_pow = 1,
};

static const struct sh_msiof_chipdata rcar_r8a7795_data = {
	.bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
			      SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
	.tx_fifo_size = 64,
	.rx_fifo_size = 64,
	.ctlr_flags = SPI_CONTROLLER_MUST_TX,
	.min_div_pow = 1,
	.flags = SH_MSIOF_FLAG_FIXED_DTDL_200,
};

static const struct of_device_id sh_msiof_match[] __maybe_unused = {
	{ .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
	{ .compatible = "renesas,msiof-r8a7743", .data = &rcar_gen2_data },
	{ .compatible = "renesas,msiof-r8a7745", .data = &rcar_gen2_data },
	{ .compatible = "renesas,msiof-r8a7790", .data = &rcar_gen2_data },
	{ .compatible = "renesas,msiof-r8a7791", .data = &rcar_gen2_data },
	{ .compatible = "renesas,msiof-r8a7792", .data = &rcar_gen2_data },
	{ .compatible = "renesas,msiof-r8a7793", .data = &rcar_gen2_data },
	{ .compatible = "renesas,msiof-r8a7794", .data = &rcar_gen2_data },
	{ .compatible = "renesas,rcar-gen2-msiof", .data = &rcar_gen2_data },
	{ .compatible = "renesas,msiof-r8a7795", .data = &rcar_r8a7795_data },
	{ .compatible = "renesas,msiof-r8a7796", .data = &rcar_gen3_data },
	{ .compatible = "renesas,rcar-gen3-msiof", .data = &rcar_gen3_data },
	{ .compatible = "renesas,rcar-gen4-msiof", .data = &rcar_gen3_data },
	{ .compatible = "renesas,sh-msiof", .data = &sh_data }, /* Deprecated */
	{},
};
MODULE_DEVICE_TABLE(of, sh_msiof_match);

#ifdef CONFIG_OF
static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
{
	struct sh_msiof_spi_info *info;
	struct device_node *np = dev->of_node;
	u32 num_cs = 1;

	info = devm_kzalloc(dev, sizeof(struct sh_msiof_spi_info), GFP_KERNEL);
	if (!info)
		return NULL;

	info->mode = of_property_read_bool(np, "spi-slave") ? MSIOF_SPI_TARGET
							    : MSIOF_SPI_HOST;

	/* Parse the MSIOF properties */
	if (info->mode == MSIOF_SPI_HOST)
		of_property_read_u32(np, "num-cs", &num_cs);
	of_property_read_u32(np, "renesas,tx-fifo-size",
			     &info->tx_fifo_override);
	of_property_read_u32(np, "renesas,rx-fifo-size",
			     &info->rx_fifo_override);
	of_property_read_u32(np, "renesas,dtdl", &info->dtdl);
	of_property_read_u32(np, "renesas,syncdl", &info->syncdl);

	info->num_chipselect = num_cs;

	return info;
}
#else
static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
{
	return NULL;
}
#endif
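
/*
 * Illustrative device tree fragment for the properties parsed above (node
 * name, unit address and values are made up, not taken from a real board):
 *
 *	msiof1: spi@e6e10000 {
 *		compatible = "renesas,msiof-r8a7796", "renesas,rcar-gen3-msiof";
 *		num-cs = <1>;
 *		renesas,dtdl = <200>;
 *		renesas,syncdl = <0>;
 *	};
 */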

static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
	enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_slave_config cfg;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
				(void *)(unsigned long)id, dev,
				dir == DMA_MEM_TO_DEV ? "tx" : "rx");
	if (!chan) {
		dev_warn(dev, "dma_request_slave_channel_compat failed\n");
		return NULL;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = dir;
	if (dir == DMA_MEM_TO_DEV) {
		cfg.dst_addr = port_addr;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	} else {
		cfg.src_addr = port_addr;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret) {
		dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
		dma_release_channel(chan);
		return NULL;
	}

	return chan;
}

static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
{
	struct platform_device *pdev = p->pdev;
	struct device *dev = &pdev->dev;
	const struct sh_msiof_spi_info *info = p->info;
	unsigned int dma_tx_id, dma_rx_id;
	const struct resource *res;
	struct spi_controller *ctlr;
	struct device *tx_dev, *rx_dev;

	if (dev->of_node) {
		/* In the OF case we will get the slave IDs from the DT */
		dma_tx_id = 0;
		dma_rx_id = 0;
	} else if (info && info->dma_tx_id && info->dma_rx_id) {
		dma_tx_id = info->dma_tx_id;
		dma_rx_id = info->dma_rx_id;
	} else {
		/* The driver assumes no error */
		return 0;
	}

	/* The DMA engine uses the second register set, if present */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ctlr = p->ctlr;
	ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
						 dma_tx_id, res->start + SITFDR);
	if (!ctlr->dma_tx)
		return -ENODEV;

	ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
						 dma_rx_id, res->start + SIRFDR);
	if (!ctlr->dma_rx)
		goto free_tx_chan;

	p->tx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
	if (!p->tx_dma_page)
		goto free_rx_chan;

	p->rx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
	if (!p->rx_dma_page)
		goto free_tx_page;

	tx_dev = ctlr->dma_tx->device->dev;
	p->tx_dma_addr = dma_map_single(tx_dev, p->tx_dma_page, PAGE_SIZE,
					DMA_TO_DEVICE);
	if (dma_mapping_error(tx_dev, p->tx_dma_addr))
		goto free_rx_page;

	rx_dev = ctlr->dma_rx->device->dev;
	p->rx_dma_addr = dma_map_single(rx_dev, p->rx_dma_page, PAGE_SIZE,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(rx_dev, p->rx_dma_addr))
		goto unmap_tx_page;

	dev_info(dev, "DMA available");
	return 0;

unmap_tx_page:
	dma_unmap_single(tx_dev, p->tx_dma_addr, PAGE_SIZE, DMA_TO_DEVICE);
free_rx_page:
	free_page((unsigned long)p->rx_dma_page);
free_tx_page:
	free_page((unsigned long)p->tx_dma_page);
free_rx_chan:
	dma_release_channel(ctlr->dma_rx);
free_tx_chan:
	dma_release_channel(ctlr->dma_tx);
	ctlr->dma_tx = NULL;
	return -ENODEV;
}

static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p)
{
	struct spi_controller *ctlr = p->ctlr;

	if (!ctlr->dma_tx)
		return;

	dma_unmap_single(ctlr->dma_rx->device->dev, p->rx_dma_addr, PAGE_SIZE,
			 DMA_FROM_DEVICE);
	dma_unmap_single(ctlr->dma_tx->device->dev, p->tx_dma_addr, PAGE_SIZE,
			 DMA_TO_DEVICE);
	free_page((unsigned long)p->rx_dma_page);
	free_page((unsigned long)p->tx_dma_page);
	dma_release_channel(ctlr->dma_rx);
	dma_release_channel(ctlr->dma_tx);
}

static int sh_msiof_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	const struct sh_msiof_chipdata *chipdata;
	struct sh_msiof_spi_info *info;
	struct sh_msiof_spi_priv *p;
	unsigned long clksrc;
	int i;
	int ret;

	chipdata = of_device_get_match_data(&pdev->dev);
	if (chipdata) {
		info = sh_msiof_spi_parse_dt(&pdev->dev);
	} else {
		chipdata = (const void *)pdev->id_entry->driver_data;
		info = dev_get_platdata(&pdev->dev);
	}

	if (!info) {
		dev_err(&pdev->dev, "failed to obtain device info\n");
		return -ENXIO;
	}

	if (chipdata->flags & SH_MSIOF_FLAG_FIXED_DTDL_200)
		info->dtdl = 200;

	if (info->mode == MSIOF_SPI_TARGET)
		ctlr = spi_alloc_target(&pdev->dev,
					sizeof(struct sh_msiof_spi_priv));
	else
		ctlr = spi_alloc_host(&pdev->dev,
				      sizeof(struct sh_msiof_spi_priv));
	if (ctlr == NULL)
		return -ENOMEM;

	p = spi_controller_get_devdata(ctlr);

	platform_set_drvdata(pdev, p);
	p->ctlr = ctlr;
	p->info = info;
	p->min_div_pow = chipdata->min_div_pow;

	init_completion(&p->done);
	init_completion(&p->done_txdma);

	p->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(p->clk)) {
		dev_err(&pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(p->clk);
		goto err1;
	}

	i = platform_get_irq(pdev, 0);
	if (i < 0) {
		ret = i;
		goto err1;
	}

	p->mapbase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(p->mapbase)) {
		ret = PTR_ERR(p->mapbase);
		goto err1;
	}

	ret = devm_request_irq(&pdev->dev, i, sh_msiof_spi_irq, 0,
			       dev_name(&pdev->dev), p);
	if (ret) {
		dev_err(&pdev->dev, "unable to request irq\n");
		goto err1;
	}

	p->pdev = pdev;
	pm_runtime_enable(&pdev->dev);

	/* Platform data may override FIFO sizes */
	p->tx_fifo_size = chipdata->tx_fifo_size;
	p->rx_fifo_size = chipdata->rx_fifo_size;
	if (p->info->tx_fifo_override)
		p->tx_fifo_size = p->info->tx_fifo_override;
	if (p->info->rx_fifo_override)
		p->rx_fifo_size = p->info->rx_fifo_override;

	/* init controller code */
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
	clksrc = clk_get_rate(p->clk);
	ctlr->min_speed_hz = DIV_ROUND_UP(clksrc, 1024);
	ctlr->max_speed_hz = DIV_ROUND_UP(clksrc, 1 << p->min_div_pow);
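	/*
	 * Illustrative: with a 133 MHz MSIOF clock and min_div_pow = 1
	 * (R-Car Gen3), this advertises roughly 130 kHz (divisor 1024) as
	 * the minimum and 66.5 MHz (divisor 2) as the maximum speed.
	 */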
	ctlr->flags = chipdata->ctlr_flags;
	ctlr->bus_num = pdev->id;
	ctlr->num_chipselect = p->info->num_chipselect;
	ctlr->dev.of_node = pdev->dev.of_node;
	ctlr->setup = sh_msiof_spi_setup;
	ctlr->prepare_message = sh_msiof_prepare_message;
	ctlr->target_abort = sh_msiof_target_abort;
	ctlr->bits_per_word_mask = chipdata->bits_per_word_mask;
	ctlr->auto_runtime_pm = true;
	ctlr->transfer_one = sh_msiof_transfer_one;
	ctlr->use_gpio_descriptors = true;
	ctlr->max_native_cs = MAX_SS;

	ret = sh_msiof_request_dma(p);
	if (ret < 0)
		dev_warn(&pdev->dev, "DMA not available, using PIO\n");

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret < 0) {
		dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
		goto err2;
	}

	return 0;

err2:
	sh_msiof_release_dma(p);
	pm_runtime_disable(&pdev->dev);
err1:
	spi_controller_put(ctlr);
	return ret;
}

static void sh_msiof_spi_remove(struct platform_device *pdev)
{
	struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);

	sh_msiof_release_dma(p);
	pm_runtime_disable(&pdev->dev);
}

static const struct platform_device_id spi_driver_ids[] = {
	{ "spi_sh_msiof",	(kernel_ulong_t)&sh_data },
	{},
};
MODULE_DEVICE_TABLE(platform, spi_driver_ids);

#ifdef CONFIG_PM_SLEEP
static int sh_msiof_spi_suspend(struct device *dev)
{
	struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);

	return spi_controller_suspend(p->ctlr);
}

static int sh_msiof_spi_resume(struct device *dev)
{
	struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);

	return spi_controller_resume(p->ctlr);
}

static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
			 sh_msiof_spi_resume);
#define DEV_PM_OPS	(&sh_msiof_spi_pm_ops)
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP */

static struct platform_driver sh_msiof_spi_drv = {
	.probe		= sh_msiof_spi_probe,
	.remove_new	= sh_msiof_spi_remove,
	.id_table	= spi_driver_ids,
	.driver		= {
		.name		= "spi_sh_msiof",
		.pm		= DEV_PM_OPS,
		.of_match_table = of_match_ptr(sh_msiof_match),
	},
};
module_platform_driver(sh_msiof_spi_drv);

MODULE_DESCRIPTION("SuperH MSIOF SPI Controller Interface Driver");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL v2");