/*
 * IMG SPFI controller driver
 *
 * Copyright (C) 2007,2008,2013 Imagination Technologies Ltd.
 * Copyright (C) 2014 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>

#define SPFI_DEVICE_PARAMETER(x)		(0x00 + 0x4 * (x))
#define SPFI_DEVICE_PARAMETER_BITCLK_SHIFT	24
#define SPFI_DEVICE_PARAMETER_BITCLK_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSSETUP_SHIFT	16
#define SPFI_DEVICE_PARAMETER_CSSETUP_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSHOLD_SHIFT	8
#define SPFI_DEVICE_PARAMETER_CSHOLD_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSDELAY_SHIFT	0
#define SPFI_DEVICE_PARAMETER_CSDELAY_MASK	0xff

#define SPFI_CONTROL				0x14
#define SPFI_CONTROL_CONTINUE			BIT(12)
#define SPFI_CONTROL_SOFT_RESET			BIT(11)
#define SPFI_CONTROL_SEND_DMA			BIT(10)
#define SPFI_CONTROL_GET_DMA			BIT(9)
#define SPFI_CONTROL_TMODE_SHIFT		5
#define SPFI_CONTROL_TMODE_MASK			0x7
#define SPFI_CONTROL_TMODE_SINGLE		0
#define SPFI_CONTROL_TMODE_DUAL			1
#define SPFI_CONTROL_TMODE_QUAD			2
#define SPFI_CONTROL_SPFI_EN			BIT(0)

#define SPFI_TRANSACTION			0x18
#define SPFI_TRANSACTION_TSIZE_SHIFT		16
#define SPFI_TRANSACTION_TSIZE_MASK		0xffff

#define SPFI_PORT_STATE				0x1c
#define SPFI_PORT_STATE_DEV_SEL_SHIFT		20
#define SPFI_PORT_STATE_DEV_SEL_MASK		0x7
#define SPFI_PORT_STATE_CK_POL(x)		BIT(19 - (x))
#define SPFI_PORT_STATE_CK_PHASE(x)		BIT(14 - (x))

#define SPFI_TX_32BIT_VALID_DATA		0x20
#define SPFI_TX_8BIT_VALID_DATA			0x24
#define SPFI_RX_32BIT_VALID_DATA		0x28
#define SPFI_RX_8BIT_VALID_DATA			0x2c

#define SPFI_INTERRUPT_STATUS			0x30
#define SPFI_INTERRUPT_ENABLE			0x34
#define SPFI_INTERRUPT_CLEAR			0x38
#define SPFI_INTERRUPT_IACCESS			BIT(12)
#define SPFI_INTERRUPT_GDEX8BIT			BIT(11)
#define SPFI_INTERRUPT_ALLDONETRIG		BIT(9)
#define SPFI_INTERRUPT_GDFUL			BIT(8)
#define SPFI_INTERRUPT_GDHF			BIT(7)
#define SPFI_INTERRUPT_GDEX32BIT		BIT(6)
#define SPFI_INTERRUPT_GDTRIG			BIT(5)
#define SPFI_INTERRUPT_SDFUL			BIT(3)
#define SPFI_INTERRUPT_SDHF			BIT(2)
#define SPFI_INTERRUPT_SDE			BIT(1)
#define SPFI_INTERRUPT_SDTRIG			BIT(0)

/*
 * There are four parallel FIFOs of 16 bytes each. The word buffer
 * (*_32BIT_VALID_DATA) accesses all four FIFOs at once, resulting in an
 * effective FIFO size of 64 bytes. The byte buffer (*_8BIT_VALID_DATA)
 * accesses only a single FIFO, resulting in an effective FIFO size of
 * 16 bytes.
 */
#define SPFI_32BIT_FIFO_SIZE			64
#define SPFI_8BIT_FIFO_SIZE			16
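
/*
 * Illustrative capacity arithmetic (derived from the sizes above, not
 * from the hardware manual): a 64-byte burst fits in a single fill of
 * the word buffer (16 writes of 4 bytes each), whereas the same burst
 * pushed through the byte buffer would need four complete fill/drain
 * cycles of 16 single-byte writes.
 */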

struct img_spfi {
	struct device *dev;
	struct spi_master *master;
	spinlock_t lock;

	void __iomem *regs;
	phys_addr_t phys;
	int irq;
	struct clk *spfi_clk;
	struct clk *sys_clk;

	struct dma_chan *rx_ch;
	struct dma_chan *tx_ch;
	bool tx_dma_busy;
	bool rx_dma_busy;
};

static inline u32 spfi_readl(struct img_spfi *spfi, u32 reg)
{
	return readl(spfi->regs + reg);
}

static inline void spfi_writel(struct img_spfi *spfi, u32 val, u32 reg)
{
	writel(val, spfi->regs + reg);
}

static inline void spfi_start(struct img_spfi *spfi)
{
	u32 val;

	val = spfi_readl(spfi, SPFI_CONTROL);
	val |= SPFI_CONTROL_SPFI_EN;
	spfi_writel(spfi, val, SPFI_CONTROL);
}

static inline void spfi_stop(struct img_spfi *spfi)
{
	u32 val;

	val = spfi_readl(spfi, SPFI_CONTROL);
	val &= ~SPFI_CONTROL_SPFI_EN;
	spfi_writel(spfi, val, SPFI_CONTROL);
}

static inline void spfi_reset(struct img_spfi *spfi)
{
	spfi_writel(spfi, SPFI_CONTROL_SOFT_RESET, SPFI_CONTROL);
	udelay(1);
	spfi_writel(spfi, 0, SPFI_CONTROL);
}

static void spfi_flush_tx_fifo(struct img_spfi *spfi)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	spfi_writel(spfi, SPFI_INTERRUPT_SDE, SPFI_INTERRUPT_CLEAR);
	while (time_before(jiffies, timeout)) {
		if (spfi_readl(spfi, SPFI_INTERRUPT_STATUS) &
		    SPFI_INTERRUPT_SDE)
			return;
		cpu_relax();
	}

	dev_err(spfi->dev, "Timed out waiting for FIFO to drain\n");
	spfi_reset(spfi);
}

static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
				     unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max / 4) {
		spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (status & SPFI_INTERRUPT_SDFUL)
			break;
		spfi_writel(spfi, buf[count], SPFI_TX_32BIT_VALID_DATA);
		count++;
	}

	return count * 4;
}

static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf,
				    unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max) {
		spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (status & SPFI_INTERRUPT_SDFUL)
			break;
		spfi_writel(spfi, buf[count], SPFI_TX_8BIT_VALID_DATA);
		count++;
	}

	return count;
}

static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf,
				    unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max / 4) {
		spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT,
			    SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (!(status & SPFI_INTERRUPT_GDEX32BIT))
			break;
		buf[count] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
		count++;
	}

	return count * 4;
}

static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf,
				   unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max) {
		spfi_writel(spfi, SPFI_INTERRUPT_GDEX8BIT,
			    SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (!(status & SPFI_INTERRUPT_GDEX8BIT))
			break;
		buf[count] = spfi_readl(spfi, SPFI_RX_8BIT_VALID_DATA);
		count++;
	}

	return count;
}
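
/*
 * The four PIO helpers above share one contract: each clears the
 * relevant FIFO flag, transfers data while that flag stays deasserted,
 * and returns the number of bytes actually moved so the caller can
 * advance its buffer and retry. As a worked example (assuming an
 * otherwise idle FIFO), a 10-byte transmit from img_spfi_start_pio()
 * below proceeds as two 32-bit writes (8 bytes) via spfi_pio_write32();
 * once fewer than 4 bytes remain, the tail is pushed one byte at a
 * time via spfi_pio_write8().
 */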

static int img_spfi_start_pio(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	unsigned int tx_bytes = 0, rx_bytes = 0;
	const void *tx_buf = xfer->tx_buf;
	void *rx_buf = xfer->rx_buf;
	unsigned long timeout;

	if (tx_buf)
		tx_bytes = xfer->len;
	if (rx_buf)
		rx_bytes = xfer->len;

	spfi_start(spfi);

	timeout = jiffies +
		msecs_to_jiffies(xfer->len * 8 * 1000 / xfer->speed_hz + 100);
	while ((tx_bytes > 0 || rx_bytes > 0) &&
	       time_before(jiffies, timeout)) {
		unsigned int tx_count, rx_count;

		if (tx_bytes >= 4)
			tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
		else
			tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);

		if (rx_bytes >= 4)
			rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
		else
			rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);

		tx_buf += tx_count;
		rx_buf += rx_count;
		tx_bytes -= tx_count;
		rx_bytes -= rx_count;

		cpu_relax();
	}

	if (rx_bytes > 0 || tx_bytes > 0) {
		dev_err(spfi->dev, "PIO transfer timed out\n");
		spfi_reset(spfi);
		return -ETIMEDOUT;
	}

	if (tx_buf)
		spfi_flush_tx_fifo(spfi);
	spfi_stop(spfi);

	return 0;
}

static void img_spfi_dma_rx_cb(void *data)
{
	struct img_spfi *spfi = data;
	unsigned long flags;

	spin_lock_irqsave(&spfi->lock, flags);

	spfi->rx_dma_busy = false;
	if (!spfi->tx_dma_busy) {
		spfi_stop(spfi);
		spi_finalize_current_transfer(spfi->master);
	}

	spin_unlock_irqrestore(&spfi->lock, flags);
}

static void img_spfi_dma_tx_cb(void *data)
{
	struct img_spfi *spfi = data;
	unsigned long flags;

	spfi_flush_tx_fifo(spfi);

	spin_lock_irqsave(&spfi->lock, flags);

	spfi->tx_dma_busy = false;
	if (!spfi->rx_dma_busy) {
		spfi_stop(spfi);
		spi_finalize_current_transfer(spfi->master);
	}

	spin_unlock_irqrestore(&spfi->lock, flags);
}

static int img_spfi_start_dma(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
	struct dma_slave_config rxconf, txconf;

	spfi->rx_dma_busy = false;
	spfi->tx_dma_busy = false;

	if (xfer->rx_buf) {
		rxconf.direction = DMA_DEV_TO_MEM;
		if (xfer->len % 4 == 0) {
			rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
			rxconf.src_addr_width = 4;
			rxconf.src_maxburst = 4;
		} else {
			rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
			rxconf.src_addr_width = 1;
			rxconf.src_maxburst = 4;
		}
		dmaengine_slave_config(spfi->rx_ch, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(spfi->rx_ch, xfer->rx_sg.sgl,
						 xfer->rx_sg.nents,
						 DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT);
		if (!rxdesc)
			goto stop_dma;

		rxdesc->callback = img_spfi_dma_rx_cb;
		rxdesc->callback_param = spfi;
	}

	if (xfer->tx_buf) {
		txconf.direction = DMA_MEM_TO_DEV;
		if (xfer->len % 4 == 0) {
			txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
			txconf.dst_addr_width = 4;
			txconf.dst_maxburst = 4;
		} else {
			txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
			txconf.dst_addr_width = 1;
			txconf.dst_maxburst = 4;
		}
		dmaengine_slave_config(spfi->tx_ch, &txconf);

		txdesc = dmaengine_prep_slave_sg(spfi->tx_ch, xfer->tx_sg.sgl,
						 xfer->tx_sg.nents,
						 DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT);
		if (!txdesc)
			goto stop_dma;

		txdesc->callback = img_spfi_dma_tx_cb;
		txdesc->callback_param = spfi;
	}

	if (xfer->rx_buf) {
		spfi->rx_dma_busy = true;
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(spfi->rx_ch);
	}

	spfi_start(spfi);

	if (xfer->tx_buf) {
		spfi->tx_dma_busy = true;
		dmaengine_submit(txdesc);
		dma_async_issue_pending(spfi->tx_ch);
	}

	return 1;

stop_dma:
	dmaengine_terminate_all(spfi->rx_ch);
	dmaengine_terminate_all(spfi->tx_ch);
	return -EIO;
}
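
/*
 * Note on completion: img_spfi_start_dma() returns 1 to tell the SPI
 * core that the transfer is still in flight. The transfer is only
 * finalized from the DMA callbacks above, and only once *both*
 * directions are idle, which is why each callback re-checks its
 * sibling's busy flag under spfi->lock before calling
 * spi_finalize_current_transfer().
 */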

static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	u32 val, div;

	/*
	 * output = spfi_clk * (BITCLK / 512), where BITCLK must be a
	 * power of 2 up to 256 (where 255 == 256 since BITCLK is 8 bits)
	 */
	div = DIV_ROUND_UP(master->max_speed_hz, xfer->speed_hz);
	div = clamp(512 / (1 << get_count_order(div)), 1, 255);

	val = spfi_readl(spfi, SPFI_DEVICE_PARAMETER(spi->chip_select));
	val &= ~(SPFI_DEVICE_PARAMETER_BITCLK_MASK <<
		 SPFI_DEVICE_PARAMETER_BITCLK_SHIFT);
	val |= div << SPFI_DEVICE_PARAMETER_BITCLK_SHIFT;
	spfi_writel(spfi, val, SPFI_DEVICE_PARAMETER(spi->chip_select));

	val = spfi_readl(spfi, SPFI_CONTROL);
	val &= ~(SPFI_CONTROL_SEND_DMA | SPFI_CONTROL_GET_DMA);
	if (xfer->tx_buf)
		val |= SPFI_CONTROL_SEND_DMA;
	if (xfer->rx_buf)
		val |= SPFI_CONTROL_GET_DMA;
	val &= ~(SPFI_CONTROL_TMODE_MASK << SPFI_CONTROL_TMODE_SHIFT);
	if (xfer->tx_nbits == SPI_NBITS_DUAL &&
	    xfer->rx_nbits == SPI_NBITS_DUAL)
		val |= SPFI_CONTROL_TMODE_DUAL << SPFI_CONTROL_TMODE_SHIFT;
	else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
		 xfer->rx_nbits == SPI_NBITS_QUAD)
		val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
	val &= ~SPFI_CONTROL_CONTINUE;
	if (!xfer->cs_change && !list_is_last(&xfer->transfer_list,
					      &master->cur_msg->transfers))
		val |= SPFI_CONTROL_CONTINUE;
	spfi_writel(spfi, val, SPFI_CONTROL);

	val = spfi_readl(spfi, SPFI_PORT_STATE);
	if (spi->mode & SPI_CPHA)
		val |= SPFI_PORT_STATE_CK_PHASE(spi->chip_select);
	else
		val &= ~SPFI_PORT_STATE_CK_PHASE(spi->chip_select);
	if (spi->mode & SPI_CPOL)
		val |= SPFI_PORT_STATE_CK_POL(spi->chip_select);
	else
		val &= ~SPFI_PORT_STATE_CK_POL(spi->chip_select);
	spfi_writel(spfi, val, SPFI_PORT_STATE);

	spfi_writel(spfi, xfer->len << SPFI_TRANSACTION_TSIZE_SHIFT,
		    SPFI_TRANSACTION);
}
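
/*
 * Worked example of the divider above (illustrative numbers, not a
 * datasheet case): with master->max_speed_hz == 50 MHz and
 * xfer->speed_hz == 5 MHz, div = DIV_ROUND_UP(50, 5) = 10, which is
 * rounded up to the next power of 2 (16), giving BITCLK = 512 / 16 = 32
 * and an output clock of 50 MHz * 32 / 512 = 3.125 MHz, i.e. the
 * fastest available power-of-2 rate that does not exceed the requested
 * 5 MHz.
 */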

static int img_spfi_transfer_one(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	bool dma_reset = false;
	unsigned long flags;
	int ret;

	if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) {
		dev_err(spfi->dev,
			"Transfer length (%d) is greater than the max supported (%d)\n",
			xfer->len, SPFI_TRANSACTION_TSIZE_MASK);
		return -EINVAL;
	}

	/*
	 * Stop all DMA and reset the controller if the previous transaction
	 * timed out and never completed its DMA.
	 */
	spin_lock_irqsave(&spfi->lock, flags);
	if (spfi->tx_dma_busy || spfi->rx_dma_busy) {
		dev_err(spfi->dev, "SPI DMA still busy\n");
		dma_reset = true;
	}
	spin_unlock_irqrestore(&spfi->lock, flags);

	if (dma_reset) {
		dmaengine_terminate_all(spfi->tx_ch);
		dmaengine_terminate_all(spfi->rx_ch);
		spfi_reset(spfi);
	}

	img_spfi_config(master, spi, xfer);
	if (master->can_dma && master->can_dma(master, spi, xfer))
		ret = img_spfi_start_dma(master, spi, xfer);
	else
		ret = img_spfi_start_pio(master, spi, xfer);

	return ret;
}

static void img_spfi_set_cs(struct spi_device *spi, bool enable)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	u32 val;

	val = spfi_readl(spfi, SPFI_PORT_STATE);
	val &= ~(SPFI_PORT_STATE_DEV_SEL_MASK << SPFI_PORT_STATE_DEV_SEL_SHIFT);
	val |= spi->chip_select << SPFI_PORT_STATE_DEV_SEL_SHIFT;
	spfi_writel(spfi, val, SPFI_PORT_STATE);
}

static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi,
			     struct spi_transfer *xfer)
{
	if (xfer->len > SPFI_32BIT_FIFO_SIZE)
		return true;
	return false;
}
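
/*
 * The DMA cut-over above is sized to the word-buffer FIFO: a transfer
 * of up to SPFI_32BIT_FIFO_SIZE (64) bytes fits in the FIFO in one go
 * and is serviced by PIO, while anything larger is handed to the DMA
 * engine. (The rationale, presumably, is that the setup overhead of a
 * DMA transaction is not worth paying for a transfer the FIFO can
 * absorb outright.)
 */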

static irqreturn_t img_spfi_irq(int irq, void *dev_id)
{
	struct img_spfi *spfi = (struct img_spfi *)dev_id;
	u32 status;

	status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
	if (status & SPFI_INTERRUPT_IACCESS) {
		spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_CLEAR);
		dev_err(spfi->dev, "Illegal access interrupt\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int img_spfi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct img_spfi *spfi;
	struct resource *res;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*spfi));
	if (!master)
		return -ENOMEM;
	platform_set_drvdata(pdev, master);

	spfi = spi_master_get_devdata(master);
	spfi->dev = &pdev->dev;
	spfi->master = master;
	spin_lock_init(&spfi->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spfi->regs = devm_ioremap_resource(spfi->dev, res);
	if (IS_ERR(spfi->regs)) {
		ret = PTR_ERR(spfi->regs);
		goto put_spi;
	}
	spfi->phys = res->start;

	spfi->irq = platform_get_irq(pdev, 0);
	if (spfi->irq < 0) {
		ret = spfi->irq;
		goto put_spi;
	}
	ret = devm_request_irq(spfi->dev, spfi->irq, img_spfi_irq,
			       IRQ_TYPE_LEVEL_HIGH, dev_name(spfi->dev), spfi);
	if (ret)
		goto put_spi;

	spfi->sys_clk = devm_clk_get(spfi->dev, "sys");
	if (IS_ERR(spfi->sys_clk)) {
		ret = PTR_ERR(spfi->sys_clk);
		goto put_spi;
	}
	spfi->spfi_clk = devm_clk_get(spfi->dev, "spfi");
	if (IS_ERR(spfi->spfi_clk)) {
		ret = PTR_ERR(spfi->spfi_clk);
		goto put_spi;
	}

	ret = clk_prepare_enable(spfi->sys_clk);
	if (ret)
		goto put_spi;
	ret = clk_prepare_enable(spfi->spfi_clk);
	if (ret)
		goto disable_pclk;

	spfi_reset(spfi);
	/*
	 * Only enable the error (IACCESS) interrupt. In PIO mode we'll
	 * poll the status of the FIFOs.
	 */
	spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_ENABLE);

	master->auto_runtime_pm = true;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
	if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode"))
		master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
	master->num_chipselect = 5;
	master->dev.of_node = pdev->dev.of_node;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
	master->max_speed_hz = clk_get_rate(spfi->spfi_clk);
	master->min_speed_hz = master->max_speed_hz / 512;

	master->set_cs = img_spfi_set_cs;
	master->transfer_one = img_spfi_transfer_one;

	spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
	spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
	if (!spfi->tx_ch || !spfi->rx_ch) {
		if (spfi->tx_ch)
			dma_release_channel(spfi->tx_ch);
		if (spfi->rx_ch)
			dma_release_channel(spfi->rx_ch);
		dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
	} else {
		master->dma_tx = spfi->tx_ch;
		master->dma_rx = spfi->rx_ch;
		master->can_dma = img_spfi_can_dma;
	}

	pm_runtime_set_active(spfi->dev);
	pm_runtime_enable(spfi->dev);

	ret = devm_spi_register_master(spfi->dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(spfi->dev);
	if (spfi->rx_ch)
		dma_release_channel(spfi->rx_ch);
	if (spfi->tx_ch)
		dma_release_channel(spfi->tx_ch);
	clk_disable_unprepare(spfi->spfi_clk);
disable_pclk:
	clk_disable_unprepare(spfi->sys_clk);
put_spi:
	spi_master_put(master);

	return ret;
}

static int img_spfi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct img_spfi *spfi = spi_master_get_devdata(master);

	if (spfi->tx_ch)
		dma_release_channel(spfi->tx_ch);
	if (spfi->rx_ch)
		dma_release_channel(spfi->rx_ch);

	pm_runtime_disable(spfi->dev);
	if (!pm_runtime_status_suspended(spfi->dev)) {
		clk_disable_unprepare(spfi->spfi_clk);
		clk_disable_unprepare(spfi->sys_clk);
	}

	spi_master_put(master);

	return 0;
}

#ifdef CONFIG_PM
static int img_spfi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_master_get_devdata(master);

	clk_disable_unprepare(spfi->spfi_clk);
	clk_disable_unprepare(spfi->sys_clk);

	return 0;
}

static int img_spfi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(spfi->sys_clk);
	if (ret)
		return ret;
	ret = clk_prepare_enable(spfi->spfi_clk);
	if (ret) {
		clk_disable_unprepare(spfi->sys_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM */
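
/*
 * The runtime-PM pair above only gates the clocks: resume enables the
 * sys clock before the spfi clock (mirroring probe), and suspend
 * disables them in the reverse order, so a failure while re-enabling
 * the spfi clock can unwind cleanly.
 */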

#ifdef CONFIG_PM_SLEEP
static int img_spfi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	return spi_master_suspend(master);
}

static int img_spfi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_master_get_devdata(master);
	int ret;

	/*
	 * pm_runtime_get_sync() returns a positive value if the device
	 * was already active, so only treat negative values as errors.
	 */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}
	spfi_reset(spfi);
	pm_runtime_put(dev);

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_spfi_pm_ops = {
	SET_RUNTIME_PM_OPS(img_spfi_runtime_suspend, img_spfi_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(img_spfi_suspend, img_spfi_resume)
};

static const struct of_device_id img_spfi_of_match[] = {
	{ .compatible = "img,spfi", },
	{ },
};
MODULE_DEVICE_TABLE(of, img_spfi_of_match);

static struct platform_driver img_spfi_driver = {
	.driver = {
		.name = "img-spfi",
		.pm = &img_spfi_pm_ops,
		.of_match_table = of_match_ptr(img_spfi_of_match),
	},
	.probe = img_spfi_probe,
	.remove = img_spfi_remove,
};
module_platform_driver(img_spfi_driver);

MODULE_DESCRIPTION("IMG SPFI controller driver");
MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
MODULE_LICENSE("GPL v2");