/*
 * IMG SPFI controller driver
 *
 * Copyright (C) 2007,2008,2013 Imagination Technologies Ltd.
 * Copyright (C) 2014 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>

#define SPFI_DEVICE_PARAMETER(x)		(0x00 + 0x4 * (x))
#define SPFI_DEVICE_PARAMETER_BITCLK_SHIFT	24
#define SPFI_DEVICE_PARAMETER_BITCLK_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSSETUP_SHIFT	16
#define SPFI_DEVICE_PARAMETER_CSSETUP_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSHOLD_SHIFT	8
#define SPFI_DEVICE_PARAMETER_CSHOLD_MASK	0xff
#define SPFI_DEVICE_PARAMETER_CSDELAY_SHIFT	0
#define SPFI_DEVICE_PARAMETER_CSDELAY_MASK	0xff

#define SPFI_CONTROL				0x14
#define SPFI_CONTROL_CONTINUE			BIT(12)
#define SPFI_CONTROL_SOFT_RESET			BIT(11)
#define SPFI_CONTROL_SEND_DMA			BIT(10)
#define SPFI_CONTROL_GET_DMA			BIT(9)
#define SPFI_CONTROL_SE				BIT(8)
#define SPFI_CONTROL_TMODE_SHIFT		5
#define SPFI_CONTROL_TMODE_MASK			0x7
#define SPFI_CONTROL_TMODE_SINGLE		0
#define SPFI_CONTROL_TMODE_DUAL			1
#define SPFI_CONTROL_TMODE_QUAD			2
#define SPFI_CONTROL_SPFI_EN			BIT(0)

#define SPFI_TRANSACTION			0x18
#define SPFI_TRANSACTION_TSIZE_SHIFT		16
#define SPFI_TRANSACTION_TSIZE_MASK		0xffff

#define SPFI_PORT_STATE				0x1c
#define SPFI_PORT_STATE_DEV_SEL_SHIFT		20
#define SPFI_PORT_STATE_DEV_SEL_MASK		0x7
#define SPFI_PORT_STATE_CK_POL(x)		BIT(19 - (x))
#define SPFI_PORT_STATE_CK_PHASE(x)		BIT(14 - (x))

#define SPFI_TX_32BIT_VALID_DATA		0x20
#define SPFI_TX_8BIT_VALID_DATA			0x24
#define SPFI_RX_32BIT_VALID_DATA		0x28
#define SPFI_RX_8BIT_VALID_DATA			0x2c

#define SPFI_INTERRUPT_STATUS			0x30
#define SPFI_INTERRUPT_ENABLE			0x34
#define SPFI_INTERRUPT_CLEAR			0x38
#define SPFI_INTERRUPT_IACCESS			BIT(12)
#define SPFI_INTERRUPT_GDEX8BIT			BIT(11)
#define SPFI_INTERRUPT_ALLDONETRIG		BIT(9)
#define SPFI_INTERRUPT_GDFUL			BIT(8)
#define SPFI_INTERRUPT_GDHF			BIT(7)
#define SPFI_INTERRUPT_GDEX32BIT		BIT(6)
#define SPFI_INTERRUPT_GDTRIG			BIT(5)
#define SPFI_INTERRUPT_SDFUL			BIT(3)
#define SPFI_INTERRUPT_SDHF			BIT(2)
#define SPFI_INTERRUPT_SDE			BIT(1)
#define SPFI_INTERRUPT_SDTRIG			BIT(0)

/*
 * There are four parallel FIFOs of 16 bytes each.  The word buffer
 * (*_32BIT_VALID_DATA) accesses all four FIFOs at once, resulting in an
 * effective FIFO size of 64 bytes.  The byte buffer (*_8BIT_VALID_DATA)
 * accesses only a single FIFO, resulting in an effective FIFO size of
 * 16 bytes.
 */
#define SPFI_32BIT_FIFO_SIZE			64
#define SPFI_8BIT_FIFO_SIZE			16
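/*
 * The code below uses the word buffer whenever it can: PIO accesses of
 * four or more bytes, and DMA for transfers whose length is a multiple
 * of four.  It falls back to the byte buffer otherwise.  Transfers that
 * fit entirely in the 64-byte word FIFO are done in PIO mode; see
 * img_spfi_can_dma().
 */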
struct img_spfi {
	struct device *dev;
	struct spi_master *master;
	spinlock_t lock;

	void __iomem *regs;
	phys_addr_t phys;
	int irq;
	struct clk *spfi_clk;
	struct clk *sys_clk;

	struct dma_chan *rx_ch;
	struct dma_chan *tx_ch;
	bool tx_dma_busy;
	bool rx_dma_busy;
};

static inline u32 spfi_readl(struct img_spfi *spfi, u32 reg)
{
	return readl(spfi->regs + reg);
}

static inline void spfi_writel(struct img_spfi *spfi, u32 val, u32 reg)
{
	writel(val, spfi->regs + reg);
}

static inline void spfi_start(struct img_spfi *spfi)
{
	u32 val;

	val = spfi_readl(spfi, SPFI_CONTROL);
	val |= SPFI_CONTROL_SPFI_EN;
	spfi_writel(spfi, val, SPFI_CONTROL);
}

static inline void spfi_reset(struct img_spfi *spfi)
{
	spfi_writel(spfi, SPFI_CONTROL_SOFT_RESET, SPFI_CONTROL);
	spfi_writel(spfi, 0, SPFI_CONTROL);
}

static int spfi_wait_all_done(struct img_spfi *spfi)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(50);

	while (time_before(jiffies, timeout)) {
		u32 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);

		if (status & SPFI_INTERRUPT_ALLDONETRIG) {
			spfi_writel(spfi, SPFI_INTERRUPT_ALLDONETRIG,
				    SPFI_INTERRUPT_CLEAR);
			return 0;
		}
		cpu_relax();
	}

	dev_err(spfi->dev, "Timed out waiting for transaction to complete\n");
	spfi_reset(spfi);

	return -ETIMEDOUT;
}

static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
				     unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max / 4) {
		spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (status & SPFI_INTERRUPT_SDFUL)
			break;
		spfi_writel(spfi, buf[count], SPFI_TX_32BIT_VALID_DATA);
		count++;
	}

	return count * 4;
}

static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf,
				    unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max) {
		spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (status & SPFI_INTERRUPT_SDFUL)
			break;
		spfi_writel(spfi, buf[count], SPFI_TX_8BIT_VALID_DATA);
		count++;
	}

	return count;
}

static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf,
				    unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max / 4) {
		spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT,
			    SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (!(status & SPFI_INTERRUPT_GDEX32BIT))
			break;
		buf[count] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
		count++;
	}

	return count * 4;
}

static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf,
				   unsigned int max)
{
	unsigned int count = 0;
	u32 status;

	while (count < max) {
		spfi_writel(spfi, SPFI_INTERRUPT_GDEX8BIT,
			    SPFI_INTERRUPT_CLEAR);
		status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
		if (!(status & SPFI_INTERRUPT_GDEX8BIT))
			break;
		buf[count] = spfi_readl(spfi, SPFI_RX_8BIT_VALID_DATA);
		count++;
	}

	return count;
}

static int img_spfi_start_pio(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	unsigned int tx_bytes = 0, rx_bytes = 0;
	const void *tx_buf = xfer->tx_buf;
	void *rx_buf = xfer->rx_buf;
	unsigned long timeout;
	int ret;

	if (tx_buf)
		tx_bytes = xfer->len;
	if (rx_buf)
		rx_bytes = xfer->len;

	spfi_start(spfi);
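	/*
	 * Allow the transfer its theoretical time on the wire (8 bit times
	 * per byte at speed_hz, converted to milliseconds) plus 100 ms of
	 * slack before declaring a PIO timeout.
	 */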
	timeout = jiffies +
		msecs_to_jiffies(xfer->len * 8 * 1000 / xfer->speed_hz + 100);
	while ((tx_bytes > 0 || rx_bytes > 0) &&
	       time_before(jiffies, timeout)) {
		unsigned int tx_count, rx_count;

		if (tx_bytes >= 4)
			tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
		else
			tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);

		if (rx_bytes >= 4)
			rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
		else
			rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);

		tx_buf += tx_count;
		rx_buf += rx_count;
		tx_bytes -= tx_count;
		rx_bytes -= rx_count;

		cpu_relax();
	}

	ret = spfi_wait_all_done(spfi);
	if (ret < 0)
		return ret;

	if (rx_bytes > 0 || tx_bytes > 0) {
		dev_err(spfi->dev, "PIO transfer timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void img_spfi_dma_rx_cb(void *data)
{
	struct img_spfi *spfi = data;
	unsigned long flags;

	spfi_wait_all_done(spfi);

	spin_lock_irqsave(&spfi->lock, flags);
	spfi->rx_dma_busy = false;
	if (!spfi->tx_dma_busy)
		spi_finalize_current_transfer(spfi->master);
	spin_unlock_irqrestore(&spfi->lock, flags);
}

static void img_spfi_dma_tx_cb(void *data)
{
	struct img_spfi *spfi = data;
	unsigned long flags;

	spfi_wait_all_done(spfi);

	spin_lock_irqsave(&spfi->lock, flags);
	spfi->tx_dma_busy = false;
	if (!spfi->rx_dma_busy)
		spi_finalize_current_transfer(spfi->master);
	spin_unlock_irqrestore(&spfi->lock, flags);
}

static int img_spfi_start_dma(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
	struct dma_slave_config rxconf, txconf;

	spfi->rx_dma_busy = false;
	spfi->tx_dma_busy = false;

	if (xfer->rx_buf) {
		rxconf.direction = DMA_DEV_TO_MEM;
		if (xfer->len % 4 == 0) {
			rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
			rxconf.src_addr_width = 4;
			rxconf.src_maxburst = 4;
		} else {
			rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
			rxconf.src_addr_width = 1;
			rxconf.src_maxburst = 4;
		}
		dmaengine_slave_config(spfi->rx_ch, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(spfi->rx_ch, xfer->rx_sg.sgl,
						 xfer->rx_sg.nents,
						 DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT);
		if (!rxdesc)
			goto stop_dma;

		rxdesc->callback = img_spfi_dma_rx_cb;
		rxdesc->callback_param = spfi;
	}

	if (xfer->tx_buf) {
		txconf.direction = DMA_MEM_TO_DEV;
		if (xfer->len % 4 == 0) {
			txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
			txconf.dst_addr_width = 4;
			txconf.dst_maxburst = 4;
		} else {
			txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
			txconf.dst_addr_width = 1;
			txconf.dst_maxburst = 4;
		}
		dmaengine_slave_config(spfi->tx_ch, &txconf);

		txdesc = dmaengine_prep_slave_sg(spfi->tx_ch, xfer->tx_sg.sgl,
						 xfer->tx_sg.nents,
						 DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT);
		if (!txdesc)
			goto stop_dma;

		txdesc->callback = img_spfi_dma_tx_cb;
		txdesc->callback_param = spfi;
	}
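	/*
	 * Issue the RX descriptor before enabling the controller so that
	 * the receive channel is ready as soon as data starts arriving;
	 * TX is only kicked off once the controller is running.
	 */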
	if (xfer->rx_buf) {
		spfi->rx_dma_busy = true;
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(spfi->rx_ch);
	}

	spfi_start(spfi);

	if (xfer->tx_buf) {
		spfi->tx_dma_busy = true;
		dmaengine_submit(txdesc);
		dma_async_issue_pending(spfi->tx_ch);
	}

	return 1;

stop_dma:
	dmaengine_terminate_all(spfi->rx_ch);
	dmaengine_terminate_all(spfi->tx_ch);
	return -EIO;
}

static void img_spfi_handle_err(struct spi_master *master,
				struct spi_message *msg)
{
	struct img_spfi *spfi = spi_master_get_devdata(master);
	unsigned long flags;

	/*
	 * Stop all DMA and reset the controller if the previous transaction
	 * timed out and never completed its DMA.
	 */
	spin_lock_irqsave(&spfi->lock, flags);
	if (spfi->tx_dma_busy || spfi->rx_dma_busy) {
		spfi->tx_dma_busy = false;
		spfi->rx_dma_busy = false;

		dmaengine_terminate_all(spfi->tx_ch);
		dmaengine_terminate_all(spfi->rx_ch);
	}
	spin_unlock_irqrestore(&spfi->lock, flags);
}

static int img_spfi_prepare(struct spi_master *master, struct spi_message *msg)
{
	struct img_spfi *spfi = spi_master_get_devdata(master);
	u32 val;

	val = spfi_readl(spfi, SPFI_PORT_STATE);
	if (msg->spi->mode & SPI_CPHA)
		val |= SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select);
	else
		val &= ~SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select);
	if (msg->spi->mode & SPI_CPOL)
		val |= SPFI_PORT_STATE_CK_POL(msg->spi->chip_select);
	else
		val &= ~SPFI_PORT_STATE_CK_POL(msg->spi->chip_select);
	spfi_writel(spfi, val, SPFI_PORT_STATE);

	return 0;
}

static int img_spfi_unprepare(struct spi_master *master,
			      struct spi_message *msg)
{
	struct img_spfi *spfi = spi_master_get_devdata(master);

	spfi_reset(spfi);

	return 0;
}
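/*
 * Chip select is driven as a plain GPIO rather than by the controller:
 * request it deasserted here (the idle level depends on SPI_CS_HIGH) and
 * let the SPI core toggle it around transfers.
 */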
static int img_spfi_setup(struct spi_device *spi)
{
	int ret;

	ret = gpio_request_one(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ?
			       GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
			       dev_name(&spi->dev));
	if (ret)
		dev_err(&spi->dev, "can't request chipselect gpio %d\n",
			spi->cs_gpio);

	return ret;
}

static void img_spfi_cleanup(struct spi_device *spi)
{
	gpio_free(spi->cs_gpio);
}

static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	u32 val, div;

	/*
	 * output = spfi_clk * (BITCLK / 512), where BITCLK must be a
	 * power of 2 up to 128
	 */
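	/*
	 * Worked example (hypothetical rates): with spfi_clk at 40 MHz and
	 * a requested 10 MHz, div below is DIV_ROUND_UP(40M, 10M) = 4,
	 * get_count_order(4) = 2, and 512 >> 2 = 128, giving an output of
	 * 40 MHz * 128 / 512 = 10 MHz.  Rates that don't divide evenly are
	 * rounded down to the next achievable frequency.
	 */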
	div = DIV_ROUND_UP(clk_get_rate(spfi->spfi_clk), xfer->speed_hz);
	div = clamp(512 / (1 << get_count_order(div)), 1, 128);

	val = spfi_readl(spfi, SPFI_DEVICE_PARAMETER(spi->chip_select));
	val &= ~(SPFI_DEVICE_PARAMETER_BITCLK_MASK <<
		 SPFI_DEVICE_PARAMETER_BITCLK_SHIFT);
	val |= div << SPFI_DEVICE_PARAMETER_BITCLK_SHIFT;
	spfi_writel(spfi, val, SPFI_DEVICE_PARAMETER(spi->chip_select));

	spfi_writel(spfi, xfer->len << SPFI_TRANSACTION_TSIZE_SHIFT,
		    SPFI_TRANSACTION);

	val = spfi_readl(spfi, SPFI_CONTROL);
	val &= ~(SPFI_CONTROL_SEND_DMA | SPFI_CONTROL_GET_DMA);
	if (xfer->tx_buf)
		val |= SPFI_CONTROL_SEND_DMA;
	if (xfer->rx_buf)
		val |= SPFI_CONTROL_GET_DMA;
	val &= ~(SPFI_CONTROL_TMODE_MASK << SPFI_CONTROL_TMODE_SHIFT);
	if (xfer->tx_nbits == SPI_NBITS_DUAL &&
	    xfer->rx_nbits == SPI_NBITS_DUAL)
		val |= SPFI_CONTROL_TMODE_DUAL << SPFI_CONTROL_TMODE_SHIFT;
	else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
		 xfer->rx_nbits == SPI_NBITS_QUAD)
		val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
	val |= SPFI_CONTROL_SE;
	spfi_writel(spfi, val, SPFI_CONTROL);
}

static int img_spfi_transfer_one(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	int ret;

	if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) {
		dev_err(spfi->dev,
			"Transfer length (%d) is greater than the max supported (%d)",
			xfer->len, SPFI_TRANSACTION_TSIZE_MASK);
		return -EINVAL;
	}

	img_spfi_config(master, spi, xfer);
	if (master->can_dma && master->can_dma(master, spi, xfer))
		ret = img_spfi_start_dma(master, spi, xfer);
	else
		ret = img_spfi_start_pio(master, spi, xfer);

	return ret;
}

static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi,
			     struct spi_transfer *xfer)
{
	if (xfer->len > SPFI_32BIT_FIFO_SIZE)
		return true;
	return false;
}

static irqreturn_t img_spfi_irq(int irq, void *dev_id)
{
	struct img_spfi *spfi = (struct img_spfi *)dev_id;
	u32 status;

	status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
	if (status & SPFI_INTERRUPT_IACCESS) {
		spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_CLEAR);
		dev_err(spfi->dev, "Illegal access interrupt");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int img_spfi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct img_spfi *spfi;
	struct resource *res;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*spfi));
	if (!master)
		return -ENOMEM;
	platform_set_drvdata(pdev, master);

	spfi = spi_master_get_devdata(master);
	spfi->dev = &pdev->dev;
	spfi->master = master;
	spin_lock_init(&spfi->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spfi->regs = devm_ioremap_resource(spfi->dev, res);
	if (IS_ERR(spfi->regs)) {
		ret = PTR_ERR(spfi->regs);
		goto put_spi;
	}
	spfi->phys = res->start;

	spfi->irq = platform_get_irq(pdev, 0);
	if (spfi->irq < 0) {
		ret = spfi->irq;
		goto put_spi;
	}
	ret = devm_request_irq(spfi->dev, spfi->irq, img_spfi_irq,
			       IRQ_TYPE_LEVEL_HIGH, dev_name(spfi->dev), spfi);
	if (ret)
		goto put_spi;

	spfi->sys_clk = devm_clk_get(spfi->dev, "sys");
	if (IS_ERR(spfi->sys_clk)) {
		ret = PTR_ERR(spfi->sys_clk);
		goto put_spi;
	}
	spfi->spfi_clk = devm_clk_get(spfi->dev, "spfi");
	if (IS_ERR(spfi->spfi_clk)) {
		ret = PTR_ERR(spfi->spfi_clk);
		goto put_spi;
	}

	ret = clk_prepare_enable(spfi->sys_clk);
	if (ret)
		goto put_spi;
	ret = clk_prepare_enable(spfi->spfi_clk);
	if (ret)
		goto disable_pclk;

	spfi_reset(spfi);
	/*
	 * Only enable the error (IACCESS) interrupt.  In PIO mode we'll
	 * poll the status of the FIFOs.
	 */
	spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_ENABLE);

	master->auto_runtime_pm = true;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
	if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode"))
		master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
	master->dev.of_node = pdev->dev.of_node;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
	master->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4;
	master->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512;

	master->setup = img_spfi_setup;
	master->cleanup = img_spfi_cleanup;
	master->transfer_one = img_spfi_transfer_one;
	master->prepare_message = img_spfi_prepare;
	master->unprepare_message = img_spfi_unprepare;
	master->handle_err = img_spfi_handle_err;

	spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
	spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
	if (!spfi->tx_ch || !spfi->rx_ch) {
		if (spfi->tx_ch)
			dma_release_channel(spfi->tx_ch);
		if (spfi->rx_ch)
			dma_release_channel(spfi->rx_ch);
		dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
	} else {
		master->dma_tx = spfi->tx_ch;
		master->dma_rx = spfi->rx_ch;
		master->can_dma = img_spfi_can_dma;
	}

	pm_runtime_set_active(spfi->dev);
	pm_runtime_enable(spfi->dev);

	ret = devm_spi_register_master(spfi->dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(spfi->dev);
	if (spfi->rx_ch)
		dma_release_channel(spfi->rx_ch);
	if (spfi->tx_ch)
		dma_release_channel(spfi->tx_ch);
	clk_disable_unprepare(spfi->spfi_clk);
disable_pclk:
	clk_disable_unprepare(spfi->sys_clk);
put_spi:
	spi_master_put(master);

	return ret;
}

static int img_spfi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct img_spfi *spfi = spi_master_get_devdata(master);

	if (spfi->tx_ch)
		dma_release_channel(spfi->tx_ch);
	if (spfi->rx_ch)
		dma_release_channel(spfi->rx_ch);

	pm_runtime_disable(spfi->dev);
	if (!pm_runtime_status_suspended(spfi->dev)) {
		clk_disable_unprepare(spfi->spfi_clk);
		clk_disable_unprepare(spfi->sys_clk);
	}

	spi_master_put(master);

	return 0;
}
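/*
 * Power management: runtime suspend simply gates both clocks and runtime
 * resume re-enables them.  Controller state is not preserved across a
 * full system suspend, so img_spfi_resume() resets the block before
 * handing control back to the SPI core.
 */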
#ifdef CONFIG_PM
static int img_spfi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_master_get_devdata(master);

	clk_disable_unprepare(spfi->spfi_clk);
	clk_disable_unprepare(spfi->sys_clk);

	return 0;
}

static int img_spfi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(spfi->sys_clk);
	if (ret)
		return ret;
	ret = clk_prepare_enable(spfi->spfi_clk);
	if (ret) {
		clk_disable_unprepare(spfi->sys_clk);
		return ret;
	}

	return 0;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
static int img_spfi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	return spi_master_suspend(master);
}

static int img_spfi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_master_get_devdata(master);
	int ret;

	/*
	 * pm_runtime_get_sync() returns 1 if the device was already active,
	 * so only treat negative values as errors, and drop the usage count
	 * taken above if resuming failed.
	 */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}
	spfi_reset(spfi);
	pm_runtime_put(dev);

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_spfi_pm_ops = {
	SET_RUNTIME_PM_OPS(img_spfi_runtime_suspend, img_spfi_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(img_spfi_suspend, img_spfi_resume)
};

static const struct of_device_id img_spfi_of_match[] = {
	{ .compatible = "img,spfi", },
	{ },
};
MODULE_DEVICE_TABLE(of, img_spfi_of_match);

static struct platform_driver img_spfi_driver = {
	.driver = {
		.name = "img-spfi",
		.pm = &img_spfi_pm_ops,
		.of_match_table = of_match_ptr(img_spfi_of_match),
	},
	.probe = img_spfi_probe,
	.remove = img_spfi_remove,
};
module_platform_driver(img_spfi_driver);

MODULE_DESCRIPTION("IMG SPFI controller driver");
MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
MODULE_LICENSE("GPL v2");
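/*
 * For reference, a sketch of the device tree node this driver expects,
 * pieced together from the lookups above (compatible string, "sys"/"spfi"
 * clocks, "tx"/"rx" DMA channels, optional quad-mode property).  The
 * address, interrupt specifier, and phandles are placeholders, not taken
 * from any real platform:
 *
 *	spfi@18100f00 {
 *		compatible = "img,spfi";
 *		reg = <0x18100f00 0x100>;
 *		interrupts = <GIC_SHARED 22 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&system_clk>, <&spfi_clk>;
 *		clock-names = "sys", "spfi";
 *		dmas = <&mdc 9>, <&mdc 10>;
 *		dma-names = "tx", "rx";
 *		cs-gpios = <&gpio0 0 GPIO_ACTIVE_HIGH>;
 *		img,supports-quad-mode;
 *	};
 */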