/*
 * Freescale MXS SPI master driver
 *
 * Copyright 2012 DENX Software Engineering, GmbH.
 * Copyright 2012 Freescale Semiconductor, Inc.
 * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
 *
 * Rework and transition to new API by:
 *	Marek Vasut <marex@denx.de>
 *
 * Based on previous attempt by:
 *	Fabio Estevam <fabio.estevam@freescale.com>
 *
 * Based on code from U-Boot bootloader by:
 *	Marek Vasut <marex@denx.de>
 *
 * Based on spi-stmp.c, which is:
 *	Author: Dmitry Pervushin <dimka@embeddedalley.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/stmp_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/mxs-spi.h>

#define DRIVER_NAME		"mxs-spi"

/* Use a 10s timeout for very long transfers; it should suffice. */
#define SSP_TIMEOUT		10000

#define SG_MAXLEN		0xff00

struct mxs_spi {
	struct mxs_ssp		ssp;
	struct completion	c;
};

static int mxs_spi_setup_transfer(struct spi_device *dev,
				  struct spi_transfer *t)
{
	struct mxs_spi *spi = spi_master_get_devdata(dev->master);
	struct mxs_ssp *ssp = &spi->ssp;
	uint8_t bits_per_word;
	uint32_t hz = 0;

	bits_per_word = dev->bits_per_word;
	if (t && t->bits_per_word)
		bits_per_word = t->bits_per_word;

	if (bits_per_word != 8) {
		dev_err(&dev->dev, "%s, unsupported bits_per_word=%d\n",
			__func__, bits_per_word);
		return -EINVAL;
	}

	hz = dev->max_speed_hz;
	if (t && t->speed_hz)
		hz = min(hz, t->speed_hz);
	if (hz == 0) {
		dev_err(&dev->dev, "Cannot continue with zero clock\n");
		return -EINVAL;
	}

	mxs_ssp_set_clk_rate(ssp, hz);

	writel(BF_SSP_CTRL1_SSP_MODE(BV_SSP_CTRL1_SSP_MODE__SPI) |
	       BF_SSP_CTRL1_WORD_LENGTH(BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) |
	       ((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
	       ((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0),
	       ssp->base + HW_SSP_CTRL1(ssp));

	writel(0x0, ssp->base + HW_SSP_CMD0);
	writel(0x0, ssp->base + HW_SSP_CMD1);

	return 0;
}

static int mxs_spi_setup(struct spi_device *dev)
{
	int err = 0;

	if (!dev->bits_per_word)
		dev->bits_per_word = 8;

	if (dev->mode & ~(SPI_CPOL | SPI_CPHA))
		return -EINVAL;

	err = mxs_spi_setup_transfer(dev, NULL);
	if (err) {
		dev_err(&dev->dev,
			"Failed to setup transfer, error = %d\n", err);
	}

	return err;
}

static uint32_t mxs_spi_cs_to_reg(unsigned cs)
{
	uint32_t select = 0;

	/*
	 * i.MX28 Datasheet: 17.10.1: HW_SSP_CTRL0
	 *
	 * The BM_SSP_CTRL0_WAIT_FOR_CMD and BM_SSP_CTRL0_WAIT_FOR_IRQ bits
	 * in the HW_SSP_CTRL0 register have multiple uses; refer to the
	 * datasheet for further details. In SPI mode, they are used to
	 * toggle the chip-select lines (nCS pins).
	 */
	if (cs & 1)
		select |= BM_SSP_CTRL0_WAIT_FOR_CMD;
	if (cs & 2)
		select |= BM_SSP_CTRL0_WAIT_FOR_IRQ;

	return select;
}

static void mxs_spi_set_cs(struct mxs_spi *spi, unsigned cs)
{
	const uint32_t mask =
		BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ;
	uint32_t select;
	struct mxs_ssp *ssp = &spi->ssp;

	writel(mask, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
	select = mxs_spi_cs_to_reg(cs);
	writel(select, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
}

static inline void mxs_spi_enable(struct mxs_spi *spi)
{
	struct mxs_ssp *ssp = &spi->ssp;

	writel(BM_SSP_CTRL0_LOCK_CS,
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
	writel(BM_SSP_CTRL0_IGNORE_CRC,
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
}

static inline void mxs_spi_disable(struct mxs_spi *spi)
{
	struct mxs_ssp *ssp = &spi->ssp;

	writel(BM_SSP_CTRL0_LOCK_CS,
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
	writel(BM_SSP_CTRL0_IGNORE_CRC,
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
}

static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set)
{
	const unsigned long timeout = jiffies + msecs_to_jiffies(SSP_TIMEOUT);
	struct mxs_ssp *ssp = &spi->ssp;
	uint32_t reg;

	do {
		reg = readl_relaxed(ssp->base + offset);

		if (!set)
			reg = ~reg;

		reg &= mask;

		if (reg == mask)
			return 0;
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;
}

static void mxs_ssp_dma_irq_callback(void *param)
{
	struct mxs_spi *spi = param;

	complete(&spi->c);
}

static irqreturn_t mxs_ssp_irq_handler(int irq, void *dev_id)
{
	struct mxs_ssp *ssp = dev_id;

	dev_err(ssp->dev, "%s[%i] CTRL1=%08x STATUS=%08x\n",
		__func__, __LINE__,
		readl(ssp->base + HW_SSP_CTRL1(ssp)),
		readl(ssp->base + HW_SSP_STATUS(ssp)));

	return IRQ_HANDLED;
}

static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
			    unsigned char *buf, int len,
			    int *first, int *last, int write)
{
	struct mxs_ssp *ssp = &spi->ssp;
	struct dma_async_tx_descriptor *desc = NULL;
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	const int desc_len = vmalloced_buf ? PAGE_SIZE : SG_MAXLEN;
	const int sgs = DIV_ROUND_UP(len, desc_len);
	int sg_count;
	int min, ret;
	uint32_t ctrl0;
	struct page *vm_page;
	void *sg_buf;
	/*
	 * Each chunk carries the SSP PIO register writes (sent to the SSP
	 * via a DMA_TRANS_NONE descriptor) and a scatterlist for the data.
	 */
	struct {
		uint32_t		pio[4];
		struct scatterlist	sg;
	} *dma_xfer;

	if (!len)
		return -EINVAL;

	dma_xfer = kzalloc(sizeof(*dma_xfer) * sgs, GFP_KERNEL);
	if (!dma_xfer)
		return -ENOMEM;

	INIT_COMPLETION(spi->c);

	ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
	ctrl0 |= BM_SSP_CTRL0_DATA_XFER | mxs_spi_cs_to_reg(cs);

	if (*first)
		ctrl0 |= BM_SSP_CTRL0_LOCK_CS;
	if (!write)
		ctrl0 |= BM_SSP_CTRL0_READ;

	/* Queue the DMA data transfer. */
	for (sg_count = 0; sg_count < sgs; sg_count++) {
		min = min(len, desc_len);

		/* Prepare the transfer descriptor. */
		if ((sg_count + 1 == sgs) && *last)
			ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;

		if (ssp->devid == IMX23_SSP)
			ctrl0 |= min;

		dma_xfer[sg_count].pio[0] = ctrl0;
		dma_xfer[sg_count].pio[3] = min;

		if (vmalloced_buf) {
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				ret = -ENOMEM;
				goto err_vmalloc;
			}
			sg_buf = page_address(vm_page) +
				((size_t)buf & ~PAGE_MASK);
		} else {
			sg_buf = buf;
		}

		sg_init_one(&dma_xfer[sg_count].sg, sg_buf, min);
		ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
				 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		if (!ret) {
			dev_err(ssp->dev, "Failed to map DMA buffer\n");
			ret = -ENOMEM;
			goto err_vmalloc;
		}

		len -= min;
		buf += min;

		/* Queue the PIO register write transfer. */
		desc = dmaengine_prep_slave_sg(ssp->dmach,
				(struct scatterlist *)dma_xfer[sg_count].pio,
				(ssp->devid == IMX23_SSP) ? 1 : 4,
				DMA_TRANS_NONE,
				sg_count ? DMA_PREP_INTERRUPT : 0);
		if (!desc) {
			dev_err(ssp->dev,
				"Failed to get PIO reg. write descriptor.\n");
			ret = -EINVAL;
			goto err_mapped;
		}

		desc = dmaengine_prep_slave_sg(ssp->dmach,
				&dma_xfer[sg_count].sg, 1,
				write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

		if (!desc) {
			dev_err(ssp->dev,
				"Failed to get DMA data write descriptor.\n");
			ret = -EINVAL;
			goto err_mapped;
		}
	}

	/*
	 * The last descriptor must have this callback,
	 * to finish the DMA transaction.
	 */
	desc->callback = mxs_ssp_dma_irq_callback;
	desc->callback_param = spi;

	/* Start the transfer. */
	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);

	ret = wait_for_completion_timeout(&spi->c,
					  msecs_to_jiffies(SSP_TIMEOUT));
	if (!ret) {
		dev_err(ssp->dev, "DMA transfer timeout\n");
		ret = -ETIMEDOUT;
		dmaengine_terminate_all(ssp->dmach);
		goto err_vmalloc;
	}

	ret = 0;

err_vmalloc:
	while (--sg_count >= 0) {
err_mapped:
		dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
			     write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	}

	kfree(dma_xfer);

	return ret;
}

static int mxs_spi_txrx_pio(struct mxs_spi *spi, int cs,
			    unsigned char *buf, int len,
			    int *first, int *last, int write)
{
	struct mxs_ssp *ssp = &spi->ssp;

	if (*first)
		mxs_spi_enable(spi);

	mxs_spi_set_cs(spi, cs);

	while (len--) {
		if (*last && len == 0)
			mxs_spi_disable(spi);

		if (ssp->devid == IMX23_SSP) {
			writel(BM_SSP_CTRL0_XFER_COUNT,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
			writel(1,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
		} else {
			writel(1, ssp->base + HW_SSP_XFER_SIZE);
		}

		if (write)
			writel(BM_SSP_CTRL0_READ,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
		else
			writel(BM_SSP_CTRL0_READ,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		writel(BM_SSP_CTRL0_RUN,
			ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 1))
			return -ETIMEDOUT;

		if (write)
			writel(*buf, ssp->base + HW_SSP_DATA(ssp));

		writel(BM_SSP_CTRL0_DATA_XFER,
		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		if (!write) {
			if (mxs_ssp_wait(spi, HW_SSP_STATUS(ssp),
					 BM_SSP_STATUS_FIFO_EMPTY, 0))
				return -ETIMEDOUT;

			*buf = (readl(ssp->base + HW_SSP_DATA(ssp)) & 0xff);
		}

		if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 0))
			return -ETIMEDOUT;

		buf++;
	}

	if (len <= 0)
		return 0;

	return -ETIMEDOUT;
}

static int mxs_spi_transfer_one(struct spi_master *master,
				struct spi_message *m)
{
	struct mxs_spi *spi = spi_master_get_devdata(master);
	struct mxs_ssp *ssp = &spi->ssp;
	int first, last;
	struct spi_transfer *t, *tmp_t;
	int status = 0;
	int cs;

	first = last = 0;

	cs = m->spi->chip_select;

	list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) {

		status = mxs_spi_setup_transfer(m->spi, t);
		if (status)
			break;

		if (&t->transfer_list == m->transfers.next)
			first = 1;
		if (&t->transfer_list == m->transfers.prev)
			last = 1;
		if ((t->rx_buf && t->tx_buf) || (t->rx_dma && t->tx_dma)) {
			dev_err(ssp->dev,
				"Cannot send and receive simultaneously\n");
			status = -EINVAL;
			break;
		}

		/*
		 * Small blocks can be transferred via PIO.
		 * Measured empirically with:
		 *
		 * dd if=/dev/mtdblock0 of=/dev/null bs=1024k count=1
		 *
		 * DMA only: 2.164808 seconds, 473.0KB/s
		 * Combined: 1.676276 seconds, 610.9KB/s
		 */
		if (t->len < 32) {
			writel(BM_SSP_CTRL1_DMA_ENABLE,
				ssp->base + HW_SSP_CTRL1(ssp) +
				STMP_OFFSET_REG_CLR);

			if (t->tx_buf)
				status = mxs_spi_txrx_pio(spi, cs,
						(void *)t->tx_buf,
						t->len, &first, &last, 1);
			if (t->rx_buf)
				status = mxs_spi_txrx_pio(spi, cs,
						t->rx_buf, t->len,
						&first, &last, 0);
		} else {
			writel(BM_SSP_CTRL1_DMA_ENABLE,
				ssp->base + HW_SSP_CTRL1(ssp) +
				STMP_OFFSET_REG_SET);

			if (t->tx_buf)
				status = mxs_spi_txrx_dma(spi, cs,
						(void *)t->tx_buf, t->len,
						&first, &last, 1);
			if (t->rx_buf)
				status = mxs_spi_txrx_dma(spi, cs,
						t->rx_buf, t->len,
						&first, &last, 0);
		}

		if (status) {
			stmp_reset_block(ssp->base);
			break;
		}

		m->actual_length += t->len;
		first = last = 0;
	}

	m->status = status;
	spi_finalize_current_message(master);

	return status;
}

static bool mxs_ssp_dma_filter(struct dma_chan *chan, void *param)
{
	struct mxs_ssp *ssp = param;

	if (!mxs_dma_is_apbh(chan))
		return false;

	if (chan->chan_id != ssp->dma_channel)
		return false;

	chan->private = &ssp->dma_data;

	return true;
}

static const struct of_device_id mxs_spi_dt_ids[] = {
	{ .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, },
	{ .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_spi_dt_ids);

static int mxs_spi_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(mxs_spi_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct spi_master *master;
	struct mxs_spi *spi;
	struct mxs_ssp *ssp;
	struct resource *iores, *dmares;
	struct pinctrl *pinctrl;
	struct clk *clk;
	void __iomem *base;
	int devid, dma_channel, clk_freq;
	int ret = 0, irq_err, irq_dma;
	dma_cap_mask_t mask;

	/*
	 * Default clock speed for the SPI core. 160 MHz seems to
	 * work reasonably well with most SPI flashes, so use this
	 * as a default. Override with the "clock-frequency" DT property.
	 */
	const int clk_freq_default = 160000000;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq_err = platform_get_irq(pdev, 0);
	irq_dma = platform_get_irq(pdev, 1);
	if (!iores || irq_err < 0 || irq_dma < 0)
		return -EINVAL;

	base = devm_request_and_ioremap(&pdev->dev, iores);
	if (!base)
		return -EADDRNOTAVAIL;

	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
	if (IS_ERR(pinctrl))
		return PTR_ERR(pinctrl);

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (np) {
		devid = (enum mxs_ssp_id) of_id->data;
		/*
		 * TODO: This is a temporary solution and should be changed
		 * to use the generic DMA binding later when the helpers get
		 * in.
		 */
		ret = of_property_read_u32(np, "fsl,ssp-dma-channel",
					   &dma_channel);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to get DMA channel\n");
			return -EINVAL;
		}

		ret = of_property_read_u32(np, "clock-frequency",
					   &clk_freq);
		if (ret)
			clk_freq = clk_freq_default;
	} else {
		dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
		if (!dmares)
			return -EINVAL;
		devid = pdev->id_entry->driver_data;
		dma_channel = dmares->start;
		clk_freq = clk_freq_default;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(*spi));
	if (!master)
		return -ENOMEM;

	master->transfer_one_message = mxs_spi_transfer_one;
	master->setup = mxs_spi_setup;
	master->mode_bits = SPI_CPOL | SPI_CPHA;
	master->num_chipselect = 3;
	master->dev.of_node = np;
	master->flags = SPI_MASTER_HALF_DUPLEX;

	spi = spi_master_get_devdata(master);
	ssp = &spi->ssp;
	ssp->dev = &pdev->dev;
	ssp->clk = clk;
	ssp->base = base;
	ssp->devid = devid;
	ssp->dma_channel = dma_channel;

	init_completion(&spi->c);

	ret = devm_request_irq(&pdev->dev, irq_err, mxs_ssp_irq_handler, 0,
			       DRIVER_NAME, ssp);
	if (ret)
		goto out_master_free;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	ssp->dma_data.chan_irq = irq_dma;
	ssp->dmach = dma_request_channel(mask, mxs_ssp_dma_filter, ssp);
	if (!ssp->dmach) {
		dev_err(ssp->dev, "Failed to request DMA\n");
		ret = -ENODEV;
		goto out_master_free;
	}

	clk_prepare_enable(ssp->clk);
	clk_set_rate(ssp->clk, clk_freq);
	ssp->clk_rate = clk_get_rate(ssp->clk) / 1000;

	stmp_reset_block(ssp->base);

	platform_set_drvdata(pdev, master);

	ret = spi_register_master(master);
	if (ret) {
		dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret);
		goto out_free_dma;
	}

	return 0;

out_free_dma:
	dma_release_channel(ssp->dmach);
	clk_disable_unprepare(ssp->clk);
out_master_free:
	spi_master_put(master);
	return ret;
}

static int mxs_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master;
	struct mxs_spi *spi;
	struct mxs_ssp *ssp;

	master = spi_master_get(platform_get_drvdata(pdev));
	spi = spi_master_get_devdata(master);
	ssp = &spi->ssp;

	spi_unregister_master(master);

	dma_release_channel(ssp->dmach);

	clk_disable_unprepare(ssp->clk);

	spi_master_put(master);

	return 0;
}

static struct platform_driver mxs_spi_driver = {
	.probe	= mxs_spi_probe,
	.remove	= mxs_spi_remove,
	.driver	= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
		.of_match_table = mxs_spi_dt_ids,
	},
};

module_platform_driver(mxs_spi_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("MXS SPI master driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-spi");