/*
 * Freescale MXS SPI master driver
 *
 * Copyright 2012 DENX Software Engineering, GmbH.
 * Copyright 2012 Freescale Semiconductor, Inc.
 * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
 *
 * Rework and transition to new API by:
 *   Marek Vasut <marex@denx.de>
 *
 * Based on previous attempt by:
 *   Fabio Estevam <fabio.estevam@freescale.com>
 *
 * Based on code from U-Boot bootloader by:
 *   Marek Vasut <marex@denx.de>
 *
 * Based on spi-stmp.c, which is:
 *   Author: Dmitry Pervushin <dimka@embeddedalley.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/mxs-spi.h>

#define DRIVER_NAME		"mxs-spi"

/* Use 10s timeout for very long transfers; it should suffice. */
#define SSP_TIMEOUT		10000

#define SG_MAXLEN		0xff00

struct mxs_spi {
	struct mxs_ssp		ssp;
	struct completion	c;
};

static int mxs_spi_setup_transfer(struct spi_device *dev,
				  struct spi_transfer *t)
{
	struct mxs_spi *spi = spi_master_get_devdata(dev->master);
	struct mxs_ssp *ssp = &spi->ssp;
	uint8_t bits_per_word;
	uint32_t hz = 0;

	bits_per_word = dev->bits_per_word;
	if (t && t->bits_per_word)
		bits_per_word = t->bits_per_word;

	hz = dev->max_speed_hz;
	if (t && t->speed_hz)
		hz = min(hz, t->speed_hz);
	if (hz == 0) {
		dev_err(&dev->dev, "Cannot continue with zero clock\n");
		return -EINVAL;
	}

	mxs_ssp_set_clk_rate(ssp, hz);

	writel(BF_SSP_CTRL1_SSP_MODE(BV_SSP_CTRL1_SSP_MODE__SPI) |
	       BF_SSP_CTRL1_WORD_LENGTH(BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) |
	       ((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
	       ((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0),
	       ssp->base + HW_SSP_CTRL1(ssp));

	writel(0x0, ssp->base + HW_SSP_CMD0);
	writel(0x0, ssp->base + HW_SSP_CMD1);

	return 0;
}

static int mxs_spi_setup(struct spi_device *dev)
{
	int err = 0;

	if (!dev->bits_per_word)
		dev->bits_per_word = 8;

	if (dev->mode & ~(SPI_CPOL | SPI_CPHA))
		return -EINVAL;

	err = mxs_spi_setup_transfer(dev, NULL);
	if (err) {
		dev_err(&dev->dev,
			"Failed to setup transfer, error = %d\n", err);
	}

	return err;
}

static uint32_t mxs_spi_cs_to_reg(unsigned cs)
{
	uint32_t select = 0;

	/*
	 * i.MX28 Datasheet: 17.10.1: HW_SSP_CTRL0
	 *
	 * The bits BM_SSP_CTRL0_WAIT_FOR_CMD and BM_SSP_CTRL0_WAIT_FOR_IRQ
	 * in the HW_SSP_CTRL0 register have multiple uses, please refer to
	 * the datasheet for further details. In SPI mode, they are used to
	 * toggle the chip-select lines (nCS pins).
	 */
	if (cs & 1)
		select |= BM_SSP_CTRL0_WAIT_FOR_CMD;
	if (cs & 2)
		select |= BM_SSP_CTRL0_WAIT_FOR_IRQ;

	return select;
}

static void mxs_spi_set_cs(struct mxs_spi *spi, unsigned cs)
{
	const uint32_t mask =
		BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ;
	uint32_t select;
	struct mxs_ssp *ssp = &spi->ssp;

	writel(mask, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
	select = mxs_spi_cs_to_reg(cs);
	writel(select, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
}

static inline void mxs_spi_enable(struct mxs_spi *spi)
{
	struct mxs_ssp *ssp = &spi->ssp;

	writel(BM_SSP_CTRL0_LOCK_CS,
		ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
	writel(BM_SSP_CTRL0_IGNORE_CRC,
		ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
}

static inline void mxs_spi_disable(struct mxs_spi *spi)
{
	struct mxs_ssp *ssp = &spi->ssp;

	writel(BM_SSP_CTRL0_LOCK_CS,
		ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
	writel(BM_SSP_CTRL0_IGNORE_CRC,
		ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
}

static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set)
{
	const unsigned long timeout = jiffies + msecs_to_jiffies(SSP_TIMEOUT);
	struct mxs_ssp *ssp = &spi->ssp;
	uint32_t reg;

	do {
		reg = readl_relaxed(ssp->base + offset);

		if (!set)
			reg = ~reg;

		reg &= mask;

		if (reg == mask)
			return 0;
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;
}

static void mxs_ssp_dma_irq_callback(void *param)
{
	struct mxs_spi *spi = param;
	complete(&spi->c);
}

static irqreturn_t mxs_ssp_irq_handler(int irq, void *dev_id)
{
	struct mxs_ssp *ssp = dev_id;
	dev_err(ssp->dev, "%s[%i] CTRL1=%08x STATUS=%08x\n",
		__func__, __LINE__,
		readl(ssp->base + HW_SSP_CTRL1(ssp)),
		readl(ssp->base + HW_SSP_STATUS(ssp)));
	return IRQ_HANDLED;
}

static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
			    unsigned char *buf, int len,
			    int *first, int *last, int write)
{
	struct mxs_ssp *ssp = &spi->ssp;
	struct dma_async_tx_descriptor *desc = NULL;
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	const int desc_len = vmalloced_buf ? PAGE_SIZE : SG_MAXLEN;
	const int sgs = DIV_ROUND_UP(len, desc_len);
	int sg_count;
	int min, ret;
	uint32_t ctrl0;
	struct page *vm_page;
	void *sg_buf;
	struct {
		uint32_t		pio[4];
		struct scatterlist	sg;
	} *dma_xfer;

	if (!len)
		return -EINVAL;

	dma_xfer = kzalloc(sizeof(*dma_xfer) * sgs, GFP_KERNEL);
	if (!dma_xfer)
		return -ENOMEM;

	INIT_COMPLETION(spi->c);

	ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
	ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
	ctrl0 |= BM_SSP_CTRL0_DATA_XFER | mxs_spi_cs_to_reg(cs);

	if (*first)
		ctrl0 |= BM_SSP_CTRL0_LOCK_CS;
	if (!write)
		ctrl0 |= BM_SSP_CTRL0_READ;

	/* Queue the DMA data transfer. */
	for (sg_count = 0; sg_count < sgs; sg_count++) {
		min = min(len, desc_len);

		/* Prepare the transfer descriptor. */
		if ((sg_count + 1 == sgs) && *last)
			ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;

		if (ssp->devid == IMX23_SSP) {
			ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
			ctrl0 |= min;
		}

		dma_xfer[sg_count].pio[0] = ctrl0;
		dma_xfer[sg_count].pio[3] = min;

		if (vmalloced_buf) {
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				ret = -ENOMEM;
				goto err_vmalloc;
			}
			sg_buf = page_address(vm_page) +
				((size_t)buf & ~PAGE_MASK);
		} else {
			sg_buf = buf;
		}

		sg_init_one(&dma_xfer[sg_count].sg, sg_buf, min);
		ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
			write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		len -= min;
		buf += min;

		/* Queue the PIO register write transfer. */
		desc = dmaengine_prep_slave_sg(ssp->dmach,
				(struct scatterlist *)dma_xfer[sg_count].pio,
				(ssp->devid == IMX23_SSP) ? 1 : 4,
				DMA_TRANS_NONE,
				sg_count ? DMA_PREP_INTERRUPT : 0);
		if (!desc) {
			dev_err(ssp->dev,
				"Failed to get PIO reg. write descriptor.\n");
			ret = -EINVAL;
			goto err_mapped;
		}

		desc = dmaengine_prep_slave_sg(ssp->dmach,
				&dma_xfer[sg_count].sg, 1,
				write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

		if (!desc) {
			dev_err(ssp->dev,
				"Failed to get DMA data write descriptor.\n");
			ret = -EINVAL;
			goto err_mapped;
		}
	}

	/*
	 * The last descriptor must have this callback,
	 * to finish the DMA transaction.
	 */
	desc->callback = mxs_ssp_dma_irq_callback;
	desc->callback_param = spi;

	/* Start the transfer. */
	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);

	ret = wait_for_completion_timeout(&spi->c,
				msecs_to_jiffies(SSP_TIMEOUT));
	if (!ret) {
		dev_err(ssp->dev, "DMA transfer timeout\n");
		ret = -ETIMEDOUT;
		dmaengine_terminate_all(ssp->dmach);
		goto err_vmalloc;
	}

	ret = 0;

err_vmalloc:
	while (--sg_count >= 0) {
err_mapped:
		dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
			write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	}

	kfree(dma_xfer);

	return ret;
}

static int mxs_spi_txrx_pio(struct mxs_spi *spi, int cs,
			    unsigned char *buf, int len,
			    int *first, int *last, int write)
{
	struct mxs_ssp *ssp = &spi->ssp;

	if (*first)
		mxs_spi_enable(spi);

	mxs_spi_set_cs(spi, cs);

	while (len--) {
		if (*last && len == 0)
			mxs_spi_disable(spi);

		if (ssp->devid == IMX23_SSP) {
			writel(BM_SSP_CTRL0_XFER_COUNT,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
			writel(1,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
		} else {
			writel(1, ssp->base + HW_SSP_XFER_SIZE);
		}

		if (write)
			writel(BM_SSP_CTRL0_READ,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
		else
			writel(BM_SSP_CTRL0_READ,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		writel(BM_SSP_CTRL0_RUN,
			ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 1))
			return -ETIMEDOUT;

		if (write)
			writel(*buf, ssp->base + HW_SSP_DATA(ssp));

		writel(BM_SSP_CTRL0_DATA_XFER,
			ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		if (!write) {
			if (mxs_ssp_wait(spi, HW_SSP_STATUS(ssp),
						BM_SSP_STATUS_FIFO_EMPTY, 0))
				return -ETIMEDOUT;

			*buf = (readl(ssp->base + HW_SSP_DATA(ssp)) & 0xff);
		}

		if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 0))
			return -ETIMEDOUT;

		buf++;
	}

	if (len <= 0)
		return 0;

	return -ETIMEDOUT;
}

static int mxs_spi_transfer_one(struct spi_master *master,
				struct spi_message *m)
{
	struct mxs_spi *spi = spi_master_get_devdata(master);
	struct mxs_ssp *ssp = &spi->ssp;
	int first, last;
	struct spi_transfer *t, *tmp_t;
	int status = 0;
	int cs;

	first = last = 0;

	cs = m->spi->chip_select;

	list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) {

		status = mxs_spi_setup_transfer(m->spi, t);
		if (status)
			break;

		if (&t->transfer_list == m->transfers.next)
			first = 1;
		if (&t->transfer_list == m->transfers.prev)
			last = 1;
		if ((t->rx_buf && t->tx_buf) || (t->rx_dma && t->tx_dma)) {
			dev_err(ssp->dev,
				"Cannot send and receive simultaneously\n");
			status = -EINVAL;
			break;
		}

		/*
		 * Small blocks can be transferred via PIO.
		 * Measured empirically:
		 *
		 * dd if=/dev/mtdblock0 of=/dev/null bs=1024k count=1
		 *
		 * DMA only: 2.164808 seconds, 473.0KB/s
		 * Combined: 1.676276 seconds, 610.9KB/s
		 */
		if (t->len < 32) {
			writel(BM_SSP_CTRL1_DMA_ENABLE,
				ssp->base + HW_SSP_CTRL1(ssp) +
				STMP_OFFSET_REG_CLR);

			if (t->tx_buf)
				status = mxs_spi_txrx_pio(spi, cs,
						(void *)t->tx_buf,
						t->len, &first, &last, 1);
			if (t->rx_buf)
				status = mxs_spi_txrx_pio(spi, cs,
						t->rx_buf, t->len,
						&first, &last, 0);
		} else {
			writel(BM_SSP_CTRL1_DMA_ENABLE,
				ssp->base + HW_SSP_CTRL1(ssp) +
				STMP_OFFSET_REG_SET);

			if (t->tx_buf)
				status = mxs_spi_txrx_dma(spi, cs,
						(void *)t->tx_buf, t->len,
						&first, &last, 1);
			if (t->rx_buf)
				status = mxs_spi_txrx_dma(spi, cs,
						t->rx_buf, t->len,
						&first, &last, 0);
		}

		if (status) {
			stmp_reset_block(ssp->base);
			break;
		}

		m->actual_length += t->len;
		first = last = 0;
	}

	m->status = status;
	spi_finalize_current_message(master);

	return status;
}

static const struct of_device_id mxs_spi_dt_ids[] = {
	{ .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, },
	{ .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_spi_dt_ids);

static int mxs_spi_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(mxs_spi_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct spi_master *master;
	struct mxs_spi *spi;
	struct mxs_ssp *ssp;
	struct resource *iores;
	struct clk *clk;
	void __iomem *base;
	int devid, clk_freq;
	int ret = 0, irq_err;

	/*
	 * Default clock speed for the SPI core. 160MHz seems to
	 * work reasonably well with most SPI flashes, so use this
	 * as a default. Override with "clock-frequency" DT prop.
	 */
	const int clk_freq_default = 160000000;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq_err = platform_get_irq(pdev, 0);
	if (!iores || irq_err < 0)
		return -EINVAL;

	base = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(base))
		return PTR_ERR(base);

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	devid = (enum mxs_ssp_id) of_id->data;
	ret = of_property_read_u32(np, "clock-frequency",
				   &clk_freq);
	if (ret)
		clk_freq = clk_freq_default;

	master = spi_alloc_master(&pdev->dev, sizeof(*spi));
	if (!master)
		return -ENOMEM;

	master->transfer_one_message = mxs_spi_transfer_one;
	master->setup = mxs_spi_setup;
	master->bits_per_word_mask = SPI_BPW_MASK(8);
	master->mode_bits = SPI_CPOL | SPI_CPHA;
	master->num_chipselect = 3;
	master->dev.of_node = np;
	master->flags = SPI_MASTER_HALF_DUPLEX;

	spi = spi_master_get_devdata(master);
	ssp = &spi->ssp;
	ssp->dev = &pdev->dev;
	ssp->clk = clk;
	ssp->base = base;
	ssp->devid = devid;

	init_completion(&spi->c);

	ret = devm_request_irq(&pdev->dev, irq_err, mxs_ssp_irq_handler, 0,
			       DRIVER_NAME, ssp);
	if (ret)
		goto out_master_free;

	ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
	if (!ssp->dmach) {
		dev_err(ssp->dev, "Failed to request DMA\n");
		ret = -ENODEV;
		goto out_master_free;
	}

	clk_prepare_enable(ssp->clk);
	clk_set_rate(ssp->clk, clk_freq);
	ssp->clk_rate = clk_get_rate(ssp->clk) / 1000;

	stmp_reset_block(ssp->base);

	platform_set_drvdata(pdev, master);

	ret = spi_register_master(master);
	if (ret) {
		dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret);
		goto out_free_dma;
	}

	return 0;

out_free_dma:
	dma_release_channel(ssp->dmach);
	clk_disable_unprepare(ssp->clk);
out_master_free:
	spi_master_put(master);
	return ret;
}

static int mxs_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master;
	struct mxs_spi *spi;
	struct mxs_ssp *ssp;

	master = spi_master_get(platform_get_drvdata(pdev));
	spi = spi_master_get_devdata(master);
	ssp = &spi->ssp;

	spi_unregister_master(master);

	dma_release_channel(ssp->dmach);

	clk_disable_unprepare(ssp->clk);

	spi_master_put(master);

	return 0;
}

static struct platform_driver mxs_spi_driver = {
	.probe	= mxs_spi_probe,
	.remove	= mxs_spi_remove,
	.driver	= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
		.of_match_table = mxs_spi_dt_ids,
	},
};

module_platform_driver(mxs_spi_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("MXS SPI master driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-spi");