// SPDX-License-Identifier: GPL-2.0+
//
// Freescale MXS SPI master driver
//
// Copyright 2012 DENX Software Engineering, GmbH.
// Copyright 2012 Freescale Semiconductor, Inc.
// Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
//
// Rework and transition to new API by:
//	Marek Vasut <marex@denx.de>
//
// Based on previous attempt by:
//	Fabio Estevam <fabio.estevam@freescale.com>
//
// Based on code from U-Boot bootloader by:
//	Marek Vasut <marex@denx.de>
//
// Based on spi-stmp.c, which is:
//	Author: Dmitry Pervushin <dimka@embeddedalley.com>

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/mxs-spi.h>
#include <trace/events/spi.h>

#define DRIVER_NAME		"mxs-spi"

/* Use a 10 s timeout for very long transfers; it should suffice. */
#define SSP_TIMEOUT		10000

#define SG_MAXLEN		0xff00

/*
 * Flags for txrx functions. More efficient than using an argument register
 * for each one.
 */
#define TXRX_WRITE		(1<<0)	/* This is a write */
#define TXRX_DEASSERT_CS	(1<<1)	/* De-assert CS at end of txrx */

struct mxs_spi {
	struct mxs_ssp		ssp;
	struct completion	c;
	unsigned int		sck;	/* Rate requested (vs actual) */
};

static int mxs_spi_setup_transfer(struct spi_device *dev,
				  const struct spi_transfer *t)
{
	struct mxs_spi *spi = spi_master_get_devdata(dev->master);
	struct mxs_ssp *ssp = &spi->ssp;
	const unsigned int hz = min(dev->max_speed_hz, t->speed_hz);

	if (hz == 0) {
		dev_err(&dev->dev, "SPI clock rate of zero not allowed\n");
		return -EINVAL;
	}

	if (hz != spi->sck) {
		mxs_ssp_set_clk_rate(ssp, hz);
		/*
		 * Save the requested rate, hz, rather than the actual rate,
		 * ssp->clk_rate. Otherwise we would set the rate on every
		 * transfer when the actual rate is not quite the same as the
		 * requested rate.
		 */
		spi->sck = hz;
		/*
		 * Perhaps we should return an error if the actual clock is
		 * nowhere close to what was requested?
		 */
	}

	writel(BM_SSP_CTRL0_LOCK_CS,
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

	writel(BF_SSP_CTRL1_SSP_MODE(BV_SSP_CTRL1_SSP_MODE__SPI) |
	       BF_SSP_CTRL1_WORD_LENGTH(BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) |
	       ((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
	       ((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0),
	       ssp->base + HW_SSP_CTRL1(ssp));

	writel(0x0, ssp->base + HW_SSP_CMD0);
	writel(0x0, ssp->base + HW_SSP_CMD1);

	return 0;
}

static u32 mxs_spi_cs_to_reg(unsigned cs)
{
	u32 select = 0;

	/*
	 * i.MX28 Datasheet: 17.10.1: HW_SSP_CTRL0
	 *
	 * The BM_SSP_CTRL0_WAIT_FOR_CMD and BM_SSP_CTRL0_WAIT_FOR_IRQ bits
	 * in the HW_SSP_CTRL0 register have multiple uses; please refer to
	 * the datasheet for further details. In SPI mode, they are used to
	 * toggle the chip-select lines (nCS pins).
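	 *
	 * Bit 0 of the chip-select number sets BM_SSP_CTRL0_WAIT_FOR_CMD and
	 * bit 1 sets BM_SSP_CTRL0_WAIT_FOR_IRQ; the resulting two-bit value
	 * selects which nCS line is driven for this device.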
	 */
	if (cs & 1)
		select |= BM_SSP_CTRL0_WAIT_FOR_CMD;
	if (cs & 2)
		select |= BM_SSP_CTRL0_WAIT_FOR_IRQ;

	return select;
}

static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set)
{
	const unsigned long timeout = jiffies + msecs_to_jiffies(SSP_TIMEOUT);
	struct mxs_ssp *ssp = &spi->ssp;
	u32 reg;

	do {
		reg = readl_relaxed(ssp->base + offset);

		if (!set)
			reg = ~reg;

		reg &= mask;

		if (reg == mask)
			return 0;
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;
}

static void mxs_ssp_dma_irq_callback(void *param)
{
	struct mxs_spi *spi = param;

	complete(&spi->c);
}

static irqreturn_t mxs_ssp_irq_handler(int irq, void *dev_id)
{
	struct mxs_ssp *ssp = dev_id;

	dev_err(ssp->dev, "%s[%i] CTRL1=%08x STATUS=%08x\n",
		__func__, __LINE__,
		readl(ssp->base + HW_SSP_CTRL1(ssp)),
		readl(ssp->base + HW_SSP_STATUS(ssp)));
	return IRQ_HANDLED;
}

static int mxs_spi_txrx_dma(struct mxs_spi *spi,
			    unsigned char *buf, int len,
			    unsigned int flags)
{
	struct mxs_ssp *ssp = &spi->ssp;
	struct dma_async_tx_descriptor *desc = NULL;
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	const int desc_len = vmalloced_buf ? PAGE_SIZE : SG_MAXLEN;
	const int sgs = DIV_ROUND_UP(len, desc_len);
	int sg_count;
	int min, ret;
	u32 ctrl0;
	struct page *vm_page;
	struct {
		u32 pio[4];
		struct scatterlist sg;
	} *dma_xfer;

	if (!len)
		return -EINVAL;

	dma_xfer = kcalloc(sgs, sizeof(*dma_xfer), GFP_KERNEL);
	if (!dma_xfer)
		return -ENOMEM;

	reinit_completion(&spi->c);

	/* Chip select was already programmed into CTRL0 */
	ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
	ctrl0 &= ~(BM_SSP_CTRL0_XFER_COUNT | BM_SSP_CTRL0_IGNORE_CRC |
		   BM_SSP_CTRL0_READ);
	ctrl0 |= BM_SSP_CTRL0_DATA_XFER;

	if (!(flags & TXRX_WRITE))
		ctrl0 |= BM_SSP_CTRL0_READ;

	/* Queue the DMA data transfer. */
	for (sg_count = 0; sg_count < sgs; sg_count++) {
		/* Prepare the transfer descriptor. */
		min = min(len, desc_len);

		/*
		 * De-assert CS on last segment if flag is set (i.e., no more
		 * transfers will follow)
		 */
		if ((sg_count + 1 == sgs) && (flags & TXRX_DEASSERT_CS))
			ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;

		if (ssp->devid == IMX23_SSP) {
			ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
			ctrl0 |= min;
		}

		dma_xfer[sg_count].pio[0] = ctrl0;
		dma_xfer[sg_count].pio[3] = min;

		if (vmalloced_buf) {
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				ret = -ENOMEM;
				goto err_vmalloc;
			}

			sg_init_table(&dma_xfer[sg_count].sg, 1);
			sg_set_page(&dma_xfer[sg_count].sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			sg_init_one(&dma_xfer[sg_count].sg, buf, min);
		}

		ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
			(flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		len -= min;
		buf += min;

		/* Queue the PIO register write transfer. */
		desc = dmaengine_prep_slave_sg(ssp->dmach,
				(struct scatterlist *)dma_xfer[sg_count].pio,
				(ssp->devid == IMX23_SSP) ? 1 : 4,
				DMA_TRANS_NONE,
				sg_count ? DMA_PREP_INTERRUPT : 0);
		if (!desc) {
			dev_err(ssp->dev,
				"Failed to get PIO reg. write descriptor.\n");
			ret = -EINVAL;
			goto err_mapped;
		}

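		/* Queue the data transfer itself. */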
		desc = dmaengine_prep_slave_sg(ssp->dmach,
				&dma_xfer[sg_count].sg, 1,
				(flags & TXRX_WRITE) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

		if (!desc) {
			dev_err(ssp->dev,
				"Failed to get DMA data write descriptor.\n");
			ret = -EINVAL;
			goto err_mapped;
		}
	}

	/*
	 * The last descriptor must have this callback,
	 * to finish the DMA transaction.
	 */
	desc->callback = mxs_ssp_dma_irq_callback;
	desc->callback_param = spi;

	/* Start the transfer. */
	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);

	if (!wait_for_completion_timeout(&spi->c,
					 msecs_to_jiffies(SSP_TIMEOUT))) {
		dev_err(ssp->dev, "DMA transfer timeout\n");
		ret = -ETIMEDOUT;
		dmaengine_terminate_all(ssp->dmach);
		goto err_vmalloc;
	}

	ret = 0;

err_vmalloc:
	while (--sg_count >= 0) {
err_mapped:
		dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
			(flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	}

	kfree(dma_xfer);

	return ret;
}

static int mxs_spi_txrx_pio(struct mxs_spi *spi,
			    unsigned char *buf, int len,
			    unsigned int flags)
{
	struct mxs_ssp *ssp = &spi->ssp;

	writel(BM_SSP_CTRL0_IGNORE_CRC,
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);

	while (len--) {
		if (len == 0 && (flags & TXRX_DEASSERT_CS))
			writel(BM_SSP_CTRL0_IGNORE_CRC,
			       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		if (ssp->devid == IMX23_SSP) {
			writel(BM_SSP_CTRL0_XFER_COUNT,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
			writel(1,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
		} else {
			writel(1, ssp->base + HW_SSP_XFER_SIZE);
		}

		if (flags & TXRX_WRITE)
			writel(BM_SSP_CTRL0_READ,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
		else
			writel(BM_SSP_CTRL0_READ,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		writel(BM_SSP_CTRL0_RUN,
		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 1))
			return -ETIMEDOUT;

		if (flags & TXRX_WRITE)
			writel(*buf, ssp->base + HW_SSP_DATA(ssp));

		writel(BM_SSP_CTRL0_DATA_XFER,
		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		if (!(flags & TXRX_WRITE)) {
			if (mxs_ssp_wait(spi, HW_SSP_STATUS(ssp),
					 BM_SSP_STATUS_FIFO_EMPTY, 0))
				return -ETIMEDOUT;

			*buf = (readl(ssp->base + HW_SSP_DATA(ssp)) & 0xff);
		}

		if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 0))
			return -ETIMEDOUT;

		buf++;
	}

	if (len <= 0)
		return 0;

	return -ETIMEDOUT;
}

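/*
 * transfer_one_message handler: program the chip-select bits once per
 * message, then run each transfer through PIO (short transfers) or DMA.
 */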
static int mxs_spi_transfer_one(struct spi_master *master,
				struct spi_message *m)
{
	struct mxs_spi *spi = spi_master_get_devdata(master);
	struct mxs_ssp *ssp = &spi->ssp;
	struct spi_transfer *t;
	unsigned int flag;
	int status = 0;

	/* Program the CS register bits here; they will be used for all transfers. */
	writel(BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ,
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
	writel(mxs_spi_cs_to_reg(m->spi->chip_select),
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

	list_for_each_entry(t, &m->transfers, transfer_list) {

		trace_spi_transfer_start(m, t);

		status = mxs_spi_setup_transfer(m->spi, t);
		if (status)
			break;

		/* De-assert on last transfer, inverted by cs_change flag */
		flag = (&t->transfer_list == m->transfers.prev) ^ t->cs_change ?
		       TXRX_DEASSERT_CS : 0;

		/*
		 * Small blocks can be transferred via PIO.
		 * Measured empirically:
		 *
		 * dd if=/dev/mtdblock0 of=/dev/null bs=1024k count=1
		 *
		 * DMA only: 2.164808 seconds, 473.0KB/s
		 * Combined: 1.676276 seconds, 610.9KB/s
		 */
		if (t->len < 32) {
			writel(BM_SSP_CTRL1_DMA_ENABLE,
				ssp->base + HW_SSP_CTRL1(ssp) +
				STMP_OFFSET_REG_CLR);

			if (t->tx_buf)
				status = mxs_spi_txrx_pio(spi,
						(void *)t->tx_buf,
						t->len, flag | TXRX_WRITE);
			if (t->rx_buf)
				status = mxs_spi_txrx_pio(spi,
						t->rx_buf, t->len,
						flag);
		} else {
			writel(BM_SSP_CTRL1_DMA_ENABLE,
				ssp->base + HW_SSP_CTRL1(ssp) +
				STMP_OFFSET_REG_SET);

			if (t->tx_buf)
				status = mxs_spi_txrx_dma(spi,
						(void *)t->tx_buf, t->len,
						flag | TXRX_WRITE);
			if (t->rx_buf)
				status = mxs_spi_txrx_dma(spi,
						t->rx_buf, t->len,
						flag);
		}

		trace_spi_transfer_stop(m, t);

		if (status) {
			stmp_reset_block(ssp->base);
			break;
		}

		m->actual_length += t->len;
	}

	m->status = status;
	spi_finalize_current_message(master);

	return status;
}

static int mxs_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mxs_spi *spi = spi_master_get_devdata(master);
	struct mxs_ssp *ssp = &spi->ssp;
	int ret;

	clk_disable_unprepare(ssp->clk);

	ret = pinctrl_pm_select_idle_state(dev);
	if (ret) {
		int ret2 = clk_prepare_enable(ssp->clk);

		if (ret2)
			dev_warn(dev, "Failed to reenable clock after failing pinctrl request (pinctrl: %d, clk: %d)\n",
				 ret, ret2);
	}

	return ret;
}

static int mxs_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mxs_spi *spi = spi_master_get_devdata(master);
	struct mxs_ssp *ssp = &spi->ssp;
	int ret;

	ret = pinctrl_pm_select_default_state(dev);
	if (ret)
		return ret;

	ret = clk_prepare_enable(ssp->clk);
	if (ret)
		pinctrl_pm_select_idle_state(dev);

	return ret;
}

static int __maybe_unused mxs_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev))
		return mxs_spi_runtime_suspend(dev);
	else
		return 0;
}

static int __maybe_unused mxs_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	if (!pm_runtime_suspended(dev))
		ret = mxs_spi_runtime_resume(dev);
	else
		ret = 0;
	if (ret)
		return ret;

	ret = spi_master_resume(master);
	if (ret < 0 && !pm_runtime_suspended(dev))
		mxs_spi_runtime_suspend(dev);

	return ret;
}

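/*
 * Runtime PM gates the SSP clock and moves the pins to their idle pinctrl
 * state; the system sleep hooks reuse the same callbacks when the
 * controller is not already runtime-suspended.
 */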
static const struct dev_pm_ops mxs_spi_pm = {
	SET_RUNTIME_PM_OPS(mxs_spi_runtime_suspend,
			   mxs_spi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(mxs_spi_suspend, mxs_spi_resume)
};

static const struct of_device_id mxs_spi_dt_ids[] = {
	{ .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, },
	{ .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_spi_dt_ids);

static int mxs_spi_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(mxs_spi_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct spi_master *master;
	struct mxs_spi *spi;
	struct mxs_ssp *ssp;
	struct clk *clk;
	void __iomem *base;
	int devid, clk_freq;
	int ret = 0, irq_err;

	/*
	 * Default clock speed for the SPI core. 160MHz seems to
	 * work reasonably well with most SPI flashes, so use this
	 * as a default. Override with "clock-frequency" DT prop.
	 */
	const int clk_freq_default = 160000000;

	irq_err = platform_get_irq(pdev, 0);
	if (irq_err < 0)
		return irq_err;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	devid = (enum mxs_ssp_id) of_id->data;
	ret = of_property_read_u32(np, "clock-frequency",
				   &clk_freq);
	if (ret)
		clk_freq = clk_freq_default;

	master = spi_alloc_master(&pdev->dev, sizeof(*spi));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	master->transfer_one_message = mxs_spi_transfer_one;
	master->bits_per_word_mask = SPI_BPW_MASK(8);
	master->mode_bits = SPI_CPOL | SPI_CPHA;
	master->num_chipselect = 3;
	master->dev.of_node = np;
	master->flags = SPI_MASTER_HALF_DUPLEX;
	master->auto_runtime_pm = true;

	spi = spi_master_get_devdata(master);
	ssp = &spi->ssp;
	ssp->dev = &pdev->dev;
	ssp->clk = clk;
	ssp->base = base;
	ssp->devid = devid;

	init_completion(&spi->c);

	ret = devm_request_irq(&pdev->dev, irq_err, mxs_ssp_irq_handler, 0,
			       dev_name(&pdev->dev), ssp);
	if (ret)
		goto out_master_free;

	ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
	if (IS_ERR(ssp->dmach)) {
		dev_err(ssp->dev, "Failed to request DMA\n");
		ret = PTR_ERR(ssp->dmach);
		goto out_master_free;
	}

	pm_runtime_enable(ssp->dev);
	if (!pm_runtime_enabled(ssp->dev)) {
		ret = mxs_spi_runtime_resume(ssp->dev);
		if (ret < 0) {
			dev_err(ssp->dev, "runtime resume failed\n");
			goto out_dma_release;
		}
	}

	ret = pm_runtime_get_sync(ssp->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(ssp->dev);
		dev_err(ssp->dev, "runtime_get_sync failed\n");
		goto out_pm_runtime_disable;
	}

	clk_set_rate(ssp->clk, clk_freq);

	ret = stmp_reset_block(ssp->base);
	if (ret)
		goto out_pm_runtime_put;

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret) {
		dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret);
		goto out_pm_runtime_put;
	}

	pm_runtime_put(ssp->dev);

	return 0;

out_pm_runtime_put:
	pm_runtime_put(ssp->dev);
out_pm_runtime_disable:
	pm_runtime_disable(ssp->dev);
out_dma_release:
	dma_release_channel(ssp->dmach);
out_master_free:
	spi_master_put(master);
	return ret;
}

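/* Undo probe: stop runtime PM, gate the clock if still running, free the DMA channel. */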
static int mxs_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master;
	struct mxs_spi *spi;
	struct mxs_ssp *ssp;

	master = platform_get_drvdata(pdev);
	spi = spi_master_get_devdata(master);
	ssp = &spi->ssp;

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		mxs_spi_runtime_suspend(&pdev->dev);

	dma_release_channel(ssp->dmach);

	return 0;
}

static struct platform_driver mxs_spi_driver = {
	.probe	= mxs_spi_probe,
	.remove	= mxs_spi_remove,
	.driver	= {
		.name	= DRIVER_NAME,
		.of_match_table = mxs_spi_dt_ids,
		.pm = &mxs_spi_pm,
	},
};

module_platform_driver(mxs_spi_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("MXS SPI master driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-spi");