/*
 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>

#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>
#include <asm/mach/mmc.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

#define DBG(host,fmt,args...)	\
	pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args)

static unsigned int fmax = 515633;

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	if (mrq->data)
		mrq->data->bytes_xfered = host->data_xfered;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}

static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	writel(0, host->base + MMCIMASK1);
	host->data = NULL;
}

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	DBG(host, "blksz %04x blks %04x flags %08x\n",
	    data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz;
	host->data_xfered = 0;

	mmci_init_sg(host, data);

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
	if (data->flags & MMC_DATA_READ) {
		datactrl |= MCI_DPSM_DIRECTION;
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than a FIFOSIZE of bytes to transfer,
		 * trigger a PIO interrupt as soon as any data is available.
		 */
		if (host->size < MCI_FIFOSIZE)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	writel(irqmask, base + MMCIMASK1);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	DBG(host, "op %02x arg %08x flags %08x\n",
	    cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	if (status & MCI_DATABLOCKEND) {
		host->data_xfered += data->blksz;
	}
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		if (status & MCI_DATACRCFAIL)
			data->error = -EILSEQ;
		else if (status & MCI_DATATIMEOUT)
			data->error = -ETIMEDOUT;
		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
			data->error = -EIO;
		status |= MCI_DATAEND;

		/*
		 * We hit an error condition. Ensure that any data
		 * partially written to a page is properly coherent.
		 */
		if (host->sg_len && data->flags & MMC_DATA_READ)
			flush_dcache_page(sg_page(host->sg_ptr));
	}
	if (status & MCI_DATAEND) {
		mmci_stop_data(host);

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	cmd->resp[0] = readl(base + MMCIRESPONSE0);
	cmd->resp[1] = readl(base + MMCIRESPONSE1);
	cmd->resp[2] = readl(base + MMCIRESPONSE2);
	cmd->resp[3] = readl(base + MMCIRESPONSE3);

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	}

	if (!cmd->data || cmd->error) {
		if (host->data)
			mmci_stop_data(host);
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}
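
/*
 * PIO transfer helpers. MMCIFIFOCNT is treated here as the number of 32-bit
 * words still to pass through the FIFO, so the data already sitting in the
 * FIFO works out as the remaining transfer size minus (FIFOCNT << 2), which
 * is what mmci_pio_read() below computes. Illustrative arithmetic (assumed
 * values, not read from hardware): with 64 bytes left in the transfer and
 * FIFOCNT reading 12 words, 64 - 48 = 16 bytes are available and are read
 * out as four 32-bit words.
 */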
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
		count = min(remain, maxcnt);

		writesl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	void __iomem *base = host->base;
	u32 status;

	status = readl(base + MMCISTATUS);

	DBG(host, "irq1 %08x\n", status);

	do {
		unsigned long flags;
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		/*
		 * Map the current scatter buffer.
		 */
		buffer = mmci_kmap_atomic(host, &flags) + host->sg_off;
		remain = host->sg_ptr->length - host->sg_off;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		/*
		 * Unmap the buffer.
		 */
		mmci_kunmap_atomic(host, buffer, &flags);

		host->sg_off += len;
		host->size -= len;
		remain -= len;

		if (remain)
			break;

		/*
		 * If we were reading, and we have completed this
		 * page, ensure that the data cache is coherent.
		 */
		if (status & MCI_RXACTIVE)
			flush_dcache_page(sg_page(host->sg_ptr));

		if (!mmci_next_sg(host))
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	/*
	 * If we're nearing the end of the read, switch to
	 * "any data available" mode.
	 */
	if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		writel(0, base + MMCIMASK1);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);
		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		DBG(host, "irq0 %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		printk(KERN_ERR "%s: Unsupported block size (%d bytes)\n",
		       mmc_hostname(mmc), mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}
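
/*
 * Bus clock selection in mmci_set_ios() below: unless the bypass is used,
 * the card clock comes out as cclk = mclk / (2 * (divider + 1)), so the
 * divider is taken as mclk / (2 * ios->clock) - 1 and clamped to 255.
 * Worked example (assumed mclk of 48 MHz, purely illustrative): a request
 * for 400 kHz gives a divider of 59, and 48 MHz / (2 * 60) = 400 kHz.
 */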
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 clk = 0, pwr = 0;

	if (ios->clock) {
		if (ios->clock >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			host->cclk = host->mclk;
		} else {
			clk = host->mclk / (2 * ios->clock) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}
		if (host->hw_designer == AMBA_VENDOR_ST)
			clk |= MCI_FCEN; /* Bug fix in ST IP block */
		clk |= MCI_CLK_ENABLE;
	}

	if (host->plat->translate_vdd)
		pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		break;
	case MMC_POWER_UP:
		/* The ST version does not have this, fall through to POWER_ON */
		if (host->hw_designer != AMBA_VENDOR_ST) {
			pwr |= MCI_PWR_UP;
			break;
		}
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	writel(clk, host->base + MMCICLOCK);

	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value(host->gpio_wp);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned int status;

	if (host->gpio_cd == -ENOSYS)
		status = host->plat->status(mmc_dev(host->mmc));
	else
		status = gpio_get_value(host->gpio_cd);

	return !status;
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};

static void mmci_check_status(unsigned long data)
{
	struct mmci_host *host = (struct mmci_host *)data;
	unsigned int status = mmci_get_cd(host->mmc);

	if (status ^ host->oldstat)
		mmc_detect_change(host->mmc, 0);

	host->oldstat = status;
	mod_timer(&host->timer, jiffies + HZ);
}

static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
{
	struct mmc_platform_data *plat = dev->dev.platform_data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	DBG(host, "designer ID = 0x%02x\n", host->hw_designer);
	DBG(host, "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_free;

	host->plat = plat;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		DBG(host, "eventual mclk rate: %u Hz\n", host->mclk);
	}
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	mmc->f_min = (host->mclk + 511) / 512;
	mmc->f_max = min(host->mclk, fmax);
	mmc->ocr_avail = plat->ocr_mask;

	/*
	 * We can do SGIO
	 */
	mmc->max_hw_segs = 16;
	mmc->max_phys_segs = NR_SG;

	/*
	 * Since we only have a 16-bit data length register, we must
	 * ensure that we don't exceed 2^16-1 bytes in a single request.
	 */
	mmc->max_req_size = 65535;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

#ifdef CONFIG_GPIOLIB
	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}
#endif

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
	if (ret)
		goto irq0_free;

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);
	host->oldstat = mmci_get_cd(host->mmc);

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
		mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
		(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);

	init_timer(&host->timer);
	host->timer.data = (unsigned long)host;
	host->timer.function = mmci_check_status;
	host->timer.expires = jiffies + HZ;
	add_timer(&host->timer);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_wp:
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 err_gpio_cd:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		del_timer_sync(&host->timer);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		free_irq(dev->irq[0], host);
		free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_put(host->clk);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_PM
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc, state);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0x000fffff,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
	},
	/* ST Micro variants */
	{
		.id	= 0x00180180,
		.mask	= 0x00ffffff,
	},
	{
		.id	= 0x00280180,
		.mask	= 0x00ffffff,
	},
	{ 0, 0 },
};

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};

static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");