/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

static unsigned int fmax = 515633;

/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MMCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using an ST-specific clock divider algorithm
 * @blksz_datactrl16: true if the block size occupies bits 16..30 of the
 *		      datactrl register
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			sdio;
	bool			st_clkdiv;
	bool			blksz_datactrl16;
};

static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
};

static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
	.sdio			= true,
};

static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
};

static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.sdio			= true,
	.st_clkdiv		= true,
	.blksz_datactrl16	= true,
};
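
/*
 * Worked example of the two divider equations handled in
 * mmci_set_clkreg() below (illustrative numbers only, not from the
 * TRMs): for mclk = 100 MHz and a desired rate of 400 kHz, the ST
 * variants program clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 = 248,
 * giving cclk = 100000000 / (248 + 2) = 400 kHz, while the original
 * ARM block programs clkdiv = 100000000 / (2 * 400000) - 1 = 124,
 * giving cclk = 100000000 / (2 * (124 + 1)) = 400 kHz.
 */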

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	writel(clk, host->base + MMCICLOCK);
}

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	/*
	 * Need to drop the host lock here; mmc_request_done may call
	 * back into the driver...
	 */
	spin_unlock(&host->lock);
	pm_runtime_put(mmc_dev(host->mmc));
	mmc_request_done(host->mmc, mrq);
	spin_lock(&host->lock);
}

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}

static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void __devinit mmci_dma_setup(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	if (!plat || !plat->dma_filter) {
		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
		return;
	}

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (plat->dma_rx_param) {
		host->dma_rx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_rx_param);
		/* E.g if no DMA hardware is present */
		if (!host->dma_rx_channel)
			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
	}

	if (plat->dma_tx_param) {
		host->dma_tx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_tx_param);
		if (!host->dma_tx_channel)
			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
	} else {
		host->dma_tx_channel = host->dma_rx_channel;
	}

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}

/*
 * This is used in __devinit or __devexit so inline it
 * so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;

	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel && plat->dma_tx_param)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan = host->dma_current;
	enum dma_data_direction dir;
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers.  On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dmaengine_terminate_all(chan);
		if (!data->error)
			data->error = -EIO;
	}

	if (data->flags & MMC_DATA_WRITE) {
		dir = DMA_TO_DEVICE;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!data->host_cookie)
		dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}
}

static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
}

static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
			      struct mmci_host_next *next)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	int nr_sg;

	/* Check if next job is already prepared */
	if (data->host_cookie && !next &&
	    host->dma_current && host->dma_desc_current)
		return 0;

	if (!next) {
		host->dma_current = NULL;
		host->dma_desc_current = NULL;
	}

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
					    conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	if (next) {
		next->dma_chan = chan;
		next->dma_desc = desc;
	} else {
		host->dma_current = chan;
		host->dma_desc_current = desc;
	}

	return 0;

unmap_exit:
	if (!next)
		dmaengine_terminate_all(chan);
	dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
	return -ENOMEM;
}

static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data, NULL);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	writel(datactrl, host->base + MMCIDATACTRL);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}
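
/*
 * Next-request handling: mmci_pre_request() can prepare and map the
 * DMA descriptor for the following transfer while the current one is
 * still in flight, tagging the request with a non-zero host_cookie.
 * mmci_get_next_data() below then picks up the prepared channel and
 * descriptor when that request is actually started, and
 * mmci_post_request() unmaps the buffers and clears the cookie again.
 */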
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	if (data->host_cookie && data->host_cookie != next->cookie) {
		pr_warning("[%s] invalid cookie: data->host_cookie %d"
			   " host->next_data.cookie %d\n",
			   __func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	if (!data->host_cookie)
		return;

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;

	next->dma_desc = NULL;
	next->dma_chan = NULL;
}

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
			     bool is_first_req)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	/* if config for dma */
	if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
	    ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
		if (mmci_dma_prep_data(host, data, nd))
			data->host_cookie = 0;
		else
			data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
	}
}

static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct dma_chan *chan;
	enum dma_data_direction dir;

	if (!data)
		return;

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		dir = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* if config for dma */
	if (chan) {
		if (err)
			dmaengine_terminate_all(chan);
		if (data->host_cookie)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				     data->sg_len, dir);
		mrq->data->host_cookie = 0;
	}
}

#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}

static inline void mmci_dma_release(struct mmci_host *host)
{
}

static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}

static inline void mmci_dma_data_error(struct mmci_host *host)
{
}

static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	/* The ST Micro variants have a special bit to enable SDIO */
	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card))
			datactrl |= MCI_ST_DPSM_SDIOEN;

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host))
			mmci_dma_data_error(host);

		/*
		 * Calculate how far we are into the transfer.  Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side.  On reads, this
		 * can be as much as a FIFO-worth of data ahead.  This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_unmap(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if (!cmd->data || cmd->error) {
		if (host->data)
			mmci_stop_data(host);
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}
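
/*
 * An illustration of the FIFO accounting in mmci_pio_read() below
 * (numbers assumed, not from the hardware docs): MMCIFIFOCNT holds
 * the number of words still to be transferred, so the bytes already
 * sitting in the FIFO work out as host_remain - (FIFOCNT << 2); e.g.
 * with 64 bytes left in the transfer and FIFOCNT reporting 12 words
 * outstanding, 64 - 48 = 16 bytes can be read out immediately.
 */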
static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		readsl(base + MMCIFIFO, ptr, count >> 2);

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * The ST Micro variant for SDIO transfer sizes
		 * less than 8 bytes should have clock H/W flow
		 * control disabled.
		 */
		if (variant->sdio &&
		    mmc_card_sdio(host->mmc->card)) {
			if (count < 8)
				writel(readl(host->base + MMCICLOCK) &
				       ~variant->clkreg_enable,
				       host->base + MMCICLOCK);
			else
				writel(readl(host->base + MMCICLOCK) |
				       variant->clkreg_enable,
				       host->base + MMCICLOCK);
		}

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 on the count, a single
		 * byte becomes a 32-bit write, 7 bytes will be two
		 * 32-bit writes etc.
		 */
		writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
			mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (host->vcc)
			ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
		break;
	case MMC_POWER_UP:
		if (host->vcc) {
			ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
			if (ret) {
				dev_err(mmc_dev(mmc), "unable to set OCR\n");
				/*
				 * The .set_ios() function in the mmc_host_ops
				 * struct returns void, and failing to set the
				 * power should be rare so we print an error
				 * and return here.
				 */
				return;
			}
		}
		if (host->plat->vdd_handler)
			pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
						       ios->power_mode);
		/* The ST version does not have this, fall through to POWER_ON */
		if (host->hw_designer != AMBA_VENDOR_ST) {
			pwr |= MCI_PWR_UP;
			break;
		}
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);

	if (host->pwr != pwr) {
		host->pwr = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value_cansleep(host->gpio_wp);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status;

	if (host->gpio_cd == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	} else
		status = !!gpio_get_value_cansleep(host->gpio_cd)
			^ plat->cd_invert;

	/*
	 * Use positive logic throughout - status is zero for no card,
	 * non-zero for card inserted.
	 */
	return status;
}

static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

	return IRQ_HANDLED;
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};

static int __devinit mmci_probe(struct amba_device *dev,
				const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;
	host->gpio_cd_irq = -1;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_prepare(host->clk);
	if (ret)
		goto clk_free;

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_unprep;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->phybase = dev->res.start;
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
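	/*
	 * For illustration (numbers assumed, not from any TRM): with
	 * mclk at 100 MHz, the largest divider of 255 puts f_min at
	 * 100000000 / (255 + 2) ~= 389 kHz on the ST variants and at
	 * 100000000 / (2 * (255 + 1)) ~= 195 kHz on the original ARM
	 * block, which is what the two expressions above encode.
	 */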
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		int mask = mmc_regulator_get_ocrmask(host->vcc);

		if (mask < 0)
			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
				mask);
		else {
			host->mmc->ocr_avail = (u32) mask;
			if (plat->ocr_mask)
				dev_warn(&dev->dev,
					 "Provided ocr_mask/setpower will not be used "
					 "(using regulator instead)\n");
		}
	}
#endif
	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size.  Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 2048;

	/*
	 * No limit on the number of blocks transferred.
	 */
	mmc->max_blk_count = mmc->max_req_size;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;

		/*
		 * A gpio pin that will detect cards when inserted and removed
		 * will most likely want to trigger on the edges if it is
		 * 0 when ejected and 1 when inserted (or mutatis mutandis
		 * for the inverted case) so we request triggers on both
		 * edges.
		 */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
					      mmci_cd_irq,
					      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
					      DRIVER_NAME " (cd)", host);
		if (ret >= 0)
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	if (dev->irq[1] == NO_IRQ)
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_put(&dev->dev);

	mmc_add_host(mmc);

	return 0;

irq0_free:
	free_irq(dev->irq[0], host);
unmap:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
err_gpio_wp:
	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
err_gpio_cd:
	iounmap(host->base);
clk_disable:
	clk_disable(host->clk);
clk_unprep:
	clk_unprepare(host->clk);
clk_free:
	clk_put(host->clk);
host_free:
	mmc_free_host(mmc);
rel_regions:
	amba_release_regions(dev);
out:
	return ret;
}

static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/*
		 * Undo pm_runtime_put() in probe.  We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		free_irq(dev->irq[0], host);
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_unprepare(host->clk);
		clk_put(host->clk);

		if (host->vcc)
			mmc_regulator_set_ocr(mmc, host->vcc, 0);
		regulator_put(host->vcc);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_PM
static int mmci_suspend(struct amba_device *dev, pm_message_t state)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0)
			writel(0, host->base + MMCIMASK0);
	}

	return ret;
}

static int mmci_resume(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#else
#define mmci_suspend	NULL
#define mmci_resume	NULL
#endif

static struct amba_id mmci_ids[] = {
	{
		.id	= 0x00041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm,
	},
	{
		.id	= 0x01041180,
		.mask	= 0xff0fffff,
		.data	= &variant_arm_extended_fifo,
	},
	{
		.id	= 0x00041181,
		.mask	= 0x000fffff,
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.id	= 0x00180180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00280180,
		.mask	= 0x00ffffff,
		.data	= &variant_u300,
	},
	{
		.id	= 0x00480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500,
	},
	{
		.id	= 0x10480180,
		.mask	= 0xf0ffffff,
		.data	= &variant_ux500v2,
	},
	{ 0, 0 },
};

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.suspend	= mmci_suspend,
	.resume		= mmci_resume,
	.id_table	= mmci_ids,
};

static int __init mmci_init(void)
{
	return amba_driver_register(&mmci_driver);
}

static void __exit mmci_exit(void)
{
	amba_driver_unregister(&mmci_driver);
}

module_init(mmci_init);
module_exit(mmci_exit);
module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");