/*
 * linux/drivers/mmc/host/pxa.c - PXA MMCI driver
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This hardware is really sick:
 *  - No way to clear interrupts.
 *  - Have to turn off the clock whenever we touch the device.
 *  - Doesn't tell you how many data blocks were transferred.
 *  Yuck!
 *
 *	1 and 3 byte data transfers not supported
 *	max block length up to 1023
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mmc/host.h>
#include <linux/io.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio.h>
#include <linux/gfp.h>

#include <asm/sizes.h>

#include <mach/hardware.h>
#include <mach/dma.h>
#include <mach/mmc.h>

#include "pxamci.h"

#define DRIVER_NAME	"pxa2xx-mci"

#define NR_SG		1
#define CLKRT_OFF	(~0)

#define mmc_has_26MHz()		(cpu_is_pxa300() || cpu_is_pxa310() \
				|| cpu_is_pxa935())

struct pxamci_host {
	struct mmc_host		*mmc;
	spinlock_t		lock;
	struct resource		*res;
	void __iomem		*base;
	struct clk		*clk;
	unsigned long		clkrate;
	int			irq;
	int			dma;
	unsigned int		clkrt;
	unsigned int		cmdat;
	unsigned int		imask;
	unsigned int		power_mode;
	struct pxamci_platform_data *pdata;

	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;

	dma_addr_t		sg_dma;
	struct pxa_dma_desc	*sg_cpu;
	unsigned int		dma_len;

	unsigned int		dma_dir;
	unsigned int		dma_drcmrrx;
	unsigned int		dma_drcmrtx;

	struct regulator	*vcc;
};

static inline void pxamci_init_ocr(struct pxamci_host *host)
{
#ifdef CONFIG_REGULATOR
	host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");

	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
		if (host->pdata && host->pdata->ocr_mask)
			dev_warn(mmc_dev(host->mmc),
				"ocr_mask/setpower will not be used\n");
	}
#endif
	if (host->vcc == NULL) {
		/* fall-back to platform data */
		host->mmc->ocr_avail = host->pdata ?
			host->pdata->ocr_mask :
			MMC_VDD_32_33 | MMC_VDD_33_34;
	}
}
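
/*
 * Power the card through whichever mechanism the board provides: the vmmc
 * regulator, a (possibly inverted) power GPIO, or the platform setpower()
 * callback.
 */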
static inline void pxamci_set_power(struct pxamci_host *host, unsigned int vdd)
{
	int on;

#ifdef CONFIG_REGULATOR
	if (host->vcc)
		mmc_regulator_set_ocr(host->vcc, vdd);
#endif
	if (!host->vcc && host->pdata &&
	    gpio_is_valid(host->pdata->gpio_power)) {
		on = ((1 << vdd) & host->pdata->ocr_mask);
		gpio_set_value(host->pdata->gpio_power,
			       !!on ^ host->pdata->gpio_power_invert);
	}
	if (!host->vcc && host->pdata && host->pdata->setpower)
		host->pdata->setpower(mmc_dev(host->mmc), vdd);
}

static void pxamci_stop_clock(struct pxamci_host *host)
{
	if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
		unsigned long timeout = 10000;
		unsigned int v;

		writel(STOP_CLOCK, host->base + MMC_STRPCL);

		do {
			v = readl(host->base + MMC_STAT);
			if (!(v & STAT_CLK_EN))
				break;
			udelay(1);
		} while (timeout--);

		if (v & STAT_CLK_EN)
			dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
	}
}

static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->imask &= ~mask;
	writel(host->imask, host->base + MMC_I_MASK);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->imask |= mask;
	writel(host->imask, host->base + MMC_I_MASK);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned long long clks;
	unsigned int timeout;
	bool dalgn = 0;
	u32 dcmd;
	int i;

	host->data = data;

	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	writel(nob, host->base + MMC_NOB);
	writel(data->blksz, host->base + MMC_BLKLEN);

	clks = (unsigned long long)data->timeout_ns * host->clkrate;
	do_div(clks, 1000000000UL);
	timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
	writel((timeout + 255) / 256, host->base + MMC_RDTO);

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
		DRCMR(host->dma_drcmrtx) = 0;
		DRCMR(host->dma_drcmrrx) = host->dma | DRCMR_MAPVLD;
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
		DRCMR(host->dma_drcmrrx) = 0;
		DRCMR(host->dma_drcmrtx) = host->dma | DRCMR_MAPVLD;
	}

	dcmd |= DCMD_BURST32 | DCMD_WIDTH1;

	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				   host->dma_dir);

	for (i = 0; i < host->dma_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		host->sg_cpu[i].dcmd = dcmd | length;
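		/*
		 * Writes whose length is not a multiple of 32 bytes need a
		 * DMA end interrupt so pxamci_dma_irq() can flush the
		 * partially filled FIFO buffer (BUF_PART_FULL).
		 */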
		if (length & 31 && !(data->flags & MMC_DATA_READ))
			host->sg_cpu[i].dcmd |= DCMD_ENDIRQEN;
		/* Not aligned to 8-byte boundary? */
		if (sg_dma_address(&data->sg[i]) & 0x7)
			dalgn = 1;
		if (data->flags & MMC_DATA_READ) {
			host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
			host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
		} else {
			host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
			host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
		}
		host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
					sizeof(struct pxa_dma_desc);
	}
	host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
	wmb();

	/*
	 * The PXA27x DMA controller encounters overhead when working with
	 * unaligned (to 8-byte boundaries) data, so switch on byte alignment
	 * mode only if we have unaligned data.
	 */
	if (dalgn)
		DALGN |= (1 << host->dma);
	else
		DALGN &= ~(1 << host->dma);
	DDADR(host->dma) = host->sg_dma;

	/*
	 * workaround for erratum #91:
	 * only start DMA now if we are doing a read,
	 * otherwise we wait until CMD/RESP has finished
	 * before starting DMA.
	 */
	if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ)
		DCSR(host->dma) = DCSR_RUN;
}

static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= CMDAT_BUSY;

#define RSP_TYPE(x)	((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
	switch (RSP_TYPE(mmc_resp_type(cmd))) {
	case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
		cmdat |= CMDAT_RESP_SHORT;
		break;
	case RSP_TYPE(MMC_RSP_R3):
		cmdat |= CMDAT_RESP_R3;
		break;
	case RSP_TYPE(MMC_RSP_R2):
		cmdat |= CMDAT_RESP_R2;
		break;
	default:
		break;
	}

	writel(cmd->opcode, host->base + MMC_CMD);
	writel(cmd->arg >> 16, host->base + MMC_ARGH);
	writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
	writel(cmdat, host->base + MMC_CMDAT);
	writel(host->clkrt, host->base + MMC_CLKRT);

	writel(START_CLOCK, host->base + MMC_STRPCL);

	pxamci_enable_irq(host, END_CMD_RES);
}

static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
{
	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;
	mmc_request_done(host->mmc, mrq);
}

static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i;
	u32 v;

	if (!cmd)
		return 0;

	host->cmd = NULL;

	/*
	 * Did I mention this is Sick. We always need to
	 * discard the upper 8 bits of the first 16-bit word.
	 */
	v = readl(host->base + MMC_RES) & 0xffff;
	for (i = 0; i < 4; i++) {
		u32 w1 = readl(host->base + MMC_RES) & 0xffff;
		u32 w2 = readl(host->base + MMC_RES) & 0xffff;
		cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
		v = w2;
	}

	if (stat & STAT_TIME_OUT_RESPONSE) {
		cmd->error = -ETIMEDOUT;
	} else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
		/*
		 * workaround for erratum #42:
		 * Intel PXA27x Family Processor Specification Update Rev 001
		 * A bogus CRC error can appear if the msb of a 136 bit
		 * response is a one.
		 */
314 */ 315 if (cpu_is_pxa27x() && 316 (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000)) 317 pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode); 318 else 319 cmd->error = -EILSEQ; 320 } 321 322 pxamci_disable_irq(host, END_CMD_RES); 323 if (host->data && !cmd->error) { 324 pxamci_enable_irq(host, DATA_TRAN_DONE); 325 /* 326 * workaround for erratum #91, if doing write 327 * enable DMA late 328 */ 329 if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE) 330 DCSR(host->dma) = DCSR_RUN; 331 } else { 332 pxamci_finish_request(host, host->mrq); 333 } 334 335 return 1; 336 } 337 338 static int pxamci_data_done(struct pxamci_host *host, unsigned int stat) 339 { 340 struct mmc_data *data = host->data; 341 342 if (!data) 343 return 0; 344 345 DCSR(host->dma) = 0; 346 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 347 host->dma_dir); 348 349 if (stat & STAT_READ_TIME_OUT) 350 data->error = -ETIMEDOUT; 351 else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR)) 352 data->error = -EILSEQ; 353 354 /* 355 * There appears to be a hardware design bug here. There seems to 356 * be no way to find out how much data was transferred to the card. 357 * This means that if there was an error on any block, we mark all 358 * data blocks as being in error. 359 */ 360 if (!data->error) 361 data->bytes_xfered = data->blocks * data->blksz; 362 else 363 data->bytes_xfered = 0; 364 365 pxamci_disable_irq(host, DATA_TRAN_DONE); 366 367 host->data = NULL; 368 if (host->mrq->stop) { 369 pxamci_stop_clock(host); 370 pxamci_start_cmd(host, host->mrq->stop, host->cmdat); 371 } else { 372 pxamci_finish_request(host, host->mrq); 373 } 374 375 return 1; 376 } 377 378 static irqreturn_t pxamci_irq(int irq, void *devid) 379 { 380 struct pxamci_host *host = devid; 381 unsigned int ireg; 382 int handled = 0; 383 384 ireg = readl(host->base + MMC_I_REG) & ~readl(host->base + MMC_I_MASK); 385 386 if (ireg) { 387 unsigned stat = readl(host->base + MMC_STAT); 388 389 pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat); 390 391 if (ireg & END_CMD_RES) 392 handled |= pxamci_cmd_done(host, stat); 393 if (ireg & DATA_TRAN_DONE) 394 handled |= pxamci_data_done(host, stat); 395 if (ireg & SDIO_INT) { 396 mmc_signal_sdio_irq(host->mmc); 397 handled = 1; 398 } 399 } 400 401 return IRQ_RETVAL(handled); 402 } 403 404 static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq) 405 { 406 struct pxamci_host *host = mmc_priv(mmc); 407 unsigned int cmdat; 408 409 WARN_ON(host->mrq != NULL); 410 411 host->mrq = mrq; 412 413 pxamci_stop_clock(host); 414 415 cmdat = host->cmdat; 416 host->cmdat &= ~CMDAT_INIT; 417 418 if (mrq->data) { 419 pxamci_setup_data(host, mrq->data); 420 421 cmdat &= ~CMDAT_BUSY; 422 cmdat |= CMDAT_DATAEN | CMDAT_DMAEN; 423 if (mrq->data->flags & MMC_DATA_WRITE) 424 cmdat |= CMDAT_WRITE; 425 426 if (mrq->data->flags & MMC_DATA_STREAM) 427 cmdat |= CMDAT_STREAM; 428 } 429 430 pxamci_start_cmd(host, mrq->cmd, cmdat); 431 } 432 433 static int pxamci_get_ro(struct mmc_host *mmc) 434 { 435 struct pxamci_host *host = mmc_priv(mmc); 436 437 if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) { 438 if (host->pdata->gpio_card_ro_invert) 439 return !gpio_get_value(host->pdata->gpio_card_ro); 440 else 441 return gpio_get_value(host->pdata->gpio_card_ro); 442 } 443 if (host->pdata && host->pdata->get_ro) 444 return !!host->pdata->get_ro(mmc_dev(mmc)); 445 /* 446 * Board doesn't support read only detection; let the mmc core 447 * decide what to do. 
448 */ 449 return -ENOSYS; 450 } 451 452 static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 453 { 454 struct pxamci_host *host = mmc_priv(mmc); 455 456 if (ios->clock) { 457 unsigned long rate = host->clkrate; 458 unsigned int clk = rate / ios->clock; 459 460 if (host->clkrt == CLKRT_OFF) 461 clk_enable(host->clk); 462 463 if (ios->clock == 26000000) { 464 /* to support 26MHz */ 465 host->clkrt = 7; 466 } else { 467 /* to handle (19.5MHz, 26MHz) */ 468 if (!clk) 469 clk = 1; 470 471 /* 472 * clk might result in a lower divisor than we 473 * desire. check for that condition and adjust 474 * as appropriate. 475 */ 476 if (rate / clk > ios->clock) 477 clk <<= 1; 478 host->clkrt = fls(clk) - 1; 479 } 480 481 /* 482 * we write clkrt on the next command 483 */ 484 } else { 485 pxamci_stop_clock(host); 486 if (host->clkrt != CLKRT_OFF) { 487 host->clkrt = CLKRT_OFF; 488 clk_disable(host->clk); 489 } 490 } 491 492 if (host->power_mode != ios->power_mode) { 493 host->power_mode = ios->power_mode; 494 495 pxamci_set_power(host, ios->vdd); 496 497 if (ios->power_mode == MMC_POWER_ON) 498 host->cmdat |= CMDAT_INIT; 499 } 500 501 if (ios->bus_width == MMC_BUS_WIDTH_4) 502 host->cmdat |= CMDAT_SD_4DAT; 503 else 504 host->cmdat &= ~CMDAT_SD_4DAT; 505 506 pr_debug("PXAMCI: clkrt = %x cmdat = %x\n", 507 host->clkrt, host->cmdat); 508 } 509 510 static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable) 511 { 512 struct pxamci_host *pxa_host = mmc_priv(host); 513 514 if (enable) 515 pxamci_enable_irq(pxa_host, SDIO_INT); 516 else 517 pxamci_disable_irq(pxa_host, SDIO_INT); 518 } 519 520 static const struct mmc_host_ops pxamci_ops = { 521 .request = pxamci_request, 522 .get_ro = pxamci_get_ro, 523 .set_ios = pxamci_set_ios, 524 .enable_sdio_irq = pxamci_enable_sdio_irq, 525 }; 526 527 static void pxamci_dma_irq(int dma, void *devid) 528 { 529 struct pxamci_host *host = devid; 530 int dcsr = DCSR(dma); 531 DCSR(dma) = dcsr & ~DCSR_STOPIRQEN; 532 533 if (dcsr & DCSR_ENDINTR) { 534 writel(BUF_PART_FULL, host->base + MMC_PRTBUF); 535 } else { 536 printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n", 537 mmc_hostname(host->mmc), dma, dcsr); 538 host->data->error = -EIO; 539 pxamci_data_done(host, 0); 540 } 541 } 542 543 static irqreturn_t pxamci_detect_irq(int irq, void *devid) 544 { 545 struct pxamci_host *host = mmc_priv(devid); 546 547 mmc_detect_change(devid, msecs_to_jiffies(host->pdata->detect_delay_ms)); 548 return IRQ_HANDLED; 549 } 550 551 static int pxamci_probe(struct platform_device *pdev) 552 { 553 struct mmc_host *mmc; 554 struct pxamci_host *host = NULL; 555 struct resource *r, *dmarx, *dmatx; 556 int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1; 557 558 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 559 irq = platform_get_irq(pdev, 0); 560 if (!r || irq < 0) 561 return -ENXIO; 562 563 r = request_mem_region(r->start, SZ_4K, DRIVER_NAME); 564 if (!r) 565 return -EBUSY; 566 567 mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev); 568 if (!mmc) { 569 ret = -ENOMEM; 570 goto out; 571 } 572 573 mmc->ops = &pxamci_ops; 574 575 /* 576 * We can do SG-DMA, but we don't because we never know how much 577 * data we successfully wrote to the card. 578 */ 579 mmc->max_phys_segs = NR_SG; 580 581 /* 582 * Our hardware DMA can handle a maximum of one page per SG entry. 583 */ 584 mmc->max_seg_size = PAGE_SIZE; 585 586 /* 587 * Block length register is only 10 bits before PXA27x. 588 */ 589 mmc->max_blk_size = cpu_is_pxa25x() ? 

	/*
	 * Block count register is 16 bits.
	 */
	mmc->max_blk_count = 65535;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dma = -1;
	host->pdata = pdev->dev.platform_data;
	host->clkrt = CLKRT_OFF;

	host->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto out;
	}

	host->clkrate = clk_get_rate(host->clk);

	/*
	 * Calculate minimum clock rate, rounding up.
	 */
	mmc->f_min = (host->clkrate + 63) / 64;
	mmc->f_max = (mmc_has_26MHz()) ? 26000000 : host->clkrate;

	pxamci_init_ocr(host);

	mmc->caps = 0;
	host->cmdat = 0;
	if (!cpu_is_pxa25x()) {
		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
		host->cmdat |= CMDAT_SDIO_INT_EN;
		if (mmc_has_26MHz())
			mmc->caps |= MMC_CAP_MMC_HIGHSPEED |
				     MMC_CAP_SD_HIGHSPEED;
	}

	host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	spin_lock_init(&host->lock);
	host->res = r;
	host->irq = irq;
	host->imask = MMC_I_MASK_ALL;

	host->base = ioremap(r->start, SZ_4K);
	if (!host->base) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Ensure that the host controller is shut down, and setup
	 * with our defaults.
	 */
	pxamci_stop_clock(host);
	writel(0, host->base + MMC_SPI);
	writel(64, host->base + MMC_RESTO);
	writel(host->imask, host->base + MMC_I_MASK);

	host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
				    pxamci_dma_irq, host);
	if (host->dma < 0) {
		ret = -EBUSY;
		goto out;
	}

	ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto out;

	platform_set_drvdata(pdev, mmc);

	dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!dmarx) {
		ret = -ENXIO;
		goto out;
	}
	host->dma_drcmrrx = dmarx->start;

	dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!dmatx) {
		ret = -ENXIO;
		goto out;
	}
	host->dma_drcmrtx = dmatx->start;

	if (host->pdata) {
		gpio_cd = host->pdata->gpio_card_detect;
		gpio_ro = host->pdata->gpio_card_ro;
		gpio_power = host->pdata->gpio_power;
	}
	if (gpio_is_valid(gpio_power)) {
		ret = gpio_request(gpio_power, "mmc card power");
		if (ret) {
			dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power);
			goto out;
		}
		gpio_direction_output(gpio_power,
				      host->pdata->gpio_power_invert);
	}
	if (gpio_is_valid(gpio_ro)) {
		ret = gpio_request(gpio_ro, "mmc card read only");
		if (ret) {
			dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
			goto err_gpio_ro;
		}
		gpio_direction_input(gpio_ro);
	}
	if (gpio_is_valid(gpio_cd)) {
		ret = gpio_request(gpio_cd, "mmc card detect");
		if (ret) {
			dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
			goto err_gpio_cd;
		}
		gpio_direction_input(gpio_cd);

		ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq,
				  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				  "mmc card detect", mmc);
		if (ret) {
			dev_err(&pdev->dev, "failed to request card detect IRQ\n");
			goto err_request_irq;
		}
	}
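
	/*
	 * Give the board code a chance to do its own setup; it is handed
	 * pxamci_detect_irq so it can wire up its own card-detect interrupt.
	 */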
	if (host->pdata && host->pdata->init)
		host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);

	if (gpio_is_valid(gpio_power) && host->pdata->setpower)
		dev_warn(&pdev->dev, "gpio_power and setpower() both defined\n");
	if (gpio_is_valid(gpio_ro) && host->pdata->get_ro)
		dev_warn(&pdev->dev, "gpio_ro and get_ro() both defined\n");

	mmc_add_host(mmc);

	return 0;

err_request_irq:
	gpio_free(gpio_cd);
err_gpio_cd:
	gpio_free(gpio_ro);
err_gpio_ro:
	gpio_free(gpio_power);
out:
	if (host) {
		if (host->dma >= 0)
			pxa_free_dma(host->dma);
		if (host->base)
			iounmap(host->base);
		if (host->sg_cpu)
			dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
		if (host->clk)
			clk_put(host->clk);
	}
	if (mmc)
		mmc_free_host(mmc);
	release_resource(r);
	return ret;
}

static int pxamci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	int gpio_cd = -1, gpio_ro = -1, gpio_power = -1;

	platform_set_drvdata(pdev, NULL);

	if (mmc) {
		struct pxamci_host *host = mmc_priv(mmc);

		mmc_remove_host(mmc);

		if (host->pdata) {
			gpio_cd = host->pdata->gpio_card_detect;
			gpio_ro = host->pdata->gpio_card_ro;
			gpio_power = host->pdata->gpio_power;
		}
		if (gpio_is_valid(gpio_cd)) {
			free_irq(gpio_to_irq(gpio_cd), mmc);
			gpio_free(gpio_cd);
		}
		if (gpio_is_valid(gpio_ro))
			gpio_free(gpio_ro);
		if (gpio_is_valid(gpio_power))
			gpio_free(gpio_power);
		if (host->vcc)
			regulator_put(host->vcc);

		if (host->pdata && host->pdata->exit)
			host->pdata->exit(&pdev->dev, mmc);

		pxamci_stop_clock(host);
		writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
		       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
		       host->base + MMC_I_MASK);

		DRCMR(host->dma_drcmrrx) = 0;
		DRCMR(host->dma_drcmrtx) = 0;

		free_irq(host->irq, host);
		pxa_free_dma(host->dma);
		iounmap(host->base);
		dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);

		clk_put(host->clk);

		release_resource(host->res);

		mmc_free_host(mmc);
	}
	return 0;
}

#ifdef CONFIG_PM
static int pxamci_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	int ret = 0;

	if (mmc)
		ret = mmc_suspend_host(mmc);

	return ret;
}

static int pxamci_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	int ret = 0;

	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}

static const struct dev_pm_ops pxamci_pm_ops = {
	.suspend	= pxamci_suspend,
	.resume		= pxamci_resume,
};
#endif

static struct platform_driver pxamci_driver = {
	.probe		= pxamci_probe,
	.remove		= pxamci_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
#ifdef CONFIG_PM
		.pm	= &pxamci_pm_ops,
#endif
	},
};

static int __init pxamci_init(void)
{
	return platform_driver_register(&pxamci_driver);
}

static void __exit pxamci_exit(void)
{
	platform_driver_unregister(&pxamci_driver);
}

module_init(pxamci_init);
module_exit(pxamci_exit);

MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-mci");