1 /* 2 * Synopsys DesignWare Multimedia Card Interface driver 3 * (Based on NXP driver for lpc 31xx) 4 * 5 * Copyright (C) 2009 NXP Semiconductors 6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd. 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 */ 13 14 #include <linux/blkdev.h> 15 #include <linux/clk.h> 16 #include <linux/debugfs.h> 17 #include <linux/device.h> 18 #include <linux/dma-mapping.h> 19 #include <linux/err.h> 20 #include <linux/init.h> 21 #include <linux/interrupt.h> 22 #include <linux/ioport.h> 23 #include <linux/module.h> 24 #include <linux/platform_device.h> 25 #include <linux/seq_file.h> 26 #include <linux/slab.h> 27 #include <linux/stat.h> 28 #include <linux/delay.h> 29 #include <linux/irq.h> 30 #include <linux/mmc/host.h> 31 #include <linux/mmc/mmc.h> 32 #include <linux/mmc/dw_mmc.h> 33 #include <linux/bitops.h> 34 #include <linux/regulator/consumer.h> 35 #include <linux/workqueue.h> 36 37 #include "dw_mmc.h" 38 39 /* Common flag combinations */ 40 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DTO | SDMMC_INT_DCRC | \ 41 SDMMC_INT_HTO | SDMMC_INT_SBE | \ 42 SDMMC_INT_EBE) 43 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \ 44 SDMMC_INT_RESP_ERR) 45 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \ 46 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE) 47 #define DW_MCI_SEND_STATUS 1 48 #define DW_MCI_RECV_STATUS 2 49 #define DW_MCI_DMA_THRESHOLD 16 50 51 #ifdef CONFIG_MMC_DW_IDMAC 52 struct idmac_desc { 53 u32 des0; /* Control Descriptor */ 54 #define IDMAC_DES0_DIC BIT(1) 55 #define IDMAC_DES0_LD BIT(2) 56 #define IDMAC_DES0_FD BIT(3) 57 #define IDMAC_DES0_CH BIT(4) 58 #define IDMAC_DES0_ER BIT(5) 59 #define IDMAC_DES0_CES BIT(30) 60 #define IDMAC_DES0_OWN BIT(31) 61 62 u32 des1; /* Buffer sizes */ 63 #define IDMAC_SET_BUFFER1_SIZE(d, s) \ 64 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff)) 65 66 u32 des2; /* buffer 1 physical address */ 67 68 u32 des3; /* buffer 2 physical address */ 69 }; 70 #endif /* CONFIG_MMC_DW_IDMAC */ 71 72 /** 73 * struct dw_mci_slot - MMC slot state 74 * @mmc: The mmc_host representing this slot. 75 * @host: The MMC controller this slot is using. 76 * @ctype: Card type for this slot. 77 * @mrq: mmc_request currently being processed or waiting to be 78 * processed, or NULL when the slot is idle. 79 * @queue_node: List node for placing this node in the @queue list of 80 * &struct dw_mci. 81 * @clock: Clock rate configured by set_ios(). Protected by host->lock. 82 * @flags: Random state bits associated with the slot. 83 * @id: Number of this slot. 84 * @last_detect_state: Most recently observed card detect state. 85 */ 86 struct dw_mci_slot { 87 struct mmc_host *mmc; 88 struct dw_mci *host; 89 90 u32 ctype; 91 92 struct mmc_request *mrq; 93 struct list_head queue_node; 94 95 unsigned int clock; 96 unsigned long flags; 97 #define DW_MMC_CARD_PRESENT 0 98 #define DW_MMC_CARD_NEED_INIT 1 99 int id; 100 int last_detect_state; 101 }; 102 103 static struct workqueue_struct *dw_mci_card_workqueue; 104 105 #if defined(CONFIG_DEBUG_FS) 106 static int dw_mci_req_show(struct seq_file *s, void *v) 107 { 108 struct dw_mci_slot *slot = s->private; 109 struct mmc_request *mrq; 110 struct mmc_command *cmd; 111 struct mmc_command *stop; 112 struct mmc_data *data; 113 114 /* Make sure we get a consistent snapshot */ 115 spin_lock_bh(&slot->host->lock); 116 mrq = slot->mrq; 117 118 if (mrq) { 119 cmd = mrq->cmd; 120 data = mrq->data; 121 stop = mrq->stop; 122 123 if (cmd) 124 seq_printf(s, 125 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 126 cmd->opcode, cmd->arg, cmd->flags, 127 cmd->resp[0], cmd->resp[1], cmd->resp[2], 128 cmd->resp[2], cmd->error); 129 if (data) 130 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", 131 data->bytes_xfered, data->blocks, 132 data->blksz, data->flags, data->error); 133 if (stop) 134 seq_printf(s, 135 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 136 stop->opcode, stop->arg, stop->flags, 137 stop->resp[0], stop->resp[1], stop->resp[2], 138 stop->resp[2], stop->error); 139 } 140 141 spin_unlock_bh(&slot->host->lock); 142 143 return 0; 144 } 145 146 static int dw_mci_req_open(struct inode *inode, struct file *file) 147 { 148 return single_open(file, dw_mci_req_show, inode->i_private); 149 } 150 151 static const struct file_operations dw_mci_req_fops = { 152 .owner = THIS_MODULE, 153 .open = dw_mci_req_open, 154 .read = seq_read, 155 .llseek = seq_lseek, 156 .release = single_release, 157 }; 158 159 static int dw_mci_regs_show(struct seq_file *s, void *v) 160 { 161 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS); 162 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS); 163 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD); 164 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL); 165 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK); 166 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA); 167 168 return 0; 169 } 170 171 static int dw_mci_regs_open(struct inode *inode, struct file *file) 172 { 173 return single_open(file, dw_mci_regs_show, inode->i_private); 174 } 175 176 static const struct file_operations dw_mci_regs_fops = { 177 .owner = THIS_MODULE, 178 .open = dw_mci_regs_open, 179 .read = seq_read, 180 .llseek = seq_lseek, 181 .release = single_release, 182 }; 183 184 static void dw_mci_init_debugfs(struct dw_mci_slot *slot) 185 { 186 struct mmc_host *mmc = slot->mmc; 187 struct dw_mci *host = slot->host; 188 struct dentry *root; 189 struct dentry *node; 190 191 root = mmc->debugfs_root; 192 if (!root) 193 return; 194 195 node = debugfs_create_file("regs", S_IRUSR, root, host, 196 &dw_mci_regs_fops); 197 if (!node) 198 goto err; 199 200 node = debugfs_create_file("req", S_IRUSR, root, slot, 201 &dw_mci_req_fops); 202 if (!node) 203 goto err; 204 205 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state); 206 if (!node) 207 goto err; 208 209 node = debugfs_create_x32("pending_events", S_IRUSR, root, 210 (u32 *)&host->pending_events); 211 if (!node) 212 goto err; 213 214 node = debugfs_create_x32("completed_events", S_IRUSR, root, 215 (u32 *)&host->completed_events); 216 if (!node) 217 goto err; 218 219 return; 220 221 err: 222 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n"); 223 } 224 #endif /* defined(CONFIG_DEBUG_FS) */ 225 226 static void dw_mci_set_timeout(struct dw_mci *host) 227 { 228 /* timeout (maximum) */ 229 mci_writel(host, TMOUT, 0xffffffff); 230 } 231 232 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) 233 { 234 struct mmc_data *data; 235 u32 cmdr; 236 cmd->error = -EINPROGRESS; 237 238 cmdr = cmd->opcode; 239 240 if (cmdr == MMC_STOP_TRANSMISSION) 241 cmdr |= SDMMC_CMD_STOP; 242 else 243 cmdr |= SDMMC_CMD_PRV_DAT_WAIT; 244 245 if (cmd->flags & MMC_RSP_PRESENT) { 246 /* We expect a response, so set this bit */ 247 cmdr |= SDMMC_CMD_RESP_EXP; 248 if (cmd->flags & MMC_RSP_136) 249 cmdr |= SDMMC_CMD_RESP_LONG; 250 } 251 252 if (cmd->flags & MMC_RSP_CRC) 253 cmdr |= SDMMC_CMD_RESP_CRC; 254 255 data = cmd->data; 256 if (data) { 257 cmdr |= SDMMC_CMD_DAT_EXP; 258 if (data->flags & MMC_DATA_STREAM) 259 cmdr |= SDMMC_CMD_STRM_MODE; 260 if (data->flags & MMC_DATA_WRITE) 261 cmdr |= SDMMC_CMD_DAT_WR; 262 } 263 264 return cmdr; 265 } 266 267 static void dw_mci_start_command(struct dw_mci *host, 268 struct mmc_command *cmd, u32 cmd_flags) 269 { 270 host->cmd = cmd; 271 dev_vdbg(&host->pdev->dev, 272 "start command: ARGR=0x%08x CMDR=0x%08x\n", 273 cmd->arg, cmd_flags); 274 275 mci_writel(host, CMDARG, cmd->arg); 276 wmb(); 277 278 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); 279 } 280 281 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data) 282 { 283 dw_mci_start_command(host, data->stop, host->stop_cmdr); 284 } 285 286 /* DMA interface functions */ 287 static void dw_mci_stop_dma(struct dw_mci *host) 288 { 289 if (host->using_dma) { 290 host->dma_ops->stop(host); 291 host->dma_ops->cleanup(host); 292 } else { 293 /* Data transfer was stopped by the interrupt handler */ 294 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 295 } 296 } 297 298 #ifdef CONFIG_MMC_DW_IDMAC 299 static void dw_mci_dma_cleanup(struct dw_mci *host) 300 { 301 struct mmc_data *data = host->data; 302 303 if (data) 304 dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len, 305 ((data->flags & MMC_DATA_WRITE) 306 ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); 307 } 308 309 static void dw_mci_idmac_stop_dma(struct dw_mci *host) 310 { 311 u32 temp; 312 313 /* Disable and reset the IDMAC interface */ 314 temp = mci_readl(host, CTRL); 315 temp &= ~SDMMC_CTRL_USE_IDMAC; 316 temp |= SDMMC_CTRL_DMA_RESET; 317 mci_writel(host, CTRL, temp); 318 319 /* Stop the IDMAC running */ 320 temp = mci_readl(host, BMOD); 321 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); 322 mci_writel(host, BMOD, temp); 323 } 324 325 static void dw_mci_idmac_complete_dma(struct dw_mci *host) 326 { 327 struct mmc_data *data = host->data; 328 329 dev_vdbg(&host->pdev->dev, "DMA complete\n"); 330 331 host->dma_ops->cleanup(host); 332 333 /* 334 * If the card was removed, data will be NULL. No point in trying to 335 * send the stop command or waiting for NBUSY in this case. 336 */ 337 if (data) { 338 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 339 tasklet_schedule(&host->tasklet); 340 } 341 } 342 343 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data, 344 unsigned int sg_len) 345 { 346 int i; 347 struct idmac_desc *desc = host->sg_cpu; 348 349 for (i = 0; i < sg_len; i++, desc++) { 350 unsigned int length = sg_dma_len(&data->sg[i]); 351 u32 mem_addr = sg_dma_address(&data->sg[i]); 352 353 /* Set the OWN bit and disable interrupts for this descriptor */ 354 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH; 355 356 /* Buffer length */ 357 IDMAC_SET_BUFFER1_SIZE(desc, length); 358 359 /* Physical address to DMA to/from */ 360 desc->des2 = mem_addr; 361 } 362 363 /* Set first descriptor */ 364 desc = host->sg_cpu; 365 desc->des0 |= IDMAC_DES0_FD; 366 367 /* Set last descriptor */ 368 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc); 369 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC); 370 desc->des0 |= IDMAC_DES0_LD; 371 372 wmb(); 373 } 374 375 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) 376 { 377 u32 temp; 378 379 dw_mci_translate_sglist(host, host->data, sg_len); 380 381 /* Select IDMAC interface */ 382 temp = mci_readl(host, CTRL); 383 temp |= SDMMC_CTRL_USE_IDMAC; 384 mci_writel(host, CTRL, temp); 385 386 wmb(); 387 388 /* Enable the IDMAC */ 389 temp = mci_readl(host, BMOD); 390 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB; 391 mci_writel(host, BMOD, temp); 392 393 /* Start it running */ 394 mci_writel(host, PLDMND, 1); 395 } 396 397 static int dw_mci_idmac_init(struct dw_mci *host) 398 { 399 struct idmac_desc *p; 400 int i; 401 402 /* Number of descriptors in the ring buffer */ 403 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc); 404 405 /* Forward link the descriptor list */ 406 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) 407 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1)); 408 409 /* Set the last descriptor as the end-of-ring descriptor */ 410 p->des3 = host->sg_dma; 411 p->des0 = IDMAC_DES0_ER; 412 413 /* Mask out interrupts - get Tx & Rx complete only */ 414 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI | 415 SDMMC_IDMAC_INT_TI); 416 417 /* Set the descriptor base address */ 418 mci_writel(host, DBADDR, host->sg_dma); 419 return 0; 420 } 421 422 static struct dw_mci_dma_ops dw_mci_idmac_ops = { 423 .init = dw_mci_idmac_init, 424 .start = dw_mci_idmac_start_dma, 425 .stop = dw_mci_idmac_stop_dma, 426 .complete = dw_mci_idmac_complete_dma, 427 .cleanup = dw_mci_dma_cleanup, 428 }; 429 #endif /* CONFIG_MMC_DW_IDMAC */ 430 431 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) 432 { 433 struct scatterlist *sg; 434 unsigned int i, direction, sg_len; 435 u32 temp; 436 437 host->using_dma = 0; 438 439 /* If we don't have a channel, we can't do DMA */ 440 if (!host->use_dma) 441 return -ENODEV; 442 443 /* 444 * We don't do DMA on "complex" transfers, i.e. with 445 * non-word-aligned buffers or lengths. Also, we don't bother 446 * with all the DMA setup overhead for short transfers. 447 */ 448 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD) 449 return -EINVAL; 450 if (data->blksz & 3) 451 return -EINVAL; 452 453 for_each_sg(data->sg, sg, data->sg_len, i) { 454 if (sg->offset & 3 || sg->length & 3) 455 return -EINVAL; 456 } 457 458 host->using_dma = 1; 459 460 if (data->flags & MMC_DATA_READ) 461 direction = DMA_FROM_DEVICE; 462 else 463 direction = DMA_TO_DEVICE; 464 465 sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, 466 direction); 467 468 dev_vdbg(&host->pdev->dev, 469 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", 470 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma, 471 sg_len); 472 473 /* Enable the DMA interface */ 474 temp = mci_readl(host, CTRL); 475 temp |= SDMMC_CTRL_DMA_ENABLE; 476 mci_writel(host, CTRL, temp); 477 478 /* Disable RX/TX IRQs, let DMA handle it */ 479 temp = mci_readl(host, INTMASK); 480 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR); 481 mci_writel(host, INTMASK, temp); 482 483 host->dma_ops->start(host, sg_len); 484 485 return 0; 486 } 487 488 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) 489 { 490 u32 temp; 491 492 data->error = -EINPROGRESS; 493 494 WARN_ON(host->data); 495 host->sg = NULL; 496 host->data = data; 497 498 if (data->flags & MMC_DATA_READ) 499 host->dir_status = DW_MCI_RECV_STATUS; 500 else 501 host->dir_status = DW_MCI_SEND_STATUS; 502 503 if (dw_mci_submit_data_dma(host, data)) { 504 int flags = SG_MITER_ATOMIC; 505 if (host->data->flags & MMC_DATA_READ) 506 flags |= SG_MITER_TO_SG; 507 else 508 flags |= SG_MITER_FROM_SG; 509 510 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 511 host->sg = data->sg; 512 host->part_buf_start = 0; 513 host->part_buf_count = 0; 514 515 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR); 516 temp = mci_readl(host, INTMASK); 517 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR; 518 mci_writel(host, INTMASK, temp); 519 520 temp = mci_readl(host, CTRL); 521 temp &= ~SDMMC_CTRL_DMA_ENABLE; 522 mci_writel(host, CTRL, temp); 523 } 524 } 525 526 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg) 527 { 528 struct dw_mci *host = slot->host; 529 unsigned long timeout = jiffies + msecs_to_jiffies(500); 530 unsigned int cmd_status = 0; 531 532 mci_writel(host, CMDARG, arg); 533 wmb(); 534 mci_writel(host, CMD, SDMMC_CMD_START | cmd); 535 536 while (time_before(jiffies, timeout)) { 537 cmd_status = mci_readl(host, CMD); 538 if (!(cmd_status & SDMMC_CMD_START)) 539 return; 540 } 541 dev_err(&slot->mmc->class_dev, 542 "Timeout sending command (cmd %#x arg %#x status %#x)\n", 543 cmd, arg, cmd_status); 544 } 545 546 static void dw_mci_setup_bus(struct dw_mci_slot *slot) 547 { 548 struct dw_mci *host = slot->host; 549 u32 div; 550 551 if (slot->clock != host->current_speed) { 552 if (host->bus_hz % slot->clock) 553 /* 554 * move the + 1 after the divide to prevent 555 * over-clocking the card. 556 */ 557 div = ((host->bus_hz / slot->clock) >> 1) + 1; 558 else 559 div = (host->bus_hz / slot->clock) >> 1; 560 561 dev_info(&slot->mmc->class_dev, 562 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ" 563 " div = %d)\n", slot->id, host->bus_hz, slot->clock, 564 div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div); 565 566 /* disable clock */ 567 mci_writel(host, CLKENA, 0); 568 mci_writel(host, CLKSRC, 0); 569 570 /* inform CIU */ 571 mci_send_cmd(slot, 572 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); 573 574 /* set clock to desired speed */ 575 mci_writel(host, CLKDIV, div); 576 577 /* inform CIU */ 578 mci_send_cmd(slot, 579 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); 580 581 /* enable clock */ 582 mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE | 583 SDMMC_CLKEN_LOW_PWR); 584 585 /* inform CIU */ 586 mci_send_cmd(slot, 587 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); 588 589 host->current_speed = slot->clock; 590 } 591 592 /* Set the current slot bus width */ 593 mci_writel(host, CTYPE, (slot->ctype << slot->id)); 594 } 595 596 static void __dw_mci_start_request(struct dw_mci *host, 597 struct dw_mci_slot *slot, 598 struct mmc_command *cmd) 599 { 600 struct mmc_request *mrq; 601 struct mmc_data *data; 602 u32 cmdflags; 603 604 mrq = slot->mrq; 605 if (host->pdata->select_slot) 606 host->pdata->select_slot(slot->id); 607 608 /* Slot specific timing and width adjustment */ 609 dw_mci_setup_bus(slot); 610 611 host->cur_slot = slot; 612 host->mrq = mrq; 613 614 host->pending_events = 0; 615 host->completed_events = 0; 616 host->data_status = 0; 617 618 data = cmd->data; 619 if (data) { 620 dw_mci_set_timeout(host); 621 mci_writel(host, BYTCNT, data->blksz*data->blocks); 622 mci_writel(host, BLKSIZ, data->blksz); 623 } 624 625 cmdflags = dw_mci_prepare_command(slot->mmc, cmd); 626 627 /* this is the first command, send the initialization clock */ 628 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags)) 629 cmdflags |= SDMMC_CMD_INIT; 630 631 if (data) { 632 dw_mci_submit_data(host, data); 633 wmb(); 634 } 635 636 dw_mci_start_command(host, cmd, cmdflags); 637 638 if (mrq->stop) 639 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop); 640 } 641 642 static void dw_mci_start_request(struct dw_mci *host, 643 struct dw_mci_slot *slot) 644 { 645 struct mmc_request *mrq = slot->mrq; 646 struct mmc_command *cmd; 647 648 cmd = mrq->sbc ? mrq->sbc : mrq->cmd; 649 __dw_mci_start_request(host, slot, cmd); 650 } 651 652 /* must be called with host->lock held */ 653 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, 654 struct mmc_request *mrq) 655 { 656 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", 657 host->state); 658 659 slot->mrq = mrq; 660 661 if (host->state == STATE_IDLE) { 662 host->state = STATE_SENDING_CMD; 663 dw_mci_start_request(host, slot); 664 } else { 665 list_add_tail(&slot->queue_node, &host->queue); 666 } 667 } 668 669 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) 670 { 671 struct dw_mci_slot *slot = mmc_priv(mmc); 672 struct dw_mci *host = slot->host; 673 674 WARN_ON(slot->mrq); 675 676 /* 677 * The check for card presence and queueing of the request must be 678 * atomic, otherwise the card could be removed in between and the 679 * request wouldn't fail until another card was inserted. 680 */ 681 spin_lock_bh(&host->lock); 682 683 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) { 684 spin_unlock_bh(&host->lock); 685 mrq->cmd->error = -ENOMEDIUM; 686 mmc_request_done(mmc, mrq); 687 return; 688 } 689 690 dw_mci_queue_request(host, slot, mrq); 691 692 spin_unlock_bh(&host->lock); 693 } 694 695 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 696 { 697 struct dw_mci_slot *slot = mmc_priv(mmc); 698 u32 regs; 699 700 /* set default 1 bit mode */ 701 slot->ctype = SDMMC_CTYPE_1BIT; 702 703 switch (ios->bus_width) { 704 case MMC_BUS_WIDTH_1: 705 slot->ctype = SDMMC_CTYPE_1BIT; 706 break; 707 case MMC_BUS_WIDTH_4: 708 slot->ctype = SDMMC_CTYPE_4BIT; 709 break; 710 case MMC_BUS_WIDTH_8: 711 slot->ctype = SDMMC_CTYPE_8BIT; 712 break; 713 } 714 715 regs = mci_readl(slot->host, UHS_REG); 716 717 /* DDR mode set */ 718 if (ios->timing == MMC_TIMING_UHS_DDR50) 719 regs |= (0x1 << slot->id) << 16; 720 else 721 regs &= ~(0x1 << slot->id) << 16; 722 723 mci_writel(slot->host, UHS_REG, regs); 724 725 if (ios->clock) { 726 /* 727 * Use mirror of ios->clock to prevent race with mmc 728 * core ios update when finding the minimum. 729 */ 730 slot->clock = ios->clock; 731 } 732 733 switch (ios->power_mode) { 734 case MMC_POWER_UP: 735 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags); 736 break; 737 default: 738 break; 739 } 740 } 741 742 static int dw_mci_get_ro(struct mmc_host *mmc) 743 { 744 int read_only; 745 struct dw_mci_slot *slot = mmc_priv(mmc); 746 struct dw_mci_board *brd = slot->host->pdata; 747 748 /* Use platform get_ro function, else try on board write protect */ 749 if (brd->get_ro) 750 read_only = brd->get_ro(slot->id); 751 else 752 read_only = 753 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0; 754 755 dev_dbg(&mmc->class_dev, "card is %s\n", 756 read_only ? "read-only" : "read-write"); 757 758 return read_only; 759 } 760 761 static int dw_mci_get_cd(struct mmc_host *mmc) 762 { 763 int present; 764 struct dw_mci_slot *slot = mmc_priv(mmc); 765 struct dw_mci_board *brd = slot->host->pdata; 766 767 /* Use platform get_cd function, else try onboard card detect */ 768 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) 769 present = 1; 770 else if (brd->get_cd) 771 present = !brd->get_cd(slot->id); 772 else 773 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id)) 774 == 0 ? 1 : 0; 775 776 if (present) 777 dev_dbg(&mmc->class_dev, "card is present\n"); 778 else 779 dev_dbg(&mmc->class_dev, "card is not present\n"); 780 781 return present; 782 } 783 784 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) 785 { 786 struct dw_mci_slot *slot = mmc_priv(mmc); 787 struct dw_mci *host = slot->host; 788 u32 int_mask; 789 790 /* Enable/disable Slot Specific SDIO interrupt */ 791 int_mask = mci_readl(host, INTMASK); 792 if (enb) { 793 mci_writel(host, INTMASK, 794 (int_mask | (1 << SDMMC_INT_SDIO(slot->id)))); 795 } else { 796 mci_writel(host, INTMASK, 797 (int_mask & ~(1 << SDMMC_INT_SDIO(slot->id)))); 798 } 799 } 800 801 static const struct mmc_host_ops dw_mci_ops = { 802 .request = dw_mci_request, 803 .set_ios = dw_mci_set_ios, 804 .get_ro = dw_mci_get_ro, 805 .get_cd = dw_mci_get_cd, 806 .enable_sdio_irq = dw_mci_enable_sdio_irq, 807 }; 808 809 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) 810 __releases(&host->lock) 811 __acquires(&host->lock) 812 { 813 struct dw_mci_slot *slot; 814 struct mmc_host *prev_mmc = host->cur_slot->mmc; 815 816 WARN_ON(host->cmd || host->data); 817 818 host->cur_slot->mrq = NULL; 819 host->mrq = NULL; 820 if (!list_empty(&host->queue)) { 821 slot = list_entry(host->queue.next, 822 struct dw_mci_slot, queue_node); 823 list_del(&slot->queue_node); 824 dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n", 825 mmc_hostname(slot->mmc)); 826 host->state = STATE_SENDING_CMD; 827 dw_mci_start_request(host, slot); 828 } else { 829 dev_vdbg(&host->pdev->dev, "list empty\n"); 830 host->state = STATE_IDLE; 831 } 832 833 spin_unlock(&host->lock); 834 mmc_request_done(prev_mmc, mrq); 835 spin_lock(&host->lock); 836 } 837 838 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd) 839 { 840 u32 status = host->cmd_status; 841 842 host->cmd_status = 0; 843 844 /* Read the response from the card (up to 16 bytes) */ 845 if (cmd->flags & MMC_RSP_PRESENT) { 846 if (cmd->flags & MMC_RSP_136) { 847 cmd->resp[3] = mci_readl(host, RESP0); 848 cmd->resp[2] = mci_readl(host, RESP1); 849 cmd->resp[1] = mci_readl(host, RESP2); 850 cmd->resp[0] = mci_readl(host, RESP3); 851 } else { 852 cmd->resp[0] = mci_readl(host, RESP0); 853 cmd->resp[1] = 0; 854 cmd->resp[2] = 0; 855 cmd->resp[3] = 0; 856 } 857 } 858 859 if (status & SDMMC_INT_RTO) 860 cmd->error = -ETIMEDOUT; 861 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)) 862 cmd->error = -EILSEQ; 863 else if (status & SDMMC_INT_RESP_ERR) 864 cmd->error = -EIO; 865 else 866 cmd->error = 0; 867 868 if (cmd->error) { 869 /* newer ip versions need a delay between retries */ 870 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY) 871 mdelay(20); 872 873 if (cmd->data) { 874 host->data = NULL; 875 dw_mci_stop_dma(host); 876 } 877 } 878 } 879 880 static void dw_mci_tasklet_func(unsigned long priv) 881 { 882 struct dw_mci *host = (struct dw_mci *)priv; 883 struct mmc_data *data; 884 struct mmc_command *cmd; 885 enum dw_mci_state state; 886 enum dw_mci_state prev_state; 887 u32 status, ctrl; 888 889 spin_lock(&host->lock); 890 891 state = host->state; 892 data = host->data; 893 894 do { 895 prev_state = state; 896 897 switch (state) { 898 case STATE_IDLE: 899 break; 900 901 case STATE_SENDING_CMD: 902 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, 903 &host->pending_events)) 904 break; 905 906 cmd = host->cmd; 907 host->cmd = NULL; 908 set_bit(EVENT_CMD_COMPLETE, &host->completed_events); 909 dw_mci_command_complete(host, cmd); 910 if (cmd == host->mrq->sbc && !cmd->error) { 911 prev_state = state = STATE_SENDING_CMD; 912 __dw_mci_start_request(host, host->cur_slot, 913 host->mrq->cmd); 914 goto unlock; 915 } 916 917 if (!host->mrq->data || cmd->error) { 918 dw_mci_request_end(host, host->mrq); 919 goto unlock; 920 } 921 922 prev_state = state = STATE_SENDING_DATA; 923 /* fall through */ 924 925 case STATE_SENDING_DATA: 926 if (test_and_clear_bit(EVENT_DATA_ERROR, 927 &host->pending_events)) { 928 dw_mci_stop_dma(host); 929 if (data->stop) 930 send_stop_cmd(host, data); 931 state = STATE_DATA_ERROR; 932 break; 933 } 934 935 if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 936 &host->pending_events)) 937 break; 938 939 set_bit(EVENT_XFER_COMPLETE, &host->completed_events); 940 prev_state = state = STATE_DATA_BUSY; 941 /* fall through */ 942 943 case STATE_DATA_BUSY: 944 if (!test_and_clear_bit(EVENT_DATA_COMPLETE, 945 &host->pending_events)) 946 break; 947 948 host->data = NULL; 949 set_bit(EVENT_DATA_COMPLETE, &host->completed_events); 950 status = host->data_status; 951 952 if (status & DW_MCI_DATA_ERROR_FLAGS) { 953 if (status & SDMMC_INT_DTO) { 954 data->error = -ETIMEDOUT; 955 } else if (status & SDMMC_INT_DCRC) { 956 data->error = -EILSEQ; 957 } else if (status & SDMMC_INT_EBE && 958 host->dir_status == 959 DW_MCI_SEND_STATUS) { 960 /* 961 * No data CRC status was returned. 962 * The number of bytes transferred will 963 * be exaggerated in PIO mode. 964 */ 965 data->bytes_xfered = 0; 966 data->error = -ETIMEDOUT; 967 } else { 968 dev_err(&host->pdev->dev, 969 "data FIFO error " 970 "(status=%08x)\n", 971 status); 972 data->error = -EIO; 973 } 974 /* 975 * After an error, there may be data lingering 976 * in the FIFO, so reset it - doing so 977 * generates a block interrupt, hence setting 978 * the scatter-gather pointer to NULL. 979 */ 980 sg_miter_stop(&host->sg_miter); 981 host->sg = NULL; 982 ctrl = mci_readl(host, CTRL); 983 ctrl |= SDMMC_CTRL_FIFO_RESET; 984 mci_writel(host, CTRL, ctrl); 985 } else { 986 data->bytes_xfered = data->blocks * data->blksz; 987 data->error = 0; 988 } 989 990 if (!data->stop) { 991 dw_mci_request_end(host, host->mrq); 992 goto unlock; 993 } 994 995 if (host->mrq->sbc && !data->error) { 996 data->stop->error = 0; 997 dw_mci_request_end(host, host->mrq); 998 goto unlock; 999 } 1000 1001 prev_state = state = STATE_SENDING_STOP; 1002 if (!data->error) 1003 send_stop_cmd(host, data); 1004 /* fall through */ 1005 1006 case STATE_SENDING_STOP: 1007 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, 1008 &host->pending_events)) 1009 break; 1010 1011 host->cmd = NULL; 1012 dw_mci_command_complete(host, host->mrq->stop); 1013 dw_mci_request_end(host, host->mrq); 1014 goto unlock; 1015 1016 case STATE_DATA_ERROR: 1017 if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 1018 &host->pending_events)) 1019 break; 1020 1021 state = STATE_DATA_BUSY; 1022 break; 1023 } 1024 } while (state != prev_state); 1025 1026 host->state = state; 1027 unlock: 1028 spin_unlock(&host->lock); 1029 1030 } 1031 1032 /* push final bytes to part_buf, only use during push */ 1033 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt) 1034 { 1035 memcpy((void *)&host->part_buf, buf, cnt); 1036 host->part_buf_count = cnt; 1037 } 1038 1039 /* append bytes to part_buf, only use during push */ 1040 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt) 1041 { 1042 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count); 1043 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt); 1044 host->part_buf_count += cnt; 1045 return cnt; 1046 } 1047 1048 /* pull first bytes from part_buf, only use during pull */ 1049 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt) 1050 { 1051 cnt = min(cnt, (int)host->part_buf_count); 1052 if (cnt) { 1053 memcpy(buf, (void *)&host->part_buf + host->part_buf_start, 1054 cnt); 1055 host->part_buf_count -= cnt; 1056 host->part_buf_start += cnt; 1057 } 1058 return cnt; 1059 } 1060 1061 /* pull final bytes from the part_buf, assuming it's just been filled */ 1062 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt) 1063 { 1064 memcpy(buf, &host->part_buf, cnt); 1065 host->part_buf_start = cnt; 1066 host->part_buf_count = (1 << host->data_shift) - cnt; 1067 } 1068 1069 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) 1070 { 1071 /* try and push anything in the part_buf */ 1072 if (unlikely(host->part_buf_count)) { 1073 int len = dw_mci_push_part_bytes(host, buf, cnt); 1074 buf += len; 1075 cnt -= len; 1076 if (!sg_next(host->sg) || host->part_buf_count == 2) { 1077 mci_writew(host, DATA(host->data_offset), 1078 host->part_buf16); 1079 host->part_buf_count = 0; 1080 } 1081 } 1082 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1083 if (unlikely((unsigned long)buf & 0x1)) { 1084 while (cnt >= 2) { 1085 u16 aligned_buf[64]; 1086 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 1087 int items = len >> 1; 1088 int i; 1089 /* memcpy from input buffer into aligned buffer */ 1090 memcpy(aligned_buf, buf, len); 1091 buf += len; 1092 cnt -= len; 1093 /* push data from aligned buffer into fifo */ 1094 for (i = 0; i < items; ++i) 1095 mci_writew(host, DATA(host->data_offset), 1096 aligned_buf[i]); 1097 } 1098 } else 1099 #endif 1100 { 1101 u16 *pdata = buf; 1102 for (; cnt >= 2; cnt -= 2) 1103 mci_writew(host, DATA(host->data_offset), *pdata++); 1104 buf = pdata; 1105 } 1106 /* put anything remaining in the part_buf */ 1107 if (cnt) { 1108 dw_mci_set_part_bytes(host, buf, cnt); 1109 if (!sg_next(host->sg)) 1110 mci_writew(host, DATA(host->data_offset), 1111 host->part_buf16); 1112 } 1113 } 1114 1115 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) 1116 { 1117 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1118 if (unlikely((unsigned long)buf & 0x1)) { 1119 while (cnt >= 2) { 1120 /* pull data from fifo into aligned buffer */ 1121 u16 aligned_buf[64]; 1122 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 1123 int items = len >> 1; 1124 int i; 1125 for (i = 0; i < items; ++i) 1126 aligned_buf[i] = mci_readw(host, 1127 DATA(host->data_offset)); 1128 /* memcpy from aligned buffer into output buffer */ 1129 memcpy(buf, aligned_buf, len); 1130 buf += len; 1131 cnt -= len; 1132 } 1133 } else 1134 #endif 1135 { 1136 u16 *pdata = buf; 1137 for (; cnt >= 2; cnt -= 2) 1138 *pdata++ = mci_readw(host, DATA(host->data_offset)); 1139 buf = pdata; 1140 } 1141 if (cnt) { 1142 host->part_buf16 = mci_readw(host, DATA(host->data_offset)); 1143 dw_mci_pull_final_bytes(host, buf, cnt); 1144 } 1145 } 1146 1147 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) 1148 { 1149 /* try and push anything in the part_buf */ 1150 if (unlikely(host->part_buf_count)) { 1151 int len = dw_mci_push_part_bytes(host, buf, cnt); 1152 buf += len; 1153 cnt -= len; 1154 if (!sg_next(host->sg) || host->part_buf_count == 4) { 1155 mci_writel(host, DATA(host->data_offset), 1156 host->part_buf32); 1157 host->part_buf_count = 0; 1158 } 1159 } 1160 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1161 if (unlikely((unsigned long)buf & 0x3)) { 1162 while (cnt >= 4) { 1163 u32 aligned_buf[32]; 1164 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 1165 int items = len >> 2; 1166 int i; 1167 /* memcpy from input buffer into aligned buffer */ 1168 memcpy(aligned_buf, buf, len); 1169 buf += len; 1170 cnt -= len; 1171 /* push data from aligned buffer into fifo */ 1172 for (i = 0; i < items; ++i) 1173 mci_writel(host, DATA(host->data_offset), 1174 aligned_buf[i]); 1175 } 1176 } else 1177 #endif 1178 { 1179 u32 *pdata = buf; 1180 for (; cnt >= 4; cnt -= 4) 1181 mci_writel(host, DATA(host->data_offset), *pdata++); 1182 buf = pdata; 1183 } 1184 /* put anything remaining in the part_buf */ 1185 if (cnt) { 1186 dw_mci_set_part_bytes(host, buf, cnt); 1187 if (!sg_next(host->sg)) 1188 mci_writel(host, DATA(host->data_offset), 1189 host->part_buf32); 1190 } 1191 } 1192 1193 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) 1194 { 1195 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1196 if (unlikely((unsigned long)buf & 0x3)) { 1197 while (cnt >= 4) { 1198 /* pull data from fifo into aligned buffer */ 1199 u32 aligned_buf[32]; 1200 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 1201 int items = len >> 2; 1202 int i; 1203 for (i = 0; i < items; ++i) 1204 aligned_buf[i] = mci_readl(host, 1205 DATA(host->data_offset)); 1206 /* memcpy from aligned buffer into output buffer */ 1207 memcpy(buf, aligned_buf, len); 1208 buf += len; 1209 cnt -= len; 1210 } 1211 } else 1212 #endif 1213 { 1214 u32 *pdata = buf; 1215 for (; cnt >= 4; cnt -= 4) 1216 *pdata++ = mci_readl(host, DATA(host->data_offset)); 1217 buf = pdata; 1218 } 1219 if (cnt) { 1220 host->part_buf32 = mci_readl(host, DATA(host->data_offset)); 1221 dw_mci_pull_final_bytes(host, buf, cnt); 1222 } 1223 } 1224 1225 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) 1226 { 1227 /* try and push anything in the part_buf */ 1228 if (unlikely(host->part_buf_count)) { 1229 int len = dw_mci_push_part_bytes(host, buf, cnt); 1230 buf += len; 1231 cnt -= len; 1232 if (!sg_next(host->sg) || host->part_buf_count == 8) { 1233 mci_writew(host, DATA(host->data_offset), 1234 host->part_buf); 1235 host->part_buf_count = 0; 1236 } 1237 } 1238 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1239 if (unlikely((unsigned long)buf & 0x7)) { 1240 while (cnt >= 8) { 1241 u64 aligned_buf[16]; 1242 int len = min(cnt & -8, (int)sizeof(aligned_buf)); 1243 int items = len >> 3; 1244 int i; 1245 /* memcpy from input buffer into aligned buffer */ 1246 memcpy(aligned_buf, buf, len); 1247 buf += len; 1248 cnt -= len; 1249 /* push data from aligned buffer into fifo */ 1250 for (i = 0; i < items; ++i) 1251 mci_writeq(host, DATA(host->data_offset), 1252 aligned_buf[i]); 1253 } 1254 } else 1255 #endif 1256 { 1257 u64 *pdata = buf; 1258 for (; cnt >= 8; cnt -= 8) 1259 mci_writeq(host, DATA(host->data_offset), *pdata++); 1260 buf = pdata; 1261 } 1262 /* put anything remaining in the part_buf */ 1263 if (cnt) { 1264 dw_mci_set_part_bytes(host, buf, cnt); 1265 if (!sg_next(host->sg)) 1266 mci_writeq(host, DATA(host->data_offset), 1267 host->part_buf); 1268 } 1269 } 1270 1271 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) 1272 { 1273 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1274 if (unlikely((unsigned long)buf & 0x7)) { 1275 while (cnt >= 8) { 1276 /* pull data from fifo into aligned buffer */ 1277 u64 aligned_buf[16]; 1278 int len = min(cnt & -8, (int)sizeof(aligned_buf)); 1279 int items = len >> 3; 1280 int i; 1281 for (i = 0; i < items; ++i) 1282 aligned_buf[i] = mci_readq(host, 1283 DATA(host->data_offset)); 1284 /* memcpy from aligned buffer into output buffer */ 1285 memcpy(buf, aligned_buf, len); 1286 buf += len; 1287 cnt -= len; 1288 } 1289 } else 1290 #endif 1291 { 1292 u64 *pdata = buf; 1293 for (; cnt >= 8; cnt -= 8) 1294 *pdata++ = mci_readq(host, DATA(host->data_offset)); 1295 buf = pdata; 1296 } 1297 if (cnt) { 1298 host->part_buf = mci_readq(host, DATA(host->data_offset)); 1299 dw_mci_pull_final_bytes(host, buf, cnt); 1300 } 1301 } 1302 1303 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) 1304 { 1305 int len; 1306 1307 /* get remaining partial bytes */ 1308 len = dw_mci_pull_part_bytes(host, buf, cnt); 1309 if (unlikely(len == cnt)) 1310 return; 1311 buf += len; 1312 cnt -= len; 1313 1314 /* get the rest of the data */ 1315 host->pull_data(host, buf, cnt); 1316 } 1317 1318 static void dw_mci_read_data_pio(struct dw_mci *host) 1319 { 1320 struct sg_mapping_iter *sg_miter = &host->sg_miter; 1321 void *buf; 1322 unsigned int offset; 1323 struct mmc_data *data = host->data; 1324 int shift = host->data_shift; 1325 u32 status; 1326 unsigned int nbytes = 0, len; 1327 unsigned int remain, fcnt; 1328 1329 do { 1330 if (!sg_miter_next(sg_miter)) 1331 goto done; 1332 1333 host->sg = sg_miter->__sg; 1334 buf = sg_miter->addr; 1335 remain = sg_miter->length; 1336 offset = 0; 1337 1338 do { 1339 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS)) 1340 << shift) + host->part_buf_count; 1341 len = min(remain, fcnt); 1342 if (!len) 1343 break; 1344 dw_mci_pull_data(host, (void *)(buf + offset), len); 1345 offset += len; 1346 nbytes += len; 1347 remain -= len; 1348 } while (remain); 1349 sg_miter->consumed = offset; 1350 1351 status = mci_readl(host, MINTSTS); 1352 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 1353 if (status & DW_MCI_DATA_ERROR_FLAGS) { 1354 host->data_status = status; 1355 data->bytes_xfered += nbytes; 1356 sg_miter_stop(sg_miter); 1357 host->sg = NULL; 1358 smp_wmb(); 1359 1360 set_bit(EVENT_DATA_ERROR, &host->pending_events); 1361 1362 tasklet_schedule(&host->tasklet); 1363 return; 1364 } 1365 } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ 1366 data->bytes_xfered += nbytes; 1367 1368 if (!remain) { 1369 if (!sg_miter_next(sg_miter)) 1370 goto done; 1371 sg_miter->consumed = 0; 1372 } 1373 sg_miter_stop(sg_miter); 1374 return; 1375 1376 done: 1377 data->bytes_xfered += nbytes; 1378 sg_miter_stop(sg_miter); 1379 host->sg = NULL; 1380 smp_wmb(); 1381 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 1382 } 1383 1384 static void dw_mci_write_data_pio(struct dw_mci *host) 1385 { 1386 struct sg_mapping_iter *sg_miter = &host->sg_miter; 1387 void *buf; 1388 unsigned int offset; 1389 struct mmc_data *data = host->data; 1390 int shift = host->data_shift; 1391 u32 status; 1392 unsigned int nbytes = 0, len; 1393 unsigned int fifo_depth = host->fifo_depth; 1394 unsigned int remain, fcnt; 1395 1396 do { 1397 if (!sg_miter_next(sg_miter)) 1398 goto done; 1399 1400 host->sg = sg_miter->__sg; 1401 buf = sg_miter->addr; 1402 remain = sg_miter->length; 1403 offset = 0; 1404 1405 do { 1406 fcnt = ((fifo_depth - 1407 SDMMC_GET_FCNT(mci_readl(host, STATUS))) 1408 << shift) - host->part_buf_count; 1409 len = min(remain, fcnt); 1410 if (!len) 1411 break; 1412 host->push_data(host, (void *)(buf + offset), len); 1413 offset += len; 1414 nbytes += len; 1415 remain -= len; 1416 } while (remain); 1417 sg_miter->consumed = offset; 1418 1419 status = mci_readl(host, MINTSTS); 1420 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 1421 if (status & DW_MCI_DATA_ERROR_FLAGS) { 1422 host->data_status = status; 1423 data->bytes_xfered += nbytes; 1424 sg_miter_stop(sg_miter); 1425 host->sg = NULL; 1426 1427 smp_wmb(); 1428 1429 set_bit(EVENT_DATA_ERROR, &host->pending_events); 1430 1431 tasklet_schedule(&host->tasklet); 1432 return; 1433 } 1434 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ 1435 data->bytes_xfered += nbytes; 1436 1437 if (!remain) { 1438 if (!sg_miter_next(sg_miter)) 1439 goto done; 1440 sg_miter->consumed = 0; 1441 } 1442 sg_miter_stop(sg_miter); 1443 return; 1444 1445 done: 1446 data->bytes_xfered += nbytes; 1447 sg_miter_stop(sg_miter); 1448 host->sg = NULL; 1449 smp_wmb(); 1450 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 1451 } 1452 1453 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) 1454 { 1455 if (!host->cmd_status) 1456 host->cmd_status = status; 1457 1458 smp_wmb(); 1459 1460 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 1461 tasklet_schedule(&host->tasklet); 1462 } 1463 1464 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) 1465 { 1466 struct dw_mci *host = dev_id; 1467 u32 status, pending; 1468 unsigned int pass_count = 0; 1469 int i; 1470 1471 do { 1472 status = mci_readl(host, RINTSTS); 1473 pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 1474 1475 /* 1476 * DTO fix - version 2.10a and below, and only if internal DMA 1477 * is configured. 1478 */ 1479 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) { 1480 if (!pending && 1481 ((mci_readl(host, STATUS) >> 17) & 0x1fff)) 1482 pending |= SDMMC_INT_DATA_OVER; 1483 } 1484 1485 if (!pending) 1486 break; 1487 1488 if (pending & DW_MCI_CMD_ERROR_FLAGS) { 1489 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); 1490 host->cmd_status = status; 1491 smp_wmb(); 1492 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 1493 } 1494 1495 if (pending & DW_MCI_DATA_ERROR_FLAGS) { 1496 /* if there is an error report DATA_ERROR */ 1497 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); 1498 host->data_status = status; 1499 smp_wmb(); 1500 set_bit(EVENT_DATA_ERROR, &host->pending_events); 1501 if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC | 1502 SDMMC_INT_SBE | SDMMC_INT_EBE))) 1503 tasklet_schedule(&host->tasklet); 1504 } 1505 1506 if (pending & SDMMC_INT_DATA_OVER) { 1507 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); 1508 if (!host->data_status) 1509 host->data_status = status; 1510 smp_wmb(); 1511 if (host->dir_status == DW_MCI_RECV_STATUS) { 1512 if (host->sg != NULL) 1513 dw_mci_read_data_pio(host); 1514 } 1515 set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 1516 tasklet_schedule(&host->tasklet); 1517 } 1518 1519 if (pending & SDMMC_INT_RXDR) { 1520 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 1521 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg) 1522 dw_mci_read_data_pio(host); 1523 } 1524 1525 if (pending & SDMMC_INT_TXDR) { 1526 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 1527 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg) 1528 dw_mci_write_data_pio(host); 1529 } 1530 1531 if (pending & SDMMC_INT_CMD_DONE) { 1532 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); 1533 dw_mci_cmd_interrupt(host, status); 1534 } 1535 1536 if (pending & SDMMC_INT_CD) { 1537 mci_writel(host, RINTSTS, SDMMC_INT_CD); 1538 queue_work(dw_mci_card_workqueue, &host->card_work); 1539 } 1540 1541 /* Handle SDIO Interrupts */ 1542 for (i = 0; i < host->num_slots; i++) { 1543 struct dw_mci_slot *slot = host->slot[i]; 1544 if (pending & SDMMC_INT_SDIO(i)) { 1545 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i)); 1546 mmc_signal_sdio_irq(slot->mmc); 1547 } 1548 } 1549 1550 } while (pass_count++ < 5); 1551 1552 #ifdef CONFIG_MMC_DW_IDMAC 1553 /* Handle DMA interrupts */ 1554 pending = mci_readl(host, IDSTS); 1555 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 1556 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI); 1557 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); 1558 set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 1559 host->dma_ops->complete(host); 1560 } 1561 #endif 1562 1563 return IRQ_HANDLED; 1564 } 1565 1566 static void dw_mci_work_routine_card(struct work_struct *work) 1567 { 1568 struct dw_mci *host = container_of(work, struct dw_mci, card_work); 1569 int i; 1570 1571 for (i = 0; i < host->num_slots; i++) { 1572 struct dw_mci_slot *slot = host->slot[i]; 1573 struct mmc_host *mmc = slot->mmc; 1574 struct mmc_request *mrq; 1575 int present; 1576 u32 ctrl; 1577 1578 present = dw_mci_get_cd(mmc); 1579 while (present != slot->last_detect_state) { 1580 dev_dbg(&slot->mmc->class_dev, "card %s\n", 1581 present ? "inserted" : "removed"); 1582 1583 /* Power up slot (before spin_lock, may sleep) */ 1584 if (present != 0 && host->pdata->setpower) 1585 host->pdata->setpower(slot->id, mmc->ocr_avail); 1586 1587 spin_lock_bh(&host->lock); 1588 1589 /* Card change detected */ 1590 slot->last_detect_state = present; 1591 1592 /* Mark card as present if applicable */ 1593 if (present != 0) 1594 set_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1595 1596 /* Clean up queue if present */ 1597 mrq = slot->mrq; 1598 if (mrq) { 1599 if (mrq == host->mrq) { 1600 host->data = NULL; 1601 host->cmd = NULL; 1602 1603 switch (host->state) { 1604 case STATE_IDLE: 1605 break; 1606 case STATE_SENDING_CMD: 1607 mrq->cmd->error = -ENOMEDIUM; 1608 if (!mrq->data) 1609 break; 1610 /* fall through */ 1611 case STATE_SENDING_DATA: 1612 mrq->data->error = -ENOMEDIUM; 1613 dw_mci_stop_dma(host); 1614 break; 1615 case STATE_DATA_BUSY: 1616 case STATE_DATA_ERROR: 1617 if (mrq->data->error == -EINPROGRESS) 1618 mrq->data->error = -ENOMEDIUM; 1619 if (!mrq->stop) 1620 break; 1621 /* fall through */ 1622 case STATE_SENDING_STOP: 1623 mrq->stop->error = -ENOMEDIUM; 1624 break; 1625 } 1626 1627 dw_mci_request_end(host, mrq); 1628 } else { 1629 list_del(&slot->queue_node); 1630 mrq->cmd->error = -ENOMEDIUM; 1631 if (mrq->data) 1632 mrq->data->error = -ENOMEDIUM; 1633 if (mrq->stop) 1634 mrq->stop->error = -ENOMEDIUM; 1635 1636 spin_unlock(&host->lock); 1637 mmc_request_done(slot->mmc, mrq); 1638 spin_lock(&host->lock); 1639 } 1640 } 1641 1642 /* Power down slot */ 1643 if (present == 0) { 1644 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1645 1646 /* 1647 * Clear down the FIFO - doing so generates a 1648 * block interrupt, hence setting the 1649 * scatter-gather pointer to NULL. 1650 */ 1651 sg_miter_stop(&host->sg_miter); 1652 host->sg = NULL; 1653 1654 ctrl = mci_readl(host, CTRL); 1655 ctrl |= SDMMC_CTRL_FIFO_RESET; 1656 mci_writel(host, CTRL, ctrl); 1657 1658 #ifdef CONFIG_MMC_DW_IDMAC 1659 ctrl = mci_readl(host, BMOD); 1660 ctrl |= 0x01; /* Software reset of DMA */ 1661 mci_writel(host, BMOD, ctrl); 1662 #endif 1663 1664 } 1665 1666 spin_unlock_bh(&host->lock); 1667 1668 /* Power down slot (after spin_unlock, may sleep) */ 1669 if (present == 0 && host->pdata->setpower) 1670 host->pdata->setpower(slot->id, 0); 1671 1672 present = dw_mci_get_cd(mmc); 1673 } 1674 1675 mmc_detect_change(slot->mmc, 1676 msecs_to_jiffies(host->pdata->detect_delay_ms)); 1677 } 1678 } 1679 1680 static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) 1681 { 1682 struct mmc_host *mmc; 1683 struct dw_mci_slot *slot; 1684 1685 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->pdev->dev); 1686 if (!mmc) 1687 return -ENOMEM; 1688 1689 slot = mmc_priv(mmc); 1690 slot->id = id; 1691 slot->mmc = mmc; 1692 slot->host = host; 1693 1694 mmc->ops = &dw_mci_ops; 1695 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510); 1696 mmc->f_max = host->bus_hz; 1697 1698 if (host->pdata->get_ocr) 1699 mmc->ocr_avail = host->pdata->get_ocr(id); 1700 else 1701 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1702 1703 /* 1704 * Start with slot power disabled, it will be enabled when a card 1705 * is detected. 1706 */ 1707 if (host->pdata->setpower) 1708 host->pdata->setpower(id, 0); 1709 1710 if (host->pdata->caps) 1711 mmc->caps = host->pdata->caps; 1712 1713 if (host->pdata->caps2) 1714 mmc->caps2 = host->pdata->caps2; 1715 1716 if (host->pdata->get_bus_wd) 1717 if (host->pdata->get_bus_wd(slot->id) >= 4) 1718 mmc->caps |= MMC_CAP_4_BIT_DATA; 1719 1720 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) 1721 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 1722 1723 #ifdef CONFIG_MMC_DW_IDMAC 1724 mmc->max_segs = host->ring_size; 1725 mmc->max_blk_size = 65536; 1726 mmc->max_blk_count = host->ring_size; 1727 mmc->max_seg_size = 0x1000; 1728 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count; 1729 #else 1730 if (host->pdata->blk_settings) { 1731 mmc->max_segs = host->pdata->blk_settings->max_segs; 1732 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size; 1733 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count; 1734 mmc->max_req_size = host->pdata->blk_settings->max_req_size; 1735 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size; 1736 } else { 1737 /* Useful defaults if platform data is unset. */ 1738 mmc->max_segs = 64; 1739 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */ 1740 mmc->max_blk_count = 512; 1741 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1742 mmc->max_seg_size = mmc->max_req_size; 1743 } 1744 #endif /* CONFIG_MMC_DW_IDMAC */ 1745 1746 host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); 1747 if (IS_ERR(host->vmmc)) { 1748 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc)); 1749 host->vmmc = NULL; 1750 } else 1751 regulator_enable(host->vmmc); 1752 1753 if (dw_mci_get_cd(mmc)) 1754 set_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1755 else 1756 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1757 1758 host->slot[id] = slot; 1759 mmc_add_host(mmc); 1760 1761 #if defined(CONFIG_DEBUG_FS) 1762 dw_mci_init_debugfs(slot); 1763 #endif 1764 1765 /* Card initially undetected */ 1766 slot->last_detect_state = 0; 1767 1768 /* 1769 * Card may have been plugged in prior to boot so we 1770 * need to run the detect tasklet 1771 */ 1772 queue_work(dw_mci_card_workqueue, &host->card_work); 1773 1774 return 0; 1775 } 1776 1777 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id) 1778 { 1779 /* Shutdown detect IRQ */ 1780 if (slot->host->pdata->exit) 1781 slot->host->pdata->exit(id); 1782 1783 /* Debugfs stuff is cleaned up by mmc core */ 1784 mmc_remove_host(slot->mmc); 1785 slot->host->slot[id] = NULL; 1786 mmc_free_host(slot->mmc); 1787 } 1788 1789 static void dw_mci_init_dma(struct dw_mci *host) 1790 { 1791 /* Alloc memory for sg translation */ 1792 host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, PAGE_SIZE, 1793 &host->sg_dma, GFP_KERNEL); 1794 if (!host->sg_cpu) { 1795 dev_err(&host->pdev->dev, "%s: could not alloc DMA memory\n", 1796 __func__); 1797 goto no_dma; 1798 } 1799 1800 /* Determine which DMA interface to use */ 1801 #ifdef CONFIG_MMC_DW_IDMAC 1802 host->dma_ops = &dw_mci_idmac_ops; 1803 dev_info(&host->pdev->dev, "Using internal DMA controller.\n"); 1804 #endif 1805 1806 if (!host->dma_ops) 1807 goto no_dma; 1808 1809 if (host->dma_ops->init) { 1810 if (host->dma_ops->init(host)) { 1811 dev_err(&host->pdev->dev, "%s: Unable to initialize " 1812 "DMA Controller.\n", __func__); 1813 goto no_dma; 1814 } 1815 } else { 1816 dev_err(&host->pdev->dev, "DMA initialization not found.\n"); 1817 goto no_dma; 1818 } 1819 1820 host->use_dma = 1; 1821 return; 1822 1823 no_dma: 1824 dev_info(&host->pdev->dev, "Using PIO mode.\n"); 1825 host->use_dma = 0; 1826 return; 1827 } 1828 1829 static bool mci_wait_reset(struct device *dev, struct dw_mci *host) 1830 { 1831 unsigned long timeout = jiffies + msecs_to_jiffies(500); 1832 unsigned int ctrl; 1833 1834 mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | 1835 SDMMC_CTRL_DMA_RESET)); 1836 1837 /* wait till resets clear */ 1838 do { 1839 ctrl = mci_readl(host, CTRL); 1840 if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | 1841 SDMMC_CTRL_DMA_RESET))) 1842 return true; 1843 } while (time_before(jiffies, timeout)); 1844 1845 dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl); 1846 1847 return false; 1848 } 1849 1850 static int dw_mci_probe(struct platform_device *pdev) 1851 { 1852 struct dw_mci *host; 1853 struct resource *regs; 1854 struct dw_mci_board *pdata; 1855 int irq, ret, i, width; 1856 u32 fifo_size; 1857 1858 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1859 if (!regs) 1860 return -ENXIO; 1861 1862 irq = platform_get_irq(pdev, 0); 1863 if (irq < 0) 1864 return irq; 1865 1866 host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL); 1867 if (!host) 1868 return -ENOMEM; 1869 1870 host->pdev = pdev; 1871 host->pdata = pdata = pdev->dev.platform_data; 1872 if (!pdata || !pdata->init) { 1873 dev_err(&pdev->dev, 1874 "Platform data must supply init function\n"); 1875 ret = -ENODEV; 1876 goto err_freehost; 1877 } 1878 1879 if (!pdata->select_slot && pdata->num_slots > 1) { 1880 dev_err(&pdev->dev, 1881 "Platform data must supply select_slot function\n"); 1882 ret = -ENODEV; 1883 goto err_freehost; 1884 } 1885 1886 if (!pdata->bus_hz) { 1887 dev_err(&pdev->dev, 1888 "Platform data must supply bus speed\n"); 1889 ret = -ENODEV; 1890 goto err_freehost; 1891 } 1892 1893 host->bus_hz = pdata->bus_hz; 1894 host->quirks = pdata->quirks; 1895 1896 spin_lock_init(&host->lock); 1897 INIT_LIST_HEAD(&host->queue); 1898 1899 ret = -ENOMEM; 1900 host->regs = ioremap(regs->start, resource_size(regs)); 1901 if (!host->regs) 1902 goto err_freehost; 1903 1904 host->dma_ops = pdata->dma_ops; 1905 dw_mci_init_dma(host); 1906 1907 /* 1908 * Get the host data width - this assumes that HCON has been set with 1909 * the correct values. 1910 */ 1911 i = (mci_readl(host, HCON) >> 7) & 0x7; 1912 if (!i) { 1913 host->push_data = dw_mci_push_data16; 1914 host->pull_data = dw_mci_pull_data16; 1915 width = 16; 1916 host->data_shift = 1; 1917 } else if (i == 2) { 1918 host->push_data = dw_mci_push_data64; 1919 host->pull_data = dw_mci_pull_data64; 1920 width = 64; 1921 host->data_shift = 3; 1922 } else { 1923 /* Check for a reserved value, and warn if it is */ 1924 WARN((i != 1), 1925 "HCON reports a reserved host data width!\n" 1926 "Defaulting to 32-bit access.\n"); 1927 host->push_data = dw_mci_push_data32; 1928 host->pull_data = dw_mci_pull_data32; 1929 width = 32; 1930 host->data_shift = 2; 1931 } 1932 1933 /* Reset all blocks */ 1934 if (!mci_wait_reset(&pdev->dev, host)) { 1935 ret = -ENODEV; 1936 goto err_dmaunmap; 1937 } 1938 1939 /* Clear the interrupts for the host controller */ 1940 mci_writel(host, RINTSTS, 0xFFFFFFFF); 1941 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 1942 1943 /* Put in max timeout */ 1944 mci_writel(host, TMOUT, 0xFFFFFFFF); 1945 1946 /* 1947 * FIFO threshold settings RxMark = fifo_size / 2 - 1, 1948 * Tx Mark = fifo_size / 2 DMA Size = 8 1949 */ 1950 if (!host->pdata->fifo_depth) { 1951 /* 1952 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may 1953 * have been overwritten by the bootloader, just like we're 1954 * about to do, so if you know the value for your hardware, you 1955 * should put it in the platform data. 1956 */ 1957 fifo_size = mci_readl(host, FIFOTH); 1958 fifo_size = 1 + ((fifo_size >> 16) & 0xfff); 1959 } else { 1960 fifo_size = host->pdata->fifo_depth; 1961 } 1962 host->fifo_depth = fifo_size; 1963 host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) | 1964 ((fifo_size/2) << 0)); 1965 mci_writel(host, FIFOTH, host->fifoth_val); 1966 1967 /* disable clock to CIU */ 1968 mci_writel(host, CLKENA, 0); 1969 mci_writel(host, CLKSRC, 0); 1970 1971 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); 1972 dw_mci_card_workqueue = alloc_workqueue("dw-mci-card", 1973 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1); 1974 if (!dw_mci_card_workqueue) 1975 goto err_dmaunmap; 1976 INIT_WORK(&host->card_work, dw_mci_work_routine_card); 1977 1978 ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host); 1979 if (ret) 1980 goto err_workqueue; 1981 1982 platform_set_drvdata(pdev, host); 1983 1984 if (host->pdata->num_slots) 1985 host->num_slots = host->pdata->num_slots; 1986 else 1987 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1; 1988 1989 /* We need at least one slot to succeed */ 1990 for (i = 0; i < host->num_slots; i++) { 1991 ret = dw_mci_init_slot(host, i); 1992 if (ret) { 1993 ret = -ENODEV; 1994 goto err_init_slot; 1995 } 1996 } 1997 1998 /* 1999 * In 2.40a spec, Data offset is changed. 2000 * Need to check the version-id and set data-offset for DATA register. 2001 */ 2002 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); 2003 dev_info(&pdev->dev, "Version ID is %04x\n", host->verid); 2004 2005 if (host->verid < DW_MMC_240A) 2006 host->data_offset = DATA_OFFSET; 2007 else 2008 host->data_offset = DATA_240A_OFFSET; 2009 2010 /* 2011 * Enable interrupts for command done, data over, data empty, card det, 2012 * receive ready and error such as transmit, receive timeout, crc error 2013 */ 2014 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2015 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 2016 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 2017 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); 2018 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ 2019 2020 dev_info(&pdev->dev, "DW MMC controller at irq %d, " 2021 "%d bit host data width, " 2022 "%u deep fifo\n", 2023 irq, width, fifo_size); 2024 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) 2025 dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n"); 2026 2027 return 0; 2028 2029 err_init_slot: 2030 /* De-init any initialized slots */ 2031 while (i > 0) { 2032 if (host->slot[i]) 2033 dw_mci_cleanup_slot(host->slot[i], i); 2034 i--; 2035 } 2036 free_irq(irq, host); 2037 2038 err_workqueue: 2039 destroy_workqueue(dw_mci_card_workqueue); 2040 2041 err_dmaunmap: 2042 if (host->use_dma && host->dma_ops->exit) 2043 host->dma_ops->exit(host); 2044 dma_free_coherent(&host->pdev->dev, PAGE_SIZE, 2045 host->sg_cpu, host->sg_dma); 2046 iounmap(host->regs); 2047 2048 if (host->vmmc) { 2049 regulator_disable(host->vmmc); 2050 regulator_put(host->vmmc); 2051 } 2052 2053 2054 err_freehost: 2055 kfree(host); 2056 return ret; 2057 } 2058 2059 static int __exit dw_mci_remove(struct platform_device *pdev) 2060 { 2061 struct dw_mci *host = platform_get_drvdata(pdev); 2062 int i; 2063 2064 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2065 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 2066 2067 platform_set_drvdata(pdev, NULL); 2068 2069 for (i = 0; i < host->num_slots; i++) { 2070 dev_dbg(&pdev->dev, "remove slot %d\n", i); 2071 if (host->slot[i]) 2072 dw_mci_cleanup_slot(host->slot[i], i); 2073 } 2074 2075 /* disable clock to CIU */ 2076 mci_writel(host, CLKENA, 0); 2077 mci_writel(host, CLKSRC, 0); 2078 2079 free_irq(platform_get_irq(pdev, 0), host); 2080 destroy_workqueue(dw_mci_card_workqueue); 2081 dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); 2082 2083 if (host->use_dma && host->dma_ops->exit) 2084 host->dma_ops->exit(host); 2085 2086 if (host->vmmc) { 2087 regulator_disable(host->vmmc); 2088 regulator_put(host->vmmc); 2089 } 2090 2091 iounmap(host->regs); 2092 2093 kfree(host); 2094 return 0; 2095 } 2096 2097 #ifdef CONFIG_PM_SLEEP 2098 /* 2099 * TODO: we should probably disable the clock to the card in the suspend path. 2100 */ 2101 static int dw_mci_suspend(struct device *dev) 2102 { 2103 int i, ret; 2104 struct dw_mci *host = dev_get_drvdata(dev); 2105 2106 for (i = 0; i < host->num_slots; i++) { 2107 struct dw_mci_slot *slot = host->slot[i]; 2108 if (!slot) 2109 continue; 2110 ret = mmc_suspend_host(slot->mmc); 2111 if (ret < 0) { 2112 while (--i >= 0) { 2113 slot = host->slot[i]; 2114 if (slot) 2115 mmc_resume_host(host->slot[i]->mmc); 2116 } 2117 return ret; 2118 } 2119 } 2120 2121 if (host->vmmc) 2122 regulator_disable(host->vmmc); 2123 2124 return 0; 2125 } 2126 2127 static int dw_mci_resume(struct device *dev) 2128 { 2129 int i, ret; 2130 struct dw_mci *host = dev_get_drvdata(dev); 2131 2132 if (host->vmmc) 2133 regulator_enable(host->vmmc); 2134 2135 if (host->dma_ops->init) 2136 host->dma_ops->init(host); 2137 2138 if (!mci_wait_reset(dev, host)) { 2139 ret = -ENODEV; 2140 return ret; 2141 } 2142 2143 /* Restore the old value at FIFOTH register */ 2144 mci_writel(host, FIFOTH, host->fifoth_val); 2145 2146 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2147 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 2148 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 2149 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); 2150 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 2151 2152 for (i = 0; i < host->num_slots; i++) { 2153 struct dw_mci_slot *slot = host->slot[i]; 2154 if (!slot) 2155 continue; 2156 ret = mmc_resume_host(host->slot[i]->mmc); 2157 if (ret < 0) 2158 return ret; 2159 } 2160 2161 return 0; 2162 } 2163 #else 2164 #define dw_mci_suspend NULL 2165 #define dw_mci_resume NULL 2166 #endif /* CONFIG_PM_SLEEP */ 2167 2168 static SIMPLE_DEV_PM_OPS(dw_mci_pmops, dw_mci_suspend, dw_mci_resume); 2169 2170 static struct platform_driver dw_mci_driver = { 2171 .remove = __exit_p(dw_mci_remove), 2172 .driver = { 2173 .name = "dw_mmc", 2174 .pm = &dw_mci_pmops, 2175 }, 2176 }; 2177 2178 static int __init dw_mci_init(void) 2179 { 2180 return platform_driver_probe(&dw_mci_driver, dw_mci_probe); 2181 } 2182 2183 static void __exit dw_mci_exit(void) 2184 { 2185 platform_driver_unregister(&dw_mci_driver); 2186 } 2187 2188 module_init(dw_mci_init); 2189 module_exit(dw_mci_exit); 2190 2191 MODULE_DESCRIPTION("DW Multimedia Card Interface driver"); 2192 MODULE_AUTHOR("NXP Semiconductor VietNam"); 2193 MODULE_AUTHOR("Imagination Technologies Ltd"); 2194 MODULE_LICENSE("GPL v2"); 2195