1 /* 2 * Synopsys DesignWare Multimedia Card Interface driver 3 * (Based on NXP driver for lpc 31xx) 4 * 5 * Copyright (C) 2009 NXP Semiconductors 6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd. 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 */ 13 14 #include <linux/blkdev.h> 15 #include <linux/clk.h> 16 #include <linux/debugfs.h> 17 #include <linux/device.h> 18 #include <linux/dma-mapping.h> 19 #include <linux/err.h> 20 #include <linux/init.h> 21 #include <linux/interrupt.h> 22 #include <linux/ioport.h> 23 #include <linux/module.h> 24 #include <linux/platform_device.h> 25 #include <linux/seq_file.h> 26 #include <linux/slab.h> 27 #include <linux/stat.h> 28 #include <linux/delay.h> 29 #include <linux/irq.h> 30 #include <linux/mmc/host.h> 31 #include <linux/mmc/mmc.h> 32 #include <linux/mmc/dw_mmc.h> 33 #include <linux/bitops.h> 34 #include <linux/regulator/consumer.h> 35 #include <linux/workqueue.h> 36 37 #include "dw_mmc.h" 38 39 /* Common flag combinations */ 40 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DTO | SDMMC_INT_DCRC | \ 41 SDMMC_INT_HTO | SDMMC_INT_SBE | \ 42 SDMMC_INT_EBE) 43 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \ 44 SDMMC_INT_RESP_ERR) 45 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \ 46 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE) 47 #define DW_MCI_SEND_STATUS 1 48 #define DW_MCI_RECV_STATUS 2 49 #define DW_MCI_DMA_THRESHOLD 16 50 51 #ifdef CONFIG_MMC_DW_IDMAC 52 struct idmac_desc { 53 u32 des0; /* Control Descriptor */ 54 #define IDMAC_DES0_DIC BIT(1) 55 #define IDMAC_DES0_LD BIT(2) 56 #define IDMAC_DES0_FD BIT(3) 57 #define IDMAC_DES0_CH BIT(4) 58 #define IDMAC_DES0_ER BIT(5) 59 #define IDMAC_DES0_CES BIT(30) 60 #define IDMAC_DES0_OWN BIT(31) 61 62 u32 des1; /* Buffer sizes */ 63 #define IDMAC_SET_BUFFER1_SIZE(d, s) \ 64 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff)) 65 66 u32 des2; /* buffer 1 physical address */ 67 68 u32 des3; /* buffer 2 physical address */ 69 }; 70 #endif /* CONFIG_MMC_DW_IDMAC */ 71 72 /** 73 * struct dw_mci_slot - MMC slot state 74 * @mmc: The mmc_host representing this slot. 75 * @host: The MMC controller this slot is using. 76 * @ctype: Card type for this slot. 77 * @mrq: mmc_request currently being processed or waiting to be 78 * processed, or NULL when the slot is idle. 79 * @queue_node: List node for placing this node in the @queue list of 80 * &struct dw_mci. 81 * @clock: Clock rate configured by set_ios(). Protected by host->lock. 82 * @flags: Random state bits associated with the slot. 83 * @id: Number of this slot. 84 * @last_detect_state: Most recently observed card detect state. 85 */ 86 struct dw_mci_slot { 87 struct mmc_host *mmc; 88 struct dw_mci *host; 89 90 u32 ctype; 91 92 struct mmc_request *mrq; 93 struct list_head queue_node; 94 95 unsigned int clock; 96 unsigned long flags; 97 #define DW_MMC_CARD_PRESENT 0 98 #define DW_MMC_CARD_NEED_INIT 1 99 int id; 100 int last_detect_state; 101 }; 102 103 #if defined(CONFIG_DEBUG_FS) 104 static int dw_mci_req_show(struct seq_file *s, void *v) 105 { 106 struct dw_mci_slot *slot = s->private; 107 struct mmc_request *mrq; 108 struct mmc_command *cmd; 109 struct mmc_command *stop; 110 struct mmc_data *data; 111 112 /* Make sure we get a consistent snapshot */ 113 spin_lock_bh(&slot->host->lock); 114 mrq = slot->mrq; 115 116 if (mrq) { 117 cmd = mrq->cmd; 118 data = mrq->data; 119 stop = mrq->stop; 120 121 if (cmd) 122 seq_printf(s, 123 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 124 cmd->opcode, cmd->arg, cmd->flags, 125 cmd->resp[0], cmd->resp[1], cmd->resp[2], 126 cmd->resp[2], cmd->error); 127 if (data) 128 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", 129 data->bytes_xfered, data->blocks, 130 data->blksz, data->flags, data->error); 131 if (stop) 132 seq_printf(s, 133 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 134 stop->opcode, stop->arg, stop->flags, 135 stop->resp[0], stop->resp[1], stop->resp[2], 136 stop->resp[2], stop->error); 137 } 138 139 spin_unlock_bh(&slot->host->lock); 140 141 return 0; 142 } 143 144 static int dw_mci_req_open(struct inode *inode, struct file *file) 145 { 146 return single_open(file, dw_mci_req_show, inode->i_private); 147 } 148 149 static const struct file_operations dw_mci_req_fops = { 150 .owner = THIS_MODULE, 151 .open = dw_mci_req_open, 152 .read = seq_read, 153 .llseek = seq_lseek, 154 .release = single_release, 155 }; 156 157 static int dw_mci_regs_show(struct seq_file *s, void *v) 158 { 159 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS); 160 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS); 161 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD); 162 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL); 163 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK); 164 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA); 165 166 return 0; 167 } 168 169 static int dw_mci_regs_open(struct inode *inode, struct file *file) 170 { 171 return single_open(file, dw_mci_regs_show, inode->i_private); 172 } 173 174 static const struct file_operations dw_mci_regs_fops = { 175 .owner = THIS_MODULE, 176 .open = dw_mci_regs_open, 177 .read = seq_read, 178 .llseek = seq_lseek, 179 .release = single_release, 180 }; 181 182 static void dw_mci_init_debugfs(struct dw_mci_slot *slot) 183 { 184 struct mmc_host *mmc = slot->mmc; 185 struct dw_mci *host = slot->host; 186 struct dentry *root; 187 struct dentry *node; 188 189 root = mmc->debugfs_root; 190 if (!root) 191 return; 192 193 node = debugfs_create_file("regs", S_IRUSR, root, host, 194 &dw_mci_regs_fops); 195 if (!node) 196 goto err; 197 198 node = debugfs_create_file("req", S_IRUSR, root, slot, 199 &dw_mci_req_fops); 200 if (!node) 201 goto err; 202 203 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state); 204 if (!node) 205 goto err; 206 207 node = debugfs_create_x32("pending_events", S_IRUSR, root, 208 (u32 *)&host->pending_events); 209 if (!node) 210 goto err; 211 212 node = debugfs_create_x32("completed_events", S_IRUSR, root, 213 (u32 *)&host->completed_events); 214 if (!node) 215 goto err; 216 217 return; 218 219 err: 220 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n"); 221 } 222 #endif /* defined(CONFIG_DEBUG_FS) */ 223 224 static void dw_mci_set_timeout(struct dw_mci *host) 225 { 226 /* timeout (maximum) */ 227 mci_writel(host, TMOUT, 0xffffffff); 228 } 229 230 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) 231 { 232 struct mmc_data *data; 233 u32 cmdr; 234 cmd->error = -EINPROGRESS; 235 236 cmdr = cmd->opcode; 237 238 if (cmdr == MMC_STOP_TRANSMISSION) 239 cmdr |= SDMMC_CMD_STOP; 240 else 241 cmdr |= SDMMC_CMD_PRV_DAT_WAIT; 242 243 if (cmd->flags & MMC_RSP_PRESENT) { 244 /* We expect a response, so set this bit */ 245 cmdr |= SDMMC_CMD_RESP_EXP; 246 if (cmd->flags & MMC_RSP_136) 247 cmdr |= SDMMC_CMD_RESP_LONG; 248 } 249 250 if (cmd->flags & MMC_RSP_CRC) 251 cmdr |= SDMMC_CMD_RESP_CRC; 252 253 data = cmd->data; 254 if (data) { 255 cmdr |= SDMMC_CMD_DAT_EXP; 256 if (data->flags & MMC_DATA_STREAM) 257 cmdr |= SDMMC_CMD_STRM_MODE; 258 if (data->flags & MMC_DATA_WRITE) 259 cmdr |= SDMMC_CMD_DAT_WR; 260 } 261 262 return cmdr; 263 } 264 265 static void dw_mci_start_command(struct dw_mci *host, 266 struct mmc_command *cmd, u32 cmd_flags) 267 { 268 host->cmd = cmd; 269 dev_vdbg(&host->dev, 270 "start command: ARGR=0x%08x CMDR=0x%08x\n", 271 cmd->arg, cmd_flags); 272 273 mci_writel(host, CMDARG, cmd->arg); 274 wmb(); 275 276 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); 277 } 278 279 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data) 280 { 281 dw_mci_start_command(host, data->stop, host->stop_cmdr); 282 } 283 284 /* DMA interface functions */ 285 static void dw_mci_stop_dma(struct dw_mci *host) 286 { 287 if (host->using_dma) { 288 host->dma_ops->stop(host); 289 host->dma_ops->cleanup(host); 290 } else { 291 /* Data transfer was stopped by the interrupt handler */ 292 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 293 } 294 } 295 296 static int dw_mci_get_dma_dir(struct mmc_data *data) 297 { 298 if (data->flags & MMC_DATA_WRITE) 299 return DMA_TO_DEVICE; 300 else 301 return DMA_FROM_DEVICE; 302 } 303 304 #ifdef CONFIG_MMC_DW_IDMAC 305 static void dw_mci_dma_cleanup(struct dw_mci *host) 306 { 307 struct mmc_data *data = host->data; 308 309 if (data) 310 if (!data->host_cookie) 311 dma_unmap_sg(&host->dev, 312 data->sg, 313 data->sg_len, 314 dw_mci_get_dma_dir(data)); 315 } 316 317 static void dw_mci_idmac_stop_dma(struct dw_mci *host) 318 { 319 u32 temp; 320 321 /* Disable and reset the IDMAC interface */ 322 temp = mci_readl(host, CTRL); 323 temp &= ~SDMMC_CTRL_USE_IDMAC; 324 temp |= SDMMC_CTRL_DMA_RESET; 325 mci_writel(host, CTRL, temp); 326 327 /* Stop the IDMAC running */ 328 temp = mci_readl(host, BMOD); 329 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); 330 mci_writel(host, BMOD, temp); 331 } 332 333 static void dw_mci_idmac_complete_dma(struct dw_mci *host) 334 { 335 struct mmc_data *data = host->data; 336 337 dev_vdbg(&host->dev, "DMA complete\n"); 338 339 host->dma_ops->cleanup(host); 340 341 /* 342 * If the card was removed, data will be NULL. No point in trying to 343 * send the stop command or waiting for NBUSY in this case. 344 */ 345 if (data) { 346 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 347 tasklet_schedule(&host->tasklet); 348 } 349 } 350 351 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data, 352 unsigned int sg_len) 353 { 354 int i; 355 struct idmac_desc *desc = host->sg_cpu; 356 357 for (i = 0; i < sg_len; i++, desc++) { 358 unsigned int length = sg_dma_len(&data->sg[i]); 359 u32 mem_addr = sg_dma_address(&data->sg[i]); 360 361 /* Set the OWN bit and disable interrupts for this descriptor */ 362 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH; 363 364 /* Buffer length */ 365 IDMAC_SET_BUFFER1_SIZE(desc, length); 366 367 /* Physical address to DMA to/from */ 368 desc->des2 = mem_addr; 369 } 370 371 /* Set first descriptor */ 372 desc = host->sg_cpu; 373 desc->des0 |= IDMAC_DES0_FD; 374 375 /* Set last descriptor */ 376 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc); 377 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC); 378 desc->des0 |= IDMAC_DES0_LD; 379 380 wmb(); 381 } 382 383 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) 384 { 385 u32 temp; 386 387 dw_mci_translate_sglist(host, host->data, sg_len); 388 389 /* Select IDMAC interface */ 390 temp = mci_readl(host, CTRL); 391 temp |= SDMMC_CTRL_USE_IDMAC; 392 mci_writel(host, CTRL, temp); 393 394 wmb(); 395 396 /* Enable the IDMAC */ 397 temp = mci_readl(host, BMOD); 398 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB; 399 mci_writel(host, BMOD, temp); 400 401 /* Start it running */ 402 mci_writel(host, PLDMND, 1); 403 } 404 405 static int dw_mci_idmac_init(struct dw_mci *host) 406 { 407 struct idmac_desc *p; 408 int i; 409 410 /* Number of descriptors in the ring buffer */ 411 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc); 412 413 /* Forward link the descriptor list */ 414 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) 415 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1)); 416 417 /* Set the last descriptor as the end-of-ring descriptor */ 418 p->des3 = host->sg_dma; 419 p->des0 = IDMAC_DES0_ER; 420 421 mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET); 422 423 /* Mask out interrupts - get Tx & Rx complete only */ 424 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI | 425 SDMMC_IDMAC_INT_TI); 426 427 /* Set the descriptor base address */ 428 mci_writel(host, DBADDR, host->sg_dma); 429 return 0; 430 } 431 432 static struct dw_mci_dma_ops dw_mci_idmac_ops = { 433 .init = dw_mci_idmac_init, 434 .start = dw_mci_idmac_start_dma, 435 .stop = dw_mci_idmac_stop_dma, 436 .complete = dw_mci_idmac_complete_dma, 437 .cleanup = dw_mci_dma_cleanup, 438 }; 439 #endif /* CONFIG_MMC_DW_IDMAC */ 440 441 static int dw_mci_pre_dma_transfer(struct dw_mci *host, 442 struct mmc_data *data, 443 bool next) 444 { 445 struct scatterlist *sg; 446 unsigned int i, sg_len; 447 448 if (!next && data->host_cookie) 449 return data->host_cookie; 450 451 /* 452 * We don't do DMA on "complex" transfers, i.e. with 453 * non-word-aligned buffers or lengths. Also, we don't bother 454 * with all the DMA setup overhead for short transfers. 455 */ 456 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD) 457 return -EINVAL; 458 459 if (data->blksz & 3) 460 return -EINVAL; 461 462 for_each_sg(data->sg, sg, data->sg_len, i) { 463 if (sg->offset & 3 || sg->length & 3) 464 return -EINVAL; 465 } 466 467 sg_len = dma_map_sg(&host->dev, 468 data->sg, 469 data->sg_len, 470 dw_mci_get_dma_dir(data)); 471 if (sg_len == 0) 472 return -EINVAL; 473 474 if (next) 475 data->host_cookie = sg_len; 476 477 return sg_len; 478 } 479 480 static void dw_mci_pre_req(struct mmc_host *mmc, 481 struct mmc_request *mrq, 482 bool is_first_req) 483 { 484 struct dw_mci_slot *slot = mmc_priv(mmc); 485 struct mmc_data *data = mrq->data; 486 487 if (!slot->host->use_dma || !data) 488 return; 489 490 if (data->host_cookie) { 491 data->host_cookie = 0; 492 return; 493 } 494 495 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0) 496 data->host_cookie = 0; 497 } 498 499 static void dw_mci_post_req(struct mmc_host *mmc, 500 struct mmc_request *mrq, 501 int err) 502 { 503 struct dw_mci_slot *slot = mmc_priv(mmc); 504 struct mmc_data *data = mrq->data; 505 506 if (!slot->host->use_dma || !data) 507 return; 508 509 if (data->host_cookie) 510 dma_unmap_sg(&slot->host->dev, 511 data->sg, 512 data->sg_len, 513 dw_mci_get_dma_dir(data)); 514 data->host_cookie = 0; 515 } 516 517 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) 518 { 519 int sg_len; 520 u32 temp; 521 522 host->using_dma = 0; 523 524 /* If we don't have a channel, we can't do DMA */ 525 if (!host->use_dma) 526 return -ENODEV; 527 528 sg_len = dw_mci_pre_dma_transfer(host, data, 0); 529 if (sg_len < 0) { 530 host->dma_ops->stop(host); 531 return sg_len; 532 } 533 534 host->using_dma = 1; 535 536 dev_vdbg(&host->dev, 537 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", 538 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma, 539 sg_len); 540 541 /* Enable the DMA interface */ 542 temp = mci_readl(host, CTRL); 543 temp |= SDMMC_CTRL_DMA_ENABLE; 544 mci_writel(host, CTRL, temp); 545 546 /* Disable RX/TX IRQs, let DMA handle it */ 547 temp = mci_readl(host, INTMASK); 548 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR); 549 mci_writel(host, INTMASK, temp); 550 551 host->dma_ops->start(host, sg_len); 552 553 return 0; 554 } 555 556 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) 557 { 558 u32 temp; 559 560 data->error = -EINPROGRESS; 561 562 WARN_ON(host->data); 563 host->sg = NULL; 564 host->data = data; 565 566 if (data->flags & MMC_DATA_READ) 567 host->dir_status = DW_MCI_RECV_STATUS; 568 else 569 host->dir_status = DW_MCI_SEND_STATUS; 570 571 if (dw_mci_submit_data_dma(host, data)) { 572 int flags = SG_MITER_ATOMIC; 573 if (host->data->flags & MMC_DATA_READ) 574 flags |= SG_MITER_TO_SG; 575 else 576 flags |= SG_MITER_FROM_SG; 577 578 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 579 host->sg = data->sg; 580 host->part_buf_start = 0; 581 host->part_buf_count = 0; 582 583 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR); 584 temp = mci_readl(host, INTMASK); 585 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR; 586 mci_writel(host, INTMASK, temp); 587 588 temp = mci_readl(host, CTRL); 589 temp &= ~SDMMC_CTRL_DMA_ENABLE; 590 mci_writel(host, CTRL, temp); 591 } 592 } 593 594 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg) 595 { 596 struct dw_mci *host = slot->host; 597 unsigned long timeout = jiffies + msecs_to_jiffies(500); 598 unsigned int cmd_status = 0; 599 600 mci_writel(host, CMDARG, arg); 601 wmb(); 602 mci_writel(host, CMD, SDMMC_CMD_START | cmd); 603 604 while (time_before(jiffies, timeout)) { 605 cmd_status = mci_readl(host, CMD); 606 if (!(cmd_status & SDMMC_CMD_START)) 607 return; 608 } 609 dev_err(&slot->mmc->class_dev, 610 "Timeout sending command (cmd %#x arg %#x status %#x)\n", 611 cmd, arg, cmd_status); 612 } 613 614 static void dw_mci_setup_bus(struct dw_mci_slot *slot) 615 { 616 struct dw_mci *host = slot->host; 617 u32 div; 618 619 if (slot->clock != host->current_speed) { 620 div = host->bus_hz / slot->clock; 621 if (host->bus_hz % slot->clock && host->bus_hz > slot->clock) 622 /* 623 * move the + 1 after the divide to prevent 624 * over-clocking the card. 625 */ 626 div += 1; 627 628 div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0; 629 630 dev_info(&slot->mmc->class_dev, 631 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ" 632 " div = %d)\n", slot->id, host->bus_hz, slot->clock, 633 div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div); 634 635 /* disable clock */ 636 mci_writel(host, CLKENA, 0); 637 mci_writel(host, CLKSRC, 0); 638 639 /* inform CIU */ 640 mci_send_cmd(slot, 641 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); 642 643 /* set clock to desired speed */ 644 mci_writel(host, CLKDIV, div); 645 646 /* inform CIU */ 647 mci_send_cmd(slot, 648 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); 649 650 /* enable clock */ 651 mci_writel(host, CLKENA, ((SDMMC_CLKEN_ENABLE | 652 SDMMC_CLKEN_LOW_PWR) << slot->id)); 653 654 /* inform CIU */ 655 mci_send_cmd(slot, 656 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); 657 658 host->current_speed = slot->clock; 659 } 660 661 /* Set the current slot bus width */ 662 mci_writel(host, CTYPE, (slot->ctype << slot->id)); 663 } 664 665 static void __dw_mci_start_request(struct dw_mci *host, 666 struct dw_mci_slot *slot, 667 struct mmc_command *cmd) 668 { 669 struct mmc_request *mrq; 670 struct mmc_data *data; 671 u32 cmdflags; 672 673 mrq = slot->mrq; 674 if (host->pdata->select_slot) 675 host->pdata->select_slot(slot->id); 676 677 /* Slot specific timing and width adjustment */ 678 dw_mci_setup_bus(slot); 679 680 host->cur_slot = slot; 681 host->mrq = mrq; 682 683 host->pending_events = 0; 684 host->completed_events = 0; 685 host->data_status = 0; 686 687 data = cmd->data; 688 if (data) { 689 dw_mci_set_timeout(host); 690 mci_writel(host, BYTCNT, data->blksz*data->blocks); 691 mci_writel(host, BLKSIZ, data->blksz); 692 } 693 694 cmdflags = dw_mci_prepare_command(slot->mmc, cmd); 695 696 /* this is the first command, send the initialization clock */ 697 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags)) 698 cmdflags |= SDMMC_CMD_INIT; 699 700 if (data) { 701 dw_mci_submit_data(host, data); 702 wmb(); 703 } 704 705 dw_mci_start_command(host, cmd, cmdflags); 706 707 if (mrq->stop) 708 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop); 709 } 710 711 static void dw_mci_start_request(struct dw_mci *host, 712 struct dw_mci_slot *slot) 713 { 714 struct mmc_request *mrq = slot->mrq; 715 struct mmc_command *cmd; 716 717 cmd = mrq->sbc ? mrq->sbc : mrq->cmd; 718 __dw_mci_start_request(host, slot, cmd); 719 } 720 721 /* must be called with host->lock held */ 722 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, 723 struct mmc_request *mrq) 724 { 725 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", 726 host->state); 727 728 slot->mrq = mrq; 729 730 if (host->state == STATE_IDLE) { 731 host->state = STATE_SENDING_CMD; 732 dw_mci_start_request(host, slot); 733 } else { 734 list_add_tail(&slot->queue_node, &host->queue); 735 } 736 } 737 738 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) 739 { 740 struct dw_mci_slot *slot = mmc_priv(mmc); 741 struct dw_mci *host = slot->host; 742 743 WARN_ON(slot->mrq); 744 745 /* 746 * The check for card presence and queueing of the request must be 747 * atomic, otherwise the card could be removed in between and the 748 * request wouldn't fail until another card was inserted. 749 */ 750 spin_lock_bh(&host->lock); 751 752 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) { 753 spin_unlock_bh(&host->lock); 754 mrq->cmd->error = -ENOMEDIUM; 755 mmc_request_done(mmc, mrq); 756 return; 757 } 758 759 dw_mci_queue_request(host, slot, mrq); 760 761 spin_unlock_bh(&host->lock); 762 } 763 764 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 765 { 766 struct dw_mci_slot *slot = mmc_priv(mmc); 767 u32 regs; 768 769 /* set default 1 bit mode */ 770 slot->ctype = SDMMC_CTYPE_1BIT; 771 772 switch (ios->bus_width) { 773 case MMC_BUS_WIDTH_1: 774 slot->ctype = SDMMC_CTYPE_1BIT; 775 break; 776 case MMC_BUS_WIDTH_4: 777 slot->ctype = SDMMC_CTYPE_4BIT; 778 break; 779 case MMC_BUS_WIDTH_8: 780 slot->ctype = SDMMC_CTYPE_8BIT; 781 break; 782 } 783 784 regs = mci_readl(slot->host, UHS_REG); 785 786 /* DDR mode set */ 787 if (ios->timing == MMC_TIMING_UHS_DDR50) 788 regs |= (0x1 << slot->id) << 16; 789 else 790 regs &= ~(0x1 << slot->id) << 16; 791 792 mci_writel(slot->host, UHS_REG, regs); 793 794 if (ios->clock) { 795 /* 796 * Use mirror of ios->clock to prevent race with mmc 797 * core ios update when finding the minimum. 798 */ 799 slot->clock = ios->clock; 800 } 801 802 switch (ios->power_mode) { 803 case MMC_POWER_UP: 804 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags); 805 break; 806 default: 807 break; 808 } 809 } 810 811 static int dw_mci_get_ro(struct mmc_host *mmc) 812 { 813 int read_only; 814 struct dw_mci_slot *slot = mmc_priv(mmc); 815 struct dw_mci_board *brd = slot->host->pdata; 816 817 /* Use platform get_ro function, else try on board write protect */ 818 if (brd->get_ro) 819 read_only = brd->get_ro(slot->id); 820 else 821 read_only = 822 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0; 823 824 dev_dbg(&mmc->class_dev, "card is %s\n", 825 read_only ? "read-only" : "read-write"); 826 827 return read_only; 828 } 829 830 static int dw_mci_get_cd(struct mmc_host *mmc) 831 { 832 int present; 833 struct dw_mci_slot *slot = mmc_priv(mmc); 834 struct dw_mci_board *brd = slot->host->pdata; 835 836 /* Use platform get_cd function, else try onboard card detect */ 837 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) 838 present = 1; 839 else if (brd->get_cd) 840 present = !brd->get_cd(slot->id); 841 else 842 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id)) 843 == 0 ? 1 : 0; 844 845 if (present) 846 dev_dbg(&mmc->class_dev, "card is present\n"); 847 else 848 dev_dbg(&mmc->class_dev, "card is not present\n"); 849 850 return present; 851 } 852 853 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) 854 { 855 struct dw_mci_slot *slot = mmc_priv(mmc); 856 struct dw_mci *host = slot->host; 857 u32 int_mask; 858 859 /* Enable/disable Slot Specific SDIO interrupt */ 860 int_mask = mci_readl(host, INTMASK); 861 if (enb) { 862 mci_writel(host, INTMASK, 863 (int_mask | SDMMC_INT_SDIO(slot->id))); 864 } else { 865 mci_writel(host, INTMASK, 866 (int_mask & ~SDMMC_INT_SDIO(slot->id))); 867 } 868 } 869 870 static const struct mmc_host_ops dw_mci_ops = { 871 .request = dw_mci_request, 872 .pre_req = dw_mci_pre_req, 873 .post_req = dw_mci_post_req, 874 .set_ios = dw_mci_set_ios, 875 .get_ro = dw_mci_get_ro, 876 .get_cd = dw_mci_get_cd, 877 .enable_sdio_irq = dw_mci_enable_sdio_irq, 878 }; 879 880 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) 881 __releases(&host->lock) 882 __acquires(&host->lock) 883 { 884 struct dw_mci_slot *slot; 885 struct mmc_host *prev_mmc = host->cur_slot->mmc; 886 887 WARN_ON(host->cmd || host->data); 888 889 host->cur_slot->mrq = NULL; 890 host->mrq = NULL; 891 if (!list_empty(&host->queue)) { 892 slot = list_entry(host->queue.next, 893 struct dw_mci_slot, queue_node); 894 list_del(&slot->queue_node); 895 dev_vdbg(&host->dev, "list not empty: %s is next\n", 896 mmc_hostname(slot->mmc)); 897 host->state = STATE_SENDING_CMD; 898 dw_mci_start_request(host, slot); 899 } else { 900 dev_vdbg(&host->dev, "list empty\n"); 901 host->state = STATE_IDLE; 902 } 903 904 spin_unlock(&host->lock); 905 mmc_request_done(prev_mmc, mrq); 906 spin_lock(&host->lock); 907 } 908 909 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd) 910 { 911 u32 status = host->cmd_status; 912 913 host->cmd_status = 0; 914 915 /* Read the response from the card (up to 16 bytes) */ 916 if (cmd->flags & MMC_RSP_PRESENT) { 917 if (cmd->flags & MMC_RSP_136) { 918 cmd->resp[3] = mci_readl(host, RESP0); 919 cmd->resp[2] = mci_readl(host, RESP1); 920 cmd->resp[1] = mci_readl(host, RESP2); 921 cmd->resp[0] = mci_readl(host, RESP3); 922 } else { 923 cmd->resp[0] = mci_readl(host, RESP0); 924 cmd->resp[1] = 0; 925 cmd->resp[2] = 0; 926 cmd->resp[3] = 0; 927 } 928 } 929 930 if (status & SDMMC_INT_RTO) 931 cmd->error = -ETIMEDOUT; 932 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)) 933 cmd->error = -EILSEQ; 934 else if (status & SDMMC_INT_RESP_ERR) 935 cmd->error = -EIO; 936 else 937 cmd->error = 0; 938 939 if (cmd->error) { 940 /* newer ip versions need a delay between retries */ 941 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY) 942 mdelay(20); 943 944 if (cmd->data) { 945 dw_mci_stop_dma(host); 946 host->data = NULL; 947 } 948 } 949 } 950 951 static void dw_mci_tasklet_func(unsigned long priv) 952 { 953 struct dw_mci *host = (struct dw_mci *)priv; 954 struct mmc_data *data; 955 struct mmc_command *cmd; 956 enum dw_mci_state state; 957 enum dw_mci_state prev_state; 958 u32 status, ctrl; 959 960 spin_lock(&host->lock); 961 962 state = host->state; 963 data = host->data; 964 965 do { 966 prev_state = state; 967 968 switch (state) { 969 case STATE_IDLE: 970 break; 971 972 case STATE_SENDING_CMD: 973 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, 974 &host->pending_events)) 975 break; 976 977 cmd = host->cmd; 978 host->cmd = NULL; 979 set_bit(EVENT_CMD_COMPLETE, &host->completed_events); 980 dw_mci_command_complete(host, cmd); 981 if (cmd == host->mrq->sbc && !cmd->error) { 982 prev_state = state = STATE_SENDING_CMD; 983 __dw_mci_start_request(host, host->cur_slot, 984 host->mrq->cmd); 985 goto unlock; 986 } 987 988 if (!host->mrq->data || cmd->error) { 989 dw_mci_request_end(host, host->mrq); 990 goto unlock; 991 } 992 993 prev_state = state = STATE_SENDING_DATA; 994 /* fall through */ 995 996 case STATE_SENDING_DATA: 997 if (test_and_clear_bit(EVENT_DATA_ERROR, 998 &host->pending_events)) { 999 dw_mci_stop_dma(host); 1000 if (data->stop) 1001 send_stop_cmd(host, data); 1002 state = STATE_DATA_ERROR; 1003 break; 1004 } 1005 1006 if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 1007 &host->pending_events)) 1008 break; 1009 1010 set_bit(EVENT_XFER_COMPLETE, &host->completed_events); 1011 prev_state = state = STATE_DATA_BUSY; 1012 /* fall through */ 1013 1014 case STATE_DATA_BUSY: 1015 if (!test_and_clear_bit(EVENT_DATA_COMPLETE, 1016 &host->pending_events)) 1017 break; 1018 1019 host->data = NULL; 1020 set_bit(EVENT_DATA_COMPLETE, &host->completed_events); 1021 status = host->data_status; 1022 1023 if (status & DW_MCI_DATA_ERROR_FLAGS) { 1024 if (status & SDMMC_INT_DTO) { 1025 data->error = -ETIMEDOUT; 1026 } else if (status & SDMMC_INT_DCRC) { 1027 data->error = -EILSEQ; 1028 } else if (status & SDMMC_INT_EBE && 1029 host->dir_status == 1030 DW_MCI_SEND_STATUS) { 1031 /* 1032 * No data CRC status was returned. 1033 * The number of bytes transferred will 1034 * be exaggerated in PIO mode. 1035 */ 1036 data->bytes_xfered = 0; 1037 data->error = -ETIMEDOUT; 1038 } else { 1039 dev_err(&host->dev, 1040 "data FIFO error " 1041 "(status=%08x)\n", 1042 status); 1043 data->error = -EIO; 1044 } 1045 /* 1046 * After an error, there may be data lingering 1047 * in the FIFO, so reset it - doing so 1048 * generates a block interrupt, hence setting 1049 * the scatter-gather pointer to NULL. 1050 */ 1051 sg_miter_stop(&host->sg_miter); 1052 host->sg = NULL; 1053 ctrl = mci_readl(host, CTRL); 1054 ctrl |= SDMMC_CTRL_FIFO_RESET; 1055 mci_writel(host, CTRL, ctrl); 1056 } else { 1057 data->bytes_xfered = data->blocks * data->blksz; 1058 data->error = 0; 1059 } 1060 1061 if (!data->stop) { 1062 dw_mci_request_end(host, host->mrq); 1063 goto unlock; 1064 } 1065 1066 if (host->mrq->sbc && !data->error) { 1067 data->stop->error = 0; 1068 dw_mci_request_end(host, host->mrq); 1069 goto unlock; 1070 } 1071 1072 prev_state = state = STATE_SENDING_STOP; 1073 if (!data->error) 1074 send_stop_cmd(host, data); 1075 /* fall through */ 1076 1077 case STATE_SENDING_STOP: 1078 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, 1079 &host->pending_events)) 1080 break; 1081 1082 host->cmd = NULL; 1083 dw_mci_command_complete(host, host->mrq->stop); 1084 dw_mci_request_end(host, host->mrq); 1085 goto unlock; 1086 1087 case STATE_DATA_ERROR: 1088 if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 1089 &host->pending_events)) 1090 break; 1091 1092 state = STATE_DATA_BUSY; 1093 break; 1094 } 1095 } while (state != prev_state); 1096 1097 host->state = state; 1098 unlock: 1099 spin_unlock(&host->lock); 1100 1101 } 1102 1103 /* push final bytes to part_buf, only use during push */ 1104 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt) 1105 { 1106 memcpy((void *)&host->part_buf, buf, cnt); 1107 host->part_buf_count = cnt; 1108 } 1109 1110 /* append bytes to part_buf, only use during push */ 1111 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt) 1112 { 1113 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count); 1114 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt); 1115 host->part_buf_count += cnt; 1116 return cnt; 1117 } 1118 1119 /* pull first bytes from part_buf, only use during pull */ 1120 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt) 1121 { 1122 cnt = min(cnt, (int)host->part_buf_count); 1123 if (cnt) { 1124 memcpy(buf, (void *)&host->part_buf + host->part_buf_start, 1125 cnt); 1126 host->part_buf_count -= cnt; 1127 host->part_buf_start += cnt; 1128 } 1129 return cnt; 1130 } 1131 1132 /* pull final bytes from the part_buf, assuming it's just been filled */ 1133 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt) 1134 { 1135 memcpy(buf, &host->part_buf, cnt); 1136 host->part_buf_start = cnt; 1137 host->part_buf_count = (1 << host->data_shift) - cnt; 1138 } 1139 1140 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) 1141 { 1142 /* try and push anything in the part_buf */ 1143 if (unlikely(host->part_buf_count)) { 1144 int len = dw_mci_push_part_bytes(host, buf, cnt); 1145 buf += len; 1146 cnt -= len; 1147 if (!sg_next(host->sg) || host->part_buf_count == 2) { 1148 mci_writew(host, DATA(host->data_offset), 1149 host->part_buf16); 1150 host->part_buf_count = 0; 1151 } 1152 } 1153 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1154 if (unlikely((unsigned long)buf & 0x1)) { 1155 while (cnt >= 2) { 1156 u16 aligned_buf[64]; 1157 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 1158 int items = len >> 1; 1159 int i; 1160 /* memcpy from input buffer into aligned buffer */ 1161 memcpy(aligned_buf, buf, len); 1162 buf += len; 1163 cnt -= len; 1164 /* push data from aligned buffer into fifo */ 1165 for (i = 0; i < items; ++i) 1166 mci_writew(host, DATA(host->data_offset), 1167 aligned_buf[i]); 1168 } 1169 } else 1170 #endif 1171 { 1172 u16 *pdata = buf; 1173 for (; cnt >= 2; cnt -= 2) 1174 mci_writew(host, DATA(host->data_offset), *pdata++); 1175 buf = pdata; 1176 } 1177 /* put anything remaining in the part_buf */ 1178 if (cnt) { 1179 dw_mci_set_part_bytes(host, buf, cnt); 1180 if (!sg_next(host->sg)) 1181 mci_writew(host, DATA(host->data_offset), 1182 host->part_buf16); 1183 } 1184 } 1185 1186 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) 1187 { 1188 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1189 if (unlikely((unsigned long)buf & 0x1)) { 1190 while (cnt >= 2) { 1191 /* pull data from fifo into aligned buffer */ 1192 u16 aligned_buf[64]; 1193 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 1194 int items = len >> 1; 1195 int i; 1196 for (i = 0; i < items; ++i) 1197 aligned_buf[i] = mci_readw(host, 1198 DATA(host->data_offset)); 1199 /* memcpy from aligned buffer into output buffer */ 1200 memcpy(buf, aligned_buf, len); 1201 buf += len; 1202 cnt -= len; 1203 } 1204 } else 1205 #endif 1206 { 1207 u16 *pdata = buf; 1208 for (; cnt >= 2; cnt -= 2) 1209 *pdata++ = mci_readw(host, DATA(host->data_offset)); 1210 buf = pdata; 1211 } 1212 if (cnt) { 1213 host->part_buf16 = mci_readw(host, DATA(host->data_offset)); 1214 dw_mci_pull_final_bytes(host, buf, cnt); 1215 } 1216 } 1217 1218 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) 1219 { 1220 /* try and push anything in the part_buf */ 1221 if (unlikely(host->part_buf_count)) { 1222 int len = dw_mci_push_part_bytes(host, buf, cnt); 1223 buf += len; 1224 cnt -= len; 1225 if (!sg_next(host->sg) || host->part_buf_count == 4) { 1226 mci_writel(host, DATA(host->data_offset), 1227 host->part_buf32); 1228 host->part_buf_count = 0; 1229 } 1230 } 1231 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1232 if (unlikely((unsigned long)buf & 0x3)) { 1233 while (cnt >= 4) { 1234 u32 aligned_buf[32]; 1235 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 1236 int items = len >> 2; 1237 int i; 1238 /* memcpy from input buffer into aligned buffer */ 1239 memcpy(aligned_buf, buf, len); 1240 buf += len; 1241 cnt -= len; 1242 /* push data from aligned buffer into fifo */ 1243 for (i = 0; i < items; ++i) 1244 mci_writel(host, DATA(host->data_offset), 1245 aligned_buf[i]); 1246 } 1247 } else 1248 #endif 1249 { 1250 u32 *pdata = buf; 1251 for (; cnt >= 4; cnt -= 4) 1252 mci_writel(host, DATA(host->data_offset), *pdata++); 1253 buf = pdata; 1254 } 1255 /* put anything remaining in the part_buf */ 1256 if (cnt) { 1257 dw_mci_set_part_bytes(host, buf, cnt); 1258 if (!sg_next(host->sg)) 1259 mci_writel(host, DATA(host->data_offset), 1260 host->part_buf32); 1261 } 1262 } 1263 1264 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) 1265 { 1266 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1267 if (unlikely((unsigned long)buf & 0x3)) { 1268 while (cnt >= 4) { 1269 /* pull data from fifo into aligned buffer */ 1270 u32 aligned_buf[32]; 1271 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 1272 int items = len >> 2; 1273 int i; 1274 for (i = 0; i < items; ++i) 1275 aligned_buf[i] = mci_readl(host, 1276 DATA(host->data_offset)); 1277 /* memcpy from aligned buffer into output buffer */ 1278 memcpy(buf, aligned_buf, len); 1279 buf += len; 1280 cnt -= len; 1281 } 1282 } else 1283 #endif 1284 { 1285 u32 *pdata = buf; 1286 for (; cnt >= 4; cnt -= 4) 1287 *pdata++ = mci_readl(host, DATA(host->data_offset)); 1288 buf = pdata; 1289 } 1290 if (cnt) { 1291 host->part_buf32 = mci_readl(host, DATA(host->data_offset)); 1292 dw_mci_pull_final_bytes(host, buf, cnt); 1293 } 1294 } 1295 1296 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) 1297 { 1298 /* try and push anything in the part_buf */ 1299 if (unlikely(host->part_buf_count)) { 1300 int len = dw_mci_push_part_bytes(host, buf, cnt); 1301 buf += len; 1302 cnt -= len; 1303 if (!sg_next(host->sg) || host->part_buf_count == 8) { 1304 mci_writew(host, DATA(host->data_offset), 1305 host->part_buf); 1306 host->part_buf_count = 0; 1307 } 1308 } 1309 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1310 if (unlikely((unsigned long)buf & 0x7)) { 1311 while (cnt >= 8) { 1312 u64 aligned_buf[16]; 1313 int len = min(cnt & -8, (int)sizeof(aligned_buf)); 1314 int items = len >> 3; 1315 int i; 1316 /* memcpy from input buffer into aligned buffer */ 1317 memcpy(aligned_buf, buf, len); 1318 buf += len; 1319 cnt -= len; 1320 /* push data from aligned buffer into fifo */ 1321 for (i = 0; i < items; ++i) 1322 mci_writeq(host, DATA(host->data_offset), 1323 aligned_buf[i]); 1324 } 1325 } else 1326 #endif 1327 { 1328 u64 *pdata = buf; 1329 for (; cnt >= 8; cnt -= 8) 1330 mci_writeq(host, DATA(host->data_offset), *pdata++); 1331 buf = pdata; 1332 } 1333 /* put anything remaining in the part_buf */ 1334 if (cnt) { 1335 dw_mci_set_part_bytes(host, buf, cnt); 1336 if (!sg_next(host->sg)) 1337 mci_writeq(host, DATA(host->data_offset), 1338 host->part_buf); 1339 } 1340 } 1341 1342 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) 1343 { 1344 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1345 if (unlikely((unsigned long)buf & 0x7)) { 1346 while (cnt >= 8) { 1347 /* pull data from fifo into aligned buffer */ 1348 u64 aligned_buf[16]; 1349 int len = min(cnt & -8, (int)sizeof(aligned_buf)); 1350 int items = len >> 3; 1351 int i; 1352 for (i = 0; i < items; ++i) 1353 aligned_buf[i] = mci_readq(host, 1354 DATA(host->data_offset)); 1355 /* memcpy from aligned buffer into output buffer */ 1356 memcpy(buf, aligned_buf, len); 1357 buf += len; 1358 cnt -= len; 1359 } 1360 } else 1361 #endif 1362 { 1363 u64 *pdata = buf; 1364 for (; cnt >= 8; cnt -= 8) 1365 *pdata++ = mci_readq(host, DATA(host->data_offset)); 1366 buf = pdata; 1367 } 1368 if (cnt) { 1369 host->part_buf = mci_readq(host, DATA(host->data_offset)); 1370 dw_mci_pull_final_bytes(host, buf, cnt); 1371 } 1372 } 1373 1374 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) 1375 { 1376 int len; 1377 1378 /* get remaining partial bytes */ 1379 len = dw_mci_pull_part_bytes(host, buf, cnt); 1380 if (unlikely(len == cnt)) 1381 return; 1382 buf += len; 1383 cnt -= len; 1384 1385 /* get the rest of the data */ 1386 host->pull_data(host, buf, cnt); 1387 } 1388 1389 static void dw_mci_read_data_pio(struct dw_mci *host) 1390 { 1391 struct sg_mapping_iter *sg_miter = &host->sg_miter; 1392 void *buf; 1393 unsigned int offset; 1394 struct mmc_data *data = host->data; 1395 int shift = host->data_shift; 1396 u32 status; 1397 unsigned int nbytes = 0, len; 1398 unsigned int remain, fcnt; 1399 1400 do { 1401 if (!sg_miter_next(sg_miter)) 1402 goto done; 1403 1404 host->sg = sg_miter->__sg; 1405 buf = sg_miter->addr; 1406 remain = sg_miter->length; 1407 offset = 0; 1408 1409 do { 1410 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS)) 1411 << shift) + host->part_buf_count; 1412 len = min(remain, fcnt); 1413 if (!len) 1414 break; 1415 dw_mci_pull_data(host, (void *)(buf + offset), len); 1416 offset += len; 1417 nbytes += len; 1418 remain -= len; 1419 } while (remain); 1420 sg_miter->consumed = offset; 1421 1422 status = mci_readl(host, MINTSTS); 1423 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 1424 if (status & DW_MCI_DATA_ERROR_FLAGS) { 1425 host->data_status = status; 1426 data->bytes_xfered += nbytes; 1427 sg_miter_stop(sg_miter); 1428 host->sg = NULL; 1429 smp_wmb(); 1430 1431 set_bit(EVENT_DATA_ERROR, &host->pending_events); 1432 1433 tasklet_schedule(&host->tasklet); 1434 return; 1435 } 1436 } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ 1437 data->bytes_xfered += nbytes; 1438 1439 if (!remain) { 1440 if (!sg_miter_next(sg_miter)) 1441 goto done; 1442 sg_miter->consumed = 0; 1443 } 1444 sg_miter_stop(sg_miter); 1445 return; 1446 1447 done: 1448 data->bytes_xfered += nbytes; 1449 sg_miter_stop(sg_miter); 1450 host->sg = NULL; 1451 smp_wmb(); 1452 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 1453 } 1454 1455 static void dw_mci_write_data_pio(struct dw_mci *host) 1456 { 1457 struct sg_mapping_iter *sg_miter = &host->sg_miter; 1458 void *buf; 1459 unsigned int offset; 1460 struct mmc_data *data = host->data; 1461 int shift = host->data_shift; 1462 u32 status; 1463 unsigned int nbytes = 0, len; 1464 unsigned int fifo_depth = host->fifo_depth; 1465 unsigned int remain, fcnt; 1466 1467 do { 1468 if (!sg_miter_next(sg_miter)) 1469 goto done; 1470 1471 host->sg = sg_miter->__sg; 1472 buf = sg_miter->addr; 1473 remain = sg_miter->length; 1474 offset = 0; 1475 1476 do { 1477 fcnt = ((fifo_depth - 1478 SDMMC_GET_FCNT(mci_readl(host, STATUS))) 1479 << shift) - host->part_buf_count; 1480 len = min(remain, fcnt); 1481 if (!len) 1482 break; 1483 host->push_data(host, (void *)(buf + offset), len); 1484 offset += len; 1485 nbytes += len; 1486 remain -= len; 1487 } while (remain); 1488 sg_miter->consumed = offset; 1489 1490 status = mci_readl(host, MINTSTS); 1491 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 1492 if (status & DW_MCI_DATA_ERROR_FLAGS) { 1493 host->data_status = status; 1494 data->bytes_xfered += nbytes; 1495 sg_miter_stop(sg_miter); 1496 host->sg = NULL; 1497 1498 smp_wmb(); 1499 1500 set_bit(EVENT_DATA_ERROR, &host->pending_events); 1501 1502 tasklet_schedule(&host->tasklet); 1503 return; 1504 } 1505 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ 1506 data->bytes_xfered += nbytes; 1507 1508 if (!remain) { 1509 if (!sg_miter_next(sg_miter)) 1510 goto done; 1511 sg_miter->consumed = 0; 1512 } 1513 sg_miter_stop(sg_miter); 1514 return; 1515 1516 done: 1517 data->bytes_xfered += nbytes; 1518 sg_miter_stop(sg_miter); 1519 host->sg = NULL; 1520 smp_wmb(); 1521 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 1522 } 1523 1524 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) 1525 { 1526 if (!host->cmd_status) 1527 host->cmd_status = status; 1528 1529 smp_wmb(); 1530 1531 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 1532 tasklet_schedule(&host->tasklet); 1533 } 1534 1535 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) 1536 { 1537 struct dw_mci *host = dev_id; 1538 u32 status, pending; 1539 unsigned int pass_count = 0; 1540 int i; 1541 1542 do { 1543 status = mci_readl(host, RINTSTS); 1544 pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 1545 1546 /* 1547 * DTO fix - version 2.10a and below, and only if internal DMA 1548 * is configured. 1549 */ 1550 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) { 1551 if (!pending && 1552 ((mci_readl(host, STATUS) >> 17) & 0x1fff)) 1553 pending |= SDMMC_INT_DATA_OVER; 1554 } 1555 1556 if (!pending) 1557 break; 1558 1559 if (pending & DW_MCI_CMD_ERROR_FLAGS) { 1560 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); 1561 host->cmd_status = status; 1562 smp_wmb(); 1563 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 1564 } 1565 1566 if (pending & DW_MCI_DATA_ERROR_FLAGS) { 1567 /* if there is an error report DATA_ERROR */ 1568 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); 1569 host->data_status = status; 1570 smp_wmb(); 1571 set_bit(EVENT_DATA_ERROR, &host->pending_events); 1572 if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC | 1573 SDMMC_INT_SBE | SDMMC_INT_EBE))) 1574 tasklet_schedule(&host->tasklet); 1575 } 1576 1577 if (pending & SDMMC_INT_DATA_OVER) { 1578 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); 1579 if (!host->data_status) 1580 host->data_status = status; 1581 smp_wmb(); 1582 if (host->dir_status == DW_MCI_RECV_STATUS) { 1583 if (host->sg != NULL) 1584 dw_mci_read_data_pio(host); 1585 } 1586 set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 1587 tasklet_schedule(&host->tasklet); 1588 } 1589 1590 if (pending & SDMMC_INT_RXDR) { 1591 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 1592 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg) 1593 dw_mci_read_data_pio(host); 1594 } 1595 1596 if (pending & SDMMC_INT_TXDR) { 1597 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 1598 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg) 1599 dw_mci_write_data_pio(host); 1600 } 1601 1602 if (pending & SDMMC_INT_CMD_DONE) { 1603 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); 1604 dw_mci_cmd_interrupt(host, status); 1605 } 1606 1607 if (pending & SDMMC_INT_CD) { 1608 mci_writel(host, RINTSTS, SDMMC_INT_CD); 1609 queue_work(host->card_workqueue, &host->card_work); 1610 } 1611 1612 /* Handle SDIO Interrupts */ 1613 for (i = 0; i < host->num_slots; i++) { 1614 struct dw_mci_slot *slot = host->slot[i]; 1615 if (pending & SDMMC_INT_SDIO(i)) { 1616 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i)); 1617 mmc_signal_sdio_irq(slot->mmc); 1618 } 1619 } 1620 1621 } while (pass_count++ < 5); 1622 1623 #ifdef CONFIG_MMC_DW_IDMAC 1624 /* Handle DMA interrupts */ 1625 pending = mci_readl(host, IDSTS); 1626 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 1627 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI); 1628 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); 1629 host->dma_ops->complete(host); 1630 } 1631 #endif 1632 1633 return IRQ_HANDLED; 1634 } 1635 1636 static void dw_mci_work_routine_card(struct work_struct *work) 1637 { 1638 struct dw_mci *host = container_of(work, struct dw_mci, card_work); 1639 int i; 1640 1641 for (i = 0; i < host->num_slots; i++) { 1642 struct dw_mci_slot *slot = host->slot[i]; 1643 struct mmc_host *mmc = slot->mmc; 1644 struct mmc_request *mrq; 1645 int present; 1646 u32 ctrl; 1647 1648 present = dw_mci_get_cd(mmc); 1649 while (present != slot->last_detect_state) { 1650 dev_dbg(&slot->mmc->class_dev, "card %s\n", 1651 present ? "inserted" : "removed"); 1652 1653 /* Power up slot (before spin_lock, may sleep) */ 1654 if (present != 0 && host->pdata->setpower) 1655 host->pdata->setpower(slot->id, mmc->ocr_avail); 1656 1657 spin_lock_bh(&host->lock); 1658 1659 /* Card change detected */ 1660 slot->last_detect_state = present; 1661 1662 /* Mark card as present if applicable */ 1663 if (present != 0) 1664 set_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1665 1666 /* Clean up queue if present */ 1667 mrq = slot->mrq; 1668 if (mrq) { 1669 if (mrq == host->mrq) { 1670 host->data = NULL; 1671 host->cmd = NULL; 1672 1673 switch (host->state) { 1674 case STATE_IDLE: 1675 break; 1676 case STATE_SENDING_CMD: 1677 mrq->cmd->error = -ENOMEDIUM; 1678 if (!mrq->data) 1679 break; 1680 /* fall through */ 1681 case STATE_SENDING_DATA: 1682 mrq->data->error = -ENOMEDIUM; 1683 dw_mci_stop_dma(host); 1684 break; 1685 case STATE_DATA_BUSY: 1686 case STATE_DATA_ERROR: 1687 if (mrq->data->error == -EINPROGRESS) 1688 mrq->data->error = -ENOMEDIUM; 1689 if (!mrq->stop) 1690 break; 1691 /* fall through */ 1692 case STATE_SENDING_STOP: 1693 mrq->stop->error = -ENOMEDIUM; 1694 break; 1695 } 1696 1697 dw_mci_request_end(host, mrq); 1698 } else { 1699 list_del(&slot->queue_node); 1700 mrq->cmd->error = -ENOMEDIUM; 1701 if (mrq->data) 1702 mrq->data->error = -ENOMEDIUM; 1703 if (mrq->stop) 1704 mrq->stop->error = -ENOMEDIUM; 1705 1706 spin_unlock(&host->lock); 1707 mmc_request_done(slot->mmc, mrq); 1708 spin_lock(&host->lock); 1709 } 1710 } 1711 1712 /* Power down slot */ 1713 if (present == 0) { 1714 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1715 1716 /* 1717 * Clear down the FIFO - doing so generates a 1718 * block interrupt, hence setting the 1719 * scatter-gather pointer to NULL. 1720 */ 1721 sg_miter_stop(&host->sg_miter); 1722 host->sg = NULL; 1723 1724 ctrl = mci_readl(host, CTRL); 1725 ctrl |= SDMMC_CTRL_FIFO_RESET; 1726 mci_writel(host, CTRL, ctrl); 1727 1728 #ifdef CONFIG_MMC_DW_IDMAC 1729 ctrl = mci_readl(host, BMOD); 1730 /* Software reset of DMA */ 1731 ctrl |= SDMMC_IDMAC_SWRESET; 1732 mci_writel(host, BMOD, ctrl); 1733 #endif 1734 1735 } 1736 1737 spin_unlock_bh(&host->lock); 1738 1739 /* Power down slot (after spin_unlock, may sleep) */ 1740 if (present == 0 && host->pdata->setpower) 1741 host->pdata->setpower(slot->id, 0); 1742 1743 present = dw_mci_get_cd(mmc); 1744 } 1745 1746 mmc_detect_change(slot->mmc, 1747 msecs_to_jiffies(host->pdata->detect_delay_ms)); 1748 } 1749 } 1750 1751 static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) 1752 { 1753 struct mmc_host *mmc; 1754 struct dw_mci_slot *slot; 1755 1756 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->dev); 1757 if (!mmc) 1758 return -ENOMEM; 1759 1760 slot = mmc_priv(mmc); 1761 slot->id = id; 1762 slot->mmc = mmc; 1763 slot->host = host; 1764 1765 mmc->ops = &dw_mci_ops; 1766 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510); 1767 mmc->f_max = host->bus_hz; 1768 1769 if (host->pdata->get_ocr) 1770 mmc->ocr_avail = host->pdata->get_ocr(id); 1771 else 1772 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1773 1774 /* 1775 * Start with slot power disabled, it will be enabled when a card 1776 * is detected. 1777 */ 1778 if (host->pdata->setpower) 1779 host->pdata->setpower(id, 0); 1780 1781 if (host->pdata->caps) 1782 mmc->caps = host->pdata->caps; 1783 1784 if (host->pdata->caps2) 1785 mmc->caps2 = host->pdata->caps2; 1786 1787 if (host->pdata->get_bus_wd) 1788 if (host->pdata->get_bus_wd(slot->id) >= 4) 1789 mmc->caps |= MMC_CAP_4_BIT_DATA; 1790 1791 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) 1792 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 1793 1794 if (mmc->caps2 & MMC_CAP2_POWEROFF_NOTIFY) 1795 mmc->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT; 1796 else 1797 mmc->power_notify_type = MMC_HOST_PW_NOTIFY_NONE; 1798 1799 if (host->pdata->blk_settings) { 1800 mmc->max_segs = host->pdata->blk_settings->max_segs; 1801 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size; 1802 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count; 1803 mmc->max_req_size = host->pdata->blk_settings->max_req_size; 1804 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size; 1805 } else { 1806 /* Useful defaults if platform data is unset. */ 1807 #ifdef CONFIG_MMC_DW_IDMAC 1808 mmc->max_segs = host->ring_size; 1809 mmc->max_blk_size = 65536; 1810 mmc->max_blk_count = host->ring_size; 1811 mmc->max_seg_size = 0x1000; 1812 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count; 1813 #else 1814 mmc->max_segs = 64; 1815 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */ 1816 mmc->max_blk_count = 512; 1817 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1818 mmc->max_seg_size = mmc->max_req_size; 1819 #endif /* CONFIG_MMC_DW_IDMAC */ 1820 } 1821 1822 host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); 1823 if (IS_ERR(host->vmmc)) { 1824 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc)); 1825 host->vmmc = NULL; 1826 } else 1827 regulator_enable(host->vmmc); 1828 1829 if (dw_mci_get_cd(mmc)) 1830 set_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1831 else 1832 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1833 1834 host->slot[id] = slot; 1835 mmc_add_host(mmc); 1836 1837 #if defined(CONFIG_DEBUG_FS) 1838 dw_mci_init_debugfs(slot); 1839 #endif 1840 1841 /* Card initially undetected */ 1842 slot->last_detect_state = 0; 1843 1844 /* 1845 * Card may have been plugged in prior to boot so we 1846 * need to run the detect tasklet 1847 */ 1848 queue_work(host->card_workqueue, &host->card_work); 1849 1850 return 0; 1851 } 1852 1853 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id) 1854 { 1855 /* Shutdown detect IRQ */ 1856 if (slot->host->pdata->exit) 1857 slot->host->pdata->exit(id); 1858 1859 /* Debugfs stuff is cleaned up by mmc core */ 1860 mmc_remove_host(slot->mmc); 1861 slot->host->slot[id] = NULL; 1862 mmc_free_host(slot->mmc); 1863 } 1864 1865 static void dw_mci_init_dma(struct dw_mci *host) 1866 { 1867 /* Alloc memory for sg translation */ 1868 host->sg_cpu = dma_alloc_coherent(&host->dev, PAGE_SIZE, 1869 &host->sg_dma, GFP_KERNEL); 1870 if (!host->sg_cpu) { 1871 dev_err(&host->dev, "%s: could not alloc DMA memory\n", 1872 __func__); 1873 goto no_dma; 1874 } 1875 1876 /* Determine which DMA interface to use */ 1877 #ifdef CONFIG_MMC_DW_IDMAC 1878 host->dma_ops = &dw_mci_idmac_ops; 1879 dev_info(&host->dev, "Using internal DMA controller.\n"); 1880 #endif 1881 1882 if (!host->dma_ops) 1883 goto no_dma; 1884 1885 if (host->dma_ops->init && host->dma_ops->start && 1886 host->dma_ops->stop && host->dma_ops->cleanup) { 1887 if (host->dma_ops->init(host)) { 1888 dev_err(&host->dev, "%s: Unable to initialize " 1889 "DMA Controller.\n", __func__); 1890 goto no_dma; 1891 } 1892 } else { 1893 dev_err(&host->dev, "DMA initialization not found.\n"); 1894 goto no_dma; 1895 } 1896 1897 host->use_dma = 1; 1898 return; 1899 1900 no_dma: 1901 dev_info(&host->dev, "Using PIO mode.\n"); 1902 host->use_dma = 0; 1903 return; 1904 } 1905 1906 static bool mci_wait_reset(struct device *dev, struct dw_mci *host) 1907 { 1908 unsigned long timeout = jiffies + msecs_to_jiffies(500); 1909 unsigned int ctrl; 1910 1911 mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | 1912 SDMMC_CTRL_DMA_RESET)); 1913 1914 /* wait till resets clear */ 1915 do { 1916 ctrl = mci_readl(host, CTRL); 1917 if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | 1918 SDMMC_CTRL_DMA_RESET))) 1919 return true; 1920 } while (time_before(jiffies, timeout)); 1921 1922 dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl); 1923 1924 return false; 1925 } 1926 1927 int dw_mci_probe(struct dw_mci *host) 1928 { 1929 int width, i, ret = 0; 1930 u32 fifo_size; 1931 1932 if (!host->pdata || !host->pdata->init) { 1933 dev_err(&host->dev, 1934 "Platform data must supply init function\n"); 1935 return -ENODEV; 1936 } 1937 1938 if (!host->pdata->select_slot && host->pdata->num_slots > 1) { 1939 dev_err(&host->dev, 1940 "Platform data must supply select_slot function\n"); 1941 return -ENODEV; 1942 } 1943 1944 if (!host->pdata->bus_hz) { 1945 dev_err(&host->dev, 1946 "Platform data must supply bus speed\n"); 1947 return -ENODEV; 1948 } 1949 1950 host->bus_hz = host->pdata->bus_hz; 1951 host->quirks = host->pdata->quirks; 1952 1953 spin_lock_init(&host->lock); 1954 INIT_LIST_HEAD(&host->queue); 1955 1956 /* 1957 * Get the host data width - this assumes that HCON has been set with 1958 * the correct values. 1959 */ 1960 i = (mci_readl(host, HCON) >> 7) & 0x7; 1961 if (!i) { 1962 host->push_data = dw_mci_push_data16; 1963 host->pull_data = dw_mci_pull_data16; 1964 width = 16; 1965 host->data_shift = 1; 1966 } else if (i == 2) { 1967 host->push_data = dw_mci_push_data64; 1968 host->pull_data = dw_mci_pull_data64; 1969 width = 64; 1970 host->data_shift = 3; 1971 } else { 1972 /* Check for a reserved value, and warn if it is */ 1973 WARN((i != 1), 1974 "HCON reports a reserved host data width!\n" 1975 "Defaulting to 32-bit access.\n"); 1976 host->push_data = dw_mci_push_data32; 1977 host->pull_data = dw_mci_pull_data32; 1978 width = 32; 1979 host->data_shift = 2; 1980 } 1981 1982 /* Reset all blocks */ 1983 if (!mci_wait_reset(&host->dev, host)) 1984 return -ENODEV; 1985 1986 host->dma_ops = host->pdata->dma_ops; 1987 dw_mci_init_dma(host); 1988 1989 /* Clear the interrupts for the host controller */ 1990 mci_writel(host, RINTSTS, 0xFFFFFFFF); 1991 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 1992 1993 /* Put in max timeout */ 1994 mci_writel(host, TMOUT, 0xFFFFFFFF); 1995 1996 /* 1997 * FIFO threshold settings RxMark = fifo_size / 2 - 1, 1998 * Tx Mark = fifo_size / 2 DMA Size = 8 1999 */ 2000 if (!host->pdata->fifo_depth) { 2001 /* 2002 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may 2003 * have been overwritten by the bootloader, just like we're 2004 * about to do, so if you know the value for your hardware, you 2005 * should put it in the platform data. 2006 */ 2007 fifo_size = mci_readl(host, FIFOTH); 2008 fifo_size = 1 + ((fifo_size >> 16) & 0xfff); 2009 } else { 2010 fifo_size = host->pdata->fifo_depth; 2011 } 2012 host->fifo_depth = fifo_size; 2013 host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) | 2014 ((fifo_size/2) << 0)); 2015 mci_writel(host, FIFOTH, host->fifoth_val); 2016 2017 /* disable clock to CIU */ 2018 mci_writel(host, CLKENA, 0); 2019 mci_writel(host, CLKSRC, 0); 2020 2021 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); 2022 host->card_workqueue = alloc_workqueue("dw-mci-card", 2023 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1); 2024 if (!host->card_workqueue) 2025 goto err_dmaunmap; 2026 INIT_WORK(&host->card_work, dw_mci_work_routine_card); 2027 ret = request_irq(host->irq, dw_mci_interrupt, host->irq_flags, "dw-mci", host); 2028 if (ret) 2029 goto err_workqueue; 2030 2031 if (host->pdata->num_slots) 2032 host->num_slots = host->pdata->num_slots; 2033 else 2034 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1; 2035 2036 /* We need at least one slot to succeed */ 2037 for (i = 0; i < host->num_slots; i++) { 2038 ret = dw_mci_init_slot(host, i); 2039 if (ret) { 2040 ret = -ENODEV; 2041 goto err_init_slot; 2042 } 2043 } 2044 2045 /* 2046 * In 2.40a spec, Data offset is changed. 2047 * Need to check the version-id and set data-offset for DATA register. 2048 */ 2049 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); 2050 dev_info(&host->dev, "Version ID is %04x\n", host->verid); 2051 2052 if (host->verid < DW_MMC_240A) 2053 host->data_offset = DATA_OFFSET; 2054 else 2055 host->data_offset = DATA_240A_OFFSET; 2056 2057 /* 2058 * Enable interrupts for command done, data over, data empty, card det, 2059 * receive ready and error such as transmit, receive timeout, crc error 2060 */ 2061 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2062 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 2063 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 2064 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); 2065 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ 2066 2067 dev_info(&host->dev, "DW MMC controller at irq %d, " 2068 "%d bit host data width, " 2069 "%u deep fifo\n", 2070 host->irq, width, fifo_size); 2071 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) 2072 dev_info(&host->dev, "Internal DMAC interrupt fix enabled.\n"); 2073 2074 return 0; 2075 2076 err_init_slot: 2077 /* De-init any initialized slots */ 2078 while (i > 0) { 2079 if (host->slot[i]) 2080 dw_mci_cleanup_slot(host->slot[i], i); 2081 i--; 2082 } 2083 free_irq(host->irq, host); 2084 2085 err_workqueue: 2086 destroy_workqueue(host->card_workqueue); 2087 2088 err_dmaunmap: 2089 if (host->use_dma && host->dma_ops->exit) 2090 host->dma_ops->exit(host); 2091 dma_free_coherent(&host->dev, PAGE_SIZE, 2092 host->sg_cpu, host->sg_dma); 2093 2094 if (host->vmmc) { 2095 regulator_disable(host->vmmc); 2096 regulator_put(host->vmmc); 2097 } 2098 return ret; 2099 } 2100 EXPORT_SYMBOL(dw_mci_probe); 2101 2102 void dw_mci_remove(struct dw_mci *host) 2103 { 2104 int i; 2105 2106 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2107 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 2108 2109 for (i = 0; i < host->num_slots; i++) { 2110 dev_dbg(&host->dev, "remove slot %d\n", i); 2111 if (host->slot[i]) 2112 dw_mci_cleanup_slot(host->slot[i], i); 2113 } 2114 2115 /* disable clock to CIU */ 2116 mci_writel(host, CLKENA, 0); 2117 mci_writel(host, CLKSRC, 0); 2118 2119 free_irq(host->irq, host); 2120 destroy_workqueue(host->card_workqueue); 2121 dma_free_coherent(&host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); 2122 2123 if (host->use_dma && host->dma_ops->exit) 2124 host->dma_ops->exit(host); 2125 2126 if (host->vmmc) { 2127 regulator_disable(host->vmmc); 2128 regulator_put(host->vmmc); 2129 } 2130 2131 } 2132 EXPORT_SYMBOL(dw_mci_remove); 2133 2134 2135 2136 #ifdef CONFIG_PM_SLEEP 2137 /* 2138 * TODO: we should probably disable the clock to the card in the suspend path. 2139 */ 2140 int dw_mci_suspend(struct dw_mci *host) 2141 { 2142 int i, ret = 0; 2143 2144 for (i = 0; i < host->num_slots; i++) { 2145 struct dw_mci_slot *slot = host->slot[i]; 2146 if (!slot) 2147 continue; 2148 ret = mmc_suspend_host(slot->mmc); 2149 if (ret < 0) { 2150 while (--i >= 0) { 2151 slot = host->slot[i]; 2152 if (slot) 2153 mmc_resume_host(host->slot[i]->mmc); 2154 } 2155 return ret; 2156 } 2157 } 2158 2159 if (host->vmmc) 2160 regulator_disable(host->vmmc); 2161 2162 return 0; 2163 } 2164 EXPORT_SYMBOL(dw_mci_suspend); 2165 2166 int dw_mci_resume(struct dw_mci *host) 2167 { 2168 int i, ret; 2169 2170 if (host->vmmc) 2171 regulator_enable(host->vmmc); 2172 2173 if (!mci_wait_reset(&host->dev, host)) { 2174 ret = -ENODEV; 2175 return ret; 2176 } 2177 2178 if (host->dma_ops->init) 2179 host->dma_ops->init(host); 2180 2181 /* Restore the old value at FIFOTH register */ 2182 mci_writel(host, FIFOTH, host->fifoth_val); 2183 2184 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2185 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 2186 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 2187 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); 2188 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 2189 2190 for (i = 0; i < host->num_slots; i++) { 2191 struct dw_mci_slot *slot = host->slot[i]; 2192 if (!slot) 2193 continue; 2194 ret = mmc_resume_host(host->slot[i]->mmc); 2195 if (ret < 0) 2196 return ret; 2197 } 2198 return 0; 2199 } 2200 EXPORT_SYMBOL(dw_mci_resume); 2201 #endif /* CONFIG_PM_SLEEP */ 2202 2203 static int __init dw_mci_init(void) 2204 { 2205 printk(KERN_INFO "Synopsys Designware Multimedia Card Interface Driver"); 2206 return 0; 2207 } 2208 2209 static void __exit dw_mci_exit(void) 2210 { 2211 } 2212 2213 module_init(dw_mci_init); 2214 module_exit(dw_mci_exit); 2215 2216 MODULE_DESCRIPTION("DW Multimedia Card Interface driver"); 2217 MODULE_AUTHOR("NXP Semiconductor VietNam"); 2218 MODULE_AUTHOR("Imagination Technologies Ltd"); 2219 MODULE_LICENSE("GPL v2"); 2220