/*
 * Synopsys DesignWare Multimedia Card Interface driver
 * (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>
#include <linux/of.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#ifdef CONFIG_MMC_DW_IDMAC
struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	u32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */

/**
 * struct dw_mci_slot - MMC slot state
 * @mmc: The mmc_host representing this slot.
 * @host: The MMC controller this slot is using.
 * @ctype: Card type for this slot.
 * @mrq: mmc_request currently being processed or waiting to be
 *	processed, or NULL when the slot is idle.
 * @queue_node: List node for placing this node in the @queue list of
 *	&struct dw_mci.
 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
 * @flags: Random state bits associated with the slot.
 * @id: Number of this slot.
 * @last_detect_state: Most recently observed card detect state.
 */
struct dw_mci_slot {
	struct mmc_host		*mmc;
	struct dw_mci		*host;

	u32			ctype;

	struct mmc_request	*mrq;
	struct list_head	queue_node;

	unsigned int		clock;
	unsigned long		flags;
#define DW_MMC_CARD_PRESENT	0
#define DW_MMC_CARD_NEED_INIT	1
	int			id;
	int			last_detect_state;
};

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data	*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
	seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
	seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
	seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
	seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
	seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);

	return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static void dw_mci_set_timeout(struct dw_mci *host)
{
	/* timeout (maximum) */
	mci_writel(host, TMOUT, 0xffffffff);
}

static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 cmdr;

	cmd->error = -EINPROGRESS;

	cmdr = cmd->opcode;

	if (cmdr == MMC_STOP_TRANSMISSION)
		cmdr |= SDMMC_CMD_STOP;
	else
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (slot->host->drv_data && slot->host->drv_data->prepare_command)
		slot->host->drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb();

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}

static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
{
	dw_mci_start_command(host, data->stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	} else {
		/* Data transfer was stopped by the interrupt handler */
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
	}
}

static int dw_mci_get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

#ifdef CONFIG_MMC_DW_IDMAC
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data)
		if (!data->host_cookie)
			dma_unmap_sg(host->dev,
				     data->sg,
				     data->sg_len,
				     dw_mci_get_dma_dir(data));
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	mci_writel(host, BMOD, temp);
}

static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
349 */ 350 if (data) { 351 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 352 tasklet_schedule(&host->tasklet); 353 } 354 } 355 356 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data, 357 unsigned int sg_len) 358 { 359 int i; 360 struct idmac_desc *desc = host->sg_cpu; 361 362 for (i = 0; i < sg_len; i++, desc++) { 363 unsigned int length = sg_dma_len(&data->sg[i]); 364 u32 mem_addr = sg_dma_address(&data->sg[i]); 365 366 /* Set the OWN bit and disable interrupts for this descriptor */ 367 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH; 368 369 /* Buffer length */ 370 IDMAC_SET_BUFFER1_SIZE(desc, length); 371 372 /* Physical address to DMA to/from */ 373 desc->des2 = mem_addr; 374 } 375 376 /* Set first descriptor */ 377 desc = host->sg_cpu; 378 desc->des0 |= IDMAC_DES0_FD; 379 380 /* Set last descriptor */ 381 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc); 382 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC); 383 desc->des0 |= IDMAC_DES0_LD; 384 385 wmb(); 386 } 387 388 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) 389 { 390 u32 temp; 391 392 dw_mci_translate_sglist(host, host->data, sg_len); 393 394 /* Select IDMAC interface */ 395 temp = mci_readl(host, CTRL); 396 temp |= SDMMC_CTRL_USE_IDMAC; 397 mci_writel(host, CTRL, temp); 398 399 wmb(); 400 401 /* Enable the IDMAC */ 402 temp = mci_readl(host, BMOD); 403 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB; 404 mci_writel(host, BMOD, temp); 405 406 /* Start it running */ 407 mci_writel(host, PLDMND, 1); 408 } 409 410 static int dw_mci_idmac_init(struct dw_mci *host) 411 { 412 struct idmac_desc *p; 413 int i; 414 415 /* Number of descriptors in the ring buffer */ 416 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc); 417 418 /* Forward link the descriptor list */ 419 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) 420 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1)); 421 422 /* Set the last descriptor as the end-of-ring descriptor */ 423 p->des3 = host->sg_dma; 424 p->des0 = IDMAC_DES0_ER; 425 426 mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET); 427 428 /* Mask out interrupts - get Tx & Rx complete only */ 429 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI | 430 SDMMC_IDMAC_INT_TI); 431 432 /* Set the descriptor base address */ 433 mci_writel(host, DBADDR, host->sg_dma); 434 return 0; 435 } 436 437 static struct dw_mci_dma_ops dw_mci_idmac_ops = { 438 .init = dw_mci_idmac_init, 439 .start = dw_mci_idmac_start_dma, 440 .stop = dw_mci_idmac_stop_dma, 441 .complete = dw_mci_idmac_complete_dma, 442 .cleanup = dw_mci_dma_cleanup, 443 }; 444 #endif /* CONFIG_MMC_DW_IDMAC */ 445 446 static int dw_mci_pre_dma_transfer(struct dw_mci *host, 447 struct mmc_data *data, 448 bool next) 449 { 450 struct scatterlist *sg; 451 unsigned int i, sg_len; 452 453 if (!next && data->host_cookie) 454 return data->host_cookie; 455 456 /* 457 * We don't do DMA on "complex" transfers, i.e. with 458 * non-word-aligned buffers or lengths. Also, we don't bother 459 * with all the DMA setup overhead for short transfers. 
460 */ 461 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD) 462 return -EINVAL; 463 464 if (data->blksz & 3) 465 return -EINVAL; 466 467 for_each_sg(data->sg, sg, data->sg_len, i) { 468 if (sg->offset & 3 || sg->length & 3) 469 return -EINVAL; 470 } 471 472 sg_len = dma_map_sg(host->dev, 473 data->sg, 474 data->sg_len, 475 dw_mci_get_dma_dir(data)); 476 if (sg_len == 0) 477 return -EINVAL; 478 479 if (next) 480 data->host_cookie = sg_len; 481 482 return sg_len; 483 } 484 485 static void dw_mci_pre_req(struct mmc_host *mmc, 486 struct mmc_request *mrq, 487 bool is_first_req) 488 { 489 struct dw_mci_slot *slot = mmc_priv(mmc); 490 struct mmc_data *data = mrq->data; 491 492 if (!slot->host->use_dma || !data) 493 return; 494 495 if (data->host_cookie) { 496 data->host_cookie = 0; 497 return; 498 } 499 500 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0) 501 data->host_cookie = 0; 502 } 503 504 static void dw_mci_post_req(struct mmc_host *mmc, 505 struct mmc_request *mrq, 506 int err) 507 { 508 struct dw_mci_slot *slot = mmc_priv(mmc); 509 struct mmc_data *data = mrq->data; 510 511 if (!slot->host->use_dma || !data) 512 return; 513 514 if (data->host_cookie) 515 dma_unmap_sg(slot->host->dev, 516 data->sg, 517 data->sg_len, 518 dw_mci_get_dma_dir(data)); 519 data->host_cookie = 0; 520 } 521 522 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) 523 { 524 int sg_len; 525 u32 temp; 526 527 host->using_dma = 0; 528 529 /* If we don't have a channel, we can't do DMA */ 530 if (!host->use_dma) 531 return -ENODEV; 532 533 sg_len = dw_mci_pre_dma_transfer(host, data, 0); 534 if (sg_len < 0) { 535 host->dma_ops->stop(host); 536 return sg_len; 537 } 538 539 host->using_dma = 1; 540 541 dev_vdbg(host->dev, 542 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", 543 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma, 544 sg_len); 545 546 /* Enable the DMA interface */ 547 temp = mci_readl(host, CTRL); 548 temp |= SDMMC_CTRL_DMA_ENABLE; 549 mci_writel(host, CTRL, temp); 550 551 /* Disable RX/TX IRQs, let DMA handle it */ 552 temp = mci_readl(host, INTMASK); 553 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR); 554 mci_writel(host, INTMASK, temp); 555 556 host->dma_ops->start(host, sg_len); 557 558 return 0; 559 } 560 561 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) 562 { 563 u32 temp; 564 565 data->error = -EINPROGRESS; 566 567 WARN_ON(host->data); 568 host->sg = NULL; 569 host->data = data; 570 571 if (data->flags & MMC_DATA_READ) 572 host->dir_status = DW_MCI_RECV_STATUS; 573 else 574 host->dir_status = DW_MCI_SEND_STATUS; 575 576 if (dw_mci_submit_data_dma(host, data)) { 577 int flags = SG_MITER_ATOMIC; 578 if (host->data->flags & MMC_DATA_READ) 579 flags |= SG_MITER_TO_SG; 580 else 581 flags |= SG_MITER_FROM_SG; 582 583 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 584 host->sg = data->sg; 585 host->part_buf_start = 0; 586 host->part_buf_count = 0; 587 588 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR); 589 temp = mci_readl(host, INTMASK); 590 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR; 591 mci_writel(host, INTMASK, temp); 592 593 temp = mci_readl(host, CTRL); 594 temp &= ~SDMMC_CTRL_DMA_ENABLE; 595 mci_writel(host, CTRL, temp); 596 } 597 } 598 599 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg) 600 { 601 struct dw_mci *host = slot->host; 602 unsigned long timeout = jiffies + msecs_to_jiffies(500); 603 unsigned int cmd_status = 0; 604 605 mci_writel(host, CMDARG, arg); 606 wmb(); 607 
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}

static void dw_mci_setup_bus(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	u32 div;
	u32 clk_en_a;

	if (slot->clock != host->current_speed) {
		div = host->bus_hz / slot->clock;
		if (host->bus_hz % slot->clock && host->bus_hz > slot->clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0;

		dev_info(&slot->mmc->class_dev,
			 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
			 slot->id, host->bus_hz, slot->clock,
			 div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		host->current_speed = slot->clock;
	}

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;
	if (host->pdata->select_slot)
		host->pdata->select_slot(slot->id);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot);

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->data_status = 0;

	data = cmd->data;
	if (data) {
		dw_mci_set_timeout(host);
		mci_writel(host, BYTCNT, data->blksz * data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
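	/* If a set-block-count (CMD23) command is attached, send it first;
	 * the tasklet restarts the request with the real data command once
	 * CMD23 completes (see the STATE_SENDING_CMD handling). */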
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 regs;

	/* set default 1 bit mode */
	slot->ctype = SDMMC_CTYPE_1BIT;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		slot->ctype = SDMMC_CTYPE_1BIT;
		break;
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_UHS_DDR50)
		regs |= (0x1 << slot->id) << 16;
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);

	if (ios->clock) {
		/*
		 * Use mirror of ios->clock to prevent race with mmc
		 * core ios update when finding the minimum.
		 */
		slot->clock = ios->clock;
	}

	if (slot->host->drv_data && slot->host->drv_data->set_ios)
		slot->host->drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		break;
	default:
		break;
	}
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_ro function, else try on board write protect */
	if (brd->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT)
		read_only = 0;
	else if (brd->get_ro)
		read_only = brd->get_ro(slot->id);
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		present = 1;
	else if (brd->get_cd)
		present = !brd->get_cd(slot->id);
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	if (present)
		dev_dbg(&mmc->class_dev, "card is present\n");
	else
		dev_dbg(&mmc->class_dev, "card is not present\n");

	return present;
}

/*
 * Disable low power mode.
 *
 * Low power mode will stop the card clock when idle. According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	u32 clk_en_a;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

	clk_en_a = mci_readl(host, CLKENA);

	if (clk_en_a & clken_low_pwr) {
		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 int_mask;

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb) {
		/*
		 * Turn off low power mode if it was enabled. This is a bit of
		 * a heavy operation and we disable / enable IRQs a lot, so
		 * we'll leave low power mode disabled and it will get
		 * re-enabled again in dw_mci_setup_bus().
		 */
		dw_mci_disable_low_power(slot);

		mci_writel(host, INTMASK,
			   (int_mask | SDMMC_INT_SDIO(slot->id)));
	} else {
		mci_writel(host, INTMASK,
			   (int_mask & ~SDMMC_INT_SDIO(slot->id)));
	}
}

static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);

		if (cmd->data) {
			dw_mci_stop_dma(host);
			host->data = NULL;
		}
	}
}

static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	u32 status, ctrl;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			dw_mci_command_complete(host, cmd);
			if (cmd == host->mrq->sbc && !cmd->error) {
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       host->mrq->cmd);
				goto unlock;
			}

			if (!host->mrq->data || cmd->error) {
				dw_mci_request_end(host, host->mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop)
					send_stop_cmd(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
			prev_state = state = STATE_DATA_BUSY;
			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			status = host->data_status;

			if (status & DW_MCI_DATA_ERROR_FLAGS) {
				if (status & SDMMC_INT_DTO) {
					data->error = -ETIMEDOUT;
				} else if (status & SDMMC_INT_DCRC) {
					data->error = -EILSEQ;
				} else if (status & SDMMC_INT_EBE &&
					   host->dir_status ==
							DW_MCI_SEND_STATUS) {
					/*
					 * No data CRC status was returned.
					 * The number of bytes transferred will
					 * be exaggerated in PIO mode.
					 */
					data->bytes_xfered = 0;
					data->error = -ETIMEDOUT;
				} else {
					dev_err(host->dev,
						"data FIFO error (status=%08x)\n",
						status);
					data->error = -EIO;
				}
				/*
				 * After an error, there may be data lingering
				 * in the FIFO, so reset it - doing so
				 * generates a block interrupt, hence setting
				 * the scatter-gather pointer to NULL.
1095 */ 1096 sg_miter_stop(&host->sg_miter); 1097 host->sg = NULL; 1098 ctrl = mci_readl(host, CTRL); 1099 ctrl |= SDMMC_CTRL_FIFO_RESET; 1100 mci_writel(host, CTRL, ctrl); 1101 } else { 1102 data->bytes_xfered = data->blocks * data->blksz; 1103 data->error = 0; 1104 } 1105 1106 if (!data->stop) { 1107 dw_mci_request_end(host, host->mrq); 1108 goto unlock; 1109 } 1110 1111 if (host->mrq->sbc && !data->error) { 1112 data->stop->error = 0; 1113 dw_mci_request_end(host, host->mrq); 1114 goto unlock; 1115 } 1116 1117 prev_state = state = STATE_SENDING_STOP; 1118 if (!data->error) 1119 send_stop_cmd(host, data); 1120 /* fall through */ 1121 1122 case STATE_SENDING_STOP: 1123 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, 1124 &host->pending_events)) 1125 break; 1126 1127 host->cmd = NULL; 1128 dw_mci_command_complete(host, host->mrq->stop); 1129 dw_mci_request_end(host, host->mrq); 1130 goto unlock; 1131 1132 case STATE_DATA_ERROR: 1133 if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 1134 &host->pending_events)) 1135 break; 1136 1137 state = STATE_DATA_BUSY; 1138 break; 1139 } 1140 } while (state != prev_state); 1141 1142 host->state = state; 1143 unlock: 1144 spin_unlock(&host->lock); 1145 1146 } 1147 1148 /* push final bytes to part_buf, only use during push */ 1149 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt) 1150 { 1151 memcpy((void *)&host->part_buf, buf, cnt); 1152 host->part_buf_count = cnt; 1153 } 1154 1155 /* append bytes to part_buf, only use during push */ 1156 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt) 1157 { 1158 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count); 1159 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt); 1160 host->part_buf_count += cnt; 1161 return cnt; 1162 } 1163 1164 /* pull first bytes from part_buf, only use during pull */ 1165 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt) 1166 { 1167 cnt = min(cnt, (int)host->part_buf_count); 1168 if (cnt) { 1169 memcpy(buf, (void *)&host->part_buf + host->part_buf_start, 1170 cnt); 1171 host->part_buf_count -= cnt; 1172 host->part_buf_start += cnt; 1173 } 1174 return cnt; 1175 } 1176 1177 /* pull final bytes from the part_buf, assuming it's just been filled */ 1178 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt) 1179 { 1180 memcpy(buf, &host->part_buf, cnt); 1181 host->part_buf_start = cnt; 1182 host->part_buf_count = (1 << host->data_shift) - cnt; 1183 } 1184 1185 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) 1186 { 1187 /* try and push anything in the part_buf */ 1188 if (unlikely(host->part_buf_count)) { 1189 int len = dw_mci_push_part_bytes(host, buf, cnt); 1190 buf += len; 1191 cnt -= len; 1192 if (!sg_next(host->sg) || host->part_buf_count == 2) { 1193 mci_writew(host, DATA(host->data_offset), 1194 host->part_buf16); 1195 host->part_buf_count = 0; 1196 } 1197 } 1198 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1199 if (unlikely((unsigned long)buf & 0x1)) { 1200 while (cnt >= 2) { 1201 u16 aligned_buf[64]; 1202 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 1203 int items = len >> 1; 1204 int i; 1205 /* memcpy from input buffer into aligned buffer */ 1206 memcpy(aligned_buf, buf, len); 1207 buf += len; 1208 cnt -= len; 1209 /* push data from aligned buffer into fifo */ 1210 for (i = 0; i < items; ++i) 1211 mci_writew(host, DATA(host->data_offset), 1212 aligned_buf[i]); 1213 } 1214 } else 1215 #endif 1216 { 1217 u16 *pdata = buf; 1218 for (; cnt >= 2; 
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		if (!sg_next(host->sg))
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
	}
}

static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (!sg_next(host->sg) || host->part_buf_count == 4) {
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		if (!sg_next(host->sg))
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
	}
}

static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

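/*
 * The 64-bit FIFO access variants below follow the same pattern as the
 * 16- and 32-bit ones above: host->part_buf buffers the tail of an sg
 * entry that is smaller than one FIFO word, and it is flushed either
 * once it fills up or when the last sg entry has been consumed.
 */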
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (!sg_next(host->sg) || host->part_buf_count == 8) {
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		if (!sg_next(host->sg))
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
	}
}

static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}

static void dw_mci_read_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int nbytes = 0, len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->__sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			offset += len;
			nbytes += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
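		/* Ack the RXDR we just serviced; the loop condition uses
		 * the status sampled above to decide whether the FIFO
		 * filled up again and needs another pass. */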
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	} while (status & SDMMC_INT_RXDR); /* if the RXDR is ready read again */
	data->bytes_xfered += nbytes;

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	data->bytes_xfered += nbytes;
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int nbytes = 0, len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->__sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			offset += len;
			nbytes += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
	data->bytes_xfered += nbytes;

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	data->bytes_xfered += nbytes;
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}

static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	unsigned int pass_count = 0;
	int i;

	do {
		pending = mci_readl(host, MINTSTS); /* read-only mask reg */

		/*
		 * DTO fix - version 2.10a and below, and only if internal DMA
		 * is configured.
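		 *
		 * On those versions the data-over (DTO) interrupt can go
		 * missing, so if nothing is pending but STATUS still shows
		 * data in the FIFO, a DATA_OVER event is synthesized below.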
1568 */ 1569 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) { 1570 if (!pending && 1571 ((mci_readl(host, STATUS) >> 17) & 0x1fff)) 1572 pending |= SDMMC_INT_DATA_OVER; 1573 } 1574 1575 if (!pending) 1576 break; 1577 1578 if (pending & DW_MCI_CMD_ERROR_FLAGS) { 1579 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); 1580 host->cmd_status = pending; 1581 smp_wmb(); 1582 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 1583 } 1584 1585 if (pending & DW_MCI_DATA_ERROR_FLAGS) { 1586 /* if there is an error report DATA_ERROR */ 1587 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); 1588 host->data_status = pending; 1589 smp_wmb(); 1590 set_bit(EVENT_DATA_ERROR, &host->pending_events); 1591 tasklet_schedule(&host->tasklet); 1592 } 1593 1594 if (pending & SDMMC_INT_DATA_OVER) { 1595 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); 1596 if (!host->data_status) 1597 host->data_status = pending; 1598 smp_wmb(); 1599 if (host->dir_status == DW_MCI_RECV_STATUS) { 1600 if (host->sg != NULL) 1601 dw_mci_read_data_pio(host); 1602 } 1603 set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 1604 tasklet_schedule(&host->tasklet); 1605 } 1606 1607 if (pending & SDMMC_INT_RXDR) { 1608 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 1609 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg) 1610 dw_mci_read_data_pio(host); 1611 } 1612 1613 if (pending & SDMMC_INT_TXDR) { 1614 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 1615 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg) 1616 dw_mci_write_data_pio(host); 1617 } 1618 1619 if (pending & SDMMC_INT_CMD_DONE) { 1620 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); 1621 dw_mci_cmd_interrupt(host, pending); 1622 } 1623 1624 if (pending & SDMMC_INT_CD) { 1625 mci_writel(host, RINTSTS, SDMMC_INT_CD); 1626 queue_work(host->card_workqueue, &host->card_work); 1627 } 1628 1629 /* Handle SDIO Interrupts */ 1630 for (i = 0; i < host->num_slots; i++) { 1631 struct dw_mci_slot *slot = host->slot[i]; 1632 if (pending & SDMMC_INT_SDIO(i)) { 1633 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i)); 1634 mmc_signal_sdio_irq(slot->mmc); 1635 } 1636 } 1637 1638 } while (pass_count++ < 5); 1639 1640 #ifdef CONFIG_MMC_DW_IDMAC 1641 /* Handle DMA interrupts */ 1642 pending = mci_readl(host, IDSTS); 1643 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 1644 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI); 1645 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); 1646 host->dma_ops->complete(host); 1647 } 1648 #endif 1649 1650 return IRQ_HANDLED; 1651 } 1652 1653 static void dw_mci_work_routine_card(struct work_struct *work) 1654 { 1655 struct dw_mci *host = container_of(work, struct dw_mci, card_work); 1656 int i; 1657 1658 for (i = 0; i < host->num_slots; i++) { 1659 struct dw_mci_slot *slot = host->slot[i]; 1660 struct mmc_host *mmc = slot->mmc; 1661 struct mmc_request *mrq; 1662 int present; 1663 u32 ctrl; 1664 1665 present = dw_mci_get_cd(mmc); 1666 while (present != slot->last_detect_state) { 1667 dev_dbg(&slot->mmc->class_dev, "card %s\n", 1668 present ? 
"inserted" : "removed"); 1669 1670 /* Power up slot (before spin_lock, may sleep) */ 1671 if (present != 0 && host->pdata->setpower) 1672 host->pdata->setpower(slot->id, mmc->ocr_avail); 1673 1674 spin_lock_bh(&host->lock); 1675 1676 /* Card change detected */ 1677 slot->last_detect_state = present; 1678 1679 /* Mark card as present if applicable */ 1680 if (present != 0) 1681 set_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1682 1683 /* Clean up queue if present */ 1684 mrq = slot->mrq; 1685 if (mrq) { 1686 if (mrq == host->mrq) { 1687 host->data = NULL; 1688 host->cmd = NULL; 1689 1690 switch (host->state) { 1691 case STATE_IDLE: 1692 break; 1693 case STATE_SENDING_CMD: 1694 mrq->cmd->error = -ENOMEDIUM; 1695 if (!mrq->data) 1696 break; 1697 /* fall through */ 1698 case STATE_SENDING_DATA: 1699 mrq->data->error = -ENOMEDIUM; 1700 dw_mci_stop_dma(host); 1701 break; 1702 case STATE_DATA_BUSY: 1703 case STATE_DATA_ERROR: 1704 if (mrq->data->error == -EINPROGRESS) 1705 mrq->data->error = -ENOMEDIUM; 1706 if (!mrq->stop) 1707 break; 1708 /* fall through */ 1709 case STATE_SENDING_STOP: 1710 mrq->stop->error = -ENOMEDIUM; 1711 break; 1712 } 1713 1714 dw_mci_request_end(host, mrq); 1715 } else { 1716 list_del(&slot->queue_node); 1717 mrq->cmd->error = -ENOMEDIUM; 1718 if (mrq->data) 1719 mrq->data->error = -ENOMEDIUM; 1720 if (mrq->stop) 1721 mrq->stop->error = -ENOMEDIUM; 1722 1723 spin_unlock(&host->lock); 1724 mmc_request_done(slot->mmc, mrq); 1725 spin_lock(&host->lock); 1726 } 1727 } 1728 1729 /* Power down slot */ 1730 if (present == 0) { 1731 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1732 1733 /* 1734 * Clear down the FIFO - doing so generates a 1735 * block interrupt, hence setting the 1736 * scatter-gather pointer to NULL. 1737 */ 1738 sg_miter_stop(&host->sg_miter); 1739 host->sg = NULL; 1740 1741 ctrl = mci_readl(host, CTRL); 1742 ctrl |= SDMMC_CTRL_FIFO_RESET; 1743 mci_writel(host, CTRL, ctrl); 1744 1745 #ifdef CONFIG_MMC_DW_IDMAC 1746 ctrl = mci_readl(host, BMOD); 1747 /* Software reset of DMA */ 1748 ctrl |= SDMMC_IDMAC_SWRESET; 1749 mci_writel(host, BMOD, ctrl); 1750 #endif 1751 1752 } 1753 1754 spin_unlock_bh(&host->lock); 1755 1756 /* Power down slot (after spin_unlock, may sleep) */ 1757 if (present == 0 && host->pdata->setpower) 1758 host->pdata->setpower(slot->id, 0); 1759 1760 present = dw_mci_get_cd(mmc); 1761 } 1762 1763 mmc_detect_change(slot->mmc, 1764 msecs_to_jiffies(host->pdata->detect_delay_ms)); 1765 } 1766 } 1767 1768 #ifdef CONFIG_OF 1769 /* given a slot id, find out the device node representing that slot */ 1770 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot) 1771 { 1772 struct device_node *np; 1773 const __be32 *addr; 1774 int len; 1775 1776 if (!dev || !dev->of_node) 1777 return NULL; 1778 1779 for_each_child_of_node(dev->of_node, np) { 1780 addr = of_get_property(np, "reg", &len); 1781 if (!addr || (len < sizeof(int))) 1782 continue; 1783 if (be32_to_cpup(addr) == slot) 1784 return np; 1785 } 1786 return NULL; 1787 } 1788 1789 /* find out bus-width for a given slot */ 1790 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot) 1791 { 1792 struct device_node *np = dw_mci_of_find_slot_node(dev, slot); 1793 u32 bus_wd = 1; 1794 1795 if (!np) 1796 return 1; 1797 1798 if (of_property_read_u32(np, "bus-width", &bus_wd)) 1799 dev_err(dev, "bus-width property not found, assuming width" 1800 " as 1\n"); 1801 return bus_wd; 1802 } 1803 #else /* CONFIG_OF */ 1804 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot) 1805 { 
	return 1;
}

static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	return NULL;
}
#endif /* CONFIG_OF */

static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	int ctrl_id, ret;
	u8 bus_width;

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot[id] = slot;

	mmc->ops = &dw_mci_ops;
	mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
	mmc->f_max = host->bus_hz;

	if (host->pdata->get_ocr)
		mmc->ocr_avail = host->pdata->get_ocr(id);
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/*
	 * Start with slot power disabled, it will be enabled when a card
	 * is detected.
	 */
	if (host->pdata->setpower)
		host->pdata->setpower(id, 0);

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}
	if (host->drv_data && host->drv_data->caps)
		mmc->caps |= host->drv_data->caps[ctrl_id];

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	if (host->pdata->get_bus_wd)
		bus_width = host->pdata->get_bus_wd(slot->id);
	else if (host->dev->of_node)
		bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
	else
		bus_width = 1;

	if (host->drv_data && host->drv_data->setup_bus) {
		struct device_node *slot_np;
		slot_np = dw_mci_of_find_slot_node(host->dev, slot->id);
		ret = host->drv_data->setup_bus(host, slot_np, bus_width);
		if (ret)
			goto err_setup_bus;
	}

	switch (bus_width) {
	case 8:
		mmc->caps |= MMC_CAP_8_BIT_DATA;
		/* fall through */
	case 4:
		mmc->caps |= MMC_CAP_4_BIT_DATA;
	}

	if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if (host->pdata->blk_settings) {
		mmc->max_segs = host->pdata->blk_settings->max_segs;
		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
	} else {
		/* Useful defaults if platform data is unset. */
#ifdef CONFIG_MMC_DW_IDMAC
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65536;
		mmc->max_blk_count = host->ring_size;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
#else
		mmc->max_segs = 64;
		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
#endif /* CONFIG_MMC_DW_IDMAC */
	}

	host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
	if (IS_ERR(host->vmmc)) {
		pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
		host->vmmc = NULL;
	} else
		regulator_enable(host->vmmc);

	if (dw_mci_get_cd(mmc))
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
	else
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

	mmc_add_host(mmc);

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	/* Card initially undetected */
	slot->last_detect_state = 0;

	/*
	 * Card may have been plugged in prior to boot so we
	 * need to run the detect tasklet
	 */
	queue_work(host->card_workqueue, &host->card_work);

	return 0;

err_setup_bus:
	mmc_free_host(mmc);
	return -EINVAL;
}

static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Shutdown detect IRQ */
	if (slot->host->pdata->exit)
		slot->host->pdata->exit(id);

	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}

static void dw_mci_init_dma(struct dw_mci *host)
{
	/* Alloc memory for sg translation */
	host->sg_cpu = dma_alloc_coherent(host->dev, PAGE_SIZE,
					  &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		dev_err(host->dev, "%s: could not alloc DMA memory\n",
			__func__);
		goto no_dma;
	}

	/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
	host->dma_ops = &dw_mci_idmac_ops;
	dev_info(host->dev, "Using internal DMA controller.\n");
#endif

	if (!host->dma_ops)
		goto no_dma;

	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
				__func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	host->use_dma = 1;
	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = 0;
	return;
}

static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int ctrl;

	mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
				SDMMC_CTRL_DMA_RESET));

	/* wait till resets clear */
	do {
		ctrl = mci_readl(host, CTRL);
		if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
			      SDMMC_CTRL_DMA_RESET)))
			return true;
	} while (time_before(jiffies, timeout));

	dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);

	return false;
}

#ifdef CONFIG_OF
static struct dw_mci_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk	= "supports-highspeed",
		.id	= DW_MCI_QUIRK_HIGHSPEED,
	}, {
		.quirk	= "broken-cd",
		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	},
};

static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	int idx, ret;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	/* find out number of slots supported */
	if (of_property_read_u32(dev->of_node, "num-slots",
				 &pdata->num_slots)) {
		dev_info(dev, "num-slots property not found, assuming 1 slot is available\n");
		pdata->num_slots = 1;
	}

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
		if (of_get_property(np, of_quirks[idx].quirk, NULL))
			pdata->quirks |= of_quirks[idx].id;

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev, "fifo-depth property not found, using value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (host->drv_data && host->drv_data->parse_dt) {
		ret = host->drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	return pdata;
}

#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */

int dw_mci_probe(struct dw_mci *host)
{
	int width, i, ret = 0;
	u32 fifo_size;
	int init_slots = 0;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (IS_ERR(host->pdata)) {
			dev_err(host->dev, "platform data not available\n");
			return -EINVAL;
		}
	}

	if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
		dev_err(host->dev,
			"Platform data must supply select_slot function\n");
		return -ENODEV;
	}

	host->biu_clk = clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			clk_put(host->biu_clk);
			return ret;
		}
	}

	host->ciu_clk = clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			clk_put(host->ciu_clk);
			goto err_clk_biu;
		}
	}

	if (IS_ERR(host->ciu_clk))
		host->bus_hz = host->pdata->bus_hz;
	else
		host->bus_hz = clk_get_rate(host->ciu_clk);

	if (host->drv_data && host->drv_data->setup_clock) {
		ret = host->drv_data->setup_clock(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific clock setup failed\n");
			goto err_clk_ciu;
		}
	}

	if (!host->bus_hz) {
		dev_err(host->dev,
			"Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	host->quirks = host->pdata->quirks;

	spin_lock_init(&host->lock);
	INIT_LIST_HEAD(&host->queue);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
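	 *
	 * HCON[9:7] encodes the width: 0 means 16 bits, 1 means 32 bits and
	 * 2 means 64 bits; any other value is reserved.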

int dw_mci_probe(struct dw_mci *host)
{
	int width, i, ret = 0;
	u32 fifo_size;
	int init_slots = 0;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (IS_ERR(host->pdata)) {
			dev_err(host->dev, "platform data not available\n");
			return -EINVAL;
		}
	}

	if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
		dev_err(host->dev,
			"Platform data must supply select_slot function\n");
		return -ENODEV;
	}

	host->biu_clk = clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			clk_put(host->biu_clk);
			return ret;
		}
	}

	host->ciu_clk = clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			clk_put(host->ciu_clk);
			goto err_clk_biu;
		}
	}

	if (IS_ERR(host->ciu_clk))
		host->bus_hz = host->pdata->bus_hz;
	else
		host->bus_hz = clk_get_rate(host->ciu_clk);

	if (host->drv_data && host->drv_data->setup_clock) {
		ret = host->drv_data->setup_clock(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific clock setup failed\n");
			goto err_clk_ciu;
		}
	}

	if (!host->bus_hz) {
		dev_err(host->dev, "Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	host->quirks = host->pdata->quirks;

	spin_lock_init(&host->lock);
	INIT_LIST_HEAD(&host->queue);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = (mci_readl(host, HCON) >> 7) & 0x7;
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}

	/* Reset all blocks */
	if (!mci_wait_reset(host->dev, host)) {
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings:
	 * RX_WMark = fifo_size / 2 - 1, TX_WMark = fifo_size / 2,
	 * DMA multiple-transaction size = 8.
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
	} else {
		fifo_size = host->pdata->fifo_depth;
	}
	host->fifo_depth = fifo_size;
	host->fifoth_val = ((0x2 << 28) | ((fifo_size / 2 - 1) << 16) |
			    ((fifo_size / 2) << 0));
	mci_writel(host, FIFOTH, host->fifoth_val);
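
	/*
	 * Worked example (illustrative): for a typical fifo_size of 32 the
	 * line above yields (0x2 << 28) | (15 << 16) | 16 = 0x200f0010,
	 * i.e. a DMA multiple-transaction size of 8, RX_WMark = 15 and
	 * TX_WMark = 16.
	 */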

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
	host->card_workqueue = alloc_workqueue("dw-mci-card",
			WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
	if (!host->card_workqueue) {
		ret = -ENOMEM;
		goto err_dmaunmap;
	}
	INIT_WORK(&host->card_work, dw_mci_work_routine_card);

	ret = request_irq(host->irq, dw_mci_interrupt, host->irq_flags,
			  "dw-mci", host);
	if (ret)
		goto err_workqueue;

	if (host->pdata->num_slots)
		host->num_slots = host->pdata->num_slots;
	else
		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;

	/* We need at least one slot to succeed */
	for (i = 0; i < host->num_slots; i++) {
		ret = dw_mci_init_slot(host, i);
		if (ret)
			dev_dbg(host->dev, "slot %d init failed\n", i);
		else
			init_slots++;
	}

	if (init_slots) {
		dev_info(host->dev, "%d slots initialized\n", init_slots);
	} else {
		dev_dbg(host->dev,
			"attempted to initialize %d slots, but failed on all\n",
			host->num_slots);
		ret = -ENODEV;
		goto err_init_slot;
	}

	/*
	 * As of the 2.40a spec the data register offset changed, so check the
	 * version ID and set the DATA register offset accordingly.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(host->dev, "Version ID is %04x\n", host->verid);

	if (host->verid < DW_MMC_240A)
		host->data_offset = DATA_OFFSET;
	else
		host->data_offset = DATA_240A_OFFSET;

	/*
	 * Enable interrupts for command done, data over, data empty, card det,
	 * receive ready and error such as transmit, receive timeout, crc error
	 */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	/* Enable mci interrupt */
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	dev_info(host->dev,
		 "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
		 host->irq, width, fifo_size);
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
		dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");

	return 0;

err_init_slot:
	free_irq(host->irq, host);

err_workqueue:
	destroy_workqueue(host->card_workqueue);

err_dmaunmap:
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);
	dma_free_coherent(host->dev, PAGE_SIZE,
			  host->sg_cpu, host->sg_dma);

	if (host->vmmc) {
		regulator_disable(host->vmmc);
		regulator_put(host->vmmc);
	}

err_clk_ciu:
	if (!IS_ERR(host->ciu_clk)) {
		clk_disable_unprepare(host->ciu_clk);
		clk_put(host->ciu_clk);
	}

err_clk_biu:
	if (!IS_ERR(host->biu_clk)) {
		clk_disable_unprepare(host->biu_clk);
		clk_put(host->biu_clk);
	}

	return ret;
}
EXPORT_SYMBOL(dw_mci_probe);
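
/*
 * Usage sketch (illustrative, not part of this file): a bus glue driver such
 * as dw_mmc-pltfm is expected to allocate a struct dw_mci, fill in at least
 * regs, irq and dev from its own resources, and then call dw_mci_probe(),
 * roughly:
 *
 *	host->irq = platform_get_irq(pdev, 0);
 *	host->dev = &pdev->dev;
 *	host->regs = <ioremapped controller base>;
 *	ret = dw_mci_probe(host);
 *
 * The surrounding resource handling is assumed here, not shown.
 */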

void dw_mci_remove(struct dw_mci *host)
{
	int i;

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	free_irq(host->irq, host);
	destroy_workqueue(host->card_workqueue);
	dma_free_coherent(host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (host->vmmc) {
		regulator_disable(host->vmmc);
		regulator_put(host->vmmc);
	}

	if (!IS_ERR(host->ciu_clk)) {
		clk_disable_unprepare(host->ciu_clk);
		clk_put(host->ciu_clk);
	}

	if (!IS_ERR(host->biu_clk)) {
		clk_disable_unprepare(host->biu_clk);
		clk_put(host->biu_clk);
	}
}
EXPORT_SYMBOL(dw_mci_remove);

#ifdef CONFIG_PM_SLEEP
/*
 * TODO: we should probably disable the clock to the card in the suspend path.
 */
int dw_mci_suspend(struct dw_mci *host)
{
	int i, ret = 0;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		if (!slot)
			continue;
		ret = mmc_suspend_host(slot->mmc);
		if (ret < 0) {
			while (--i >= 0) {
				slot = host->slot[i];
				if (slot)
					mmc_resume_host(slot->mmc);
			}
			return ret;
		}
	}

	if (host->vmmc)
		regulator_disable(host->vmmc);

	return 0;
}
EXPORT_SYMBOL(dw_mci_suspend);

int dw_mci_resume(struct dw_mci *host)
{
	int i, ret;

	if (host->vmmc)
		regulator_enable(host->vmmc);

	if (!mci_wait_reset(host->dev, host))
		return -ENODEV;

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/* Restore the old value at FIFOTH register */
	mci_writel(host, FIFOTH, host->fifoth_val);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		if (!slot)
			continue;
		ret = mmc_resume_host(slot->mmc);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(dw_mci_resume);
#endif /* CONFIG_PM_SLEEP */

static int __init dw_mci_init(void)
{
	pr_info("Synopsys DesignWare Multimedia Card Interface Driver\n");
	return 0;
}

static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");
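
/*
 * Usage sketch (illustrative): glue drivers are expected to route system
 * sleep through the helpers exported above, typically via small wrappers
 * wired into a dev_pm_ops, e.g.:
 *
 *	static int dw_mci_pltfm_suspend(struct device *dev)
 *	{
 *		struct dw_mci *host = dev_get_drvdata(dev);
 *
 *		return dw_mci_suspend(host);
 *	}
 *
 * with a matching resume wrapper; the wrapper names here are hypothetical.
 */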