1 /* 2 * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver 3 * 4 * Copyright (C) 2006 Texas Instruments. 5 * Original author: Purushotam Kumar 6 * Copyright (C) 2009 David Brownell 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 */ 22 23 #include <linux/module.h> 24 #include <linux/ioport.h> 25 #include <linux/platform_device.h> 26 #include <linux/clk.h> 27 #include <linux/err.h> 28 #include <linux/cpufreq.h> 29 #include <linux/mmc/host.h> 30 #include <linux/io.h> 31 #include <linux/irq.h> 32 #include <linux/delay.h> 33 #include <linux/dma-mapping.h> 34 #include <linux/mmc/mmc.h> 35 36 #include <mach/mmc.h> 37 #include <mach/edma.h> 38 39 /* 40 * Register Definitions 41 */ 42 #define DAVINCI_MMCCTL 0x00 /* Control Register */ 43 #define DAVINCI_MMCCLK 0x04 /* Memory Clock Control Register */ 44 #define DAVINCI_MMCST0 0x08 /* Status Register 0 */ 45 #define DAVINCI_MMCST1 0x0C /* Status Register 1 */ 46 #define DAVINCI_MMCIM 0x10 /* Interrupt Mask Register */ 47 #define DAVINCI_MMCTOR 0x14 /* Response Time-Out Register */ 48 #define DAVINCI_MMCTOD 0x18 /* Data Read Time-Out Register */ 49 #define DAVINCI_MMCBLEN 0x1C /* Block Length Register */ 50 #define DAVINCI_MMCNBLK 0x20 /* Number of Blocks Register */ 51 #define DAVINCI_MMCNBLC 0x24 /* Number of Blocks Counter Register */ 52 #define DAVINCI_MMCDRR 0x28 /* Data Receive Register */ 53 #define DAVINCI_MMCDXR 0x2C /* Data Transmit Register */ 54 #define DAVINCI_MMCCMD 0x30 /* Command Register */ 55 #define DAVINCI_MMCARGHL 0x34 /* Argument Register */ 56 #define DAVINCI_MMCRSP01 0x38 /* Response Register 0 and 1 */ 57 #define DAVINCI_MMCRSP23 0x3C /* Response Register 0 and 1 */ 58 #define DAVINCI_MMCRSP45 0x40 /* Response Register 0 and 1 */ 59 #define DAVINCI_MMCRSP67 0x44 /* Response Register 0 and 1 */ 60 #define DAVINCI_MMCDRSP 0x48 /* Data Response Register */ 61 #define DAVINCI_MMCETOK 0x4C 62 #define DAVINCI_MMCCIDX 0x50 /* Command Index Register */ 63 #define DAVINCI_MMCCKC 0x54 64 #define DAVINCI_MMCTORC 0x58 65 #define DAVINCI_MMCTODC 0x5C 66 #define DAVINCI_MMCBLNC 0x60 67 #define DAVINCI_SDIOCTL 0x64 68 #define DAVINCI_SDIOST0 0x68 69 #define DAVINCI_SDIOEN 0x6C 70 #define DAVINCI_SDIOST 0x70 71 #define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */ 72 73 /* DAVINCI_MMCCTL definitions */ 74 #define MMCCTL_DATRST (1 << 0) 75 #define MMCCTL_CMDRST (1 << 1) 76 #define MMCCTL_WIDTH_4_BIT (1 << 2) 77 #define MMCCTL_DATEG_DISABLED (0 << 6) 78 #define MMCCTL_DATEG_RISING (1 << 6) 79 #define MMCCTL_DATEG_FALLING (2 << 6) 80 #define MMCCTL_DATEG_BOTH (3 << 6) 81 #define MMCCTL_PERMDR_LE (0 << 9) 82 #define MMCCTL_PERMDR_BE (1 << 9) 83 #define MMCCTL_PERMDX_LE (0 << 10) 84 #define MMCCTL_PERMDX_BE (1 << 10) 85 86 /* DAVINCI_MMCCLK definitions */ 87 #define MMCCLK_CLKEN (1 << 8) 88 #define MMCCLK_CLKRT_MASK (0xFF << 0) 89 90 /* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */ 91 #define MMCST0_DATDNE BIT(0) /* data done */ 92 #define MMCST0_BSYDNE BIT(1) /* busy done */ 93 #define MMCST0_RSPDNE BIT(2) /* command done */ 94 #define MMCST0_TOUTRD BIT(3) /* data read timeout */ 95 #define MMCST0_TOUTRS BIT(4) /* command response timeout */ 96 #define MMCST0_CRCWR BIT(5) /* data write CRC error */ 97 #define MMCST0_CRCRD BIT(6) /* data read CRC error */ 98 #define MMCST0_CRCRS BIT(7) /* command response CRC error */ 99 #define MMCST0_DXRDY BIT(9) /* data transmit ready (fifo empty) */ 100 #define MMCST0_DRRDY BIT(10) /* data receive ready (data in fifo)*/ 101 #define MMCST0_DATED BIT(11) /* DAT3 edge detect */ 102 #define MMCST0_TRNDNE BIT(12) /* transfer done */ 103 104 /* DAVINCI_MMCST1 definitions */ 105 #define MMCST1_BUSY (1 << 0) 106 107 /* DAVINCI_MMCCMD definitions */ 108 #define MMCCMD_CMD_MASK (0x3F << 0) 109 #define MMCCMD_PPLEN (1 << 7) 110 #define MMCCMD_BSYEXP (1 << 8) 111 #define MMCCMD_RSPFMT_MASK (3 << 9) 112 #define MMCCMD_RSPFMT_NONE (0 << 9) 113 #define MMCCMD_RSPFMT_R1456 (1 << 9) 114 #define MMCCMD_RSPFMT_R2 (2 << 9) 115 #define MMCCMD_RSPFMT_R3 (3 << 9) 116 #define MMCCMD_DTRW (1 << 11) 117 #define MMCCMD_STRMTP (1 << 12) 118 #define MMCCMD_WDATX (1 << 13) 119 #define MMCCMD_INITCK (1 << 14) 120 #define MMCCMD_DCLR (1 << 15) 121 #define MMCCMD_DMATRIG (1 << 16) 122 123 /* DAVINCI_MMCFIFOCTL definitions */ 124 #define MMCFIFOCTL_FIFORST (1 << 0) 125 #define MMCFIFOCTL_FIFODIR_WR (1 << 1) 126 #define MMCFIFOCTL_FIFODIR_RD (0 << 1) 127 #define MMCFIFOCTL_FIFOLEV (1 << 2) /* 0 = 128 bits, 1 = 256 bits */ 128 #define MMCFIFOCTL_ACCWD_4 (0 << 3) /* access width of 4 bytes */ 129 #define MMCFIFOCTL_ACCWD_3 (1 << 3) /* access width of 3 bytes */ 130 #define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */ 131 #define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */ 132 133 134 /* MMCSD Init clock in Hz in opendrain mode */ 135 #define MMCSD_INIT_CLOCK 200000 136 137 /* 138 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units, 139 * and we handle up to NR_SG segments. MMC_BLOCK_BOUNCE kicks in only 140 * for drivers with max_hw_segs == 1, making the segments bigger (64KB) 141 * than the page or two that's otherwise typical. NR_SG == 16 gives at 142 * least the same throughput boost, using EDMA transfer linkage instead 143 * of spending CPU time copying pages. 144 */ 145 #define MAX_CCNT ((1 << 16) - 1) 146 147 #define NR_SG 16 148 149 static unsigned rw_threshold = 32; 150 module_param(rw_threshold, uint, S_IRUGO); 151 MODULE_PARM_DESC(rw_threshold, 152 "Read/Write threshold. Default = 32"); 153 154 static unsigned __initdata use_dma = 1; 155 module_param(use_dma, uint, 0); 156 MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1"); 157 158 struct mmc_davinci_host { 159 struct mmc_command *cmd; 160 struct mmc_data *data; 161 struct mmc_host *mmc; 162 struct clk *clk; 163 unsigned int mmc_input_clk; 164 void __iomem *base; 165 struct resource *mem_res; 166 int irq; 167 unsigned char bus_mode; 168 169 #define DAVINCI_MMC_DATADIR_NONE 0 170 #define DAVINCI_MMC_DATADIR_READ 1 171 #define DAVINCI_MMC_DATADIR_WRITE 2 172 unsigned char data_dir; 173 174 /* buffer is used during PIO of one scatterlist segment, and 175 * is updated along with buffer_bytes_left. bytes_left applies 176 * to all N blocks of the PIO transfer. 177 */ 178 u8 *buffer; 179 u32 buffer_bytes_left; 180 u32 bytes_left; 181 182 u32 rxdma, txdma; 183 bool use_dma; 184 bool do_dma; 185 186 /* Scatterlist DMA uses one or more parameter RAM entries: 187 * the main one (associated with rxdma or txdma) plus zero or 188 * more links. The entries for a given transfer differ only 189 * by memory buffer (address, length) and link field. 190 */ 191 struct edmacc_param tx_template; 192 struct edmacc_param rx_template; 193 unsigned n_link; 194 u32 links[NR_SG - 1]; 195 196 /* For PIO we walk scatterlists one segment at a time. */ 197 unsigned int sg_len; 198 struct scatterlist *sg; 199 200 /* Version of the MMC/SD controller */ 201 u8 version; 202 /* for ns in one cycle calculation */ 203 unsigned ns_in_one_cycle; 204 #ifdef CONFIG_CPU_FREQ 205 struct notifier_block freq_transition; 206 #endif 207 }; 208 209 210 /* PIO only */ 211 static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host) 212 { 213 host->buffer_bytes_left = sg_dma_len(host->sg); 214 host->buffer = sg_virt(host->sg); 215 if (host->buffer_bytes_left > host->bytes_left) 216 host->buffer_bytes_left = host->bytes_left; 217 } 218 219 static void davinci_fifo_data_trans(struct mmc_davinci_host *host, 220 unsigned int n) 221 { 222 u8 *p; 223 unsigned int i; 224 225 if (host->buffer_bytes_left == 0) { 226 host->sg = sg_next(host->data->sg); 227 mmc_davinci_sg_to_buf(host); 228 } 229 230 p = host->buffer; 231 if (n > host->buffer_bytes_left) 232 n = host->buffer_bytes_left; 233 host->buffer_bytes_left -= n; 234 host->bytes_left -= n; 235 236 /* NOTE: we never transfer more than rw_threshold bytes 237 * to/from the fifo here; there's no I/O overlap. 238 * This also assumes that access width( i.e. ACCWD) is 4 bytes 239 */ 240 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 241 for (i = 0; i < (n >> 2); i++) { 242 writel(*((u32 *)p), host->base + DAVINCI_MMCDXR); 243 p = p + 4; 244 } 245 if (n & 3) { 246 iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3)); 247 p = p + (n & 3); 248 } 249 } else { 250 for (i = 0; i < (n >> 2); i++) { 251 *((u32 *)p) = readl(host->base + DAVINCI_MMCDRR); 252 p = p + 4; 253 } 254 if (n & 3) { 255 ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3)); 256 p = p + (n & 3); 257 } 258 } 259 host->buffer = p; 260 } 261 262 static void mmc_davinci_start_command(struct mmc_davinci_host *host, 263 struct mmc_command *cmd) 264 { 265 u32 cmd_reg = 0; 266 u32 im_val; 267 268 dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n", 269 cmd->opcode, cmd->arg, 270 ({ char *s; 271 switch (mmc_resp_type(cmd)) { 272 case MMC_RSP_R1: 273 s = ", R1/R5/R6/R7 response"; 274 break; 275 case MMC_RSP_R1B: 276 s = ", R1b response"; 277 break; 278 case MMC_RSP_R2: 279 s = ", R2 response"; 280 break; 281 case MMC_RSP_R3: 282 s = ", R3/R4 response"; 283 break; 284 default: 285 s = ", (R? response)"; 286 break; 287 }; s; })); 288 host->cmd = cmd; 289 290 switch (mmc_resp_type(cmd)) { 291 case MMC_RSP_R1B: 292 /* There's some spec confusion about when R1B is 293 * allowed, but if the card doesn't issue a BUSY 294 * then it's harmless for us to allow it. 295 */ 296 cmd_reg |= MMCCMD_BSYEXP; 297 /* FALLTHROUGH */ 298 case MMC_RSP_R1: /* 48 bits, CRC */ 299 cmd_reg |= MMCCMD_RSPFMT_R1456; 300 break; 301 case MMC_RSP_R2: /* 136 bits, CRC */ 302 cmd_reg |= MMCCMD_RSPFMT_R2; 303 break; 304 case MMC_RSP_R3: /* 48 bits, no CRC */ 305 cmd_reg |= MMCCMD_RSPFMT_R3; 306 break; 307 default: 308 cmd_reg |= MMCCMD_RSPFMT_NONE; 309 dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n", 310 mmc_resp_type(cmd)); 311 break; 312 } 313 314 /* Set command index */ 315 cmd_reg |= cmd->opcode; 316 317 /* Enable EDMA transfer triggers */ 318 if (host->do_dma) 319 cmd_reg |= MMCCMD_DMATRIG; 320 321 if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL && 322 host->data_dir == DAVINCI_MMC_DATADIR_READ) 323 cmd_reg |= MMCCMD_DMATRIG; 324 325 /* Setting whether command involves data transfer or not */ 326 if (cmd->data) 327 cmd_reg |= MMCCMD_WDATX; 328 329 /* Setting whether stream or block transfer */ 330 if (cmd->flags & MMC_DATA_STREAM) 331 cmd_reg |= MMCCMD_STRMTP; 332 333 /* Setting whether data read or write */ 334 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) 335 cmd_reg |= MMCCMD_DTRW; 336 337 if (host->bus_mode == MMC_BUSMODE_PUSHPULL) 338 cmd_reg |= MMCCMD_PPLEN; 339 340 /* set Command timeout */ 341 writel(0x1FFF, host->base + DAVINCI_MMCTOR); 342 343 /* Enable interrupt (calculate here, defer until FIFO is stuffed). */ 344 im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS; 345 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 346 im_val |= MMCST0_DATDNE | MMCST0_CRCWR; 347 348 if (!host->do_dma) 349 im_val |= MMCST0_DXRDY; 350 } else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) { 351 im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD; 352 353 if (!host->do_dma) 354 im_val |= MMCST0_DRRDY; 355 } 356 357 /* 358 * Before non-DMA WRITE commands the controller needs priming: 359 * FIFO should be populated with 32 bytes i.e. whatever is the FIFO size 360 */ 361 if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)) 362 davinci_fifo_data_trans(host, rw_threshold); 363 364 writel(cmd->arg, host->base + DAVINCI_MMCARGHL); 365 writel(cmd_reg, host->base + DAVINCI_MMCCMD); 366 writel(im_val, host->base + DAVINCI_MMCIM); 367 } 368 369 /*----------------------------------------------------------------------*/ 370 371 /* DMA infrastructure */ 372 373 static void davinci_abort_dma(struct mmc_davinci_host *host) 374 { 375 int sync_dev; 376 377 if (host->data_dir == DAVINCI_MMC_DATADIR_READ) 378 sync_dev = host->rxdma; 379 else 380 sync_dev = host->txdma; 381 382 edma_stop(sync_dev); 383 edma_clean_channel(sync_dev); 384 } 385 386 static void 387 mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data); 388 389 static void mmc_davinci_dma_cb(unsigned channel, u16 ch_status, void *data) 390 { 391 if (DMA_COMPLETE != ch_status) { 392 struct mmc_davinci_host *host = data; 393 394 /* Currently means: DMA Event Missed, or "null" transfer 395 * request was seen. In the future, TC errors (like bad 396 * addresses) might be presented too. 397 */ 398 dev_warn(mmc_dev(host->mmc), "DMA %s error\n", 399 (host->data->flags & MMC_DATA_WRITE) 400 ? "write" : "read"); 401 host->data->error = -EIO; 402 mmc_davinci_xfer_done(host, host->data); 403 } 404 } 405 406 /* Set up tx or rx template, to be modified and updated later */ 407 static void __init mmc_davinci_dma_setup(struct mmc_davinci_host *host, 408 bool tx, struct edmacc_param *template) 409 { 410 unsigned sync_dev; 411 const u16 acnt = 4; 412 const u16 bcnt = rw_threshold >> 2; 413 const u16 ccnt = 0; 414 u32 src_port = 0; 415 u32 dst_port = 0; 416 s16 src_bidx, dst_bidx; 417 s16 src_cidx, dst_cidx; 418 419 /* 420 * A-B Sync transfer: each DMA request is for one "frame" of 421 * rw_threshold bytes, broken into "acnt"-size chunks repeated 422 * "bcnt" times. Each segment needs "ccnt" such frames; since 423 * we tell the block layer our mmc->max_seg_size limit, we can 424 * trust (later) that it's within bounds. 425 * 426 * The FIFOs are read/written in 4-byte chunks (acnt == 4) and 427 * EDMA will optimize memory operations to use larger bursts. 428 */ 429 if (tx) { 430 sync_dev = host->txdma; 431 432 /* src_prt, ccnt, and link to be set up later */ 433 src_bidx = acnt; 434 src_cidx = acnt * bcnt; 435 436 dst_port = host->mem_res->start + DAVINCI_MMCDXR; 437 dst_bidx = 0; 438 dst_cidx = 0; 439 } else { 440 sync_dev = host->rxdma; 441 442 src_port = host->mem_res->start + DAVINCI_MMCDRR; 443 src_bidx = 0; 444 src_cidx = 0; 445 446 /* dst_prt, ccnt, and link to be set up later */ 447 dst_bidx = acnt; 448 dst_cidx = acnt * bcnt; 449 } 450 451 /* 452 * We can't use FIFO mode for the FIFOs because MMC FIFO addresses 453 * are not 256-bit (32-byte) aligned. So we use INCR, and the W8BIT 454 * parameter is ignored. 455 */ 456 edma_set_src(sync_dev, src_port, INCR, W8BIT); 457 edma_set_dest(sync_dev, dst_port, INCR, W8BIT); 458 459 edma_set_src_index(sync_dev, src_bidx, src_cidx); 460 edma_set_dest_index(sync_dev, dst_bidx, dst_cidx); 461 462 edma_set_transfer_params(sync_dev, acnt, bcnt, ccnt, 8, ABSYNC); 463 464 edma_read_slot(sync_dev, template); 465 466 /* don't bother with irqs or chaining */ 467 template->opt |= EDMA_CHAN_SLOT(sync_dev) << 12; 468 } 469 470 static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host, 471 struct mmc_data *data) 472 { 473 struct edmacc_param *template; 474 int channel, slot; 475 unsigned link; 476 struct scatterlist *sg; 477 unsigned sg_len; 478 unsigned bytes_left = host->bytes_left; 479 const unsigned shift = ffs(rw_threshold) - 1;; 480 481 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 482 template = &host->tx_template; 483 channel = host->txdma; 484 } else { 485 template = &host->rx_template; 486 channel = host->rxdma; 487 } 488 489 /* We know sg_len and ccnt will never be out of range because 490 * we told the mmc layer which in turn tells the block layer 491 * to ensure that it only hands us one scatterlist segment 492 * per EDMA PARAM entry. Update the PARAM 493 * entries needed for each segment of this scatterlist. 494 */ 495 for (slot = channel, link = 0, sg = data->sg, sg_len = host->sg_len; 496 sg_len-- != 0 && bytes_left; 497 sg = sg_next(sg), slot = host->links[link++]) { 498 u32 buf = sg_dma_address(sg); 499 unsigned count = sg_dma_len(sg); 500 501 template->link_bcntrld = sg_len 502 ? (EDMA_CHAN_SLOT(host->links[link]) << 5) 503 : 0xffff; 504 505 if (count > bytes_left) 506 count = bytes_left; 507 bytes_left -= count; 508 509 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) 510 template->src = buf; 511 else 512 template->dst = buf; 513 template->ccnt = count >> shift; 514 515 edma_write_slot(slot, template); 516 } 517 518 if (host->version == MMC_CTLR_VERSION_2) 519 edma_clear_event(channel); 520 521 edma_start(channel); 522 } 523 524 static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, 525 struct mmc_data *data) 526 { 527 int i; 528 int mask = rw_threshold - 1; 529 530 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 531 ((data->flags & MMC_DATA_WRITE) 532 ? DMA_TO_DEVICE 533 : DMA_FROM_DEVICE)); 534 535 /* no individual DMA segment should need a partial FIFO */ 536 for (i = 0; i < host->sg_len; i++) { 537 if (sg_dma_len(data->sg + i) & mask) { 538 dma_unmap_sg(mmc_dev(host->mmc), 539 data->sg, data->sg_len, 540 (data->flags & MMC_DATA_WRITE) 541 ? DMA_TO_DEVICE 542 : DMA_FROM_DEVICE); 543 return -1; 544 } 545 } 546 547 host->do_dma = 1; 548 mmc_davinci_send_dma_request(host, data); 549 550 return 0; 551 } 552 553 static void __init_or_module 554 davinci_release_dma_channels(struct mmc_davinci_host *host) 555 { 556 unsigned i; 557 558 if (!host->use_dma) 559 return; 560 561 for (i = 0; i < host->n_link; i++) 562 edma_free_slot(host->links[i]); 563 564 edma_free_channel(host->txdma); 565 edma_free_channel(host->rxdma); 566 } 567 568 static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) 569 { 570 int r, i; 571 572 /* Acquire master DMA write channel */ 573 r = edma_alloc_channel(host->txdma, mmc_davinci_dma_cb, host, 574 EVENTQ_DEFAULT); 575 if (r < 0) { 576 dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n", 577 "tx", r); 578 return r; 579 } 580 mmc_davinci_dma_setup(host, true, &host->tx_template); 581 582 /* Acquire master DMA read channel */ 583 r = edma_alloc_channel(host->rxdma, mmc_davinci_dma_cb, host, 584 EVENTQ_DEFAULT); 585 if (r < 0) { 586 dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n", 587 "rx", r); 588 goto free_master_write; 589 } 590 mmc_davinci_dma_setup(host, false, &host->rx_template); 591 592 /* Allocate parameter RAM slots, which will later be bound to a 593 * channel as needed to handle a scatterlist. 594 */ 595 for (i = 0; i < ARRAY_SIZE(host->links); i++) { 596 r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY); 597 if (r < 0) { 598 dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n", 599 r); 600 break; 601 } 602 host->links[i] = r; 603 } 604 host->n_link = i; 605 606 return 0; 607 608 free_master_write: 609 edma_free_channel(host->txdma); 610 611 return r; 612 } 613 614 /*----------------------------------------------------------------------*/ 615 616 static void 617 mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req) 618 { 619 int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0; 620 int timeout; 621 struct mmc_data *data = req->data; 622 623 if (host->version == MMC_CTLR_VERSION_2) 624 fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0; 625 626 host->data = data; 627 if (data == NULL) { 628 host->data_dir = DAVINCI_MMC_DATADIR_NONE; 629 writel(0, host->base + DAVINCI_MMCBLEN); 630 writel(0, host->base + DAVINCI_MMCNBLK); 631 return; 632 } 633 634 dev_dbg(mmc_dev(host->mmc), "%s %s, %d blocks of %d bytes\n", 635 (data->flags & MMC_DATA_STREAM) ? "stream" : "block", 636 (data->flags & MMC_DATA_WRITE) ? "write" : "read", 637 data->blocks, data->blksz); 638 dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n", 639 data->timeout_clks, data->timeout_ns); 640 timeout = data->timeout_clks + 641 (data->timeout_ns / host->ns_in_one_cycle); 642 if (timeout > 0xffff) 643 timeout = 0xffff; 644 645 writel(timeout, host->base + DAVINCI_MMCTOD); 646 writel(data->blocks, host->base + DAVINCI_MMCNBLK); 647 writel(data->blksz, host->base + DAVINCI_MMCBLEN); 648 649 /* Configure the FIFO */ 650 switch (data->flags & MMC_DATA_WRITE) { 651 case MMC_DATA_WRITE: 652 host->data_dir = DAVINCI_MMC_DATADIR_WRITE; 653 writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST, 654 host->base + DAVINCI_MMCFIFOCTL); 655 writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR, 656 host->base + DAVINCI_MMCFIFOCTL); 657 break; 658 659 default: 660 host->data_dir = DAVINCI_MMC_DATADIR_READ; 661 writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST, 662 host->base + DAVINCI_MMCFIFOCTL); 663 writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD, 664 host->base + DAVINCI_MMCFIFOCTL); 665 break; 666 } 667 668 host->buffer = NULL; 669 host->bytes_left = data->blocks * data->blksz; 670 671 /* For now we try to use DMA whenever we won't need partial FIFO 672 * reads or writes, either for the whole transfer (as tested here) 673 * or for any individual scatterlist segment (tested when we call 674 * start_dma_transfer). 675 * 676 * While we *could* change that, unusual block sizes are rarely 677 * used. The occasional fallback to PIO should't hurt. 678 */ 679 if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0 680 && mmc_davinci_start_dma_transfer(host, data) == 0) { 681 /* zero this to ensure we take no PIO paths */ 682 host->bytes_left = 0; 683 } else { 684 /* Revert to CPU Copy */ 685 host->sg_len = data->sg_len; 686 host->sg = host->data->sg; 687 mmc_davinci_sg_to_buf(host); 688 } 689 } 690 691 static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req) 692 { 693 struct mmc_davinci_host *host = mmc_priv(mmc); 694 unsigned long timeout = jiffies + msecs_to_jiffies(900); 695 u32 mmcst1 = 0; 696 697 /* Card may still be sending BUSY after a previous operation, 698 * typically some kind of write. If so, we can't proceed yet. 699 */ 700 while (time_before(jiffies, timeout)) { 701 mmcst1 = readl(host->base + DAVINCI_MMCST1); 702 if (!(mmcst1 & MMCST1_BUSY)) 703 break; 704 cpu_relax(); 705 } 706 if (mmcst1 & MMCST1_BUSY) { 707 dev_err(mmc_dev(host->mmc), "still BUSY? bad ... \n"); 708 req->cmd->error = -ETIMEDOUT; 709 mmc_request_done(mmc, req); 710 return; 711 } 712 713 host->do_dma = 0; 714 mmc_davinci_prepare_data(host, req); 715 mmc_davinci_start_command(host, req->cmd); 716 } 717 718 static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host, 719 unsigned int mmc_req_freq) 720 { 721 unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0; 722 723 mmc_pclk = host->mmc_input_clk; 724 if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq)) 725 mmc_push_pull_divisor = ((unsigned int)mmc_pclk 726 / (2 * mmc_req_freq)) - 1; 727 else 728 mmc_push_pull_divisor = 0; 729 730 mmc_freq = (unsigned int)mmc_pclk 731 / (2 * (mmc_push_pull_divisor + 1)); 732 733 if (mmc_freq > mmc_req_freq) 734 mmc_push_pull_divisor = mmc_push_pull_divisor + 1; 735 /* Convert ns to clock cycles */ 736 if (mmc_req_freq <= 400000) 737 host->ns_in_one_cycle = (1000000) / (((mmc_pclk 738 / (2 * (mmc_push_pull_divisor + 1)))/1000)); 739 else 740 host->ns_in_one_cycle = (1000000) / (((mmc_pclk 741 / (2 * (mmc_push_pull_divisor + 1)))/1000000)); 742 743 return mmc_push_pull_divisor; 744 } 745 746 static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios) 747 { 748 unsigned int open_drain_freq = 0, mmc_pclk = 0; 749 unsigned int mmc_push_pull_freq = 0; 750 struct mmc_davinci_host *host = mmc_priv(mmc); 751 752 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { 753 u32 temp; 754 755 /* Ignoring the init clock value passed for fixing the inter 756 * operability with different cards. 757 */ 758 open_drain_freq = ((unsigned int)mmc_pclk 759 / (2 * MMCSD_INIT_CLOCK)) - 1; 760 761 if (open_drain_freq > 0xFF) 762 open_drain_freq = 0xFF; 763 764 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; 765 temp |= open_drain_freq; 766 writel(temp, host->base + DAVINCI_MMCCLK); 767 768 /* Convert ns to clock cycles */ 769 host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000); 770 } else { 771 u32 temp; 772 mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock); 773 774 if (mmc_push_pull_freq > 0xFF) 775 mmc_push_pull_freq = 0xFF; 776 777 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN; 778 writel(temp, host->base + DAVINCI_MMCCLK); 779 780 udelay(10); 781 782 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; 783 temp |= mmc_push_pull_freq; 784 writel(temp, host->base + DAVINCI_MMCCLK); 785 786 writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); 787 788 udelay(10); 789 } 790 } 791 792 static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 793 { 794 unsigned int mmc_pclk = 0; 795 struct mmc_davinci_host *host = mmc_priv(mmc); 796 797 mmc_pclk = host->mmc_input_clk; 798 dev_dbg(mmc_dev(host->mmc), 799 "clock %dHz busmode %d powermode %d Vdd %04x\n", 800 ios->clock, ios->bus_mode, ios->power_mode, 801 ios->vdd); 802 if (ios->bus_width == MMC_BUS_WIDTH_4) { 803 dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n"); 804 writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_WIDTH_4_BIT, 805 host->base + DAVINCI_MMCCTL); 806 } else { 807 dev_dbg(mmc_dev(host->mmc), "Disabling 4 bit mode\n"); 808 writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_WIDTH_4_BIT, 809 host->base + DAVINCI_MMCCTL); 810 } 811 812 calculate_clk_divider(mmc, ios); 813 814 host->bus_mode = ios->bus_mode; 815 if (ios->power_mode == MMC_POWER_UP) { 816 unsigned long timeout = jiffies + msecs_to_jiffies(50); 817 bool lose = true; 818 819 /* Send clock cycles, poll completion */ 820 writel(0, host->base + DAVINCI_MMCARGHL); 821 writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD); 822 while (time_before(jiffies, timeout)) { 823 u32 tmp = readl(host->base + DAVINCI_MMCST0); 824 825 if (tmp & MMCST0_RSPDNE) { 826 lose = false; 827 break; 828 } 829 cpu_relax(); 830 } 831 if (lose) 832 dev_warn(mmc_dev(host->mmc), "powerup timeout\n"); 833 } 834 835 /* FIXME on power OFF, reset things ... */ 836 } 837 838 static void 839 mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data) 840 { 841 host->data = NULL; 842 843 if (host->do_dma) { 844 davinci_abort_dma(host); 845 846 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 847 (data->flags & MMC_DATA_WRITE) 848 ? DMA_TO_DEVICE 849 : DMA_FROM_DEVICE); 850 host->do_dma = false; 851 } 852 host->data_dir = DAVINCI_MMC_DATADIR_NONE; 853 854 if (!data->stop || (host->cmd && host->cmd->error)) { 855 mmc_request_done(host->mmc, data->mrq); 856 writel(0, host->base + DAVINCI_MMCIM); 857 } else 858 mmc_davinci_start_command(host, data->stop); 859 } 860 861 static void mmc_davinci_cmd_done(struct mmc_davinci_host *host, 862 struct mmc_command *cmd) 863 { 864 host->cmd = NULL; 865 866 if (cmd->flags & MMC_RSP_PRESENT) { 867 if (cmd->flags & MMC_RSP_136) { 868 /* response type 2 */ 869 cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01); 870 cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23); 871 cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45); 872 cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); 873 } else { 874 /* response types 1, 1b, 3, 4, 5, 6 */ 875 cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); 876 } 877 } 878 879 if (host->data == NULL || cmd->error) { 880 if (cmd->error == -ETIMEDOUT) 881 cmd->mrq->cmd->retries = 0; 882 mmc_request_done(host->mmc, cmd->mrq); 883 writel(0, host->base + DAVINCI_MMCIM); 884 } 885 } 886 887 static void 888 davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data) 889 { 890 u32 temp; 891 892 /* reset command and data state machines */ 893 temp = readl(host->base + DAVINCI_MMCCTL); 894 writel(temp | MMCCTL_CMDRST | MMCCTL_DATRST, 895 host->base + DAVINCI_MMCCTL); 896 897 temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST); 898 udelay(10); 899 writel(temp, host->base + DAVINCI_MMCCTL); 900 } 901 902 static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) 903 { 904 struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id; 905 unsigned int status, qstatus; 906 int end_command = 0; 907 int end_transfer = 0; 908 struct mmc_data *data = host->data; 909 910 if (host->cmd == NULL && host->data == NULL) { 911 status = readl(host->base + DAVINCI_MMCST0); 912 dev_dbg(mmc_dev(host->mmc), 913 "Spurious interrupt 0x%04x\n", status); 914 /* Disable the interrupt from mmcsd */ 915 writel(0, host->base + DAVINCI_MMCIM); 916 return IRQ_NONE; 917 } 918 919 status = readl(host->base + DAVINCI_MMCST0); 920 qstatus = status; 921 922 /* handle FIFO first when using PIO for data. 923 * bytes_left will decrease to zero as I/O progress and status will 924 * read zero over iteration because this controller status 925 * register(MMCST0) reports any status only once and it is cleared 926 * by read. So, it is not unbouned loop even in the case of 927 * non-dma. 928 */ 929 while (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) { 930 davinci_fifo_data_trans(host, rw_threshold); 931 status = readl(host->base + DAVINCI_MMCST0); 932 if (!status) 933 break; 934 qstatus |= status; 935 } 936 937 if (qstatus & MMCST0_DATDNE) { 938 /* All blocks sent/received, and CRC checks passed */ 939 if (data != NULL) { 940 if ((host->do_dma == 0) && (host->bytes_left > 0)) { 941 /* if datasize < rw_threshold 942 * no RX ints are generated 943 */ 944 davinci_fifo_data_trans(host, host->bytes_left); 945 } 946 end_transfer = 1; 947 data->bytes_xfered = data->blocks * data->blksz; 948 } else { 949 dev_err(mmc_dev(host->mmc), 950 "DATDNE with no host->data\n"); 951 } 952 } 953 954 if (qstatus & MMCST0_TOUTRD) { 955 /* Read data timeout */ 956 data->error = -ETIMEDOUT; 957 end_transfer = 1; 958 959 dev_dbg(mmc_dev(host->mmc), 960 "read data timeout, status %x\n", 961 qstatus); 962 963 davinci_abort_data(host, data); 964 } 965 966 if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) { 967 /* Data CRC error */ 968 data->error = -EILSEQ; 969 end_transfer = 1; 970 971 /* NOTE: this controller uses CRCWR to report both CRC 972 * errors and timeouts (on writes). MMCDRSP values are 973 * only weakly documented, but 0x9f was clearly a timeout 974 * case and the two three-bit patterns in various SD specs 975 * (101, 010) aren't part of it ... 976 */ 977 if (qstatus & MMCST0_CRCWR) { 978 u32 temp = readb(host->base + DAVINCI_MMCDRSP); 979 980 if (temp == 0x9f) 981 data->error = -ETIMEDOUT; 982 } 983 dev_dbg(mmc_dev(host->mmc), "data %s %s error\n", 984 (qstatus & MMCST0_CRCWR) ? "write" : "read", 985 (data->error == -ETIMEDOUT) ? "timeout" : "CRC"); 986 987 davinci_abort_data(host, data); 988 } 989 990 if (qstatus & MMCST0_TOUTRS) { 991 /* Command timeout */ 992 if (host->cmd) { 993 dev_dbg(mmc_dev(host->mmc), 994 "CMD%d timeout, status %x\n", 995 host->cmd->opcode, qstatus); 996 host->cmd->error = -ETIMEDOUT; 997 if (data) { 998 end_transfer = 1; 999 davinci_abort_data(host, data); 1000 } else 1001 end_command = 1; 1002 } 1003 } 1004 1005 if (qstatus & MMCST0_CRCRS) { 1006 /* Command CRC error */ 1007 dev_dbg(mmc_dev(host->mmc), "Command CRC error\n"); 1008 if (host->cmd) { 1009 host->cmd->error = -EILSEQ; 1010 end_command = 1; 1011 } 1012 } 1013 1014 if (qstatus & MMCST0_RSPDNE) { 1015 /* End of command phase */ 1016 end_command = (int) host->cmd; 1017 } 1018 1019 if (end_command) 1020 mmc_davinci_cmd_done(host, host->cmd); 1021 if (end_transfer) 1022 mmc_davinci_xfer_done(host, data); 1023 return IRQ_HANDLED; 1024 } 1025 1026 static int mmc_davinci_get_cd(struct mmc_host *mmc) 1027 { 1028 struct platform_device *pdev = to_platform_device(mmc->parent); 1029 struct davinci_mmc_config *config = pdev->dev.platform_data; 1030 1031 if (!config || !config->get_cd) 1032 return -ENOSYS; 1033 return config->get_cd(pdev->id); 1034 } 1035 1036 static int mmc_davinci_get_ro(struct mmc_host *mmc) 1037 { 1038 struct platform_device *pdev = to_platform_device(mmc->parent); 1039 struct davinci_mmc_config *config = pdev->dev.platform_data; 1040 1041 if (!config || !config->get_ro) 1042 return -ENOSYS; 1043 return config->get_ro(pdev->id); 1044 } 1045 1046 static struct mmc_host_ops mmc_davinci_ops = { 1047 .request = mmc_davinci_request, 1048 .set_ios = mmc_davinci_set_ios, 1049 .get_cd = mmc_davinci_get_cd, 1050 .get_ro = mmc_davinci_get_ro, 1051 }; 1052 1053 /*----------------------------------------------------------------------*/ 1054 1055 #ifdef CONFIG_CPU_FREQ 1056 static int mmc_davinci_cpufreq_transition(struct notifier_block *nb, 1057 unsigned long val, void *data) 1058 { 1059 struct mmc_davinci_host *host; 1060 unsigned int mmc_pclk; 1061 struct mmc_host *mmc; 1062 unsigned long flags; 1063 1064 host = container_of(nb, struct mmc_davinci_host, freq_transition); 1065 mmc = host->mmc; 1066 mmc_pclk = clk_get_rate(host->clk); 1067 1068 if (val == CPUFREQ_POSTCHANGE) { 1069 spin_lock_irqsave(&mmc->lock, flags); 1070 host->mmc_input_clk = mmc_pclk; 1071 calculate_clk_divider(mmc, &mmc->ios); 1072 spin_unlock_irqrestore(&mmc->lock, flags); 1073 } 1074 1075 return 0; 1076 } 1077 1078 static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) 1079 { 1080 host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition; 1081 1082 return cpufreq_register_notifier(&host->freq_transition, 1083 CPUFREQ_TRANSITION_NOTIFIER); 1084 } 1085 1086 static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) 1087 { 1088 cpufreq_unregister_notifier(&host->freq_transition, 1089 CPUFREQ_TRANSITION_NOTIFIER); 1090 } 1091 #else 1092 static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) 1093 { 1094 return 0; 1095 } 1096 1097 static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) 1098 { 1099 } 1100 #endif 1101 static void __init init_mmcsd_host(struct mmc_davinci_host *host) 1102 { 1103 /* DAT line portion is diabled and in reset state */ 1104 writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_DATRST, 1105 host->base + DAVINCI_MMCCTL); 1106 1107 /* CMD line portion is diabled and in reset state */ 1108 writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_CMDRST, 1109 host->base + DAVINCI_MMCCTL); 1110 1111 udelay(10); 1112 1113 writel(0, host->base + DAVINCI_MMCCLK); 1114 writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); 1115 1116 writel(0x1FFF, host->base + DAVINCI_MMCTOR); 1117 writel(0xFFFF, host->base + DAVINCI_MMCTOD); 1118 1119 writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_DATRST, 1120 host->base + DAVINCI_MMCCTL); 1121 writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_CMDRST, 1122 host->base + DAVINCI_MMCCTL); 1123 1124 udelay(10); 1125 } 1126 1127 static int __init davinci_mmcsd_probe(struct platform_device *pdev) 1128 { 1129 struct davinci_mmc_config *pdata = pdev->dev.platform_data; 1130 struct mmc_davinci_host *host = NULL; 1131 struct mmc_host *mmc = NULL; 1132 struct resource *r, *mem = NULL; 1133 int ret = 0, irq = 0; 1134 size_t mem_size; 1135 1136 /* REVISIT: when we're fully converted, fail if pdata is NULL */ 1137 1138 ret = -ENODEV; 1139 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1140 irq = platform_get_irq(pdev, 0); 1141 if (!r || irq == NO_IRQ) 1142 goto out; 1143 1144 ret = -EBUSY; 1145 mem_size = resource_size(r); 1146 mem = request_mem_region(r->start, mem_size, pdev->name); 1147 if (!mem) 1148 goto out; 1149 1150 ret = -ENOMEM; 1151 mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev); 1152 if (!mmc) 1153 goto out; 1154 1155 host = mmc_priv(mmc); 1156 host->mmc = mmc; /* Important */ 1157 1158 r = platform_get_resource(pdev, IORESOURCE_DMA, 0); 1159 if (!r) 1160 goto out; 1161 host->rxdma = r->start; 1162 1163 r = platform_get_resource(pdev, IORESOURCE_DMA, 1); 1164 if (!r) 1165 goto out; 1166 host->txdma = r->start; 1167 1168 host->mem_res = mem; 1169 host->base = ioremap(mem->start, mem_size); 1170 if (!host->base) 1171 goto out; 1172 1173 ret = -ENXIO; 1174 host->clk = clk_get(&pdev->dev, "MMCSDCLK"); 1175 if (IS_ERR(host->clk)) { 1176 ret = PTR_ERR(host->clk); 1177 goto out; 1178 } 1179 clk_enable(host->clk); 1180 host->mmc_input_clk = clk_get_rate(host->clk); 1181 1182 init_mmcsd_host(host); 1183 1184 host->use_dma = use_dma; 1185 host->irq = irq; 1186 1187 if (host->use_dma && davinci_acquire_dma_channels(host) != 0) 1188 host->use_dma = 0; 1189 1190 /* REVISIT: someday, support IRQ-driven card detection. */ 1191 mmc->caps |= MMC_CAP_NEEDS_POLL; 1192 1193 if (!pdata || pdata->wires == 4 || pdata->wires == 0) 1194 mmc->caps |= MMC_CAP_4_BIT_DATA; 1195 1196 host->version = pdata->version; 1197 1198 mmc->ops = &mmc_davinci_ops; 1199 mmc->f_min = 312500; 1200 mmc->f_max = 25000000; 1201 if (pdata && pdata->max_freq) 1202 mmc->f_max = pdata->max_freq; 1203 if (pdata && pdata->caps) 1204 mmc->caps |= pdata->caps; 1205 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1206 1207 /* With no iommu coalescing pages, each phys_seg is a hw_seg. 1208 * Each hw_seg uses one EDMA parameter RAM slot, always one 1209 * channel and then usually some linked slots. 1210 */ 1211 mmc->max_hw_segs = 1 + host->n_link; 1212 mmc->max_phys_segs = mmc->max_hw_segs; 1213 1214 /* EDMA limit per hw segment (one or two MBytes) */ 1215 mmc->max_seg_size = MAX_CCNT * rw_threshold; 1216 1217 /* MMC/SD controller limits for multiblock requests */ 1218 mmc->max_blk_size = 4095; /* BLEN is 12 bits */ 1219 mmc->max_blk_count = 65535; /* NBLK is 16 bits */ 1220 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1221 1222 dev_dbg(mmc_dev(host->mmc), "max_phys_segs=%d\n", mmc->max_phys_segs); 1223 dev_dbg(mmc_dev(host->mmc), "max_hw_segs=%d\n", mmc->max_hw_segs); 1224 dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size); 1225 dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size); 1226 dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size); 1227 1228 platform_set_drvdata(pdev, host); 1229 1230 ret = mmc_davinci_cpufreq_register(host); 1231 if (ret) { 1232 dev_err(&pdev->dev, "failed to register cpufreq\n"); 1233 goto cpu_freq_fail; 1234 } 1235 1236 ret = mmc_add_host(mmc); 1237 if (ret < 0) 1238 goto out; 1239 1240 ret = request_irq(irq, mmc_davinci_irq, 0, mmc_hostname(mmc), host); 1241 if (ret) 1242 goto out; 1243 1244 rename_region(mem, mmc_hostname(mmc)); 1245 1246 dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n", 1247 host->use_dma ? "DMA" : "PIO", 1248 (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1); 1249 1250 return 0; 1251 1252 out: 1253 mmc_davinci_cpufreq_deregister(host); 1254 cpu_freq_fail: 1255 if (host) { 1256 davinci_release_dma_channels(host); 1257 1258 if (host->clk) { 1259 clk_disable(host->clk); 1260 clk_put(host->clk); 1261 } 1262 1263 if (host->base) 1264 iounmap(host->base); 1265 } 1266 1267 if (mmc) 1268 mmc_free_host(mmc); 1269 1270 if (mem) 1271 release_resource(mem); 1272 1273 dev_dbg(&pdev->dev, "probe err %d\n", ret); 1274 1275 return ret; 1276 } 1277 1278 static int __exit davinci_mmcsd_remove(struct platform_device *pdev) 1279 { 1280 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1281 1282 platform_set_drvdata(pdev, NULL); 1283 if (host) { 1284 mmc_davinci_cpufreq_deregister(host); 1285 1286 mmc_remove_host(host->mmc); 1287 free_irq(host->irq, host); 1288 1289 davinci_release_dma_channels(host); 1290 1291 clk_disable(host->clk); 1292 clk_put(host->clk); 1293 1294 iounmap(host->base); 1295 1296 release_resource(host->mem_res); 1297 1298 mmc_free_host(host->mmc); 1299 } 1300 1301 return 0; 1302 } 1303 1304 #ifdef CONFIG_PM 1305 static int davinci_mmcsd_suspend(struct platform_device *pdev, pm_message_t msg) 1306 { 1307 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1308 1309 return mmc_suspend_host(host->mmc, msg); 1310 } 1311 1312 static int davinci_mmcsd_resume(struct platform_device *pdev) 1313 { 1314 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1315 1316 return mmc_resume_host(host->mmc); 1317 } 1318 #else 1319 #define davinci_mmcsd_suspend NULL 1320 #define davinci_mmcsd_resume NULL 1321 #endif 1322 1323 static struct platform_driver davinci_mmcsd_driver = { 1324 .driver = { 1325 .name = "davinci_mmc", 1326 .owner = THIS_MODULE, 1327 }, 1328 .remove = __exit_p(davinci_mmcsd_remove), 1329 .suspend = davinci_mmcsd_suspend, 1330 .resume = davinci_mmcsd_resume, 1331 }; 1332 1333 static int __init davinci_mmcsd_init(void) 1334 { 1335 return platform_driver_probe(&davinci_mmcsd_driver, 1336 davinci_mmcsd_probe); 1337 } 1338 module_init(davinci_mmcsd_init); 1339 1340 static void __exit davinci_mmcsd_exit(void) 1341 { 1342 platform_driver_unregister(&davinci_mmcsd_driver); 1343 } 1344 module_exit(davinci_mmcsd_exit); 1345 1346 MODULE_AUTHOR("Texas Instruments India"); 1347 MODULE_LICENSE("GPL"); 1348 MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller"); 1349 1350