1 /* 2 * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver 3 * 4 * Copyright (C) 2006 Texas Instruments. 5 * Original author: Purushotam Kumar 6 * Copyright (C) 2009 David Brownell 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 */ 22 23 #include <linux/module.h> 24 #include <linux/ioport.h> 25 #include <linux/platform_device.h> 26 #include <linux/clk.h> 27 #include <linux/err.h> 28 #include <linux/cpufreq.h> 29 #include <linux/mmc/host.h> 30 #include <linux/io.h> 31 #include <linux/irq.h> 32 #include <linux/delay.h> 33 #include <linux/dmaengine.h> 34 #include <linux/dma-mapping.h> 35 #include <linux/edma.h> 36 #include <linux/mmc/mmc.h> 37 38 #include <linux/platform_data/mmc-davinci.h> 39 40 /* 41 * Register Definitions 42 */ 43 #define DAVINCI_MMCCTL 0x00 /* Control Register */ 44 #define DAVINCI_MMCCLK 0x04 /* Memory Clock Control Register */ 45 #define DAVINCI_MMCST0 0x08 /* Status Register 0 */ 46 #define DAVINCI_MMCST1 0x0C /* Status Register 1 */ 47 #define DAVINCI_MMCIM 0x10 /* Interrupt Mask Register */ 48 #define DAVINCI_MMCTOR 0x14 /* Response Time-Out Register */ 49 #define DAVINCI_MMCTOD 0x18 /* Data Read Time-Out Register */ 50 #define DAVINCI_MMCBLEN 0x1C /* Block Length Register */ 51 #define DAVINCI_MMCNBLK 0x20 /* Number of Blocks Register */ 52 #define DAVINCI_MMCNBLC 0x24 /* Number of Blocks Counter Register */ 53 #define DAVINCI_MMCDRR 0x28 /* Data Receive Register */ 54 #define DAVINCI_MMCDXR 0x2C /* Data Transmit Register */ 55 #define DAVINCI_MMCCMD 0x30 /* Command Register */ 56 #define DAVINCI_MMCARGHL 0x34 /* Argument Register */ 57 #define DAVINCI_MMCRSP01 0x38 /* Response Register 0 and 1 */ 58 #define DAVINCI_MMCRSP23 0x3C /* Response Register 0 and 1 */ 59 #define DAVINCI_MMCRSP45 0x40 /* Response Register 0 and 1 */ 60 #define DAVINCI_MMCRSP67 0x44 /* Response Register 0 and 1 */ 61 #define DAVINCI_MMCDRSP 0x48 /* Data Response Register */ 62 #define DAVINCI_MMCETOK 0x4C 63 #define DAVINCI_MMCCIDX 0x50 /* Command Index Register */ 64 #define DAVINCI_MMCCKC 0x54 65 #define DAVINCI_MMCTORC 0x58 66 #define DAVINCI_MMCTODC 0x5C 67 #define DAVINCI_MMCBLNC 0x60 68 #define DAVINCI_SDIOCTL 0x64 69 #define DAVINCI_SDIOST0 0x68 70 #define DAVINCI_SDIOIEN 0x6C 71 #define DAVINCI_SDIOIST 0x70 72 #define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */ 73 74 /* DAVINCI_MMCCTL definitions */ 75 #define MMCCTL_DATRST (1 << 0) 76 #define MMCCTL_CMDRST (1 << 1) 77 #define MMCCTL_WIDTH_8_BIT (1 << 8) 78 #define MMCCTL_WIDTH_4_BIT (1 << 2) 79 #define MMCCTL_DATEG_DISABLED (0 << 6) 80 #define MMCCTL_DATEG_RISING (1 << 6) 81 #define MMCCTL_DATEG_FALLING (2 << 6) 82 #define MMCCTL_DATEG_BOTH (3 << 6) 83 #define MMCCTL_PERMDR_LE (0 << 9) 84 #define MMCCTL_PERMDR_BE (1 << 9) 85 #define MMCCTL_PERMDX_LE (0 << 10) 86 #define MMCCTL_PERMDX_BE (1 << 10) 87 88 /* DAVINCI_MMCCLK definitions */ 89 #define MMCCLK_CLKEN (1 << 8) 90 #define MMCCLK_CLKRT_MASK (0xFF << 0) 91 92 /* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */ 93 #define MMCST0_DATDNE BIT(0) /* data done */ 94 #define MMCST0_BSYDNE BIT(1) /* busy done */ 95 #define MMCST0_RSPDNE BIT(2) /* command done */ 96 #define MMCST0_TOUTRD BIT(3) /* data read timeout */ 97 #define MMCST0_TOUTRS BIT(4) /* command response timeout */ 98 #define MMCST0_CRCWR BIT(5) /* data write CRC error */ 99 #define MMCST0_CRCRD BIT(6) /* data read CRC error */ 100 #define MMCST0_CRCRS BIT(7) /* command response CRC error */ 101 #define MMCST0_DXRDY BIT(9) /* data transmit ready (fifo empty) */ 102 #define MMCST0_DRRDY BIT(10) /* data receive ready (data in fifo)*/ 103 #define MMCST0_DATED BIT(11) /* DAT3 edge detect */ 104 #define MMCST0_TRNDNE BIT(12) /* transfer done */ 105 106 /* DAVINCI_MMCST1 definitions */ 107 #define MMCST1_BUSY (1 << 0) 108 109 /* DAVINCI_MMCCMD definitions */ 110 #define MMCCMD_CMD_MASK (0x3F << 0) 111 #define MMCCMD_PPLEN (1 << 7) 112 #define MMCCMD_BSYEXP (1 << 8) 113 #define MMCCMD_RSPFMT_MASK (3 << 9) 114 #define MMCCMD_RSPFMT_NONE (0 << 9) 115 #define MMCCMD_RSPFMT_R1456 (1 << 9) 116 #define MMCCMD_RSPFMT_R2 (2 << 9) 117 #define MMCCMD_RSPFMT_R3 (3 << 9) 118 #define MMCCMD_DTRW (1 << 11) 119 #define MMCCMD_STRMTP (1 << 12) 120 #define MMCCMD_WDATX (1 << 13) 121 #define MMCCMD_INITCK (1 << 14) 122 #define MMCCMD_DCLR (1 << 15) 123 #define MMCCMD_DMATRIG (1 << 16) 124 125 /* DAVINCI_MMCFIFOCTL definitions */ 126 #define MMCFIFOCTL_FIFORST (1 << 0) 127 #define MMCFIFOCTL_FIFODIR_WR (1 << 1) 128 #define MMCFIFOCTL_FIFODIR_RD (0 << 1) 129 #define MMCFIFOCTL_FIFOLEV (1 << 2) /* 0 = 128 bits, 1 = 256 bits */ 130 #define MMCFIFOCTL_ACCWD_4 (0 << 3) /* access width of 4 bytes */ 131 #define MMCFIFOCTL_ACCWD_3 (1 << 3) /* access width of 3 bytes */ 132 #define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */ 133 #define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */ 134 135 /* DAVINCI_SDIOST0 definitions */ 136 #define SDIOST0_DAT1_HI BIT(0) 137 138 /* DAVINCI_SDIOIEN definitions */ 139 #define SDIOIEN_IOINTEN BIT(0) 140 141 /* DAVINCI_SDIOIST definitions */ 142 #define SDIOIST_IOINT BIT(0) 143 144 /* MMCSD Init clock in Hz in opendrain mode */ 145 #define MMCSD_INIT_CLOCK 200000 146 147 /* 148 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units, 149 * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only 150 * for drivers with max_segs == 1, making the segments bigger (64KB) 151 * than the page or two that's otherwise typical. nr_sg (passed from 152 * platform data) == 16 gives at least the same throughput boost, using 153 * EDMA transfer linkage instead of spending CPU time copying pages. 154 */ 155 #define MAX_CCNT ((1 << 16) - 1) 156 157 #define MAX_NR_SG 16 158 159 static unsigned rw_threshold = 32; 160 module_param(rw_threshold, uint, S_IRUGO); 161 MODULE_PARM_DESC(rw_threshold, 162 "Read/Write threshold. Default = 32"); 163 164 static unsigned poll_threshold = 128; 165 module_param(poll_threshold, uint, S_IRUGO); 166 MODULE_PARM_DESC(poll_threshold, 167 "Polling transaction size threshold. Default = 128"); 168 169 static unsigned poll_loopcount = 32; 170 module_param(poll_loopcount, uint, S_IRUGO); 171 MODULE_PARM_DESC(poll_loopcount, 172 "Maximum polling loop count. Default = 32"); 173 174 static unsigned __initdata use_dma = 1; 175 module_param(use_dma, uint, 0); 176 MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1"); 177 178 struct mmc_davinci_host { 179 struct mmc_command *cmd; 180 struct mmc_data *data; 181 struct mmc_host *mmc; 182 struct clk *clk; 183 unsigned int mmc_input_clk; 184 void __iomem *base; 185 struct resource *mem_res; 186 int mmc_irq, sdio_irq; 187 unsigned char bus_mode; 188 189 #define DAVINCI_MMC_DATADIR_NONE 0 190 #define DAVINCI_MMC_DATADIR_READ 1 191 #define DAVINCI_MMC_DATADIR_WRITE 2 192 unsigned char data_dir; 193 unsigned char suspended; 194 195 /* buffer is used during PIO of one scatterlist segment, and 196 * is updated along with buffer_bytes_left. bytes_left applies 197 * to all N blocks of the PIO transfer. 198 */ 199 u8 *buffer; 200 u32 buffer_bytes_left; 201 u32 bytes_left; 202 203 u32 rxdma, txdma; 204 struct dma_chan *dma_tx; 205 struct dma_chan *dma_rx; 206 bool use_dma; 207 bool do_dma; 208 bool sdio_int; 209 bool active_request; 210 211 /* For PIO we walk scatterlists one segment at a time. */ 212 unsigned int sg_len; 213 struct scatterlist *sg; 214 215 /* Version of the MMC/SD controller */ 216 u8 version; 217 /* for ns in one cycle calculation */ 218 unsigned ns_in_one_cycle; 219 /* Number of sg segments */ 220 u8 nr_sg; 221 #ifdef CONFIG_CPU_FREQ 222 struct notifier_block freq_transition; 223 #endif 224 }; 225 226 static irqreturn_t mmc_davinci_irq(int irq, void *dev_id); 227 228 /* PIO only */ 229 static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host) 230 { 231 host->buffer_bytes_left = sg_dma_len(host->sg); 232 host->buffer = sg_virt(host->sg); 233 if (host->buffer_bytes_left > host->bytes_left) 234 host->buffer_bytes_left = host->bytes_left; 235 } 236 237 static void davinci_fifo_data_trans(struct mmc_davinci_host *host, 238 unsigned int n) 239 { 240 u8 *p; 241 unsigned int i; 242 243 if (host->buffer_bytes_left == 0) { 244 host->sg = sg_next(host->data->sg); 245 mmc_davinci_sg_to_buf(host); 246 } 247 248 p = host->buffer; 249 if (n > host->buffer_bytes_left) 250 n = host->buffer_bytes_left; 251 host->buffer_bytes_left -= n; 252 host->bytes_left -= n; 253 254 /* NOTE: we never transfer more than rw_threshold bytes 255 * to/from the fifo here; there's no I/O overlap. 256 * This also assumes that access width( i.e. ACCWD) is 4 bytes 257 */ 258 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 259 for (i = 0; i < (n >> 2); i++) { 260 writel(*((u32 *)p), host->base + DAVINCI_MMCDXR); 261 p = p + 4; 262 } 263 if (n & 3) { 264 iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3)); 265 p = p + (n & 3); 266 } 267 } else { 268 for (i = 0; i < (n >> 2); i++) { 269 *((u32 *)p) = readl(host->base + DAVINCI_MMCDRR); 270 p = p + 4; 271 } 272 if (n & 3) { 273 ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3)); 274 p = p + (n & 3); 275 } 276 } 277 host->buffer = p; 278 } 279 280 static void mmc_davinci_start_command(struct mmc_davinci_host *host, 281 struct mmc_command *cmd) 282 { 283 u32 cmd_reg = 0; 284 u32 im_val; 285 286 dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n", 287 cmd->opcode, cmd->arg, 288 ({ char *s; 289 switch (mmc_resp_type(cmd)) { 290 case MMC_RSP_R1: 291 s = ", R1/R5/R6/R7 response"; 292 break; 293 case MMC_RSP_R1B: 294 s = ", R1b response"; 295 break; 296 case MMC_RSP_R2: 297 s = ", R2 response"; 298 break; 299 case MMC_RSP_R3: 300 s = ", R3/R4 response"; 301 break; 302 default: 303 s = ", (R? response)"; 304 break; 305 }; s; })); 306 host->cmd = cmd; 307 308 switch (mmc_resp_type(cmd)) { 309 case MMC_RSP_R1B: 310 /* There's some spec confusion about when R1B is 311 * allowed, but if the card doesn't issue a BUSY 312 * then it's harmless for us to allow it. 313 */ 314 cmd_reg |= MMCCMD_BSYEXP; 315 /* FALLTHROUGH */ 316 case MMC_RSP_R1: /* 48 bits, CRC */ 317 cmd_reg |= MMCCMD_RSPFMT_R1456; 318 break; 319 case MMC_RSP_R2: /* 136 bits, CRC */ 320 cmd_reg |= MMCCMD_RSPFMT_R2; 321 break; 322 case MMC_RSP_R3: /* 48 bits, no CRC */ 323 cmd_reg |= MMCCMD_RSPFMT_R3; 324 break; 325 default: 326 cmd_reg |= MMCCMD_RSPFMT_NONE; 327 dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n", 328 mmc_resp_type(cmd)); 329 break; 330 } 331 332 /* Set command index */ 333 cmd_reg |= cmd->opcode; 334 335 /* Enable EDMA transfer triggers */ 336 if (host->do_dma) 337 cmd_reg |= MMCCMD_DMATRIG; 338 339 if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL && 340 host->data_dir == DAVINCI_MMC_DATADIR_READ) 341 cmd_reg |= MMCCMD_DMATRIG; 342 343 /* Setting whether command involves data transfer or not */ 344 if (cmd->data) 345 cmd_reg |= MMCCMD_WDATX; 346 347 /* Setting whether stream or block transfer */ 348 if (cmd->flags & MMC_DATA_STREAM) 349 cmd_reg |= MMCCMD_STRMTP; 350 351 /* Setting whether data read or write */ 352 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) 353 cmd_reg |= MMCCMD_DTRW; 354 355 if (host->bus_mode == MMC_BUSMODE_PUSHPULL) 356 cmd_reg |= MMCCMD_PPLEN; 357 358 /* set Command timeout */ 359 writel(0x1FFF, host->base + DAVINCI_MMCTOR); 360 361 /* Enable interrupt (calculate here, defer until FIFO is stuffed). */ 362 im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS; 363 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 364 im_val |= MMCST0_DATDNE | MMCST0_CRCWR; 365 366 if (!host->do_dma) 367 im_val |= MMCST0_DXRDY; 368 } else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) { 369 im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD; 370 371 if (!host->do_dma) 372 im_val |= MMCST0_DRRDY; 373 } 374 375 /* 376 * Before non-DMA WRITE commands the controller needs priming: 377 * FIFO should be populated with 32 bytes i.e. whatever is the FIFO size 378 */ 379 if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)) 380 davinci_fifo_data_trans(host, rw_threshold); 381 382 writel(cmd->arg, host->base + DAVINCI_MMCARGHL); 383 writel(cmd_reg, host->base + DAVINCI_MMCCMD); 384 385 host->active_request = true; 386 387 if (!host->do_dma && host->bytes_left <= poll_threshold) { 388 u32 count = poll_loopcount; 389 390 while (host->active_request && count--) { 391 mmc_davinci_irq(0, host); 392 cpu_relax(); 393 } 394 } 395 396 if (host->active_request) 397 writel(im_val, host->base + DAVINCI_MMCIM); 398 } 399 400 /*----------------------------------------------------------------------*/ 401 402 /* DMA infrastructure */ 403 404 static void davinci_abort_dma(struct mmc_davinci_host *host) 405 { 406 struct dma_chan *sync_dev; 407 408 if (host->data_dir == DAVINCI_MMC_DATADIR_READ) 409 sync_dev = host->dma_rx; 410 else 411 sync_dev = host->dma_tx; 412 413 dmaengine_terminate_all(sync_dev); 414 } 415 416 static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host, 417 struct mmc_data *data) 418 { 419 struct dma_chan *chan; 420 struct dma_async_tx_descriptor *desc; 421 int ret = 0; 422 423 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 424 struct dma_slave_config dma_tx_conf = { 425 .direction = DMA_MEM_TO_DEV, 426 .dst_addr = host->mem_res->start + DAVINCI_MMCDXR, 427 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 428 .dst_maxburst = 429 rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, 430 }; 431 chan = host->dma_tx; 432 dmaengine_slave_config(host->dma_tx, &dma_tx_conf); 433 434 desc = dmaengine_prep_slave_sg(host->dma_tx, 435 data->sg, 436 host->sg_len, 437 DMA_MEM_TO_DEV, 438 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 439 if (!desc) { 440 dev_dbg(mmc_dev(host->mmc), 441 "failed to allocate DMA TX descriptor"); 442 ret = -1; 443 goto out; 444 } 445 } else { 446 struct dma_slave_config dma_rx_conf = { 447 .direction = DMA_DEV_TO_MEM, 448 .src_addr = host->mem_res->start + DAVINCI_MMCDRR, 449 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 450 .src_maxburst = 451 rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, 452 }; 453 chan = host->dma_rx; 454 dmaengine_slave_config(host->dma_rx, &dma_rx_conf); 455 456 desc = dmaengine_prep_slave_sg(host->dma_rx, 457 data->sg, 458 host->sg_len, 459 DMA_DEV_TO_MEM, 460 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 461 if (!desc) { 462 dev_dbg(mmc_dev(host->mmc), 463 "failed to allocate DMA RX descriptor"); 464 ret = -1; 465 goto out; 466 } 467 } 468 469 dmaengine_submit(desc); 470 dma_async_issue_pending(chan); 471 472 out: 473 return ret; 474 } 475 476 static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, 477 struct mmc_data *data) 478 { 479 int i; 480 int mask = rw_threshold - 1; 481 int ret = 0; 482 483 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 484 ((data->flags & MMC_DATA_WRITE) 485 ? DMA_TO_DEVICE 486 : DMA_FROM_DEVICE)); 487 488 /* no individual DMA segment should need a partial FIFO */ 489 for (i = 0; i < host->sg_len; i++) { 490 if (sg_dma_len(data->sg + i) & mask) { 491 dma_unmap_sg(mmc_dev(host->mmc), 492 data->sg, data->sg_len, 493 (data->flags & MMC_DATA_WRITE) 494 ? DMA_TO_DEVICE 495 : DMA_FROM_DEVICE); 496 return -1; 497 } 498 } 499 500 host->do_dma = 1; 501 ret = mmc_davinci_send_dma_request(host, data); 502 503 return ret; 504 } 505 506 static void __init_or_module 507 davinci_release_dma_channels(struct mmc_davinci_host *host) 508 { 509 if (!host->use_dma) 510 return; 511 512 dma_release_channel(host->dma_tx); 513 dma_release_channel(host->dma_rx); 514 } 515 516 static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) 517 { 518 int r; 519 dma_cap_mask_t mask; 520 521 dma_cap_zero(mask); 522 dma_cap_set(DMA_SLAVE, mask); 523 524 host->dma_tx = 525 dma_request_channel(mask, edma_filter_fn, &host->txdma); 526 if (!host->dma_tx) { 527 dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n"); 528 return -ENODEV; 529 } 530 531 host->dma_rx = 532 dma_request_channel(mask, edma_filter_fn, &host->rxdma); 533 if (!host->dma_rx) { 534 dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n"); 535 r = -ENODEV; 536 goto free_master_write; 537 } 538 539 return 0; 540 541 free_master_write: 542 dma_release_channel(host->dma_tx); 543 544 return r; 545 } 546 547 /*----------------------------------------------------------------------*/ 548 549 static void 550 mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req) 551 { 552 int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0; 553 int timeout; 554 struct mmc_data *data = req->data; 555 556 if (host->version == MMC_CTLR_VERSION_2) 557 fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0; 558 559 host->data = data; 560 if (data == NULL) { 561 host->data_dir = DAVINCI_MMC_DATADIR_NONE; 562 writel(0, host->base + DAVINCI_MMCBLEN); 563 writel(0, host->base + DAVINCI_MMCNBLK); 564 return; 565 } 566 567 dev_dbg(mmc_dev(host->mmc), "%s %s, %d blocks of %d bytes\n", 568 (data->flags & MMC_DATA_STREAM) ? "stream" : "block", 569 (data->flags & MMC_DATA_WRITE) ? "write" : "read", 570 data->blocks, data->blksz); 571 dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n", 572 data->timeout_clks, data->timeout_ns); 573 timeout = data->timeout_clks + 574 (data->timeout_ns / host->ns_in_one_cycle); 575 if (timeout > 0xffff) 576 timeout = 0xffff; 577 578 writel(timeout, host->base + DAVINCI_MMCTOD); 579 writel(data->blocks, host->base + DAVINCI_MMCNBLK); 580 writel(data->blksz, host->base + DAVINCI_MMCBLEN); 581 582 /* Configure the FIFO */ 583 switch (data->flags & MMC_DATA_WRITE) { 584 case MMC_DATA_WRITE: 585 host->data_dir = DAVINCI_MMC_DATADIR_WRITE; 586 writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST, 587 host->base + DAVINCI_MMCFIFOCTL); 588 writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR, 589 host->base + DAVINCI_MMCFIFOCTL); 590 break; 591 592 default: 593 host->data_dir = DAVINCI_MMC_DATADIR_READ; 594 writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST, 595 host->base + DAVINCI_MMCFIFOCTL); 596 writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD, 597 host->base + DAVINCI_MMCFIFOCTL); 598 break; 599 } 600 601 host->buffer = NULL; 602 host->bytes_left = data->blocks * data->blksz; 603 604 /* For now we try to use DMA whenever we won't need partial FIFO 605 * reads or writes, either for the whole transfer (as tested here) 606 * or for any individual scatterlist segment (tested when we call 607 * start_dma_transfer). 608 * 609 * While we *could* change that, unusual block sizes are rarely 610 * used. The occasional fallback to PIO should't hurt. 611 */ 612 if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0 613 && mmc_davinci_start_dma_transfer(host, data) == 0) { 614 /* zero this to ensure we take no PIO paths */ 615 host->bytes_left = 0; 616 } else { 617 /* Revert to CPU Copy */ 618 host->sg_len = data->sg_len; 619 host->sg = host->data->sg; 620 mmc_davinci_sg_to_buf(host); 621 } 622 } 623 624 static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req) 625 { 626 struct mmc_davinci_host *host = mmc_priv(mmc); 627 unsigned long timeout = jiffies + msecs_to_jiffies(900); 628 u32 mmcst1 = 0; 629 630 /* Card may still be sending BUSY after a previous operation, 631 * typically some kind of write. If so, we can't proceed yet. 632 */ 633 while (time_before(jiffies, timeout)) { 634 mmcst1 = readl(host->base + DAVINCI_MMCST1); 635 if (!(mmcst1 & MMCST1_BUSY)) 636 break; 637 cpu_relax(); 638 } 639 if (mmcst1 & MMCST1_BUSY) { 640 dev_err(mmc_dev(host->mmc), "still BUSY? bad ... \n"); 641 req->cmd->error = -ETIMEDOUT; 642 mmc_request_done(mmc, req); 643 return; 644 } 645 646 host->do_dma = 0; 647 mmc_davinci_prepare_data(host, req); 648 mmc_davinci_start_command(host, req->cmd); 649 } 650 651 static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host, 652 unsigned int mmc_req_freq) 653 { 654 unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0; 655 656 mmc_pclk = host->mmc_input_clk; 657 if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq)) 658 mmc_push_pull_divisor = ((unsigned int)mmc_pclk 659 / (2 * mmc_req_freq)) - 1; 660 else 661 mmc_push_pull_divisor = 0; 662 663 mmc_freq = (unsigned int)mmc_pclk 664 / (2 * (mmc_push_pull_divisor + 1)); 665 666 if (mmc_freq > mmc_req_freq) 667 mmc_push_pull_divisor = mmc_push_pull_divisor + 1; 668 /* Convert ns to clock cycles */ 669 if (mmc_req_freq <= 400000) 670 host->ns_in_one_cycle = (1000000) / (((mmc_pclk 671 / (2 * (mmc_push_pull_divisor + 1)))/1000)); 672 else 673 host->ns_in_one_cycle = (1000000) / (((mmc_pclk 674 / (2 * (mmc_push_pull_divisor + 1)))/1000000)); 675 676 return mmc_push_pull_divisor; 677 } 678 679 static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios) 680 { 681 unsigned int open_drain_freq = 0, mmc_pclk = 0; 682 unsigned int mmc_push_pull_freq = 0; 683 struct mmc_davinci_host *host = mmc_priv(mmc); 684 685 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { 686 u32 temp; 687 688 /* Ignoring the init clock value passed for fixing the inter 689 * operability with different cards. 690 */ 691 open_drain_freq = ((unsigned int)mmc_pclk 692 / (2 * MMCSD_INIT_CLOCK)) - 1; 693 694 if (open_drain_freq > 0xFF) 695 open_drain_freq = 0xFF; 696 697 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; 698 temp |= open_drain_freq; 699 writel(temp, host->base + DAVINCI_MMCCLK); 700 701 /* Convert ns to clock cycles */ 702 host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000); 703 } else { 704 u32 temp; 705 mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock); 706 707 if (mmc_push_pull_freq > 0xFF) 708 mmc_push_pull_freq = 0xFF; 709 710 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN; 711 writel(temp, host->base + DAVINCI_MMCCLK); 712 713 udelay(10); 714 715 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; 716 temp |= mmc_push_pull_freq; 717 writel(temp, host->base + DAVINCI_MMCCLK); 718 719 writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); 720 721 udelay(10); 722 } 723 } 724 725 static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 726 { 727 struct mmc_davinci_host *host = mmc_priv(mmc); 728 struct platform_device *pdev = to_platform_device(mmc->parent); 729 struct davinci_mmc_config *config = pdev->dev.platform_data; 730 731 dev_dbg(mmc_dev(host->mmc), 732 "clock %dHz busmode %d powermode %d Vdd %04x\n", 733 ios->clock, ios->bus_mode, ios->power_mode, 734 ios->vdd); 735 736 switch (ios->power_mode) { 737 case MMC_POWER_OFF: 738 if (config && config->set_power) 739 config->set_power(pdev->id, false); 740 break; 741 case MMC_POWER_UP: 742 if (config && config->set_power) 743 config->set_power(pdev->id, true); 744 break; 745 } 746 747 switch (ios->bus_width) { 748 case MMC_BUS_WIDTH_8: 749 dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n"); 750 writel((readl(host->base + DAVINCI_MMCCTL) & 751 ~MMCCTL_WIDTH_4_BIT) | MMCCTL_WIDTH_8_BIT, 752 host->base + DAVINCI_MMCCTL); 753 break; 754 case MMC_BUS_WIDTH_4: 755 dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n"); 756 if (host->version == MMC_CTLR_VERSION_2) 757 writel((readl(host->base + DAVINCI_MMCCTL) & 758 ~MMCCTL_WIDTH_8_BIT) | MMCCTL_WIDTH_4_BIT, 759 host->base + DAVINCI_MMCCTL); 760 else 761 writel(readl(host->base + DAVINCI_MMCCTL) | 762 MMCCTL_WIDTH_4_BIT, 763 host->base + DAVINCI_MMCCTL); 764 break; 765 case MMC_BUS_WIDTH_1: 766 dev_dbg(mmc_dev(host->mmc), "Enabling 1 bit mode\n"); 767 if (host->version == MMC_CTLR_VERSION_2) 768 writel(readl(host->base + DAVINCI_MMCCTL) & 769 ~(MMCCTL_WIDTH_8_BIT | MMCCTL_WIDTH_4_BIT), 770 host->base + DAVINCI_MMCCTL); 771 else 772 writel(readl(host->base + DAVINCI_MMCCTL) & 773 ~MMCCTL_WIDTH_4_BIT, 774 host->base + DAVINCI_MMCCTL); 775 break; 776 } 777 778 calculate_clk_divider(mmc, ios); 779 780 host->bus_mode = ios->bus_mode; 781 if (ios->power_mode == MMC_POWER_UP) { 782 unsigned long timeout = jiffies + msecs_to_jiffies(50); 783 bool lose = true; 784 785 /* Send clock cycles, poll completion */ 786 writel(0, host->base + DAVINCI_MMCARGHL); 787 writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD); 788 while (time_before(jiffies, timeout)) { 789 u32 tmp = readl(host->base + DAVINCI_MMCST0); 790 791 if (tmp & MMCST0_RSPDNE) { 792 lose = false; 793 break; 794 } 795 cpu_relax(); 796 } 797 if (lose) 798 dev_warn(mmc_dev(host->mmc), "powerup timeout\n"); 799 } 800 801 /* FIXME on power OFF, reset things ... */ 802 } 803 804 static void 805 mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data) 806 { 807 host->data = NULL; 808 809 if (host->mmc->caps & MMC_CAP_SDIO_IRQ) { 810 /* 811 * SDIO Interrupt Detection work-around as suggested by 812 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata 813 * 2.1.6): Signal SDIO interrupt only if it is enabled by core 814 */ 815 if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) & 816 SDIOST0_DAT1_HI)) { 817 writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); 818 mmc_signal_sdio_irq(host->mmc); 819 } 820 } 821 822 if (host->do_dma) { 823 davinci_abort_dma(host); 824 825 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 826 (data->flags & MMC_DATA_WRITE) 827 ? DMA_TO_DEVICE 828 : DMA_FROM_DEVICE); 829 host->do_dma = false; 830 } 831 host->data_dir = DAVINCI_MMC_DATADIR_NONE; 832 833 if (!data->stop || (host->cmd && host->cmd->error)) { 834 mmc_request_done(host->mmc, data->mrq); 835 writel(0, host->base + DAVINCI_MMCIM); 836 host->active_request = false; 837 } else 838 mmc_davinci_start_command(host, data->stop); 839 } 840 841 static void mmc_davinci_cmd_done(struct mmc_davinci_host *host, 842 struct mmc_command *cmd) 843 { 844 host->cmd = NULL; 845 846 if (cmd->flags & MMC_RSP_PRESENT) { 847 if (cmd->flags & MMC_RSP_136) { 848 /* response type 2 */ 849 cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01); 850 cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23); 851 cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45); 852 cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); 853 } else { 854 /* response types 1, 1b, 3, 4, 5, 6 */ 855 cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); 856 } 857 } 858 859 if (host->data == NULL || cmd->error) { 860 if (cmd->error == -ETIMEDOUT) 861 cmd->mrq->cmd->retries = 0; 862 mmc_request_done(host->mmc, cmd->mrq); 863 writel(0, host->base + DAVINCI_MMCIM); 864 host->active_request = false; 865 } 866 } 867 868 static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host, 869 int val) 870 { 871 u32 temp; 872 873 temp = readl(host->base + DAVINCI_MMCCTL); 874 if (val) /* reset */ 875 temp |= MMCCTL_CMDRST | MMCCTL_DATRST; 876 else /* enable */ 877 temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST); 878 879 writel(temp, host->base + DAVINCI_MMCCTL); 880 udelay(10); 881 } 882 883 static void 884 davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data) 885 { 886 mmc_davinci_reset_ctrl(host, 1); 887 mmc_davinci_reset_ctrl(host, 0); 888 } 889 890 static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id) 891 { 892 struct mmc_davinci_host *host = dev_id; 893 unsigned int status; 894 895 status = readl(host->base + DAVINCI_SDIOIST); 896 if (status & SDIOIST_IOINT) { 897 dev_dbg(mmc_dev(host->mmc), 898 "SDIO interrupt status %x\n", status); 899 writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); 900 mmc_signal_sdio_irq(host->mmc); 901 } 902 return IRQ_HANDLED; 903 } 904 905 static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) 906 { 907 struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id; 908 unsigned int status, qstatus; 909 int end_command = 0; 910 int end_transfer = 0; 911 struct mmc_data *data = host->data; 912 913 if (host->cmd == NULL && host->data == NULL) { 914 status = readl(host->base + DAVINCI_MMCST0); 915 dev_dbg(mmc_dev(host->mmc), 916 "Spurious interrupt 0x%04x\n", status); 917 /* Disable the interrupt from mmcsd */ 918 writel(0, host->base + DAVINCI_MMCIM); 919 return IRQ_NONE; 920 } 921 922 status = readl(host->base + DAVINCI_MMCST0); 923 qstatus = status; 924 925 /* handle FIFO first when using PIO for data. 926 * bytes_left will decrease to zero as I/O progress and status will 927 * read zero over iteration because this controller status 928 * register(MMCST0) reports any status only once and it is cleared 929 * by read. So, it is not unbouned loop even in the case of 930 * non-dma. 931 */ 932 if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) { 933 unsigned long im_val; 934 935 /* 936 * If interrupts fire during the following loop, they will be 937 * handled by the handler, but the PIC will still buffer these. 938 * As a result, the handler will be called again to serve these 939 * needlessly. In order to avoid these spurious interrupts, 940 * keep interrupts masked during the loop. 941 */ 942 im_val = readl(host->base + DAVINCI_MMCIM); 943 writel(0, host->base + DAVINCI_MMCIM); 944 945 do { 946 davinci_fifo_data_trans(host, rw_threshold); 947 status = readl(host->base + DAVINCI_MMCST0); 948 qstatus |= status; 949 } while (host->bytes_left && 950 (status & (MMCST0_DXRDY | MMCST0_DRRDY))); 951 952 /* 953 * If an interrupt is pending, it is assumed it will fire when 954 * it is unmasked. This assumption is also taken when the MMCIM 955 * is first set. Otherwise, writing to MMCIM after reading the 956 * status is race-prone. 957 */ 958 writel(im_val, host->base + DAVINCI_MMCIM); 959 } 960 961 if (qstatus & MMCST0_DATDNE) { 962 /* All blocks sent/received, and CRC checks passed */ 963 if (data != NULL) { 964 if ((host->do_dma == 0) && (host->bytes_left > 0)) { 965 /* if datasize < rw_threshold 966 * no RX ints are generated 967 */ 968 davinci_fifo_data_trans(host, host->bytes_left); 969 } 970 end_transfer = 1; 971 data->bytes_xfered = data->blocks * data->blksz; 972 } else { 973 dev_err(mmc_dev(host->mmc), 974 "DATDNE with no host->data\n"); 975 } 976 } 977 978 if (qstatus & MMCST0_TOUTRD) { 979 /* Read data timeout */ 980 data->error = -ETIMEDOUT; 981 end_transfer = 1; 982 983 dev_dbg(mmc_dev(host->mmc), 984 "read data timeout, status %x\n", 985 qstatus); 986 987 davinci_abort_data(host, data); 988 } 989 990 if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) { 991 /* Data CRC error */ 992 data->error = -EILSEQ; 993 end_transfer = 1; 994 995 /* NOTE: this controller uses CRCWR to report both CRC 996 * errors and timeouts (on writes). MMCDRSP values are 997 * only weakly documented, but 0x9f was clearly a timeout 998 * case and the two three-bit patterns in various SD specs 999 * (101, 010) aren't part of it ... 1000 */ 1001 if (qstatus & MMCST0_CRCWR) { 1002 u32 temp = readb(host->base + DAVINCI_MMCDRSP); 1003 1004 if (temp == 0x9f) 1005 data->error = -ETIMEDOUT; 1006 } 1007 dev_dbg(mmc_dev(host->mmc), "data %s %s error\n", 1008 (qstatus & MMCST0_CRCWR) ? "write" : "read", 1009 (data->error == -ETIMEDOUT) ? "timeout" : "CRC"); 1010 1011 davinci_abort_data(host, data); 1012 } 1013 1014 if (qstatus & MMCST0_TOUTRS) { 1015 /* Command timeout */ 1016 if (host->cmd) { 1017 dev_dbg(mmc_dev(host->mmc), 1018 "CMD%d timeout, status %x\n", 1019 host->cmd->opcode, qstatus); 1020 host->cmd->error = -ETIMEDOUT; 1021 if (data) { 1022 end_transfer = 1; 1023 davinci_abort_data(host, data); 1024 } else 1025 end_command = 1; 1026 } 1027 } 1028 1029 if (qstatus & MMCST0_CRCRS) { 1030 /* Command CRC error */ 1031 dev_dbg(mmc_dev(host->mmc), "Command CRC error\n"); 1032 if (host->cmd) { 1033 host->cmd->error = -EILSEQ; 1034 end_command = 1; 1035 } 1036 } 1037 1038 if (qstatus & MMCST0_RSPDNE) { 1039 /* End of command phase */ 1040 end_command = (int) host->cmd; 1041 } 1042 1043 if (end_command) 1044 mmc_davinci_cmd_done(host, host->cmd); 1045 if (end_transfer) 1046 mmc_davinci_xfer_done(host, data); 1047 return IRQ_HANDLED; 1048 } 1049 1050 static int mmc_davinci_get_cd(struct mmc_host *mmc) 1051 { 1052 struct platform_device *pdev = to_platform_device(mmc->parent); 1053 struct davinci_mmc_config *config = pdev->dev.platform_data; 1054 1055 if (!config || !config->get_cd) 1056 return -ENOSYS; 1057 return config->get_cd(pdev->id); 1058 } 1059 1060 static int mmc_davinci_get_ro(struct mmc_host *mmc) 1061 { 1062 struct platform_device *pdev = to_platform_device(mmc->parent); 1063 struct davinci_mmc_config *config = pdev->dev.platform_data; 1064 1065 if (!config || !config->get_ro) 1066 return -ENOSYS; 1067 return config->get_ro(pdev->id); 1068 } 1069 1070 static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable) 1071 { 1072 struct mmc_davinci_host *host = mmc_priv(mmc); 1073 1074 if (enable) { 1075 if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) { 1076 writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); 1077 mmc_signal_sdio_irq(host->mmc); 1078 } else { 1079 host->sdio_int = true; 1080 writel(readl(host->base + DAVINCI_SDIOIEN) | 1081 SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN); 1082 } 1083 } else { 1084 host->sdio_int = false; 1085 writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN, 1086 host->base + DAVINCI_SDIOIEN); 1087 } 1088 } 1089 1090 static struct mmc_host_ops mmc_davinci_ops = { 1091 .request = mmc_davinci_request, 1092 .set_ios = mmc_davinci_set_ios, 1093 .get_cd = mmc_davinci_get_cd, 1094 .get_ro = mmc_davinci_get_ro, 1095 .enable_sdio_irq = mmc_davinci_enable_sdio_irq, 1096 }; 1097 1098 /*----------------------------------------------------------------------*/ 1099 1100 #ifdef CONFIG_CPU_FREQ 1101 static int mmc_davinci_cpufreq_transition(struct notifier_block *nb, 1102 unsigned long val, void *data) 1103 { 1104 struct mmc_davinci_host *host; 1105 unsigned int mmc_pclk; 1106 struct mmc_host *mmc; 1107 unsigned long flags; 1108 1109 host = container_of(nb, struct mmc_davinci_host, freq_transition); 1110 mmc = host->mmc; 1111 mmc_pclk = clk_get_rate(host->clk); 1112 1113 if (val == CPUFREQ_POSTCHANGE) { 1114 spin_lock_irqsave(&mmc->lock, flags); 1115 host->mmc_input_clk = mmc_pclk; 1116 calculate_clk_divider(mmc, &mmc->ios); 1117 spin_unlock_irqrestore(&mmc->lock, flags); 1118 } 1119 1120 return 0; 1121 } 1122 1123 static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) 1124 { 1125 host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition; 1126 1127 return cpufreq_register_notifier(&host->freq_transition, 1128 CPUFREQ_TRANSITION_NOTIFIER); 1129 } 1130 1131 static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) 1132 { 1133 cpufreq_unregister_notifier(&host->freq_transition, 1134 CPUFREQ_TRANSITION_NOTIFIER); 1135 } 1136 #else 1137 static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) 1138 { 1139 return 0; 1140 } 1141 1142 static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) 1143 { 1144 } 1145 #endif 1146 static void __init init_mmcsd_host(struct mmc_davinci_host *host) 1147 { 1148 1149 mmc_davinci_reset_ctrl(host, 1); 1150 1151 writel(0, host->base + DAVINCI_MMCCLK); 1152 writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); 1153 1154 writel(0x1FFF, host->base + DAVINCI_MMCTOR); 1155 writel(0xFFFF, host->base + DAVINCI_MMCTOD); 1156 1157 mmc_davinci_reset_ctrl(host, 0); 1158 } 1159 1160 static int __init davinci_mmcsd_probe(struct platform_device *pdev) 1161 { 1162 struct davinci_mmc_config *pdata = pdev->dev.platform_data; 1163 struct mmc_davinci_host *host = NULL; 1164 struct mmc_host *mmc = NULL; 1165 struct resource *r, *mem = NULL; 1166 int ret = 0, irq = 0; 1167 size_t mem_size; 1168 1169 /* REVISIT: when we're fully converted, fail if pdata is NULL */ 1170 1171 ret = -ENODEV; 1172 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1173 irq = platform_get_irq(pdev, 0); 1174 if (!r || irq == NO_IRQ) 1175 goto out; 1176 1177 ret = -EBUSY; 1178 mem_size = resource_size(r); 1179 mem = request_mem_region(r->start, mem_size, pdev->name); 1180 if (!mem) 1181 goto out; 1182 1183 ret = -ENOMEM; 1184 mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev); 1185 if (!mmc) 1186 goto out; 1187 1188 host = mmc_priv(mmc); 1189 host->mmc = mmc; /* Important */ 1190 1191 r = platform_get_resource(pdev, IORESOURCE_DMA, 0); 1192 if (!r) 1193 goto out; 1194 host->rxdma = r->start; 1195 1196 r = platform_get_resource(pdev, IORESOURCE_DMA, 1); 1197 if (!r) 1198 goto out; 1199 host->txdma = r->start; 1200 1201 host->mem_res = mem; 1202 host->base = ioremap(mem->start, mem_size); 1203 if (!host->base) 1204 goto out; 1205 1206 ret = -ENXIO; 1207 host->clk = clk_get(&pdev->dev, "MMCSDCLK"); 1208 if (IS_ERR(host->clk)) { 1209 ret = PTR_ERR(host->clk); 1210 goto out; 1211 } 1212 clk_enable(host->clk); 1213 host->mmc_input_clk = clk_get_rate(host->clk); 1214 1215 init_mmcsd_host(host); 1216 1217 if (pdata->nr_sg) 1218 host->nr_sg = pdata->nr_sg - 1; 1219 1220 if (host->nr_sg > MAX_NR_SG || !host->nr_sg) 1221 host->nr_sg = MAX_NR_SG; 1222 1223 host->use_dma = use_dma; 1224 host->mmc_irq = irq; 1225 host->sdio_irq = platform_get_irq(pdev, 1); 1226 1227 if (host->use_dma && davinci_acquire_dma_channels(host) != 0) 1228 host->use_dma = 0; 1229 1230 /* REVISIT: someday, support IRQ-driven card detection. */ 1231 mmc->caps |= MMC_CAP_NEEDS_POLL; 1232 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; 1233 1234 if (pdata && (pdata->wires == 4 || pdata->wires == 0)) 1235 mmc->caps |= MMC_CAP_4_BIT_DATA; 1236 1237 if (pdata && (pdata->wires == 8)) 1238 mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA); 1239 1240 host->version = pdata->version; 1241 1242 mmc->ops = &mmc_davinci_ops; 1243 mmc->f_min = 312500; 1244 mmc->f_max = 25000000; 1245 if (pdata && pdata->max_freq) 1246 mmc->f_max = pdata->max_freq; 1247 if (pdata && pdata->caps) 1248 mmc->caps |= pdata->caps; 1249 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1250 1251 /* With no iommu coalescing pages, each phys_seg is a hw_seg. 1252 * Each hw_seg uses one EDMA parameter RAM slot, always one 1253 * channel and then usually some linked slots. 1254 */ 1255 mmc->max_segs = MAX_NR_SG; 1256 1257 /* EDMA limit per hw segment (one or two MBytes) */ 1258 mmc->max_seg_size = MAX_CCNT * rw_threshold; 1259 1260 /* MMC/SD controller limits for multiblock requests */ 1261 mmc->max_blk_size = 4095; /* BLEN is 12 bits */ 1262 mmc->max_blk_count = 65535; /* NBLK is 16 bits */ 1263 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1264 1265 dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs); 1266 dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size); 1267 dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size); 1268 dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size); 1269 1270 platform_set_drvdata(pdev, host); 1271 1272 ret = mmc_davinci_cpufreq_register(host); 1273 if (ret) { 1274 dev_err(&pdev->dev, "failed to register cpufreq\n"); 1275 goto cpu_freq_fail; 1276 } 1277 1278 ret = mmc_add_host(mmc); 1279 if (ret < 0) 1280 goto out; 1281 1282 ret = request_irq(irq, mmc_davinci_irq, 0, mmc_hostname(mmc), host); 1283 if (ret) 1284 goto out; 1285 1286 if (host->sdio_irq >= 0) { 1287 ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0, 1288 mmc_hostname(mmc), host); 1289 if (!ret) 1290 mmc->caps |= MMC_CAP_SDIO_IRQ; 1291 } 1292 1293 rename_region(mem, mmc_hostname(mmc)); 1294 1295 dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n", 1296 host->use_dma ? "DMA" : "PIO", 1297 (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1); 1298 1299 return 0; 1300 1301 out: 1302 mmc_davinci_cpufreq_deregister(host); 1303 cpu_freq_fail: 1304 if (host) { 1305 davinci_release_dma_channels(host); 1306 1307 if (host->clk) { 1308 clk_disable(host->clk); 1309 clk_put(host->clk); 1310 } 1311 1312 if (host->base) 1313 iounmap(host->base); 1314 } 1315 1316 if (mmc) 1317 mmc_free_host(mmc); 1318 1319 if (mem) 1320 release_resource(mem); 1321 1322 dev_dbg(&pdev->dev, "probe err %d\n", ret); 1323 1324 return ret; 1325 } 1326 1327 static int __exit davinci_mmcsd_remove(struct platform_device *pdev) 1328 { 1329 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1330 1331 platform_set_drvdata(pdev, NULL); 1332 if (host) { 1333 mmc_davinci_cpufreq_deregister(host); 1334 1335 mmc_remove_host(host->mmc); 1336 free_irq(host->mmc_irq, host); 1337 if (host->mmc->caps & MMC_CAP_SDIO_IRQ) 1338 free_irq(host->sdio_irq, host); 1339 1340 davinci_release_dma_channels(host); 1341 1342 clk_disable(host->clk); 1343 clk_put(host->clk); 1344 1345 iounmap(host->base); 1346 1347 release_resource(host->mem_res); 1348 1349 mmc_free_host(host->mmc); 1350 } 1351 1352 return 0; 1353 } 1354 1355 #ifdef CONFIG_PM 1356 static int davinci_mmcsd_suspend(struct device *dev) 1357 { 1358 struct platform_device *pdev = to_platform_device(dev); 1359 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1360 int ret; 1361 1362 ret = mmc_suspend_host(host->mmc); 1363 if (!ret) { 1364 writel(0, host->base + DAVINCI_MMCIM); 1365 mmc_davinci_reset_ctrl(host, 1); 1366 clk_disable(host->clk); 1367 host->suspended = 1; 1368 } else { 1369 host->suspended = 0; 1370 } 1371 1372 return ret; 1373 } 1374 1375 static int davinci_mmcsd_resume(struct device *dev) 1376 { 1377 struct platform_device *pdev = to_platform_device(dev); 1378 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1379 int ret; 1380 1381 if (!host->suspended) 1382 return 0; 1383 1384 clk_enable(host->clk); 1385 1386 mmc_davinci_reset_ctrl(host, 0); 1387 ret = mmc_resume_host(host->mmc); 1388 if (!ret) 1389 host->suspended = 0; 1390 1391 return ret; 1392 } 1393 1394 static const struct dev_pm_ops davinci_mmcsd_pm = { 1395 .suspend = davinci_mmcsd_suspend, 1396 .resume = davinci_mmcsd_resume, 1397 }; 1398 1399 #define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm) 1400 #else 1401 #define davinci_mmcsd_pm_ops NULL 1402 #endif 1403 1404 static struct platform_driver davinci_mmcsd_driver = { 1405 .driver = { 1406 .name = "davinci_mmc", 1407 .owner = THIS_MODULE, 1408 .pm = davinci_mmcsd_pm_ops, 1409 }, 1410 .remove = __exit_p(davinci_mmcsd_remove), 1411 }; 1412 1413 static int __init davinci_mmcsd_init(void) 1414 { 1415 return platform_driver_probe(&davinci_mmcsd_driver, 1416 davinci_mmcsd_probe); 1417 } 1418 module_init(davinci_mmcsd_init); 1419 1420 static void __exit davinci_mmcsd_exit(void) 1421 { 1422 platform_driver_unregister(&davinci_mmcsd_driver); 1423 } 1424 module_exit(davinci_mmcsd_exit); 1425 1426 MODULE_AUTHOR("Texas Instruments India"); 1427 MODULE_LICENSE("GPL"); 1428 MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller"); 1429 MODULE_ALIAS("platform:davinci_mmc"); 1430 1431