1 /* 2 * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver 3 * 4 * Copyright (C) 2006 Texas Instruments. 5 * Original author: Purushotam Kumar 6 * Copyright (C) 2009 David Brownell 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 */ 22 23 #include <linux/module.h> 24 #include <linux/ioport.h> 25 #include <linux/platform_device.h> 26 #include <linux/clk.h> 27 #include <linux/err.h> 28 #include <linux/cpufreq.h> 29 #include <linux/mmc/host.h> 30 #include <linux/io.h> 31 #include <linux/irq.h> 32 #include <linux/delay.h> 33 #include <linux/dmaengine.h> 34 #include <linux/dma-mapping.h> 35 #include <linux/mmc/mmc.h> 36 #include <linux/of.h> 37 #include <linux/of_device.h> 38 #include <linux/mmc/slot-gpio.h> 39 #include <linux/interrupt.h> 40 41 #include <linux/platform_data/mmc-davinci.h> 42 43 /* 44 * Register Definitions 45 */ 46 #define DAVINCI_MMCCTL 0x00 /* Control Register */ 47 #define DAVINCI_MMCCLK 0x04 /* Memory Clock Control Register */ 48 #define DAVINCI_MMCST0 0x08 /* Status Register 0 */ 49 #define DAVINCI_MMCST1 0x0C /* Status Register 1 */ 50 #define DAVINCI_MMCIM 0x10 /* Interrupt Mask Register */ 51 #define DAVINCI_MMCTOR 0x14 /* Response Time-Out Register */ 52 #define DAVINCI_MMCTOD 0x18 /* Data Read Time-Out Register */ 53 #define DAVINCI_MMCBLEN 0x1C /* Block Length Register */ 54 #define DAVINCI_MMCNBLK 0x20 /* Number of Blocks Register */ 55 #define DAVINCI_MMCNBLC 0x24 /* Number of Blocks Counter Register */ 56 #define DAVINCI_MMCDRR 0x28 /* Data Receive Register */ 57 #define DAVINCI_MMCDXR 0x2C /* Data Transmit Register */ 58 #define DAVINCI_MMCCMD 0x30 /* Command Register */ 59 #define DAVINCI_MMCARGHL 0x34 /* Argument Register */ 60 #define DAVINCI_MMCRSP01 0x38 /* Response Register 0 and 1 */ 61 #define DAVINCI_MMCRSP23 0x3C /* Response Register 0 and 1 */ 62 #define DAVINCI_MMCRSP45 0x40 /* Response Register 0 and 1 */ 63 #define DAVINCI_MMCRSP67 0x44 /* Response Register 0 and 1 */ 64 #define DAVINCI_MMCDRSP 0x48 /* Data Response Register */ 65 #define DAVINCI_MMCETOK 0x4C 66 #define DAVINCI_MMCCIDX 0x50 /* Command Index Register */ 67 #define DAVINCI_MMCCKC 0x54 68 #define DAVINCI_MMCTORC 0x58 69 #define DAVINCI_MMCTODC 0x5C 70 #define DAVINCI_MMCBLNC 0x60 71 #define DAVINCI_SDIOCTL 0x64 72 #define DAVINCI_SDIOST0 0x68 73 #define DAVINCI_SDIOIEN 0x6C 74 #define DAVINCI_SDIOIST 0x70 75 #define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */ 76 77 /* DAVINCI_MMCCTL definitions */ 78 #define MMCCTL_DATRST (1 << 0) 79 #define MMCCTL_CMDRST (1 << 1) 80 #define MMCCTL_WIDTH_8_BIT (1 << 8) 81 #define MMCCTL_WIDTH_4_BIT (1 << 2) 82 #define MMCCTL_DATEG_DISABLED (0 << 6) 83 #define MMCCTL_DATEG_RISING (1 << 6) 84 #define MMCCTL_DATEG_FALLING (2 << 6) 85 #define MMCCTL_DATEG_BOTH (3 << 6) 86 #define MMCCTL_PERMDR_LE (0 << 9) 87 #define MMCCTL_PERMDR_BE (1 << 9) 88 #define MMCCTL_PERMDX_LE (0 << 10) 89 #define MMCCTL_PERMDX_BE (1 << 10) 90 91 /* DAVINCI_MMCCLK definitions */ 92 #define MMCCLK_CLKEN (1 << 8) 93 #define MMCCLK_CLKRT_MASK (0xFF << 0) 94 95 /* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */ 96 #define MMCST0_DATDNE BIT(0) /* data done */ 97 #define MMCST0_BSYDNE BIT(1) /* busy done */ 98 #define MMCST0_RSPDNE BIT(2) /* command done */ 99 #define MMCST0_TOUTRD BIT(3) /* data read timeout */ 100 #define MMCST0_TOUTRS BIT(4) /* command response timeout */ 101 #define MMCST0_CRCWR BIT(5) /* data write CRC error */ 102 #define MMCST0_CRCRD BIT(6) /* data read CRC error */ 103 #define MMCST0_CRCRS BIT(7) /* command response CRC error */ 104 #define MMCST0_DXRDY BIT(9) /* data transmit ready (fifo empty) */ 105 #define MMCST0_DRRDY BIT(10) /* data receive ready (data in fifo)*/ 106 #define MMCST0_DATED BIT(11) /* DAT3 edge detect */ 107 #define MMCST0_TRNDNE BIT(12) /* transfer done */ 108 109 /* DAVINCI_MMCST1 definitions */ 110 #define MMCST1_BUSY (1 << 0) 111 112 /* DAVINCI_MMCCMD definitions */ 113 #define MMCCMD_CMD_MASK (0x3F << 0) 114 #define MMCCMD_PPLEN (1 << 7) 115 #define MMCCMD_BSYEXP (1 << 8) 116 #define MMCCMD_RSPFMT_MASK (3 << 9) 117 #define MMCCMD_RSPFMT_NONE (0 << 9) 118 #define MMCCMD_RSPFMT_R1456 (1 << 9) 119 #define MMCCMD_RSPFMT_R2 (2 << 9) 120 #define MMCCMD_RSPFMT_R3 (3 << 9) 121 #define MMCCMD_DTRW (1 << 11) 122 #define MMCCMD_STRMTP (1 << 12) 123 #define MMCCMD_WDATX (1 << 13) 124 #define MMCCMD_INITCK (1 << 14) 125 #define MMCCMD_DCLR (1 << 15) 126 #define MMCCMD_DMATRIG (1 << 16) 127 128 /* DAVINCI_MMCFIFOCTL definitions */ 129 #define MMCFIFOCTL_FIFORST (1 << 0) 130 #define MMCFIFOCTL_FIFODIR_WR (1 << 1) 131 #define MMCFIFOCTL_FIFODIR_RD (0 << 1) 132 #define MMCFIFOCTL_FIFOLEV (1 << 2) /* 0 = 128 bits, 1 = 256 bits */ 133 #define MMCFIFOCTL_ACCWD_4 (0 << 3) /* access width of 4 bytes */ 134 #define MMCFIFOCTL_ACCWD_3 (1 << 3) /* access width of 3 bytes */ 135 #define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */ 136 #define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */ 137 138 /* DAVINCI_SDIOST0 definitions */ 139 #define SDIOST0_DAT1_HI BIT(0) 140 141 /* DAVINCI_SDIOIEN definitions */ 142 #define SDIOIEN_IOINTEN BIT(0) 143 144 /* DAVINCI_SDIOIST definitions */ 145 #define SDIOIST_IOINT BIT(0) 146 147 /* MMCSD Init clock in Hz in opendrain mode */ 148 #define MMCSD_INIT_CLOCK 200000 149 150 /* 151 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units, 152 * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only 153 * for drivers with max_segs == 1, making the segments bigger (64KB) 154 * than the page or two that's otherwise typical. nr_sg (passed from 155 * platform data) == 16 gives at least the same throughput boost, using 156 * EDMA transfer linkage instead of spending CPU time copying pages. 157 */ 158 #define MAX_CCNT ((1 << 16) - 1) 159 160 #define MAX_NR_SG 16 161 162 static unsigned rw_threshold = 32; 163 module_param(rw_threshold, uint, S_IRUGO); 164 MODULE_PARM_DESC(rw_threshold, 165 "Read/Write threshold. Default = 32"); 166 167 static unsigned poll_threshold = 128; 168 module_param(poll_threshold, uint, S_IRUGO); 169 MODULE_PARM_DESC(poll_threshold, 170 "Polling transaction size threshold. Default = 128"); 171 172 static unsigned poll_loopcount = 32; 173 module_param(poll_loopcount, uint, S_IRUGO); 174 MODULE_PARM_DESC(poll_loopcount, 175 "Maximum polling loop count. Default = 32"); 176 177 static unsigned __initdata use_dma = 1; 178 module_param(use_dma, uint, 0); 179 MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1"); 180 181 struct mmc_davinci_host { 182 struct mmc_command *cmd; 183 struct mmc_data *data; 184 struct mmc_host *mmc; 185 struct clk *clk; 186 unsigned int mmc_input_clk; 187 void __iomem *base; 188 struct resource *mem_res; 189 int mmc_irq, sdio_irq; 190 unsigned char bus_mode; 191 192 #define DAVINCI_MMC_DATADIR_NONE 0 193 #define DAVINCI_MMC_DATADIR_READ 1 194 #define DAVINCI_MMC_DATADIR_WRITE 2 195 unsigned char data_dir; 196 197 /* buffer is used during PIO of one scatterlist segment, and 198 * is updated along with buffer_bytes_left. bytes_left applies 199 * to all N blocks of the PIO transfer. 200 */ 201 u8 *buffer; 202 u32 buffer_bytes_left; 203 u32 bytes_left; 204 205 struct dma_chan *dma_tx; 206 struct dma_chan *dma_rx; 207 bool use_dma; 208 bool do_dma; 209 bool sdio_int; 210 bool active_request; 211 212 /* For PIO we walk scatterlists one segment at a time. */ 213 unsigned int sg_len; 214 struct scatterlist *sg; 215 216 /* Version of the MMC/SD controller */ 217 u8 version; 218 /* for ns in one cycle calculation */ 219 unsigned ns_in_one_cycle; 220 /* Number of sg segments */ 221 u8 nr_sg; 222 #ifdef CONFIG_CPU_FREQ 223 struct notifier_block freq_transition; 224 #endif 225 }; 226 227 static irqreturn_t mmc_davinci_irq(int irq, void *dev_id); 228 229 /* PIO only */ 230 static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host) 231 { 232 host->buffer_bytes_left = sg_dma_len(host->sg); 233 host->buffer = sg_virt(host->sg); 234 if (host->buffer_bytes_left > host->bytes_left) 235 host->buffer_bytes_left = host->bytes_left; 236 } 237 238 static void davinci_fifo_data_trans(struct mmc_davinci_host *host, 239 unsigned int n) 240 { 241 u8 *p; 242 unsigned int i; 243 244 if (host->buffer_bytes_left == 0) { 245 host->sg = sg_next(host->data->sg); 246 mmc_davinci_sg_to_buf(host); 247 } 248 249 p = host->buffer; 250 if (n > host->buffer_bytes_left) 251 n = host->buffer_bytes_left; 252 host->buffer_bytes_left -= n; 253 host->bytes_left -= n; 254 255 /* NOTE: we never transfer more than rw_threshold bytes 256 * to/from the fifo here; there's no I/O overlap. 257 * This also assumes that access width( i.e. ACCWD) is 4 bytes 258 */ 259 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 260 for (i = 0; i < (n >> 2); i++) { 261 writel(*((u32 *)p), host->base + DAVINCI_MMCDXR); 262 p = p + 4; 263 } 264 if (n & 3) { 265 iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3)); 266 p = p + (n & 3); 267 } 268 } else { 269 for (i = 0; i < (n >> 2); i++) { 270 *((u32 *)p) = readl(host->base + DAVINCI_MMCDRR); 271 p = p + 4; 272 } 273 if (n & 3) { 274 ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3)); 275 p = p + (n & 3); 276 } 277 } 278 host->buffer = p; 279 } 280 281 static void mmc_davinci_start_command(struct mmc_davinci_host *host, 282 struct mmc_command *cmd) 283 { 284 u32 cmd_reg = 0; 285 u32 im_val; 286 287 dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n", 288 cmd->opcode, cmd->arg, 289 ({ char *s; 290 switch (mmc_resp_type(cmd)) { 291 case MMC_RSP_R1: 292 s = ", R1/R5/R6/R7 response"; 293 break; 294 case MMC_RSP_R1B: 295 s = ", R1b response"; 296 break; 297 case MMC_RSP_R2: 298 s = ", R2 response"; 299 break; 300 case MMC_RSP_R3: 301 s = ", R3/R4 response"; 302 break; 303 default: 304 s = ", (R? response)"; 305 break; 306 }; s; })); 307 host->cmd = cmd; 308 309 switch (mmc_resp_type(cmd)) { 310 case MMC_RSP_R1B: 311 /* There's some spec confusion about when R1B is 312 * allowed, but if the card doesn't issue a BUSY 313 * then it's harmless for us to allow it. 314 */ 315 cmd_reg |= MMCCMD_BSYEXP; 316 /* FALLTHROUGH */ 317 case MMC_RSP_R1: /* 48 bits, CRC */ 318 cmd_reg |= MMCCMD_RSPFMT_R1456; 319 break; 320 case MMC_RSP_R2: /* 136 bits, CRC */ 321 cmd_reg |= MMCCMD_RSPFMT_R2; 322 break; 323 case MMC_RSP_R3: /* 48 bits, no CRC */ 324 cmd_reg |= MMCCMD_RSPFMT_R3; 325 break; 326 default: 327 cmd_reg |= MMCCMD_RSPFMT_NONE; 328 dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n", 329 mmc_resp_type(cmd)); 330 break; 331 } 332 333 /* Set command index */ 334 cmd_reg |= cmd->opcode; 335 336 /* Enable EDMA transfer triggers */ 337 if (host->do_dma) 338 cmd_reg |= MMCCMD_DMATRIG; 339 340 if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL && 341 host->data_dir == DAVINCI_MMC_DATADIR_READ) 342 cmd_reg |= MMCCMD_DMATRIG; 343 344 /* Setting whether command involves data transfer or not */ 345 if (cmd->data) 346 cmd_reg |= MMCCMD_WDATX; 347 348 /* Setting whether data read or write */ 349 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) 350 cmd_reg |= MMCCMD_DTRW; 351 352 if (host->bus_mode == MMC_BUSMODE_PUSHPULL) 353 cmd_reg |= MMCCMD_PPLEN; 354 355 /* set Command timeout */ 356 writel(0x1FFF, host->base + DAVINCI_MMCTOR); 357 358 /* Enable interrupt (calculate here, defer until FIFO is stuffed). */ 359 im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS; 360 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 361 im_val |= MMCST0_DATDNE | MMCST0_CRCWR; 362 363 if (!host->do_dma) 364 im_val |= MMCST0_DXRDY; 365 } else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) { 366 im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD; 367 368 if (!host->do_dma) 369 im_val |= MMCST0_DRRDY; 370 } 371 372 /* 373 * Before non-DMA WRITE commands the controller needs priming: 374 * FIFO should be populated with 32 bytes i.e. whatever is the FIFO size 375 */ 376 if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)) 377 davinci_fifo_data_trans(host, rw_threshold); 378 379 writel(cmd->arg, host->base + DAVINCI_MMCARGHL); 380 writel(cmd_reg, host->base + DAVINCI_MMCCMD); 381 382 host->active_request = true; 383 384 if (!host->do_dma && host->bytes_left <= poll_threshold) { 385 u32 count = poll_loopcount; 386 387 while (host->active_request && count--) { 388 mmc_davinci_irq(0, host); 389 cpu_relax(); 390 } 391 } 392 393 if (host->active_request) 394 writel(im_val, host->base + DAVINCI_MMCIM); 395 } 396 397 /*----------------------------------------------------------------------*/ 398 399 /* DMA infrastructure */ 400 401 static void davinci_abort_dma(struct mmc_davinci_host *host) 402 { 403 struct dma_chan *sync_dev; 404 405 if (host->data_dir == DAVINCI_MMC_DATADIR_READ) 406 sync_dev = host->dma_rx; 407 else 408 sync_dev = host->dma_tx; 409 410 dmaengine_terminate_all(sync_dev); 411 } 412 413 static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host, 414 struct mmc_data *data) 415 { 416 struct dma_chan *chan; 417 struct dma_async_tx_descriptor *desc; 418 int ret = 0; 419 420 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 421 struct dma_slave_config dma_tx_conf = { 422 .direction = DMA_MEM_TO_DEV, 423 .dst_addr = host->mem_res->start + DAVINCI_MMCDXR, 424 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 425 .dst_maxburst = 426 rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, 427 }; 428 chan = host->dma_tx; 429 dmaengine_slave_config(host->dma_tx, &dma_tx_conf); 430 431 desc = dmaengine_prep_slave_sg(host->dma_tx, 432 data->sg, 433 host->sg_len, 434 DMA_MEM_TO_DEV, 435 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 436 if (!desc) { 437 dev_dbg(mmc_dev(host->mmc), 438 "failed to allocate DMA TX descriptor"); 439 ret = -1; 440 goto out; 441 } 442 } else { 443 struct dma_slave_config dma_rx_conf = { 444 .direction = DMA_DEV_TO_MEM, 445 .src_addr = host->mem_res->start + DAVINCI_MMCDRR, 446 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 447 .src_maxburst = 448 rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, 449 }; 450 chan = host->dma_rx; 451 dmaengine_slave_config(host->dma_rx, &dma_rx_conf); 452 453 desc = dmaengine_prep_slave_sg(host->dma_rx, 454 data->sg, 455 host->sg_len, 456 DMA_DEV_TO_MEM, 457 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 458 if (!desc) { 459 dev_dbg(mmc_dev(host->mmc), 460 "failed to allocate DMA RX descriptor"); 461 ret = -1; 462 goto out; 463 } 464 } 465 466 dmaengine_submit(desc); 467 dma_async_issue_pending(chan); 468 469 out: 470 return ret; 471 } 472 473 static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, 474 struct mmc_data *data) 475 { 476 int i; 477 int mask = rw_threshold - 1; 478 int ret = 0; 479 480 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 481 ((data->flags & MMC_DATA_WRITE) 482 ? DMA_TO_DEVICE 483 : DMA_FROM_DEVICE)); 484 485 /* no individual DMA segment should need a partial FIFO */ 486 for (i = 0; i < host->sg_len; i++) { 487 if (sg_dma_len(data->sg + i) & mask) { 488 dma_unmap_sg(mmc_dev(host->mmc), 489 data->sg, data->sg_len, 490 (data->flags & MMC_DATA_WRITE) 491 ? DMA_TO_DEVICE 492 : DMA_FROM_DEVICE); 493 return -1; 494 } 495 } 496 497 host->do_dma = 1; 498 ret = mmc_davinci_send_dma_request(host, data); 499 500 return ret; 501 } 502 503 static void __init_or_module 504 davinci_release_dma_channels(struct mmc_davinci_host *host) 505 { 506 if (!host->use_dma) 507 return; 508 509 dma_release_channel(host->dma_tx); 510 dma_release_channel(host->dma_rx); 511 } 512 513 static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) 514 { 515 host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx"); 516 if (IS_ERR(host->dma_tx)) { 517 dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n"); 518 return PTR_ERR(host->dma_tx); 519 } 520 521 host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx"); 522 if (IS_ERR(host->dma_rx)) { 523 dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n"); 524 dma_release_channel(host->dma_tx); 525 return PTR_ERR(host->dma_rx); 526 } 527 528 return 0; 529 } 530 531 /*----------------------------------------------------------------------*/ 532 533 static void 534 mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req) 535 { 536 int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0; 537 int timeout; 538 struct mmc_data *data = req->data; 539 540 if (host->version == MMC_CTLR_VERSION_2) 541 fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0; 542 543 host->data = data; 544 if (data == NULL) { 545 host->data_dir = DAVINCI_MMC_DATADIR_NONE; 546 writel(0, host->base + DAVINCI_MMCBLEN); 547 writel(0, host->base + DAVINCI_MMCNBLK); 548 return; 549 } 550 551 dev_dbg(mmc_dev(host->mmc), "%s, %d blocks of %d bytes\n", 552 (data->flags & MMC_DATA_WRITE) ? "write" : "read", 553 data->blocks, data->blksz); 554 dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n", 555 data->timeout_clks, data->timeout_ns); 556 timeout = data->timeout_clks + 557 (data->timeout_ns / host->ns_in_one_cycle); 558 if (timeout > 0xffff) 559 timeout = 0xffff; 560 561 writel(timeout, host->base + DAVINCI_MMCTOD); 562 writel(data->blocks, host->base + DAVINCI_MMCNBLK); 563 writel(data->blksz, host->base + DAVINCI_MMCBLEN); 564 565 /* Configure the FIFO */ 566 if (data->flags & MMC_DATA_WRITE) { 567 host->data_dir = DAVINCI_MMC_DATADIR_WRITE; 568 writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST, 569 host->base + DAVINCI_MMCFIFOCTL); 570 writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR, 571 host->base + DAVINCI_MMCFIFOCTL); 572 } else { 573 host->data_dir = DAVINCI_MMC_DATADIR_READ; 574 writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST, 575 host->base + DAVINCI_MMCFIFOCTL); 576 writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD, 577 host->base + DAVINCI_MMCFIFOCTL); 578 } 579 580 host->buffer = NULL; 581 host->bytes_left = data->blocks * data->blksz; 582 583 /* For now we try to use DMA whenever we won't need partial FIFO 584 * reads or writes, either for the whole transfer (as tested here) 585 * or for any individual scatterlist segment (tested when we call 586 * start_dma_transfer). 587 * 588 * While we *could* change that, unusual block sizes are rarely 589 * used. The occasional fallback to PIO should't hurt. 590 */ 591 if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0 592 && mmc_davinci_start_dma_transfer(host, data) == 0) { 593 /* zero this to ensure we take no PIO paths */ 594 host->bytes_left = 0; 595 } else { 596 /* Revert to CPU Copy */ 597 host->sg_len = data->sg_len; 598 host->sg = host->data->sg; 599 mmc_davinci_sg_to_buf(host); 600 } 601 } 602 603 static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req) 604 { 605 struct mmc_davinci_host *host = mmc_priv(mmc); 606 unsigned long timeout = jiffies + msecs_to_jiffies(900); 607 u32 mmcst1 = 0; 608 609 /* Card may still be sending BUSY after a previous operation, 610 * typically some kind of write. If so, we can't proceed yet. 611 */ 612 while (time_before(jiffies, timeout)) { 613 mmcst1 = readl(host->base + DAVINCI_MMCST1); 614 if (!(mmcst1 & MMCST1_BUSY)) 615 break; 616 cpu_relax(); 617 } 618 if (mmcst1 & MMCST1_BUSY) { 619 dev_err(mmc_dev(host->mmc), "still BUSY? bad ... \n"); 620 req->cmd->error = -ETIMEDOUT; 621 mmc_request_done(mmc, req); 622 return; 623 } 624 625 host->do_dma = 0; 626 mmc_davinci_prepare_data(host, req); 627 mmc_davinci_start_command(host, req->cmd); 628 } 629 630 static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host, 631 unsigned int mmc_req_freq) 632 { 633 unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0; 634 635 mmc_pclk = host->mmc_input_clk; 636 if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq)) 637 mmc_push_pull_divisor = ((unsigned int)mmc_pclk 638 / (2 * mmc_req_freq)) - 1; 639 else 640 mmc_push_pull_divisor = 0; 641 642 mmc_freq = (unsigned int)mmc_pclk 643 / (2 * (mmc_push_pull_divisor + 1)); 644 645 if (mmc_freq > mmc_req_freq) 646 mmc_push_pull_divisor = mmc_push_pull_divisor + 1; 647 /* Convert ns to clock cycles */ 648 if (mmc_req_freq <= 400000) 649 host->ns_in_one_cycle = (1000000) / (((mmc_pclk 650 / (2 * (mmc_push_pull_divisor + 1)))/1000)); 651 else 652 host->ns_in_one_cycle = (1000000) / (((mmc_pclk 653 / (2 * (mmc_push_pull_divisor + 1)))/1000000)); 654 655 return mmc_push_pull_divisor; 656 } 657 658 static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios) 659 { 660 unsigned int open_drain_freq = 0, mmc_pclk = 0; 661 unsigned int mmc_push_pull_freq = 0; 662 struct mmc_davinci_host *host = mmc_priv(mmc); 663 664 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { 665 u32 temp; 666 667 /* Ignoring the init clock value passed for fixing the inter 668 * operability with different cards. 669 */ 670 open_drain_freq = ((unsigned int)mmc_pclk 671 / (2 * MMCSD_INIT_CLOCK)) - 1; 672 673 if (open_drain_freq > 0xFF) 674 open_drain_freq = 0xFF; 675 676 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; 677 temp |= open_drain_freq; 678 writel(temp, host->base + DAVINCI_MMCCLK); 679 680 /* Convert ns to clock cycles */ 681 host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000); 682 } else { 683 u32 temp; 684 mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock); 685 686 if (mmc_push_pull_freq > 0xFF) 687 mmc_push_pull_freq = 0xFF; 688 689 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN; 690 writel(temp, host->base + DAVINCI_MMCCLK); 691 692 udelay(10); 693 694 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; 695 temp |= mmc_push_pull_freq; 696 writel(temp, host->base + DAVINCI_MMCCLK); 697 698 writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); 699 700 udelay(10); 701 } 702 } 703 704 static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 705 { 706 struct mmc_davinci_host *host = mmc_priv(mmc); 707 struct platform_device *pdev = to_platform_device(mmc->parent); 708 struct davinci_mmc_config *config = pdev->dev.platform_data; 709 710 dev_dbg(mmc_dev(host->mmc), 711 "clock %dHz busmode %d powermode %d Vdd %04x\n", 712 ios->clock, ios->bus_mode, ios->power_mode, 713 ios->vdd); 714 715 switch (ios->power_mode) { 716 case MMC_POWER_OFF: 717 if (config && config->set_power) 718 config->set_power(pdev->id, false); 719 break; 720 case MMC_POWER_UP: 721 if (config && config->set_power) 722 config->set_power(pdev->id, true); 723 break; 724 } 725 726 switch (ios->bus_width) { 727 case MMC_BUS_WIDTH_8: 728 dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n"); 729 writel((readl(host->base + DAVINCI_MMCCTL) & 730 ~MMCCTL_WIDTH_4_BIT) | MMCCTL_WIDTH_8_BIT, 731 host->base + DAVINCI_MMCCTL); 732 break; 733 case MMC_BUS_WIDTH_4: 734 dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n"); 735 if (host->version == MMC_CTLR_VERSION_2) 736 writel((readl(host->base + DAVINCI_MMCCTL) & 737 ~MMCCTL_WIDTH_8_BIT) | MMCCTL_WIDTH_4_BIT, 738 host->base + DAVINCI_MMCCTL); 739 else 740 writel(readl(host->base + DAVINCI_MMCCTL) | 741 MMCCTL_WIDTH_4_BIT, 742 host->base + DAVINCI_MMCCTL); 743 break; 744 case MMC_BUS_WIDTH_1: 745 dev_dbg(mmc_dev(host->mmc), "Enabling 1 bit mode\n"); 746 if (host->version == MMC_CTLR_VERSION_2) 747 writel(readl(host->base + DAVINCI_MMCCTL) & 748 ~(MMCCTL_WIDTH_8_BIT | MMCCTL_WIDTH_4_BIT), 749 host->base + DAVINCI_MMCCTL); 750 else 751 writel(readl(host->base + DAVINCI_MMCCTL) & 752 ~MMCCTL_WIDTH_4_BIT, 753 host->base + DAVINCI_MMCCTL); 754 break; 755 } 756 757 calculate_clk_divider(mmc, ios); 758 759 host->bus_mode = ios->bus_mode; 760 if (ios->power_mode == MMC_POWER_UP) { 761 unsigned long timeout = jiffies + msecs_to_jiffies(50); 762 bool lose = true; 763 764 /* Send clock cycles, poll completion */ 765 writel(0, host->base + DAVINCI_MMCARGHL); 766 writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD); 767 while (time_before(jiffies, timeout)) { 768 u32 tmp = readl(host->base + DAVINCI_MMCST0); 769 770 if (tmp & MMCST0_RSPDNE) { 771 lose = false; 772 break; 773 } 774 cpu_relax(); 775 } 776 if (lose) 777 dev_warn(mmc_dev(host->mmc), "powerup timeout\n"); 778 } 779 780 /* FIXME on power OFF, reset things ... */ 781 } 782 783 static void 784 mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data) 785 { 786 host->data = NULL; 787 788 if (host->mmc->caps & MMC_CAP_SDIO_IRQ) { 789 /* 790 * SDIO Interrupt Detection work-around as suggested by 791 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata 792 * 2.1.6): Signal SDIO interrupt only if it is enabled by core 793 */ 794 if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) & 795 SDIOST0_DAT1_HI)) { 796 writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); 797 mmc_signal_sdio_irq(host->mmc); 798 } 799 } 800 801 if (host->do_dma) { 802 davinci_abort_dma(host); 803 804 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 805 (data->flags & MMC_DATA_WRITE) 806 ? DMA_TO_DEVICE 807 : DMA_FROM_DEVICE); 808 host->do_dma = false; 809 } 810 host->data_dir = DAVINCI_MMC_DATADIR_NONE; 811 812 if (!data->stop || (host->cmd && host->cmd->error)) { 813 mmc_request_done(host->mmc, data->mrq); 814 writel(0, host->base + DAVINCI_MMCIM); 815 host->active_request = false; 816 } else 817 mmc_davinci_start_command(host, data->stop); 818 } 819 820 static void mmc_davinci_cmd_done(struct mmc_davinci_host *host, 821 struct mmc_command *cmd) 822 { 823 host->cmd = NULL; 824 825 if (cmd->flags & MMC_RSP_PRESENT) { 826 if (cmd->flags & MMC_RSP_136) { 827 /* response type 2 */ 828 cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01); 829 cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23); 830 cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45); 831 cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); 832 } else { 833 /* response types 1, 1b, 3, 4, 5, 6 */ 834 cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); 835 } 836 } 837 838 if (host->data == NULL || cmd->error) { 839 if (cmd->error == -ETIMEDOUT) 840 cmd->mrq->cmd->retries = 0; 841 mmc_request_done(host->mmc, cmd->mrq); 842 writel(0, host->base + DAVINCI_MMCIM); 843 host->active_request = false; 844 } 845 } 846 847 static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host, 848 int val) 849 { 850 u32 temp; 851 852 temp = readl(host->base + DAVINCI_MMCCTL); 853 if (val) /* reset */ 854 temp |= MMCCTL_CMDRST | MMCCTL_DATRST; 855 else /* enable */ 856 temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST); 857 858 writel(temp, host->base + DAVINCI_MMCCTL); 859 udelay(10); 860 } 861 862 static void 863 davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data) 864 { 865 mmc_davinci_reset_ctrl(host, 1); 866 mmc_davinci_reset_ctrl(host, 0); 867 } 868 869 static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id) 870 { 871 struct mmc_davinci_host *host = dev_id; 872 unsigned int status; 873 874 status = readl(host->base + DAVINCI_SDIOIST); 875 if (status & SDIOIST_IOINT) { 876 dev_dbg(mmc_dev(host->mmc), 877 "SDIO interrupt status %x\n", status); 878 writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); 879 mmc_signal_sdio_irq(host->mmc); 880 } 881 return IRQ_HANDLED; 882 } 883 884 static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) 885 { 886 struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id; 887 unsigned int status, qstatus; 888 int end_command = 0; 889 int end_transfer = 0; 890 struct mmc_data *data = host->data; 891 892 if (host->cmd == NULL && host->data == NULL) { 893 status = readl(host->base + DAVINCI_MMCST0); 894 dev_dbg(mmc_dev(host->mmc), 895 "Spurious interrupt 0x%04x\n", status); 896 /* Disable the interrupt from mmcsd */ 897 writel(0, host->base + DAVINCI_MMCIM); 898 return IRQ_NONE; 899 } 900 901 status = readl(host->base + DAVINCI_MMCST0); 902 qstatus = status; 903 904 /* handle FIFO first when using PIO for data. 905 * bytes_left will decrease to zero as I/O progress and status will 906 * read zero over iteration because this controller status 907 * register(MMCST0) reports any status only once and it is cleared 908 * by read. So, it is not unbouned loop even in the case of 909 * non-dma. 910 */ 911 if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) { 912 unsigned long im_val; 913 914 /* 915 * If interrupts fire during the following loop, they will be 916 * handled by the handler, but the PIC will still buffer these. 917 * As a result, the handler will be called again to serve these 918 * needlessly. In order to avoid these spurious interrupts, 919 * keep interrupts masked during the loop. 920 */ 921 im_val = readl(host->base + DAVINCI_MMCIM); 922 writel(0, host->base + DAVINCI_MMCIM); 923 924 do { 925 davinci_fifo_data_trans(host, rw_threshold); 926 status = readl(host->base + DAVINCI_MMCST0); 927 qstatus |= status; 928 } while (host->bytes_left && 929 (status & (MMCST0_DXRDY | MMCST0_DRRDY))); 930 931 /* 932 * If an interrupt is pending, it is assumed it will fire when 933 * it is unmasked. This assumption is also taken when the MMCIM 934 * is first set. Otherwise, writing to MMCIM after reading the 935 * status is race-prone. 936 */ 937 writel(im_val, host->base + DAVINCI_MMCIM); 938 } 939 940 if (qstatus & MMCST0_DATDNE) { 941 /* All blocks sent/received, and CRC checks passed */ 942 if (data != NULL) { 943 if ((host->do_dma == 0) && (host->bytes_left > 0)) { 944 /* if datasize < rw_threshold 945 * no RX ints are generated 946 */ 947 davinci_fifo_data_trans(host, host->bytes_left); 948 } 949 end_transfer = 1; 950 data->bytes_xfered = data->blocks * data->blksz; 951 } else { 952 dev_err(mmc_dev(host->mmc), 953 "DATDNE with no host->data\n"); 954 } 955 } 956 957 if (qstatus & MMCST0_TOUTRD) { 958 /* Read data timeout */ 959 data->error = -ETIMEDOUT; 960 end_transfer = 1; 961 962 dev_dbg(mmc_dev(host->mmc), 963 "read data timeout, status %x\n", 964 qstatus); 965 966 davinci_abort_data(host, data); 967 } 968 969 if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) { 970 /* Data CRC error */ 971 data->error = -EILSEQ; 972 end_transfer = 1; 973 974 /* NOTE: this controller uses CRCWR to report both CRC 975 * errors and timeouts (on writes). MMCDRSP values are 976 * only weakly documented, but 0x9f was clearly a timeout 977 * case and the two three-bit patterns in various SD specs 978 * (101, 010) aren't part of it ... 979 */ 980 if (qstatus & MMCST0_CRCWR) { 981 u32 temp = readb(host->base + DAVINCI_MMCDRSP); 982 983 if (temp == 0x9f) 984 data->error = -ETIMEDOUT; 985 } 986 dev_dbg(mmc_dev(host->mmc), "data %s %s error\n", 987 (qstatus & MMCST0_CRCWR) ? "write" : "read", 988 (data->error == -ETIMEDOUT) ? "timeout" : "CRC"); 989 990 davinci_abort_data(host, data); 991 } 992 993 if (qstatus & MMCST0_TOUTRS) { 994 /* Command timeout */ 995 if (host->cmd) { 996 dev_dbg(mmc_dev(host->mmc), 997 "CMD%d timeout, status %x\n", 998 host->cmd->opcode, qstatus); 999 host->cmd->error = -ETIMEDOUT; 1000 if (data) { 1001 end_transfer = 1; 1002 davinci_abort_data(host, data); 1003 } else 1004 end_command = 1; 1005 } 1006 } 1007 1008 if (qstatus & MMCST0_CRCRS) { 1009 /* Command CRC error */ 1010 dev_dbg(mmc_dev(host->mmc), "Command CRC error\n"); 1011 if (host->cmd) { 1012 host->cmd->error = -EILSEQ; 1013 end_command = 1; 1014 } 1015 } 1016 1017 if (qstatus & MMCST0_RSPDNE) { 1018 /* End of command phase */ 1019 end_command = (int) host->cmd; 1020 } 1021 1022 if (end_command) 1023 mmc_davinci_cmd_done(host, host->cmd); 1024 if (end_transfer) 1025 mmc_davinci_xfer_done(host, data); 1026 return IRQ_HANDLED; 1027 } 1028 1029 static int mmc_davinci_get_cd(struct mmc_host *mmc) 1030 { 1031 struct platform_device *pdev = to_platform_device(mmc->parent); 1032 struct davinci_mmc_config *config = pdev->dev.platform_data; 1033 1034 if (config && config->get_cd) 1035 return config->get_cd(pdev->id); 1036 1037 return mmc_gpio_get_cd(mmc); 1038 } 1039 1040 static int mmc_davinci_get_ro(struct mmc_host *mmc) 1041 { 1042 struct platform_device *pdev = to_platform_device(mmc->parent); 1043 struct davinci_mmc_config *config = pdev->dev.platform_data; 1044 1045 if (config && config->get_ro) 1046 return config->get_ro(pdev->id); 1047 1048 return mmc_gpio_get_ro(mmc); 1049 } 1050 1051 static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable) 1052 { 1053 struct mmc_davinci_host *host = mmc_priv(mmc); 1054 1055 if (enable) { 1056 if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) { 1057 writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); 1058 mmc_signal_sdio_irq(host->mmc); 1059 } else { 1060 host->sdio_int = true; 1061 writel(readl(host->base + DAVINCI_SDIOIEN) | 1062 SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN); 1063 } 1064 } else { 1065 host->sdio_int = false; 1066 writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN, 1067 host->base + DAVINCI_SDIOIEN); 1068 } 1069 } 1070 1071 static struct mmc_host_ops mmc_davinci_ops = { 1072 .request = mmc_davinci_request, 1073 .set_ios = mmc_davinci_set_ios, 1074 .get_cd = mmc_davinci_get_cd, 1075 .get_ro = mmc_davinci_get_ro, 1076 .enable_sdio_irq = mmc_davinci_enable_sdio_irq, 1077 }; 1078 1079 /*----------------------------------------------------------------------*/ 1080 1081 #ifdef CONFIG_CPU_FREQ 1082 static int mmc_davinci_cpufreq_transition(struct notifier_block *nb, 1083 unsigned long val, void *data) 1084 { 1085 struct mmc_davinci_host *host; 1086 unsigned int mmc_pclk; 1087 struct mmc_host *mmc; 1088 unsigned long flags; 1089 1090 host = container_of(nb, struct mmc_davinci_host, freq_transition); 1091 mmc = host->mmc; 1092 mmc_pclk = clk_get_rate(host->clk); 1093 1094 if (val == CPUFREQ_POSTCHANGE) { 1095 spin_lock_irqsave(&mmc->lock, flags); 1096 host->mmc_input_clk = mmc_pclk; 1097 calculate_clk_divider(mmc, &mmc->ios); 1098 spin_unlock_irqrestore(&mmc->lock, flags); 1099 } 1100 1101 return 0; 1102 } 1103 1104 static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) 1105 { 1106 host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition; 1107 1108 return cpufreq_register_notifier(&host->freq_transition, 1109 CPUFREQ_TRANSITION_NOTIFIER); 1110 } 1111 1112 static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) 1113 { 1114 cpufreq_unregister_notifier(&host->freq_transition, 1115 CPUFREQ_TRANSITION_NOTIFIER); 1116 } 1117 #else 1118 static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) 1119 { 1120 return 0; 1121 } 1122 1123 static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) 1124 { 1125 } 1126 #endif 1127 static void __init init_mmcsd_host(struct mmc_davinci_host *host) 1128 { 1129 1130 mmc_davinci_reset_ctrl(host, 1); 1131 1132 writel(0, host->base + DAVINCI_MMCCLK); 1133 writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); 1134 1135 writel(0x1FFF, host->base + DAVINCI_MMCTOR); 1136 writel(0xFFFF, host->base + DAVINCI_MMCTOD); 1137 1138 mmc_davinci_reset_ctrl(host, 0); 1139 } 1140 1141 static const struct platform_device_id davinci_mmc_devtype[] = { 1142 { 1143 .name = "dm6441-mmc", 1144 .driver_data = MMC_CTLR_VERSION_1, 1145 }, { 1146 .name = "da830-mmc", 1147 .driver_data = MMC_CTLR_VERSION_2, 1148 }, 1149 {}, 1150 }; 1151 MODULE_DEVICE_TABLE(platform, davinci_mmc_devtype); 1152 1153 static const struct of_device_id davinci_mmc_dt_ids[] = { 1154 { 1155 .compatible = "ti,dm6441-mmc", 1156 .data = &davinci_mmc_devtype[MMC_CTLR_VERSION_1], 1157 }, 1158 { 1159 .compatible = "ti,da830-mmc", 1160 .data = &davinci_mmc_devtype[MMC_CTLR_VERSION_2], 1161 }, 1162 {}, 1163 }; 1164 MODULE_DEVICE_TABLE(of, davinci_mmc_dt_ids); 1165 1166 static int mmc_davinci_parse_pdata(struct mmc_host *mmc) 1167 { 1168 struct platform_device *pdev = to_platform_device(mmc->parent); 1169 struct davinci_mmc_config *pdata = pdev->dev.platform_data; 1170 struct mmc_davinci_host *host; 1171 int ret; 1172 1173 if (!pdata) 1174 return -EINVAL; 1175 1176 host = mmc_priv(mmc); 1177 if (!host) 1178 return -EINVAL; 1179 1180 if (pdata && pdata->nr_sg) 1181 host->nr_sg = pdata->nr_sg - 1; 1182 1183 if (pdata && (pdata->wires == 4 || pdata->wires == 0)) 1184 mmc->caps |= MMC_CAP_4_BIT_DATA; 1185 1186 if (pdata && (pdata->wires == 8)) 1187 mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA); 1188 1189 mmc->f_min = 312500; 1190 mmc->f_max = 25000000; 1191 if (pdata && pdata->max_freq) 1192 mmc->f_max = pdata->max_freq; 1193 if (pdata && pdata->caps) 1194 mmc->caps |= pdata->caps; 1195 1196 /* Register a cd gpio, if there is not one, enable polling */ 1197 ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); 1198 if (ret == -EPROBE_DEFER) 1199 return ret; 1200 else if (ret) 1201 mmc->caps |= MMC_CAP_NEEDS_POLL; 1202 1203 ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL); 1204 if (ret == -EPROBE_DEFER) 1205 return ret; 1206 1207 return 0; 1208 } 1209 1210 static int __init davinci_mmcsd_probe(struct platform_device *pdev) 1211 { 1212 const struct of_device_id *match; 1213 struct mmc_davinci_host *host = NULL; 1214 struct mmc_host *mmc = NULL; 1215 struct resource *r, *mem = NULL; 1216 int ret, irq; 1217 size_t mem_size; 1218 const struct platform_device_id *id_entry; 1219 1220 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1221 if (!r) 1222 return -ENODEV; 1223 irq = platform_get_irq(pdev, 0); 1224 if (irq < 0) 1225 return irq; 1226 1227 mem_size = resource_size(r); 1228 mem = devm_request_mem_region(&pdev->dev, r->start, mem_size, 1229 pdev->name); 1230 if (!mem) 1231 return -EBUSY; 1232 1233 mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev); 1234 if (!mmc) 1235 return -ENOMEM; 1236 1237 host = mmc_priv(mmc); 1238 host->mmc = mmc; /* Important */ 1239 1240 host->mem_res = mem; 1241 host->base = devm_ioremap(&pdev->dev, mem->start, mem_size); 1242 if (!host->base) { 1243 ret = -ENOMEM; 1244 goto ioremap_fail; 1245 } 1246 1247 host->clk = devm_clk_get(&pdev->dev, NULL); 1248 if (IS_ERR(host->clk)) { 1249 ret = PTR_ERR(host->clk); 1250 goto clk_get_fail; 1251 } 1252 ret = clk_prepare_enable(host->clk); 1253 if (ret) 1254 goto clk_prepare_enable_fail; 1255 1256 host->mmc_input_clk = clk_get_rate(host->clk); 1257 1258 match = of_match_device(davinci_mmc_dt_ids, &pdev->dev); 1259 if (match) { 1260 pdev->id_entry = match->data; 1261 ret = mmc_of_parse(mmc); 1262 if (ret) { 1263 dev_err(&pdev->dev, 1264 "could not parse of data: %d\n", ret); 1265 goto parse_fail; 1266 } 1267 } else { 1268 ret = mmc_davinci_parse_pdata(mmc); 1269 if (ret) { 1270 dev_err(&pdev->dev, 1271 "could not parse platform data: %d\n", ret); 1272 goto parse_fail; 1273 } } 1274 1275 if (host->nr_sg > MAX_NR_SG || !host->nr_sg) 1276 host->nr_sg = MAX_NR_SG; 1277 1278 init_mmcsd_host(host); 1279 1280 host->use_dma = use_dma; 1281 host->mmc_irq = irq; 1282 host->sdio_irq = platform_get_irq(pdev, 1); 1283 1284 if (host->use_dma) { 1285 ret = davinci_acquire_dma_channels(host); 1286 if (ret == -EPROBE_DEFER) 1287 goto dma_probe_defer; 1288 else if (ret) 1289 host->use_dma = 0; 1290 } 1291 1292 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; 1293 1294 id_entry = platform_get_device_id(pdev); 1295 if (id_entry) 1296 host->version = id_entry->driver_data; 1297 1298 mmc->ops = &mmc_davinci_ops; 1299 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1300 1301 /* With no iommu coalescing pages, each phys_seg is a hw_seg. 1302 * Each hw_seg uses one EDMA parameter RAM slot, always one 1303 * channel and then usually some linked slots. 1304 */ 1305 mmc->max_segs = MAX_NR_SG; 1306 1307 /* EDMA limit per hw segment (one or two MBytes) */ 1308 mmc->max_seg_size = MAX_CCNT * rw_threshold; 1309 1310 /* MMC/SD controller limits for multiblock requests */ 1311 mmc->max_blk_size = 4095; /* BLEN is 12 bits */ 1312 mmc->max_blk_count = 65535; /* NBLK is 16 bits */ 1313 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1314 1315 dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs); 1316 dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size); 1317 dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size); 1318 dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size); 1319 1320 platform_set_drvdata(pdev, host); 1321 1322 ret = mmc_davinci_cpufreq_register(host); 1323 if (ret) { 1324 dev_err(&pdev->dev, "failed to register cpufreq\n"); 1325 goto cpu_freq_fail; 1326 } 1327 1328 ret = mmc_add_host(mmc); 1329 if (ret < 0) 1330 goto mmc_add_host_fail; 1331 1332 ret = devm_request_irq(&pdev->dev, irq, mmc_davinci_irq, 0, 1333 mmc_hostname(mmc), host); 1334 if (ret) 1335 goto request_irq_fail; 1336 1337 if (host->sdio_irq >= 0) { 1338 ret = devm_request_irq(&pdev->dev, host->sdio_irq, 1339 mmc_davinci_sdio_irq, 0, 1340 mmc_hostname(mmc), host); 1341 if (!ret) 1342 mmc->caps |= MMC_CAP_SDIO_IRQ; 1343 } 1344 1345 rename_region(mem, mmc_hostname(mmc)); 1346 1347 dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n", 1348 host->use_dma ? "DMA" : "PIO", 1349 (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1); 1350 1351 return 0; 1352 1353 request_irq_fail: 1354 mmc_remove_host(mmc); 1355 mmc_add_host_fail: 1356 mmc_davinci_cpufreq_deregister(host); 1357 cpu_freq_fail: 1358 davinci_release_dma_channels(host); 1359 parse_fail: 1360 dma_probe_defer: 1361 clk_disable_unprepare(host->clk); 1362 clk_prepare_enable_fail: 1363 clk_get_fail: 1364 ioremap_fail: 1365 mmc_free_host(mmc); 1366 1367 return ret; 1368 } 1369 1370 static int __exit davinci_mmcsd_remove(struct platform_device *pdev) 1371 { 1372 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1373 1374 mmc_remove_host(host->mmc); 1375 mmc_davinci_cpufreq_deregister(host); 1376 davinci_release_dma_channels(host); 1377 clk_disable_unprepare(host->clk); 1378 mmc_free_host(host->mmc); 1379 1380 return 0; 1381 } 1382 1383 #ifdef CONFIG_PM 1384 static int davinci_mmcsd_suspend(struct device *dev) 1385 { 1386 struct platform_device *pdev = to_platform_device(dev); 1387 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1388 1389 writel(0, host->base + DAVINCI_MMCIM); 1390 mmc_davinci_reset_ctrl(host, 1); 1391 clk_disable(host->clk); 1392 1393 return 0; 1394 } 1395 1396 static int davinci_mmcsd_resume(struct device *dev) 1397 { 1398 struct platform_device *pdev = to_platform_device(dev); 1399 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1400 1401 clk_enable(host->clk); 1402 mmc_davinci_reset_ctrl(host, 0); 1403 1404 return 0; 1405 } 1406 1407 static const struct dev_pm_ops davinci_mmcsd_pm = { 1408 .suspend = davinci_mmcsd_suspend, 1409 .resume = davinci_mmcsd_resume, 1410 }; 1411 1412 #define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm) 1413 #else 1414 #define davinci_mmcsd_pm_ops NULL 1415 #endif 1416 1417 static struct platform_driver davinci_mmcsd_driver = { 1418 .driver = { 1419 .name = "davinci_mmc", 1420 .pm = davinci_mmcsd_pm_ops, 1421 .of_match_table = davinci_mmc_dt_ids, 1422 }, 1423 .remove = __exit_p(davinci_mmcsd_remove), 1424 .id_table = davinci_mmc_devtype, 1425 }; 1426 1427 module_platform_driver_probe(davinci_mmcsd_driver, davinci_mmcsd_probe); 1428 1429 MODULE_AUTHOR("Texas Instruments India"); 1430 MODULE_LICENSE("GPL"); 1431 MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller"); 1432 MODULE_ALIAS("platform:davinci_mmc"); 1433 1434