1 /* 2 * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver 3 * 4 * Copyright (C) 2006 Texas Instruments. 5 * Original author: Purushotam Kumar 6 * Copyright (C) 2009 David Brownell 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 */ 22 23 #include <linux/module.h> 24 #include <linux/ioport.h> 25 #include <linux/platform_device.h> 26 #include <linux/clk.h> 27 #include <linux/err.h> 28 #include <linux/cpufreq.h> 29 #include <linux/mmc/host.h> 30 #include <linux/io.h> 31 #include <linux/irq.h> 32 #include <linux/delay.h> 33 #include <linux/dmaengine.h> 34 #include <linux/dma-mapping.h> 35 #include <linux/mmc/mmc.h> 36 #include <linux/of.h> 37 #include <linux/of_device.h> 38 #include <linux/mmc/slot-gpio.h> 39 40 #include <linux/platform_data/mmc-davinci.h> 41 42 /* 43 * Register Definitions 44 */ 45 #define DAVINCI_MMCCTL 0x00 /* Control Register */ 46 #define DAVINCI_MMCCLK 0x04 /* Memory Clock Control Register */ 47 #define DAVINCI_MMCST0 0x08 /* Status Register 0 */ 48 #define DAVINCI_MMCST1 0x0C /* Status Register 1 */ 49 #define DAVINCI_MMCIM 0x10 /* Interrupt Mask Register */ 50 #define DAVINCI_MMCTOR 0x14 /* Response Time-Out Register */ 51 #define DAVINCI_MMCTOD 0x18 /* Data Read Time-Out Register */ 52 #define DAVINCI_MMCBLEN 0x1C /* Block Length Register */ 53 #define DAVINCI_MMCNBLK 0x20 /* Number of Blocks Register */ 54 #define DAVINCI_MMCNBLC 0x24 /* Number of Blocks Counter Register */ 55 #define DAVINCI_MMCDRR 0x28 /* Data Receive Register */ 56 #define DAVINCI_MMCDXR 0x2C /* Data Transmit Register */ 57 #define DAVINCI_MMCCMD 0x30 /* Command Register */ 58 #define DAVINCI_MMCARGHL 0x34 /* Argument Register */ 59 #define DAVINCI_MMCRSP01 0x38 /* Response Register 0 and 1 */ 60 #define DAVINCI_MMCRSP23 0x3C /* Response Register 0 and 1 */ 61 #define DAVINCI_MMCRSP45 0x40 /* Response Register 0 and 1 */ 62 #define DAVINCI_MMCRSP67 0x44 /* Response Register 0 and 1 */ 63 #define DAVINCI_MMCDRSP 0x48 /* Data Response Register */ 64 #define DAVINCI_MMCETOK 0x4C 65 #define DAVINCI_MMCCIDX 0x50 /* Command Index Register */ 66 #define DAVINCI_MMCCKC 0x54 67 #define DAVINCI_MMCTORC 0x58 68 #define DAVINCI_MMCTODC 0x5C 69 #define DAVINCI_MMCBLNC 0x60 70 #define DAVINCI_SDIOCTL 0x64 71 #define DAVINCI_SDIOST0 0x68 72 #define DAVINCI_SDIOIEN 0x6C 73 #define DAVINCI_SDIOIST 0x70 74 #define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */ 75 76 /* DAVINCI_MMCCTL definitions */ 77 #define MMCCTL_DATRST (1 << 0) 78 #define MMCCTL_CMDRST (1 << 1) 79 #define MMCCTL_WIDTH_8_BIT (1 << 8) 80 #define MMCCTL_WIDTH_4_BIT (1 << 2) 81 #define MMCCTL_DATEG_DISABLED (0 << 6) 82 #define MMCCTL_DATEG_RISING (1 << 6) 83 #define MMCCTL_DATEG_FALLING (2 << 6) 84 #define MMCCTL_DATEG_BOTH (3 << 6) 85 #define MMCCTL_PERMDR_LE (0 << 9) 86 #define MMCCTL_PERMDR_BE (1 << 9) 87 #define MMCCTL_PERMDX_LE (0 << 10) 88 #define MMCCTL_PERMDX_BE (1 << 10) 89 90 /* DAVINCI_MMCCLK definitions */ 91 #define MMCCLK_CLKEN (1 << 8) 92 #define MMCCLK_CLKRT_MASK (0xFF << 0) 93 94 /* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */ 95 #define MMCST0_DATDNE BIT(0) /* data done */ 96 #define MMCST0_BSYDNE BIT(1) /* busy done */ 97 #define MMCST0_RSPDNE BIT(2) /* command done */ 98 #define MMCST0_TOUTRD BIT(3) /* data read timeout */ 99 #define MMCST0_TOUTRS BIT(4) /* command response timeout */ 100 #define MMCST0_CRCWR BIT(5) /* data write CRC error */ 101 #define MMCST0_CRCRD BIT(6) /* data read CRC error */ 102 #define MMCST0_CRCRS BIT(7) /* command response CRC error */ 103 #define MMCST0_DXRDY BIT(9) /* data transmit ready (fifo empty) */ 104 #define MMCST0_DRRDY BIT(10) /* data receive ready (data in fifo)*/ 105 #define MMCST0_DATED BIT(11) /* DAT3 edge detect */ 106 #define MMCST0_TRNDNE BIT(12) /* transfer done */ 107 108 /* DAVINCI_MMCST1 definitions */ 109 #define MMCST1_BUSY (1 << 0) 110 111 /* DAVINCI_MMCCMD definitions */ 112 #define MMCCMD_CMD_MASK (0x3F << 0) 113 #define MMCCMD_PPLEN (1 << 7) 114 #define MMCCMD_BSYEXP (1 << 8) 115 #define MMCCMD_RSPFMT_MASK (3 << 9) 116 #define MMCCMD_RSPFMT_NONE (0 << 9) 117 #define MMCCMD_RSPFMT_R1456 (1 << 9) 118 #define MMCCMD_RSPFMT_R2 (2 << 9) 119 #define MMCCMD_RSPFMT_R3 (3 << 9) 120 #define MMCCMD_DTRW (1 << 11) 121 #define MMCCMD_STRMTP (1 << 12) 122 #define MMCCMD_WDATX (1 << 13) 123 #define MMCCMD_INITCK (1 << 14) 124 #define MMCCMD_DCLR (1 << 15) 125 #define MMCCMD_DMATRIG (1 << 16) 126 127 /* DAVINCI_MMCFIFOCTL definitions */ 128 #define MMCFIFOCTL_FIFORST (1 << 0) 129 #define MMCFIFOCTL_FIFODIR_WR (1 << 1) 130 #define MMCFIFOCTL_FIFODIR_RD (0 << 1) 131 #define MMCFIFOCTL_FIFOLEV (1 << 2) /* 0 = 128 bits, 1 = 256 bits */ 132 #define MMCFIFOCTL_ACCWD_4 (0 << 3) /* access width of 4 bytes */ 133 #define MMCFIFOCTL_ACCWD_3 (1 << 3) /* access width of 3 bytes */ 134 #define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */ 135 #define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */ 136 137 /* DAVINCI_SDIOST0 definitions */ 138 #define SDIOST0_DAT1_HI BIT(0) 139 140 /* DAVINCI_SDIOIEN definitions */ 141 #define SDIOIEN_IOINTEN BIT(0) 142 143 /* DAVINCI_SDIOIST definitions */ 144 #define SDIOIST_IOINT BIT(0) 145 146 /* MMCSD Init clock in Hz in opendrain mode */ 147 #define MMCSD_INIT_CLOCK 200000 148 149 /* 150 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units, 151 * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only 152 * for drivers with max_segs == 1, making the segments bigger (64KB) 153 * than the page or two that's otherwise typical. nr_sg (passed from 154 * platform data) == 16 gives at least the same throughput boost, using 155 * EDMA transfer linkage instead of spending CPU time copying pages. 156 */ 157 #define MAX_CCNT ((1 << 16) - 1) 158 159 #define MAX_NR_SG 16 160 161 static unsigned rw_threshold = 32; 162 module_param(rw_threshold, uint, S_IRUGO); 163 MODULE_PARM_DESC(rw_threshold, 164 "Read/Write threshold. Default = 32"); 165 166 static unsigned poll_threshold = 128; 167 module_param(poll_threshold, uint, S_IRUGO); 168 MODULE_PARM_DESC(poll_threshold, 169 "Polling transaction size threshold. Default = 128"); 170 171 static unsigned poll_loopcount = 32; 172 module_param(poll_loopcount, uint, S_IRUGO); 173 MODULE_PARM_DESC(poll_loopcount, 174 "Maximum polling loop count. Default = 32"); 175 176 static unsigned __initdata use_dma = 1; 177 module_param(use_dma, uint, 0); 178 MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1"); 179 180 struct mmc_davinci_host { 181 struct mmc_command *cmd; 182 struct mmc_data *data; 183 struct mmc_host *mmc; 184 struct clk *clk; 185 unsigned int mmc_input_clk; 186 void __iomem *base; 187 struct resource *mem_res; 188 int mmc_irq, sdio_irq; 189 unsigned char bus_mode; 190 191 #define DAVINCI_MMC_DATADIR_NONE 0 192 #define DAVINCI_MMC_DATADIR_READ 1 193 #define DAVINCI_MMC_DATADIR_WRITE 2 194 unsigned char data_dir; 195 196 /* buffer is used during PIO of one scatterlist segment, and 197 * is updated along with buffer_bytes_left. bytes_left applies 198 * to all N blocks of the PIO transfer. 199 */ 200 u8 *buffer; 201 u32 buffer_bytes_left; 202 u32 bytes_left; 203 204 struct dma_chan *dma_tx; 205 struct dma_chan *dma_rx; 206 bool use_dma; 207 bool do_dma; 208 bool sdio_int; 209 bool active_request; 210 211 /* For PIO we walk scatterlists one segment at a time. */ 212 unsigned int sg_len; 213 struct scatterlist *sg; 214 215 /* Version of the MMC/SD controller */ 216 u8 version; 217 /* for ns in one cycle calculation */ 218 unsigned ns_in_one_cycle; 219 /* Number of sg segments */ 220 u8 nr_sg; 221 #ifdef CONFIG_CPU_FREQ 222 struct notifier_block freq_transition; 223 #endif 224 }; 225 226 static irqreturn_t mmc_davinci_irq(int irq, void *dev_id); 227 228 /* PIO only */ 229 static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host) 230 { 231 host->buffer_bytes_left = sg_dma_len(host->sg); 232 host->buffer = sg_virt(host->sg); 233 if (host->buffer_bytes_left > host->bytes_left) 234 host->buffer_bytes_left = host->bytes_left; 235 } 236 237 static void davinci_fifo_data_trans(struct mmc_davinci_host *host, 238 unsigned int n) 239 { 240 u8 *p; 241 unsigned int i; 242 243 if (host->buffer_bytes_left == 0) { 244 host->sg = sg_next(host->data->sg); 245 mmc_davinci_sg_to_buf(host); 246 } 247 248 p = host->buffer; 249 if (n > host->buffer_bytes_left) 250 n = host->buffer_bytes_left; 251 host->buffer_bytes_left -= n; 252 host->bytes_left -= n; 253 254 /* NOTE: we never transfer more than rw_threshold bytes 255 * to/from the fifo here; there's no I/O overlap. 256 * This also assumes that access width( i.e. ACCWD) is 4 bytes 257 */ 258 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 259 for (i = 0; i < (n >> 2); i++) { 260 writel(*((u32 *)p), host->base + DAVINCI_MMCDXR); 261 p = p + 4; 262 } 263 if (n & 3) { 264 iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3)); 265 p = p + (n & 3); 266 } 267 } else { 268 for (i = 0; i < (n >> 2); i++) { 269 *((u32 *)p) = readl(host->base + DAVINCI_MMCDRR); 270 p = p + 4; 271 } 272 if (n & 3) { 273 ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3)); 274 p = p + (n & 3); 275 } 276 } 277 host->buffer = p; 278 } 279 280 static void mmc_davinci_start_command(struct mmc_davinci_host *host, 281 struct mmc_command *cmd) 282 { 283 u32 cmd_reg = 0; 284 u32 im_val; 285 286 dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n", 287 cmd->opcode, cmd->arg, 288 ({ char *s; 289 switch (mmc_resp_type(cmd)) { 290 case MMC_RSP_R1: 291 s = ", R1/R5/R6/R7 response"; 292 break; 293 case MMC_RSP_R1B: 294 s = ", R1b response"; 295 break; 296 case MMC_RSP_R2: 297 s = ", R2 response"; 298 break; 299 case MMC_RSP_R3: 300 s = ", R3/R4 response"; 301 break; 302 default: 303 s = ", (R? response)"; 304 break; 305 }; s; })); 306 host->cmd = cmd; 307 308 switch (mmc_resp_type(cmd)) { 309 case MMC_RSP_R1B: 310 /* There's some spec confusion about when R1B is 311 * allowed, but if the card doesn't issue a BUSY 312 * then it's harmless for us to allow it. 313 */ 314 cmd_reg |= MMCCMD_BSYEXP; 315 /* FALLTHROUGH */ 316 case MMC_RSP_R1: /* 48 bits, CRC */ 317 cmd_reg |= MMCCMD_RSPFMT_R1456; 318 break; 319 case MMC_RSP_R2: /* 136 bits, CRC */ 320 cmd_reg |= MMCCMD_RSPFMT_R2; 321 break; 322 case MMC_RSP_R3: /* 48 bits, no CRC */ 323 cmd_reg |= MMCCMD_RSPFMT_R3; 324 break; 325 default: 326 cmd_reg |= MMCCMD_RSPFMT_NONE; 327 dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n", 328 mmc_resp_type(cmd)); 329 break; 330 } 331 332 /* Set command index */ 333 cmd_reg |= cmd->opcode; 334 335 /* Enable EDMA transfer triggers */ 336 if (host->do_dma) 337 cmd_reg |= MMCCMD_DMATRIG; 338 339 if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL && 340 host->data_dir == DAVINCI_MMC_DATADIR_READ) 341 cmd_reg |= MMCCMD_DMATRIG; 342 343 /* Setting whether command involves data transfer or not */ 344 if (cmd->data) 345 cmd_reg |= MMCCMD_WDATX; 346 347 /* Setting whether data read or write */ 348 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) 349 cmd_reg |= MMCCMD_DTRW; 350 351 if (host->bus_mode == MMC_BUSMODE_PUSHPULL) 352 cmd_reg |= MMCCMD_PPLEN; 353 354 /* set Command timeout */ 355 writel(0x1FFF, host->base + DAVINCI_MMCTOR); 356 357 /* Enable interrupt (calculate here, defer until FIFO is stuffed). */ 358 im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS; 359 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 360 im_val |= MMCST0_DATDNE | MMCST0_CRCWR; 361 362 if (!host->do_dma) 363 im_val |= MMCST0_DXRDY; 364 } else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) { 365 im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD; 366 367 if (!host->do_dma) 368 im_val |= MMCST0_DRRDY; 369 } 370 371 /* 372 * Before non-DMA WRITE commands the controller needs priming: 373 * FIFO should be populated with 32 bytes i.e. whatever is the FIFO size 374 */ 375 if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)) 376 davinci_fifo_data_trans(host, rw_threshold); 377 378 writel(cmd->arg, host->base + DAVINCI_MMCARGHL); 379 writel(cmd_reg, host->base + DAVINCI_MMCCMD); 380 381 host->active_request = true; 382 383 if (!host->do_dma && host->bytes_left <= poll_threshold) { 384 u32 count = poll_loopcount; 385 386 while (host->active_request && count--) { 387 mmc_davinci_irq(0, host); 388 cpu_relax(); 389 } 390 } 391 392 if (host->active_request) 393 writel(im_val, host->base + DAVINCI_MMCIM); 394 } 395 396 /*----------------------------------------------------------------------*/ 397 398 /* DMA infrastructure */ 399 400 static void davinci_abort_dma(struct mmc_davinci_host *host) 401 { 402 struct dma_chan *sync_dev; 403 404 if (host->data_dir == DAVINCI_MMC_DATADIR_READ) 405 sync_dev = host->dma_rx; 406 else 407 sync_dev = host->dma_tx; 408 409 dmaengine_terminate_all(sync_dev); 410 } 411 412 static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host, 413 struct mmc_data *data) 414 { 415 struct dma_chan *chan; 416 struct dma_async_tx_descriptor *desc; 417 int ret = 0; 418 419 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 420 struct dma_slave_config dma_tx_conf = { 421 .direction = DMA_MEM_TO_DEV, 422 .dst_addr = host->mem_res->start + DAVINCI_MMCDXR, 423 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 424 .dst_maxburst = 425 rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, 426 }; 427 chan = host->dma_tx; 428 dmaengine_slave_config(host->dma_tx, &dma_tx_conf); 429 430 desc = dmaengine_prep_slave_sg(host->dma_tx, 431 data->sg, 432 host->sg_len, 433 DMA_MEM_TO_DEV, 434 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 435 if (!desc) { 436 dev_dbg(mmc_dev(host->mmc), 437 "failed to allocate DMA TX descriptor"); 438 ret = -1; 439 goto out; 440 } 441 } else { 442 struct dma_slave_config dma_rx_conf = { 443 .direction = DMA_DEV_TO_MEM, 444 .src_addr = host->mem_res->start + DAVINCI_MMCDRR, 445 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 446 .src_maxburst = 447 rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, 448 }; 449 chan = host->dma_rx; 450 dmaengine_slave_config(host->dma_rx, &dma_rx_conf); 451 452 desc = dmaengine_prep_slave_sg(host->dma_rx, 453 data->sg, 454 host->sg_len, 455 DMA_DEV_TO_MEM, 456 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 457 if (!desc) { 458 dev_dbg(mmc_dev(host->mmc), 459 "failed to allocate DMA RX descriptor"); 460 ret = -1; 461 goto out; 462 } 463 } 464 465 dmaengine_submit(desc); 466 dma_async_issue_pending(chan); 467 468 out: 469 return ret; 470 } 471 472 static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, 473 struct mmc_data *data) 474 { 475 int i; 476 int mask = rw_threshold - 1; 477 int ret = 0; 478 479 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 480 ((data->flags & MMC_DATA_WRITE) 481 ? DMA_TO_DEVICE 482 : DMA_FROM_DEVICE)); 483 484 /* no individual DMA segment should need a partial FIFO */ 485 for (i = 0; i < host->sg_len; i++) { 486 if (sg_dma_len(data->sg + i) & mask) { 487 dma_unmap_sg(mmc_dev(host->mmc), 488 data->sg, data->sg_len, 489 (data->flags & MMC_DATA_WRITE) 490 ? DMA_TO_DEVICE 491 : DMA_FROM_DEVICE); 492 return -1; 493 } 494 } 495 496 host->do_dma = 1; 497 ret = mmc_davinci_send_dma_request(host, data); 498 499 return ret; 500 } 501 502 static void __init_or_module 503 davinci_release_dma_channels(struct mmc_davinci_host *host) 504 { 505 if (!host->use_dma) 506 return; 507 508 dma_release_channel(host->dma_tx); 509 dma_release_channel(host->dma_rx); 510 } 511 512 static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) 513 { 514 host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx"); 515 if (IS_ERR(host->dma_tx)) { 516 dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n"); 517 return PTR_ERR(host->dma_tx); 518 } 519 520 host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx"); 521 if (IS_ERR(host->dma_rx)) { 522 dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n"); 523 dma_release_channel(host->dma_tx); 524 return PTR_ERR(host->dma_rx); 525 } 526 527 return 0; 528 } 529 530 /*----------------------------------------------------------------------*/ 531 532 static void 533 mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req) 534 { 535 int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0; 536 int timeout; 537 struct mmc_data *data = req->data; 538 539 if (host->version == MMC_CTLR_VERSION_2) 540 fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0; 541 542 host->data = data; 543 if (data == NULL) { 544 host->data_dir = DAVINCI_MMC_DATADIR_NONE; 545 writel(0, host->base + DAVINCI_MMCBLEN); 546 writel(0, host->base + DAVINCI_MMCNBLK); 547 return; 548 } 549 550 dev_dbg(mmc_dev(host->mmc), "%s, %d blocks of %d bytes\n", 551 (data->flags & MMC_DATA_WRITE) ? "write" : "read", 552 data->blocks, data->blksz); 553 dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n", 554 data->timeout_clks, data->timeout_ns); 555 timeout = data->timeout_clks + 556 (data->timeout_ns / host->ns_in_one_cycle); 557 if (timeout > 0xffff) 558 timeout = 0xffff; 559 560 writel(timeout, host->base + DAVINCI_MMCTOD); 561 writel(data->blocks, host->base + DAVINCI_MMCNBLK); 562 writel(data->blksz, host->base + DAVINCI_MMCBLEN); 563 564 /* Configure the FIFO */ 565 if (data->flags & MMC_DATA_WRITE) { 566 host->data_dir = DAVINCI_MMC_DATADIR_WRITE; 567 writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST, 568 host->base + DAVINCI_MMCFIFOCTL); 569 writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR, 570 host->base + DAVINCI_MMCFIFOCTL); 571 } else { 572 host->data_dir = DAVINCI_MMC_DATADIR_READ; 573 writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST, 574 host->base + DAVINCI_MMCFIFOCTL); 575 writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD, 576 host->base + DAVINCI_MMCFIFOCTL); 577 } 578 579 host->buffer = NULL; 580 host->bytes_left = data->blocks * data->blksz; 581 582 /* For now we try to use DMA whenever we won't need partial FIFO 583 * reads or writes, either for the whole transfer (as tested here) 584 * or for any individual scatterlist segment (tested when we call 585 * start_dma_transfer). 586 * 587 * While we *could* change that, unusual block sizes are rarely 588 * used. The occasional fallback to PIO should't hurt. 589 */ 590 if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0 591 && mmc_davinci_start_dma_transfer(host, data) == 0) { 592 /* zero this to ensure we take no PIO paths */ 593 host->bytes_left = 0; 594 } else { 595 /* Revert to CPU Copy */ 596 host->sg_len = data->sg_len; 597 host->sg = host->data->sg; 598 mmc_davinci_sg_to_buf(host); 599 } 600 } 601 602 static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req) 603 { 604 struct mmc_davinci_host *host = mmc_priv(mmc); 605 unsigned long timeout = jiffies + msecs_to_jiffies(900); 606 u32 mmcst1 = 0; 607 608 /* Card may still be sending BUSY after a previous operation, 609 * typically some kind of write. If so, we can't proceed yet. 610 */ 611 while (time_before(jiffies, timeout)) { 612 mmcst1 = readl(host->base + DAVINCI_MMCST1); 613 if (!(mmcst1 & MMCST1_BUSY)) 614 break; 615 cpu_relax(); 616 } 617 if (mmcst1 & MMCST1_BUSY) { 618 dev_err(mmc_dev(host->mmc), "still BUSY? bad ... \n"); 619 req->cmd->error = -ETIMEDOUT; 620 mmc_request_done(mmc, req); 621 return; 622 } 623 624 host->do_dma = 0; 625 mmc_davinci_prepare_data(host, req); 626 mmc_davinci_start_command(host, req->cmd); 627 } 628 629 static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host, 630 unsigned int mmc_req_freq) 631 { 632 unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0; 633 634 mmc_pclk = host->mmc_input_clk; 635 if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq)) 636 mmc_push_pull_divisor = ((unsigned int)mmc_pclk 637 / (2 * mmc_req_freq)) - 1; 638 else 639 mmc_push_pull_divisor = 0; 640 641 mmc_freq = (unsigned int)mmc_pclk 642 / (2 * (mmc_push_pull_divisor + 1)); 643 644 if (mmc_freq > mmc_req_freq) 645 mmc_push_pull_divisor = mmc_push_pull_divisor + 1; 646 /* Convert ns to clock cycles */ 647 if (mmc_req_freq <= 400000) 648 host->ns_in_one_cycle = (1000000) / (((mmc_pclk 649 / (2 * (mmc_push_pull_divisor + 1)))/1000)); 650 else 651 host->ns_in_one_cycle = (1000000) / (((mmc_pclk 652 / (2 * (mmc_push_pull_divisor + 1)))/1000000)); 653 654 return mmc_push_pull_divisor; 655 } 656 657 static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios) 658 { 659 unsigned int open_drain_freq = 0, mmc_pclk = 0; 660 unsigned int mmc_push_pull_freq = 0; 661 struct mmc_davinci_host *host = mmc_priv(mmc); 662 663 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { 664 u32 temp; 665 666 /* Ignoring the init clock value passed for fixing the inter 667 * operability with different cards. 668 */ 669 open_drain_freq = ((unsigned int)mmc_pclk 670 / (2 * MMCSD_INIT_CLOCK)) - 1; 671 672 if (open_drain_freq > 0xFF) 673 open_drain_freq = 0xFF; 674 675 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; 676 temp |= open_drain_freq; 677 writel(temp, host->base + DAVINCI_MMCCLK); 678 679 /* Convert ns to clock cycles */ 680 host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000); 681 } else { 682 u32 temp; 683 mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock); 684 685 if (mmc_push_pull_freq > 0xFF) 686 mmc_push_pull_freq = 0xFF; 687 688 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN; 689 writel(temp, host->base + DAVINCI_MMCCLK); 690 691 udelay(10); 692 693 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; 694 temp |= mmc_push_pull_freq; 695 writel(temp, host->base + DAVINCI_MMCCLK); 696 697 writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); 698 699 udelay(10); 700 } 701 } 702 703 static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 704 { 705 struct mmc_davinci_host *host = mmc_priv(mmc); 706 struct platform_device *pdev = to_platform_device(mmc->parent); 707 struct davinci_mmc_config *config = pdev->dev.platform_data; 708 709 dev_dbg(mmc_dev(host->mmc), 710 "clock %dHz busmode %d powermode %d Vdd %04x\n", 711 ios->clock, ios->bus_mode, ios->power_mode, 712 ios->vdd); 713 714 switch (ios->power_mode) { 715 case MMC_POWER_OFF: 716 if (config && config->set_power) 717 config->set_power(pdev->id, false); 718 break; 719 case MMC_POWER_UP: 720 if (config && config->set_power) 721 config->set_power(pdev->id, true); 722 break; 723 } 724 725 switch (ios->bus_width) { 726 case MMC_BUS_WIDTH_8: 727 dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n"); 728 writel((readl(host->base + DAVINCI_MMCCTL) & 729 ~MMCCTL_WIDTH_4_BIT) | MMCCTL_WIDTH_8_BIT, 730 host->base + DAVINCI_MMCCTL); 731 break; 732 case MMC_BUS_WIDTH_4: 733 dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n"); 734 if (host->version == MMC_CTLR_VERSION_2) 735 writel((readl(host->base + DAVINCI_MMCCTL) & 736 ~MMCCTL_WIDTH_8_BIT) | MMCCTL_WIDTH_4_BIT, 737 host->base + DAVINCI_MMCCTL); 738 else 739 writel(readl(host->base + DAVINCI_MMCCTL) | 740 MMCCTL_WIDTH_4_BIT, 741 host->base + DAVINCI_MMCCTL); 742 break; 743 case MMC_BUS_WIDTH_1: 744 dev_dbg(mmc_dev(host->mmc), "Enabling 1 bit mode\n"); 745 if (host->version == MMC_CTLR_VERSION_2) 746 writel(readl(host->base + DAVINCI_MMCCTL) & 747 ~(MMCCTL_WIDTH_8_BIT | MMCCTL_WIDTH_4_BIT), 748 host->base + DAVINCI_MMCCTL); 749 else 750 writel(readl(host->base + DAVINCI_MMCCTL) & 751 ~MMCCTL_WIDTH_4_BIT, 752 host->base + DAVINCI_MMCCTL); 753 break; 754 } 755 756 calculate_clk_divider(mmc, ios); 757 758 host->bus_mode = ios->bus_mode; 759 if (ios->power_mode == MMC_POWER_UP) { 760 unsigned long timeout = jiffies + msecs_to_jiffies(50); 761 bool lose = true; 762 763 /* Send clock cycles, poll completion */ 764 writel(0, host->base + DAVINCI_MMCARGHL); 765 writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD); 766 while (time_before(jiffies, timeout)) { 767 u32 tmp = readl(host->base + DAVINCI_MMCST0); 768 769 if (tmp & MMCST0_RSPDNE) { 770 lose = false; 771 break; 772 } 773 cpu_relax(); 774 } 775 if (lose) 776 dev_warn(mmc_dev(host->mmc), "powerup timeout\n"); 777 } 778 779 /* FIXME on power OFF, reset things ... */ 780 } 781 782 static void 783 mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data) 784 { 785 host->data = NULL; 786 787 if (host->mmc->caps & MMC_CAP_SDIO_IRQ) { 788 /* 789 * SDIO Interrupt Detection work-around as suggested by 790 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata 791 * 2.1.6): Signal SDIO interrupt only if it is enabled by core 792 */ 793 if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) & 794 SDIOST0_DAT1_HI)) { 795 writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); 796 mmc_signal_sdio_irq(host->mmc); 797 } 798 } 799 800 if (host->do_dma) { 801 davinci_abort_dma(host); 802 803 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 804 (data->flags & MMC_DATA_WRITE) 805 ? DMA_TO_DEVICE 806 : DMA_FROM_DEVICE); 807 host->do_dma = false; 808 } 809 host->data_dir = DAVINCI_MMC_DATADIR_NONE; 810 811 if (!data->stop || (host->cmd && host->cmd->error)) { 812 mmc_request_done(host->mmc, data->mrq); 813 writel(0, host->base + DAVINCI_MMCIM); 814 host->active_request = false; 815 } else 816 mmc_davinci_start_command(host, data->stop); 817 } 818 819 static void mmc_davinci_cmd_done(struct mmc_davinci_host *host, 820 struct mmc_command *cmd) 821 { 822 host->cmd = NULL; 823 824 if (cmd->flags & MMC_RSP_PRESENT) { 825 if (cmd->flags & MMC_RSP_136) { 826 /* response type 2 */ 827 cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01); 828 cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23); 829 cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45); 830 cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); 831 } else { 832 /* response types 1, 1b, 3, 4, 5, 6 */ 833 cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); 834 } 835 } 836 837 if (host->data == NULL || cmd->error) { 838 if (cmd->error == -ETIMEDOUT) 839 cmd->mrq->cmd->retries = 0; 840 mmc_request_done(host->mmc, cmd->mrq); 841 writel(0, host->base + DAVINCI_MMCIM); 842 host->active_request = false; 843 } 844 } 845 846 static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host, 847 int val) 848 { 849 u32 temp; 850 851 temp = readl(host->base + DAVINCI_MMCCTL); 852 if (val) /* reset */ 853 temp |= MMCCTL_CMDRST | MMCCTL_DATRST; 854 else /* enable */ 855 temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST); 856 857 writel(temp, host->base + DAVINCI_MMCCTL); 858 udelay(10); 859 } 860 861 static void 862 davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data) 863 { 864 mmc_davinci_reset_ctrl(host, 1); 865 mmc_davinci_reset_ctrl(host, 0); 866 } 867 868 static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id) 869 { 870 struct mmc_davinci_host *host = dev_id; 871 unsigned int status; 872 873 status = readl(host->base + DAVINCI_SDIOIST); 874 if (status & SDIOIST_IOINT) { 875 dev_dbg(mmc_dev(host->mmc), 876 "SDIO interrupt status %x\n", status); 877 writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); 878 mmc_signal_sdio_irq(host->mmc); 879 } 880 return IRQ_HANDLED; 881 } 882 883 static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) 884 { 885 struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id; 886 unsigned int status, qstatus; 887 int end_command = 0; 888 int end_transfer = 0; 889 struct mmc_data *data = host->data; 890 891 if (host->cmd == NULL && host->data == NULL) { 892 status = readl(host->base + DAVINCI_MMCST0); 893 dev_dbg(mmc_dev(host->mmc), 894 "Spurious interrupt 0x%04x\n", status); 895 /* Disable the interrupt from mmcsd */ 896 writel(0, host->base + DAVINCI_MMCIM); 897 return IRQ_NONE; 898 } 899 900 status = readl(host->base + DAVINCI_MMCST0); 901 qstatus = status; 902 903 /* handle FIFO first when using PIO for data. 904 * bytes_left will decrease to zero as I/O progress and status will 905 * read zero over iteration because this controller status 906 * register(MMCST0) reports any status only once and it is cleared 907 * by read. So, it is not unbouned loop even in the case of 908 * non-dma. 909 */ 910 if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) { 911 unsigned long im_val; 912 913 /* 914 * If interrupts fire during the following loop, they will be 915 * handled by the handler, but the PIC will still buffer these. 916 * As a result, the handler will be called again to serve these 917 * needlessly. In order to avoid these spurious interrupts, 918 * keep interrupts masked during the loop. 919 */ 920 im_val = readl(host->base + DAVINCI_MMCIM); 921 writel(0, host->base + DAVINCI_MMCIM); 922 923 do { 924 davinci_fifo_data_trans(host, rw_threshold); 925 status = readl(host->base + DAVINCI_MMCST0); 926 qstatus |= status; 927 } while (host->bytes_left && 928 (status & (MMCST0_DXRDY | MMCST0_DRRDY))); 929 930 /* 931 * If an interrupt is pending, it is assumed it will fire when 932 * it is unmasked. This assumption is also taken when the MMCIM 933 * is first set. Otherwise, writing to MMCIM after reading the 934 * status is race-prone. 935 */ 936 writel(im_val, host->base + DAVINCI_MMCIM); 937 } 938 939 if (qstatus & MMCST0_DATDNE) { 940 /* All blocks sent/received, and CRC checks passed */ 941 if (data != NULL) { 942 if ((host->do_dma == 0) && (host->bytes_left > 0)) { 943 /* if datasize < rw_threshold 944 * no RX ints are generated 945 */ 946 davinci_fifo_data_trans(host, host->bytes_left); 947 } 948 end_transfer = 1; 949 data->bytes_xfered = data->blocks * data->blksz; 950 } else { 951 dev_err(mmc_dev(host->mmc), 952 "DATDNE with no host->data\n"); 953 } 954 } 955 956 if (qstatus & MMCST0_TOUTRD) { 957 /* Read data timeout */ 958 data->error = -ETIMEDOUT; 959 end_transfer = 1; 960 961 dev_dbg(mmc_dev(host->mmc), 962 "read data timeout, status %x\n", 963 qstatus); 964 965 davinci_abort_data(host, data); 966 } 967 968 if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) { 969 /* Data CRC error */ 970 data->error = -EILSEQ; 971 end_transfer = 1; 972 973 /* NOTE: this controller uses CRCWR to report both CRC 974 * errors and timeouts (on writes). MMCDRSP values are 975 * only weakly documented, but 0x9f was clearly a timeout 976 * case and the two three-bit patterns in various SD specs 977 * (101, 010) aren't part of it ... 978 */ 979 if (qstatus & MMCST0_CRCWR) { 980 u32 temp = readb(host->base + DAVINCI_MMCDRSP); 981 982 if (temp == 0x9f) 983 data->error = -ETIMEDOUT; 984 } 985 dev_dbg(mmc_dev(host->mmc), "data %s %s error\n", 986 (qstatus & MMCST0_CRCWR) ? "write" : "read", 987 (data->error == -ETIMEDOUT) ? "timeout" : "CRC"); 988 989 davinci_abort_data(host, data); 990 } 991 992 if (qstatus & MMCST0_TOUTRS) { 993 /* Command timeout */ 994 if (host->cmd) { 995 dev_dbg(mmc_dev(host->mmc), 996 "CMD%d timeout, status %x\n", 997 host->cmd->opcode, qstatus); 998 host->cmd->error = -ETIMEDOUT; 999 if (data) { 1000 end_transfer = 1; 1001 davinci_abort_data(host, data); 1002 } else 1003 end_command = 1; 1004 } 1005 } 1006 1007 if (qstatus & MMCST0_CRCRS) { 1008 /* Command CRC error */ 1009 dev_dbg(mmc_dev(host->mmc), "Command CRC error\n"); 1010 if (host->cmd) { 1011 host->cmd->error = -EILSEQ; 1012 end_command = 1; 1013 } 1014 } 1015 1016 if (qstatus & MMCST0_RSPDNE) { 1017 /* End of command phase */ 1018 end_command = (int) host->cmd; 1019 } 1020 1021 if (end_command) 1022 mmc_davinci_cmd_done(host, host->cmd); 1023 if (end_transfer) 1024 mmc_davinci_xfer_done(host, data); 1025 return IRQ_HANDLED; 1026 } 1027 1028 static int mmc_davinci_get_cd(struct mmc_host *mmc) 1029 { 1030 struct platform_device *pdev = to_platform_device(mmc->parent); 1031 struct davinci_mmc_config *config = pdev->dev.platform_data; 1032 1033 if (config && config->get_cd) 1034 return config->get_cd(pdev->id); 1035 1036 return mmc_gpio_get_cd(mmc); 1037 } 1038 1039 static int mmc_davinci_get_ro(struct mmc_host *mmc) 1040 { 1041 struct platform_device *pdev = to_platform_device(mmc->parent); 1042 struct davinci_mmc_config *config = pdev->dev.platform_data; 1043 1044 if (config && config->get_ro) 1045 return config->get_ro(pdev->id); 1046 1047 return mmc_gpio_get_ro(mmc); 1048 } 1049 1050 static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable) 1051 { 1052 struct mmc_davinci_host *host = mmc_priv(mmc); 1053 1054 if (enable) { 1055 if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) { 1056 writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); 1057 mmc_signal_sdio_irq(host->mmc); 1058 } else { 1059 host->sdio_int = true; 1060 writel(readl(host->base + DAVINCI_SDIOIEN) | 1061 SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN); 1062 } 1063 } else { 1064 host->sdio_int = false; 1065 writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN, 1066 host->base + DAVINCI_SDIOIEN); 1067 } 1068 } 1069 1070 static struct mmc_host_ops mmc_davinci_ops = { 1071 .request = mmc_davinci_request, 1072 .set_ios = mmc_davinci_set_ios, 1073 .get_cd = mmc_davinci_get_cd, 1074 .get_ro = mmc_davinci_get_ro, 1075 .enable_sdio_irq = mmc_davinci_enable_sdio_irq, 1076 }; 1077 1078 /*----------------------------------------------------------------------*/ 1079 1080 #ifdef CONFIG_CPU_FREQ 1081 static int mmc_davinci_cpufreq_transition(struct notifier_block *nb, 1082 unsigned long val, void *data) 1083 { 1084 struct mmc_davinci_host *host; 1085 unsigned int mmc_pclk; 1086 struct mmc_host *mmc; 1087 unsigned long flags; 1088 1089 host = container_of(nb, struct mmc_davinci_host, freq_transition); 1090 mmc = host->mmc; 1091 mmc_pclk = clk_get_rate(host->clk); 1092 1093 if (val == CPUFREQ_POSTCHANGE) { 1094 spin_lock_irqsave(&mmc->lock, flags); 1095 host->mmc_input_clk = mmc_pclk; 1096 calculate_clk_divider(mmc, &mmc->ios); 1097 spin_unlock_irqrestore(&mmc->lock, flags); 1098 } 1099 1100 return 0; 1101 } 1102 1103 static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) 1104 { 1105 host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition; 1106 1107 return cpufreq_register_notifier(&host->freq_transition, 1108 CPUFREQ_TRANSITION_NOTIFIER); 1109 } 1110 1111 static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) 1112 { 1113 cpufreq_unregister_notifier(&host->freq_transition, 1114 CPUFREQ_TRANSITION_NOTIFIER); 1115 } 1116 #else 1117 static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) 1118 { 1119 return 0; 1120 } 1121 1122 static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) 1123 { 1124 } 1125 #endif 1126 static void __init init_mmcsd_host(struct mmc_davinci_host *host) 1127 { 1128 1129 mmc_davinci_reset_ctrl(host, 1); 1130 1131 writel(0, host->base + DAVINCI_MMCCLK); 1132 writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); 1133 1134 writel(0x1FFF, host->base + DAVINCI_MMCTOR); 1135 writel(0xFFFF, host->base + DAVINCI_MMCTOD); 1136 1137 mmc_davinci_reset_ctrl(host, 0); 1138 } 1139 1140 static const struct platform_device_id davinci_mmc_devtype[] = { 1141 { 1142 .name = "dm6441-mmc", 1143 .driver_data = MMC_CTLR_VERSION_1, 1144 }, { 1145 .name = "da830-mmc", 1146 .driver_data = MMC_CTLR_VERSION_2, 1147 }, 1148 {}, 1149 }; 1150 MODULE_DEVICE_TABLE(platform, davinci_mmc_devtype); 1151 1152 static const struct of_device_id davinci_mmc_dt_ids[] = { 1153 { 1154 .compatible = "ti,dm6441-mmc", 1155 .data = &davinci_mmc_devtype[MMC_CTLR_VERSION_1], 1156 }, 1157 { 1158 .compatible = "ti,da830-mmc", 1159 .data = &davinci_mmc_devtype[MMC_CTLR_VERSION_2], 1160 }, 1161 {}, 1162 }; 1163 MODULE_DEVICE_TABLE(of, davinci_mmc_dt_ids); 1164 1165 static int mmc_davinci_parse_pdata(struct mmc_host *mmc) 1166 { 1167 struct platform_device *pdev = to_platform_device(mmc->parent); 1168 struct davinci_mmc_config *pdata = pdev->dev.platform_data; 1169 struct mmc_davinci_host *host; 1170 int ret; 1171 1172 if (!pdata) 1173 return -EINVAL; 1174 1175 host = mmc_priv(mmc); 1176 if (!host) 1177 return -EINVAL; 1178 1179 if (pdata && pdata->nr_sg) 1180 host->nr_sg = pdata->nr_sg - 1; 1181 1182 if (pdata && (pdata->wires == 4 || pdata->wires == 0)) 1183 mmc->caps |= MMC_CAP_4_BIT_DATA; 1184 1185 if (pdata && (pdata->wires == 8)) 1186 mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA); 1187 1188 mmc->f_min = 312500; 1189 mmc->f_max = 25000000; 1190 if (pdata && pdata->max_freq) 1191 mmc->f_max = pdata->max_freq; 1192 if (pdata && pdata->caps) 1193 mmc->caps |= pdata->caps; 1194 1195 /* Register a cd gpio, if there is not one, enable polling */ 1196 ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); 1197 if (ret == -EPROBE_DEFER) 1198 return ret; 1199 else if (ret) 1200 mmc->caps |= MMC_CAP_NEEDS_POLL; 1201 1202 ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL); 1203 if (ret == -EPROBE_DEFER) 1204 return ret; 1205 1206 return 0; 1207 } 1208 1209 static int __init davinci_mmcsd_probe(struct platform_device *pdev) 1210 { 1211 const struct of_device_id *match; 1212 struct mmc_davinci_host *host = NULL; 1213 struct mmc_host *mmc = NULL; 1214 struct resource *r, *mem = NULL; 1215 int ret, irq; 1216 size_t mem_size; 1217 const struct platform_device_id *id_entry; 1218 1219 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1220 if (!r) 1221 return -ENODEV; 1222 irq = platform_get_irq(pdev, 0); 1223 if (irq < 0) 1224 return irq; 1225 1226 mem_size = resource_size(r); 1227 mem = devm_request_mem_region(&pdev->dev, r->start, mem_size, 1228 pdev->name); 1229 if (!mem) 1230 return -EBUSY; 1231 1232 mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev); 1233 if (!mmc) 1234 return -ENOMEM; 1235 1236 host = mmc_priv(mmc); 1237 host->mmc = mmc; /* Important */ 1238 1239 host->mem_res = mem; 1240 host->base = devm_ioremap(&pdev->dev, mem->start, mem_size); 1241 if (!host->base) { 1242 ret = -ENOMEM; 1243 goto ioremap_fail; 1244 } 1245 1246 host->clk = devm_clk_get(&pdev->dev, NULL); 1247 if (IS_ERR(host->clk)) { 1248 ret = PTR_ERR(host->clk); 1249 goto clk_get_fail; 1250 } 1251 ret = clk_prepare_enable(host->clk); 1252 if (ret) 1253 goto clk_prepare_enable_fail; 1254 1255 host->mmc_input_clk = clk_get_rate(host->clk); 1256 1257 match = of_match_device(davinci_mmc_dt_ids, &pdev->dev); 1258 if (match) { 1259 pdev->id_entry = match->data; 1260 ret = mmc_of_parse(mmc); 1261 if (ret) { 1262 dev_err(&pdev->dev, 1263 "could not parse of data: %d\n", ret); 1264 goto parse_fail; 1265 } 1266 } else { 1267 ret = mmc_davinci_parse_pdata(mmc); 1268 if (ret) { 1269 dev_err(&pdev->dev, 1270 "could not parse platform data: %d\n", ret); 1271 goto parse_fail; 1272 } } 1273 1274 if (host->nr_sg > MAX_NR_SG || !host->nr_sg) 1275 host->nr_sg = MAX_NR_SG; 1276 1277 init_mmcsd_host(host); 1278 1279 host->use_dma = use_dma; 1280 host->mmc_irq = irq; 1281 host->sdio_irq = platform_get_irq(pdev, 1); 1282 1283 if (host->use_dma) { 1284 ret = davinci_acquire_dma_channels(host); 1285 if (ret == -EPROBE_DEFER) 1286 goto dma_probe_defer; 1287 else if (ret) 1288 host->use_dma = 0; 1289 } 1290 1291 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; 1292 1293 id_entry = platform_get_device_id(pdev); 1294 if (id_entry) 1295 host->version = id_entry->driver_data; 1296 1297 mmc->ops = &mmc_davinci_ops; 1298 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1299 1300 /* With no iommu coalescing pages, each phys_seg is a hw_seg. 1301 * Each hw_seg uses one EDMA parameter RAM slot, always one 1302 * channel and then usually some linked slots. 1303 */ 1304 mmc->max_segs = MAX_NR_SG; 1305 1306 /* EDMA limit per hw segment (one or two MBytes) */ 1307 mmc->max_seg_size = MAX_CCNT * rw_threshold; 1308 1309 /* MMC/SD controller limits for multiblock requests */ 1310 mmc->max_blk_size = 4095; /* BLEN is 12 bits */ 1311 mmc->max_blk_count = 65535; /* NBLK is 16 bits */ 1312 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1313 1314 dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs); 1315 dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size); 1316 dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size); 1317 dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size); 1318 1319 platform_set_drvdata(pdev, host); 1320 1321 ret = mmc_davinci_cpufreq_register(host); 1322 if (ret) { 1323 dev_err(&pdev->dev, "failed to register cpufreq\n"); 1324 goto cpu_freq_fail; 1325 } 1326 1327 ret = mmc_add_host(mmc); 1328 if (ret < 0) 1329 goto mmc_add_host_fail; 1330 1331 ret = devm_request_irq(&pdev->dev, irq, mmc_davinci_irq, 0, 1332 mmc_hostname(mmc), host); 1333 if (ret) 1334 goto request_irq_fail; 1335 1336 if (host->sdio_irq >= 0) { 1337 ret = devm_request_irq(&pdev->dev, host->sdio_irq, 1338 mmc_davinci_sdio_irq, 0, 1339 mmc_hostname(mmc), host); 1340 if (!ret) 1341 mmc->caps |= MMC_CAP_SDIO_IRQ; 1342 } 1343 1344 rename_region(mem, mmc_hostname(mmc)); 1345 1346 dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n", 1347 host->use_dma ? "DMA" : "PIO", 1348 (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1); 1349 1350 return 0; 1351 1352 request_irq_fail: 1353 mmc_remove_host(mmc); 1354 mmc_add_host_fail: 1355 mmc_davinci_cpufreq_deregister(host); 1356 cpu_freq_fail: 1357 davinci_release_dma_channels(host); 1358 parse_fail: 1359 dma_probe_defer: 1360 clk_disable_unprepare(host->clk); 1361 clk_prepare_enable_fail: 1362 clk_get_fail: 1363 ioremap_fail: 1364 mmc_free_host(mmc); 1365 1366 return ret; 1367 } 1368 1369 static int __exit davinci_mmcsd_remove(struct platform_device *pdev) 1370 { 1371 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1372 1373 mmc_remove_host(host->mmc); 1374 mmc_davinci_cpufreq_deregister(host); 1375 davinci_release_dma_channels(host); 1376 clk_disable_unprepare(host->clk); 1377 mmc_free_host(host->mmc); 1378 1379 return 0; 1380 } 1381 1382 #ifdef CONFIG_PM 1383 static int davinci_mmcsd_suspend(struct device *dev) 1384 { 1385 struct platform_device *pdev = to_platform_device(dev); 1386 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1387 1388 writel(0, host->base + DAVINCI_MMCIM); 1389 mmc_davinci_reset_ctrl(host, 1); 1390 clk_disable(host->clk); 1391 1392 return 0; 1393 } 1394 1395 static int davinci_mmcsd_resume(struct device *dev) 1396 { 1397 struct platform_device *pdev = to_platform_device(dev); 1398 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1399 1400 clk_enable(host->clk); 1401 mmc_davinci_reset_ctrl(host, 0); 1402 1403 return 0; 1404 } 1405 1406 static const struct dev_pm_ops davinci_mmcsd_pm = { 1407 .suspend = davinci_mmcsd_suspend, 1408 .resume = davinci_mmcsd_resume, 1409 }; 1410 1411 #define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm) 1412 #else 1413 #define davinci_mmcsd_pm_ops NULL 1414 #endif 1415 1416 static struct platform_driver davinci_mmcsd_driver = { 1417 .driver = { 1418 .name = "davinci_mmc", 1419 .pm = davinci_mmcsd_pm_ops, 1420 .of_match_table = davinci_mmc_dt_ids, 1421 }, 1422 .remove = __exit_p(davinci_mmcsd_remove), 1423 .id_table = davinci_mmc_devtype, 1424 }; 1425 1426 module_platform_driver_probe(davinci_mmcsd_driver, davinci_mmcsd_probe); 1427 1428 MODULE_AUTHOR("Texas Instruments India"); 1429 MODULE_LICENSE("GPL"); 1430 MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller"); 1431 MODULE_ALIAS("platform:davinci_mmc"); 1432 1433