1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver 4 * 5 * Copyright (C) 2006 Texas Instruments. 6 * Original author: Purushotam Kumar 7 * Copyright (C) 2009 David Brownell 8 */ 9 10 #include <linux/module.h> 11 #include <linux/ioport.h> 12 #include <linux/platform_device.h> 13 #include <linux/clk.h> 14 #include <linux/err.h> 15 #include <linux/cpufreq.h> 16 #include <linux/mmc/host.h> 17 #include <linux/io.h> 18 #include <linux/irq.h> 19 #include <linux/delay.h> 20 #include <linux/dmaengine.h> 21 #include <linux/dma-mapping.h> 22 #include <linux/mmc/mmc.h> 23 #include <linux/of.h> 24 #include <linux/mmc/slot-gpio.h> 25 #include <linux/interrupt.h> 26 27 #include <linux/platform_data/mmc-davinci.h> 28 29 /* 30 * Register Definitions 31 */ 32 #define DAVINCI_MMCCTL 0x00 /* Control Register */ 33 #define DAVINCI_MMCCLK 0x04 /* Memory Clock Control Register */ 34 #define DAVINCI_MMCST0 0x08 /* Status Register 0 */ 35 #define DAVINCI_MMCST1 0x0C /* Status Register 1 */ 36 #define DAVINCI_MMCIM 0x10 /* Interrupt Mask Register */ 37 #define DAVINCI_MMCTOR 0x14 /* Response Time-Out Register */ 38 #define DAVINCI_MMCTOD 0x18 /* Data Read Time-Out Register */ 39 #define DAVINCI_MMCBLEN 0x1C /* Block Length Register */ 40 #define DAVINCI_MMCNBLK 0x20 /* Number of Blocks Register */ 41 #define DAVINCI_MMCNBLC 0x24 /* Number of Blocks Counter Register */ 42 #define DAVINCI_MMCDRR 0x28 /* Data Receive Register */ 43 #define DAVINCI_MMCDXR 0x2C /* Data Transmit Register */ 44 #define DAVINCI_MMCCMD 0x30 /* Command Register */ 45 #define DAVINCI_MMCARGHL 0x34 /* Argument Register */ 46 #define DAVINCI_MMCRSP01 0x38 /* Response Register 0 and 1 */ 47 #define DAVINCI_MMCRSP23 0x3C /* Response Register 0 and 1 */ 48 #define DAVINCI_MMCRSP45 0x40 /* Response Register 0 and 1 */ 49 #define DAVINCI_MMCRSP67 0x44 /* Response Register 0 and 1 */ 50 #define DAVINCI_MMCDRSP 0x48 /* Data Response Register */ 51 #define DAVINCI_MMCETOK 0x4C 52 #define DAVINCI_MMCCIDX 0x50 /* Command Index Register */ 53 #define DAVINCI_MMCCKC 0x54 54 #define DAVINCI_MMCTORC 0x58 55 #define DAVINCI_MMCTODC 0x5C 56 #define DAVINCI_MMCBLNC 0x60 57 #define DAVINCI_SDIOCTL 0x64 58 #define DAVINCI_SDIOST0 0x68 59 #define DAVINCI_SDIOIEN 0x6C 60 #define DAVINCI_SDIOIST 0x70 61 #define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */ 62 63 /* DAVINCI_MMCCTL definitions */ 64 #define MMCCTL_DATRST (1 << 0) 65 #define MMCCTL_CMDRST (1 << 1) 66 #define MMCCTL_WIDTH_8_BIT (1 << 8) 67 #define MMCCTL_WIDTH_4_BIT (1 << 2) 68 #define MMCCTL_DATEG_DISABLED (0 << 6) 69 #define MMCCTL_DATEG_RISING (1 << 6) 70 #define MMCCTL_DATEG_FALLING (2 << 6) 71 #define MMCCTL_DATEG_BOTH (3 << 6) 72 #define MMCCTL_PERMDR_LE (0 << 9) 73 #define MMCCTL_PERMDR_BE (1 << 9) 74 #define MMCCTL_PERMDX_LE (0 << 10) 75 #define MMCCTL_PERMDX_BE (1 << 10) 76 77 /* DAVINCI_MMCCLK definitions */ 78 #define MMCCLK_CLKEN (1 << 8) 79 #define MMCCLK_CLKRT_MASK (0xFF << 0) 80 81 /* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */ 82 #define MMCST0_DATDNE BIT(0) /* data done */ 83 #define MMCST0_BSYDNE BIT(1) /* busy done */ 84 #define MMCST0_RSPDNE BIT(2) /* command done */ 85 #define MMCST0_TOUTRD BIT(3) /* data read timeout */ 86 #define MMCST0_TOUTRS BIT(4) /* command response timeout */ 87 #define MMCST0_CRCWR BIT(5) /* data write CRC error */ 88 #define MMCST0_CRCRD BIT(6) /* data read CRC error */ 89 #define MMCST0_CRCRS BIT(7) /* command response CRC error */ 90 #define MMCST0_DXRDY BIT(9) /* data transmit ready (fifo empty) */ 91 #define MMCST0_DRRDY BIT(10) /* data receive ready (data in fifo)*/ 92 #define MMCST0_DATED BIT(11) /* DAT3 edge detect */ 93 #define MMCST0_TRNDNE BIT(12) /* transfer done */ 94 95 /* DAVINCI_MMCST1 definitions */ 96 #define MMCST1_BUSY (1 << 0) 97 98 /* DAVINCI_MMCCMD definitions */ 99 #define MMCCMD_CMD_MASK (0x3F << 0) 100 #define MMCCMD_PPLEN (1 << 7) 101 #define MMCCMD_BSYEXP (1 << 8) 102 #define MMCCMD_RSPFMT_MASK (3 << 9) 103 #define MMCCMD_RSPFMT_NONE (0 << 9) 104 #define MMCCMD_RSPFMT_R1456 (1 << 9) 105 #define MMCCMD_RSPFMT_R2 (2 << 9) 106 #define MMCCMD_RSPFMT_R3 (3 << 9) 107 #define MMCCMD_DTRW (1 << 11) 108 #define MMCCMD_STRMTP (1 << 12) 109 #define MMCCMD_WDATX (1 << 13) 110 #define MMCCMD_INITCK (1 << 14) 111 #define MMCCMD_DCLR (1 << 15) 112 #define MMCCMD_DMATRIG (1 << 16) 113 114 /* DAVINCI_MMCFIFOCTL definitions */ 115 #define MMCFIFOCTL_FIFORST (1 << 0) 116 #define MMCFIFOCTL_FIFODIR_WR (1 << 1) 117 #define MMCFIFOCTL_FIFODIR_RD (0 << 1) 118 #define MMCFIFOCTL_FIFOLEV (1 << 2) /* 0 = 128 bits, 1 = 256 bits */ 119 #define MMCFIFOCTL_ACCWD_4 (0 << 3) /* access width of 4 bytes */ 120 #define MMCFIFOCTL_ACCWD_3 (1 << 3) /* access width of 3 bytes */ 121 #define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */ 122 #define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */ 123 124 /* DAVINCI_SDIOST0 definitions */ 125 #define SDIOST0_DAT1_HI BIT(0) 126 127 /* DAVINCI_SDIOIEN definitions */ 128 #define SDIOIEN_IOINTEN BIT(0) 129 130 /* DAVINCI_SDIOIST definitions */ 131 #define SDIOIST_IOINT BIT(0) 132 133 /* MMCSD Init clock in Hz in opendrain mode */ 134 #define MMCSD_INIT_CLOCK 200000 135 136 /* 137 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units, 138 * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only 139 * for drivers with max_segs == 1, making the segments bigger (64KB) 140 * than the page or two that's otherwise typical. nr_sg (passed from 141 * platform data) == 16 gives at least the same throughput boost, using 142 * EDMA transfer linkage instead of spending CPU time copying pages. 143 */ 144 #define MAX_CCNT ((1 << 16) - 1) 145 146 #define MAX_NR_SG 16 147 148 static unsigned rw_threshold = 32; 149 module_param(rw_threshold, uint, S_IRUGO); 150 MODULE_PARM_DESC(rw_threshold, 151 "Read/Write threshold. Default = 32"); 152 153 static unsigned poll_threshold = 128; 154 module_param(poll_threshold, uint, S_IRUGO); 155 MODULE_PARM_DESC(poll_threshold, 156 "Polling transaction size threshold. Default = 128"); 157 158 static unsigned poll_loopcount = 32; 159 module_param(poll_loopcount, uint, S_IRUGO); 160 MODULE_PARM_DESC(poll_loopcount, 161 "Maximum polling loop count. Default = 32"); 162 163 static unsigned use_dma = 1; 164 module_param(use_dma, uint, 0); 165 MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1"); 166 167 struct mmc_davinci_host { 168 struct mmc_command *cmd; 169 struct mmc_data *data; 170 struct mmc_host *mmc; 171 struct clk *clk; 172 unsigned int mmc_input_clk; 173 void __iomem *base; 174 struct resource *mem_res; 175 int mmc_irq, sdio_irq; 176 unsigned char bus_mode; 177 178 #define DAVINCI_MMC_DATADIR_NONE 0 179 #define DAVINCI_MMC_DATADIR_READ 1 180 #define DAVINCI_MMC_DATADIR_WRITE 2 181 unsigned char data_dir; 182 183 u32 bytes_left; 184 185 struct dma_chan *dma_tx; 186 struct dma_chan *dma_rx; 187 bool use_dma; 188 bool do_dma; 189 bool sdio_int; 190 bool active_request; 191 192 /* For PIO we walk scatterlists one segment at a time. */ 193 struct sg_mapping_iter sg_miter; 194 unsigned int sg_len; 195 196 /* Version of the MMC/SD controller */ 197 u8 version; 198 /* for ns in one cycle calculation */ 199 unsigned ns_in_one_cycle; 200 /* Number of sg segments */ 201 u8 nr_sg; 202 #ifdef CONFIG_CPU_FREQ 203 struct notifier_block freq_transition; 204 #endif 205 }; 206 207 static irqreturn_t mmc_davinci_irq(int irq, void *dev_id); 208 209 /* PIO only */ 210 static void davinci_fifo_data_trans(struct mmc_davinci_host *host, 211 unsigned int n) 212 { 213 struct sg_mapping_iter *sgm = &host->sg_miter; 214 u8 *p; 215 unsigned int i; 216 217 /* 218 * By adjusting sgm->consumed this will give a pointer to the 219 * current index into the sgm. 220 */ 221 if (!sg_miter_next(sgm)) { 222 dev_err(mmc_dev(host->mmc), "ran out of sglist prematurely\n"); 223 return; 224 } 225 p = sgm->addr; 226 227 if (n > sgm->length) 228 n = sgm->length; 229 230 /* NOTE: we never transfer more than rw_threshold bytes 231 * to/from the fifo here; there's no I/O overlap. 232 * This also assumes that access width( i.e. ACCWD) is 4 bytes 233 */ 234 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 235 for (i = 0; i < (n >> 2); i++) { 236 writel(*((u32 *)p), host->base + DAVINCI_MMCDXR); 237 p = p + 4; 238 } 239 if (n & 3) { 240 iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3)); 241 p = p + (n & 3); 242 } 243 } else { 244 for (i = 0; i < (n >> 2); i++) { 245 *((u32 *)p) = readl(host->base + DAVINCI_MMCDRR); 246 p = p + 4; 247 } 248 if (n & 3) { 249 ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3)); 250 p = p + (n & 3); 251 } 252 } 253 254 sgm->consumed = n; 255 host->bytes_left -= n; 256 } 257 258 static void mmc_davinci_start_command(struct mmc_davinci_host *host, 259 struct mmc_command *cmd) 260 { 261 u32 cmd_reg = 0; 262 u32 im_val; 263 264 dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n", 265 cmd->opcode, cmd->arg, 266 ({ char *s; 267 switch (mmc_resp_type(cmd)) { 268 case MMC_RSP_R1: 269 s = ", R1/R5/R6/R7 response"; 270 break; 271 case MMC_RSP_R1B: 272 s = ", R1b response"; 273 break; 274 case MMC_RSP_R2: 275 s = ", R2 response"; 276 break; 277 case MMC_RSP_R3: 278 s = ", R3/R4 response"; 279 break; 280 default: 281 s = ", (R? response)"; 282 break; 283 } s; })); 284 host->cmd = cmd; 285 286 switch (mmc_resp_type(cmd)) { 287 case MMC_RSP_R1B: 288 /* There's some spec confusion about when R1B is 289 * allowed, but if the card doesn't issue a BUSY 290 * then it's harmless for us to allow it. 291 */ 292 cmd_reg |= MMCCMD_BSYEXP; 293 fallthrough; 294 case MMC_RSP_R1: /* 48 bits, CRC */ 295 cmd_reg |= MMCCMD_RSPFMT_R1456; 296 break; 297 case MMC_RSP_R2: /* 136 bits, CRC */ 298 cmd_reg |= MMCCMD_RSPFMT_R2; 299 break; 300 case MMC_RSP_R3: /* 48 bits, no CRC */ 301 cmd_reg |= MMCCMD_RSPFMT_R3; 302 break; 303 default: 304 cmd_reg |= MMCCMD_RSPFMT_NONE; 305 dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n", 306 mmc_resp_type(cmd)); 307 break; 308 } 309 310 /* Set command index */ 311 cmd_reg |= cmd->opcode; 312 313 /* Enable EDMA transfer triggers */ 314 if (host->do_dma) 315 cmd_reg |= MMCCMD_DMATRIG; 316 317 if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL && 318 host->data_dir == DAVINCI_MMC_DATADIR_READ) 319 cmd_reg |= MMCCMD_DMATRIG; 320 321 /* Setting whether command involves data transfer or not */ 322 if (cmd->data) 323 cmd_reg |= MMCCMD_WDATX; 324 325 /* Setting whether data read or write */ 326 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) 327 cmd_reg |= MMCCMD_DTRW; 328 329 if (host->bus_mode == MMC_BUSMODE_PUSHPULL) 330 cmd_reg |= MMCCMD_PPLEN; 331 332 /* set Command timeout */ 333 writel(0x1FFF, host->base + DAVINCI_MMCTOR); 334 335 /* Enable interrupt (calculate here, defer until FIFO is stuffed). */ 336 im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS; 337 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 338 im_val |= MMCST0_DATDNE | MMCST0_CRCWR; 339 340 if (!host->do_dma) 341 im_val |= MMCST0_DXRDY; 342 } else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) { 343 im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD; 344 345 if (!host->do_dma) 346 im_val |= MMCST0_DRRDY; 347 } 348 349 /* 350 * Before non-DMA WRITE commands the controller needs priming: 351 * FIFO should be populated with 32 bytes i.e. whatever is the FIFO size 352 */ 353 if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)) 354 davinci_fifo_data_trans(host, rw_threshold); 355 356 writel(cmd->arg, host->base + DAVINCI_MMCARGHL); 357 writel(cmd_reg, host->base + DAVINCI_MMCCMD); 358 359 host->active_request = true; 360 361 if (!host->do_dma && host->bytes_left <= poll_threshold) { 362 u32 count = poll_loopcount; 363 364 while (host->active_request && count--) { 365 mmc_davinci_irq(0, host); 366 cpu_relax(); 367 } 368 } 369 370 if (host->active_request) 371 writel(im_val, host->base + DAVINCI_MMCIM); 372 } 373 374 /*----------------------------------------------------------------------*/ 375 376 /* DMA infrastructure */ 377 378 static void davinci_abort_dma(struct mmc_davinci_host *host) 379 { 380 struct dma_chan *sync_dev; 381 382 if (host->data_dir == DAVINCI_MMC_DATADIR_READ) 383 sync_dev = host->dma_rx; 384 else 385 sync_dev = host->dma_tx; 386 387 dmaengine_terminate_all(sync_dev); 388 } 389 390 static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host, 391 struct mmc_data *data) 392 { 393 struct dma_chan *chan; 394 struct dma_async_tx_descriptor *desc; 395 int ret = 0; 396 397 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 398 struct dma_slave_config dma_tx_conf = { 399 .direction = DMA_MEM_TO_DEV, 400 .dst_addr = host->mem_res->start + DAVINCI_MMCDXR, 401 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 402 .dst_maxburst = 403 rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, 404 }; 405 chan = host->dma_tx; 406 dmaengine_slave_config(host->dma_tx, &dma_tx_conf); 407 408 desc = dmaengine_prep_slave_sg(host->dma_tx, 409 data->sg, 410 host->sg_len, 411 DMA_MEM_TO_DEV, 412 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 413 if (!desc) { 414 dev_dbg(mmc_dev(host->mmc), 415 "failed to allocate DMA TX descriptor"); 416 ret = -1; 417 goto out; 418 } 419 } else { 420 struct dma_slave_config dma_rx_conf = { 421 .direction = DMA_DEV_TO_MEM, 422 .src_addr = host->mem_res->start + DAVINCI_MMCDRR, 423 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 424 .src_maxburst = 425 rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, 426 }; 427 chan = host->dma_rx; 428 dmaengine_slave_config(host->dma_rx, &dma_rx_conf); 429 430 desc = dmaengine_prep_slave_sg(host->dma_rx, 431 data->sg, 432 host->sg_len, 433 DMA_DEV_TO_MEM, 434 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 435 if (!desc) { 436 dev_dbg(mmc_dev(host->mmc), 437 "failed to allocate DMA RX descriptor"); 438 ret = -1; 439 goto out; 440 } 441 } 442 443 dmaengine_submit(desc); 444 dma_async_issue_pending(chan); 445 446 out: 447 return ret; 448 } 449 450 static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, 451 struct mmc_data *data) 452 { 453 int i; 454 int mask = rw_threshold - 1; 455 int ret = 0; 456 457 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 458 mmc_get_dma_dir(data)); 459 460 /* no individual DMA segment should need a partial FIFO */ 461 for (i = 0; i < host->sg_len; i++) { 462 if (sg_dma_len(data->sg + i) & mask) { 463 dma_unmap_sg(mmc_dev(host->mmc), 464 data->sg, data->sg_len, 465 mmc_get_dma_dir(data)); 466 return -1; 467 } 468 } 469 470 host->do_dma = 1; 471 ret = mmc_davinci_send_dma_request(host, data); 472 473 return ret; 474 } 475 476 static void davinci_release_dma_channels(struct mmc_davinci_host *host) 477 { 478 if (!host->use_dma) 479 return; 480 481 dma_release_channel(host->dma_tx); 482 dma_release_channel(host->dma_rx); 483 } 484 485 static int davinci_acquire_dma_channels(struct mmc_davinci_host *host) 486 { 487 host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx"); 488 if (IS_ERR(host->dma_tx)) { 489 dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n"); 490 return PTR_ERR(host->dma_tx); 491 } 492 493 host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx"); 494 if (IS_ERR(host->dma_rx)) { 495 dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n"); 496 dma_release_channel(host->dma_tx); 497 return PTR_ERR(host->dma_rx); 498 } 499 500 return 0; 501 } 502 503 /*----------------------------------------------------------------------*/ 504 505 static void 506 mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req) 507 { 508 int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0; 509 int timeout; 510 struct mmc_data *data = req->data; 511 unsigned int flags = SG_MITER_ATOMIC; /* Used from IRQ */ 512 513 if (host->version == MMC_CTLR_VERSION_2) 514 fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0; 515 516 host->data = data; 517 if (data == NULL) { 518 host->data_dir = DAVINCI_MMC_DATADIR_NONE; 519 writel(0, host->base + DAVINCI_MMCBLEN); 520 writel(0, host->base + DAVINCI_MMCNBLK); 521 return; 522 } 523 524 dev_dbg(mmc_dev(host->mmc), "%s, %d blocks of %d bytes\n", 525 (data->flags & MMC_DATA_WRITE) ? "write" : "read", 526 data->blocks, data->blksz); 527 dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n", 528 data->timeout_clks, data->timeout_ns); 529 timeout = data->timeout_clks + 530 (data->timeout_ns / host->ns_in_one_cycle); 531 if (timeout > 0xffff) 532 timeout = 0xffff; 533 534 writel(timeout, host->base + DAVINCI_MMCTOD); 535 writel(data->blocks, host->base + DAVINCI_MMCNBLK); 536 writel(data->blksz, host->base + DAVINCI_MMCBLEN); 537 538 /* Configure the FIFO */ 539 if (data->flags & MMC_DATA_WRITE) { 540 flags |= SG_MITER_FROM_SG; 541 host->data_dir = DAVINCI_MMC_DATADIR_WRITE; 542 writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST, 543 host->base + DAVINCI_MMCFIFOCTL); 544 writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR, 545 host->base + DAVINCI_MMCFIFOCTL); 546 } else { 547 flags |= SG_MITER_TO_SG; 548 host->data_dir = DAVINCI_MMC_DATADIR_READ; 549 writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST, 550 host->base + DAVINCI_MMCFIFOCTL); 551 writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD, 552 host->base + DAVINCI_MMCFIFOCTL); 553 } 554 555 host->bytes_left = data->blocks * data->blksz; 556 557 /* For now we try to use DMA whenever we won't need partial FIFO 558 * reads or writes, either for the whole transfer (as tested here) 559 * or for any individual scatterlist segment (tested when we call 560 * start_dma_transfer). 561 * 562 * While we *could* change that, unusual block sizes are rarely 563 * used. The occasional fallback to PIO should't hurt. 564 */ 565 if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0 566 && mmc_davinci_start_dma_transfer(host, data) == 0) { 567 /* zero this to ensure we take no PIO paths */ 568 host->bytes_left = 0; 569 } else { 570 /* Revert to CPU Copy */ 571 host->sg_len = data->sg_len; 572 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 573 } 574 } 575 576 static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req) 577 { 578 struct mmc_davinci_host *host = mmc_priv(mmc); 579 unsigned long timeout = jiffies + msecs_to_jiffies(900); 580 u32 mmcst1 = 0; 581 582 /* Card may still be sending BUSY after a previous operation, 583 * typically some kind of write. If so, we can't proceed yet. 584 */ 585 while (time_before(jiffies, timeout)) { 586 mmcst1 = readl(host->base + DAVINCI_MMCST1); 587 if (!(mmcst1 & MMCST1_BUSY)) 588 break; 589 cpu_relax(); 590 } 591 if (mmcst1 & MMCST1_BUSY) { 592 dev_err(mmc_dev(host->mmc), "still BUSY? bad ... \n"); 593 req->cmd->error = -ETIMEDOUT; 594 mmc_request_done(mmc, req); 595 return; 596 } 597 598 host->do_dma = 0; 599 mmc_davinci_prepare_data(host, req); 600 mmc_davinci_start_command(host, req->cmd); 601 } 602 603 static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host, 604 unsigned int mmc_req_freq) 605 { 606 unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0; 607 608 mmc_pclk = host->mmc_input_clk; 609 if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq)) 610 mmc_push_pull_divisor = ((unsigned int)mmc_pclk 611 / (2 * mmc_req_freq)) - 1; 612 else 613 mmc_push_pull_divisor = 0; 614 615 mmc_freq = (unsigned int)mmc_pclk 616 / (2 * (mmc_push_pull_divisor + 1)); 617 618 if (mmc_freq > mmc_req_freq) 619 mmc_push_pull_divisor = mmc_push_pull_divisor + 1; 620 /* Convert ns to clock cycles */ 621 if (mmc_req_freq <= 400000) 622 host->ns_in_one_cycle = (1000000) / (((mmc_pclk 623 / (2 * (mmc_push_pull_divisor + 1)))/1000)); 624 else 625 host->ns_in_one_cycle = (1000000) / (((mmc_pclk 626 / (2 * (mmc_push_pull_divisor + 1)))/1000000)); 627 628 return mmc_push_pull_divisor; 629 } 630 631 static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios) 632 { 633 unsigned int open_drain_freq = 0, mmc_pclk = 0; 634 unsigned int mmc_push_pull_freq = 0; 635 struct mmc_davinci_host *host = mmc_priv(mmc); 636 637 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { 638 u32 temp; 639 640 /* Ignoring the init clock value passed for fixing the inter 641 * operability with different cards. 642 */ 643 open_drain_freq = ((unsigned int)mmc_pclk 644 / (2 * MMCSD_INIT_CLOCK)) - 1; 645 646 if (open_drain_freq > 0xFF) 647 open_drain_freq = 0xFF; 648 649 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; 650 temp |= open_drain_freq; 651 writel(temp, host->base + DAVINCI_MMCCLK); 652 653 /* Convert ns to clock cycles */ 654 host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000); 655 } else { 656 u32 temp; 657 mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock); 658 659 if (mmc_push_pull_freq > 0xFF) 660 mmc_push_pull_freq = 0xFF; 661 662 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN; 663 writel(temp, host->base + DAVINCI_MMCCLK); 664 665 udelay(10); 666 667 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; 668 temp |= mmc_push_pull_freq; 669 writel(temp, host->base + DAVINCI_MMCCLK); 670 671 writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); 672 673 udelay(10); 674 } 675 } 676 677 static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 678 { 679 struct mmc_davinci_host *host = mmc_priv(mmc); 680 struct platform_device *pdev = to_platform_device(mmc->parent); 681 struct davinci_mmc_config *config = pdev->dev.platform_data; 682 683 dev_dbg(mmc_dev(host->mmc), 684 "clock %dHz busmode %d powermode %d Vdd %04x\n", 685 ios->clock, ios->bus_mode, ios->power_mode, 686 ios->vdd); 687 688 switch (ios->power_mode) { 689 case MMC_POWER_OFF: 690 if (config && config->set_power) 691 config->set_power(pdev->id, false); 692 break; 693 case MMC_POWER_UP: 694 if (config && config->set_power) 695 config->set_power(pdev->id, true); 696 break; 697 } 698 699 switch (ios->bus_width) { 700 case MMC_BUS_WIDTH_8: 701 dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n"); 702 writel((readl(host->base + DAVINCI_MMCCTL) & 703 ~MMCCTL_WIDTH_4_BIT) | MMCCTL_WIDTH_8_BIT, 704 host->base + DAVINCI_MMCCTL); 705 break; 706 case MMC_BUS_WIDTH_4: 707 dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n"); 708 if (host->version == MMC_CTLR_VERSION_2) 709 writel((readl(host->base + DAVINCI_MMCCTL) & 710 ~MMCCTL_WIDTH_8_BIT) | MMCCTL_WIDTH_4_BIT, 711 host->base + DAVINCI_MMCCTL); 712 else 713 writel(readl(host->base + DAVINCI_MMCCTL) | 714 MMCCTL_WIDTH_4_BIT, 715 host->base + DAVINCI_MMCCTL); 716 break; 717 case MMC_BUS_WIDTH_1: 718 dev_dbg(mmc_dev(host->mmc), "Enabling 1 bit mode\n"); 719 if (host->version == MMC_CTLR_VERSION_2) 720 writel(readl(host->base + DAVINCI_MMCCTL) & 721 ~(MMCCTL_WIDTH_8_BIT | MMCCTL_WIDTH_4_BIT), 722 host->base + DAVINCI_MMCCTL); 723 else 724 writel(readl(host->base + DAVINCI_MMCCTL) & 725 ~MMCCTL_WIDTH_4_BIT, 726 host->base + DAVINCI_MMCCTL); 727 break; 728 } 729 730 calculate_clk_divider(mmc, ios); 731 732 host->bus_mode = ios->bus_mode; 733 if (ios->power_mode == MMC_POWER_UP) { 734 unsigned long timeout = jiffies + msecs_to_jiffies(50); 735 bool lose = true; 736 737 /* Send clock cycles, poll completion */ 738 writel(0, host->base + DAVINCI_MMCARGHL); 739 writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD); 740 while (time_before(jiffies, timeout)) { 741 u32 tmp = readl(host->base + DAVINCI_MMCST0); 742 743 if (tmp & MMCST0_RSPDNE) { 744 lose = false; 745 break; 746 } 747 cpu_relax(); 748 } 749 if (lose) 750 dev_warn(mmc_dev(host->mmc), "powerup timeout\n"); 751 } 752 753 /* FIXME on power OFF, reset things ... */ 754 } 755 756 static void 757 mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data) 758 { 759 host->data = NULL; 760 761 if (host->mmc->caps & MMC_CAP_SDIO_IRQ) { 762 /* 763 * SDIO Interrupt Detection work-around as suggested by 764 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata 765 * 2.1.6): Signal SDIO interrupt only if it is enabled by core 766 */ 767 if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) & 768 SDIOST0_DAT1_HI)) { 769 writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); 770 mmc_signal_sdio_irq(host->mmc); 771 } 772 } 773 774 if (host->do_dma) { 775 davinci_abort_dma(host); 776 777 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 778 mmc_get_dma_dir(data)); 779 host->do_dma = false; 780 } 781 host->data_dir = DAVINCI_MMC_DATADIR_NONE; 782 783 if (!data->stop || (host->cmd && host->cmd->error)) { 784 mmc_request_done(host->mmc, data->mrq); 785 writel(0, host->base + DAVINCI_MMCIM); 786 host->active_request = false; 787 } else 788 mmc_davinci_start_command(host, data->stop); 789 } 790 791 static void mmc_davinci_cmd_done(struct mmc_davinci_host *host, 792 struct mmc_command *cmd) 793 { 794 host->cmd = NULL; 795 796 if (cmd->flags & MMC_RSP_PRESENT) { 797 if (cmd->flags & MMC_RSP_136) { 798 /* response type 2 */ 799 cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01); 800 cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23); 801 cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45); 802 cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); 803 } else { 804 /* response types 1, 1b, 3, 4, 5, 6 */ 805 cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); 806 } 807 } 808 809 if (host->data == NULL || cmd->error) { 810 if (cmd->error == -ETIMEDOUT) 811 cmd->mrq->cmd->retries = 0; 812 mmc_request_done(host->mmc, cmd->mrq); 813 writel(0, host->base + DAVINCI_MMCIM); 814 host->active_request = false; 815 } 816 } 817 818 static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host, 819 int val) 820 { 821 u32 temp; 822 823 temp = readl(host->base + DAVINCI_MMCCTL); 824 if (val) /* reset */ 825 temp |= MMCCTL_CMDRST | MMCCTL_DATRST; 826 else /* enable */ 827 temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST); 828 829 writel(temp, host->base + DAVINCI_MMCCTL); 830 udelay(10); 831 } 832 833 static void 834 davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data) 835 { 836 mmc_davinci_reset_ctrl(host, 1); 837 mmc_davinci_reset_ctrl(host, 0); 838 if (!host->do_dma) 839 sg_miter_stop(&host->sg_miter); 840 } 841 842 static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id) 843 { 844 struct mmc_davinci_host *host = dev_id; 845 unsigned int status; 846 847 status = readl(host->base + DAVINCI_SDIOIST); 848 if (status & SDIOIST_IOINT) { 849 dev_dbg(mmc_dev(host->mmc), 850 "SDIO interrupt status %x\n", status); 851 writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); 852 mmc_signal_sdio_irq(host->mmc); 853 } 854 return IRQ_HANDLED; 855 } 856 857 static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) 858 { 859 struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id; 860 unsigned int status, qstatus; 861 int end_command = 0; 862 int end_transfer = 0; 863 struct mmc_data *data = host->data; 864 865 if (host->cmd == NULL && host->data == NULL) { 866 status = readl(host->base + DAVINCI_MMCST0); 867 dev_dbg(mmc_dev(host->mmc), 868 "Spurious interrupt 0x%04x\n", status); 869 /* Disable the interrupt from mmcsd */ 870 writel(0, host->base + DAVINCI_MMCIM); 871 return IRQ_NONE; 872 } 873 874 status = readl(host->base + DAVINCI_MMCST0); 875 qstatus = status; 876 877 /* handle FIFO first when using PIO for data. 878 * bytes_left will decrease to zero as I/O progress and status will 879 * read zero over iteration because this controller status 880 * register(MMCST0) reports any status only once and it is cleared 881 * by read. So, it is not unbouned loop even in the case of 882 * non-dma. 883 */ 884 if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) { 885 unsigned long im_val; 886 887 /* 888 * If interrupts fire during the following loop, they will be 889 * handled by the handler, but the PIC will still buffer these. 890 * As a result, the handler will be called again to serve these 891 * needlessly. In order to avoid these spurious interrupts, 892 * keep interrupts masked during the loop. 893 */ 894 im_val = readl(host->base + DAVINCI_MMCIM); 895 writel(0, host->base + DAVINCI_MMCIM); 896 897 do { 898 davinci_fifo_data_trans(host, rw_threshold); 899 status = readl(host->base + DAVINCI_MMCST0); 900 qstatus |= status; 901 } while (host->bytes_left && 902 (status & (MMCST0_DXRDY | MMCST0_DRRDY))); 903 904 /* 905 * If an interrupt is pending, it is assumed it will fire when 906 * it is unmasked. This assumption is also taken when the MMCIM 907 * is first set. Otherwise, writing to MMCIM after reading the 908 * status is race-prone. 909 */ 910 writel(im_val, host->base + DAVINCI_MMCIM); 911 } 912 913 if (qstatus & MMCST0_DATDNE) { 914 /* All blocks sent/received, and CRC checks passed */ 915 if (data != NULL) { 916 if (!host->do_dma) { 917 if (host->bytes_left > 0) 918 /* if datasize < rw_threshold 919 * no RX ints are generated 920 */ 921 davinci_fifo_data_trans(host, host->bytes_left); 922 sg_miter_stop(&host->sg_miter); 923 } 924 end_transfer = 1; 925 data->bytes_xfered = data->blocks * data->blksz; 926 } else { 927 dev_err(mmc_dev(host->mmc), 928 "DATDNE with no host->data\n"); 929 } 930 } 931 932 if (qstatus & MMCST0_TOUTRD) { 933 /* Read data timeout */ 934 data->error = -ETIMEDOUT; 935 end_transfer = 1; 936 937 dev_dbg(mmc_dev(host->mmc), 938 "read data timeout, status %x\n", 939 qstatus); 940 941 davinci_abort_data(host, data); 942 } 943 944 if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) { 945 /* Data CRC error */ 946 data->error = -EILSEQ; 947 end_transfer = 1; 948 949 /* NOTE: this controller uses CRCWR to report both CRC 950 * errors and timeouts (on writes). MMCDRSP values are 951 * only weakly documented, but 0x9f was clearly a timeout 952 * case and the two three-bit patterns in various SD specs 953 * (101, 010) aren't part of it ... 954 */ 955 if (qstatus & MMCST0_CRCWR) { 956 u32 temp = readb(host->base + DAVINCI_MMCDRSP); 957 958 if (temp == 0x9f) 959 data->error = -ETIMEDOUT; 960 } 961 dev_dbg(mmc_dev(host->mmc), "data %s %s error\n", 962 (qstatus & MMCST0_CRCWR) ? "write" : "read", 963 (data->error == -ETIMEDOUT) ? "timeout" : "CRC"); 964 965 davinci_abort_data(host, data); 966 } 967 968 if (qstatus & MMCST0_TOUTRS) { 969 /* Command timeout */ 970 if (host->cmd) { 971 dev_dbg(mmc_dev(host->mmc), 972 "CMD%d timeout, status %x\n", 973 host->cmd->opcode, qstatus); 974 host->cmd->error = -ETIMEDOUT; 975 if (data) { 976 end_transfer = 1; 977 davinci_abort_data(host, data); 978 } else 979 end_command = 1; 980 } 981 } 982 983 if (qstatus & MMCST0_CRCRS) { 984 /* Command CRC error */ 985 dev_dbg(mmc_dev(host->mmc), "Command CRC error\n"); 986 if (host->cmd) { 987 host->cmd->error = -EILSEQ; 988 end_command = 1; 989 } 990 } 991 992 if (qstatus & MMCST0_RSPDNE) { 993 /* End of command phase */ 994 end_command = host->cmd ? 1 : 0; 995 } 996 997 if (end_command) 998 mmc_davinci_cmd_done(host, host->cmd); 999 if (end_transfer) 1000 mmc_davinci_xfer_done(host, data); 1001 return IRQ_HANDLED; 1002 } 1003 1004 static int mmc_davinci_get_cd(struct mmc_host *mmc) 1005 { 1006 struct platform_device *pdev = to_platform_device(mmc->parent); 1007 struct davinci_mmc_config *config = pdev->dev.platform_data; 1008 1009 if (config && config->get_cd) 1010 return config->get_cd(pdev->id); 1011 1012 return mmc_gpio_get_cd(mmc); 1013 } 1014 1015 static int mmc_davinci_get_ro(struct mmc_host *mmc) 1016 { 1017 struct platform_device *pdev = to_platform_device(mmc->parent); 1018 struct davinci_mmc_config *config = pdev->dev.platform_data; 1019 1020 if (config && config->get_ro) 1021 return config->get_ro(pdev->id); 1022 1023 return mmc_gpio_get_ro(mmc); 1024 } 1025 1026 static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable) 1027 { 1028 struct mmc_davinci_host *host = mmc_priv(mmc); 1029 1030 if (enable) { 1031 if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) { 1032 writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); 1033 mmc_signal_sdio_irq(host->mmc); 1034 } else { 1035 host->sdio_int = true; 1036 writel(readl(host->base + DAVINCI_SDIOIEN) | 1037 SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN); 1038 } 1039 } else { 1040 host->sdio_int = false; 1041 writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN, 1042 host->base + DAVINCI_SDIOIEN); 1043 } 1044 } 1045 1046 static const struct mmc_host_ops mmc_davinci_ops = { 1047 .request = mmc_davinci_request, 1048 .set_ios = mmc_davinci_set_ios, 1049 .get_cd = mmc_davinci_get_cd, 1050 .get_ro = mmc_davinci_get_ro, 1051 .enable_sdio_irq = mmc_davinci_enable_sdio_irq, 1052 }; 1053 1054 /*----------------------------------------------------------------------*/ 1055 1056 #ifdef CONFIG_CPU_FREQ 1057 static int mmc_davinci_cpufreq_transition(struct notifier_block *nb, 1058 unsigned long val, void *data) 1059 { 1060 struct mmc_davinci_host *host; 1061 unsigned int mmc_pclk; 1062 struct mmc_host *mmc; 1063 unsigned long flags; 1064 1065 host = container_of(nb, struct mmc_davinci_host, freq_transition); 1066 mmc = host->mmc; 1067 mmc_pclk = clk_get_rate(host->clk); 1068 1069 if (val == CPUFREQ_POSTCHANGE) { 1070 spin_lock_irqsave(&mmc->lock, flags); 1071 host->mmc_input_clk = mmc_pclk; 1072 calculate_clk_divider(mmc, &mmc->ios); 1073 spin_unlock_irqrestore(&mmc->lock, flags); 1074 } 1075 1076 return 0; 1077 } 1078 1079 static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) 1080 { 1081 host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition; 1082 1083 return cpufreq_register_notifier(&host->freq_transition, 1084 CPUFREQ_TRANSITION_NOTIFIER); 1085 } 1086 1087 static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) 1088 { 1089 cpufreq_unregister_notifier(&host->freq_transition, 1090 CPUFREQ_TRANSITION_NOTIFIER); 1091 } 1092 #else 1093 static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) 1094 { 1095 return 0; 1096 } 1097 1098 static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) 1099 { 1100 } 1101 #endif 1102 static void init_mmcsd_host(struct mmc_davinci_host *host) 1103 { 1104 1105 mmc_davinci_reset_ctrl(host, 1); 1106 1107 writel(0, host->base + DAVINCI_MMCCLK); 1108 writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); 1109 1110 writel(0x1FFF, host->base + DAVINCI_MMCTOR); 1111 writel(0xFFFF, host->base + DAVINCI_MMCTOD); 1112 1113 mmc_davinci_reset_ctrl(host, 0); 1114 } 1115 1116 static const struct platform_device_id davinci_mmc_devtype[] = { 1117 { 1118 .name = "dm6441-mmc", 1119 .driver_data = MMC_CTLR_VERSION_1, 1120 }, { 1121 .name = "da830-mmc", 1122 .driver_data = MMC_CTLR_VERSION_2, 1123 }, 1124 {}, 1125 }; 1126 MODULE_DEVICE_TABLE(platform, davinci_mmc_devtype); 1127 1128 static const struct of_device_id davinci_mmc_dt_ids[] = { 1129 { 1130 .compatible = "ti,dm6441-mmc", 1131 .data = &davinci_mmc_devtype[MMC_CTLR_VERSION_1], 1132 }, 1133 { 1134 .compatible = "ti,da830-mmc", 1135 .data = &davinci_mmc_devtype[MMC_CTLR_VERSION_2], 1136 }, 1137 {}, 1138 }; 1139 MODULE_DEVICE_TABLE(of, davinci_mmc_dt_ids); 1140 1141 static int mmc_davinci_parse_pdata(struct mmc_host *mmc) 1142 { 1143 struct platform_device *pdev = to_platform_device(mmc->parent); 1144 struct davinci_mmc_config *pdata = pdev->dev.platform_data; 1145 struct mmc_davinci_host *host; 1146 int ret; 1147 1148 if (!pdata) 1149 return -EINVAL; 1150 1151 host = mmc_priv(mmc); 1152 if (!host) 1153 return -EINVAL; 1154 1155 if (pdata && pdata->nr_sg) 1156 host->nr_sg = pdata->nr_sg - 1; 1157 1158 if (pdata && (pdata->wires == 4 || pdata->wires == 0)) 1159 mmc->caps |= MMC_CAP_4_BIT_DATA; 1160 1161 if (pdata && (pdata->wires == 8)) 1162 mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA); 1163 1164 mmc->f_min = 312500; 1165 mmc->f_max = 25000000; 1166 if (pdata && pdata->max_freq) 1167 mmc->f_max = pdata->max_freq; 1168 if (pdata && pdata->caps) 1169 mmc->caps |= pdata->caps; 1170 1171 /* Register a cd gpio, if there is not one, enable polling */ 1172 ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); 1173 if (ret == -EPROBE_DEFER) 1174 return ret; 1175 else if (ret) 1176 mmc->caps |= MMC_CAP_NEEDS_POLL; 1177 1178 ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0); 1179 if (ret == -EPROBE_DEFER) 1180 return ret; 1181 1182 return 0; 1183 } 1184 1185 static int davinci_mmcsd_probe(struct platform_device *pdev) 1186 { 1187 struct mmc_davinci_host *host = NULL; 1188 struct mmc_host *mmc = NULL; 1189 struct resource *r, *mem = NULL; 1190 int ret, irq, bus_width; 1191 size_t mem_size; 1192 const struct platform_device_id *id_entry; 1193 1194 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1195 if (!r) 1196 return -ENODEV; 1197 irq = platform_get_irq(pdev, 0); 1198 if (irq < 0) 1199 return irq; 1200 1201 mem_size = resource_size(r); 1202 mem = devm_request_mem_region(&pdev->dev, r->start, mem_size, 1203 pdev->name); 1204 if (!mem) 1205 return -EBUSY; 1206 1207 mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev); 1208 if (!mmc) 1209 return -ENOMEM; 1210 1211 host = mmc_priv(mmc); 1212 host->mmc = mmc; /* Important */ 1213 1214 host->mem_res = mem; 1215 host->base = devm_ioremap(&pdev->dev, mem->start, mem_size); 1216 if (!host->base) { 1217 ret = -ENOMEM; 1218 goto ioremap_fail; 1219 } 1220 1221 host->clk = devm_clk_get(&pdev->dev, NULL); 1222 if (IS_ERR(host->clk)) { 1223 ret = PTR_ERR(host->clk); 1224 goto clk_get_fail; 1225 } 1226 ret = clk_prepare_enable(host->clk); 1227 if (ret) 1228 goto clk_prepare_enable_fail; 1229 1230 host->mmc_input_clk = clk_get_rate(host->clk); 1231 1232 pdev->id_entry = of_device_get_match_data(&pdev->dev); 1233 if (pdev->id_entry) { 1234 ret = mmc_of_parse(mmc); 1235 if (ret) { 1236 dev_err_probe(&pdev->dev, ret, 1237 "could not parse of data\n"); 1238 goto parse_fail; 1239 } 1240 } else { 1241 ret = mmc_davinci_parse_pdata(mmc); 1242 if (ret) { 1243 dev_err(&pdev->dev, 1244 "could not parse platform data: %d\n", ret); 1245 goto parse_fail; 1246 } } 1247 1248 if (host->nr_sg > MAX_NR_SG || !host->nr_sg) 1249 host->nr_sg = MAX_NR_SG; 1250 1251 init_mmcsd_host(host); 1252 1253 host->use_dma = use_dma; 1254 host->mmc_irq = irq; 1255 host->sdio_irq = platform_get_irq_optional(pdev, 1); 1256 1257 if (host->use_dma) { 1258 ret = davinci_acquire_dma_channels(host); 1259 if (ret == -EPROBE_DEFER) 1260 goto dma_probe_defer; 1261 else if (ret) 1262 host->use_dma = 0; 1263 } 1264 1265 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; 1266 1267 id_entry = platform_get_device_id(pdev); 1268 if (id_entry) 1269 host->version = id_entry->driver_data; 1270 1271 mmc->ops = &mmc_davinci_ops; 1272 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1273 1274 /* With no iommu coalescing pages, each phys_seg is a hw_seg. 1275 * Each hw_seg uses one EDMA parameter RAM slot, always one 1276 * channel and then usually some linked slots. 1277 */ 1278 mmc->max_segs = MAX_NR_SG; 1279 1280 /* EDMA limit per hw segment (one or two MBytes) */ 1281 mmc->max_seg_size = MAX_CCNT * rw_threshold; 1282 1283 /* MMC/SD controller limits for multiblock requests */ 1284 mmc->max_blk_size = 4095; /* BLEN is 12 bits */ 1285 mmc->max_blk_count = 65535; /* NBLK is 16 bits */ 1286 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1287 1288 dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs); 1289 dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size); 1290 dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size); 1291 dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size); 1292 1293 platform_set_drvdata(pdev, host); 1294 1295 ret = mmc_davinci_cpufreq_register(host); 1296 if (ret) { 1297 dev_err(&pdev->dev, "failed to register cpufreq\n"); 1298 goto cpu_freq_fail; 1299 } 1300 1301 ret = mmc_add_host(mmc); 1302 if (ret < 0) 1303 goto mmc_add_host_fail; 1304 1305 ret = devm_request_irq(&pdev->dev, irq, mmc_davinci_irq, 0, 1306 mmc_hostname(mmc), host); 1307 if (ret) 1308 goto request_irq_fail; 1309 1310 if (host->sdio_irq >= 0) { 1311 ret = devm_request_irq(&pdev->dev, host->sdio_irq, 1312 mmc_davinci_sdio_irq, 0, 1313 mmc_hostname(mmc), host); 1314 if (!ret) 1315 mmc->caps |= MMC_CAP_SDIO_IRQ; 1316 } 1317 1318 rename_region(mem, mmc_hostname(mmc)); 1319 1320 if (mmc->caps & MMC_CAP_8_BIT_DATA) 1321 bus_width = 8; 1322 else if (mmc->caps & MMC_CAP_4_BIT_DATA) 1323 bus_width = 4; 1324 else 1325 bus_width = 1; 1326 dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n", 1327 host->use_dma ? "DMA" : "PIO", bus_width); 1328 1329 return 0; 1330 1331 request_irq_fail: 1332 mmc_remove_host(mmc); 1333 mmc_add_host_fail: 1334 mmc_davinci_cpufreq_deregister(host); 1335 cpu_freq_fail: 1336 davinci_release_dma_channels(host); 1337 parse_fail: 1338 dma_probe_defer: 1339 clk_disable_unprepare(host->clk); 1340 clk_prepare_enable_fail: 1341 clk_get_fail: 1342 ioremap_fail: 1343 mmc_free_host(mmc); 1344 1345 return ret; 1346 } 1347 1348 static void davinci_mmcsd_remove(struct platform_device *pdev) 1349 { 1350 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1351 1352 mmc_remove_host(host->mmc); 1353 mmc_davinci_cpufreq_deregister(host); 1354 davinci_release_dma_channels(host); 1355 clk_disable_unprepare(host->clk); 1356 mmc_free_host(host->mmc); 1357 } 1358 1359 #ifdef CONFIG_PM 1360 static int davinci_mmcsd_suspend(struct device *dev) 1361 { 1362 struct mmc_davinci_host *host = dev_get_drvdata(dev); 1363 1364 writel(0, host->base + DAVINCI_MMCIM); 1365 mmc_davinci_reset_ctrl(host, 1); 1366 clk_disable(host->clk); 1367 1368 return 0; 1369 } 1370 1371 static int davinci_mmcsd_resume(struct device *dev) 1372 { 1373 struct mmc_davinci_host *host = dev_get_drvdata(dev); 1374 int ret; 1375 1376 ret = clk_enable(host->clk); 1377 if (ret) 1378 return ret; 1379 1380 mmc_davinci_reset_ctrl(host, 0); 1381 1382 return 0; 1383 } 1384 1385 static const struct dev_pm_ops davinci_mmcsd_pm = { 1386 .suspend = davinci_mmcsd_suspend, 1387 .resume = davinci_mmcsd_resume, 1388 }; 1389 1390 #define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm) 1391 #else 1392 #define davinci_mmcsd_pm_ops NULL 1393 #endif 1394 1395 static struct platform_driver davinci_mmcsd_driver = { 1396 .driver = { 1397 .name = "davinci_mmc", 1398 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 1399 .pm = davinci_mmcsd_pm_ops, 1400 .of_match_table = davinci_mmc_dt_ids, 1401 }, 1402 .probe = davinci_mmcsd_probe, 1403 .remove_new = davinci_mmcsd_remove, 1404 .id_table = davinci_mmc_devtype, 1405 }; 1406 1407 module_platform_driver(davinci_mmcsd_driver); 1408 1409 MODULE_AUTHOR("Texas Instruments India"); 1410 MODULE_LICENSE("GPL"); 1411 MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller"); 1412 MODULE_ALIAS("platform:davinci_mmc"); 1413 1414