1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver 4 * 5 * Copyright (C) 2006 Texas Instruments. 6 * Original author: Purushotam Kumar 7 * Copyright (C) 2009 David Brownell 8 */ 9 10 #include <linux/module.h> 11 #include <linux/ioport.h> 12 #include <linux/platform_device.h> 13 #include <linux/clk.h> 14 #include <linux/err.h> 15 #include <linux/cpufreq.h> 16 #include <linux/mmc/host.h> 17 #include <linux/io.h> 18 #include <linux/irq.h> 19 #include <linux/delay.h> 20 #include <linux/dmaengine.h> 21 #include <linux/dma-mapping.h> 22 #include <linux/mmc/mmc.h> 23 #include <linux/of.h> 24 #include <linux/mmc/slot-gpio.h> 25 #include <linux/interrupt.h> 26 27 #include <linux/platform_data/mmc-davinci.h> 28 29 /* 30 * Register Definitions 31 */ 32 #define DAVINCI_MMCCTL 0x00 /* Control Register */ 33 #define DAVINCI_MMCCLK 0x04 /* Memory Clock Control Register */ 34 #define DAVINCI_MMCST0 0x08 /* Status Register 0 */ 35 #define DAVINCI_MMCST1 0x0C /* Status Register 1 */ 36 #define DAVINCI_MMCIM 0x10 /* Interrupt Mask Register */ 37 #define DAVINCI_MMCTOR 0x14 /* Response Time-Out Register */ 38 #define DAVINCI_MMCTOD 0x18 /* Data Read Time-Out Register */ 39 #define DAVINCI_MMCBLEN 0x1C /* Block Length Register */ 40 #define DAVINCI_MMCNBLK 0x20 /* Number of Blocks Register */ 41 #define DAVINCI_MMCNBLC 0x24 /* Number of Blocks Counter Register */ 42 #define DAVINCI_MMCDRR 0x28 /* Data Receive Register */ 43 #define DAVINCI_MMCDXR 0x2C /* Data Transmit Register */ 44 #define DAVINCI_MMCCMD 0x30 /* Command Register */ 45 #define DAVINCI_MMCARGHL 0x34 /* Argument Register */ 46 #define DAVINCI_MMCRSP01 0x38 /* Response Register 0 and 1 */ 47 #define DAVINCI_MMCRSP23 0x3C /* Response Register 0 and 1 */ 48 #define DAVINCI_MMCRSP45 0x40 /* Response Register 0 and 1 */ 49 #define DAVINCI_MMCRSP67 0x44 /* Response Register 0 and 1 */ 50 #define DAVINCI_MMCDRSP 0x48 /* Data Response Register */ 51 #define DAVINCI_MMCETOK 0x4C 52 #define DAVINCI_MMCCIDX 0x50 /* Command Index Register */ 53 #define DAVINCI_MMCCKC 0x54 54 #define DAVINCI_MMCTORC 0x58 55 #define DAVINCI_MMCTODC 0x5C 56 #define DAVINCI_MMCBLNC 0x60 57 #define DAVINCI_SDIOCTL 0x64 58 #define DAVINCI_SDIOST0 0x68 59 #define DAVINCI_SDIOIEN 0x6C 60 #define DAVINCI_SDIOIST 0x70 61 #define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */ 62 63 /* DAVINCI_MMCCTL definitions */ 64 #define MMCCTL_DATRST (1 << 0) 65 #define MMCCTL_CMDRST (1 << 1) 66 #define MMCCTL_WIDTH_8_BIT (1 << 8) 67 #define MMCCTL_WIDTH_4_BIT (1 << 2) 68 #define MMCCTL_DATEG_DISABLED (0 << 6) 69 #define MMCCTL_DATEG_RISING (1 << 6) 70 #define MMCCTL_DATEG_FALLING (2 << 6) 71 #define MMCCTL_DATEG_BOTH (3 << 6) 72 #define MMCCTL_PERMDR_LE (0 << 9) 73 #define MMCCTL_PERMDR_BE (1 << 9) 74 #define MMCCTL_PERMDX_LE (0 << 10) 75 #define MMCCTL_PERMDX_BE (1 << 10) 76 77 /* DAVINCI_MMCCLK definitions */ 78 #define MMCCLK_CLKEN (1 << 8) 79 #define MMCCLK_CLKRT_MASK (0xFF << 0) 80 81 /* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */ 82 #define MMCST0_DATDNE BIT(0) /* data done */ 83 #define MMCST0_BSYDNE BIT(1) /* busy done */ 84 #define MMCST0_RSPDNE BIT(2) /* command done */ 85 #define MMCST0_TOUTRD BIT(3) /* data read timeout */ 86 #define MMCST0_TOUTRS BIT(4) /* command response timeout */ 87 #define MMCST0_CRCWR BIT(5) /* data write CRC error */ 88 #define MMCST0_CRCRD BIT(6) /* data read CRC error */ 89 #define MMCST0_CRCRS BIT(7) /* command response CRC error */ 90 #define MMCST0_DXRDY BIT(9) /* data transmit ready (fifo empty) */ 91 #define MMCST0_DRRDY BIT(10) /* data receive ready (data in fifo)*/ 92 #define MMCST0_DATED BIT(11) /* DAT3 edge detect */ 93 #define MMCST0_TRNDNE BIT(12) /* transfer done */ 94 95 /* DAVINCI_MMCST1 definitions */ 96 #define MMCST1_BUSY (1 << 0) 97 98 /* DAVINCI_MMCCMD definitions */ 99 #define MMCCMD_CMD_MASK (0x3F << 0) 100 #define MMCCMD_PPLEN (1 << 7) 101 #define MMCCMD_BSYEXP (1 << 8) 102 #define MMCCMD_RSPFMT_MASK (3 << 9) 103 #define MMCCMD_RSPFMT_NONE (0 << 9) 104 #define MMCCMD_RSPFMT_R1456 (1 << 9) 105 #define MMCCMD_RSPFMT_R2 (2 << 9) 106 #define MMCCMD_RSPFMT_R3 (3 << 9) 107 #define MMCCMD_DTRW (1 << 11) 108 #define MMCCMD_STRMTP (1 << 12) 109 #define MMCCMD_WDATX (1 << 13) 110 #define MMCCMD_INITCK (1 << 14) 111 #define MMCCMD_DCLR (1 << 15) 112 #define MMCCMD_DMATRIG (1 << 16) 113 114 /* DAVINCI_MMCFIFOCTL definitions */ 115 #define MMCFIFOCTL_FIFORST (1 << 0) 116 #define MMCFIFOCTL_FIFODIR_WR (1 << 1) 117 #define MMCFIFOCTL_FIFODIR_RD (0 << 1) 118 #define MMCFIFOCTL_FIFOLEV (1 << 2) /* 0 = 128 bits, 1 = 256 bits */ 119 #define MMCFIFOCTL_ACCWD_4 (0 << 3) /* access width of 4 bytes */ 120 #define MMCFIFOCTL_ACCWD_3 (1 << 3) /* access width of 3 bytes */ 121 #define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */ 122 #define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */ 123 124 /* DAVINCI_SDIOST0 definitions */ 125 #define SDIOST0_DAT1_HI BIT(0) 126 127 /* DAVINCI_SDIOIEN definitions */ 128 #define SDIOIEN_IOINTEN BIT(0) 129 130 /* DAVINCI_SDIOIST definitions */ 131 #define SDIOIST_IOINT BIT(0) 132 133 /* MMCSD Init clock in Hz in opendrain mode */ 134 #define MMCSD_INIT_CLOCK 200000 135 136 /* 137 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units, 138 * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only 139 * for drivers with max_segs == 1, making the segments bigger (64KB) 140 * than the page or two that's otherwise typical. nr_sg (passed from 141 * platform data) == 16 gives at least the same throughput boost, using 142 * EDMA transfer linkage instead of spending CPU time copying pages. 143 */ 144 #define MAX_CCNT ((1 << 16) - 1) 145 146 #define MAX_NR_SG 16 147 148 static unsigned rw_threshold = 32; 149 module_param(rw_threshold, uint, S_IRUGO); 150 MODULE_PARM_DESC(rw_threshold, 151 "Read/Write threshold. Default = 32"); 152 153 static unsigned poll_threshold = 128; 154 module_param(poll_threshold, uint, S_IRUGO); 155 MODULE_PARM_DESC(poll_threshold, 156 "Polling transaction size threshold. Default = 128"); 157 158 static unsigned poll_loopcount = 32; 159 module_param(poll_loopcount, uint, S_IRUGO); 160 MODULE_PARM_DESC(poll_loopcount, 161 "Maximum polling loop count. Default = 32"); 162 163 static unsigned use_dma = 1; 164 module_param(use_dma, uint, 0); 165 MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1"); 166 167 struct mmc_davinci_host { 168 struct mmc_command *cmd; 169 struct mmc_data *data; 170 struct mmc_host *mmc; 171 struct clk *clk; 172 unsigned int mmc_input_clk; 173 void __iomem *base; 174 struct resource *mem_res; 175 int mmc_irq, sdio_irq; 176 unsigned char bus_mode; 177 178 #define DAVINCI_MMC_DATADIR_NONE 0 179 #define DAVINCI_MMC_DATADIR_READ 1 180 #define DAVINCI_MMC_DATADIR_WRITE 2 181 unsigned char data_dir; 182 183 u32 bytes_left; 184 185 struct dma_chan *dma_tx; 186 struct dma_chan *dma_rx; 187 bool use_dma; 188 bool do_dma; 189 bool sdio_int; 190 bool active_request; 191 192 /* For PIO we walk scatterlists one segment at a time. */ 193 struct sg_mapping_iter sg_miter; 194 unsigned int sg_len; 195 196 /* Version of the MMC/SD controller */ 197 u8 version; 198 /* for ns in one cycle calculation */ 199 unsigned ns_in_one_cycle; 200 /* Number of sg segments */ 201 u8 nr_sg; 202 #ifdef CONFIG_CPU_FREQ 203 struct notifier_block freq_transition; 204 #endif 205 }; 206 207 static irqreturn_t mmc_davinci_irq(int irq, void *dev_id); 208 209 /* PIO only */ 210 static void davinci_fifo_data_trans(struct mmc_davinci_host *host, 211 unsigned int n) 212 { 213 struct sg_mapping_iter *sgm = &host->sg_miter; 214 u8 *p; 215 unsigned int i; 216 217 /* 218 * By adjusting sgm->consumed this will give a pointer to the 219 * current index into the sgm. 220 */ 221 if (!sg_miter_next(sgm)) { 222 dev_err(mmc_dev(host->mmc), "ran out of sglist prematurely\n"); 223 return; 224 } 225 p = sgm->addr; 226 227 /* NOTE: we never transfer more than rw_threshold bytes 228 * to/from the fifo here; there's no I/O overlap. 229 * This also assumes that access width( i.e. ACCWD) is 4 bytes 230 */ 231 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 232 for (i = 0; i < (n >> 2); i++) { 233 writel(*((u32 *)p), host->base + DAVINCI_MMCDXR); 234 p = p + 4; 235 } 236 if (n & 3) { 237 iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3)); 238 p = p + (n & 3); 239 } 240 } else { 241 for (i = 0; i < (n >> 2); i++) { 242 *((u32 *)p) = readl(host->base + DAVINCI_MMCDRR); 243 p = p + 4; 244 } 245 if (n & 3) { 246 ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3)); 247 p = p + (n & 3); 248 } 249 } 250 251 sgm->consumed = n; 252 host->bytes_left -= n; 253 } 254 255 static void mmc_davinci_start_command(struct mmc_davinci_host *host, 256 struct mmc_command *cmd) 257 { 258 u32 cmd_reg = 0; 259 u32 im_val; 260 261 dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n", 262 cmd->opcode, cmd->arg, 263 ({ char *s; 264 switch (mmc_resp_type(cmd)) { 265 case MMC_RSP_R1: 266 s = ", R1/R5/R6/R7 response"; 267 break; 268 case MMC_RSP_R1B: 269 s = ", R1b response"; 270 break; 271 case MMC_RSP_R2: 272 s = ", R2 response"; 273 break; 274 case MMC_RSP_R3: 275 s = ", R3/R4 response"; 276 break; 277 default: 278 s = ", (R? response)"; 279 break; 280 } s; })); 281 host->cmd = cmd; 282 283 switch (mmc_resp_type(cmd)) { 284 case MMC_RSP_R1B: 285 /* There's some spec confusion about when R1B is 286 * allowed, but if the card doesn't issue a BUSY 287 * then it's harmless for us to allow it. 288 */ 289 cmd_reg |= MMCCMD_BSYEXP; 290 fallthrough; 291 case MMC_RSP_R1: /* 48 bits, CRC */ 292 cmd_reg |= MMCCMD_RSPFMT_R1456; 293 break; 294 case MMC_RSP_R2: /* 136 bits, CRC */ 295 cmd_reg |= MMCCMD_RSPFMT_R2; 296 break; 297 case MMC_RSP_R3: /* 48 bits, no CRC */ 298 cmd_reg |= MMCCMD_RSPFMT_R3; 299 break; 300 default: 301 cmd_reg |= MMCCMD_RSPFMT_NONE; 302 dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n", 303 mmc_resp_type(cmd)); 304 break; 305 } 306 307 /* Set command index */ 308 cmd_reg |= cmd->opcode; 309 310 /* Enable EDMA transfer triggers */ 311 if (host->do_dma) 312 cmd_reg |= MMCCMD_DMATRIG; 313 314 if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL && 315 host->data_dir == DAVINCI_MMC_DATADIR_READ) 316 cmd_reg |= MMCCMD_DMATRIG; 317 318 /* Setting whether command involves data transfer or not */ 319 if (cmd->data) 320 cmd_reg |= MMCCMD_WDATX; 321 322 /* Setting whether data read or write */ 323 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) 324 cmd_reg |= MMCCMD_DTRW; 325 326 if (host->bus_mode == MMC_BUSMODE_PUSHPULL) 327 cmd_reg |= MMCCMD_PPLEN; 328 329 /* set Command timeout */ 330 writel(0x1FFF, host->base + DAVINCI_MMCTOR); 331 332 /* Enable interrupt (calculate here, defer until FIFO is stuffed). */ 333 im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS; 334 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 335 im_val |= MMCST0_DATDNE | MMCST0_CRCWR; 336 337 if (!host->do_dma) 338 im_val |= MMCST0_DXRDY; 339 } else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) { 340 im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD; 341 342 if (!host->do_dma) 343 im_val |= MMCST0_DRRDY; 344 } 345 346 /* 347 * Before non-DMA WRITE commands the controller needs priming: 348 * FIFO should be populated with 32 bytes i.e. whatever is the FIFO size 349 */ 350 if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)) 351 davinci_fifo_data_trans(host, rw_threshold); 352 353 writel(cmd->arg, host->base + DAVINCI_MMCARGHL); 354 writel(cmd_reg, host->base + DAVINCI_MMCCMD); 355 356 host->active_request = true; 357 358 if (!host->do_dma && host->bytes_left <= poll_threshold) { 359 u32 count = poll_loopcount; 360 361 while (host->active_request && count--) { 362 mmc_davinci_irq(0, host); 363 cpu_relax(); 364 } 365 } 366 367 if (host->active_request) 368 writel(im_val, host->base + DAVINCI_MMCIM); 369 } 370 371 /*----------------------------------------------------------------------*/ 372 373 /* DMA infrastructure */ 374 375 static void davinci_abort_dma(struct mmc_davinci_host *host) 376 { 377 struct dma_chan *sync_dev; 378 379 if (host->data_dir == DAVINCI_MMC_DATADIR_READ) 380 sync_dev = host->dma_rx; 381 else 382 sync_dev = host->dma_tx; 383 384 dmaengine_terminate_all(sync_dev); 385 } 386 387 static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host, 388 struct mmc_data *data) 389 { 390 struct dma_chan *chan; 391 struct dma_async_tx_descriptor *desc; 392 int ret = 0; 393 394 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 395 struct dma_slave_config dma_tx_conf = { 396 .direction = DMA_MEM_TO_DEV, 397 .dst_addr = host->mem_res->start + DAVINCI_MMCDXR, 398 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 399 .dst_maxburst = 400 rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, 401 }; 402 chan = host->dma_tx; 403 dmaengine_slave_config(host->dma_tx, &dma_tx_conf); 404 405 desc = dmaengine_prep_slave_sg(host->dma_tx, 406 data->sg, 407 host->sg_len, 408 DMA_MEM_TO_DEV, 409 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 410 if (!desc) { 411 dev_dbg(mmc_dev(host->mmc), 412 "failed to allocate DMA TX descriptor"); 413 ret = -1; 414 goto out; 415 } 416 } else { 417 struct dma_slave_config dma_rx_conf = { 418 .direction = DMA_DEV_TO_MEM, 419 .src_addr = host->mem_res->start + DAVINCI_MMCDRR, 420 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 421 .src_maxburst = 422 rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, 423 }; 424 chan = host->dma_rx; 425 dmaengine_slave_config(host->dma_rx, &dma_rx_conf); 426 427 desc = dmaengine_prep_slave_sg(host->dma_rx, 428 data->sg, 429 host->sg_len, 430 DMA_DEV_TO_MEM, 431 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 432 if (!desc) { 433 dev_dbg(mmc_dev(host->mmc), 434 "failed to allocate DMA RX descriptor"); 435 ret = -1; 436 goto out; 437 } 438 } 439 440 dmaengine_submit(desc); 441 dma_async_issue_pending(chan); 442 443 out: 444 return ret; 445 } 446 447 static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, 448 struct mmc_data *data) 449 { 450 int i; 451 int mask = rw_threshold - 1; 452 int ret = 0; 453 454 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 455 mmc_get_dma_dir(data)); 456 457 /* no individual DMA segment should need a partial FIFO */ 458 for (i = 0; i < host->sg_len; i++) { 459 if (sg_dma_len(data->sg + i) & mask) { 460 dma_unmap_sg(mmc_dev(host->mmc), 461 data->sg, data->sg_len, 462 mmc_get_dma_dir(data)); 463 return -1; 464 } 465 } 466 467 host->do_dma = 1; 468 ret = mmc_davinci_send_dma_request(host, data); 469 470 return ret; 471 } 472 473 static void davinci_release_dma_channels(struct mmc_davinci_host *host) 474 { 475 if (!host->use_dma) 476 return; 477 478 dma_release_channel(host->dma_tx); 479 dma_release_channel(host->dma_rx); 480 } 481 482 static int davinci_acquire_dma_channels(struct mmc_davinci_host *host) 483 { 484 host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx"); 485 if (IS_ERR(host->dma_tx)) { 486 dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n"); 487 return PTR_ERR(host->dma_tx); 488 } 489 490 host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx"); 491 if (IS_ERR(host->dma_rx)) { 492 dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n"); 493 dma_release_channel(host->dma_tx); 494 return PTR_ERR(host->dma_rx); 495 } 496 497 return 0; 498 } 499 500 /*----------------------------------------------------------------------*/ 501 502 static void 503 mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req) 504 { 505 int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0; 506 int timeout; 507 struct mmc_data *data = req->data; 508 unsigned int flags = SG_MITER_ATOMIC; /* Used from IRQ */ 509 510 if (host->version == MMC_CTLR_VERSION_2) 511 fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0; 512 513 host->data = data; 514 if (data == NULL) { 515 host->data_dir = DAVINCI_MMC_DATADIR_NONE; 516 writel(0, host->base + DAVINCI_MMCBLEN); 517 writel(0, host->base + DAVINCI_MMCNBLK); 518 return; 519 } 520 521 dev_dbg(mmc_dev(host->mmc), "%s, %d blocks of %d bytes\n", 522 (data->flags & MMC_DATA_WRITE) ? "write" : "read", 523 data->blocks, data->blksz); 524 dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n", 525 data->timeout_clks, data->timeout_ns); 526 timeout = data->timeout_clks + 527 (data->timeout_ns / host->ns_in_one_cycle); 528 if (timeout > 0xffff) 529 timeout = 0xffff; 530 531 writel(timeout, host->base + DAVINCI_MMCTOD); 532 writel(data->blocks, host->base + DAVINCI_MMCNBLK); 533 writel(data->blksz, host->base + DAVINCI_MMCBLEN); 534 535 /* Configure the FIFO */ 536 if (data->flags & MMC_DATA_WRITE) { 537 flags |= SG_MITER_FROM_SG; 538 host->data_dir = DAVINCI_MMC_DATADIR_WRITE; 539 writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST, 540 host->base + DAVINCI_MMCFIFOCTL); 541 writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR, 542 host->base + DAVINCI_MMCFIFOCTL); 543 } else { 544 flags |= SG_MITER_TO_SG; 545 host->data_dir = DAVINCI_MMC_DATADIR_READ; 546 writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST, 547 host->base + DAVINCI_MMCFIFOCTL); 548 writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD, 549 host->base + DAVINCI_MMCFIFOCTL); 550 } 551 552 host->bytes_left = data->blocks * data->blksz; 553 554 /* For now we try to use DMA whenever we won't need partial FIFO 555 * reads or writes, either for the whole transfer (as tested here) 556 * or for any individual scatterlist segment (tested when we call 557 * start_dma_transfer). 558 * 559 * While we *could* change that, unusual block sizes are rarely 560 * used. The occasional fallback to PIO should't hurt. 561 */ 562 if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0 563 && mmc_davinci_start_dma_transfer(host, data) == 0) { 564 /* zero this to ensure we take no PIO paths */ 565 host->bytes_left = 0; 566 } else { 567 /* Revert to CPU Copy */ 568 host->sg_len = data->sg_len; 569 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 570 } 571 } 572 573 static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req) 574 { 575 struct mmc_davinci_host *host = mmc_priv(mmc); 576 unsigned long timeout = jiffies + msecs_to_jiffies(900); 577 u32 mmcst1 = 0; 578 579 /* Card may still be sending BUSY after a previous operation, 580 * typically some kind of write. If so, we can't proceed yet. 581 */ 582 while (time_before(jiffies, timeout)) { 583 mmcst1 = readl(host->base + DAVINCI_MMCST1); 584 if (!(mmcst1 & MMCST1_BUSY)) 585 break; 586 cpu_relax(); 587 } 588 if (mmcst1 & MMCST1_BUSY) { 589 dev_err(mmc_dev(host->mmc), "still BUSY? bad ... \n"); 590 req->cmd->error = -ETIMEDOUT; 591 mmc_request_done(mmc, req); 592 return; 593 } 594 595 host->do_dma = 0; 596 mmc_davinci_prepare_data(host, req); 597 mmc_davinci_start_command(host, req->cmd); 598 } 599 600 static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host, 601 unsigned int mmc_req_freq) 602 { 603 unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0; 604 605 mmc_pclk = host->mmc_input_clk; 606 if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq)) 607 mmc_push_pull_divisor = ((unsigned int)mmc_pclk 608 / (2 * mmc_req_freq)) - 1; 609 else 610 mmc_push_pull_divisor = 0; 611 612 mmc_freq = (unsigned int)mmc_pclk 613 / (2 * (mmc_push_pull_divisor + 1)); 614 615 if (mmc_freq > mmc_req_freq) 616 mmc_push_pull_divisor = mmc_push_pull_divisor + 1; 617 /* Convert ns to clock cycles */ 618 if (mmc_req_freq <= 400000) 619 host->ns_in_one_cycle = (1000000) / (((mmc_pclk 620 / (2 * (mmc_push_pull_divisor + 1)))/1000)); 621 else 622 host->ns_in_one_cycle = (1000000) / (((mmc_pclk 623 / (2 * (mmc_push_pull_divisor + 1)))/1000000)); 624 625 return mmc_push_pull_divisor; 626 } 627 628 static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios) 629 { 630 unsigned int open_drain_freq = 0, mmc_pclk = 0; 631 unsigned int mmc_push_pull_freq = 0; 632 struct mmc_davinci_host *host = mmc_priv(mmc); 633 634 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { 635 u32 temp; 636 637 /* Ignoring the init clock value passed for fixing the inter 638 * operability with different cards. 639 */ 640 open_drain_freq = ((unsigned int)mmc_pclk 641 / (2 * MMCSD_INIT_CLOCK)) - 1; 642 643 if (open_drain_freq > 0xFF) 644 open_drain_freq = 0xFF; 645 646 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; 647 temp |= open_drain_freq; 648 writel(temp, host->base + DAVINCI_MMCCLK); 649 650 /* Convert ns to clock cycles */ 651 host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000); 652 } else { 653 u32 temp; 654 mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock); 655 656 if (mmc_push_pull_freq > 0xFF) 657 mmc_push_pull_freq = 0xFF; 658 659 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN; 660 writel(temp, host->base + DAVINCI_MMCCLK); 661 662 udelay(10); 663 664 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; 665 temp |= mmc_push_pull_freq; 666 writel(temp, host->base + DAVINCI_MMCCLK); 667 668 writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); 669 670 udelay(10); 671 } 672 } 673 674 static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 675 { 676 struct mmc_davinci_host *host = mmc_priv(mmc); 677 struct platform_device *pdev = to_platform_device(mmc->parent); 678 struct davinci_mmc_config *config = pdev->dev.platform_data; 679 680 dev_dbg(mmc_dev(host->mmc), 681 "clock %dHz busmode %d powermode %d Vdd %04x\n", 682 ios->clock, ios->bus_mode, ios->power_mode, 683 ios->vdd); 684 685 switch (ios->power_mode) { 686 case MMC_POWER_OFF: 687 if (config && config->set_power) 688 config->set_power(pdev->id, false); 689 break; 690 case MMC_POWER_UP: 691 if (config && config->set_power) 692 config->set_power(pdev->id, true); 693 break; 694 } 695 696 switch (ios->bus_width) { 697 case MMC_BUS_WIDTH_8: 698 dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n"); 699 writel((readl(host->base + DAVINCI_MMCCTL) & 700 ~MMCCTL_WIDTH_4_BIT) | MMCCTL_WIDTH_8_BIT, 701 host->base + DAVINCI_MMCCTL); 702 break; 703 case MMC_BUS_WIDTH_4: 704 dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n"); 705 if (host->version == MMC_CTLR_VERSION_2) 706 writel((readl(host->base + DAVINCI_MMCCTL) & 707 ~MMCCTL_WIDTH_8_BIT) | MMCCTL_WIDTH_4_BIT, 708 host->base + DAVINCI_MMCCTL); 709 else 710 writel(readl(host->base + DAVINCI_MMCCTL) | 711 MMCCTL_WIDTH_4_BIT, 712 host->base + DAVINCI_MMCCTL); 713 break; 714 case MMC_BUS_WIDTH_1: 715 dev_dbg(mmc_dev(host->mmc), "Enabling 1 bit mode\n"); 716 if (host->version == MMC_CTLR_VERSION_2) 717 writel(readl(host->base + DAVINCI_MMCCTL) & 718 ~(MMCCTL_WIDTH_8_BIT | MMCCTL_WIDTH_4_BIT), 719 host->base + DAVINCI_MMCCTL); 720 else 721 writel(readl(host->base + DAVINCI_MMCCTL) & 722 ~MMCCTL_WIDTH_4_BIT, 723 host->base + DAVINCI_MMCCTL); 724 break; 725 } 726 727 calculate_clk_divider(mmc, ios); 728 729 host->bus_mode = ios->bus_mode; 730 if (ios->power_mode == MMC_POWER_UP) { 731 unsigned long timeout = jiffies + msecs_to_jiffies(50); 732 bool lose = true; 733 734 /* Send clock cycles, poll completion */ 735 writel(0, host->base + DAVINCI_MMCARGHL); 736 writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD); 737 while (time_before(jiffies, timeout)) { 738 u32 tmp = readl(host->base + DAVINCI_MMCST0); 739 740 if (tmp & MMCST0_RSPDNE) { 741 lose = false; 742 break; 743 } 744 cpu_relax(); 745 } 746 if (lose) 747 dev_warn(mmc_dev(host->mmc), "powerup timeout\n"); 748 } 749 750 /* FIXME on power OFF, reset things ... */ 751 } 752 753 static void 754 mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data) 755 { 756 host->data = NULL; 757 758 if (host->mmc->caps & MMC_CAP_SDIO_IRQ) { 759 /* 760 * SDIO Interrupt Detection work-around as suggested by 761 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata 762 * 2.1.6): Signal SDIO interrupt only if it is enabled by core 763 */ 764 if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) & 765 SDIOST0_DAT1_HI)) { 766 writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); 767 mmc_signal_sdio_irq(host->mmc); 768 } 769 } 770 771 if (host->do_dma) { 772 davinci_abort_dma(host); 773 774 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 775 mmc_get_dma_dir(data)); 776 host->do_dma = false; 777 } 778 host->data_dir = DAVINCI_MMC_DATADIR_NONE; 779 780 if (!data->stop || (host->cmd && host->cmd->error)) { 781 mmc_request_done(host->mmc, data->mrq); 782 writel(0, host->base + DAVINCI_MMCIM); 783 host->active_request = false; 784 } else 785 mmc_davinci_start_command(host, data->stop); 786 } 787 788 static void mmc_davinci_cmd_done(struct mmc_davinci_host *host, 789 struct mmc_command *cmd) 790 { 791 host->cmd = NULL; 792 793 if (cmd->flags & MMC_RSP_PRESENT) { 794 if (cmd->flags & MMC_RSP_136) { 795 /* response type 2 */ 796 cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01); 797 cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23); 798 cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45); 799 cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); 800 } else { 801 /* response types 1, 1b, 3, 4, 5, 6 */ 802 cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); 803 } 804 } 805 806 if (host->data == NULL || cmd->error) { 807 if (cmd->error == -ETIMEDOUT) 808 cmd->mrq->cmd->retries = 0; 809 mmc_request_done(host->mmc, cmd->mrq); 810 writel(0, host->base + DAVINCI_MMCIM); 811 host->active_request = false; 812 } 813 } 814 815 static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host, 816 int val) 817 { 818 u32 temp; 819 820 temp = readl(host->base + DAVINCI_MMCCTL); 821 if (val) /* reset */ 822 temp |= MMCCTL_CMDRST | MMCCTL_DATRST; 823 else /* enable */ 824 temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST); 825 826 writel(temp, host->base + DAVINCI_MMCCTL); 827 udelay(10); 828 } 829 830 static void 831 davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data) 832 { 833 mmc_davinci_reset_ctrl(host, 1); 834 mmc_davinci_reset_ctrl(host, 0); 835 if (!host->do_dma) 836 sg_miter_stop(&host->sg_miter); 837 } 838 839 static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id) 840 { 841 struct mmc_davinci_host *host = dev_id; 842 unsigned int status; 843 844 status = readl(host->base + DAVINCI_SDIOIST); 845 if (status & SDIOIST_IOINT) { 846 dev_dbg(mmc_dev(host->mmc), 847 "SDIO interrupt status %x\n", status); 848 writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); 849 mmc_signal_sdio_irq(host->mmc); 850 } 851 return IRQ_HANDLED; 852 } 853 854 static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) 855 { 856 struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id; 857 unsigned int status, qstatus; 858 int end_command = 0; 859 int end_transfer = 0; 860 struct mmc_data *data = host->data; 861 862 if (host->cmd == NULL && host->data == NULL) { 863 status = readl(host->base + DAVINCI_MMCST0); 864 dev_dbg(mmc_dev(host->mmc), 865 "Spurious interrupt 0x%04x\n", status); 866 /* Disable the interrupt from mmcsd */ 867 writel(0, host->base + DAVINCI_MMCIM); 868 return IRQ_NONE; 869 } 870 871 status = readl(host->base + DAVINCI_MMCST0); 872 qstatus = status; 873 874 /* handle FIFO first when using PIO for data. 875 * bytes_left will decrease to zero as I/O progress and status will 876 * read zero over iteration because this controller status 877 * register(MMCST0) reports any status only once and it is cleared 878 * by read. So, it is not unbouned loop even in the case of 879 * non-dma. 880 */ 881 if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) { 882 unsigned long im_val; 883 884 /* 885 * If interrupts fire during the following loop, they will be 886 * handled by the handler, but the PIC will still buffer these. 887 * As a result, the handler will be called again to serve these 888 * needlessly. In order to avoid these spurious interrupts, 889 * keep interrupts masked during the loop. 890 */ 891 im_val = readl(host->base + DAVINCI_MMCIM); 892 writel(0, host->base + DAVINCI_MMCIM); 893 894 do { 895 davinci_fifo_data_trans(host, rw_threshold); 896 status = readl(host->base + DAVINCI_MMCST0); 897 qstatus |= status; 898 } while (host->bytes_left && 899 (status & (MMCST0_DXRDY | MMCST0_DRRDY))); 900 901 /* 902 * If an interrupt is pending, it is assumed it will fire when 903 * it is unmasked. This assumption is also taken when the MMCIM 904 * is first set. Otherwise, writing to MMCIM after reading the 905 * status is race-prone. 906 */ 907 writel(im_val, host->base + DAVINCI_MMCIM); 908 } 909 910 if (qstatus & MMCST0_DATDNE) { 911 /* All blocks sent/received, and CRC checks passed */ 912 if (data != NULL) { 913 if (!host->do_dma) { 914 if (host->bytes_left > 0) 915 /* if datasize < rw_threshold 916 * no RX ints are generated 917 */ 918 davinci_fifo_data_trans(host, host->bytes_left); 919 sg_miter_stop(&host->sg_miter); 920 } 921 end_transfer = 1; 922 data->bytes_xfered = data->blocks * data->blksz; 923 } else { 924 dev_err(mmc_dev(host->mmc), 925 "DATDNE with no host->data\n"); 926 } 927 } 928 929 if (qstatus & MMCST0_TOUTRD) { 930 /* Read data timeout */ 931 data->error = -ETIMEDOUT; 932 end_transfer = 1; 933 934 dev_dbg(mmc_dev(host->mmc), 935 "read data timeout, status %x\n", 936 qstatus); 937 938 davinci_abort_data(host, data); 939 } 940 941 if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) { 942 /* Data CRC error */ 943 data->error = -EILSEQ; 944 end_transfer = 1; 945 946 /* NOTE: this controller uses CRCWR to report both CRC 947 * errors and timeouts (on writes). MMCDRSP values are 948 * only weakly documented, but 0x9f was clearly a timeout 949 * case and the two three-bit patterns in various SD specs 950 * (101, 010) aren't part of it ... 951 */ 952 if (qstatus & MMCST0_CRCWR) { 953 u32 temp = readb(host->base + DAVINCI_MMCDRSP); 954 955 if (temp == 0x9f) 956 data->error = -ETIMEDOUT; 957 } 958 dev_dbg(mmc_dev(host->mmc), "data %s %s error\n", 959 (qstatus & MMCST0_CRCWR) ? "write" : "read", 960 (data->error == -ETIMEDOUT) ? "timeout" : "CRC"); 961 962 davinci_abort_data(host, data); 963 } 964 965 if (qstatus & MMCST0_TOUTRS) { 966 /* Command timeout */ 967 if (host->cmd) { 968 dev_dbg(mmc_dev(host->mmc), 969 "CMD%d timeout, status %x\n", 970 host->cmd->opcode, qstatus); 971 host->cmd->error = -ETIMEDOUT; 972 if (data) { 973 end_transfer = 1; 974 davinci_abort_data(host, data); 975 } else 976 end_command = 1; 977 } 978 } 979 980 if (qstatus & MMCST0_CRCRS) { 981 /* Command CRC error */ 982 dev_dbg(mmc_dev(host->mmc), "Command CRC error\n"); 983 if (host->cmd) { 984 host->cmd->error = -EILSEQ; 985 end_command = 1; 986 } 987 } 988 989 if (qstatus & MMCST0_RSPDNE) { 990 /* End of command phase */ 991 end_command = host->cmd ? 1 : 0; 992 } 993 994 if (end_command) 995 mmc_davinci_cmd_done(host, host->cmd); 996 if (end_transfer) 997 mmc_davinci_xfer_done(host, data); 998 return IRQ_HANDLED; 999 } 1000 1001 static int mmc_davinci_get_cd(struct mmc_host *mmc) 1002 { 1003 struct platform_device *pdev = to_platform_device(mmc->parent); 1004 struct davinci_mmc_config *config = pdev->dev.platform_data; 1005 1006 if (config && config->get_cd) 1007 return config->get_cd(pdev->id); 1008 1009 return mmc_gpio_get_cd(mmc); 1010 } 1011 1012 static int mmc_davinci_get_ro(struct mmc_host *mmc) 1013 { 1014 struct platform_device *pdev = to_platform_device(mmc->parent); 1015 struct davinci_mmc_config *config = pdev->dev.platform_data; 1016 1017 if (config && config->get_ro) 1018 return config->get_ro(pdev->id); 1019 1020 return mmc_gpio_get_ro(mmc); 1021 } 1022 1023 static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable) 1024 { 1025 struct mmc_davinci_host *host = mmc_priv(mmc); 1026 1027 if (enable) { 1028 if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) { 1029 writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); 1030 mmc_signal_sdio_irq(host->mmc); 1031 } else { 1032 host->sdio_int = true; 1033 writel(readl(host->base + DAVINCI_SDIOIEN) | 1034 SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN); 1035 } 1036 } else { 1037 host->sdio_int = false; 1038 writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN, 1039 host->base + DAVINCI_SDIOIEN); 1040 } 1041 } 1042 1043 static const struct mmc_host_ops mmc_davinci_ops = { 1044 .request = mmc_davinci_request, 1045 .set_ios = mmc_davinci_set_ios, 1046 .get_cd = mmc_davinci_get_cd, 1047 .get_ro = mmc_davinci_get_ro, 1048 .enable_sdio_irq = mmc_davinci_enable_sdio_irq, 1049 }; 1050 1051 /*----------------------------------------------------------------------*/ 1052 1053 #ifdef CONFIG_CPU_FREQ 1054 static int mmc_davinci_cpufreq_transition(struct notifier_block *nb, 1055 unsigned long val, void *data) 1056 { 1057 struct mmc_davinci_host *host; 1058 unsigned int mmc_pclk; 1059 struct mmc_host *mmc; 1060 unsigned long flags; 1061 1062 host = container_of(nb, struct mmc_davinci_host, freq_transition); 1063 mmc = host->mmc; 1064 mmc_pclk = clk_get_rate(host->clk); 1065 1066 if (val == CPUFREQ_POSTCHANGE) { 1067 spin_lock_irqsave(&mmc->lock, flags); 1068 host->mmc_input_clk = mmc_pclk; 1069 calculate_clk_divider(mmc, &mmc->ios); 1070 spin_unlock_irqrestore(&mmc->lock, flags); 1071 } 1072 1073 return 0; 1074 } 1075 1076 static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) 1077 { 1078 host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition; 1079 1080 return cpufreq_register_notifier(&host->freq_transition, 1081 CPUFREQ_TRANSITION_NOTIFIER); 1082 } 1083 1084 static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) 1085 { 1086 cpufreq_unregister_notifier(&host->freq_transition, 1087 CPUFREQ_TRANSITION_NOTIFIER); 1088 } 1089 #else 1090 static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) 1091 { 1092 return 0; 1093 } 1094 1095 static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) 1096 { 1097 } 1098 #endif 1099 static void init_mmcsd_host(struct mmc_davinci_host *host) 1100 { 1101 1102 mmc_davinci_reset_ctrl(host, 1); 1103 1104 writel(0, host->base + DAVINCI_MMCCLK); 1105 writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); 1106 1107 writel(0x1FFF, host->base + DAVINCI_MMCTOR); 1108 writel(0xFFFF, host->base + DAVINCI_MMCTOD); 1109 1110 mmc_davinci_reset_ctrl(host, 0); 1111 } 1112 1113 static const struct platform_device_id davinci_mmc_devtype[] = { 1114 { 1115 .name = "dm6441-mmc", 1116 .driver_data = MMC_CTLR_VERSION_1, 1117 }, { 1118 .name = "da830-mmc", 1119 .driver_data = MMC_CTLR_VERSION_2, 1120 }, 1121 {}, 1122 }; 1123 MODULE_DEVICE_TABLE(platform, davinci_mmc_devtype); 1124 1125 static const struct of_device_id davinci_mmc_dt_ids[] = { 1126 { 1127 .compatible = "ti,dm6441-mmc", 1128 .data = &davinci_mmc_devtype[MMC_CTLR_VERSION_1], 1129 }, 1130 { 1131 .compatible = "ti,da830-mmc", 1132 .data = &davinci_mmc_devtype[MMC_CTLR_VERSION_2], 1133 }, 1134 {}, 1135 }; 1136 MODULE_DEVICE_TABLE(of, davinci_mmc_dt_ids); 1137 1138 static int mmc_davinci_parse_pdata(struct mmc_host *mmc) 1139 { 1140 struct platform_device *pdev = to_platform_device(mmc->parent); 1141 struct davinci_mmc_config *pdata = pdev->dev.platform_data; 1142 struct mmc_davinci_host *host; 1143 int ret; 1144 1145 if (!pdata) 1146 return -EINVAL; 1147 1148 host = mmc_priv(mmc); 1149 if (!host) 1150 return -EINVAL; 1151 1152 if (pdata && pdata->nr_sg) 1153 host->nr_sg = pdata->nr_sg - 1; 1154 1155 if (pdata && (pdata->wires == 4 || pdata->wires == 0)) 1156 mmc->caps |= MMC_CAP_4_BIT_DATA; 1157 1158 if (pdata && (pdata->wires == 8)) 1159 mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA); 1160 1161 mmc->f_min = 312500; 1162 mmc->f_max = 25000000; 1163 if (pdata && pdata->max_freq) 1164 mmc->f_max = pdata->max_freq; 1165 if (pdata && pdata->caps) 1166 mmc->caps |= pdata->caps; 1167 1168 /* Register a cd gpio, if there is not one, enable polling */ 1169 ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); 1170 if (ret == -EPROBE_DEFER) 1171 return ret; 1172 else if (ret) 1173 mmc->caps |= MMC_CAP_NEEDS_POLL; 1174 1175 ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0); 1176 if (ret == -EPROBE_DEFER) 1177 return ret; 1178 1179 return 0; 1180 } 1181 1182 static int davinci_mmcsd_probe(struct platform_device *pdev) 1183 { 1184 struct mmc_davinci_host *host = NULL; 1185 struct mmc_host *mmc = NULL; 1186 struct resource *r, *mem = NULL; 1187 int ret, irq; 1188 size_t mem_size; 1189 const struct platform_device_id *id_entry; 1190 1191 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1192 if (!r) 1193 return -ENODEV; 1194 irq = platform_get_irq(pdev, 0); 1195 if (irq < 0) 1196 return irq; 1197 1198 mem_size = resource_size(r); 1199 mem = devm_request_mem_region(&pdev->dev, r->start, mem_size, 1200 pdev->name); 1201 if (!mem) 1202 return -EBUSY; 1203 1204 mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev); 1205 if (!mmc) 1206 return -ENOMEM; 1207 1208 host = mmc_priv(mmc); 1209 host->mmc = mmc; /* Important */ 1210 1211 host->mem_res = mem; 1212 host->base = devm_ioremap(&pdev->dev, mem->start, mem_size); 1213 if (!host->base) { 1214 ret = -ENOMEM; 1215 goto ioremap_fail; 1216 } 1217 1218 host->clk = devm_clk_get(&pdev->dev, NULL); 1219 if (IS_ERR(host->clk)) { 1220 ret = PTR_ERR(host->clk); 1221 goto clk_get_fail; 1222 } 1223 ret = clk_prepare_enable(host->clk); 1224 if (ret) 1225 goto clk_prepare_enable_fail; 1226 1227 host->mmc_input_clk = clk_get_rate(host->clk); 1228 1229 pdev->id_entry = of_device_get_match_data(&pdev->dev); 1230 if (pdev->id_entry) { 1231 ret = mmc_of_parse(mmc); 1232 if (ret) { 1233 dev_err_probe(&pdev->dev, ret, 1234 "could not parse of data\n"); 1235 goto parse_fail; 1236 } 1237 } else { 1238 ret = mmc_davinci_parse_pdata(mmc); 1239 if (ret) { 1240 dev_err(&pdev->dev, 1241 "could not parse platform data: %d\n", ret); 1242 goto parse_fail; 1243 } } 1244 1245 if (host->nr_sg > MAX_NR_SG || !host->nr_sg) 1246 host->nr_sg = MAX_NR_SG; 1247 1248 init_mmcsd_host(host); 1249 1250 host->use_dma = use_dma; 1251 host->mmc_irq = irq; 1252 host->sdio_irq = platform_get_irq_optional(pdev, 1); 1253 1254 if (host->use_dma) { 1255 ret = davinci_acquire_dma_channels(host); 1256 if (ret == -EPROBE_DEFER) 1257 goto dma_probe_defer; 1258 else if (ret) 1259 host->use_dma = 0; 1260 } 1261 1262 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; 1263 1264 id_entry = platform_get_device_id(pdev); 1265 if (id_entry) 1266 host->version = id_entry->driver_data; 1267 1268 mmc->ops = &mmc_davinci_ops; 1269 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1270 1271 /* With no iommu coalescing pages, each phys_seg is a hw_seg. 1272 * Each hw_seg uses one EDMA parameter RAM slot, always one 1273 * channel and then usually some linked slots. 1274 */ 1275 mmc->max_segs = MAX_NR_SG; 1276 1277 /* EDMA limit per hw segment (one or two MBytes) */ 1278 mmc->max_seg_size = MAX_CCNT * rw_threshold; 1279 1280 /* MMC/SD controller limits for multiblock requests */ 1281 mmc->max_blk_size = 4095; /* BLEN is 12 bits */ 1282 mmc->max_blk_count = 65535; /* NBLK is 16 bits */ 1283 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1284 1285 dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs); 1286 dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size); 1287 dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size); 1288 dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size); 1289 1290 platform_set_drvdata(pdev, host); 1291 1292 ret = mmc_davinci_cpufreq_register(host); 1293 if (ret) { 1294 dev_err(&pdev->dev, "failed to register cpufreq\n"); 1295 goto cpu_freq_fail; 1296 } 1297 1298 ret = mmc_add_host(mmc); 1299 if (ret < 0) 1300 goto mmc_add_host_fail; 1301 1302 ret = devm_request_irq(&pdev->dev, irq, mmc_davinci_irq, 0, 1303 mmc_hostname(mmc), host); 1304 if (ret) 1305 goto request_irq_fail; 1306 1307 if (host->sdio_irq >= 0) { 1308 ret = devm_request_irq(&pdev->dev, host->sdio_irq, 1309 mmc_davinci_sdio_irq, 0, 1310 mmc_hostname(mmc), host); 1311 if (!ret) 1312 mmc->caps |= MMC_CAP_SDIO_IRQ; 1313 } 1314 1315 rename_region(mem, mmc_hostname(mmc)); 1316 1317 dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n", 1318 host->use_dma ? "DMA" : "PIO", 1319 (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1); 1320 1321 return 0; 1322 1323 request_irq_fail: 1324 mmc_remove_host(mmc); 1325 mmc_add_host_fail: 1326 mmc_davinci_cpufreq_deregister(host); 1327 cpu_freq_fail: 1328 davinci_release_dma_channels(host); 1329 parse_fail: 1330 dma_probe_defer: 1331 clk_disable_unprepare(host->clk); 1332 clk_prepare_enable_fail: 1333 clk_get_fail: 1334 ioremap_fail: 1335 mmc_free_host(mmc); 1336 1337 return ret; 1338 } 1339 1340 static void __exit davinci_mmcsd_remove(struct platform_device *pdev) 1341 { 1342 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1343 1344 mmc_remove_host(host->mmc); 1345 mmc_davinci_cpufreq_deregister(host); 1346 davinci_release_dma_channels(host); 1347 clk_disable_unprepare(host->clk); 1348 mmc_free_host(host->mmc); 1349 } 1350 1351 #ifdef CONFIG_PM 1352 static int davinci_mmcsd_suspend(struct device *dev) 1353 { 1354 struct mmc_davinci_host *host = dev_get_drvdata(dev); 1355 1356 writel(0, host->base + DAVINCI_MMCIM); 1357 mmc_davinci_reset_ctrl(host, 1); 1358 clk_disable(host->clk); 1359 1360 return 0; 1361 } 1362 1363 static int davinci_mmcsd_resume(struct device *dev) 1364 { 1365 struct mmc_davinci_host *host = dev_get_drvdata(dev); 1366 int ret; 1367 1368 ret = clk_enable(host->clk); 1369 if (ret) 1370 return ret; 1371 1372 mmc_davinci_reset_ctrl(host, 0); 1373 1374 return 0; 1375 } 1376 1377 static const struct dev_pm_ops davinci_mmcsd_pm = { 1378 .suspend = davinci_mmcsd_suspend, 1379 .resume = davinci_mmcsd_resume, 1380 }; 1381 1382 #define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm) 1383 #else 1384 #define davinci_mmcsd_pm_ops NULL 1385 #endif 1386 1387 static struct platform_driver davinci_mmcsd_driver = { 1388 .driver = { 1389 .name = "davinci_mmc", 1390 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 1391 .pm = davinci_mmcsd_pm_ops, 1392 .of_match_table = davinci_mmc_dt_ids, 1393 }, 1394 .probe = davinci_mmcsd_probe, 1395 .remove_new = __exit_p(davinci_mmcsd_remove), 1396 .id_table = davinci_mmc_devtype, 1397 }; 1398 1399 module_platform_driver(davinci_mmcsd_driver); 1400 1401 MODULE_AUTHOR("Texas Instruments India"); 1402 MODULE_LICENSE("GPL"); 1403 MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller"); 1404 MODULE_ALIAS("platform:davinci_mmc"); 1405 1406