1 /* 2 * DMA function for TMIO MMC implementations 3 * 4 * Copyright (C) 2016-17 Renesas Electronics Corporation 5 * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang 6 * Copyright (C) 2017 Horms Solutions, Simon Horman 7 * Copyright (C) 2010-2011 Guennadi Liakhovetski 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 as 11 * published by the Free Software Foundation. 12 */ 13 14 #include <linux/device.h> 15 #include <linux/dma-mapping.h> 16 #include <linux/dmaengine.h> 17 #include <linux/mfd/tmio.h> 18 #include <linux/mmc/host.h> 19 #include <linux/mod_devicetable.h> 20 #include <linux/module.h> 21 #include <linux/pagemap.h> 22 #include <linux/scatterlist.h> 23 24 #include "renesas_sdhi.h" 25 #include "tmio_mmc.h" 26 27 #define TMIO_MMC_MIN_DMA_LEN 8 28 29 static const struct renesas_sdhi_of_data of_default_cfg = { 30 .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT, 31 }; 32 33 static const struct renesas_sdhi_of_data of_rz_compatible = { 34 .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_32BIT_DATA_PORT, 35 .tmio_ocr_mask = MMC_VDD_32_33, 36 .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ, 37 }; 38 39 static const struct renesas_sdhi_of_data of_rcar_gen1_compatible = { 40 .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE | 41 TMIO_MMC_CLK_ACTUAL, 42 .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ, 43 }; 44 45 /* Definitions for sampling clocks */ 46 static struct renesas_sdhi_scc rcar_gen2_scc_taps[] = { 47 { 48 .clk_rate = 156000000, 49 .tap = 0x00000703, 50 }, 51 { 52 .clk_rate = 0, 53 .tap = 0x00000300, 54 }, 55 }; 56 57 static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = { 58 .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE | 59 TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2, 60 .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | 61 MMC_CAP_CMD23, 62 .dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES, 63 .dma_rx_offset = 0x2000, 64 .scc_offset = 0x0300, 65 .taps = rcar_gen2_scc_taps, 66 .taps_num = ARRAY_SIZE(rcar_gen2_scc_taps), 67 }; 68 69 /* Definitions for sampling clocks */ 70 static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { 71 { 72 .clk_rate = 0, 73 .tap = 0x00000300, 74 }, 75 }; 76 77 static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = { 78 .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE | 79 TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2, 80 .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | 81 MMC_CAP_CMD23, 82 .bus_shift = 2, 83 .scc_offset = 0x1000, 84 .taps = rcar_gen3_scc_taps, 85 .taps_num = ARRAY_SIZE(rcar_gen3_scc_taps), 86 }; 87 88 static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = { 89 { .compatible = "renesas,sdhi-shmobile" }, 90 { .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, }, 91 { .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, }, 92 { .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, }, 93 { .compatible = "renesas,sdhi-r7s72100", .data = &of_rz_compatible, }, 94 { .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, }, 95 { .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, }, 96 { .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, }, 97 { .compatible = "renesas,sdhi-r8a7791", .data = &of_rcar_gen2_compatible, }, 98 { .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, }, 99 { .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, }, 100 { .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, }, 101 { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, }, 102 { .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, }, 103 {}, 104 }; 105 MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match); 106 107 static void renesas_sdhi_sys_dmac_enable_dma(struct tmio_mmc_host *host, 108 bool enable) 109 { 110 if (!host->chan_tx || !host->chan_rx) 111 return; 112 113 if (host->dma->enable) 114 host->dma->enable(host, enable); 115 } 116 117 static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host) 118 { 119 renesas_sdhi_sys_dmac_enable_dma(host, false); 120 121 if (host->chan_rx) 122 dmaengine_terminate_all(host->chan_rx); 123 if (host->chan_tx) 124 dmaengine_terminate_all(host->chan_tx); 125 126 renesas_sdhi_sys_dmac_enable_dma(host, true); 127 } 128 129 static void renesas_sdhi_sys_dmac_dma_callback(void *arg) 130 { 131 struct tmio_mmc_host *host = arg; 132 133 spin_lock_irq(&host->lock); 134 135 if (!host->data) 136 goto out; 137 138 if (host->data->flags & MMC_DATA_READ) 139 dma_unmap_sg(host->chan_rx->device->dev, 140 host->sg_ptr, host->sg_len, 141 DMA_FROM_DEVICE); 142 else 143 dma_unmap_sg(host->chan_tx->device->dev, 144 host->sg_ptr, host->sg_len, 145 DMA_TO_DEVICE); 146 147 spin_unlock_irq(&host->lock); 148 149 wait_for_completion(&host->dma_dataend); 150 151 spin_lock_irq(&host->lock); 152 tmio_mmc_do_data_irq(host); 153 out: 154 spin_unlock_irq(&host->lock); 155 } 156 157 static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host) 158 { 159 struct scatterlist *sg = host->sg_ptr, *sg_tmp; 160 struct dma_async_tx_descriptor *desc = NULL; 161 struct dma_chan *chan = host->chan_rx; 162 dma_cookie_t cookie; 163 int ret, i; 164 bool aligned = true, multiple = true; 165 unsigned int align = (1 << host->pdata->alignment_shift) - 1; 166 167 for_each_sg(sg, sg_tmp, host->sg_len, i) { 168 if (sg_tmp->offset & align) 169 aligned = false; 170 if (sg_tmp->length & align) { 171 multiple = false; 172 break; 173 } 174 } 175 176 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE || 177 (align & PAGE_MASK))) || !multiple) { 178 ret = -EINVAL; 179 goto pio; 180 } 181 182 if (sg->length < TMIO_MMC_MIN_DMA_LEN) { 183 host->force_pio = true; 184 return; 185 } 186 187 tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY); 188 189 /* The only sg element can be unaligned, use our bounce buffer then */ 190 if (!aligned) { 191 sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); 192 host->sg_ptr = &host->bounce_sg; 193 sg = host->sg_ptr; 194 } 195 196 ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); 197 if (ret > 0) 198 desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_DEV_TO_MEM, 199 DMA_CTRL_ACK); 200 201 if (desc) { 202 reinit_completion(&host->dma_dataend); 203 desc->callback = renesas_sdhi_sys_dmac_dma_callback; 204 desc->callback_param = host; 205 206 cookie = dmaengine_submit(desc); 207 if (cookie < 0) { 208 desc = NULL; 209 ret = cookie; 210 } 211 } 212 pio: 213 if (!desc) { 214 /* DMA failed, fall back to PIO */ 215 renesas_sdhi_sys_dmac_enable_dma(host, false); 216 if (ret >= 0) 217 ret = -EIO; 218 host->chan_rx = NULL; 219 dma_release_channel(chan); 220 /* Free the Tx channel too */ 221 chan = host->chan_tx; 222 if (chan) { 223 host->chan_tx = NULL; 224 dma_release_channel(chan); 225 } 226 dev_warn(&host->pdev->dev, 227 "DMA failed: %d, falling back to PIO\n", ret); 228 } 229 } 230 231 static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host) 232 { 233 struct scatterlist *sg = host->sg_ptr, *sg_tmp; 234 struct dma_async_tx_descriptor *desc = NULL; 235 struct dma_chan *chan = host->chan_tx; 236 dma_cookie_t cookie; 237 int ret, i; 238 bool aligned = true, multiple = true; 239 unsigned int align = (1 << host->pdata->alignment_shift) - 1; 240 241 for_each_sg(sg, sg_tmp, host->sg_len, i) { 242 if (sg_tmp->offset & align) 243 aligned = false; 244 if (sg_tmp->length & align) { 245 multiple = false; 246 break; 247 } 248 } 249 250 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE || 251 (align & PAGE_MASK))) || !multiple) { 252 ret = -EINVAL; 253 goto pio; 254 } 255 256 if (sg->length < TMIO_MMC_MIN_DMA_LEN) { 257 host->force_pio = true; 258 return; 259 } 260 261 tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ); 262 263 /* The only sg element can be unaligned, use our bounce buffer then */ 264 if (!aligned) { 265 unsigned long flags; 266 void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags); 267 268 sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); 269 memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length); 270 tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr); 271 host->sg_ptr = &host->bounce_sg; 272 sg = host->sg_ptr; 273 } 274 275 ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); 276 if (ret > 0) 277 desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_MEM_TO_DEV, 278 DMA_CTRL_ACK); 279 280 if (desc) { 281 reinit_completion(&host->dma_dataend); 282 desc->callback = renesas_sdhi_sys_dmac_dma_callback; 283 desc->callback_param = host; 284 285 cookie = dmaengine_submit(desc); 286 if (cookie < 0) { 287 desc = NULL; 288 ret = cookie; 289 } 290 } 291 pio: 292 if (!desc) { 293 /* DMA failed, fall back to PIO */ 294 renesas_sdhi_sys_dmac_enable_dma(host, false); 295 if (ret >= 0) 296 ret = -EIO; 297 host->chan_tx = NULL; 298 dma_release_channel(chan); 299 /* Free the Rx channel too */ 300 chan = host->chan_rx; 301 if (chan) { 302 host->chan_rx = NULL; 303 dma_release_channel(chan); 304 } 305 dev_warn(&host->pdev->dev, 306 "DMA failed: %d, falling back to PIO\n", ret); 307 } 308 } 309 310 static void renesas_sdhi_sys_dmac_start_dma(struct tmio_mmc_host *host, 311 struct mmc_data *data) 312 { 313 if (data->flags & MMC_DATA_READ) { 314 if (host->chan_rx) 315 renesas_sdhi_sys_dmac_start_dma_rx(host); 316 } else { 317 if (host->chan_tx) 318 renesas_sdhi_sys_dmac_start_dma_tx(host); 319 } 320 } 321 322 static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv) 323 { 324 struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; 325 struct dma_chan *chan = NULL; 326 327 spin_lock_irq(&host->lock); 328 329 if (host && host->data) { 330 if (host->data->flags & MMC_DATA_READ) 331 chan = host->chan_rx; 332 else 333 chan = host->chan_tx; 334 } 335 336 spin_unlock_irq(&host->lock); 337 338 tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND); 339 340 if (chan) 341 dma_async_issue_pending(chan); 342 } 343 344 static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host, 345 struct tmio_mmc_data *pdata) 346 { 347 /* We can only either use DMA for both Tx and Rx or not use it at all */ 348 if (!host->dma || (!host->pdev->dev.of_node && 349 (!pdata->chan_priv_tx || !pdata->chan_priv_rx))) 350 return; 351 352 if (!host->chan_tx && !host->chan_rx) { 353 struct resource *res = platform_get_resource(host->pdev, 354 IORESOURCE_MEM, 0); 355 struct dma_slave_config cfg = {}; 356 dma_cap_mask_t mask; 357 int ret; 358 359 if (!res) 360 return; 361 362 dma_cap_zero(mask); 363 dma_cap_set(DMA_SLAVE, mask); 364 365 host->chan_tx = dma_request_slave_channel_compat(mask, 366 host->dma->filter, pdata->chan_priv_tx, 367 &host->pdev->dev, "tx"); 368 dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, 369 host->chan_tx); 370 371 if (!host->chan_tx) 372 return; 373 374 cfg.direction = DMA_MEM_TO_DEV; 375 cfg.dst_addr = res->start + 376 (CTL_SD_DATA_PORT << host->bus_shift); 377 cfg.dst_addr_width = host->dma->dma_buswidth; 378 if (!cfg.dst_addr_width) 379 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 380 cfg.src_addr = 0; 381 ret = dmaengine_slave_config(host->chan_tx, &cfg); 382 if (ret < 0) 383 goto ecfgtx; 384 385 host->chan_rx = dma_request_slave_channel_compat(mask, 386 host->dma->filter, pdata->chan_priv_rx, 387 &host->pdev->dev, "rx"); 388 dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, 389 host->chan_rx); 390 391 if (!host->chan_rx) 392 goto ereqrx; 393 394 cfg.direction = DMA_DEV_TO_MEM; 395 cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset; 396 cfg.src_addr_width = host->dma->dma_buswidth; 397 if (!cfg.src_addr_width) 398 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 399 cfg.dst_addr = 0; 400 ret = dmaengine_slave_config(host->chan_rx, &cfg); 401 if (ret < 0) 402 goto ecfgrx; 403 404 host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA); 405 if (!host->bounce_buf) 406 goto ebouncebuf; 407 408 init_completion(&host->dma_dataend); 409 tasklet_init(&host->dma_issue, 410 renesas_sdhi_sys_dmac_issue_tasklet_fn, 411 (unsigned long)host); 412 } 413 414 renesas_sdhi_sys_dmac_enable_dma(host, true); 415 416 return; 417 418 ebouncebuf: 419 ecfgrx: 420 dma_release_channel(host->chan_rx); 421 host->chan_rx = NULL; 422 ereqrx: 423 ecfgtx: 424 dma_release_channel(host->chan_tx); 425 host->chan_tx = NULL; 426 } 427 428 static void renesas_sdhi_sys_dmac_release_dma(struct tmio_mmc_host *host) 429 { 430 if (host->chan_tx) { 431 struct dma_chan *chan = host->chan_tx; 432 433 host->chan_tx = NULL; 434 dma_release_channel(chan); 435 } 436 if (host->chan_rx) { 437 struct dma_chan *chan = host->chan_rx; 438 439 host->chan_rx = NULL; 440 dma_release_channel(chan); 441 } 442 if (host->bounce_buf) { 443 free_pages((unsigned long)host->bounce_buf, 0); 444 host->bounce_buf = NULL; 445 } 446 } 447 448 static const struct tmio_mmc_dma_ops renesas_sdhi_sys_dmac_dma_ops = { 449 .start = renesas_sdhi_sys_dmac_start_dma, 450 .enable = renesas_sdhi_sys_dmac_enable_dma, 451 .request = renesas_sdhi_sys_dmac_request_dma, 452 .release = renesas_sdhi_sys_dmac_release_dma, 453 .abort = renesas_sdhi_sys_dmac_abort_dma, 454 }; 455 456 static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev) 457 { 458 return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops); 459 } 460 461 static const struct dev_pm_ops renesas_sdhi_sys_dmac_dev_pm_ops = { 462 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 463 pm_runtime_force_resume) 464 SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend, 465 tmio_mmc_host_runtime_resume, 466 NULL) 467 }; 468 469 static struct platform_driver renesas_sys_dmac_sdhi_driver = { 470 .driver = { 471 .name = "sh_mobile_sdhi", 472 .pm = &renesas_sdhi_sys_dmac_dev_pm_ops, 473 .of_match_table = renesas_sdhi_sys_dmac_of_match, 474 }, 475 .probe = renesas_sdhi_sys_dmac_probe, 476 .remove = renesas_sdhi_remove, 477 }; 478 479 module_platform_driver(renesas_sys_dmac_sdhi_driver); 480 481 MODULE_DESCRIPTION("Renesas SDHI driver"); 482 MODULE_AUTHOR("Magnus Damm"); 483 MODULE_LICENSE("GPL v2"); 484 MODULE_ALIAS("platform:sh_mobile_sdhi"); 485