1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * DMA support use of SYS DMAC with SDHI SD/SDIO controller 4 * 5 * Copyright (C) 2016-19 Renesas Electronics Corporation 6 * Copyright (C) 2016-19 Sang Engineering, Wolfram Sang 7 * Copyright (C) 2017 Horms Solutions, Simon Horman 8 * Copyright (C) 2010-2011 Guennadi Liakhovetski 9 */ 10 11 #include <linux/device.h> 12 #include <linux/dma-mapping.h> 13 #include <linux/dmaengine.h> 14 #include <linux/mfd/tmio.h> 15 #include <linux/mmc/host.h> 16 #include <linux/mod_devicetable.h> 17 #include <linux/module.h> 18 #include <linux/of.h> 19 #include <linux/platform_device.h> 20 #include <linux/pagemap.h> 21 #include <linux/scatterlist.h> 22 #include <linux/sys_soc.h> 23 24 #include "renesas_sdhi.h" 25 #include "tmio_mmc.h" 26 27 #define TMIO_MMC_MIN_DMA_LEN 8 28 29 static const struct renesas_sdhi_of_data of_default_cfg = { 30 .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT, 31 }; 32 33 static const struct renesas_sdhi_of_data of_rz_compatible = { 34 .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_32BIT_DATA_PORT | 35 TMIO_MMC_HAVE_CBSY, 36 .tmio_ocr_mask = MMC_VDD_32_33, 37 .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | 38 MMC_CAP_WAIT_WHILE_BUSY, 39 }; 40 41 static const struct renesas_sdhi_of_data of_rcar_gen1_compatible = { 42 .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL, 43 .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | 44 MMC_CAP_WAIT_WHILE_BUSY, 45 .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT, 46 }; 47 48 /* Definitions for sampling clocks */ 49 static struct renesas_sdhi_scc rcar_gen2_scc_taps[] = { 50 { 51 .clk_rate = 156000000, 52 .tap = 0x00000703, 53 }, 54 { 55 .clk_rate = 0, 56 .tap = 0x00000300, 57 }, 58 }; 59 60 static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = { 61 .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL | 62 TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2, 63 .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | 64 MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY, 65 .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT, 66 .dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES, 67 .dma_rx_offset = 0x2000, 68 .scc_offset = 0x0300, 69 .taps = rcar_gen2_scc_taps, 70 .taps_num = ARRAY_SIZE(rcar_gen2_scc_taps), 71 .max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE, 72 }; 73 74 static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = { 75 { .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, }, 76 { .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, }, 77 { .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, }, 78 { .compatible = "renesas,sdhi-r7s72100", .data = &of_rz_compatible, }, 79 { .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, }, 80 { .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, }, 81 { .compatible = "renesas,sdhi-r8a7743", .data = &of_rcar_gen2_compatible, }, 82 { .compatible = "renesas,sdhi-r8a7745", .data = &of_rcar_gen2_compatible, }, 83 { .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, }, 84 { .compatible = "renesas,sdhi-r8a7791", .data = &of_rcar_gen2_compatible, }, 85 { .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, }, 86 { .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, }, 87 { .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, }, 88 { .compatible = "renesas,rcar-gen1-sdhi", .data = &of_rcar_gen1_compatible, }, 89 { .compatible = "renesas,rcar-gen2-sdhi", .data = &of_rcar_gen2_compatible, }, 90 { .compatible = "renesas,sdhi-shmobile" }, 91 {}, 92 }; 93 MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match); 94 95 static void renesas_sdhi_sys_dmac_enable_dma(struct tmio_mmc_host *host, 96 bool enable) 97 { 98 struct renesas_sdhi *priv = host_to_priv(host); 99 100 if (!host->chan_tx || !host->chan_rx) 101 return; 102 103 if (priv->dma_priv.enable) 104 priv->dma_priv.enable(host, enable); 105 } 106 107 static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host) 108 { 109 renesas_sdhi_sys_dmac_enable_dma(host, false); 110 111 if (host->chan_rx) 112 dmaengine_terminate_sync(host->chan_rx); 113 if (host->chan_tx) 114 dmaengine_terminate_sync(host->chan_tx); 115 116 renesas_sdhi_sys_dmac_enable_dma(host, true); 117 } 118 119 static void renesas_sdhi_sys_dmac_dataend_dma(struct tmio_mmc_host *host) 120 { 121 struct renesas_sdhi *priv = host_to_priv(host); 122 123 complete(&priv->dma_priv.dma_dataend); 124 } 125 126 static void renesas_sdhi_sys_dmac_dma_callback(void *arg) 127 { 128 struct tmio_mmc_host *host = arg; 129 struct renesas_sdhi *priv = host_to_priv(host); 130 131 spin_lock_irq(&host->lock); 132 133 if (!host->data) 134 goto out; 135 136 if (host->data->flags & MMC_DATA_READ) 137 dma_unmap_sg(host->chan_rx->device->dev, 138 host->sg_ptr, host->sg_len, 139 DMA_FROM_DEVICE); 140 else 141 dma_unmap_sg(host->chan_tx->device->dev, 142 host->sg_ptr, host->sg_len, 143 DMA_TO_DEVICE); 144 145 spin_unlock_irq(&host->lock); 146 147 wait_for_completion(&priv->dma_priv.dma_dataend); 148 149 spin_lock_irq(&host->lock); 150 tmio_mmc_do_data_irq(host); 151 out: 152 spin_unlock_irq(&host->lock); 153 } 154 155 static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host) 156 { 157 struct renesas_sdhi *priv = host_to_priv(host); 158 struct scatterlist *sg = host->sg_ptr, *sg_tmp; 159 struct dma_async_tx_descriptor *desc = NULL; 160 struct dma_chan *chan = host->chan_rx; 161 dma_cookie_t cookie; 162 int ret, i; 163 bool aligned = true, multiple = true; 164 unsigned int align = 1; /* 2-byte alignment */ 165 166 for_each_sg(sg, sg_tmp, host->sg_len, i) { 167 if (sg_tmp->offset & align) 168 aligned = false; 169 if (sg_tmp->length & align) { 170 multiple = false; 171 break; 172 } 173 } 174 175 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE || 176 (align & PAGE_MASK))) || !multiple) { 177 ret = -EINVAL; 178 goto pio; 179 } 180 181 if (sg->length < TMIO_MMC_MIN_DMA_LEN) 182 return; 183 184 /* The only sg element can be unaligned, use our bounce buffer then */ 185 if (!aligned) { 186 sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); 187 host->sg_ptr = &host->bounce_sg; 188 sg = host->sg_ptr; 189 } 190 191 ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); 192 if (ret > 0) 193 desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_DEV_TO_MEM, 194 DMA_CTRL_ACK); 195 196 if (desc) { 197 reinit_completion(&priv->dma_priv.dma_dataend); 198 desc->callback = renesas_sdhi_sys_dmac_dma_callback; 199 desc->callback_param = host; 200 201 cookie = dmaengine_submit(desc); 202 if (cookie < 0) { 203 desc = NULL; 204 ret = cookie; 205 } 206 host->dma_on = true; 207 } 208 pio: 209 if (!desc) { 210 /* DMA failed, fall back to PIO */ 211 renesas_sdhi_sys_dmac_enable_dma(host, false); 212 if (ret >= 0) 213 ret = -EIO; 214 host->chan_rx = NULL; 215 dma_release_channel(chan); 216 /* Free the Tx channel too */ 217 chan = host->chan_tx; 218 if (chan) { 219 host->chan_tx = NULL; 220 dma_release_channel(chan); 221 } 222 dev_warn(&host->pdev->dev, 223 "DMA failed: %d, falling back to PIO\n", ret); 224 } 225 } 226 227 static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host) 228 { 229 struct renesas_sdhi *priv = host_to_priv(host); 230 struct scatterlist *sg = host->sg_ptr, *sg_tmp; 231 struct dma_async_tx_descriptor *desc = NULL; 232 struct dma_chan *chan = host->chan_tx; 233 dma_cookie_t cookie; 234 int ret, i; 235 bool aligned = true, multiple = true; 236 unsigned int align = 1; /* 2-byte alignment */ 237 238 for_each_sg(sg, sg_tmp, host->sg_len, i) { 239 if (sg_tmp->offset & align) 240 aligned = false; 241 if (sg_tmp->length & align) { 242 multiple = false; 243 break; 244 } 245 } 246 247 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE || 248 (align & PAGE_MASK))) || !multiple) { 249 ret = -EINVAL; 250 goto pio; 251 } 252 253 if (sg->length < TMIO_MMC_MIN_DMA_LEN) 254 return; 255 256 /* The only sg element can be unaligned, use our bounce buffer then */ 257 if (!aligned) { 258 void *sg_vaddr = kmap_local_page(sg_page(sg)); 259 260 sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); 261 memcpy(host->bounce_buf, sg_vaddr + sg->offset, host->bounce_sg.length); 262 kunmap_local(sg_vaddr); 263 host->sg_ptr = &host->bounce_sg; 264 sg = host->sg_ptr; 265 } 266 267 ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); 268 if (ret > 0) 269 desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_MEM_TO_DEV, 270 DMA_CTRL_ACK); 271 272 if (desc) { 273 reinit_completion(&priv->dma_priv.dma_dataend); 274 desc->callback = renesas_sdhi_sys_dmac_dma_callback; 275 desc->callback_param = host; 276 277 cookie = dmaengine_submit(desc); 278 if (cookie < 0) { 279 desc = NULL; 280 ret = cookie; 281 } 282 host->dma_on = true; 283 } 284 pio: 285 if (!desc) { 286 /* DMA failed, fall back to PIO */ 287 renesas_sdhi_sys_dmac_enable_dma(host, false); 288 if (ret >= 0) 289 ret = -EIO; 290 host->chan_tx = NULL; 291 dma_release_channel(chan); 292 /* Free the Rx channel too */ 293 chan = host->chan_rx; 294 if (chan) { 295 host->chan_rx = NULL; 296 dma_release_channel(chan); 297 } 298 dev_warn(&host->pdev->dev, 299 "DMA failed: %d, falling back to PIO\n", ret); 300 } 301 } 302 303 static void renesas_sdhi_sys_dmac_start_dma(struct tmio_mmc_host *host, 304 struct mmc_data *data) 305 { 306 if (data->flags & MMC_DATA_READ) { 307 if (host->chan_rx) 308 renesas_sdhi_sys_dmac_start_dma_rx(host); 309 } else { 310 if (host->chan_tx) 311 renesas_sdhi_sys_dmac_start_dma_tx(host); 312 } 313 } 314 315 static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv) 316 { 317 struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; 318 struct dma_chan *chan = NULL; 319 320 spin_lock_irq(&host->lock); 321 322 if (host->data) { 323 if (host->data->flags & MMC_DATA_READ) 324 chan = host->chan_rx; 325 else 326 chan = host->chan_tx; 327 } 328 329 spin_unlock_irq(&host->lock); 330 331 tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND); 332 333 if (chan) 334 dma_async_issue_pending(chan); 335 } 336 337 static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host, 338 struct tmio_mmc_data *pdata) 339 { 340 struct renesas_sdhi *priv = host_to_priv(host); 341 342 /* We can only either use DMA for both Tx and Rx or not use it at all */ 343 if (!host->pdev->dev.of_node && 344 (!pdata->chan_priv_tx || !pdata->chan_priv_rx)) 345 return; 346 347 if (!host->chan_tx && !host->chan_rx) { 348 struct resource *res = platform_get_resource(host->pdev, 349 IORESOURCE_MEM, 0); 350 struct dma_slave_config cfg = {}; 351 dma_cap_mask_t mask; 352 int ret; 353 354 if (!res) 355 return; 356 357 dma_cap_zero(mask); 358 dma_cap_set(DMA_SLAVE, mask); 359 360 host->chan_tx = dma_request_slave_channel_compat(mask, 361 priv->dma_priv.filter, pdata->chan_priv_tx, 362 &host->pdev->dev, "tx"); 363 dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, 364 host->chan_tx); 365 366 if (!host->chan_tx) 367 return; 368 369 cfg.direction = DMA_MEM_TO_DEV; 370 cfg.dst_addr = res->start + 371 (CTL_SD_DATA_PORT << host->bus_shift); 372 cfg.dst_addr_width = priv->dma_priv.dma_buswidth; 373 if (!cfg.dst_addr_width) 374 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 375 cfg.src_addr = 0; 376 ret = dmaengine_slave_config(host->chan_tx, &cfg); 377 if (ret < 0) 378 goto ecfgtx; 379 380 host->chan_rx = dma_request_slave_channel_compat(mask, 381 priv->dma_priv.filter, pdata->chan_priv_rx, 382 &host->pdev->dev, "rx"); 383 dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, 384 host->chan_rx); 385 386 if (!host->chan_rx) 387 goto ereqrx; 388 389 cfg.direction = DMA_DEV_TO_MEM; 390 cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset; 391 cfg.src_addr_width = priv->dma_priv.dma_buswidth; 392 if (!cfg.src_addr_width) 393 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 394 cfg.dst_addr = 0; 395 ret = dmaengine_slave_config(host->chan_rx, &cfg); 396 if (ret < 0) 397 goto ecfgrx; 398 399 host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA); 400 if (!host->bounce_buf) 401 goto ebouncebuf; 402 403 init_completion(&priv->dma_priv.dma_dataend); 404 tasklet_init(&host->dma_issue, 405 renesas_sdhi_sys_dmac_issue_tasklet_fn, 406 (unsigned long)host); 407 } 408 409 renesas_sdhi_sys_dmac_enable_dma(host, true); 410 411 return; 412 413 ebouncebuf: 414 ecfgrx: 415 dma_release_channel(host->chan_rx); 416 host->chan_rx = NULL; 417 ereqrx: 418 ecfgtx: 419 dma_release_channel(host->chan_tx); 420 host->chan_tx = NULL; 421 } 422 423 static void renesas_sdhi_sys_dmac_release_dma(struct tmio_mmc_host *host) 424 { 425 if (host->chan_tx) { 426 struct dma_chan *chan = host->chan_tx; 427 428 host->chan_tx = NULL; 429 dma_release_channel(chan); 430 } 431 if (host->chan_rx) { 432 struct dma_chan *chan = host->chan_rx; 433 434 host->chan_rx = NULL; 435 dma_release_channel(chan); 436 } 437 if (host->bounce_buf) { 438 free_pages((unsigned long)host->bounce_buf, 0); 439 host->bounce_buf = NULL; 440 } 441 } 442 443 static const struct tmio_mmc_dma_ops renesas_sdhi_sys_dmac_dma_ops = { 444 .start = renesas_sdhi_sys_dmac_start_dma, 445 .enable = renesas_sdhi_sys_dmac_enable_dma, 446 .request = renesas_sdhi_sys_dmac_request_dma, 447 .release = renesas_sdhi_sys_dmac_release_dma, 448 .abort = renesas_sdhi_sys_dmac_abort_dma, 449 .dataend = renesas_sdhi_sys_dmac_dataend_dma, 450 }; 451 452 static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev) 453 { 454 return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops, 455 of_device_get_match_data(&pdev->dev), NULL); 456 } 457 458 static const struct dev_pm_ops renesas_sdhi_sys_dmac_dev_pm_ops = { 459 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 460 pm_runtime_force_resume) 461 SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend, 462 tmio_mmc_host_runtime_resume, 463 NULL) 464 }; 465 466 static struct platform_driver renesas_sys_dmac_sdhi_driver = { 467 .driver = { 468 .name = "sh_mobile_sdhi", 469 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 470 .pm = &renesas_sdhi_sys_dmac_dev_pm_ops, 471 .of_match_table = renesas_sdhi_sys_dmac_of_match, 472 }, 473 .probe = renesas_sdhi_sys_dmac_probe, 474 .remove_new = renesas_sdhi_remove, 475 }; 476 477 module_platform_driver(renesas_sys_dmac_sdhi_driver); 478 479 MODULE_DESCRIPTION("Renesas SDHI driver"); 480 MODULE_AUTHOR("Magnus Damm"); 481 MODULE_LICENSE("GPL v2"); 482 MODULE_ALIAS("platform:sh_mobile_sdhi"); 483