// SPDX-License-Identifier: GPL-2.0
/*
 * DMA support use of SYS DMAC with SDHI SD/SDIO controller
 *
 * Copyright (C) 2016-19 Renesas Electronics Corporation
 * Copyright (C) 2016-19 Sang Engineering, Wolfram Sang
 * Copyright (C) 2017 Horms Solutions, Simon Horman
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/host.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pagemap.h>
#include <linux/platform_data/tmio.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/sys_soc.h>

#include "renesas_sdhi.h"
#include "tmio_mmc.h"

#define TMIO_MMC_MIN_DMA_LEN 8

static const struct renesas_sdhi_of_data of_default_cfg = {
	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
};

static const struct renesas_sdhi_of_data of_rz_compatible = {
	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_32BIT_DATA_PORT |
		      TMIO_MMC_HAVE_CBSY,
	.tmio_ocr_mask = MMC_VDD_32_33,
	.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
			MMC_CAP_WAIT_WHILE_BUSY,
};

static const struct renesas_sdhi_of_data of_rcar_gen1_compatible = {
	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL,
	.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
			MMC_CAP_WAIT_WHILE_BUSY,
	.capabilities2 = MMC_CAP2_NO_WRITE_PROTECT,
};

/* Definitions for sampling clocks */
static struct renesas_sdhi_scc rcar_gen2_scc_taps[] = {
	{
		.clk_rate = 156000000,
		.tap = 0x00000703,
	},
	{
		.clk_rate = 0,
		.tap = 0x00000300,
	},
};

static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
	.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
		      TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
	.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
			MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY,
	.capabilities2 = MMC_CAP2_NO_WRITE_PROTECT,
	.dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES,
	.dma_rx_offset = 0x2000,
	.scc_offset = 0x0300,
	.taps = rcar_gen2_scc_taps,
	.taps_num = ARRAY_SIZE(rcar_gen2_scc_taps),
	.max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE,
};

static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = {
	{ .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
	{ .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
	{ .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, },
	{ .compatible = "renesas,sdhi-r7s72100", .data = &of_rz_compatible, },
	{ .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, },
	{ .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, },
	{ .compatible = "renesas,sdhi-r8a7743", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7745", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7791", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,rcar-gen1-sdhi", .data = &of_rcar_gen1_compatible, },
	{ .compatible = "renesas,rcar-gen2-sdhi", .data = &of_rcar_gen2_compatible, },
	{ .compatible = "renesas,sdhi-shmobile" },
	{},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match);

static void renesas_sdhi_sys_dmac_enable_dma(struct tmio_mmc_host *host,
					     bool enable)
{
	struct renesas_sdhi *priv = host_to_priv(host);

	if (!host->chan_tx || !host->chan_rx)
		return;

	if (priv->dma_priv.enable)
		priv->dma_priv.enable(host, enable);
}

static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host)
{
	renesas_sdhi_sys_dmac_enable_dma(host, false);

	if (host->chan_rx)
		dmaengine_terminate_sync(host->chan_rx);
	if (host->chan_tx)
		dmaengine_terminate_sync(host->chan_tx);

	renesas_sdhi_sys_dmac_enable_dma(host, true);
}

static void renesas_sdhi_sys_dmac_dataend_dma(struct tmio_mmc_host *host)
{
	struct renesas_sdhi *priv = host_to_priv(host);

	complete(&priv->dma_priv.dma_dataend);
}

static void renesas_sdhi_sys_dmac_dma_callback(void *arg)
{
	struct tmio_mmc_host *host = arg;
	struct renesas_sdhi *priv = host_to_priv(host);

	spin_lock_irq(&host->lock);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	spin_unlock_irq(&host->lock);

	wait_for_completion(&priv->dma_priv.dma_dataend);

	spin_lock_irq(&host->lock);
	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}

static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host)
{
	struct renesas_sdhi *priv = host_to_priv(host);
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = 1; /* 2-byte alignment */

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN)
		return;

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_DEV_TO_MEM,
					       DMA_CTRL_ACK);

	if (desc) {
		reinit_completion(&priv->dma_priv.dma_dataend);
		desc->callback = renesas_sdhi_sys_dmac_dma_callback;
		desc->callback_param = host;

		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
		host->dma_on = true;
	}
pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		renesas_sdhi_sys_dmac_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}
}

static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
{
	struct renesas_sdhi *priv = host_to_priv(host);
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = 1; /* 2-byte alignment */

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN)
		return;

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		void *sg_vaddr = kmap_local_page(sg_page(sg));

		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr + sg->offset, host->bounce_sg.length);
		kunmap_local(sg_vaddr);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_MEM_TO_DEV,
					       DMA_CTRL_ACK);

	if (desc) {
		reinit_completion(&priv->dma_priv.dma_dataend);
		desc->callback = renesas_sdhi_sys_dmac_dma_callback;
		desc->callback_param = host;

		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
		host->dma_on = true;
	}
pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		renesas_sdhi_sys_dmac_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}
}

static void renesas_sdhi_sys_dmac_start_dma(struct tmio_mmc_host *host,
					    struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			renesas_sdhi_sys_dmac_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			renesas_sdhi_sys_dmac_start_dma_tx(host);
	}
}

static void renesas_sdhi_sys_dmac_issue_work_fn(struct work_struct *work)
{
	struct tmio_mmc_host *host = from_work(host, work, dma_issue);
	struct dma_chan *chan = NULL;

	spin_lock_irq(&host->lock);

	if (host->data) {
		if (host->data->flags & MMC_DATA_READ)
			chan = host->chan_rx;
		else
			chan = host->chan_tx;
	}

	spin_unlock_irq(&host->lock);

	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

	if (chan)
		dma_async_issue_pending(chan);
}

static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
					      struct tmio_mmc_data *pdata)
{
	struct renesas_sdhi *priv = host_to_priv(host);

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (!host->pdev->dev.of_node &&
	    (!pdata->chan_priv_tx || !pdata->chan_priv_rx))
		return;

	if (!host->chan_tx && !host->chan_rx) {
		struct resource *res = platform_get_resource(host->pdev,
							     IORESOURCE_MEM, 0);
		struct dma_slave_config cfg = {};
		dma_cap_mask_t mask;
		int ret;

		if (!res)
			return;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_slave_channel_compat(mask,
					priv->dma_priv.filter, pdata->chan_priv_tx,
					&host->pdev->dev, "tx");
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		cfg.direction = DMA_MEM_TO_DEV;
		cfg.dst_addr = res->start +
			(CTL_SD_DATA_PORT << host->bus_shift);
		cfg.dst_addr_width = priv->dma_priv.dma_buswidth;
		if (!cfg.dst_addr_width)
			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.src_addr = 0;
		ret = dmaengine_slave_config(host->chan_tx, &cfg);
		if (ret < 0)
			goto ecfgtx;

		host->chan_rx = dma_request_slave_channel_compat(mask,
					priv->dma_priv.filter, pdata->chan_priv_rx,
					&host->pdev->dev, "rx");
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx)
			goto ereqrx;

		cfg.direction = DMA_DEV_TO_MEM;
		cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
		cfg.src_addr_width = priv->dma_priv.dma_buswidth;
		if (!cfg.src_addr_width)
			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.dst_addr = 0;
		ret = dmaengine_slave_config(host->chan_rx, &cfg);
		if (ret < 0)
			goto ecfgrx;

		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (!host->bounce_buf)
			goto ebouncebuf;

		init_completion(&priv->dma_priv.dma_dataend);
		INIT_WORK(&host->dma_issue,
			  renesas_sdhi_sys_dmac_issue_work_fn);
	}

	renesas_sdhi_sys_dmac_enable_dma(host, true);

	return;

ebouncebuf:
ecfgrx:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
ereqrx:
ecfgtx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}

static void renesas_sdhi_sys_dmac_release_dma(struct tmio_mmc_host *host)
{
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;

		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;

		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
	if (host->bounce_buf) {
		free_pages((unsigned long)host->bounce_buf, 0);
		host->bounce_buf = NULL;
	}
}

static const struct tmio_mmc_dma_ops renesas_sdhi_sys_dmac_dma_ops = {
	.start = renesas_sdhi_sys_dmac_start_dma,
	.enable = renesas_sdhi_sys_dmac_enable_dma,
	.request = renesas_sdhi_sys_dmac_request_dma,
	.release = renesas_sdhi_sys_dmac_release_dma,
	.abort = renesas_sdhi_sys_dmac_abort_dma,
	.dataend = renesas_sdhi_sys_dmac_dataend_dma,
};

static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
{
	return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops,
				  of_device_get_match_data(&pdev->dev), NULL);
}

static const struct dev_pm_ops renesas_sdhi_sys_dmac_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
			   tmio_mmc_host_runtime_resume,
			   NULL)
};

static struct platform_driver renesas_sys_dmac_sdhi_driver = {
	.driver = {
		.name = "sh_mobile_sdhi",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.pm = &renesas_sdhi_sys_dmac_dev_pm_ops,
		.of_match_table = renesas_sdhi_sys_dmac_of_match,
	},
	.probe = renesas_sdhi_sys_dmac_probe,
	.remove_new = renesas_sdhi_remove,
};

module_platform_driver(renesas_sys_dmac_sdhi_driver);

MODULE_DESCRIPTION("Renesas SDHI driver");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sh_mobile_sdhi");