// SPDX-License-Identifier: GPL-2.0
/*
 * Platform driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * Some parts of this driver are derived from the original dw_dmac.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/acpi.h>

#include "internal.h"

#define DRV_NAME	"dw_dmac"

static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct dw_dma *dw = ofdma->of_dma_data;
	struct dw_dma_slave slave = {
		.dma_dev = dw->dma.dev,
	};
	dma_cap_mask_t cap;

	if (dma_spec->args_count != 3)
		return NULL;

	slave.src_id = dma_spec->args[0];
	slave.dst_id = dma_spec->args[0];
	slave.m_master = dma_spec->args[1];
	slave.p_master = dma_spec->args[2];

	if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.m_master >= dw->pdata->nr_masters ||
		    slave.p_master >= dw->pdata->nr_masters))
		return NULL;

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);

	/* TODO: there should be a simpler way to do this */
	return dma_request_channel(cap, dw_dma_filter, &slave);
}

#ifdef CONFIG_OF
static struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct dw_dma_platform_data *pdata;
	u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
	u32 nr_masters;
	u32 nr_channels;

	if (!np) {
		dev_err(&pdev->dev, "Missing DT data\n");
		return NULL;
	}

	if (of_property_read_u32(np, "dma-masters", &nr_masters))
		return NULL;
	if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
		return NULL;

	if (of_property_read_u32(np, "dma-channels", &nr_channels))
		return NULL;
	if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return NULL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->nr_masters = nr_masters;
	pdata->nr_channels = nr_channels;

	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
		pdata->chan_allocation_order = (unsigned char)tmp;

	if (!of_property_read_u32(np, "chan_priority", &tmp))
		pdata->chan_priority = tmp;

	if (!of_property_read_u32(np, "block_size", &tmp))
		pdata->block_size = tmp;

	if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = arr[tmp];
	} else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
	}

	if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
		for (tmp = 0; tmp < nr_channels; tmp++)
			pdata->multi_block[tmp] = mb[tmp];
	} else {
		for (tmp = 0; tmp < nr_channels; tmp++)
			pdata->multi_block[tmp] = 1;
	}

	if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) {
		if (tmp > CHAN_PROTCTL_MASK)
			return NULL;
		pdata->protctl = tmp;
	}

	return pdata;
}
#else
static inline struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	return NULL;
}
#endif

static int dw_probe(struct platform_device *pdev)
{
	const struct dw_dma_chip_pdata *match;
	struct dw_dma_chip_pdata *data;
	struct dw_dma_chip *chip;
	struct device *dev = &pdev->dev;
	int err;

	match = device_get_match_data(dev);
	if (!match)
		return -ENODEV;

	data = devm_kmemdup(&pdev->dev, match, sizeof(*match), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	chip->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	if (!data->pdata)
		data->pdata = dev_get_platdata(dev);
	if (!data->pdata)
		data->pdata = dw_dma_parse_dt(pdev);

	chip->dev = dev;
	chip->id = pdev->id;
	chip->pdata = data->pdata;

	data->chip = chip;

	chip->clk = devm_clk_get(chip->dev, "hclk");
	if (IS_ERR(chip->clk))
		return PTR_ERR(chip->clk);
	err = clk_prepare_enable(chip->clk);
	if (err)
		return err;

	pm_runtime_enable(&pdev->dev);

	err = data->probe(chip);
	if (err)
		goto err_dw_dma_probe;

	platform_set_drvdata(pdev, data);

	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 dw_dma_of_xlate, chip->dw);
		if (err)
			dev_err(&pdev->dev,
				"could not register of_dma_controller\n");
	}

	dw_dma_acpi_controller_register(chip->dw);

	return 0;

err_dw_dma_probe:
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);
	return err;
}

static int dw_remove(struct platform_device *pdev)
{
	struct dw_dma_chip_pdata *data = platform_get_drvdata(pdev);
	struct dw_dma_chip *chip = data->chip;
	int ret;

	dw_dma_acpi_controller_free(chip->dw);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	ret = data->remove(chip);
	if (ret)
		dev_warn(chip->dev, "can't remove device properly: %d\n", ret);

	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma_chip_pdata *data = platform_get_drvdata(pdev);
	struct dw_dma_chip *chip = data->chip;

	/*
	 * We have to call do_dw_dma_disable() to stop any ongoing transfer. On
	 * some platforms we can't do that since DMA device is powered off.
	 * Moreover we have no possibility to check if the platform is affected
	 * or not. That's why we call pm_runtime_get_sync() / pm_runtime_put()
	 * unconditionally. On the other hand we can't use
	 * pm_runtime_suspended() because runtime PM framework is not fully
	 * used by the driver.
	 */
	pm_runtime_get_sync(chip->dev);
	do_dw_dma_disable(chip);
	pm_runtime_put_sync_suspend(chip->dev);

	clk_disable_unprepare(chip->clk);
}

#ifdef CONFIG_OF
static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "snps,dma-spear1340", .data = &dw_dma_chip_pdata },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#endif

#ifdef CONFIG_ACPI
static const struct acpi_device_id dw_dma_acpi_id_table[] = {
	{ "INTL9C60", (kernel_ulong_t)&dw_dma_chip_pdata },
	{ "80862286", (kernel_ulong_t)&dw_dma_chip_pdata },
	{ "808622C0", (kernel_ulong_t)&dw_dma_chip_pdata },

	/* Elkhart Lake iDMA 32-bit (PSE DMA) */
	{ "80864BB4", (kernel_ulong_t)&idma32_chip_pdata },
	{ "80864BB5", (kernel_ulong_t)&idma32_chip_pdata },
	{ "80864BB6", (kernel_ulong_t)&idma32_chip_pdata },

	{ }
};
MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
#endif

#ifdef CONFIG_PM_SLEEP

static int dw_suspend_late(struct device *dev)
{
	struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
	struct dw_dma_chip *chip = data->chip;

	do_dw_dma_disable(chip);
	clk_disable_unprepare(chip->clk);

	return 0;
}

static int dw_resume_early(struct device *dev)
{
	struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
	struct dw_dma_chip *chip = data->chip;
	int ret;

	ret = clk_prepare_enable(chip->clk);
	if (ret)
		return ret;

	return do_dw_dma_enable(chip);
}

#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops dw_dev_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_suspend_late, dw_resume_early)
};

static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove		= dw_remove,
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= DRV_NAME,
		.pm	= &dw_dev_pm_ops,
		.of_match_table = of_match_ptr(dw_dma_of_id_table),
		.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
	},
};

static int __init dw_init(void)
{
	return platform_driver_register(&dw_driver);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
MODULE_ALIAS("platform:" DRV_NAME);
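/*
 * Illustrative sketch only, not part of the driver: a device tree fragment
 * of the kind dw_dma_parse_dt() above would accept, plus a consumer node
 * using the three-cell specifier decoded by dw_dma_of_xlate() (cell 0 is
 * the request line used for both src_id and dst_id, cell 1 the memory-side
 * master, cell 2 the peripheral-side master). Node names, addresses and
 * property values are placeholders, not recommendations for any SoC; only
 * the property names and the "hclk" clock handle come from the code above.
 *
 *	dmac0: dma-controller@fc000000 {
 *		compatible = "snps,dma-spear1340";
 *		reg = <0xfc000000 0x1000>;
 *		interrupts = <12>;
 *		clocks = <&ahb_clk>;
 *		clock-names = "hclk";
 *		#dma-cells = <3>;
 *		dma-channels = <8>;
 *		dma-masters = <2>;
 *		chan_allocation_order = <1>;
 *		chan_priority = <1>;
 *		block_size = <0xfff>;
 *		data-width = <8 8>;
 *		multi-block = <1 1 1 1 1 1 1 1>;
 *	};
 *
 *	serial@d0000000 {
 *		...
 *		dmas = <&dmac0 12 0 1>;		request 12, m_master 0, p_master 1
 *		dma-names = "rx";
 *	};
 */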