// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2019 Linaro Ltd.
// Copyright (C) 2019 Socionext Inc.

#include <linux/bits.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/bitfield.h>

#include "virt-dma.h"

/* global register */
#define M10V_XDACS	0x00

/* channel local register */
#define M10V_XDTBC	0x10
#define M10V_XDSSA	0x14
#define M10V_XDDSA	0x18
#define M10V_XDSAC	0x1C
#define M10V_XDDAC	0x20
#define M10V_XDDCC	0x24
#define M10V_XDDES	0x28
#define M10V_XDDPC	0x2C
#define M10V_XDDSD	0x30

#define M10V_XDACS_XE	BIT(28)

#define M10V_DEFBS	0x3
#define M10V_DEFBL	0xf

#define M10V_XDSAC_SBS	GENMASK(17, 16)
#define M10V_XDSAC_SBL	GENMASK(11, 8)

#define M10V_XDDAC_DBS	GENMASK(17, 16)
#define M10V_XDDAC_DBL	GENMASK(11, 8)

#define M10V_XDDES_CE	BIT(28)
#define M10V_XDDES_SE	BIT(24)
#define M10V_XDDES_SA	BIT(15)
#define M10V_XDDES_TF	GENMASK(23, 20)
#define M10V_XDDES_EI	BIT(1)
#define M10V_XDDES_TI	BIT(0)

#define M10V_XDDSD_IS_MASK	GENMASK(3, 0)
#define M10V_XDDSD_IS_NORMAL	0x8

#define MLB_XDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

struct milbeaut_xdmac_desc {
	struct virt_dma_desc vd;
	size_t len;
	dma_addr_t src;
	dma_addr_t dst;
};

struct milbeaut_xdmac_chan {
	struct virt_dma_chan vc;
	struct milbeaut_xdmac_desc *md;
	void __iomem *reg_ch_base;
};

struct milbeaut_xdmac_device {
	struct dma_device ddev;
	void __iomem *reg_base;
	struct milbeaut_xdmac_chan channels[];
};

static struct milbeaut_xdmac_chan *
to_milbeaut_xdmac_chan(struct virt_dma_chan *vc)
{
	return container_of(vc, struct milbeaut_xdmac_chan, vc);
}

static struct milbeaut_xdmac_desc *
to_milbeaut_xdmac_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct milbeaut_xdmac_desc, vd);
}

/* mc->vc.lock must be held by caller */
static struct milbeaut_xdmac_desc *
milbeaut_xdmac_next_desc(struct milbeaut_xdmac_chan *mc)
{
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&mc->vc);
	if (!vd) {
		mc->md = NULL;
		return NULL;
	}

	list_del(&vd->node);

	mc->md = to_milbeaut_xdmac_desc(vd);

	return mc->md;
}

/* mc->vc.lock must be held by caller */
static void milbeaut_chan_start(struct milbeaut_xdmac_chan *mc,
				struct milbeaut_xdmac_desc *md)
{
	u32 val;

	/* Setup the channel */
	val = md->len - 1;
	writel_relaxed(val, mc->reg_ch_base + M10V_XDTBC);

	val = md->src;
	writel_relaxed(val, mc->reg_ch_base + M10V_XDSSA);

	val = md->dst;
	writel_relaxed(val, mc->reg_ch_base + M10V_XDDSA);

	val = readl_relaxed(mc->reg_ch_base + M10V_XDSAC);
	val &= ~(M10V_XDSAC_SBS | M10V_XDSAC_SBL);
	val |= FIELD_PREP(M10V_XDSAC_SBS, M10V_DEFBS) |
	       FIELD_PREP(M10V_XDSAC_SBL, M10V_DEFBL);
	writel_relaxed(val, mc->reg_ch_base + M10V_XDSAC);

	val = readl_relaxed(mc->reg_ch_base + M10V_XDDAC);
	val &= ~(M10V_XDDAC_DBS | M10V_XDDAC_DBL);
	val |= FIELD_PREP(M10V_XDDAC_DBS, M10V_DEFBS) |
	       FIELD_PREP(M10V_XDDAC_DBL, M10V_DEFBL);
	writel_relaxed(val, mc->reg_ch_base + M10V_XDDAC);

	/* Start the channel */
	val = readl_relaxed(mc->reg_ch_base + M10V_XDDES);
	val &= ~(M10V_XDDES_CE | M10V_XDDES_SE | M10V_XDDES_TF |
		 M10V_XDDES_EI | M10V_XDDES_TI);
	val |= FIELD_PREP(M10V_XDDES_CE, 1) | FIELD_PREP(M10V_XDDES_SE, 1) |
	       FIELD_PREP(M10V_XDDES_TF, 1) | FIELD_PREP(M10V_XDDES_EI, 1) |
	       FIELD_PREP(M10V_XDDES_TI, 1);
	writel_relaxed(val, mc->reg_ch_base + M10V_XDDES);
}
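/*
 * Note: completion is IRQ driven. When a transfer finishes,
 * milbeaut_xdmac_interrupt() acks the channel status, completes the
 * in-flight descriptor and re-arms the hardware with the next queued
 * descriptor via milbeaut_xdmac_start(), one descriptor at a time.
 */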
/* mc->vc.lock must be held by caller */
static void milbeaut_xdmac_start(struct milbeaut_xdmac_chan *mc)
{
	struct milbeaut_xdmac_desc *md;

	md = milbeaut_xdmac_next_desc(mc);
	if (md)
		milbeaut_chan_start(mc, md);
}

static irqreturn_t milbeaut_xdmac_interrupt(int irq, void *dev_id)
{
	struct milbeaut_xdmac_chan *mc = dev_id;
	struct milbeaut_xdmac_desc *md;
	u32 val;

	spin_lock(&mc->vc.lock);

	/* Ack and Stop */
	val = FIELD_PREP(M10V_XDDSD_IS_MASK, 0x0);
	writel_relaxed(val, mc->reg_ch_base + M10V_XDDSD);

	md = mc->md;
	if (!md)
		goto out;

	vchan_cookie_complete(&md->vd);

	milbeaut_xdmac_start(mc);
out:
	spin_unlock(&mc->vc.lock);
	return IRQ_HANDLED;
}

static void milbeaut_xdmac_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}

static struct dma_async_tx_descriptor *
milbeaut_xdmac_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct milbeaut_xdmac_desc *md;

	md = kzalloc(sizeof(*md), GFP_NOWAIT);
	if (!md)
		return NULL;

	md->len = len;
	md->src = src;
	md->dst = dst;

	return vchan_tx_prep(vc, &md->vd, flags);
}
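/*
 * A minimal client-side sketch for this memcpy-only engine, using the
 * generic dmaengine API (illustrative only, error handling omitted):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */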
static int milbeaut_xdmac_terminate_all(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct milbeaut_xdmac_chan *mc = to_milbeaut_xdmac_chan(vc);
	unsigned long flags;
	u32 val;

	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);

	/* Halt the channel */
	val = readl(mc->reg_ch_base + M10V_XDDES);
	val &= ~M10V_XDDES_CE;
	val |= FIELD_PREP(M10V_XDDES_CE, 0);
	writel(val, mc->reg_ch_base + M10V_XDDES);

	if (mc->md) {
		vchan_terminate_vdesc(&mc->md->vd);
		mc->md = NULL;
	}

	vchan_get_all_descriptors(vc, &head);

	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);

	return 0;
}

static void milbeaut_xdmac_synchronize(struct dma_chan *chan)
{
	vchan_synchronize(to_virt_chan(chan));
}

static void milbeaut_xdmac_issue_pending(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct milbeaut_xdmac_chan *mc = to_milbeaut_xdmac_chan(vc);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);

	if (vchan_issue_pending(vc) && !mc->md)
		milbeaut_xdmac_start(mc);

	spin_unlock_irqrestore(&vc->lock, flags);
}

static void milbeaut_xdmac_desc_free(struct virt_dma_desc *vd)
{
	kfree(to_milbeaut_xdmac_desc(vd));
}
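/*
 * Each channel is backed by its own platform IRQ (one per channel, in
 * order) and a 0x30-byte window of channel-local registers inside the
 * controller's register block, hence the chan_id * 0x30 stride below.
 */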
static int milbeaut_xdmac_chan_init(struct platform_device *pdev,
				    struct milbeaut_xdmac_device *mdev,
				    int chan_id)
{
	struct device *dev = &pdev->dev;
	struct milbeaut_xdmac_chan *mc = &mdev->channels[chan_id];
	char *irq_name;
	int irq, ret;

	irq = platform_get_irq(pdev, chan_id);
	if (irq < 0)
		return irq;

	irq_name = devm_kasprintf(dev, GFP_KERNEL, "milbeaut-xdmac-%d",
				  chan_id);
	if (!irq_name)
		return -ENOMEM;

	ret = devm_request_irq(dev, irq, milbeaut_xdmac_interrupt,
			       IRQF_SHARED, irq_name, mc);
	if (ret)
		return ret;

	mc->reg_ch_base = mdev->reg_base + chan_id * 0x30;

	mc->vc.desc_free = milbeaut_xdmac_desc_free;
	vchan_init(&mc->vc, &mdev->ddev);

	return 0;
}

static void enable_xdmac(struct milbeaut_xdmac_device *mdev)
{
	unsigned int val;

	val = readl(mdev->reg_base + M10V_XDACS);
	val |= M10V_XDACS_XE;
	writel(val, mdev->reg_base + M10V_XDACS);
}

static void disable_xdmac(struct milbeaut_xdmac_device *mdev)
{
	unsigned int val;

	val = readl(mdev->reg_base + M10V_XDACS);
	val &= ~M10V_XDACS_XE;
	writel(val, mdev->reg_base + M10V_XDACS);
}

static int milbeaut_xdmac_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct milbeaut_xdmac_device *mdev;
	struct dma_device *ddev;
	int nr_chans, ret, i;

	nr_chans = platform_irq_count(pdev);
	if (nr_chans < 0)
		return nr_chans;

	mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans),
			    GFP_KERNEL);
	if (!mdev)
		return -ENOMEM;

	mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdev->reg_base))
		return PTR_ERR(mdev->reg_base);

	ddev = &mdev->ddev;
	ddev->dev = dev;
	dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
	ddev->src_addr_widths = MLB_XDMAC_BUSWIDTHS;
	ddev->dst_addr_widths = MLB_XDMAC_BUSWIDTHS;
	ddev->device_free_chan_resources = milbeaut_xdmac_free_chan_resources;
	ddev->device_prep_dma_memcpy = milbeaut_xdmac_prep_memcpy;
	ddev->device_terminate_all = milbeaut_xdmac_terminate_all;
	ddev->device_synchronize = milbeaut_xdmac_synchronize;
	ddev->device_tx_status = dma_cookie_status;
	ddev->device_issue_pending = milbeaut_xdmac_issue_pending;
	INIT_LIST_HEAD(&ddev->channels);

	for (i = 0; i < nr_chans; i++) {
		ret = milbeaut_xdmac_chan_init(pdev, mdev, i);
		if (ret)
			return ret;
	}

	enable_xdmac(mdev);

	ret = dma_async_device_register(ddev);
	if (ret)
		goto disable_xdmac;

	ret = of_dma_controller_register(dev->of_node,
					 of_dma_simple_xlate, mdev);
	if (ret)
		goto unregister_dmac;

	platform_set_drvdata(pdev, mdev);

	return 0;

unregister_dmac:
	dma_async_device_unregister(ddev);
disable_xdmac:
	disable_xdmac(mdev);
	return ret;
}

static void milbeaut_xdmac_remove(struct platform_device *pdev)
{
	struct milbeaut_xdmac_device *mdev = platform_get_drvdata(pdev);
	struct dma_chan *chan;
	int ret;

	/*
	 * Before reaching here, almost all descriptors have been freed by the
	 * ->device_free_chan_resources() hook. However, each channel might
	 * still be holding one descriptor that was in-flight at that moment.
	 * Terminate it to make sure this hardware is no longer running. Then,
	 * free the channel resources once again to avoid a memory leak.
	 */
	list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
		ret = dmaengine_terminate_sync(chan);
		if (ret) {
			/*
			 * This results in resource leakage and maybe also
			 * use-after-free errors as e.g. *mdev is kfreed.
			 */
			dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
				  chan->chan_id, ERR_PTR(ret));
			return;
		}
		milbeaut_xdmac_free_chan_resources(chan);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&mdev->ddev);

	disable_xdmac(mdev);
}

static const struct of_device_id milbeaut_xdmac_match[] = {
	{ .compatible = "socionext,milbeaut-m10v-xdmac" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, milbeaut_xdmac_match);

static struct platform_driver milbeaut_xdmac_driver = {
	.probe = milbeaut_xdmac_probe,
	.remove_new = milbeaut_xdmac_remove,
	.driver = {
		.name = "milbeaut-m10v-xdmac",
		.of_match_table = milbeaut_xdmac_match,
	},
};
module_platform_driver(milbeaut_xdmac_driver);

MODULE_DESCRIPTION("Milbeaut XDMAC DmaEngine driver");
MODULE_LICENSE("GPL v2");