// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2019 Analog Devices Inc.
 * Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/fpga/adi-axi-common.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
 * interconnect, which allows access to system memory, or it can be connected to
 * a dedicated bus which is directly connected to a data port on a peripheral.
 * Given that those are configuration options of the core that are selected when
 * it is instantiated, they can not be changed by software at runtime. By
 * extension this means that each channel is uni-directional. It can either be
 * device to memory or memory to device, but not both. Also, since the device
 * side is a dedicated data bus only connected to a single peripheral, there is
 * no address that can or needs to be configured for the device side.
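 *
 * From a client driver's point of view each channel is a regular dmaengine
 * slave channel. A minimal usage sketch, with purely illustrative names (the
 * channel name, callback and client are hypothetical) and assuming the client
 * has already DMA-mapped its scatterlist, looks roughly like:
 *
 *	chan = dma_request_chan(dev, "rx");
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
 *				       DMA_PREP_INTERRUPT);
 *	desc->callback = rx_done;
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);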
 */

#define AXI_DMAC_REG_INTERFACE_DESC	0x10
#define   AXI_DMAC_DMA_SRC_TYPE_MSK	GENMASK(13, 12)
#define   AXI_DMAC_DMA_SRC_TYPE_GET(x)	FIELD_GET(AXI_DMAC_DMA_SRC_TYPE_MSK, x)
#define   AXI_DMAC_DMA_SRC_WIDTH_MSK	GENMASK(11, 8)
#define   AXI_DMAC_DMA_SRC_WIDTH_GET(x)	FIELD_GET(AXI_DMAC_DMA_SRC_WIDTH_MSK, x)
#define   AXI_DMAC_DMA_DST_TYPE_MSK	GENMASK(5, 4)
#define   AXI_DMAC_DMA_DST_TYPE_GET(x)	FIELD_GET(AXI_DMAC_DMA_DST_TYPE_MSK, x)
#define   AXI_DMAC_DMA_DST_WIDTH_MSK	GENMASK(3, 0)
#define   AXI_DMAC_DMA_DST_WIDTH_GET(x)	FIELD_GET(AXI_DMAC_DMA_DST_WIDTH_MSK, x)
#define AXI_DMAC_REG_COHERENCY_DESC	0x14
#define   AXI_DMAC_DST_COHERENT_MSK	BIT(0)
#define   AXI_DMAC_DST_COHERENT_GET(x)	FIELD_GET(AXI_DMAC_DST_COHERENT_MSK, x)

#define AXI_DMAC_REG_IRQ_MASK		0x80
#define AXI_DMAC_REG_IRQ_PENDING	0x84
#define AXI_DMAC_REG_IRQ_SOURCE		0x88

#define AXI_DMAC_REG_CTRL		0x400
#define AXI_DMAC_REG_TRANSFER_ID	0x404
#define AXI_DMAC_REG_START_TRANSFER	0x408
#define AXI_DMAC_REG_FLAGS		0x40c
#define AXI_DMAC_REG_DEST_ADDRESS	0x410
#define AXI_DMAC_REG_SRC_ADDRESS	0x414
#define AXI_DMAC_REG_X_LENGTH		0x418
#define AXI_DMAC_REG_Y_LENGTH		0x41c
#define AXI_DMAC_REG_DEST_STRIDE	0x420
#define AXI_DMAC_REG_SRC_STRIDE		0x424
#define AXI_DMAC_REG_TRANSFER_DONE	0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID	0x42c
#define AXI_DMAC_REG_STATUS		0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438
#define AXI_DMAC_REG_PARTIAL_XFER_LEN	0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID	0x450
#define AXI_DMAC_REG_CURRENT_SG_ID	0x454
#define AXI_DMAC_REG_SG_ADDRESS		0x47c
#define AXI_DMAC_REG_SG_ADDRESS_HIGH	0x4bc

#define AXI_DMAC_CTRL_ENABLE		BIT(0)
#define AXI_DMAC_CTRL_PAUSE		BIT(1)
#define AXI_DMAC_CTRL_ENABLE_SG		BIT(2)

#define AXI_DMAC_IRQ_SOT		BIT(0)
#define AXI_DMAC_IRQ_EOT		BIT(1)

#define AXI_DMAC_FLAG_CYCLIC		BIT(0)
#define AXI_DMAC_FLAG_LAST		BIT(1)
#define AXI_DMAC_FLAG_PARTIAL_REPORT	BIT(2)

#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE	BIT(31)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U

/* Flags for axi_dmac_hw_desc.flags */
#define AXI_DMAC_HW_FLAG_LAST		BIT(0)
#define AXI_DMAC_HW_FLAG_IRQ		BIT(1)

struct axi_dmac_hw_desc {
	u32 flags;
	u32 id;
	u64 dest_addr;
	u64 src_addr;
	u64 next_sg_addr;
	u32 y_len;
	u32 x_len;
	u32 src_stride;
	u32 dst_stride;
	u64 __pad[2];
};

struct axi_dmac_sg {
	unsigned int partial_len;
	bool schedule_when_free;

	struct axi_dmac_hw_desc *hw;
	dma_addr_t hw_phys;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	struct axi_dmac_chan *chan;

	bool cyclic;
	bool have_partial_xfer;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[] __counted_by(num_sgs);
};

struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int address_align_mask;
	unsigned int length_align_mask;

	bool hw_partial_xfer;
	bool hw_cyclic;
	bool hw_2d;
	bool hw_sg;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;
};

static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}

static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}

static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0)
		return false;
	if ((len & chan->length_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->address_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	if (!chan->hw_sg) {
		val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
		if (val) /* Queue is full, wait for the next SOT IRQ */
			return;
	}

	desc = chan->next_desc;

	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	/* Already queued in cyclic mode. Wait for it to finish */
	if (sg->hw->id != AXI_DMAC_SG_UNUSED) {
		sg->schedule_when_free = true;
		return;
	}

	if (chan->hw_sg) {
		chan->next_desc = NULL;
	} else if (++desc->num_submitted == desc->num_sgs ||
		   desc->have_partial_xfer) {
		if (desc->cyclic)
			desc->num_submitted = 0; /* Start again */
		else
			chan->next_desc = NULL;
		flags |= AXI_DMAC_FLAG_LAST;
	} else {
		chan->next_desc = desc;
	}

	sg->hw->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (!chan->hw_sg) {
		if (axi_dmac_dest_is_mem(chan)) {
			axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->hw->dest_addr);
			axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->hw->dst_stride);
		}

		if (axi_dmac_src_is_mem(chan)) {
			axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->hw->src_addr);
			axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->hw->src_stride);
		}
	}

	/*
	 * If the hardware supports cyclic transfers and there is no callback to
	 * call, enable hw cyclic mode to avoid unnecessary interrupts.
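	 * With hardware scatter-gather support this is done by clearing the IRQ
	 * flag on the last descriptor of the chain; in register mode it is only
	 * possible for single-segment transfers, by setting the CYCLIC flag.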
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback) {
		if (chan->hw_sg)
			desc->sg[desc->num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_IRQ;
		else if (desc->num_sgs == 1)
			flags |= AXI_DMAC_FLAG_CYCLIC;
	}

	if (chan->hw_partial_xfer)
		flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;

	if (chan->hw_sg) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS, (u32)sg->hw_phys);
		axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS_HIGH,
			       (u64)sg->hw_phys >> 32);
	} else {
		axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->hw->x_len);
		axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->hw->y_len);
	}
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}

static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}

static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
	struct axi_dmac_sg *sg)
{
	if (chan->hw_2d)
		return (sg->hw->x_len + 1) * (sg->hw->y_len + 1);
	else
		return (sg->hw->x_len + 1);
}

static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	u32 xfer_done, len, id, i;
	bool found_sg;

	do {
		len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN);
		id = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID);

		found_sg = false;
		list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
			for (i = 0; i < desc->num_sgs; i++) {
				sg = &desc->sg[i];
				if (sg->hw->id == AXI_DMAC_SG_UNUSED)
					continue;
				if (sg->hw->id == id) {
					desc->have_partial_xfer = true;
					sg->partial_len = len;
					found_sg = true;
					break;
				}
			}
			if (found_sg)
				break;
		}

		if (found_sg) {
			dev_dbg(dmac->dma_dev.dev,
				"Found partial segment id=%u, len=%u\n",
				id, len);
		} else {
			dev_warn(dmac->dma_dev.dev,
				 "Did not find partial segment id=%u, len=%u\n",
				 id, len);
		}

		/* Check if we have any more partial transfers */
		xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE);

	} while (!xfer_done);
}

static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
	struct axi_dmac_desc *active)
{
	struct dmaengine_result *rslt = &active->vdesc.tx_result;
	unsigned int start = active->num_completed - 1;
	struct axi_dmac_sg *sg;
	unsigned int i, total;

	rslt->result = DMA_TRANS_NOERROR;
	rslt->residue = 0;

	if (chan->hw_sg)
		return;

	/*
	 * We get here if the last completed segment is partial, which
	 * means we can compute the residue from that segment onwards
	 */
	for (i = start; i < active->num_sgs; i++) {
		sg = &active->sg[i];
		total = axi_dmac_total_sg_bytes(chan, sg);
		rslt->residue += (total - sg->partial_len);
	}
}

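/*
 * Called from the EOT interrupt. In register mode completed_transfers is the
 * TRANSFER_DONE bitmap, where bit N is set once the queued segment with
 * hardware ID N has finished; segments are completed in submission order.
 * With hardware scatter-gather the interrupt signals completion of the whole
 * active descriptor chain.
 */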
static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;
	bool start_next = false;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return false;

	if (chan->hw_partial_xfer &&
	    (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
		axi_dmac_dequeue_partial_xfers(chan);

	if (chan->hw_sg) {
		if (active->cyclic) {
			vchan_cyclic_callback(&active->vdesc);
		} else {
			list_del(&active->vdesc.node);
			vchan_cookie_complete(&active->vdesc);
			active = axi_dmac_active_desc(chan);
			start_next = !!active;
		}
	} else {
		do {
			sg = &active->sg[active->num_completed];
			if (sg->hw->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
				break;
			if (!(BIT(sg->hw->id) & completed_transfers))
				break;
			active->num_completed++;
			sg->hw->id = AXI_DMAC_SG_UNUSED;
			if (sg->schedule_when_free) {
				sg->schedule_when_free = false;
				start_next = true;
			}

			if (sg->partial_len)
				axi_dmac_compute_residue(chan, active);

			if (active->cyclic)
				vchan_cyclic_callback(&active->vdesc);

			if (active->num_completed == active->num_sgs ||
			    sg->partial_len) {
				if (active->cyclic) {
					active->num_completed = 0; /* wrap around */
				} else {
					list_del(&active->vdesc.node);
					vchan_cookie_complete(&active->vdesc);
					active = axi_dmac_active_desc(chan);
				}
			}
		} while (active);
	}

	return start_next;
}

static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;
	bool start_next = false;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	if (!pending)
		return IRQ_NONE;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}

static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void axi_dmac_synchronize(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	u32 ctrl = AXI_DMAC_CTRL_ENABLE;

	if (chan->hw_sg)
		ctrl |= AXI_DMAC_CTRL_ENABLE_SG;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, ctrl);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

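/*
 * Allocate the software descriptor together with one hardware descriptor per
 * segment. The hardware descriptors are placed in a single coherent buffer
 * and pre-linked through next_sg_addr so the scatter-gather engine can fetch
 * them on its own; only the last descriptor is flagged to raise an interrupt.
 */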
static struct axi_dmac_desc *
axi_dmac_alloc_desc(struct axi_dmac_chan *chan, unsigned int num_sgs)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct device *dev = dmac->dma_dev.dev;
	struct axi_dmac_hw_desc *hws;
	struct axi_dmac_desc *desc;
	dma_addr_t hw_phys;
	unsigned int i;

	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;
	desc->num_sgs = num_sgs;
	desc->chan = chan;

	hws = dma_alloc_coherent(dev, PAGE_ALIGN(num_sgs * sizeof(*hws)),
				 &hw_phys, GFP_ATOMIC);
	if (!hws) {
		kfree(desc);
		return NULL;
	}

	for (i = 0; i < num_sgs; i++) {
		desc->sg[i].hw = &hws[i];
		desc->sg[i].hw_phys = hw_phys + i * sizeof(*hws);

		hws[i].id = AXI_DMAC_SG_UNUSED;
		hws[i].flags = 0;

		/* Link hardware descriptors */
		hws[i].next_sg_addr = hw_phys + (i + 1) * sizeof(*hws);
	}

	/* The last hardware descriptor will trigger an interrupt */
	desc->sg[num_sgs - 1].hw->flags = AXI_DMAC_HW_FLAG_LAST | AXI_DMAC_HW_FLAG_IRQ;

	return desc;
}

static void axi_dmac_free_desc(struct axi_dmac_desc *desc)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(desc->chan);
	struct device *dev = dmac->dma_dev.dev;
	struct axi_dmac_hw_desc *hw = desc->sg[0].hw;
	dma_addr_t hw_phys = desc->sg[0].hw_phys;

	dma_free_coherent(dev, PAGE_ALIGN(desc->num_sgs * sizeof(*hw)),
			  hw, hw_phys);
	kfree(desc);
}

static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
	enum dma_transfer_direction direction, dma_addr_t addr,
	unsigned int num_periods, unsigned int period_len,
	struct axi_dmac_sg *sg)
{
	unsigned int num_segments, i;
	unsigned int segment_size;
	unsigned int len;

	/* Split into multiple equally sized segments if necessary */
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
	segment_size = DIV_ROUND_UP(period_len, num_segments);
	/* Take care of alignment */
	segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;

	for (i = 0; i < num_periods; i++) {
		for (len = period_len; len > segment_size; sg++) {
			if (direction == DMA_DEV_TO_MEM)
				sg->hw->dest_addr = addr;
			else
				sg->hw->src_addr = addr;
			sg->hw->x_len = segment_size - 1;
			sg->hw->y_len = 0;
			sg->hw->flags = 0;
			addr += segment_size;
			len -= segment_size;
		}

		if (direction == DMA_DEV_TO_MEM)
			sg->hw->dest_addr = addr;
		else
			sg->hw->src_addr = addr;
		sg->hw->x_len = len - 1;
		sg->hw->y_len = 0;
		sg++;
		addr += len;
	}

	return sg;
}

static struct dma_async_tx_descriptor *
axi_dmac_prep_peripheral_dma_vec(struct dma_chan *c, const struct dma_vec *vecs,
				 size_t nb, enum dma_transfer_direction direction,
				 unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_sgs = 0;
	struct axi_dmac_sg *dsg;
	size_t i;

	if (direction != chan->direction)
		return NULL;

	for (i = 0; i < nb; i++)
		num_sgs += DIV_ROUND_UP(vecs[i].len, chan->max_length);

	desc = axi_dmac_alloc_desc(chan, num_sgs);
	if (!desc)
		return NULL;

	dsg = desc->sg;

	for (i = 0; i < nb; i++) {
		if (!axi_dmac_check_addr(chan, vecs[i].addr) ||
		    !axi_dmac_check_len(chan, vecs[i].len)) {
			/* Free the hardware descriptors as well, not just the
			 * software descriptor. */
			axi_dmac_free_desc(desc);
			return NULL;
		}

		dsg = axi_dmac_fill_linear_sg(chan, direction, vecs[i].addr, 1,
					      vecs[i].len, dsg);
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *dsg;
	struct scatterlist *sg;
	unsigned int num_sgs;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	num_sgs = 0;
	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);

	desc = axi_dmac_alloc_desc(chan, num_sgs);
	if (!desc)
		return NULL;

	dsg = desc->sg;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			axi_dmac_free_desc(desc);
			return NULL;
		}

		dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
					      sg_dma_len(sg), dsg);
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, num_segments, num_sgs;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
	num_sgs = num_periods * num_segments;

	desc = axi_dmac_alloc_desc(chan, num_sgs);
	if (!desc)
		return NULL;

	/* Chain the last descriptor to the first, and remove its "last" flag */
	desc->sg[num_sgs - 1].hw->next_sg_addr = desc->sg[0].hw_phys;
	desc->sg[num_sgs - 1].hw->flags &= ~AXI_DMAC_HW_FLAG_LAST;

	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
				period_len, desc->sg);

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

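/*
 * Interleaved transfers map directly onto the core's 2D support: the chunk
 * size programs X_LENGTH, the frame count programs Y_LENGTH and the
 * inter-chunk gap is folded into the source/destination stride. Cores built
 * without 2D support can only handle templates that describe a single
 * contiguous region.
 */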
static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    xt->numf == 0)
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(chan, 1);
	if (!desc)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].hw->src_addr = xt->src_start;
		desc->sg[0].hw->src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].hw->dest_addr = xt->dst_start;
		desc->sg[0].hw->dst_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].hw->x_len = xt->sgl[0].size - 1;
		desc->sg[0].hw->y_len = xt->numf - 1;
	} else {
		desc->sg[0].hw->x_len = xt->sgl[0].size * xt->numf - 1;
		desc->sg[0].hw->y_len = 0;
	}

	if (flags & DMA_CYCLIC)
		desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	axi_dmac_free_desc(to_axi_dmac_desc(vdesc));
}

static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case AXI_DMAC_REG_IRQ_MASK:
	case AXI_DMAC_REG_IRQ_SOURCE:
	case AXI_DMAC_REG_IRQ_PENDING:
	case AXI_DMAC_REG_CTRL:
	case AXI_DMAC_REG_TRANSFER_ID:
	case AXI_DMAC_REG_START_TRANSFER:
	case AXI_DMAC_REG_FLAGS:
	case AXI_DMAC_REG_DEST_ADDRESS:
	case AXI_DMAC_REG_SRC_ADDRESS:
	case AXI_DMAC_REG_X_LENGTH:
	case AXI_DMAC_REG_Y_LENGTH:
	case AXI_DMAC_REG_DEST_STRIDE:
	case AXI_DMAC_REG_SRC_STRIDE:
	case AXI_DMAC_REG_TRANSFER_DONE:
	case AXI_DMAC_REG_ACTIVE_TRANSFER_ID:
	case AXI_DMAC_REG_STATUS:
	case AXI_DMAC_REG_CURRENT_SRC_ADDR:
	case AXI_DMAC_REG_CURRENT_DEST_ADDR:
	case AXI_DMAC_REG_PARTIAL_XFER_LEN:
	case AXI_DMAC_REG_PARTIAL_XFER_ID:
	case AXI_DMAC_REG_CURRENT_SG_ID:
	case AXI_DMAC_REG_SG_ADDRESS:
	case AXI_DMAC_REG_SG_ADDRESS_HIGH:
		return true;
	default:
		return false;
	}
}

static const struct regmap_config axi_dmac_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = AXI_DMAC_REG_PARTIAL_XFER_ID,
	.readable_reg = axi_dmac_regmap_rdwr,
	.writeable_reg = axi_dmac_regmap_rdwr,
};

static void axi_dmac_adjust_chan_params(struct axi_dmac_chan *chan)
{
	chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;
}

/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
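 *
 * An illustrative sketch (names and values are hypothetical, not taken from a
 * real board) of the channel description parsed below might look like:
 *
 *	adi,channels {
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *
 *		dma-channel@0 {
 *			reg = <0>;
 *			adi,source-bus-type = <AXI_DMAC_BUS_TYPE_AXI_MM>;
 *			adi,source-bus-width = <64>;
 *			adi,destination-bus-type = <AXI_DMAC_BUS_TYPE_AXI_STREAM>;
 *			adi,destination-bus-width = <64>;
 *		};
 *	};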
 */
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}

static int axi_dmac_parse_dt(struct device *dev, struct axi_dmac *dmac)
{
	struct device_node *of_channels, *of_chan;
	int ret;

	of_channels = of_get_child_by_name(dev->of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	return 0;
}

static int axi_dmac_read_chan_config(struct device *dev, struct axi_dmac *dmac)
{
	struct axi_dmac_chan *chan = &dmac->chan;
	unsigned int val, desc;

	desc = axi_dmac_read(dmac, AXI_DMAC_REG_INTERFACE_DESC);
	if (desc == 0) {
		dev_err(dev, "DMA interface register reads zero\n");
		return -EFAULT;
	}

	val = AXI_DMAC_DMA_SRC_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid source bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->src_type = val;

	val = AXI_DMAC_DMA_DST_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid destination bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->dest_type = val;

	val = AXI_DMAC_DMA_SRC_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Source bus width is zero\n");
		return -EINVAL;
	}
	/* widths are stored in log2 */
	chan->src_width = 1 << val;

	val = AXI_DMAC_DMA_DST_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Destination bus width is zero\n");
		return -EINVAL;
	}
	chan->dest_width = 1 << val;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}

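/*
 * Detect the optional features of this instance by writing to the relevant
 * registers and reading the values back: registers that back a feature which
 * was left out at instantiation time read back as zero, while X_LENGTH
 * exposes the supported maximum length and, on newer cores, the length
 * alignment requirement.
 */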
static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
{
	struct axi_dmac_chan *chan = &dmac->chan;

	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
		chan->hw_cyclic = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_SG_ADDRESS))
		chan->hw_sg = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
		chan->hw_2d = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
	chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	if (chan->max_length != UINT_MAX)
		chan->max_length++;

	axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
	    chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Destination memory-mapped interface not supported.");
		return -ENODEV;
	}

	axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
	    chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Source memory-mapped interface not supported.");
		return -ENODEV;
	}

	if (version >= ADI_AXI_PCORE_VER(4, 2, 'a'))
		chan->hw_partial_xfer = true;

	if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
		axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
		chan->length_align_mask =
			axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	} else {
		chan->length_align_mask = chan->address_align_mask;
	}

	return 0;
}

static void axi_dmac_tasklet_kill(void *task)
{
	tasklet_kill(task);
}

static void axi_dmac_free_dma_controller(void *of_node)
{
	of_dma_controller_free(of_node);
}

static int axi_dmac_probe(struct platform_device *pdev)
{
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct regmap *regmap;
	unsigned int version;
	u32 irq_mask = 0;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq < 0)
		return dmac->irq;
	if (dmac->irq == 0)
		return -EINVAL;

	dmac->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);

	if (version >= ADI_AXI_PCORE_VER(4, 3, 'a'))
		ret = axi_dmac_read_chan_config(&pdev->dev, dmac);
	else
		ret = axi_dmac_parse_dt(&pdev->dev, dmac);

	if (ret < 0)
		return ret;

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_peripheral_dma_vec = axi_dmac_prep_peripheral_dma_vec;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->device_synchronize = axi_dmac_synchronize;
	dma_dev->dev = &pdev->dev;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_dev->max_sg_burst = 31; /* 31 SGs maximum in one burst */
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = axi_dmac_detect_caps(dmac, version);
	if (ret)
		return ret;

	dma_dev->copy_align = (dmac->chan.address_align_mask + 1);

	if (dmac->chan.hw_sg)
		irq_mask |= AXI_DMAC_IRQ_SOT;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, irq_mask);

	if (of_dma_is_coherent(pdev->dev.of_node)) {
		ret = axi_dmac_read(dmac, AXI_DMAC_REG_COHERENCY_DESC);

		if (version < ADI_AXI_PCORE_VER(4, 4, 'a') ||
		    !AXI_DMAC_DST_COHERENT_GET(ret)) {
			dev_err(dmac->dma_dev.dev,
				"Coherent DMA not supported in hardware");
			return -EINVAL;
		}
	}

	ret = dmaenginem_async_device_register(dma_dev);
	if (ret)
		return ret;

	/*
	 * Put the action in here so it gets done before unregistering the DMA
	 * device.
	 */
	ret = devm_add_action_or_reset(&pdev->dev, axi_dmac_tasklet_kill,
				       &dmac->chan.vchan.task);
	if (ret)
		return ret;

	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(&pdev->dev, axi_dmac_free_dma_controller,
				       pdev->dev.of_node);
	if (ret)
		return ret;

	ret = devm_request_irq(&pdev->dev, dmac->irq, axi_dmac_interrupt_handler,
			       IRQF_SHARED, dev_name(&pdev->dev), dmac);
	if (ret)
		return ret;

	regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
				       &axi_dmac_regmap_config);

	return PTR_ERR_OR_ZERO(regmap);
}

static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");