1 /* 2 * Qualcomm Technologies HIDMA DMA engine interface 3 * 4 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 and 8 * only version 2 as published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 */ 15 16 /* 17 * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008. 18 * Copyright (C) Semihalf 2009 19 * Copyright (C) Ilya Yanok, Emcraft Systems 2010 20 * Copyright (C) Alexander Popov, Promcontroller 2014 21 * 22 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description 23 * (defines, structures and comments) was taken from MPC5121 DMA driver 24 * written by Hongjun Chen <hong-jun.chen@freescale.com>. 25 * 26 * Approved as OSADL project by a majority of OSADL members and funded 27 * by OSADL membership fees in 2009; for details see www.osadl.org. 28 * 29 * This program is free software; you can redistribute it and/or modify it 30 * under the terms of the GNU General Public License as published by the Free 31 * Software Foundation; either version 2 of the License, or (at your option) 32 * any later version. 33 * 34 * This program is distributed in the hope that it will be useful, but WITHOUT 35 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 36 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 37 * more details. 38 * 39 * The full GNU General Public License is included in this distribution in the 40 * file called COPYING. 41 */ 42 43 /* Linux Foundation elects GPLv2 license only. */ 44 45 #include <linux/dmaengine.h> 46 #include <linux/dma-mapping.h> 47 #include <linux/list.h> 48 #include <linux/module.h> 49 #include <linux/platform_device.h> 50 #include <linux/slab.h> 51 #include <linux/spinlock.h> 52 #include <linux/of_dma.h> 53 #include <linux/of_device.h> 54 #include <linux/property.h> 55 #include <linux/delay.h> 56 #include <linux/acpi.h> 57 #include <linux/irq.h> 58 #include <linux/atomic.h> 59 #include <linux/pm_runtime.h> 60 #include <linux/msi.h> 61 62 #include "../dmaengine.h" 63 #include "hidma.h" 64 65 /* 66 * Default idle time is 2 seconds. This parameter can 67 * be overridden by changing the following 68 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms 69 * during kernel boot. 70 */ 71 #define HIDMA_AUTOSUSPEND_TIMEOUT 2000 72 #define HIDMA_ERR_INFO_SW 0xFF 73 #define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE 0x0 74 #define HIDMA_NR_DEFAULT_DESC 10 75 #define HIDMA_MSI_INTS 11 76 77 static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev) 78 { 79 return container_of(dmadev, struct hidma_dev, ddev); 80 } 81 82 static inline 83 struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp) 84 { 85 return container_of(_lldevp, struct hidma_dev, lldev); 86 } 87 88 static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach) 89 { 90 return container_of(dmach, struct hidma_chan, chan); 91 } 92 93 static inline 94 struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t) 95 { 96 return container_of(t, struct hidma_desc, desc); 97 } 98 99 static void hidma_free(struct hidma_dev *dmadev) 100 { 101 INIT_LIST_HEAD(&dmadev->ddev.channels); 102 } 103 104 static unsigned int nr_desc_prm; 105 module_param(nr_desc_prm, uint, 0644); 106 MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)"); 107 108 enum hidma_cap { 109 HIDMA_MSI_CAP = 1, 110 HIDMA_IDENTITY_CAP, 111 }; 112 113 /* process completed descriptors */ 114 static void hidma_process_completed(struct hidma_chan *mchan) 115 { 116 struct dma_device *ddev = mchan->chan.device; 117 struct hidma_dev *mdma = to_hidma_dev(ddev); 118 struct dma_async_tx_descriptor *desc; 119 dma_cookie_t last_cookie; 120 struct hidma_desc *mdesc; 121 struct hidma_desc *next; 122 unsigned long irqflags; 123 struct list_head list; 124 125 INIT_LIST_HEAD(&list); 126 127 /* Get all completed descriptors */ 128 spin_lock_irqsave(&mchan->lock, irqflags); 129 list_splice_tail_init(&mchan->completed, &list); 130 spin_unlock_irqrestore(&mchan->lock, irqflags); 131 132 /* Execute callbacks and run dependencies */ 133 list_for_each_entry_safe(mdesc, next, &list, node) { 134 enum dma_status llstat; 135 struct dmaengine_desc_callback cb; 136 struct dmaengine_result result; 137 138 desc = &mdesc->desc; 139 last_cookie = desc->cookie; 140 141 llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch); 142 143 spin_lock_irqsave(&mchan->lock, irqflags); 144 if (llstat == DMA_COMPLETE) { 145 mchan->last_success = last_cookie; 146 result.result = DMA_TRANS_NOERROR; 147 } else { 148 result.result = DMA_TRANS_ABORTED; 149 } 150 151 dma_cookie_complete(desc); 152 spin_unlock_irqrestore(&mchan->lock, irqflags); 153 154 dmaengine_desc_get_callback(desc, &cb); 155 156 dma_run_dependencies(desc); 157 158 spin_lock_irqsave(&mchan->lock, irqflags); 159 list_move(&mdesc->node, &mchan->free); 160 spin_unlock_irqrestore(&mchan->lock, irqflags); 161 162 dmaengine_desc_callback_invoke(&cb, &result); 163 } 164 } 165 166 /* 167 * Called once for each submitted descriptor. 168 * PM is locked once for each descriptor that is currently 169 * in execution. 170 */ 171 static void hidma_callback(void *data) 172 { 173 struct hidma_desc *mdesc = data; 174 struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan); 175 struct dma_device *ddev = mchan->chan.device; 176 struct hidma_dev *dmadev = to_hidma_dev(ddev); 177 unsigned long irqflags; 178 bool queued = false; 179 180 spin_lock_irqsave(&mchan->lock, irqflags); 181 if (mdesc->node.next) { 182 /* Delete from the active list, add to completed list */ 183 list_move_tail(&mdesc->node, &mchan->completed); 184 queued = true; 185 186 /* calculate the next running descriptor */ 187 mchan->running = list_first_entry(&mchan->active, 188 struct hidma_desc, node); 189 } 190 spin_unlock_irqrestore(&mchan->lock, irqflags); 191 192 hidma_process_completed(mchan); 193 194 if (queued) { 195 pm_runtime_mark_last_busy(dmadev->ddev.dev); 196 pm_runtime_put_autosuspend(dmadev->ddev.dev); 197 } 198 } 199 200 static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig) 201 { 202 struct hidma_chan *mchan; 203 struct dma_device *ddev; 204 205 mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL); 206 if (!mchan) 207 return -ENOMEM; 208 209 ddev = &dmadev->ddev; 210 mchan->dma_sig = dma_sig; 211 mchan->dmadev = dmadev; 212 mchan->chan.device = ddev; 213 dma_cookie_init(&mchan->chan); 214 215 INIT_LIST_HEAD(&mchan->free); 216 INIT_LIST_HEAD(&mchan->prepared); 217 INIT_LIST_HEAD(&mchan->active); 218 INIT_LIST_HEAD(&mchan->completed); 219 INIT_LIST_HEAD(&mchan->queued); 220 221 spin_lock_init(&mchan->lock); 222 list_add_tail(&mchan->chan.device_node, &ddev->channels); 223 dmadev->ddev.chancnt++; 224 return 0; 225 } 226 227 static void hidma_issue_task(struct tasklet_struct *t) 228 { 229 struct hidma_dev *dmadev = from_tasklet(dmadev, t, task); 230 231 pm_runtime_get_sync(dmadev->ddev.dev); 232 hidma_ll_start(dmadev->lldev); 233 } 234 235 static void hidma_issue_pending(struct dma_chan *dmach) 236 { 237 struct hidma_chan *mchan = to_hidma_chan(dmach); 238 struct hidma_dev *dmadev = mchan->dmadev; 239 unsigned long flags; 240 struct hidma_desc *qdesc, *next; 241 int status; 242 243 spin_lock_irqsave(&mchan->lock, flags); 244 list_for_each_entry_safe(qdesc, next, &mchan->queued, node) { 245 hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch); 246 list_move_tail(&qdesc->node, &mchan->active); 247 } 248 249 if (!mchan->running) { 250 struct hidma_desc *desc = list_first_entry(&mchan->active, 251 struct hidma_desc, 252 node); 253 mchan->running = desc; 254 } 255 spin_unlock_irqrestore(&mchan->lock, flags); 256 257 /* PM will be released in hidma_callback function. */ 258 status = pm_runtime_get(dmadev->ddev.dev); 259 if (status < 0) 260 tasklet_schedule(&dmadev->task); 261 else 262 hidma_ll_start(dmadev->lldev); 263 } 264 265 static inline bool hidma_txn_is_success(dma_cookie_t cookie, 266 dma_cookie_t last_success, dma_cookie_t last_used) 267 { 268 if (last_success <= last_used) { 269 if ((cookie <= last_success) || (cookie > last_used)) 270 return true; 271 } else { 272 if ((cookie <= last_success) && (cookie > last_used)) 273 return true; 274 } 275 return false; 276 } 277 278 static enum dma_status hidma_tx_status(struct dma_chan *dmach, 279 dma_cookie_t cookie, 280 struct dma_tx_state *txstate) 281 { 282 struct hidma_chan *mchan = to_hidma_chan(dmach); 283 enum dma_status ret; 284 285 ret = dma_cookie_status(dmach, cookie, txstate); 286 if (ret == DMA_COMPLETE) { 287 bool is_success; 288 289 is_success = hidma_txn_is_success(cookie, mchan->last_success, 290 dmach->cookie); 291 return is_success ? ret : DMA_ERROR; 292 } 293 294 if (mchan->paused && (ret == DMA_IN_PROGRESS)) { 295 unsigned long flags; 296 dma_cookie_t runcookie; 297 298 spin_lock_irqsave(&mchan->lock, flags); 299 if (mchan->running) 300 runcookie = mchan->running->desc.cookie; 301 else 302 runcookie = -EINVAL; 303 304 if (runcookie == cookie) 305 ret = DMA_PAUSED; 306 307 spin_unlock_irqrestore(&mchan->lock, flags); 308 } 309 310 return ret; 311 } 312 313 /* 314 * Submit descriptor to hardware. 315 * Lock the PM for each descriptor we are sending. 316 */ 317 static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd) 318 { 319 struct hidma_chan *mchan = to_hidma_chan(txd->chan); 320 struct hidma_dev *dmadev = mchan->dmadev; 321 struct hidma_desc *mdesc; 322 unsigned long irqflags; 323 dma_cookie_t cookie; 324 325 pm_runtime_get_sync(dmadev->ddev.dev); 326 if (!hidma_ll_isenabled(dmadev->lldev)) { 327 pm_runtime_mark_last_busy(dmadev->ddev.dev); 328 pm_runtime_put_autosuspend(dmadev->ddev.dev); 329 return -ENODEV; 330 } 331 pm_runtime_mark_last_busy(dmadev->ddev.dev); 332 pm_runtime_put_autosuspend(dmadev->ddev.dev); 333 334 mdesc = container_of(txd, struct hidma_desc, desc); 335 spin_lock_irqsave(&mchan->lock, irqflags); 336 337 /* Move descriptor to queued */ 338 list_move_tail(&mdesc->node, &mchan->queued); 339 340 /* Update cookie */ 341 cookie = dma_cookie_assign(txd); 342 343 spin_unlock_irqrestore(&mchan->lock, irqflags); 344 345 return cookie; 346 } 347 348 static int hidma_alloc_chan_resources(struct dma_chan *dmach) 349 { 350 struct hidma_chan *mchan = to_hidma_chan(dmach); 351 struct hidma_dev *dmadev = mchan->dmadev; 352 struct hidma_desc *mdesc, *tmp; 353 unsigned long irqflags; 354 LIST_HEAD(descs); 355 unsigned int i; 356 int rc = 0; 357 358 if (mchan->allocated) 359 return 0; 360 361 /* Alloc descriptors for this channel */ 362 for (i = 0; i < dmadev->nr_descriptors; i++) { 363 mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT); 364 if (!mdesc) { 365 rc = -ENOMEM; 366 break; 367 } 368 dma_async_tx_descriptor_init(&mdesc->desc, dmach); 369 mdesc->desc.tx_submit = hidma_tx_submit; 370 371 rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig, 372 "DMA engine", hidma_callback, mdesc, 373 &mdesc->tre_ch); 374 if (rc) { 375 dev_err(dmach->device->dev, 376 "channel alloc failed at %u\n", i); 377 kfree(mdesc); 378 break; 379 } 380 list_add_tail(&mdesc->node, &descs); 381 } 382 383 if (rc) { 384 /* return the allocated descriptors */ 385 list_for_each_entry_safe(mdesc, tmp, &descs, node) { 386 hidma_ll_free(dmadev->lldev, mdesc->tre_ch); 387 kfree(mdesc); 388 } 389 return rc; 390 } 391 392 spin_lock_irqsave(&mchan->lock, irqflags); 393 list_splice_tail_init(&descs, &mchan->free); 394 mchan->allocated = true; 395 spin_unlock_irqrestore(&mchan->lock, irqflags); 396 return 1; 397 } 398 399 static struct dma_async_tx_descriptor * 400 hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src, 401 size_t len, unsigned long flags) 402 { 403 struct hidma_chan *mchan = to_hidma_chan(dmach); 404 struct hidma_desc *mdesc = NULL; 405 struct hidma_dev *mdma = mchan->dmadev; 406 unsigned long irqflags; 407 408 /* Get free descriptor */ 409 spin_lock_irqsave(&mchan->lock, irqflags); 410 if (!list_empty(&mchan->free)) { 411 mdesc = list_first_entry(&mchan->free, struct hidma_desc, node); 412 list_del(&mdesc->node); 413 } 414 spin_unlock_irqrestore(&mchan->lock, irqflags); 415 416 if (!mdesc) 417 return NULL; 418 419 mdesc->desc.flags = flags; 420 hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, 421 src, dest, len, flags, 422 HIDMA_TRE_MEMCPY); 423 424 /* Place descriptor in prepared list */ 425 spin_lock_irqsave(&mchan->lock, irqflags); 426 list_add_tail(&mdesc->node, &mchan->prepared); 427 spin_unlock_irqrestore(&mchan->lock, irqflags); 428 429 return &mdesc->desc; 430 } 431 432 static struct dma_async_tx_descriptor * 433 hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value, 434 size_t len, unsigned long flags) 435 { 436 struct hidma_chan *mchan = to_hidma_chan(dmach); 437 struct hidma_desc *mdesc = NULL; 438 struct hidma_dev *mdma = mchan->dmadev; 439 unsigned long irqflags; 440 441 /* Get free descriptor */ 442 spin_lock_irqsave(&mchan->lock, irqflags); 443 if (!list_empty(&mchan->free)) { 444 mdesc = list_first_entry(&mchan->free, struct hidma_desc, node); 445 list_del(&mdesc->node); 446 } 447 spin_unlock_irqrestore(&mchan->lock, irqflags); 448 449 if (!mdesc) 450 return NULL; 451 452 mdesc->desc.flags = flags; 453 hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, 454 value, dest, len, flags, 455 HIDMA_TRE_MEMSET); 456 457 /* Place descriptor in prepared list */ 458 spin_lock_irqsave(&mchan->lock, irqflags); 459 list_add_tail(&mdesc->node, &mchan->prepared); 460 spin_unlock_irqrestore(&mchan->lock, irqflags); 461 462 return &mdesc->desc; 463 } 464 465 static int hidma_terminate_channel(struct dma_chan *chan) 466 { 467 struct hidma_chan *mchan = to_hidma_chan(chan); 468 struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device); 469 struct hidma_desc *tmp, *mdesc; 470 unsigned long irqflags; 471 LIST_HEAD(list); 472 int rc; 473 474 pm_runtime_get_sync(dmadev->ddev.dev); 475 /* give completed requests a chance to finish */ 476 hidma_process_completed(mchan); 477 478 spin_lock_irqsave(&mchan->lock, irqflags); 479 mchan->last_success = 0; 480 list_splice_init(&mchan->active, &list); 481 list_splice_init(&mchan->prepared, &list); 482 list_splice_init(&mchan->completed, &list); 483 list_splice_init(&mchan->queued, &list); 484 spin_unlock_irqrestore(&mchan->lock, irqflags); 485 486 /* this suspends the existing transfer */ 487 rc = hidma_ll_disable(dmadev->lldev); 488 if (rc) { 489 dev_err(dmadev->ddev.dev, "channel did not pause\n"); 490 goto out; 491 } 492 493 /* return all user requests */ 494 list_for_each_entry_safe(mdesc, tmp, &list, node) { 495 struct dma_async_tx_descriptor *txd = &mdesc->desc; 496 497 dma_descriptor_unmap(txd); 498 dmaengine_desc_get_callback_invoke(txd, NULL); 499 dma_run_dependencies(txd); 500 501 /* move myself to free_list */ 502 list_move(&mdesc->node, &mchan->free); 503 } 504 505 rc = hidma_ll_enable(dmadev->lldev); 506 out: 507 pm_runtime_mark_last_busy(dmadev->ddev.dev); 508 pm_runtime_put_autosuspend(dmadev->ddev.dev); 509 return rc; 510 } 511 512 static int hidma_terminate_all(struct dma_chan *chan) 513 { 514 struct hidma_chan *mchan = to_hidma_chan(chan); 515 struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device); 516 int rc; 517 518 rc = hidma_terminate_channel(chan); 519 if (rc) 520 return rc; 521 522 /* reinitialize the hardware */ 523 pm_runtime_get_sync(dmadev->ddev.dev); 524 rc = hidma_ll_setup(dmadev->lldev); 525 pm_runtime_mark_last_busy(dmadev->ddev.dev); 526 pm_runtime_put_autosuspend(dmadev->ddev.dev); 527 return rc; 528 } 529 530 static void hidma_free_chan_resources(struct dma_chan *dmach) 531 { 532 struct hidma_chan *mchan = to_hidma_chan(dmach); 533 struct hidma_dev *mdma = mchan->dmadev; 534 struct hidma_desc *mdesc, *tmp; 535 unsigned long irqflags; 536 LIST_HEAD(descs); 537 538 /* terminate running transactions and free descriptors */ 539 hidma_terminate_channel(dmach); 540 541 spin_lock_irqsave(&mchan->lock, irqflags); 542 543 /* Move data */ 544 list_splice_tail_init(&mchan->free, &descs); 545 546 /* Free descriptors */ 547 list_for_each_entry_safe(mdesc, tmp, &descs, node) { 548 hidma_ll_free(mdma->lldev, mdesc->tre_ch); 549 list_del(&mdesc->node); 550 kfree(mdesc); 551 } 552 553 mchan->allocated = false; 554 spin_unlock_irqrestore(&mchan->lock, irqflags); 555 } 556 557 static int hidma_pause(struct dma_chan *chan) 558 { 559 struct hidma_chan *mchan; 560 struct hidma_dev *dmadev; 561 562 mchan = to_hidma_chan(chan); 563 dmadev = to_hidma_dev(mchan->chan.device); 564 if (!mchan->paused) { 565 pm_runtime_get_sync(dmadev->ddev.dev); 566 if (hidma_ll_disable(dmadev->lldev)) 567 dev_warn(dmadev->ddev.dev, "channel did not stop\n"); 568 mchan->paused = true; 569 pm_runtime_mark_last_busy(dmadev->ddev.dev); 570 pm_runtime_put_autosuspend(dmadev->ddev.dev); 571 } 572 return 0; 573 } 574 575 static int hidma_resume(struct dma_chan *chan) 576 { 577 struct hidma_chan *mchan; 578 struct hidma_dev *dmadev; 579 int rc = 0; 580 581 mchan = to_hidma_chan(chan); 582 dmadev = to_hidma_dev(mchan->chan.device); 583 if (mchan->paused) { 584 pm_runtime_get_sync(dmadev->ddev.dev); 585 rc = hidma_ll_enable(dmadev->lldev); 586 if (!rc) 587 mchan->paused = false; 588 else 589 dev_err(dmadev->ddev.dev, 590 "failed to resume the channel"); 591 pm_runtime_mark_last_busy(dmadev->ddev.dev); 592 pm_runtime_put_autosuspend(dmadev->ddev.dev); 593 } 594 return rc; 595 } 596 597 static irqreturn_t hidma_chirq_handler(int chirq, void *arg) 598 { 599 struct hidma_lldev *lldev = arg; 600 601 /* 602 * All interrupts are request driven. 603 * HW doesn't send an interrupt by itself. 604 */ 605 return hidma_ll_inthandler(chirq, lldev); 606 } 607 608 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN 609 static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg) 610 { 611 struct hidma_lldev **lldevp = arg; 612 struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp); 613 614 return hidma_ll_inthandler_msi(chirq, *lldevp, 615 1 << (chirq - dmadev->msi_virqbase)); 616 } 617 #endif 618 619 static ssize_t hidma_show_values(struct device *dev, 620 struct device_attribute *attr, char *buf) 621 { 622 struct hidma_dev *mdev = dev_get_drvdata(dev); 623 624 buf[0] = 0; 625 626 if (strcmp(attr->attr.name, "chid") == 0) 627 sprintf(buf, "%d\n", mdev->chidx); 628 629 return strlen(buf); 630 } 631 632 static inline void hidma_sysfs_uninit(struct hidma_dev *dev) 633 { 634 device_remove_file(dev->ddev.dev, dev->chid_attrs); 635 } 636 637 static struct device_attribute* 638 hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode) 639 { 640 struct device_attribute *attrs; 641 char *name_copy; 642 643 attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute), 644 GFP_KERNEL); 645 if (!attrs) 646 return NULL; 647 648 name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL); 649 if (!name_copy) 650 return NULL; 651 652 attrs->attr.name = name_copy; 653 attrs->attr.mode = mode; 654 attrs->show = hidma_show_values; 655 sysfs_attr_init(&attrs->attr); 656 657 return attrs; 658 } 659 660 static int hidma_sysfs_init(struct hidma_dev *dev) 661 { 662 dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO); 663 if (!dev->chid_attrs) 664 return -ENOMEM; 665 666 return device_create_file(dev->ddev.dev, dev->chid_attrs); 667 } 668 669 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN 670 static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) 671 { 672 struct device *dev = msi_desc_to_dev(desc); 673 struct hidma_dev *dmadev = dev_get_drvdata(dev); 674 675 if (!desc->platform.msi_index) { 676 writel(msg->address_lo, dmadev->dev_evca + 0x118); 677 writel(msg->address_hi, dmadev->dev_evca + 0x11C); 678 writel(msg->data, dmadev->dev_evca + 0x120); 679 } 680 } 681 #endif 682 683 static void hidma_free_msis(struct hidma_dev *dmadev) 684 { 685 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN 686 struct device *dev = dmadev->ddev.dev; 687 struct msi_desc *desc; 688 689 /* free allocated MSI interrupts above */ 690 for_each_msi_entry(desc, dev) 691 devm_free_irq(dev, desc->irq, &dmadev->lldev); 692 693 platform_msi_domain_free_irqs(dev); 694 #endif 695 } 696 697 static int hidma_request_msi(struct hidma_dev *dmadev, 698 struct platform_device *pdev) 699 { 700 #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN 701 int rc; 702 struct msi_desc *desc; 703 struct msi_desc *failed_desc = NULL; 704 705 rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS, 706 hidma_write_msi_msg); 707 if (rc) 708 return rc; 709 710 for_each_msi_entry(desc, &pdev->dev) { 711 if (!desc->platform.msi_index) 712 dmadev->msi_virqbase = desc->irq; 713 714 rc = devm_request_irq(&pdev->dev, desc->irq, 715 hidma_chirq_handler_msi, 716 0, "qcom-hidma-msi", 717 &dmadev->lldev); 718 if (rc) { 719 failed_desc = desc; 720 break; 721 } 722 } 723 724 if (rc) { 725 /* free allocated MSI interrupts above */ 726 for_each_msi_entry(desc, &pdev->dev) { 727 if (desc == failed_desc) 728 break; 729 devm_free_irq(&pdev->dev, desc->irq, 730 &dmadev->lldev); 731 } 732 } else { 733 /* Add callback to free MSIs on teardown */ 734 hidma_ll_setup_irq(dmadev->lldev, true); 735 736 } 737 if (rc) 738 dev_warn(&pdev->dev, 739 "failed to request MSI irq, falling back to wired IRQ\n"); 740 return rc; 741 #else 742 return -EINVAL; 743 #endif 744 } 745 746 static bool hidma_test_capability(struct device *dev, enum hidma_cap test_cap) 747 { 748 enum hidma_cap cap; 749 750 cap = (enum hidma_cap) device_get_match_data(dev); 751 return cap ? ((cap & test_cap) > 0) : 0; 752 } 753 754 static int hidma_probe(struct platform_device *pdev) 755 { 756 struct hidma_dev *dmadev; 757 struct resource *trca_resource; 758 struct resource *evca_resource; 759 int chirq; 760 void __iomem *evca; 761 void __iomem *trca; 762 int rc; 763 bool msi; 764 765 pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT); 766 pm_runtime_use_autosuspend(&pdev->dev); 767 pm_runtime_set_active(&pdev->dev); 768 pm_runtime_enable(&pdev->dev); 769 770 trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); 771 trca = devm_ioremap_resource(&pdev->dev, trca_resource); 772 if (IS_ERR(trca)) { 773 rc = -ENOMEM; 774 goto bailout; 775 } 776 777 evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1); 778 evca = devm_ioremap_resource(&pdev->dev, evca_resource); 779 if (IS_ERR(evca)) { 780 rc = -ENOMEM; 781 goto bailout; 782 } 783 784 /* 785 * This driver only handles the channel IRQs. 786 * Common IRQ is handled by the management driver. 787 */ 788 chirq = platform_get_irq(pdev, 0); 789 if (chirq < 0) { 790 rc = -ENODEV; 791 goto bailout; 792 } 793 794 dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL); 795 if (!dmadev) { 796 rc = -ENOMEM; 797 goto bailout; 798 } 799 800 INIT_LIST_HEAD(&dmadev->ddev.channels); 801 spin_lock_init(&dmadev->lock); 802 dmadev->ddev.dev = &pdev->dev; 803 pm_runtime_get_sync(dmadev->ddev.dev); 804 805 dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask); 806 dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask); 807 if (WARN_ON(!pdev->dev.dma_mask)) { 808 rc = -ENXIO; 809 goto dmafree; 810 } 811 812 dmadev->dev_evca = evca; 813 dmadev->evca_resource = evca_resource; 814 dmadev->dev_trca = trca; 815 dmadev->trca_resource = trca_resource; 816 dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy; 817 dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset; 818 dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources; 819 dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources; 820 dmadev->ddev.device_tx_status = hidma_tx_status; 821 dmadev->ddev.device_issue_pending = hidma_issue_pending; 822 dmadev->ddev.device_pause = hidma_pause; 823 dmadev->ddev.device_resume = hidma_resume; 824 dmadev->ddev.device_terminate_all = hidma_terminate_all; 825 dmadev->ddev.copy_align = 8; 826 827 /* 828 * Determine the MSI capability of the platform. Old HW doesn't 829 * support MSI. 830 */ 831 msi = hidma_test_capability(&pdev->dev, HIDMA_MSI_CAP); 832 device_property_read_u32(&pdev->dev, "desc-count", 833 &dmadev->nr_descriptors); 834 835 if (nr_desc_prm) { 836 dev_info(&pdev->dev, "overriding number of descriptors as %d\n", 837 nr_desc_prm); 838 dmadev->nr_descriptors = nr_desc_prm; 839 } 840 841 if (!dmadev->nr_descriptors) 842 dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC; 843 844 if (hidma_test_capability(&pdev->dev, HIDMA_IDENTITY_CAP)) 845 dmadev->chidx = readl(dmadev->dev_trca + 0x40); 846 else 847 dmadev->chidx = readl(dmadev->dev_trca + 0x28); 848 849 /* Set DMA mask to 64 bits. */ 850 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 851 if (rc) { 852 dev_warn(&pdev->dev, "unable to set coherent mask to 64"); 853 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 854 if (rc) 855 goto dmafree; 856 } 857 858 dmadev->lldev = hidma_ll_init(dmadev->ddev.dev, 859 dmadev->nr_descriptors, dmadev->dev_trca, 860 dmadev->dev_evca, dmadev->chidx); 861 if (!dmadev->lldev) { 862 rc = -EPROBE_DEFER; 863 goto dmafree; 864 } 865 866 platform_set_drvdata(pdev, dmadev); 867 if (msi) 868 rc = hidma_request_msi(dmadev, pdev); 869 870 if (!msi || rc) { 871 hidma_ll_setup_irq(dmadev->lldev, false); 872 rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 873 0, "qcom-hidma", dmadev->lldev); 874 if (rc) 875 goto uninit; 876 } 877 878 INIT_LIST_HEAD(&dmadev->ddev.channels); 879 rc = hidma_chan_init(dmadev, 0); 880 if (rc) 881 goto uninit; 882 883 rc = dma_async_device_register(&dmadev->ddev); 884 if (rc) 885 goto uninit; 886 887 dmadev->irq = chirq; 888 tasklet_setup(&dmadev->task, hidma_issue_task); 889 hidma_debug_init(dmadev); 890 hidma_sysfs_init(dmadev); 891 dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n"); 892 pm_runtime_mark_last_busy(dmadev->ddev.dev); 893 pm_runtime_put_autosuspend(dmadev->ddev.dev); 894 return 0; 895 896 uninit: 897 if (msi) 898 hidma_free_msis(dmadev); 899 900 hidma_ll_uninit(dmadev->lldev); 901 dmafree: 902 if (dmadev) 903 hidma_free(dmadev); 904 bailout: 905 pm_runtime_put_sync(&pdev->dev); 906 pm_runtime_disable(&pdev->dev); 907 return rc; 908 } 909 910 static void hidma_shutdown(struct platform_device *pdev) 911 { 912 struct hidma_dev *dmadev = platform_get_drvdata(pdev); 913 914 dev_info(dmadev->ddev.dev, "HI-DMA engine shutdown\n"); 915 916 pm_runtime_get_sync(dmadev->ddev.dev); 917 if (hidma_ll_disable(dmadev->lldev)) 918 dev_warn(dmadev->ddev.dev, "channel did not stop\n"); 919 pm_runtime_mark_last_busy(dmadev->ddev.dev); 920 pm_runtime_put_autosuspend(dmadev->ddev.dev); 921 922 } 923 924 static int hidma_remove(struct platform_device *pdev) 925 { 926 struct hidma_dev *dmadev = platform_get_drvdata(pdev); 927 928 pm_runtime_get_sync(dmadev->ddev.dev); 929 dma_async_device_unregister(&dmadev->ddev); 930 if (!dmadev->lldev->msi_support) 931 devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev); 932 else 933 hidma_free_msis(dmadev); 934 935 tasklet_kill(&dmadev->task); 936 hidma_sysfs_uninit(dmadev); 937 hidma_debug_uninit(dmadev); 938 hidma_ll_uninit(dmadev->lldev); 939 hidma_free(dmadev); 940 941 dev_info(&pdev->dev, "HI-DMA engine removed\n"); 942 pm_runtime_put_sync_suspend(&pdev->dev); 943 pm_runtime_disable(&pdev->dev); 944 945 return 0; 946 } 947 948 #if IS_ENABLED(CONFIG_ACPI) 949 static const struct acpi_device_id hidma_acpi_ids[] = { 950 {"QCOM8061"}, 951 {"QCOM8062", HIDMA_MSI_CAP}, 952 {"QCOM8063", (HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP)}, 953 {}, 954 }; 955 MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids); 956 #endif 957 958 static const struct of_device_id hidma_match[] = { 959 {.compatible = "qcom,hidma-1.0",}, 960 {.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),}, 961 {.compatible = "qcom,hidma-1.2", 962 .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),}, 963 {}, 964 }; 965 MODULE_DEVICE_TABLE(of, hidma_match); 966 967 static struct platform_driver hidma_driver = { 968 .probe = hidma_probe, 969 .remove = hidma_remove, 970 .shutdown = hidma_shutdown, 971 .driver = { 972 .name = "hidma", 973 .of_match_table = hidma_match, 974 .acpi_match_table = ACPI_PTR(hidma_acpi_ids), 975 }, 976 }; 977 978 module_platform_driver(hidma_driver); 979 MODULE_LICENSE("GPL v2"); 980