/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/* Linux Foundation elects GPLv2 license only. */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>

#include "../dmaengine.h"
#include "hidma.h"

/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden by changing the following
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 * during kernel boot.
 */
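/*
 * Example (sketch only; the ACPI instance suffix ":00" below is
 * illustrative and differs per system):
 *
 *   echo 3000 > /sys/bus/platform/devices/QCOM8061:00/power/autosuspend_delay_ms
 *
 * raises the autosuspend delay to 3 seconds at runtime; a negative value
 * prevents runtime autosuspend for the device.
 */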
#define HIDMA_AUTOSUSPEND_TIMEOUT		2000
#define HIDMA_ERR_INFO_SW			0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
#define HIDMA_NR_DEFAULT_DESC			10
#define HIDMA_MSI_INTS				11

static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
	return container_of(dmadev, struct hidma_dev, ddev);
}

static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
	return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
	return container_of(dmach, struct hidma_chan, chan);
}

static void hidma_free(struct hidma_dev *dmadev)
{
	INIT_LIST_HEAD(&dmadev->ddev.channels);
}

static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");

enum hidma_cap {
	HIDMA_MSI_CAP = 1,
	HIDMA_IDENTITY_CAP,
};

/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *mdma = to_hidma_dev(ddev);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t last_cookie;
	struct hidma_desc *mdesc;
	struct hidma_desc *next;
	unsigned long irqflags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	/* Get all completed descriptors */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* Execute callbacks and run dependencies */
	list_for_each_entry_safe(mdesc, next, &list, node) {
		enum dma_status llstat;
		struct dmaengine_desc_callback cb;
		struct dmaengine_result result;

		desc = &mdesc->desc;
		last_cookie = desc->cookie;

		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);

		spin_lock_irqsave(&mchan->lock, irqflags);
		if (llstat == DMA_COMPLETE) {
			mchan->last_success = last_cookie;
			result.result = DMA_TRANS_NOERROR;
		} else {
			result.result = DMA_TRANS_ABORTED;
		}

		dma_cookie_complete(desc);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		dmaengine_desc_get_callback(desc, &cb);

		dma_run_dependencies(desc);

		spin_lock_irqsave(&mchan->lock, irqflags);
		list_move(&mdesc->node, &mchan->free);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		dmaengine_desc_callback_invoke(&cb, &result);
	}
}

/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
	struct hidma_desc *mdesc = data;
	struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *dmadev = to_hidma_dev(ddev);
	unsigned long irqflags;
	bool queued = false;

	spin_lock_irqsave(&mchan->lock, irqflags);
	if (mdesc->node.next) {
		/* Delete from the active list, add to completed list */
		list_move_tail(&mdesc->node, &mchan->completed);
		queued = true;

		/* calculate the next running descriptor */
		mchan->running = list_first_entry(&mchan->active,
						  struct hidma_desc, node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	hidma_process_completed(mchan);

	if (queued) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
}

static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
	struct hidma_chan *mchan;
	struct dma_device *ddev;

	mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
	if (!mchan)
		return -ENOMEM;

	ddev = &dmadev->ddev;
	mchan->dma_sig = dma_sig;
	mchan->dmadev = dmadev;
	mchan->chan.device = ddev;
	dma_cookie_init(&mchan->chan);

	INIT_LIST_HEAD(&mchan->free);
	INIT_LIST_HEAD(&mchan->prepared);
	INIT_LIST_HEAD(&mchan->active);
	INIT_LIST_HEAD(&mchan->completed);
	INIT_LIST_HEAD(&mchan->queued);

	spin_lock_init(&mchan->lock);
	list_add_tail(&mchan->chan.device_node, &ddev->channels);
	return 0;
}

static void hidma_issue_task(struct tasklet_struct *t)
{
	struct hidma_dev *dmadev = from_tasklet(dmadev, t, task);

	pm_runtime_get_sync(dmadev->ddev.dev);
	hidma_ll_start(dmadev->lldev);
}

static void hidma_issue_pending(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	unsigned long flags;
	struct hidma_desc *qdesc, *next;
	int status;

	spin_lock_irqsave(&mchan->lock, flags);
	list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
		hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch);
		list_move_tail(&qdesc->node, &mchan->active);
	}

	if (!mchan->running) {
		struct hidma_desc *desc = list_first_entry(&mchan->active,
							   struct hidma_desc,
							   node);
		mchan->running = desc;
	}
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* PM will be released in hidma_callback function. */
	status = pm_runtime_get(dmadev->ddev.dev);
	if (status < 0)
		tasklet_schedule(&dmadev->task);
	else
		hidma_ll_start(dmadev->lldev);
}

static inline bool hidma_txn_is_success(dma_cookie_t cookie,
		dma_cookie_t last_success, dma_cookie_t last_used)
{
	if (last_success <= last_used) {
		if ((cookie <= last_success) || (cookie > last_used))
			return true;
	} else {
		if ((cookie <= last_success) && (cookie > last_used))
			return true;
	}
	return false;
}

static enum dma_status hidma_tx_status(struct dma_chan *dmach,
				       dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	enum dma_status ret;

	ret = dma_cookie_status(dmach, cookie, txstate);
	if (ret == DMA_COMPLETE) {
		bool is_success;

		is_success = hidma_txn_is_success(cookie, mchan->last_success,
						  dmach->cookie);
		return is_success ? ret : DMA_ERROR;
	}

	if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
		unsigned long flags;
		dma_cookie_t runcookie;

		spin_lock_irqsave(&mchan->lock, flags);
		if (mchan->running)
			runcookie = mchan->running->desc.cookie;
		else
			runcookie = -EINVAL;

		if (runcookie == cookie)
			ret = DMA_PAUSED;

		spin_unlock_irqrestore(&mchan->lock, flags);
	}

	return ret;
}

/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct hidma_chan *mchan = to_hidma_chan(txd->chan);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc;
	unsigned long irqflags;
	dma_cookie_t cookie;

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (!hidma_ll_isenabled(dmadev->lldev)) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
		return -ENODEV;
	}
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);

	mdesc = container_of(txd, struct hidma_desc, desc);
	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move descriptor to queued */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return cookie;
}

static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);
	unsigned int i;
	int rc = 0;

	if (mchan->allocated)
		return 0;

	/* Alloc descriptors for this channel */
	for (i = 0; i < dmadev->nr_descriptors; i++) {
		mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
		if (!mdesc) {
			rc = -ENOMEM;
			break;
		}
		dma_async_tx_descriptor_init(&mdesc->desc, dmach);
		mdesc->desc.tx_submit = hidma_tx_submit;

		rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
				      "DMA engine", hidma_callback, mdesc,
				      &mdesc->tre_ch);
		if (rc) {
			dev_err(dmach->device->dev,
				"channel alloc failed at %u\n", i);
			kfree(mdesc);
			break;
		}
		list_add_tail(&mdesc->node, &descs);
	}

	if (rc) {
		/* return the allocated descriptors */
		list_for_each_entry_safe(mdesc, tmp, &descs, node) {
			hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
			kfree(mdesc);
		}
		return rc;
	}

	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&descs, &mchan->free);
	mchan->allocated = true;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
	return 1;
}

static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	mdesc->desc.flags = flags;
	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     src, dest, len, flags,
				     HIDMA_TRE_MEMCPY);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}

static struct dma_async_tx_descriptor *
hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;
	u64 byte_pattern, fill_pattern;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	byte_pattern = (char)value;
	fill_pattern =	(byte_pattern << 56) |
			(byte_pattern << 48) |
			(byte_pattern << 40) |
			(byte_pattern << 32) |
			(byte_pattern << 24) |
			(byte_pattern << 16) |
			(byte_pattern << 8) |
			byte_pattern;

	mdesc->desc.flags = flags;
	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     fill_pattern, dest, len, flags,
				     HIDMA_TRE_MEMSET);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}

static int hidma_terminate_channel(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	struct hidma_desc *tmp, *mdesc;
	unsigned long irqflags;
	LIST_HEAD(list);
	int rc;

	pm_runtime_get_sync(dmadev->ddev.dev);
	/* give completed requests a chance to finish */
	hidma_process_completed(mchan);

	spin_lock_irqsave(&mchan->lock, irqflags);
	mchan->last_success = 0;
	list_splice_init(&mchan->active, &list);
	list_splice_init(&mchan->prepared, &list);
	list_splice_init(&mchan->completed, &list);
	list_splice_init(&mchan->queued, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* this suspends the existing transfer */
	rc = hidma_ll_disable(dmadev->lldev);
	if (rc) {
		dev_err(dmadev->ddev.dev, "channel did not pause\n");
		goto out;
	}

	/* return all user requests */
	list_for_each_entry_safe(mdesc, tmp, &list, node) {
		struct dma_async_tx_descriptor *txd = &mdesc->desc;

		dma_descriptor_unmap(txd);
		dmaengine_desc_get_callback_invoke(txd, NULL);
		dma_run_dependencies(txd);

		/* move myself to free_list */
		list_move(&mdesc->node, &mchan->free);
	}

	rc = hidma_ll_enable(dmadev->lldev);
out:
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static int hidma_terminate_all(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	int rc;

	rc = hidma_terminate_channel(chan);
	if (rc)
		return rc;

	/* reinitialize the hardware */
	pm_runtime_get_sync(dmadev->ddev.dev);
	rc = hidma_ll_setup(dmadev->lldev);
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static void hidma_free_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *mdma = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);

	/* terminate running transactions and free descriptors */
	hidma_terminate_channel(dmach);

	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node) {
		hidma_ll_free(mdma->lldev, mdesc->tre_ch);
		list_del(&mdesc->node);
		kfree(mdesc);
	}

	mchan->allocated = false;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
}

static int hidma_pause(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (!mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		if (hidma_ll_disable(dmadev->lldev))
			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
		mchan->paused = true;
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return 0;
}

static int hidma_resume(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;
	int rc = 0;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		rc = hidma_ll_enable(dmadev->lldev);
		if (!rc)
			mchan->paused = false;
		else
			dev_err(dmadev->ddev.dev,
				"failed to resume the channel\n");
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return rc;
}

static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;

	/*
	 * All interrupts are request driven.
	 * HW doesn't send an interrupt by itself.
	 */
	return hidma_ll_inthandler(chirq, lldev);
}

#ifdef CONFIG_GENERIC_MSI_IRQ
static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
{
	struct hidma_lldev **lldevp = arg;
	struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);

	return hidma_ll_inthandler_msi(chirq, *lldevp,
				       1 << (chirq - dmadev->msi_virqbase));
}
#endif

static ssize_t hidma_show_values(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct hidma_dev *mdev = dev_get_drvdata(dev);

	buf[0] = 0;

	if (strcmp(attr->attr.name, "chid") == 0)
		sprintf(buf, "%d\n", mdev->chidx);

	return strlen(buf);
}

static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
{
	device_remove_file(dev->ddev.dev, dev->chid_attrs);
}

static struct device_attribute*
hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
{
	struct device_attribute *attrs;
	char *name_copy;

	attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
			     GFP_KERNEL);
	if (!attrs)
		return NULL;

	name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
	if (!name_copy)
		return NULL;

	attrs->attr.name = name_copy;
	attrs->attr.mode = mode;
	attrs->show = hidma_show_values;
	sysfs_attr_init(&attrs->attr);

	return attrs;
}

static int hidma_sysfs_init(struct hidma_dev *dev)
{
	dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
	if (!dev->chid_attrs)
		return -ENOMEM;

	return device_create_file(dev->ddev.dev, dev->chid_attrs);
}

#ifdef CONFIG_GENERIC_MSI_IRQ
static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct hidma_dev *dmadev = dev_get_drvdata(dev);

	if (!desc->msi_index) {
		writel(msg->address_lo, dmadev->dev_evca + 0x118);
		writel(msg->address_hi, dmadev->dev_evca + 0x11C);
		writel(msg->data, dmadev->dev_evca + 0x120);
	}
}
#endif

static void hidma_free_msis(struct hidma_dev *dmadev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ
	struct device *dev = dmadev->ddev.dev;
	int i, virq;

	for (i = 0; i < HIDMA_MSI_INTS; i++) {
		virq = msi_get_virq(dev, i);
		if (virq)
			devm_free_irq(dev, virq, &dmadev->lldev);
	}

	platform_device_msi_free_irqs_all(dev);
#endif
}

static int hidma_request_msi(struct hidma_dev *dmadev,
			     struct platform_device *pdev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ
	int rc, i, virq;

	rc = platform_device_msi_init_and_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
						     hidma_write_msi_msg);
	if (rc)
		return rc;

	for (i = 0; i < HIDMA_MSI_INTS; i++) {
		virq = msi_get_virq(&pdev->dev, i);
		rc = devm_request_irq(&pdev->dev, virq,
				      hidma_chirq_handler_msi,
				      0, "qcom-hidma-msi",
				      &dmadev->lldev);
		if (rc)
			break;
		if (!i)
			dmadev->msi_virqbase = virq;
	}

	if (rc) {
		/* free allocated MSI interrupts above */
		for (--i; i >= 0; i--) {
			virq = msi_get_virq(&pdev->dev, i);
			devm_free_irq(&pdev->dev, virq, &dmadev->lldev);
		}
		dev_warn(&pdev->dev,
			 "failed to request MSI irq, falling back to wired IRQ\n");
	} else {
		/* Add callback to free MSIs on teardown */
		hidma_ll_setup_irq(dmadev->lldev, true);
	}
	return rc;
#else
	return -EINVAL;
#endif
}

static bool hidma_test_capability(struct device *dev, enum hidma_cap test_cap)
{
	enum hidma_cap cap;

	cap = (uintptr_t) device_get_match_data(dev);
	return cap ? ((cap & test_cap) > 0) : 0;
}

static int hidma_probe(struct platform_device *pdev)
{
	struct hidma_dev *dmadev;
	struct resource *trca_resource;
	struct resource *evca_resource;
	int chirq;
	void __iomem *evca;
	void __iomem *trca;
	int rc;
	bool msi;

	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	trca = devm_platform_get_and_ioremap_resource(pdev, 0, &trca_resource);
	if (IS_ERR(trca)) {
		rc = PTR_ERR(trca);
		goto bailout;
	}

	evca = devm_platform_get_and_ioremap_resource(pdev, 1, &evca_resource);
	if (IS_ERR(evca)) {
		rc = PTR_ERR(evca);
		goto bailout;
	}

	/*
	 * This driver only handles the channel IRQs.
	 * Common IRQ is handled by the management driver.
	 */
	chirq = platform_get_irq(pdev, 0);
	if (chirq < 0) {
		rc = chirq;
		goto bailout;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev) {
		rc = -ENOMEM;
		goto bailout;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	spin_lock_init(&dmadev->lock);
	dmadev->ddev.dev = &pdev->dev;
	pm_runtime_get_sync(dmadev->ddev.dev);

	dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
	dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask);
	if (WARN_ON(!pdev->dev.dma_mask)) {
		rc = -ENXIO;
		goto dmafree;
	}

	dmadev->dev_evca = evca;
	dmadev->evca_resource = evca_resource;
	dmadev->dev_trca = trca;
	dmadev->trca_resource = trca_resource;
	dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
	dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset;
	dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
	dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
	dmadev->ddev.device_tx_status = hidma_tx_status;
	dmadev->ddev.device_issue_pending = hidma_issue_pending;
	dmadev->ddev.device_pause = hidma_pause;
	dmadev->ddev.device_resume = hidma_resume;
	dmadev->ddev.device_terminate_all = hidma_terminate_all;
	dmadev->ddev.copy_align = 8;

	/*
	 * Determine the MSI capability of the platform. Old HW doesn't
	 * support MSI.
	 */
	msi = hidma_test_capability(&pdev->dev, HIDMA_MSI_CAP);
	device_property_read_u32(&pdev->dev, "desc-count",
				 &dmadev->nr_descriptors);

	if (nr_desc_prm) {
		dev_info(&pdev->dev, "overriding number of descriptors as %d\n",
			 nr_desc_prm);
		dmadev->nr_descriptors = nr_desc_prm;
	}

	if (!dmadev->nr_descriptors)
		dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

	if (hidma_test_capability(&pdev->dev, HIDMA_IDENTITY_CAP))
		dmadev->chidx = readl(dmadev->dev_trca + 0x40);
	else
		dmadev->chidx = readl(dmadev->dev_trca + 0x28);

	/* Set DMA mask to 64 bits. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64\n");
		goto dmafree;
	}

	dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
				      dmadev->nr_descriptors, dmadev->dev_trca,
				      dmadev->dev_evca, dmadev->chidx);
	if (!dmadev->lldev) {
		rc = -EPROBE_DEFER;
		goto dmafree;
	}

	platform_set_drvdata(pdev, dmadev);
	if (msi)
		rc = hidma_request_msi(dmadev, pdev);

	if (!msi || rc) {
		hidma_ll_setup_irq(dmadev->lldev, false);
		rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
				      0, "qcom-hidma", dmadev->lldev);
		if (rc)
			goto uninit;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	rc = hidma_chan_init(dmadev, 0);
	if (rc)
		goto uninit;

	rc = dma_async_device_register(&dmadev->ddev);
	if (rc)
		goto uninit;

	dmadev->irq = chirq;
	tasklet_setup(&dmadev->task, hidma_issue_task);
	hidma_debug_init(dmadev);
	hidma_sysfs_init(dmadev);
	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return 0;

uninit:
	if (msi)
		hidma_free_msis(dmadev);

	hidma_ll_uninit(dmadev->lldev);
dmafree:
	if (dmadev)
		hidma_free(dmadev);
bailout:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}

static void hidma_shutdown(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	dev_info(dmadev->ddev.dev, "HI-DMA engine shutdown\n");

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (hidma_ll_disable(dmadev->lldev))
		dev_warn(dmadev->ddev.dev, "channel did not stop\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
}

static void hidma_remove(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	pm_runtime_get_sync(dmadev->ddev.dev);
	dma_async_device_unregister(&dmadev->ddev);
	if (!dmadev->lldev->msi_support)
		devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
	else
		hidma_free_msis(dmadev);

	tasklet_kill(&dmadev->task);
	hidma_sysfs_uninit(dmadev);
	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
	hidma_free(dmadev);

	dev_info(&pdev->dev, "HI-DMA engine removed\n");
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
	{"QCOM8061"},
	{"QCOM8062", HIDMA_MSI_CAP},
	{"QCOM8063", (HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP)},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif

static struct platform_driver hidma_driver = {
	.probe = hidma_probe,
	.remove_new = hidma_remove,
	.shutdown = hidma_shutdown,
	.driver = {
		.name = "hidma",
		.acpi_match_table = ACPI_PTR(hidma_acpi_ids),
	},
};

module_platform_driver(hidma_driver);
MODULE_DESCRIPTION("Qualcomm Technologies HIDMA Channel support");
MODULE_LICENSE("GPL v2");