1 /* 2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License as published by the Free 6 * Software Foundation; either version 2 of the License, or (at your option) 7 * any later version. 8 * 9 * This program is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 * 14 * The full GNU General Public License is included in this distribution in the 15 * file called COPYING. 16 */ 17 18 /* 19 * This code implements the DMA subsystem. It provides a HW-neutral interface 20 * for other kernel code to use asynchronous memory copy capabilities, 21 * if present, and allows different HW DMA drivers to register as providing 22 * this capability. 23 * 24 * Due to the fact we are accelerating what is already a relatively fast 25 * operation, the code goes to great lengths to avoid additional overhead, 26 * such as locking. 27 * 28 * LOCKING: 29 * 30 * The subsystem keeps a global list of dma_device structs it is protected by a 31 * mutex, dma_list_mutex. 32 * 33 * A subsystem can get access to a channel by calling dmaengine_get() followed 34 * by dma_find_channel(), or if it has need for an exclusive channel it can call 35 * dma_request_channel(). Once a channel is allocated a reference is taken 36 * against its corresponding driver to disable removal. 37 * 38 * Each device has a channels list, which runs unlocked but is never modified 39 * once the device is registered, it's just setup by the driver. 40 * 41 * See Documentation/driver-api/dmaengine for more details 42 */ 43 44 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 45 46 #include <linux/platform_device.h> 47 #include <linux/dma-mapping.h> 48 #include <linux/init.h> 49 #include <linux/module.h> 50 #include <linux/mm.h> 51 #include <linux/device.h> 52 #include <linux/dmaengine.h> 53 #include <linux/hardirq.h> 54 #include <linux/spinlock.h> 55 #include <linux/percpu.h> 56 #include <linux/rcupdate.h> 57 #include <linux/mutex.h> 58 #include <linux/jiffies.h> 59 #include <linux/rculist.h> 60 #include <linux/idr.h> 61 #include <linux/slab.h> 62 #include <linux/acpi.h> 63 #include <linux/acpi_dma.h> 64 #include <linux/of_dma.h> 65 #include <linux/mempool.h> 66 #include <linux/numa.h> 67 68 static DEFINE_MUTEX(dma_list_mutex); 69 static DEFINE_IDA(dma_ida); 70 static LIST_HEAD(dma_device_list); 71 static long dmaengine_ref_count; 72 73 /* --- sysfs implementation --- */ 74 75 /** 76 * dev_to_dma_chan - convert a device pointer to its sysfs container object 77 * @dev - device node 78 * 79 * Must be called under dma_list_mutex 80 */ 81 static struct dma_chan *dev_to_dma_chan(struct device *dev) 82 { 83 struct dma_chan_dev *chan_dev; 84 85 chan_dev = container_of(dev, typeof(*chan_dev), device); 86 return chan_dev->chan; 87 } 88 89 static ssize_t memcpy_count_show(struct device *dev, 90 struct device_attribute *attr, char *buf) 91 { 92 struct dma_chan *chan; 93 unsigned long count = 0; 94 int i; 95 int err; 96 97 mutex_lock(&dma_list_mutex); 98 chan = dev_to_dma_chan(dev); 99 if (chan) { 100 for_each_possible_cpu(i) 101 count += per_cpu_ptr(chan->local, i)->memcpy_count; 102 err = sprintf(buf, "%lu\n", count); 103 } else 104 err = -ENODEV; 105 mutex_unlock(&dma_list_mutex); 106 107 return err; 108 } 109 static DEVICE_ATTR_RO(memcpy_count); 110 111 static ssize_t bytes_transferred_show(struct device *dev, 112 struct device_attribute *attr, char *buf) 113 { 114 struct dma_chan *chan; 115 unsigned long count = 0; 116 int i; 117 int err; 118 119 mutex_lock(&dma_list_mutex); 120 chan = dev_to_dma_chan(dev); 121 if (chan) { 122 for_each_possible_cpu(i) 123 count += per_cpu_ptr(chan->local, i)->bytes_transferred; 124 err = sprintf(buf, "%lu\n", count); 125 } else 126 err = -ENODEV; 127 mutex_unlock(&dma_list_mutex); 128 129 return err; 130 } 131 static DEVICE_ATTR_RO(bytes_transferred); 132 133 static ssize_t in_use_show(struct device *dev, struct device_attribute *attr, 134 char *buf) 135 { 136 struct dma_chan *chan; 137 int err; 138 139 mutex_lock(&dma_list_mutex); 140 chan = dev_to_dma_chan(dev); 141 if (chan) 142 err = sprintf(buf, "%d\n", chan->client_count); 143 else 144 err = -ENODEV; 145 mutex_unlock(&dma_list_mutex); 146 147 return err; 148 } 149 static DEVICE_ATTR_RO(in_use); 150 151 static struct attribute *dma_dev_attrs[] = { 152 &dev_attr_memcpy_count.attr, 153 &dev_attr_bytes_transferred.attr, 154 &dev_attr_in_use.attr, 155 NULL, 156 }; 157 ATTRIBUTE_GROUPS(dma_dev); 158 159 static void chan_dev_release(struct device *dev) 160 { 161 struct dma_chan_dev *chan_dev; 162 163 chan_dev = container_of(dev, typeof(*chan_dev), device); 164 if (atomic_dec_and_test(chan_dev->idr_ref)) { 165 ida_free(&dma_ida, chan_dev->dev_id); 166 kfree(chan_dev->idr_ref); 167 } 168 kfree(chan_dev); 169 } 170 171 static struct class dma_devclass = { 172 .name = "dma", 173 .dev_groups = dma_dev_groups, 174 .dev_release = chan_dev_release, 175 }; 176 177 /* --- client and device registration --- */ 178 179 #define dma_device_satisfies_mask(device, mask) \ 180 __dma_device_satisfies_mask((device), &(mask)) 181 static int 182 __dma_device_satisfies_mask(struct dma_device *device, 183 const dma_cap_mask_t *want) 184 { 185 dma_cap_mask_t has; 186 187 bitmap_and(has.bits, want->bits, device->cap_mask.bits, 188 DMA_TX_TYPE_END); 189 return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END); 190 } 191 192 static struct module *dma_chan_to_owner(struct dma_chan *chan) 193 { 194 return chan->device->dev->driver->owner; 195 } 196 197 /** 198 * balance_ref_count - catch up the channel reference count 199 * @chan - channel to balance ->client_count versus dmaengine_ref_count 200 * 201 * balance_ref_count must be called under dma_list_mutex 202 */ 203 static void balance_ref_count(struct dma_chan *chan) 204 { 205 struct module *owner = dma_chan_to_owner(chan); 206 207 while (chan->client_count < dmaengine_ref_count) { 208 __module_get(owner); 209 chan->client_count++; 210 } 211 } 212 213 /** 214 * dma_chan_get - try to grab a dma channel's parent driver module 215 * @chan - channel to grab 216 * 217 * Must be called under dma_list_mutex 218 */ 219 static int dma_chan_get(struct dma_chan *chan) 220 { 221 struct module *owner = dma_chan_to_owner(chan); 222 int ret; 223 224 /* The channel is already in use, update client count */ 225 if (chan->client_count) { 226 __module_get(owner); 227 goto out; 228 } 229 230 if (!try_module_get(owner)) 231 return -ENODEV; 232 233 /* allocate upon first client reference */ 234 if (chan->device->device_alloc_chan_resources) { 235 ret = chan->device->device_alloc_chan_resources(chan); 236 if (ret < 0) 237 goto err_out; 238 } 239 240 if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) 241 balance_ref_count(chan); 242 243 out: 244 chan->client_count++; 245 return 0; 246 247 err_out: 248 module_put(owner); 249 return ret; 250 } 251 252 /** 253 * dma_chan_put - drop a reference to a dma channel's parent driver module 254 * @chan - channel to release 255 * 256 * Must be called under dma_list_mutex 257 */ 258 static void dma_chan_put(struct dma_chan *chan) 259 { 260 /* This channel is not in use, bail out */ 261 if (!chan->client_count) 262 return; 263 264 chan->client_count--; 265 module_put(dma_chan_to_owner(chan)); 266 267 /* This channel is not in use anymore, free it */ 268 if (!chan->client_count && chan->device->device_free_chan_resources) { 269 /* Make sure all operations have completed */ 270 dmaengine_synchronize(chan); 271 chan->device->device_free_chan_resources(chan); 272 } 273 274 /* If the channel is used via a DMA request router, free the mapping */ 275 if (chan->router && chan->router->route_free) { 276 chan->router->route_free(chan->router->dev, chan->route_data); 277 chan->router = NULL; 278 chan->route_data = NULL; 279 } 280 } 281 282 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) 283 { 284 enum dma_status status; 285 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); 286 287 dma_async_issue_pending(chan); 288 do { 289 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); 290 if (time_after_eq(jiffies, dma_sync_wait_timeout)) { 291 dev_err(chan->device->dev, "%s: timeout!\n", __func__); 292 return DMA_ERROR; 293 } 294 if (status != DMA_IN_PROGRESS) 295 break; 296 cpu_relax(); 297 } while (1); 298 299 return status; 300 } 301 EXPORT_SYMBOL(dma_sync_wait); 302 303 /** 304 * dma_cap_mask_all - enable iteration over all operation types 305 */ 306 static dma_cap_mask_t dma_cap_mask_all; 307 308 /** 309 * dma_chan_tbl_ent - tracks channel allocations per core/operation 310 * @chan - associated channel for this entry 311 */ 312 struct dma_chan_tbl_ent { 313 struct dma_chan *chan; 314 }; 315 316 /** 317 * channel_table - percpu lookup table for memory-to-memory offload providers 318 */ 319 static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END]; 320 321 static int __init dma_channel_table_init(void) 322 { 323 enum dma_transaction_type cap; 324 int err = 0; 325 326 bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); 327 328 /* 'interrupt', 'private', and 'slave' are channel capabilities, 329 * but are not associated with an operation so they do not need 330 * an entry in the channel_table 331 */ 332 clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); 333 clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits); 334 clear_bit(DMA_SLAVE, dma_cap_mask_all.bits); 335 336 for_each_dma_cap_mask(cap, dma_cap_mask_all) { 337 channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent); 338 if (!channel_table[cap]) { 339 err = -ENOMEM; 340 break; 341 } 342 } 343 344 if (err) { 345 pr_err("initialization failure\n"); 346 for_each_dma_cap_mask(cap, dma_cap_mask_all) 347 free_percpu(channel_table[cap]); 348 } 349 350 return err; 351 } 352 arch_initcall(dma_channel_table_init); 353 354 /** 355 * dma_find_channel - find a channel to carry out the operation 356 * @tx_type: transaction type 357 */ 358 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) 359 { 360 return this_cpu_read(channel_table[tx_type]->chan); 361 } 362 EXPORT_SYMBOL(dma_find_channel); 363 364 /** 365 * dma_issue_pending_all - flush all pending operations across all channels 366 */ 367 void dma_issue_pending_all(void) 368 { 369 struct dma_device *device; 370 struct dma_chan *chan; 371 372 rcu_read_lock(); 373 list_for_each_entry_rcu(device, &dma_device_list, global_node) { 374 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) 375 continue; 376 list_for_each_entry(chan, &device->channels, device_node) 377 if (chan->client_count) 378 device->device_issue_pending(chan); 379 } 380 rcu_read_unlock(); 381 } 382 EXPORT_SYMBOL(dma_issue_pending_all); 383 384 /** 385 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu 386 */ 387 static bool dma_chan_is_local(struct dma_chan *chan, int cpu) 388 { 389 int node = dev_to_node(chan->device->dev); 390 return node == NUMA_NO_NODE || 391 cpumask_test_cpu(cpu, cpumask_of_node(node)); 392 } 393 394 /** 395 * min_chan - returns the channel with min count and in the same numa-node as the cpu 396 * @cap: capability to match 397 * @cpu: cpu index which the channel should be close to 398 * 399 * If some channels are close to the given cpu, the one with the lowest 400 * reference count is returned. Otherwise, cpu is ignored and only the 401 * reference count is taken into account. 402 * Must be called under dma_list_mutex. 403 */ 404 static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu) 405 { 406 struct dma_device *device; 407 struct dma_chan *chan; 408 struct dma_chan *min = NULL; 409 struct dma_chan *localmin = NULL; 410 411 list_for_each_entry(device, &dma_device_list, global_node) { 412 if (!dma_has_cap(cap, device->cap_mask) || 413 dma_has_cap(DMA_PRIVATE, device->cap_mask)) 414 continue; 415 list_for_each_entry(chan, &device->channels, device_node) { 416 if (!chan->client_count) 417 continue; 418 if (!min || chan->table_count < min->table_count) 419 min = chan; 420 421 if (dma_chan_is_local(chan, cpu)) 422 if (!localmin || 423 chan->table_count < localmin->table_count) 424 localmin = chan; 425 } 426 } 427 428 chan = localmin ? localmin : min; 429 430 if (chan) 431 chan->table_count++; 432 433 return chan; 434 } 435 436 /** 437 * dma_channel_rebalance - redistribute the available channels 438 * 439 * Optimize for cpu isolation (each cpu gets a dedicated channel for an 440 * operation type) in the SMP case, and operation isolation (avoid 441 * multi-tasking channels) in the non-SMP case. Must be called under 442 * dma_list_mutex. 443 */ 444 static void dma_channel_rebalance(void) 445 { 446 struct dma_chan *chan; 447 struct dma_device *device; 448 int cpu; 449 int cap; 450 451 /* undo the last distribution */ 452 for_each_dma_cap_mask(cap, dma_cap_mask_all) 453 for_each_possible_cpu(cpu) 454 per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; 455 456 list_for_each_entry(device, &dma_device_list, global_node) { 457 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) 458 continue; 459 list_for_each_entry(chan, &device->channels, device_node) 460 chan->table_count = 0; 461 } 462 463 /* don't populate the channel_table if no clients are available */ 464 if (!dmaengine_ref_count) 465 return; 466 467 /* redistribute available channels */ 468 for_each_dma_cap_mask(cap, dma_cap_mask_all) 469 for_each_online_cpu(cpu) { 470 chan = min_chan(cap, cpu); 471 per_cpu_ptr(channel_table[cap], cpu)->chan = chan; 472 } 473 } 474 475 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) 476 { 477 struct dma_device *device; 478 479 if (!chan || !caps) 480 return -EINVAL; 481 482 device = chan->device; 483 484 /* check if the channel supports slave transactions */ 485 if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) || 486 test_bit(DMA_CYCLIC, device->cap_mask.bits))) 487 return -ENXIO; 488 489 /* 490 * Check whether it reports it uses the generic slave 491 * capabilities, if not, that means it doesn't support any 492 * kind of slave capabilities reporting. 493 */ 494 if (!device->directions) 495 return -ENXIO; 496 497 caps->src_addr_widths = device->src_addr_widths; 498 caps->dst_addr_widths = device->dst_addr_widths; 499 caps->directions = device->directions; 500 caps->max_burst = device->max_burst; 501 caps->residue_granularity = device->residue_granularity; 502 caps->descriptor_reuse = device->descriptor_reuse; 503 caps->cmd_pause = !!device->device_pause; 504 caps->cmd_resume = !!device->device_resume; 505 caps->cmd_terminate = !!device->device_terminate_all; 506 507 return 0; 508 } 509 EXPORT_SYMBOL_GPL(dma_get_slave_caps); 510 511 static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, 512 struct dma_device *dev, 513 dma_filter_fn fn, void *fn_param) 514 { 515 struct dma_chan *chan; 516 517 if (mask && !__dma_device_satisfies_mask(dev, mask)) { 518 dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__); 519 return NULL; 520 } 521 /* devices with multiple channels need special handling as we need to 522 * ensure that all channels are either private or public. 523 */ 524 if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask)) 525 list_for_each_entry(chan, &dev->channels, device_node) { 526 /* some channels are already publicly allocated */ 527 if (chan->client_count) 528 return NULL; 529 } 530 531 list_for_each_entry(chan, &dev->channels, device_node) { 532 if (chan->client_count) { 533 dev_dbg(dev->dev, "%s: %s busy\n", 534 __func__, dma_chan_name(chan)); 535 continue; 536 } 537 if (fn && !fn(chan, fn_param)) { 538 dev_dbg(dev->dev, "%s: %s filter said false\n", 539 __func__, dma_chan_name(chan)); 540 continue; 541 } 542 return chan; 543 } 544 545 return NULL; 546 } 547 548 static struct dma_chan *find_candidate(struct dma_device *device, 549 const dma_cap_mask_t *mask, 550 dma_filter_fn fn, void *fn_param) 551 { 552 struct dma_chan *chan = private_candidate(mask, device, fn, fn_param); 553 int err; 554 555 if (chan) { 556 /* Found a suitable channel, try to grab, prep, and return it. 557 * We first set DMA_PRIVATE to disable balance_ref_count as this 558 * channel will not be published in the general-purpose 559 * allocator 560 */ 561 dma_cap_set(DMA_PRIVATE, device->cap_mask); 562 device->privatecnt++; 563 err = dma_chan_get(chan); 564 565 if (err) { 566 if (err == -ENODEV) { 567 dev_dbg(device->dev, "%s: %s module removed\n", 568 __func__, dma_chan_name(chan)); 569 list_del_rcu(&device->global_node); 570 } else 571 dev_dbg(device->dev, 572 "%s: failed to get %s: (%d)\n", 573 __func__, dma_chan_name(chan), err); 574 575 if (--device->privatecnt == 0) 576 dma_cap_clear(DMA_PRIVATE, device->cap_mask); 577 578 chan = ERR_PTR(err); 579 } 580 } 581 582 return chan ? chan : ERR_PTR(-EPROBE_DEFER); 583 } 584 585 /** 586 * dma_get_slave_channel - try to get specific channel exclusively 587 * @chan: target channel 588 */ 589 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) 590 { 591 int err = -EBUSY; 592 593 /* lock against __dma_request_channel */ 594 mutex_lock(&dma_list_mutex); 595 596 if (chan->client_count == 0) { 597 struct dma_device *device = chan->device; 598 599 dma_cap_set(DMA_PRIVATE, device->cap_mask); 600 device->privatecnt++; 601 err = dma_chan_get(chan); 602 if (err) { 603 dev_dbg(chan->device->dev, 604 "%s: failed to get %s: (%d)\n", 605 __func__, dma_chan_name(chan), err); 606 chan = NULL; 607 if (--device->privatecnt == 0) 608 dma_cap_clear(DMA_PRIVATE, device->cap_mask); 609 } 610 } else 611 chan = NULL; 612 613 mutex_unlock(&dma_list_mutex); 614 615 616 return chan; 617 } 618 EXPORT_SYMBOL_GPL(dma_get_slave_channel); 619 620 struct dma_chan *dma_get_any_slave_channel(struct dma_device *device) 621 { 622 dma_cap_mask_t mask; 623 struct dma_chan *chan; 624 625 dma_cap_zero(mask); 626 dma_cap_set(DMA_SLAVE, mask); 627 628 /* lock against __dma_request_channel */ 629 mutex_lock(&dma_list_mutex); 630 631 chan = find_candidate(device, &mask, NULL, NULL); 632 633 mutex_unlock(&dma_list_mutex); 634 635 return IS_ERR(chan) ? NULL : chan; 636 } 637 EXPORT_SYMBOL_GPL(dma_get_any_slave_channel); 638 639 /** 640 * __dma_request_channel - try to allocate an exclusive channel 641 * @mask: capabilities that the channel must satisfy 642 * @fn: optional callback to disposition available channels 643 * @fn_param: opaque parameter to pass to dma_filter_fn 644 * @np: device node to look for DMA channels 645 * 646 * Returns pointer to appropriate DMA channel on success or NULL. 647 */ 648 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, 649 dma_filter_fn fn, void *fn_param, 650 struct device_node *np) 651 { 652 struct dma_device *device, *_d; 653 struct dma_chan *chan = NULL; 654 655 /* Find a channel */ 656 mutex_lock(&dma_list_mutex); 657 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { 658 /* Finds a DMA controller with matching device node */ 659 if (np && device->dev->of_node && np != device->dev->of_node) 660 continue; 661 662 chan = find_candidate(device, mask, fn, fn_param); 663 if (!IS_ERR(chan)) 664 break; 665 666 chan = NULL; 667 } 668 mutex_unlock(&dma_list_mutex); 669 670 pr_debug("%s: %s (%s)\n", 671 __func__, 672 chan ? "success" : "fail", 673 chan ? dma_chan_name(chan) : NULL); 674 675 return chan; 676 } 677 EXPORT_SYMBOL_GPL(__dma_request_channel); 678 679 static const struct dma_slave_map *dma_filter_match(struct dma_device *device, 680 const char *name, 681 struct device *dev) 682 { 683 int i; 684 685 if (!device->filter.mapcnt) 686 return NULL; 687 688 for (i = 0; i < device->filter.mapcnt; i++) { 689 const struct dma_slave_map *map = &device->filter.map[i]; 690 691 if (!strcmp(map->devname, dev_name(dev)) && 692 !strcmp(map->slave, name)) 693 return map; 694 } 695 696 return NULL; 697 } 698 699 /** 700 * dma_request_chan - try to allocate an exclusive slave channel 701 * @dev: pointer to client device structure 702 * @name: slave channel name 703 * 704 * Returns pointer to appropriate DMA channel on success or an error pointer. 705 */ 706 struct dma_chan *dma_request_chan(struct device *dev, const char *name) 707 { 708 struct dma_device *d, *_d; 709 struct dma_chan *chan = NULL; 710 711 /* If device-tree is present get slave info from here */ 712 if (dev->of_node) 713 chan = of_dma_request_slave_channel(dev->of_node, name); 714 715 /* If device was enumerated by ACPI get slave info from here */ 716 if (has_acpi_companion(dev) && !chan) 717 chan = acpi_dma_request_slave_chan_by_name(dev, name); 718 719 if (chan) { 720 /* Valid channel found or requester needs to be deferred */ 721 if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER) 722 return chan; 723 } 724 725 /* Try to find the channel via the DMA filter map(s) */ 726 mutex_lock(&dma_list_mutex); 727 list_for_each_entry_safe(d, _d, &dma_device_list, global_node) { 728 dma_cap_mask_t mask; 729 const struct dma_slave_map *map = dma_filter_match(d, name, dev); 730 731 if (!map) 732 continue; 733 734 dma_cap_zero(mask); 735 dma_cap_set(DMA_SLAVE, mask); 736 737 chan = find_candidate(d, &mask, d->filter.fn, map->param); 738 if (!IS_ERR(chan)) 739 break; 740 } 741 mutex_unlock(&dma_list_mutex); 742 743 return chan ? chan : ERR_PTR(-EPROBE_DEFER); 744 } 745 EXPORT_SYMBOL_GPL(dma_request_chan); 746 747 /** 748 * dma_request_slave_channel - try to allocate an exclusive slave channel 749 * @dev: pointer to client device structure 750 * @name: slave channel name 751 * 752 * Returns pointer to appropriate DMA channel on success or NULL. 753 */ 754 struct dma_chan *dma_request_slave_channel(struct device *dev, 755 const char *name) 756 { 757 struct dma_chan *ch = dma_request_chan(dev, name); 758 if (IS_ERR(ch)) 759 return NULL; 760 761 return ch; 762 } 763 EXPORT_SYMBOL_GPL(dma_request_slave_channel); 764 765 /** 766 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities 767 * @mask: capabilities that the channel must satisfy 768 * 769 * Returns pointer to appropriate DMA channel on success or an error pointer. 770 */ 771 struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask) 772 { 773 struct dma_chan *chan; 774 775 if (!mask) 776 return ERR_PTR(-ENODEV); 777 778 chan = __dma_request_channel(mask, NULL, NULL, NULL); 779 if (!chan) { 780 mutex_lock(&dma_list_mutex); 781 if (list_empty(&dma_device_list)) 782 chan = ERR_PTR(-EPROBE_DEFER); 783 else 784 chan = ERR_PTR(-ENODEV); 785 mutex_unlock(&dma_list_mutex); 786 } 787 788 return chan; 789 } 790 EXPORT_SYMBOL_GPL(dma_request_chan_by_mask); 791 792 void dma_release_channel(struct dma_chan *chan) 793 { 794 mutex_lock(&dma_list_mutex); 795 WARN_ONCE(chan->client_count != 1, 796 "chan reference count %d != 1\n", chan->client_count); 797 dma_chan_put(chan); 798 /* drop PRIVATE cap enabled by __dma_request_channel() */ 799 if (--chan->device->privatecnt == 0) 800 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); 801 mutex_unlock(&dma_list_mutex); 802 } 803 EXPORT_SYMBOL_GPL(dma_release_channel); 804 805 /** 806 * dmaengine_get - register interest in dma_channels 807 */ 808 void dmaengine_get(void) 809 { 810 struct dma_device *device, *_d; 811 struct dma_chan *chan; 812 int err; 813 814 mutex_lock(&dma_list_mutex); 815 dmaengine_ref_count++; 816 817 /* try to grab channels */ 818 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { 819 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) 820 continue; 821 list_for_each_entry(chan, &device->channels, device_node) { 822 err = dma_chan_get(chan); 823 if (err == -ENODEV) { 824 /* module removed before we could use it */ 825 list_del_rcu(&device->global_node); 826 break; 827 } else if (err) 828 dev_dbg(chan->device->dev, 829 "%s: failed to get %s: (%d)\n", 830 __func__, dma_chan_name(chan), err); 831 } 832 } 833 834 /* if this is the first reference and there were channels 835 * waiting we need to rebalance to get those channels 836 * incorporated into the channel table 837 */ 838 if (dmaengine_ref_count == 1) 839 dma_channel_rebalance(); 840 mutex_unlock(&dma_list_mutex); 841 } 842 EXPORT_SYMBOL(dmaengine_get); 843 844 /** 845 * dmaengine_put - let dma drivers be removed when ref_count == 0 846 */ 847 void dmaengine_put(void) 848 { 849 struct dma_device *device; 850 struct dma_chan *chan; 851 852 mutex_lock(&dma_list_mutex); 853 dmaengine_ref_count--; 854 BUG_ON(dmaengine_ref_count < 0); 855 /* drop channel references */ 856 list_for_each_entry(device, &dma_device_list, global_node) { 857 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) 858 continue; 859 list_for_each_entry(chan, &device->channels, device_node) 860 dma_chan_put(chan); 861 } 862 mutex_unlock(&dma_list_mutex); 863 } 864 EXPORT_SYMBOL(dmaengine_put); 865 866 static bool device_has_all_tx_types(struct dma_device *device) 867 { 868 /* A device that satisfies this test has channels that will never cause 869 * an async_tx channel switch event as all possible operation types can 870 * be handled. 871 */ 872 #ifdef CONFIG_ASYNC_TX_DMA 873 if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask)) 874 return false; 875 #endif 876 877 #if IS_ENABLED(CONFIG_ASYNC_MEMCPY) 878 if (!dma_has_cap(DMA_MEMCPY, device->cap_mask)) 879 return false; 880 #endif 881 882 #if IS_ENABLED(CONFIG_ASYNC_XOR) 883 if (!dma_has_cap(DMA_XOR, device->cap_mask)) 884 return false; 885 886 #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA 887 if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask)) 888 return false; 889 #endif 890 #endif 891 892 #if IS_ENABLED(CONFIG_ASYNC_PQ) 893 if (!dma_has_cap(DMA_PQ, device->cap_mask)) 894 return false; 895 896 #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA 897 if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask)) 898 return false; 899 #endif 900 #endif 901 902 return true; 903 } 904 905 static int get_dma_id(struct dma_device *device) 906 { 907 int rc = ida_alloc(&dma_ida, GFP_KERNEL); 908 909 if (rc < 0) 910 return rc; 911 device->dev_id = rc; 912 return 0; 913 } 914 915 /** 916 * dma_async_device_register - registers DMA devices found 917 * @device: &dma_device 918 */ 919 int dma_async_device_register(struct dma_device *device) 920 { 921 int chancnt = 0, rc; 922 struct dma_chan* chan; 923 atomic_t *idr_ref; 924 925 if (!device) 926 return -ENODEV; 927 928 /* validate device routines */ 929 if (!device->dev) { 930 pr_err("DMAdevice must have dev\n"); 931 return -EIO; 932 } 933 934 if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) { 935 dev_err(device->dev, 936 "Device claims capability %s, but op is not defined\n", 937 "DMA_MEMCPY"); 938 return -EIO; 939 } 940 941 if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) { 942 dev_err(device->dev, 943 "Device claims capability %s, but op is not defined\n", 944 "DMA_XOR"); 945 return -EIO; 946 } 947 948 if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) { 949 dev_err(device->dev, 950 "Device claims capability %s, but op is not defined\n", 951 "DMA_XOR_VAL"); 952 return -EIO; 953 } 954 955 if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) { 956 dev_err(device->dev, 957 "Device claims capability %s, but op is not defined\n", 958 "DMA_PQ"); 959 return -EIO; 960 } 961 962 if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) { 963 dev_err(device->dev, 964 "Device claims capability %s, but op is not defined\n", 965 "DMA_PQ_VAL"); 966 return -EIO; 967 } 968 969 if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) { 970 dev_err(device->dev, 971 "Device claims capability %s, but op is not defined\n", 972 "DMA_MEMSET"); 973 return -EIO; 974 } 975 976 if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) { 977 dev_err(device->dev, 978 "Device claims capability %s, but op is not defined\n", 979 "DMA_INTERRUPT"); 980 return -EIO; 981 } 982 983 if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) { 984 dev_err(device->dev, 985 "Device claims capability %s, but op is not defined\n", 986 "DMA_CYCLIC"); 987 return -EIO; 988 } 989 990 if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) { 991 dev_err(device->dev, 992 "Device claims capability %s, but op is not defined\n", 993 "DMA_INTERLEAVE"); 994 return -EIO; 995 } 996 997 998 if (!device->device_tx_status) { 999 dev_err(device->dev, "Device tx_status is not defined\n"); 1000 return -EIO; 1001 } 1002 1003 1004 if (!device->device_issue_pending) { 1005 dev_err(device->dev, "Device issue_pending is not defined\n"); 1006 return -EIO; 1007 } 1008 1009 /* note: this only matters in the 1010 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case 1011 */ 1012 if (device_has_all_tx_types(device)) 1013 dma_cap_set(DMA_ASYNC_TX, device->cap_mask); 1014 1015 idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); 1016 if (!idr_ref) 1017 return -ENOMEM; 1018 rc = get_dma_id(device); 1019 if (rc != 0) { 1020 kfree(idr_ref); 1021 return rc; 1022 } 1023 1024 atomic_set(idr_ref, 0); 1025 1026 /* represent channels in sysfs. Probably want devs too */ 1027 list_for_each_entry(chan, &device->channels, device_node) { 1028 rc = -ENOMEM; 1029 chan->local = alloc_percpu(typeof(*chan->local)); 1030 if (chan->local == NULL) 1031 goto err_out; 1032 chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); 1033 if (chan->dev == NULL) { 1034 free_percpu(chan->local); 1035 chan->local = NULL; 1036 goto err_out; 1037 } 1038 1039 chan->chan_id = chancnt++; 1040 chan->dev->device.class = &dma_devclass; 1041 chan->dev->device.parent = device->dev; 1042 chan->dev->chan = chan; 1043 chan->dev->idr_ref = idr_ref; 1044 chan->dev->dev_id = device->dev_id; 1045 atomic_inc(idr_ref); 1046 dev_set_name(&chan->dev->device, "dma%dchan%d", 1047 device->dev_id, chan->chan_id); 1048 1049 rc = device_register(&chan->dev->device); 1050 if (rc) { 1051 free_percpu(chan->local); 1052 chan->local = NULL; 1053 kfree(chan->dev); 1054 atomic_dec(idr_ref); 1055 goto err_out; 1056 } 1057 chan->client_count = 0; 1058 } 1059 1060 if (!chancnt) { 1061 dev_err(device->dev, "%s: device has no channels!\n", __func__); 1062 rc = -ENODEV; 1063 goto err_out; 1064 } 1065 1066 device->chancnt = chancnt; 1067 1068 mutex_lock(&dma_list_mutex); 1069 /* take references on public channels */ 1070 if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask)) 1071 list_for_each_entry(chan, &device->channels, device_node) { 1072 /* if clients are already waiting for channels we need 1073 * to take references on their behalf 1074 */ 1075 if (dma_chan_get(chan) == -ENODEV) { 1076 /* note we can only get here for the first 1077 * channel as the remaining channels are 1078 * guaranteed to get a reference 1079 */ 1080 rc = -ENODEV; 1081 mutex_unlock(&dma_list_mutex); 1082 goto err_out; 1083 } 1084 } 1085 list_add_tail_rcu(&device->global_node, &dma_device_list); 1086 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) 1087 device->privatecnt++; /* Always private */ 1088 dma_channel_rebalance(); 1089 mutex_unlock(&dma_list_mutex); 1090 1091 return 0; 1092 1093 err_out: 1094 /* if we never registered a channel just release the idr */ 1095 if (atomic_read(idr_ref) == 0) { 1096 ida_free(&dma_ida, device->dev_id); 1097 kfree(idr_ref); 1098 return rc; 1099 } 1100 1101 list_for_each_entry(chan, &device->channels, device_node) { 1102 if (chan->local == NULL) 1103 continue; 1104 mutex_lock(&dma_list_mutex); 1105 chan->dev->chan = NULL; 1106 mutex_unlock(&dma_list_mutex); 1107 device_unregister(&chan->dev->device); 1108 free_percpu(chan->local); 1109 } 1110 return rc; 1111 } 1112 EXPORT_SYMBOL(dma_async_device_register); 1113 1114 /** 1115 * dma_async_device_unregister - unregister a DMA device 1116 * @device: &dma_device 1117 * 1118 * This routine is called by dma driver exit routines, dmaengine holds module 1119 * references to prevent it being called while channels are in use. 1120 */ 1121 void dma_async_device_unregister(struct dma_device *device) 1122 { 1123 struct dma_chan *chan; 1124 1125 mutex_lock(&dma_list_mutex); 1126 list_del_rcu(&device->global_node); 1127 dma_channel_rebalance(); 1128 mutex_unlock(&dma_list_mutex); 1129 1130 list_for_each_entry(chan, &device->channels, device_node) { 1131 WARN_ONCE(chan->client_count, 1132 "%s called while %d clients hold a reference\n", 1133 __func__, chan->client_count); 1134 mutex_lock(&dma_list_mutex); 1135 chan->dev->chan = NULL; 1136 mutex_unlock(&dma_list_mutex); 1137 device_unregister(&chan->dev->device); 1138 free_percpu(chan->local); 1139 } 1140 } 1141 EXPORT_SYMBOL(dma_async_device_unregister); 1142 1143 static void dmam_device_release(struct device *dev, void *res) 1144 { 1145 struct dma_device *device; 1146 1147 device = *(struct dma_device **)res; 1148 dma_async_device_unregister(device); 1149 } 1150 1151 /** 1152 * dmaenginem_async_device_register - registers DMA devices found 1153 * @device: &dma_device 1154 * 1155 * The operation is managed and will be undone on driver detach. 1156 */ 1157 int dmaenginem_async_device_register(struct dma_device *device) 1158 { 1159 void *p; 1160 int ret; 1161 1162 p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL); 1163 if (!p) 1164 return -ENOMEM; 1165 1166 ret = dma_async_device_register(device); 1167 if (!ret) { 1168 *(struct dma_device **)p = device; 1169 devres_add(device->dev, p); 1170 } else { 1171 devres_free(p); 1172 } 1173 1174 return ret; 1175 } 1176 EXPORT_SYMBOL(dmaenginem_async_device_register); 1177 1178 struct dmaengine_unmap_pool { 1179 struct kmem_cache *cache; 1180 const char *name; 1181 mempool_t *pool; 1182 size_t size; 1183 }; 1184 1185 #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } 1186 static struct dmaengine_unmap_pool unmap_pool[] = { 1187 __UNMAP_POOL(2), 1188 #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) 1189 __UNMAP_POOL(16), 1190 __UNMAP_POOL(128), 1191 __UNMAP_POOL(256), 1192 #endif 1193 }; 1194 1195 static struct dmaengine_unmap_pool *__get_unmap_pool(int nr) 1196 { 1197 int order = get_count_order(nr); 1198 1199 switch (order) { 1200 case 0 ... 1: 1201 return &unmap_pool[0]; 1202 #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) 1203 case 2 ... 4: 1204 return &unmap_pool[1]; 1205 case 5 ... 7: 1206 return &unmap_pool[2]; 1207 case 8: 1208 return &unmap_pool[3]; 1209 #endif 1210 default: 1211 BUG(); 1212 return NULL; 1213 } 1214 } 1215 1216 static void dmaengine_unmap(struct kref *kref) 1217 { 1218 struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref); 1219 struct device *dev = unmap->dev; 1220 int cnt, i; 1221 1222 cnt = unmap->to_cnt; 1223 for (i = 0; i < cnt; i++) 1224 dma_unmap_page(dev, unmap->addr[i], unmap->len, 1225 DMA_TO_DEVICE); 1226 cnt += unmap->from_cnt; 1227 for (; i < cnt; i++) 1228 dma_unmap_page(dev, unmap->addr[i], unmap->len, 1229 DMA_FROM_DEVICE); 1230 cnt += unmap->bidi_cnt; 1231 for (; i < cnt; i++) { 1232 if (unmap->addr[i] == 0) 1233 continue; 1234 dma_unmap_page(dev, unmap->addr[i], unmap->len, 1235 DMA_BIDIRECTIONAL); 1236 } 1237 cnt = unmap->map_cnt; 1238 mempool_free(unmap, __get_unmap_pool(cnt)->pool); 1239 } 1240 1241 void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) 1242 { 1243 if (unmap) 1244 kref_put(&unmap->kref, dmaengine_unmap); 1245 } 1246 EXPORT_SYMBOL_GPL(dmaengine_unmap_put); 1247 1248 static void dmaengine_destroy_unmap_pool(void) 1249 { 1250 int i; 1251 1252 for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { 1253 struct dmaengine_unmap_pool *p = &unmap_pool[i]; 1254 1255 mempool_destroy(p->pool); 1256 p->pool = NULL; 1257 kmem_cache_destroy(p->cache); 1258 p->cache = NULL; 1259 } 1260 } 1261 1262 static int __init dmaengine_init_unmap_pool(void) 1263 { 1264 int i; 1265 1266 for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { 1267 struct dmaengine_unmap_pool *p = &unmap_pool[i]; 1268 size_t size; 1269 1270 size = sizeof(struct dmaengine_unmap_data) + 1271 sizeof(dma_addr_t) * p->size; 1272 1273 p->cache = kmem_cache_create(p->name, size, 0, 1274 SLAB_HWCACHE_ALIGN, NULL); 1275 if (!p->cache) 1276 break; 1277 p->pool = mempool_create_slab_pool(1, p->cache); 1278 if (!p->pool) 1279 break; 1280 } 1281 1282 if (i == ARRAY_SIZE(unmap_pool)) 1283 return 0; 1284 1285 dmaengine_destroy_unmap_pool(); 1286 return -ENOMEM; 1287 } 1288 1289 struct dmaengine_unmap_data * 1290 dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) 1291 { 1292 struct dmaengine_unmap_data *unmap; 1293 1294 unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags); 1295 if (!unmap) 1296 return NULL; 1297 1298 memset(unmap, 0, sizeof(*unmap)); 1299 kref_init(&unmap->kref); 1300 unmap->dev = dev; 1301 unmap->map_cnt = nr; 1302 1303 return unmap; 1304 } 1305 EXPORT_SYMBOL(dmaengine_get_unmap_data); 1306 1307 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, 1308 struct dma_chan *chan) 1309 { 1310 tx->chan = chan; 1311 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH 1312 spin_lock_init(&tx->lock); 1313 #endif 1314 } 1315 EXPORT_SYMBOL(dma_async_tx_descriptor_init); 1316 1317 /* dma_wait_for_async_tx - spin wait for a transaction to complete 1318 * @tx: in-flight transaction to wait on 1319 */ 1320 enum dma_status 1321 dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) 1322 { 1323 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); 1324 1325 if (!tx) 1326 return DMA_COMPLETE; 1327 1328 while (tx->cookie == -EBUSY) { 1329 if (time_after_eq(jiffies, dma_sync_wait_timeout)) { 1330 dev_err(tx->chan->device->dev, 1331 "%s timeout waiting for descriptor submission\n", 1332 __func__); 1333 return DMA_ERROR; 1334 } 1335 cpu_relax(); 1336 } 1337 return dma_sync_wait(tx->chan, tx->cookie); 1338 } 1339 EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); 1340 1341 /* dma_run_dependencies - helper routine for dma drivers to process 1342 * (start) dependent operations on their target channel 1343 * @tx: transaction with dependencies 1344 */ 1345 void dma_run_dependencies(struct dma_async_tx_descriptor *tx) 1346 { 1347 struct dma_async_tx_descriptor *dep = txd_next(tx); 1348 struct dma_async_tx_descriptor *dep_next; 1349 struct dma_chan *chan; 1350 1351 if (!dep) 1352 return; 1353 1354 /* we'll submit tx->next now, so clear the link */ 1355 txd_clear_next(tx); 1356 chan = dep->chan; 1357 1358 /* keep submitting up until a channel switch is detected 1359 * in that case we will be called again as a result of 1360 * processing the interrupt from async_tx_channel_switch 1361 */ 1362 for (; dep; dep = dep_next) { 1363 txd_lock(dep); 1364 txd_clear_parent(dep); 1365 dep_next = txd_next(dep); 1366 if (dep_next && dep_next->chan == chan) 1367 txd_clear_next(dep); /* ->next will be submitted */ 1368 else 1369 dep_next = NULL; /* submit current dep and terminate */ 1370 txd_unlock(dep); 1371 1372 dep->tx_submit(dep); 1373 } 1374 1375 chan->device->device_issue_pending(chan); 1376 } 1377 EXPORT_SYMBOL_GPL(dma_run_dependencies); 1378 1379 static int __init dma_bus_init(void) 1380 { 1381 int err = dmaengine_init_unmap_pool(); 1382 1383 if (err) 1384 return err; 1385 return class_register(&dma_devclass); 1386 } 1387 arch_initcall(dma_bus_init); 1388 1389 1390