/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or, if it needs an exclusive channel, it can call
 * dma_request_channel(). Once a channel is allocated, a reference is taken
 * against its corresponding driver to prevent removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is simply set up by the driver.
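 *
 * As an illustrative sketch (not code that is built here), a client of the
 * shared, opportunistic interface does roughly:
 *
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan) {
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *		dma_async_issue_pending(chan);
 *	}
 *	...
 *	dmaengine_put();
 *
 * while a client that needs a channel to itself builds a capability mask,
 * optionally supplies a filter callback, and keeps the channel until it
 * calls dma_release_channel():
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, my_filter_param);
 *
 * (my_filter and my_filter_param above are hypothetical, client-defined
 * names.)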
44 * 45 * See Documentation/dmaengine.txt for more details 46 */ 47 48 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 49 50 #include <linux/dma-mapping.h> 51 #include <linux/init.h> 52 #include <linux/module.h> 53 #include <linux/mm.h> 54 #include <linux/device.h> 55 #include <linux/dmaengine.h> 56 #include <linux/hardirq.h> 57 #include <linux/spinlock.h> 58 #include <linux/percpu.h> 59 #include <linux/rcupdate.h> 60 #include <linux/mutex.h> 61 #include <linux/jiffies.h> 62 #include <linux/rculist.h> 63 #include <linux/idr.h> 64 #include <linux/slab.h> 65 #include <linux/of_dma.h> 66 67 static DEFINE_MUTEX(dma_list_mutex); 68 static DEFINE_IDR(dma_idr); 69 static LIST_HEAD(dma_device_list); 70 static long dmaengine_ref_count; 71 72 /* --- sysfs implementation --- */ 73 74 /** 75 * dev_to_dma_chan - convert a device pointer to the its sysfs container object 76 * @dev - device node 77 * 78 * Must be called under dma_list_mutex 79 */ 80 static struct dma_chan *dev_to_dma_chan(struct device *dev) 81 { 82 struct dma_chan_dev *chan_dev; 83 84 chan_dev = container_of(dev, typeof(*chan_dev), device); 85 return chan_dev->chan; 86 } 87 88 static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf) 89 { 90 struct dma_chan *chan; 91 unsigned long count = 0; 92 int i; 93 int err; 94 95 mutex_lock(&dma_list_mutex); 96 chan = dev_to_dma_chan(dev); 97 if (chan) { 98 for_each_possible_cpu(i) 99 count += per_cpu_ptr(chan->local, i)->memcpy_count; 100 err = sprintf(buf, "%lu\n", count); 101 } else 102 err = -ENODEV; 103 mutex_unlock(&dma_list_mutex); 104 105 return err; 106 } 107 108 static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr, 109 char *buf) 110 { 111 struct dma_chan *chan; 112 unsigned long count = 0; 113 int i; 114 int err; 115 116 mutex_lock(&dma_list_mutex); 117 chan = dev_to_dma_chan(dev); 118 if (chan) { 119 for_each_possible_cpu(i) 120 count += per_cpu_ptr(chan->local, i)->bytes_transferred; 121 err = sprintf(buf, "%lu\n", count); 122 } else 123 err = -ENODEV; 124 mutex_unlock(&dma_list_mutex); 125 126 return err; 127 } 128 129 static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf) 130 { 131 struct dma_chan *chan; 132 int err; 133 134 mutex_lock(&dma_list_mutex); 135 chan = dev_to_dma_chan(dev); 136 if (chan) 137 err = sprintf(buf, "%d\n", chan->client_count); 138 else 139 err = -ENODEV; 140 mutex_unlock(&dma_list_mutex); 141 142 return err; 143 } 144 145 static struct device_attribute dma_attrs[] = { 146 __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL), 147 __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL), 148 __ATTR(in_use, S_IRUGO, show_in_use, NULL), 149 __ATTR_NULL 150 }; 151 152 static void chan_dev_release(struct device *dev) 153 { 154 struct dma_chan_dev *chan_dev; 155 156 chan_dev = container_of(dev, typeof(*chan_dev), device); 157 if (atomic_dec_and_test(chan_dev->idr_ref)) { 158 mutex_lock(&dma_list_mutex); 159 idr_remove(&dma_idr, chan_dev->dev_id); 160 mutex_unlock(&dma_list_mutex); 161 kfree(chan_dev->idr_ref); 162 } 163 kfree(chan_dev); 164 } 165 166 static struct class dma_devclass = { 167 .name = "dma", 168 .dev_attrs = dma_attrs, 169 .dev_release = chan_dev_release, 170 }; 171 172 /* --- client and device registration --- */ 173 174 #define dma_device_satisfies_mask(device, mask) \ 175 __dma_device_satisfies_mask((device), &(mask)) 176 static int 177 __dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want) 178 { 179 dma_cap_mask_t 
has; 180 181 bitmap_and(has.bits, want->bits, device->cap_mask.bits, 182 DMA_TX_TYPE_END); 183 return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END); 184 } 185 186 static struct module *dma_chan_to_owner(struct dma_chan *chan) 187 { 188 return chan->device->dev->driver->owner; 189 } 190 191 /** 192 * balance_ref_count - catch up the channel reference count 193 * @chan - channel to balance ->client_count versus dmaengine_ref_count 194 * 195 * balance_ref_count must be called under dma_list_mutex 196 */ 197 static void balance_ref_count(struct dma_chan *chan) 198 { 199 struct module *owner = dma_chan_to_owner(chan); 200 201 while (chan->client_count < dmaengine_ref_count) { 202 __module_get(owner); 203 chan->client_count++; 204 } 205 } 206 207 /** 208 * dma_chan_get - try to grab a dma channel's parent driver module 209 * @chan - channel to grab 210 * 211 * Must be called under dma_list_mutex 212 */ 213 static int dma_chan_get(struct dma_chan *chan) 214 { 215 int err = -ENODEV; 216 struct module *owner = dma_chan_to_owner(chan); 217 218 if (chan->client_count) { 219 __module_get(owner); 220 err = 0; 221 } else if (try_module_get(owner)) 222 err = 0; 223 224 if (err == 0) 225 chan->client_count++; 226 227 /* allocate upon first client reference */ 228 if (chan->client_count == 1 && err == 0) { 229 int desc_cnt = chan->device->device_alloc_chan_resources(chan); 230 231 if (desc_cnt < 0) { 232 err = desc_cnt; 233 chan->client_count = 0; 234 module_put(owner); 235 } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) 236 balance_ref_count(chan); 237 } 238 239 return err; 240 } 241 242 /** 243 * dma_chan_put - drop a reference to a dma channel's parent driver module 244 * @chan - channel to release 245 * 246 * Must be called under dma_list_mutex 247 */ 248 static void dma_chan_put(struct dma_chan *chan) 249 { 250 if (!chan->client_count) 251 return; /* this channel failed alloc_chan_resources */ 252 chan->client_count--; 253 module_put(dma_chan_to_owner(chan)); 254 if (chan->client_count == 0) 255 chan->device->device_free_chan_resources(chan); 256 } 257 258 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) 259 { 260 enum dma_status status; 261 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); 262 263 dma_async_issue_pending(chan); 264 do { 265 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); 266 if (time_after_eq(jiffies, dma_sync_wait_timeout)) { 267 pr_err("%s: timeout!\n", __func__); 268 return DMA_ERROR; 269 } 270 if (status != DMA_IN_PROGRESS) 271 break; 272 cpu_relax(); 273 } while (1); 274 275 return status; 276 } 277 EXPORT_SYMBOL(dma_sync_wait); 278 279 /** 280 * dma_cap_mask_all - enable iteration over all operation types 281 */ 282 static dma_cap_mask_t dma_cap_mask_all; 283 284 /** 285 * dma_chan_tbl_ent - tracks channel allocations per core/operation 286 * @chan - associated channel for this entry 287 */ 288 struct dma_chan_tbl_ent { 289 struct dma_chan *chan; 290 }; 291 292 /** 293 * channel_table - percpu lookup table for memory-to-memory offload providers 294 */ 295 static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END]; 296 297 static int __init dma_channel_table_init(void) 298 { 299 enum dma_transaction_type cap; 300 int err = 0; 301 302 bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); 303 304 /* 'interrupt', 'private', and 'slave' are channel capabilities, 305 * but are not associated with an operation so they do not need 306 * an entry in the channel_table 307 */ 308 
clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); 309 clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits); 310 clear_bit(DMA_SLAVE, dma_cap_mask_all.bits); 311 312 for_each_dma_cap_mask(cap, dma_cap_mask_all) { 313 channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent); 314 if (!channel_table[cap]) { 315 err = -ENOMEM; 316 break; 317 } 318 } 319 320 if (err) { 321 pr_err("initialization failure\n"); 322 for_each_dma_cap_mask(cap, dma_cap_mask_all) 323 if (channel_table[cap]) 324 free_percpu(channel_table[cap]); 325 } 326 327 return err; 328 } 329 arch_initcall(dma_channel_table_init); 330 331 /** 332 * dma_find_channel - find a channel to carry out the operation 333 * @tx_type: transaction type 334 */ 335 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) 336 { 337 return this_cpu_read(channel_table[tx_type]->chan); 338 } 339 EXPORT_SYMBOL(dma_find_channel); 340 341 /* 342 * net_dma_find_channel - find a channel for net_dma 343 * net_dma has alignment requirements 344 */ 345 struct dma_chan *net_dma_find_channel(void) 346 { 347 struct dma_chan *chan = dma_find_channel(DMA_MEMCPY); 348 if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1)) 349 return NULL; 350 351 return chan; 352 } 353 EXPORT_SYMBOL(net_dma_find_channel); 354 355 /** 356 * dma_issue_pending_all - flush all pending operations across all channels 357 */ 358 void dma_issue_pending_all(void) 359 { 360 struct dma_device *device; 361 struct dma_chan *chan; 362 363 rcu_read_lock(); 364 list_for_each_entry_rcu(device, &dma_device_list, global_node) { 365 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) 366 continue; 367 list_for_each_entry(chan, &device->channels, device_node) 368 if (chan->client_count) 369 device->device_issue_pending(chan); 370 } 371 rcu_read_unlock(); 372 } 373 EXPORT_SYMBOL(dma_issue_pending_all); 374 375 /** 376 * nth_chan - returns the nth channel of the given capability 377 * @cap: capability to match 378 * @n: nth channel desired 379 * 380 * Defaults to returning the channel with the desired capability and the 381 * lowest reference count when 'n' cannot be satisfied. Must be called 382 * under dma_list_mutex. 383 */ 384 static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n) 385 { 386 struct dma_device *device; 387 struct dma_chan *chan; 388 struct dma_chan *ret = NULL; 389 struct dma_chan *min = NULL; 390 391 list_for_each_entry(device, &dma_device_list, global_node) { 392 if (!dma_has_cap(cap, device->cap_mask) || 393 dma_has_cap(DMA_PRIVATE, device->cap_mask)) 394 continue; 395 list_for_each_entry(chan, &device->channels, device_node) { 396 if (!chan->client_count) 397 continue; 398 if (!min) 399 min = chan; 400 else if (chan->table_count < min->table_count) 401 min = chan; 402 403 if (n-- == 0) { 404 ret = chan; 405 break; /* done */ 406 } 407 } 408 if (ret) 409 break; /* done */ 410 } 411 412 if (!ret) 413 ret = min; 414 415 if (ret) 416 ret->table_count++; 417 418 return ret; 419 } 420 421 /** 422 * dma_channel_rebalance - redistribute the available channels 423 * 424 * Optimize for cpu isolation (each cpu gets a dedicated channel for an 425 * operation type) in the SMP case, and operation isolation (avoid 426 * multi-tasking channels) in the non-SMP case. Must be called under 427 * dma_list_mutex. 
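 *
 * As a rough, hypothetical illustration of the outcome: with two online CPUs
 * and two public MEMCPY-capable channels, each CPU ends up with its own
 * entry in channel_table[DMA_MEMCPY]; on a single-CPU system with two
 * channels that both advertise MEMCPY and XOR, the table instead tends to
 * point MEMCPY at one channel and XOR at the other, because nth_chan()
 * falls back to the channel with the lowest table_count.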
428 */ 429 static void dma_channel_rebalance(void) 430 { 431 struct dma_chan *chan; 432 struct dma_device *device; 433 int cpu; 434 int cap; 435 int n; 436 437 /* undo the last distribution */ 438 for_each_dma_cap_mask(cap, dma_cap_mask_all) 439 for_each_possible_cpu(cpu) 440 per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; 441 442 list_for_each_entry(device, &dma_device_list, global_node) { 443 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) 444 continue; 445 list_for_each_entry(chan, &device->channels, device_node) 446 chan->table_count = 0; 447 } 448 449 /* don't populate the channel_table if no clients are available */ 450 if (!dmaengine_ref_count) 451 return; 452 453 /* redistribute available channels */ 454 n = 0; 455 for_each_dma_cap_mask(cap, dma_cap_mask_all) 456 for_each_online_cpu(cpu) { 457 if (num_possible_cpus() > 1) 458 chan = nth_chan(cap, n++); 459 else 460 chan = nth_chan(cap, -1); 461 462 per_cpu_ptr(channel_table[cap], cpu)->chan = chan; 463 } 464 } 465 466 static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev, 467 dma_filter_fn fn, void *fn_param) 468 { 469 struct dma_chan *chan; 470 471 if (!__dma_device_satisfies_mask(dev, mask)) { 472 pr_debug("%s: wrong capabilities\n", __func__); 473 return NULL; 474 } 475 /* devices with multiple channels need special handling as we need to 476 * ensure that all channels are either private or public. 477 */ 478 if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask)) 479 list_for_each_entry(chan, &dev->channels, device_node) { 480 /* some channels are already publicly allocated */ 481 if (chan->client_count) 482 return NULL; 483 } 484 485 list_for_each_entry(chan, &dev->channels, device_node) { 486 if (chan->client_count) { 487 pr_debug("%s: %s busy\n", 488 __func__, dma_chan_name(chan)); 489 continue; 490 } 491 if (fn && !fn(chan, fn_param)) { 492 pr_debug("%s: %s filter said false\n", 493 __func__, dma_chan_name(chan)); 494 continue; 495 } 496 return chan; 497 } 498 499 return NULL; 500 } 501 502 /** 503 * dma_request_channel - try to allocate an exclusive channel 504 * @mask: capabilities that the channel must satisfy 505 * @fn: optional callback to disposition available channels 506 * @fn_param: opaque parameter to pass to dma_filter_fn 507 */ 508 struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param) 509 { 510 struct dma_device *device, *_d; 511 struct dma_chan *chan = NULL; 512 int err; 513 514 /* Find a channel */ 515 mutex_lock(&dma_list_mutex); 516 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { 517 chan = private_candidate(mask, device, fn, fn_param); 518 if (chan) { 519 /* Found a suitable channel, try to grab, prep, and 520 * return it. We first set DMA_PRIVATE to disable 521 * balance_ref_count as this channel will not be 522 * published in the general-purpose allocator 523 */ 524 dma_cap_set(DMA_PRIVATE, device->cap_mask); 525 device->privatecnt++; 526 err = dma_chan_get(chan); 527 528 if (err == -ENODEV) { 529 pr_debug("%s: %s module removed\n", 530 __func__, dma_chan_name(chan)); 531 list_del_rcu(&device->global_node); 532 } else if (err) 533 pr_debug("%s: failed to get %s: (%d)\n", 534 __func__, dma_chan_name(chan), err); 535 else 536 break; 537 if (--device->privatecnt == 0) 538 dma_cap_clear(DMA_PRIVATE, device->cap_mask); 539 chan = NULL; 540 } 541 } 542 mutex_unlock(&dma_list_mutex); 543 544 pr_debug("%s: %s (%s)\n", 545 __func__, 546 chan ? "success" : "fail", 547 chan ? 
dma_chan_name(chan) : NULL); 548 549 return chan; 550 } 551 EXPORT_SYMBOL_GPL(__dma_request_channel); 552 553 /** 554 * dma_request_slave_channel - try to allocate an exclusive slave channel 555 * @dev: pointer to client device structure 556 * @name: slave channel name 557 */ 558 struct dma_chan *dma_request_slave_channel(struct device *dev, char *name) 559 { 560 /* If device-tree is present get slave info from here */ 561 if (dev->of_node) 562 return of_dma_request_slave_channel(dev->of_node, name); 563 564 return NULL; 565 } 566 EXPORT_SYMBOL_GPL(dma_request_slave_channel); 567 568 void dma_release_channel(struct dma_chan *chan) 569 { 570 mutex_lock(&dma_list_mutex); 571 WARN_ONCE(chan->client_count != 1, 572 "chan reference count %d != 1\n", chan->client_count); 573 dma_chan_put(chan); 574 /* drop PRIVATE cap enabled by __dma_request_channel() */ 575 if (--chan->device->privatecnt == 0) 576 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); 577 mutex_unlock(&dma_list_mutex); 578 } 579 EXPORT_SYMBOL_GPL(dma_release_channel); 580 581 /** 582 * dmaengine_get - register interest in dma_channels 583 */ 584 void dmaengine_get(void) 585 { 586 struct dma_device *device, *_d; 587 struct dma_chan *chan; 588 int err; 589 590 mutex_lock(&dma_list_mutex); 591 dmaengine_ref_count++; 592 593 /* try to grab channels */ 594 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { 595 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) 596 continue; 597 list_for_each_entry(chan, &device->channels, device_node) { 598 err = dma_chan_get(chan); 599 if (err == -ENODEV) { 600 /* module removed before we could use it */ 601 list_del_rcu(&device->global_node); 602 break; 603 } else if (err) 604 pr_debug("%s: failed to get %s: (%d)\n", 605 __func__, dma_chan_name(chan), err); 606 } 607 } 608 609 /* if this is the first reference and there were channels 610 * waiting we need to rebalance to get those channels 611 * incorporated into the channel table 612 */ 613 if (dmaengine_ref_count == 1) 614 dma_channel_rebalance(); 615 mutex_unlock(&dma_list_mutex); 616 } 617 EXPORT_SYMBOL(dmaengine_get); 618 619 /** 620 * dmaengine_put - let dma drivers be removed when ref_count == 0 621 */ 622 void dmaengine_put(void) 623 { 624 struct dma_device *device; 625 struct dma_chan *chan; 626 627 mutex_lock(&dma_list_mutex); 628 dmaengine_ref_count--; 629 BUG_ON(dmaengine_ref_count < 0); 630 /* drop channel references */ 631 list_for_each_entry(device, &dma_device_list, global_node) { 632 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) 633 continue; 634 list_for_each_entry(chan, &device->channels, device_node) 635 dma_chan_put(chan); 636 } 637 mutex_unlock(&dma_list_mutex); 638 } 639 EXPORT_SYMBOL(dmaengine_put); 640 641 static bool device_has_all_tx_types(struct dma_device *device) 642 { 643 /* A device that satisfies this test has channels that will never cause 644 * an async_tx channel switch event as all possible operation types can 645 * be handled. 
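 *
 * For example (a hypothetical device): one advertising DMA_INTERRUPT,
 * DMA_MEMCPY, DMA_MEMSET, DMA_XOR, DMA_XOR_VAL, DMA_PQ and DMA_PQ_VAL covers
 * every operation the async_tx apis enabled below may request, so a
 * dependency chain never needs to migrate to another device's channel.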
646 */ 647 #ifdef CONFIG_ASYNC_TX_DMA 648 if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask)) 649 return false; 650 #endif 651 652 #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE) 653 if (!dma_has_cap(DMA_MEMCPY, device->cap_mask)) 654 return false; 655 #endif 656 657 #if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE) 658 if (!dma_has_cap(DMA_MEMSET, device->cap_mask)) 659 return false; 660 #endif 661 662 #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE) 663 if (!dma_has_cap(DMA_XOR, device->cap_mask)) 664 return false; 665 666 #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA 667 if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask)) 668 return false; 669 #endif 670 #endif 671 672 #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE) 673 if (!dma_has_cap(DMA_PQ, device->cap_mask)) 674 return false; 675 676 #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA 677 if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask)) 678 return false; 679 #endif 680 #endif 681 682 return true; 683 } 684 685 static int get_dma_id(struct dma_device *device) 686 { 687 int rc; 688 689 idr_retry: 690 if (!idr_pre_get(&dma_idr, GFP_KERNEL)) 691 return -ENOMEM; 692 mutex_lock(&dma_list_mutex); 693 rc = idr_get_new(&dma_idr, NULL, &device->dev_id); 694 mutex_unlock(&dma_list_mutex); 695 if (rc == -EAGAIN) 696 goto idr_retry; 697 else if (rc != 0) 698 return rc; 699 700 return 0; 701 } 702 703 /** 704 * dma_async_device_register - registers DMA devices found 705 * @device: &dma_device 706 */ 707 int dma_async_device_register(struct dma_device *device) 708 { 709 int chancnt = 0, rc; 710 struct dma_chan* chan; 711 atomic_t *idr_ref; 712 713 if (!device) 714 return -ENODEV; 715 716 /* validate device routines */ 717 BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) && 718 !device->device_prep_dma_memcpy); 719 BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) && 720 !device->device_prep_dma_xor); 721 BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) && 722 !device->device_prep_dma_xor_val); 723 BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) && 724 !device->device_prep_dma_pq); 725 BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) && 726 !device->device_prep_dma_pq_val); 727 BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) && 728 !device->device_prep_dma_memset); 729 BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && 730 !device->device_prep_dma_interrupt); 731 BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) && 732 !device->device_prep_dma_sg); 733 BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) && 734 !device->device_prep_dma_cyclic); 735 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && 736 !device->device_control); 737 BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && 738 !device->device_prep_interleaved_dma); 739 740 BUG_ON(!device->device_alloc_chan_resources); 741 BUG_ON(!device->device_free_chan_resources); 742 BUG_ON(!device->device_tx_status); 743 BUG_ON(!device->device_issue_pending); 744 BUG_ON(!device->dev); 745 746 /* note: this only matters in the 747 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case 748 */ 749 if (device_has_all_tx_types(device)) 750 dma_cap_set(DMA_ASYNC_TX, device->cap_mask); 751 752 idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); 753 if (!idr_ref) 754 return -ENOMEM; 755 rc = get_dma_id(device); 756 if (rc != 0) { 757 kfree(idr_ref); 758 return rc; 759 } 760 761 atomic_set(idr_ref, 0); 762 763 /* represent channels in sysfs. 
Probably want devs too */ 764 list_for_each_entry(chan, &device->channels, device_node) { 765 rc = -ENOMEM; 766 chan->local = alloc_percpu(typeof(*chan->local)); 767 if (chan->local == NULL) 768 goto err_out; 769 chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); 770 if (chan->dev == NULL) { 771 free_percpu(chan->local); 772 chan->local = NULL; 773 goto err_out; 774 } 775 776 chan->chan_id = chancnt++; 777 chan->dev->device.class = &dma_devclass; 778 chan->dev->device.parent = device->dev; 779 chan->dev->chan = chan; 780 chan->dev->idr_ref = idr_ref; 781 chan->dev->dev_id = device->dev_id; 782 atomic_inc(idr_ref); 783 dev_set_name(&chan->dev->device, "dma%dchan%d", 784 device->dev_id, chan->chan_id); 785 786 rc = device_register(&chan->dev->device); 787 if (rc) { 788 free_percpu(chan->local); 789 chan->local = NULL; 790 kfree(chan->dev); 791 atomic_dec(idr_ref); 792 goto err_out; 793 } 794 chan->client_count = 0; 795 } 796 device->chancnt = chancnt; 797 798 mutex_lock(&dma_list_mutex); 799 /* take references on public channels */ 800 if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask)) 801 list_for_each_entry(chan, &device->channels, device_node) { 802 /* if clients are already waiting for channels we need 803 * to take references on their behalf 804 */ 805 if (dma_chan_get(chan) == -ENODEV) { 806 /* note we can only get here for the first 807 * channel as the remaining channels are 808 * guaranteed to get a reference 809 */ 810 rc = -ENODEV; 811 mutex_unlock(&dma_list_mutex); 812 goto err_out; 813 } 814 } 815 list_add_tail_rcu(&device->global_node, &dma_device_list); 816 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) 817 device->privatecnt++; /* Always private */ 818 dma_channel_rebalance(); 819 mutex_unlock(&dma_list_mutex); 820 821 return 0; 822 823 err_out: 824 /* if we never registered a channel just release the idr */ 825 if (atomic_read(idr_ref) == 0) { 826 mutex_lock(&dma_list_mutex); 827 idr_remove(&dma_idr, device->dev_id); 828 mutex_unlock(&dma_list_mutex); 829 kfree(idr_ref); 830 return rc; 831 } 832 833 list_for_each_entry(chan, &device->channels, device_node) { 834 if (chan->local == NULL) 835 continue; 836 mutex_lock(&dma_list_mutex); 837 chan->dev->chan = NULL; 838 mutex_unlock(&dma_list_mutex); 839 device_unregister(&chan->dev->device); 840 free_percpu(chan->local); 841 } 842 return rc; 843 } 844 EXPORT_SYMBOL(dma_async_device_register); 845 846 /** 847 * dma_async_device_unregister - unregister a DMA device 848 * @device: &dma_device 849 * 850 * This routine is called by dma driver exit routines, dmaengine holds module 851 * references to prevent it being called while channels are in use. 
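 *
 * As an illustrative provider sketch (dd, pdev, my_chan, and the my_*
 * callbacks are hypothetical driver objects), the matching probe side does
 * roughly:
 *
 *	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 *	dd->dev = &pdev->dev;
 *	dd->device_alloc_chan_resources = my_alloc_chan_resources;
 *	dd->device_free_chan_resources = my_free_chan_resources;
 *	dd->device_prep_dma_memcpy = my_prep_dma_memcpy;
 *	dd->device_tx_status = my_tx_status;
 *	dd->device_issue_pending = my_issue_pending;
 *	list_add_tail(&my_chan->device_node, &dd->channels);
 *	rc = dma_async_device_register(dd);
 *
 * and the remove path calls dma_async_device_unregister(dd) once the
 * hardware has been quiesced.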
852 */ 853 void dma_async_device_unregister(struct dma_device *device) 854 { 855 struct dma_chan *chan; 856 857 mutex_lock(&dma_list_mutex); 858 list_del_rcu(&device->global_node); 859 dma_channel_rebalance(); 860 mutex_unlock(&dma_list_mutex); 861 862 list_for_each_entry(chan, &device->channels, device_node) { 863 WARN_ONCE(chan->client_count, 864 "%s called while %d clients hold a reference\n", 865 __func__, chan->client_count); 866 mutex_lock(&dma_list_mutex); 867 chan->dev->chan = NULL; 868 mutex_unlock(&dma_list_mutex); 869 device_unregister(&chan->dev->device); 870 free_percpu(chan->local); 871 } 872 } 873 EXPORT_SYMBOL(dma_async_device_unregister); 874 875 /** 876 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses 877 * @chan: DMA channel to offload copy to 878 * @dest: destination address (virtual) 879 * @src: source address (virtual) 880 * @len: length 881 * 882 * Both @dest and @src must be mappable to a bus address according to the 883 * DMA mapping API rules for streaming mappings. 884 * Both @dest and @src must stay memory resident (kernel memory or locked 885 * user space pages). 886 */ 887 dma_cookie_t 888 dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, 889 void *src, size_t len) 890 { 891 struct dma_device *dev = chan->device; 892 struct dma_async_tx_descriptor *tx; 893 dma_addr_t dma_dest, dma_src; 894 dma_cookie_t cookie; 895 unsigned long flags; 896 897 dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE); 898 dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE); 899 flags = DMA_CTRL_ACK | 900 DMA_COMPL_SRC_UNMAP_SINGLE | 901 DMA_COMPL_DEST_UNMAP_SINGLE; 902 tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); 903 904 if (!tx) { 905 dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); 906 dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE); 907 return -ENOMEM; 908 } 909 910 tx->callback = NULL; 911 cookie = tx->tx_submit(tx); 912 913 preempt_disable(); 914 __this_cpu_add(chan->local->bytes_transferred, len); 915 __this_cpu_inc(chan->local->memcpy_count); 916 preempt_enable(); 917 918 return cookie; 919 } 920 EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf); 921 922 /** 923 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page 924 * @chan: DMA channel to offload copy to 925 * @page: destination page 926 * @offset: offset in page to copy to 927 * @kdata: source address (virtual) 928 * @len: length 929 * 930 * Both @page/@offset and @kdata must be mappable to a bus address according 931 * to the DMA mapping API rules for streaming mappings. 
932 * Both @page/@offset and @kdata must stay memory resident (kernel memory or 933 * locked user space pages) 934 */ 935 dma_cookie_t 936 dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, 937 unsigned int offset, void *kdata, size_t len) 938 { 939 struct dma_device *dev = chan->device; 940 struct dma_async_tx_descriptor *tx; 941 dma_addr_t dma_dest, dma_src; 942 dma_cookie_t cookie; 943 unsigned long flags; 944 945 dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE); 946 dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE); 947 flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE; 948 tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); 949 950 if (!tx) { 951 dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); 952 dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE); 953 return -ENOMEM; 954 } 955 956 tx->callback = NULL; 957 cookie = tx->tx_submit(tx); 958 959 preempt_disable(); 960 __this_cpu_add(chan->local->bytes_transferred, len); 961 __this_cpu_inc(chan->local->memcpy_count); 962 preempt_enable(); 963 964 return cookie; 965 } 966 EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg); 967 968 /** 969 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page 970 * @chan: DMA channel to offload copy to 971 * @dest_pg: destination page 972 * @dest_off: offset in page to copy to 973 * @src_pg: source page 974 * @src_off: offset in page to copy from 975 * @len: length 976 * 977 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus 978 * address according to the DMA mapping API rules for streaming mappings. 979 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident 980 * (kernel memory or locked user space pages). 981 */ 982 dma_cookie_t 983 dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, 984 unsigned int dest_off, struct page *src_pg, unsigned int src_off, 985 size_t len) 986 { 987 struct dma_device *dev = chan->device; 988 struct dma_async_tx_descriptor *tx; 989 dma_addr_t dma_dest, dma_src; 990 dma_cookie_t cookie; 991 unsigned long flags; 992 993 dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE); 994 dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len, 995 DMA_FROM_DEVICE); 996 flags = DMA_CTRL_ACK; 997 tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags); 998 999 if (!tx) { 1000 dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE); 1001 dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE); 1002 return -ENOMEM; 1003 } 1004 1005 tx->callback = NULL; 1006 cookie = tx->tx_submit(tx); 1007 1008 preempt_disable(); 1009 __this_cpu_add(chan->local->bytes_transferred, len); 1010 __this_cpu_inc(chan->local->memcpy_count); 1011 preempt_enable(); 1012 1013 return cookie; 1014 } 1015 EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg); 1016 1017 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, 1018 struct dma_chan *chan) 1019 { 1020 tx->chan = chan; 1021 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH 1022 spin_lock_init(&tx->lock); 1023 #endif 1024 } 1025 EXPORT_SYMBOL(dma_async_tx_descriptor_init); 1026 1027 /* dma_wait_for_async_tx - spin wait for a transaction to complete 1028 * @tx: in-flight transaction to wait on 1029 */ 1030 enum dma_status 1031 dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) 1032 { 1033 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); 1034 1035 if (!tx) 1036 return DMA_SUCCESS; 1037 1038 while (tx->cookie == -EBUSY) { 1039 if (time_after_eq(jiffies, 
dma_sync_wait_timeout)) { 1040 pr_err("%s timeout waiting for descriptor submission\n", 1041 __func__); 1042 return DMA_ERROR; 1043 } 1044 cpu_relax(); 1045 } 1046 return dma_sync_wait(tx->chan, tx->cookie); 1047 } 1048 EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); 1049 1050 /* dma_run_dependencies - helper routine for dma drivers to process 1051 * (start) dependent operations on their target channel 1052 * @tx: transaction with dependencies 1053 */ 1054 void dma_run_dependencies(struct dma_async_tx_descriptor *tx) 1055 { 1056 struct dma_async_tx_descriptor *dep = txd_next(tx); 1057 struct dma_async_tx_descriptor *dep_next; 1058 struct dma_chan *chan; 1059 1060 if (!dep) 1061 return; 1062 1063 /* we'll submit tx->next now, so clear the link */ 1064 txd_clear_next(tx); 1065 chan = dep->chan; 1066 1067 /* keep submitting up until a channel switch is detected 1068 * in that case we will be called again as a result of 1069 * processing the interrupt from async_tx_channel_switch 1070 */ 1071 for (; dep; dep = dep_next) { 1072 txd_lock(dep); 1073 txd_clear_parent(dep); 1074 dep_next = txd_next(dep); 1075 if (dep_next && dep_next->chan == chan) 1076 txd_clear_next(dep); /* ->next will be submitted */ 1077 else 1078 dep_next = NULL; /* submit current dep and terminate */ 1079 txd_unlock(dep); 1080 1081 dep->tx_submit(dep); 1082 } 1083 1084 chan->device->device_issue_pending(chan); 1085 } 1086 EXPORT_SYMBOL_GPL(dma_run_dependencies); 1087 1088 static int __init dma_bus_init(void) 1089 { 1090 return class_register(&dma_devclass); 1091 } 1092 arch_initcall(dma_bus_init); 1093 1094 1095
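
/*
 * Illustrative usage sketch (not built as part of this file): a caller that
 * wants a fully synchronous offloaded copy can combine the memcpy and wait
 * helpers exported above, falling back to the CPU on any error:
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (dma_submit_error(cookie) ||
 *	    dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		memcpy(dest, src, len);
 */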