/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or, if it needs an exclusive channel, it can call
 * dma_request_channel(). Once a channel is allocated, a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is simply set up by the driver.
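 *
 * A minimal client sketch (illustrative only; the DMA_MEMCPY capability and
 * the chan/cookie/dest/src/len variables are assumptions, not code from this
 * file):
 *
 *      dmaengine_get();
 *      chan = dma_find_channel(DMA_MEMCPY);
 *      if (chan) {
 *              cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *              dma_async_issue_pending(chan);
 *      }
 *      ...
 *      dmaengine_put();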
 *
 * See Documentation/dmaengine.txt for more details
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
static struct idr dma_idr;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev: device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        return chan_dev->chan;
}

static ssize_t show_memcpy_count(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int i;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->memcpy_count;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}

static ssize_t show_bytes_transferred(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int i;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->bytes_transferred;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan)
                err = sprintf(buf, "%d\n", chan->client_count);
        else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}

static struct device_attribute dma_attrs[] = {
        __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
        __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
        __ATTR(in_use, S_IRUGO, show_in_use, NULL),
        __ATTR_NULL
};

static void chan_dev_release(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        if (atomic_dec_and_test(chan_dev->idr_ref)) {
                mutex_lock(&dma_list_mutex);
                idr_remove(&dma_idr, chan_dev->dev_id);
                mutex_unlock(&dma_list_mutex);
                kfree(chan_dev->idr_ref);
        }
        kfree(chan_dev);
}

static struct class dma_devclass = {
        .name           = "dma",
        .dev_attrs      = dma_attrs,
        .dev_release    = chan_dev_release,
};
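
/*
 * Each registered channel appears as /sys/class/dma/dma<dev_id>chan<chan_id>
 * carrying the read-only attributes declared above, e.g. (path of the first
 * registered device/channel, shown for illustration):
 *
 *      # cat /sys/class/dma/dma0chan0/bytes_transferred
 */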

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
        __dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
        dma_cap_mask_t has;

        bitmap_and(has.bits, want->bits, device->cap_mask.bits,
                   DMA_TX_TYPE_END);
        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
        return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan: channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
        struct module *owner = dma_chan_to_owner(chan);

        while (chan->client_count < dmaengine_ref_count) {
                __module_get(owner);
                chan->client_count++;
        }
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan: channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
        int err = -ENODEV;
        struct module *owner = dma_chan_to_owner(chan);

        if (chan->client_count) {
                __module_get(owner);
                err = 0;
        } else if (try_module_get(owner))
                err = 0;

        if (err == 0)
                chan->client_count++;

        /* allocate upon first client reference */
        if (chan->client_count == 1 && err == 0) {
                int desc_cnt = chan->device->device_alloc_chan_resources(chan);

                if (desc_cnt < 0) {
                        err = desc_cnt;
                        chan->client_count = 0;
                        module_put(owner);
                } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
                        balance_ref_count(chan);
        }

        return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan: channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
        if (!chan->client_count)
                return; /* this channel failed alloc_chan_resources */
        chan->client_count--;
        module_put(dma_chan_to_owner(chan));
        if (chan->client_count == 0)
                chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
        enum dma_status status;
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        dma_async_issue_pending(chan);
        do {
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        printk(KERN_ERR "dma_sync_wait_timeout!\n");
                        return DMA_ERROR;
                }
        } while (status == DMA_IN_PROGRESS);

        return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan: associated channel for this entry
 */
struct dma_chan_tbl_ent {
        struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
        enum dma_transaction_type cap;
        int err = 0;

        bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

        /* 'interrupt', 'private', and 'slave' are channel capabilities,
         * but are not associated with an operation so they do not need
         * an entry in the channel_table
         */
        clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
        clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
        clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
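
        /* allocate a per-cpu channel slot for each remaining, operation
         * backed capability; the slots are populated later by
         * dma_channel_rebalance()
         */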
        for_each_dma_cap_mask(cap, dma_cap_mask_all) {
                channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
                if (!channel_table[cap]) {
                        err = -ENOMEM;
                        break;
                }
        }

        if (err) {
                pr_err("dmaengine: initialization failure\n");
                for_each_dma_cap_mask(cap, dma_cap_mask_all)
                        if (channel_table[cap])
                                free_percpu(channel_table[cap]);
        }

        return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
        return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        rcu_read_lock();
        list_for_each_entry_rcu(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        if (chan->client_count)
                                device->device_issue_pending(chan);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied. Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
        struct dma_device *device;
        struct dma_chan *chan;
        struct dma_chan *ret = NULL;
        struct dma_chan *min = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (!dma_has_cap(cap, device->cap_mask) ||
                    dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        if (!chan->client_count)
                                continue;
                        if (!min)
                                min = chan;
                        else if (chan->table_count < min->table_count)
                                min = chan;

                        if (n-- == 0) {
                                ret = chan;
                                break; /* done */
                        }
                }
                if (ret)
                        break; /* done */
        }

        if (!ret)
                ret = min;

        if (ret)
                ret->table_count++;

        return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
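 *
 * For example (illustrative): on a two-cpu system with two public
 * DMA_MEMCPY-capable channels, each cpu ends up with its own channel in
 * channel_table[DMA_MEMCPY]; on a uniprocessor system the nth_chan(cap, -1)
 * path instead assigns the least-referenced channel per operation type.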
 */
static void dma_channel_rebalance(void)
{
        struct dma_chan *chan;
        struct dma_device *device;
        int cpu;
        int cap;
        int n;

        /* undo the last distribution */
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_possible_cpu(cpu)
                        per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        chan->table_count = 0;
        }

        /* don't populate the channel_table if no clients are available */
        if (!dmaengine_ref_count)
                return;

        /* redistribute available channels */
        n = 0;
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_online_cpu(cpu) {
                        if (num_possible_cpus() > 1)
                                chan = nth_chan(cap, n++);
                        else
                                chan = nth_chan(cap, -1);

                        per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
                }
}

static struct dma_chan *private_candidate(dma_cap_mask_t *mask,
                                          struct dma_device *dev,
                                          dma_filter_fn fn, void *fn_param)
{
        struct dma_chan *chan;

        if (!__dma_device_satisfies_mask(dev, mask)) {
                pr_debug("%s: wrong capabilities\n", __func__);
                return NULL;
        }
        /* devices with multiple channels need special handling as we need to
         * ensure that all channels are either private or public.
         */
        if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
                list_for_each_entry(chan, &dev->channels, device_node) {
                        /* some channels are already publicly allocated */
                        if (chan->client_count)
                                return NULL;
                }

        list_for_each_entry(chan, &dev->channels, device_node) {
                if (chan->client_count) {
                        pr_debug("%s: %s busy\n",
                                 __func__, dma_chan_name(chan));
                        continue;
                }
                if (fn && !fn(chan, fn_param)) {
                        pr_debug("%s: %s filter said false\n",
                                 __func__, dma_chan_name(chan));
                        continue;
                }
                return chan;
        }

        return NULL;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to filter the available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn,
                                       void *fn_param)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan = NULL;
        int err;

        /* Find a channel */
        mutex_lock(&dma_list_mutex);
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                chan = private_candidate(mask, device, fn, fn_param);
                if (chan) {
                        /* Found a suitable channel, try to grab, prep, and
                         * return it. We first set DMA_PRIVATE to disable
                         * balance_ref_count as this channel will not be
                         * published in the general-purpose allocator
                         */
                        dma_cap_set(DMA_PRIVATE, device->cap_mask);
                        device->privatecnt++;
                        err = dma_chan_get(chan);

                        if (err == -ENODEV) {
                                pr_debug("%s: %s module removed\n", __func__,
                                         dma_chan_name(chan));
                                list_del_rcu(&device->global_node);
                        } else if (err)
                                pr_err("dmaengine: failed to get %s: (%d)\n",
                                       dma_chan_name(chan), err);
                        else
                                break;
                        if (--device->privatecnt == 0)
                                dma_cap_clear(DMA_PRIVATE, device->cap_mask);
                        chan->private = NULL;
                        chan = NULL;
                }
        }
        mutex_unlock(&dma_list_mutex);

        pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
                 chan ? dma_chan_name(chan) : NULL);

        return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
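
/*
 * Illustrative usage sketch (my_filter and my_match are hypothetical, not
 * part of this file): an exclusive channel is normally obtained through the
 * dma_request_channel() wrapper and handed back with dma_release_channel():
 *
 *      static bool my_filter(struct dma_chan *chan, void *param)
 *      {
 *              return chan->device->dev == param;
 *      }
 *
 *      dma_cap_mask_t mask;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_SLAVE, mask);
 *      chan = dma_request_channel(mask, my_filter, my_match);
 *      if (chan) {
 *              ...
 *              dma_release_channel(chan);
 *      }
 */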

void dma_release_channel(struct dma_chan *chan)
{
        mutex_lock(&dma_list_mutex);
        WARN_ONCE(chan->client_count != 1,
                  "chan reference count %d != 1\n", chan->client_count);
        dma_chan_put(chan);
        /* drop PRIVATE cap enabled by __dma_request_channel() */
        if (--chan->device->privatecnt == 0)
                dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
        chan->private = NULL;
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count++;

        /* try to grab channels */
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        err = dma_chan_get(chan);
                        if (err == -ENODEV) {
                                /* module removed before we could use it */
                                list_del_rcu(&device->global_node);
                                break;
                        } else if (err)
                                pr_err("dmaengine: failed to get %s: (%d)\n",
                                       dma_chan_name(chan), err);
                }
        }

        /* if this is the first reference and there were channels
         * waiting we need to rebalance to get those channels
         * incorporated into the channel table
         */
        if (dmaengine_ref_count == 1)
                dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count--;
        BUG_ON(dmaengine_ref_count < 0);
        /* drop channel references */
        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        dma_chan_put(chan);
        }
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
        /* A device that satisfies this test has channels that will never cause
         * an async_tx channel switch event as all possible operation types can
         * be handled.
         */
#ifdef CONFIG_ASYNC_TX_DMA
        if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
                return false;
#endif

#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
        if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
                return false;
#endif

#if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
        if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
                return false;
#endif

#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
        if (!dma_has_cap(DMA_XOR, device->cap_mask))
                return false;

        #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
        if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
                return false;
        #endif
#endif

#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
        if (!dma_has_cap(DMA_PQ, device->cap_mask))
                return false;

        #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
        if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
                return false;
        #endif
#endif

        return true;
}

static int get_dma_id(struct dma_device *device)
{
        int rc;

 idr_retry:
        if (!idr_pre_get(&dma_idr, GFP_KERNEL))
                return -ENOMEM;
        mutex_lock(&dma_list_mutex);
        rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
        mutex_unlock(&dma_list_mutex);
        if (rc == -EAGAIN)
                goto idr_retry;
        else if (rc != 0)
                return rc;

        return 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
        int chancnt = 0, rc;
        struct dma_chan *chan;
        atomic_t *idr_ref;

        if (!device)
                return -ENODEV;

        /* validate device routines */
        BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
               !device->device_prep_dma_memcpy);
        BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
               !device->device_prep_dma_xor);
        BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
               !device->device_prep_dma_xor_val);
        BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
               !device->device_prep_dma_pq);
        BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
               !device->device_prep_dma_pq_val);
        BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
               !device->device_prep_dma_memset);
        BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
               !device->device_prep_dma_interrupt);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
               !device->device_prep_slave_sg);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
               !device->device_terminate_all);

        BUG_ON(!device->device_alloc_chan_resources);
        BUG_ON(!device->device_free_chan_resources);
        BUG_ON(!device->device_is_tx_complete);
        BUG_ON(!device->device_issue_pending);
        BUG_ON(!device->dev);

        /* note: this only matters in the
         * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y case
         */
        if (device_has_all_tx_types(device))
                dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

        idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
        if (!idr_ref)
                return -ENOMEM;
        rc = get_dma_id(device);
        if (rc != 0) {
                kfree(idr_ref);
                return rc;
        }

        atomic_set(idr_ref, 0);
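
        /* idr_ref counts the sysfs channel devices created below that share
         * this dev_id; the id is released only when the last of them goes
         * away (see chan_dev_release())
         */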
        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
                rc = -ENOMEM;
                chan->local = alloc_percpu(typeof(*chan->local));
                if (chan->local == NULL)
                        goto err_out;
                chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
                if (chan->dev == NULL) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        goto err_out;
                }

                chan->chan_id = chancnt++;
                chan->dev->device.class = &dma_devclass;
                chan->dev->device.parent = device->dev;
                chan->dev->chan = chan;
                chan->dev->idr_ref = idr_ref;
                chan->dev->dev_id = device->dev_id;
                atomic_inc(idr_ref);
                dev_set_name(&chan->dev->device, "dma%dchan%d",
                             device->dev_id, chan->chan_id);

                rc = device_register(&chan->dev->device);
                if (rc) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        kfree(chan->dev);
                        atomic_dec(idr_ref);
                        goto err_out;
                }
                chan->client_count = 0;
        }
        device->chancnt = chancnt;

        mutex_lock(&dma_list_mutex);
        /* take references on public channels */
        if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
                list_for_each_entry(chan, &device->channels, device_node) {
                        /* if clients are already waiting for channels we need
                         * to take references on their behalf
                         */
                        if (dma_chan_get(chan) == -ENODEV) {
                                /* note we can only get here for the first
                                 * channel as the remaining channels are
                                 * guaranteed to get a reference
                                 */
                                rc = -ENODEV;
                                mutex_unlock(&dma_list_mutex);
                                goto err_out;
                        }
                }
        list_add_tail_rcu(&device->global_node, &dma_device_list);
        if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                device->privatecnt++;   /* Always private */
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        return 0;

err_out:
        /* if we never registered a channel just release the idr */
        if (atomic_read(idr_ref) == 0) {
                mutex_lock(&dma_list_mutex);
                idr_remove(&dma_idr, device->dev_id);
                mutex_unlock(&dma_list_mutex);
                kfree(idr_ref);
                return rc;
        }

        list_for_each_entry(chan, &device->channels, device_node) {
                if (chan->local == NULL)
                        continue;
                mutex_lock(&dma_list_mutex);
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
        return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
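
/*
 * Provider-side sketch (illustrative; "foo" and the foo_* callbacks are
 * hypothetical, not part of this file): a driver fills in a struct
 * dma_device, hangs its channels off the channels list, and registers it:
 *
 *      dma_cap_set(DMA_MEMCPY, foo->dma.cap_mask);
 *      foo->dma.device_prep_dma_memcpy = foo_prep_memcpy;
 *      foo->dma.device_alloc_chan_resources = foo_alloc_chan_resources;
 *      foo->dma.device_free_chan_resources = foo_free_chan_resources;
 *      foo->dma.device_is_tx_complete = foo_is_tx_complete;
 *      foo->dma.device_issue_pending = foo_issue_pending;
 *      foo->dma.dev = &pdev->dev;
 *      INIT_LIST_HEAD(&foo->dma.channels);
 *      list_add_tail(&foo->chan.device_node, &foo->dma.channels);
 *      rc = dma_async_device_register(&foo->dma);
 */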

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it from being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        list_del_rcu(&device->global_node);
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        list_for_each_entry(chan, &device->channels, device_node) {
                WARN_ONCE(chan->client_count,
                          "%s called while %d clients hold a reference\n",
                          __func__, chan->client_count);
                mutex_lock(&dma_list_mutex);
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
                            void *src, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        unsigned long flags;

        dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
        dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
        flags = DMA_CTRL_ACK |
                DMA_COMPL_SRC_UNMAP_SINGLE |
                DMA_COMPL_DEST_UNMAP_SINGLE;
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        preempt_disable();
        __this_cpu_add(chan->local->bytes_transferred, len);
        __this_cpu_inc(chan->local->memcpy_count);
        preempt_enable();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
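
/*
 * Typical completion handling for the cookie returned above (illustrative;
 * a caller may also poll dma_async_is_tx_complete() itself):
 *
 *      cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *      dma_async_issue_pending(chan);
 *      status = dma_sync_wait(chan, cookie);
 */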

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
                           unsigned int offset, void *kdata, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        unsigned long flags;

        dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
        flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        preempt_disable();
        __this_cpu_add(chan->local->bytes_transferred, len);
        __this_cpu_inc(chan->local->memcpy_count);
        preempt_enable();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
                          unsigned int dest_off, struct page *src_pg,
                          unsigned int src_off, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        unsigned long flags;

        dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
                                DMA_FROM_DEVICE);
        flags = DMA_CTRL_ACK;
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

        if (!tx) {
                dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        preempt_disable();
        __this_cpu_add(chan->local->bytes_transferred, len);
        __this_cpu_inc(chan->local->memcpy_count);
        preempt_enable();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
                                  struct dma_chan *chan)
{
        tx->chan = chan;
        spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        if (!tx)
                return DMA_SUCCESS;

        while (tx->cookie == -EBUSY) {
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        pr_err("%s timeout waiting for descriptor submission\n",
                               __func__);
                        return DMA_ERROR;
                }
                cpu_relax();
        }
        return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *      (start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
        struct dma_async_tx_descriptor *dep = tx->next;
        struct dma_async_tx_descriptor *dep_next;
        struct dma_chan *chan;

        if (!dep)
                return;

        /* we'll submit tx->next now, so clear the link */
        tx->next = NULL;
        chan = dep->chan;

        /* keep submitting up until a channel switch is detected
         * in that case we will be called again as a result of
         * processing the interrupt from async_tx_channel_switch
         */
        for (; dep; dep = dep_next) {
                spin_lock_bh(&dep->lock);
                dep->parent = NULL;
                dep_next = dep->next;
                if (dep_next && dep_next->chan == chan)
                        dep->next = NULL; /* ->next will be submitted */
                else
                        dep_next = NULL; /* submit current dep and terminate */
                spin_unlock_bh(&dep->lock);

                dep->tx_submit(dep);
        }

        chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
        idr_init(&dma_idr);
        mutex_init(&dma_list_mutex);
        return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);