/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it needs an exclusive channel it can call
 * dma_request_channel(). Once a channel is allocated a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is only set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details
 */
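/*
 * Usage sketch (illustrative only): an opportunistic client follows the
 * get/find/put pattern described above.  dma_find_channel() only returns a
 * channel once at least one dmaengine_get() reference exists, since that is
 * what populates the per-cpu channel table.  Error handling is elided and
 * the copy submission shown is just one possible use.
 *
 *	dmaengine_get();
 *	...
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan)
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	else
 *		memcpy(dest, src, len);
 *	...
 *	dmaengine_put();
 */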
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
				      char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static struct device_attribute dma_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_attrs	= dma_attrs,
	.dev_release	= chan_dev_release,
};
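/*
 * Illustrative note: with the class and attributes above, and the
 * "dma%dchan%d" device naming used at registration time below, the first
 * channel of the first registered device is expected to show up as
 *
 *	/sys/class/dma/dma0chan0/memcpy_count
 *	/sys/class/dma/dma0chan0/bytes_transferred
 *	/sys/class/dma/dma0chan0/in_use
 */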
/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("dmaengine: initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied. Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *ret = NULL;
	struct dma_chan *min = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min)
				min = chan;
			else if (chan->table_count < min->table_count)
				min = chan;

			if (n-- == 0) {
				ret = chan;
				break; /* done */
			}
		}
		if (ret)
			break; /* done */
	}

	if (!ret)
		ret = min;

	if (ret)
		ret->table_count++;

	return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;
	int n;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	n = 0;
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			if (num_possible_cpus() > 1)
				chan = nth_chan(cap, n++);
			else
				chan = nth_chan(cap, -1);

			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it. We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n", __func__,
					 dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_debug("dmaengine: failed to get %s: (%d)\n",
					 dma_chan_name(chan), err);
			else
				break;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
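/*
 * Usage sketch (illustrative only): a client that needs an exclusive channel
 * builds a capability mask and may pass a filter callback through the
 * dma_request_channel() wrapper around __dma_request_channel().  The filter
 * name and its device-matching criterion below are hypothetical.
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;
 *	}
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dev);
 *	if (chan) {
 *		(submit transfers on chan)
 *		dma_release_channel(chan);
 *	}
 */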
void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_err("dmaengine: failed to get %s: (%d)\n",
				       dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
	if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

idr_retry:
	if (!idr_pre_get(&dma_idr, GFP_KERNEL))
		return -ENOMEM;
	mutex_lock(&dma_list_mutex);
	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
	mutex_unlock(&dma_list_mutex);
	if (rc == -EAGAIN)
		goto idr_retry;
	else if (rc != 0)
		return rc;

	return 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan* chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_prep_slave_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_control);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
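/*
 * Provider-side sketch (illustrative only; all "my_*" names are hypothetical):
 * a minimal memcpy-only driver is assumed to fill in the callbacks checked by
 * the BUG_ON()s above, populate its channel list, and then register:
 *
 *	struct dma_device *dd = &my_softc->dma_dev;
 *
 *	dma_cap_zero(dd->cap_mask);
 *	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 *	dd->device_prep_dma_memcpy = my_prep_memcpy;
 *	dd->device_alloc_chan_resources = my_alloc_chan_resources;
 *	dd->device_free_chan_resources = my_free_chan_resources;
 *	dd->device_tx_status = my_tx_status;
 *	dd->device_issue_pending = my_issue_pending;
 *	dd->dev = &pdev->dev;
 *	INIT_LIST_HEAD(&dd->channels);
 *	my_chan->device = dd;
 *	list_add_tail(&my_chan->device_node, &dd->channels);
 *
 *	err = dma_async_device_register(dd);
 */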
/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			    void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK |
		DMA_COMPL_SRC_UNMAP_SINGLE |
		DMA_COMPL_DEST_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
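/*
 * Caller-side sketch (illustrative only): the buffers are assumed to be
 * DMA-able kernel memory, e.g. from kmalloc(), per the streaming mapping
 * rules noted above, and a dmaengine_get() reference is assumed to be held
 * so that dma_find_channel() can succeed.
 *
 *	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
 *	dma_cookie_t cookie;
 *
 *	if (!chan) {
 *		memcpy(dest, src, len);
 *		return;
 *	}
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (dma_submit_error(cookie) ||
 *	    dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		(fall back or report the error)
 */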
/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			   unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_SUCCESS;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);