1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2008 Advanced Micro Devices, Inc. 4 * 5 * Author: Joerg Roedel <joerg.roedel@amd.com> 6 */ 7 8 #define pr_fmt(fmt) "DMA-API: " fmt 9 10 #include <linux/sched/task_stack.h> 11 #include <linux/scatterlist.h> 12 #include <linux/dma-map-ops.h> 13 #include <linux/sched/task.h> 14 #include <linux/stacktrace.h> 15 #include <linux/spinlock.h> 16 #include <linux/vmalloc.h> 17 #include <linux/debugfs.h> 18 #include <linux/uaccess.h> 19 #include <linux/export.h> 20 #include <linux/device.h> 21 #include <linux/types.h> 22 #include <linux/sched.h> 23 #include <linux/ctype.h> 24 #include <linux/list.h> 25 #include <linux/slab.h> 26 #include <asm/sections.h> 27 #include "debug.h" 28 29 #define HASH_SIZE 16384ULL 30 #define HASH_FN_SHIFT 13 31 #define HASH_FN_MASK (HASH_SIZE - 1) 32 33 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) 34 /* If the pool runs out, add this many new entries at once */ 35 #define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry)) 36 37 enum { 38 dma_debug_single, 39 dma_debug_sg, 40 dma_debug_coherent, 41 dma_debug_resource, 42 }; 43 44 enum map_err_types { 45 MAP_ERR_CHECK_NOT_APPLICABLE, 46 MAP_ERR_NOT_CHECKED, 47 MAP_ERR_CHECKED, 48 }; 49 50 #define DMA_DEBUG_STACKTRACE_ENTRIES 5 51 52 /** 53 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping 54 * @list: node on pre-allocated free_entries list 55 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent 56 * @dev_addr: dma address 57 * @size: length of the mapping 58 * @type: single, page, sg, coherent 59 * @direction: enum dma_data_direction 60 * @sg_call_ents: 'nents' from dma_map_sg 61 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg 62 * @paddr: physical start address of the mapping 63 * @map_err_type: track whether dma_mapping_error() was checked 64 * @stack_len: number of backtrace entries in @stack_entries 65 * @stack_entries: stack of backtrace history 66 */ 67 struct dma_debug_entry { 68 struct list_head list; 69 struct device *dev; 70 u64 dev_addr; 71 u64 size; 72 int type; 73 int direction; 74 int sg_call_ents; 75 int sg_mapped_ents; 76 phys_addr_t paddr; 77 enum map_err_types map_err_type; 78 #ifdef CONFIG_STACKTRACE 79 unsigned int stack_len; 80 unsigned long stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES]; 81 #endif 82 } ____cacheline_aligned_in_smp; 83 84 typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *); 85 86 struct hash_bucket { 87 struct list_head list; 88 spinlock_t lock; 89 }; 90 91 /* Hash list to save the allocated dma addresses */ 92 static struct hash_bucket dma_entry_hash[HASH_SIZE]; 93 /* List of pre-allocated dma_debug_entry's */ 94 static LIST_HEAD(free_entries); 95 /* Lock for the list above */ 96 static DEFINE_SPINLOCK(free_entries_lock); 97 98 /* Global disable flag - will be set in case of an error */ 99 static bool global_disable __read_mostly; 100 101 /* Early initialization disable flag, set at the end of dma_debug_init */ 102 static bool dma_debug_initialized __read_mostly; 103 104 static inline bool dma_debug_disabled(void) 105 { 106 return global_disable || !dma_debug_initialized; 107 } 108 109 /* Global error count */ 110 static u32 error_count; 111 112 /* Global error show enable*/ 113 static u32 show_all_errors __read_mostly; 114 /* Number of errors to show */ 115 static u32 show_num_errors = 1; 116 117 static u32 num_free_entries; 118 static u32 min_free_entries; 119 static u32 nr_total_entries; 120 121 /* number of preallocated entries 
requested by kernel cmdline */ 122 static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES; 123 124 /* per-driver filter related state */ 125 126 #define NAME_MAX_LEN 64 127 128 static char current_driver_name[NAME_MAX_LEN] __read_mostly; 129 static struct device_driver *current_driver __read_mostly; 130 131 static DEFINE_RWLOCK(driver_name_lock); 132 133 static const char *const maperr2str[] = { 134 [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable", 135 [MAP_ERR_NOT_CHECKED] = "dma map error not checked", 136 [MAP_ERR_CHECKED] = "dma map error checked", 137 }; 138 139 static const char *type2name[] = { 140 [dma_debug_single] = "single", 141 [dma_debug_sg] = "scatter-gather", 142 [dma_debug_coherent] = "coherent", 143 [dma_debug_resource] = "resource", 144 }; 145 146 static const char *dir2name[] = { 147 [DMA_BIDIRECTIONAL] = "DMA_BIDIRECTIONAL", 148 [DMA_TO_DEVICE] = "DMA_TO_DEVICE", 149 [DMA_FROM_DEVICE] = "DMA_FROM_DEVICE", 150 [DMA_NONE] = "DMA_NONE", 151 }; 152 153 /* 154 * The access to some variables in this macro is racy. We can't use atomic_t 155 * here because all these variables are exported to debugfs. Some of them even 156 * writeable. This is also the reason why a lock won't help much. But anyway, 157 * the races are no big deal. Here is why: 158 * 159 * error_count: the addition is racy, but the worst thing that can happen is 160 * that we don't count some errors 161 * show_num_errors: the subtraction is racy. Also no big deal because in 162 * worst case this will result in one warning more in the 163 * system log than the user configured. This variable is 164 * writeable via debugfs. 165 */ 166 static inline void dump_entry_trace(struct dma_debug_entry *entry) 167 { 168 #ifdef CONFIG_STACKTRACE 169 if (entry) { 170 pr_warn("Mapped at:\n"); 171 stack_trace_print(entry->stack_entries, entry->stack_len, 0); 172 } 173 #endif 174 } 175 176 static bool driver_filter(struct device *dev) 177 { 178 struct device_driver *drv; 179 unsigned long flags; 180 bool ret; 181 182 /* driver filter off */ 183 if (likely(!current_driver_name[0])) 184 return true; 185 186 /* driver filter on and initialized */ 187 if (current_driver && dev && dev->driver == current_driver) 188 return true; 189 190 /* driver filter on, but we can't filter on a NULL device... */ 191 if (!dev) 192 return false; 193 194 if (current_driver || !current_driver_name[0]) 195 return false; 196 197 /* driver filter on but not yet initialized */ 198 drv = dev->driver; 199 if (!drv) 200 return false; 201 202 /* lock to protect against change of current_driver_name */ 203 read_lock_irqsave(&driver_name_lock, flags); 204 205 ret = false; 206 if (drv->name && 207 strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) { 208 current_driver = drv; 209 ret = true; 210 } 211 212 read_unlock_irqrestore(&driver_name_lock, flags); 213 214 return ret; 215 } 216 217 #define err_printk(dev, entry, format, arg...) do { \ 218 error_count += 1; \ 219 if (driver_filter(dev) && \ 220 (show_all_errors || show_num_errors > 0)) { \ 221 WARN(1, pr_fmt("%s %s: ") format, \ 222 dev ? dev_driver_string(dev) : "NULL", \ 223 dev ? dev_name(dev) : "NULL", ## arg); \ 224 dump_entry_trace(entry); \ 225 } \ 226 if (!show_all_errors && show_num_errors > 0) \ 227 show_num_errors -= 1; \ 228 } while (0); 229 230 /* 231 * Hash related functions 232 * 233 * Every DMA-API request is saved into a struct dma_debug_entry. To 234 * have quick access to these structs they are stored into a hash. 
235 */ 236 static int hash_fn(struct dma_debug_entry *entry) 237 { 238 /* 239 * Hash function is based on the dma address. 240 * We use bits 20-27 here as the index into the hash 241 */ 242 return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK; 243 } 244 245 /* 246 * Request exclusive access to a hash bucket for a given dma_debug_entry. 247 */ 248 static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry, 249 unsigned long *flags) 250 __acquires(&dma_entry_hash[idx].lock) 251 { 252 int idx = hash_fn(entry); 253 unsigned long __flags; 254 255 spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags); 256 *flags = __flags; 257 return &dma_entry_hash[idx]; 258 } 259 260 /* 261 * Give up exclusive access to the hash bucket 262 */ 263 static void put_hash_bucket(struct hash_bucket *bucket, 264 unsigned long flags) 265 __releases(&bucket->lock) 266 { 267 spin_unlock_irqrestore(&bucket->lock, flags); 268 } 269 270 static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b) 271 { 272 return ((a->dev_addr == b->dev_addr) && 273 (a->dev == b->dev)) ? true : false; 274 } 275 276 static bool containing_match(struct dma_debug_entry *a, 277 struct dma_debug_entry *b) 278 { 279 if (a->dev != b->dev) 280 return false; 281 282 if ((b->dev_addr <= a->dev_addr) && 283 ((b->dev_addr + b->size) >= (a->dev_addr + a->size))) 284 return true; 285 286 return false; 287 } 288 289 /* 290 * Search a given entry in the hash bucket list 291 */ 292 static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket, 293 struct dma_debug_entry *ref, 294 match_fn match) 295 { 296 struct dma_debug_entry *entry, *ret = NULL; 297 int matches = 0, match_lvl, last_lvl = -1; 298 299 list_for_each_entry(entry, &bucket->list, list) { 300 if (!match(ref, entry)) 301 continue; 302 303 /* 304 * Some drivers map the same physical address multiple 305 * times. Without a hardware IOMMU this results in the 306 * same device addresses being put into the dma-debug 307 * hash multiple times too. This can result in false 308 * positives being reported. Therefore we implement a 309 * best-fit algorithm here which returns the entry from 310 * the hash which fits best to the reference value 311 * instead of the first-fit. 312 */ 313 matches += 1; 314 match_lvl = 0; 315 entry->size == ref->size ? ++match_lvl : 0; 316 entry->type == ref->type ? ++match_lvl : 0; 317 entry->direction == ref->direction ? ++match_lvl : 0; 318 entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0; 319 320 if (match_lvl == 4) { 321 /* perfect-fit - return the result */ 322 return entry; 323 } else if (match_lvl > last_lvl) { 324 /* 325 * We found an entry that fits better then the 326 * previous one or it is the 1st match. 327 */ 328 last_lvl = match_lvl; 329 ret = entry; 330 } 331 } 332 333 /* 334 * If we have multiple matches but no perfect-fit, just return 335 * NULL. 336 */ 337 ret = (matches == 1) ? 
ret : NULL; 338 339 return ret; 340 } 341 342 static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket, 343 struct dma_debug_entry *ref) 344 { 345 return __hash_bucket_find(bucket, ref, exact_match); 346 } 347 348 static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket, 349 struct dma_debug_entry *ref, 350 unsigned long *flags) 351 { 352 353 struct dma_debug_entry *entry, index = *ref; 354 int limit = min(HASH_SIZE, (index.dev_addr >> HASH_FN_SHIFT) + 1); 355 356 for (int i = 0; i < limit; i++) { 357 entry = __hash_bucket_find(*bucket, ref, containing_match); 358 359 if (entry) 360 return entry; 361 362 /* 363 * Nothing found, go back a hash bucket 364 */ 365 put_hash_bucket(*bucket, *flags); 366 index.dev_addr -= (1 << HASH_FN_SHIFT); 367 *bucket = get_hash_bucket(&index, flags); 368 } 369 370 return NULL; 371 } 372 373 /* 374 * Add an entry to a hash bucket 375 */ 376 static void hash_bucket_add(struct hash_bucket *bucket, 377 struct dma_debug_entry *entry) 378 { 379 list_add_tail(&entry->list, &bucket->list); 380 } 381 382 /* 383 * Remove entry from a hash bucket list 384 */ 385 static void hash_bucket_del(struct dma_debug_entry *entry) 386 { 387 list_del(&entry->list); 388 } 389 390 /* 391 * For each mapping (initial cacheline in the case of 392 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a 393 * scatterlist, or the cacheline specified in dma_map_single) insert 394 * into this tree using the cacheline as the key. At 395 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If 396 * the entry already exists at insertion time add a tag as a reference 397 * count for the overlapping mappings. For now, the overlap tracking 398 * just ensures that 'unmaps' balance 'maps' before marking the 399 * cacheline idle, but we should also be flagging overlaps as an API 400 * violation. 401 * 402 * Memory usage is mostly constrained by the maximum number of available 403 * dma-debug entries in that we need a free dma_debug_entry before 404 * inserting into the tree. In the case of dma_map_page and 405 * dma_alloc_coherent there is only one dma_debug_entry and one 406 * dma_active_cacheline entry to track per event. dma_map_sg(), on the 407 * other hand, consumes a single dma_debug_entry, but inserts 'nents' 408 * entries into the tree. 409 * 410 * Use __GFP_NOWARN because the printk from an OOM, to netconsole, could end 411 * up right back in the DMA debugging code, leading to a deadlock. 
412 */ 413 static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC | __GFP_NOWARN); 414 static DEFINE_SPINLOCK(radix_lock); 415 #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1) 416 #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT) 417 #define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT) 418 419 static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry) 420 { 421 return ((entry->paddr >> PAGE_SHIFT) << CACHELINE_PER_PAGE_SHIFT) + 422 (offset_in_page(entry->paddr) >> L1_CACHE_SHIFT); 423 } 424 425 static int active_cacheline_read_overlap(phys_addr_t cln) 426 { 427 int overlap = 0, i; 428 429 for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) 430 if (radix_tree_tag_get(&dma_active_cacheline, cln, i)) 431 overlap |= 1 << i; 432 return overlap; 433 } 434 435 static int active_cacheline_set_overlap(phys_addr_t cln, int overlap) 436 { 437 int i; 438 439 if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0) 440 return overlap; 441 442 for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) 443 if (overlap & 1 << i) 444 radix_tree_tag_set(&dma_active_cacheline, cln, i); 445 else 446 radix_tree_tag_clear(&dma_active_cacheline, cln, i); 447 448 return overlap; 449 } 450 451 static void active_cacheline_inc_overlap(phys_addr_t cln) 452 { 453 int overlap = active_cacheline_read_overlap(cln); 454 455 overlap = active_cacheline_set_overlap(cln, ++overlap); 456 457 /* If we overflowed the overlap counter then we're potentially 458 * leaking dma-mappings. 459 */ 460 WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP, 461 pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"), 462 ACTIVE_CACHELINE_MAX_OVERLAP, &cln); 463 } 464 465 static int active_cacheline_dec_overlap(phys_addr_t cln) 466 { 467 int overlap = active_cacheline_read_overlap(cln); 468 469 return active_cacheline_set_overlap(cln, --overlap); 470 } 471 472 static int active_cacheline_insert(struct dma_debug_entry *entry) 473 { 474 phys_addr_t cln = to_cacheline_number(entry); 475 unsigned long flags; 476 int rc; 477 478 /* If the device is not writing memory then we don't have any 479 * concerns about the cpu consuming stale data. This mitigates 480 * legitimate usages of overlapping mappings. 481 */ 482 if (entry->direction == DMA_TO_DEVICE) 483 return 0; 484 485 spin_lock_irqsave(&radix_lock, flags); 486 rc = radix_tree_insert(&dma_active_cacheline, cln, entry); 487 if (rc == -EEXIST) 488 active_cacheline_inc_overlap(cln); 489 spin_unlock_irqrestore(&radix_lock, flags); 490 491 return rc; 492 } 493 494 static void active_cacheline_remove(struct dma_debug_entry *entry) 495 { 496 phys_addr_t cln = to_cacheline_number(entry); 497 unsigned long flags; 498 499 /* ...mirror the insert case */ 500 if (entry->direction == DMA_TO_DEVICE) 501 return; 502 503 spin_lock_irqsave(&radix_lock, flags); 504 /* since we are counting overlaps the final put of the 505 * cacheline will occur when the overlap count is 0. 
506 * active_cacheline_dec_overlap() returns -1 in that case 507 */ 508 if (active_cacheline_dec_overlap(cln) < 0) 509 radix_tree_delete(&dma_active_cacheline, cln); 510 spin_unlock_irqrestore(&radix_lock, flags); 511 } 512 513 /* 514 * Dump mappings entries on kernel space for debugging purposes 515 */ 516 void debug_dma_dump_mappings(struct device *dev) 517 { 518 int idx; 519 phys_addr_t cln; 520 521 for (idx = 0; idx < HASH_SIZE; idx++) { 522 struct hash_bucket *bucket = &dma_entry_hash[idx]; 523 struct dma_debug_entry *entry; 524 unsigned long flags; 525 526 spin_lock_irqsave(&bucket->lock, flags); 527 list_for_each_entry(entry, &bucket->list, list) { 528 if (!dev || dev == entry->dev) { 529 cln = to_cacheline_number(entry); 530 dev_info(entry->dev, 531 "%s idx %d P=%pa D=%llx L=%llx cln=%pa %s %s\n", 532 type2name[entry->type], idx, 533 &entry->paddr, entry->dev_addr, 534 entry->size, &cln, 535 dir2name[entry->direction], 536 maperr2str[entry->map_err_type]); 537 } 538 } 539 spin_unlock_irqrestore(&bucket->lock, flags); 540 541 cond_resched(); 542 } 543 } 544 545 /* 546 * Dump mappings entries on user space via debugfs 547 */ 548 static int dump_show(struct seq_file *seq, void *v) 549 { 550 int idx; 551 phys_addr_t cln; 552 553 for (idx = 0; idx < HASH_SIZE; idx++) { 554 struct hash_bucket *bucket = &dma_entry_hash[idx]; 555 struct dma_debug_entry *entry; 556 unsigned long flags; 557 558 spin_lock_irqsave(&bucket->lock, flags); 559 list_for_each_entry(entry, &bucket->list, list) { 560 cln = to_cacheline_number(entry); 561 seq_printf(seq, 562 "%s %s %s idx %d P=%pa D=%llx L=%llx cln=%pa %s %s\n", 563 dev_driver_string(entry->dev), 564 dev_name(entry->dev), 565 type2name[entry->type], idx, 566 &entry->paddr, entry->dev_addr, 567 entry->size, &cln, 568 dir2name[entry->direction], 569 maperr2str[entry->map_err_type]); 570 } 571 spin_unlock_irqrestore(&bucket->lock, flags); 572 } 573 return 0; 574 } 575 DEFINE_SHOW_ATTRIBUTE(dump); 576 577 /* 578 * Wrapper function for adding an entry to the hash. 579 * This function takes care of locking itself. 
580 */ 581 static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs) 582 { 583 struct hash_bucket *bucket; 584 unsigned long flags; 585 int rc; 586 587 bucket = get_hash_bucket(entry, &flags); 588 hash_bucket_add(bucket, entry); 589 put_hash_bucket(bucket, flags); 590 591 rc = active_cacheline_insert(entry); 592 if (rc == -ENOMEM) { 593 pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n"); 594 global_disable = true; 595 } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) { 596 err_printk(entry->dev, entry, 597 "cacheline tracking EEXIST, overlapping mappings aren't supported\n"); 598 } 599 } 600 601 static int dma_debug_create_entries(gfp_t gfp) 602 { 603 struct dma_debug_entry *entry; 604 int i; 605 606 entry = (void *)get_zeroed_page(gfp); 607 if (!entry) 608 return -ENOMEM; 609 610 for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++) 611 list_add_tail(&entry[i].list, &free_entries); 612 613 num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES; 614 nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES; 615 616 return 0; 617 } 618 619 static struct dma_debug_entry *__dma_entry_alloc(void) 620 { 621 struct dma_debug_entry *entry; 622 623 entry = list_entry(free_entries.next, struct dma_debug_entry, list); 624 list_del(&entry->list); 625 memset(entry, 0, sizeof(*entry)); 626 627 num_free_entries -= 1; 628 if (num_free_entries < min_free_entries) 629 min_free_entries = num_free_entries; 630 631 return entry; 632 } 633 634 /* 635 * This should be called outside of free_entries_lock scope to avoid potential 636 * deadlocks with serial consoles that use DMA. 637 */ 638 static void __dma_entry_alloc_check_leak(u32 nr_entries) 639 { 640 u32 tmp = nr_entries % nr_prealloc_entries; 641 642 /* Shout each time we tick over some multiple of the initial pool */ 643 if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) { 644 pr_info("dma_debug_entry pool grown to %u (%u00%%)\n", 645 nr_entries, 646 (nr_entries / nr_prealloc_entries)); 647 } 648 } 649 650 /* struct dma_entry allocator 651 * 652 * The next two functions implement the allocator for 653 * struct dma_debug_entries. 654 */ 655 static struct dma_debug_entry *dma_entry_alloc(void) 656 { 657 bool alloc_check_leak = false; 658 struct dma_debug_entry *entry; 659 unsigned long flags; 660 u32 nr_entries; 661 662 spin_lock_irqsave(&free_entries_lock, flags); 663 if (num_free_entries == 0) { 664 if (dma_debug_create_entries(GFP_ATOMIC)) { 665 global_disable = true; 666 spin_unlock_irqrestore(&free_entries_lock, flags); 667 pr_err("debugging out of memory - disabling\n"); 668 return NULL; 669 } 670 alloc_check_leak = true; 671 nr_entries = nr_total_entries; 672 } 673 674 entry = __dma_entry_alloc(); 675 676 spin_unlock_irqrestore(&free_entries_lock, flags); 677 678 if (alloc_check_leak) 679 __dma_entry_alloc_check_leak(nr_entries); 680 681 #ifdef CONFIG_STACKTRACE 682 entry->stack_len = stack_trace_save(entry->stack_entries, 683 ARRAY_SIZE(entry->stack_entries), 684 1); 685 #endif 686 return entry; 687 } 688 689 static void dma_entry_free(struct dma_debug_entry *entry) 690 { 691 unsigned long flags; 692 693 active_cacheline_remove(entry); 694 695 /* 696 * add to beginning of the list - this way the entries are 697 * more likely cache hot when they are reallocated. 698 */ 699 spin_lock_irqsave(&free_entries_lock, flags); 700 list_add(&entry->list, &free_entries); 701 num_free_entries += 1; 702 spin_unlock_irqrestore(&free_entries_lock, flags); 703 } 704 705 /* 706 * DMA-API debugging init code 707 * 708 * The init code does two things: 709 * 1. 
Initialize core data structures 710 * 2. Preallocate a given number of dma_debug_entry structs 711 */ 712 713 static ssize_t filter_read(struct file *file, char __user *user_buf, 714 size_t count, loff_t *ppos) 715 { 716 char buf[NAME_MAX_LEN + 1]; 717 unsigned long flags; 718 int len; 719 720 if (!current_driver_name[0]) 721 return 0; 722 723 /* 724 * We can't copy to userspace directly because current_driver_name can 725 * only be read under the driver_name_lock with irqs disabled. So 726 * create a temporary copy first. 727 */ 728 read_lock_irqsave(&driver_name_lock, flags); 729 len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name); 730 read_unlock_irqrestore(&driver_name_lock, flags); 731 732 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 733 } 734 735 static ssize_t filter_write(struct file *file, const char __user *userbuf, 736 size_t count, loff_t *ppos) 737 { 738 char buf[NAME_MAX_LEN]; 739 unsigned long flags; 740 size_t len; 741 int i; 742 743 /* 744 * We can't copy from userspace directly. Access to 745 * current_driver_name is protected with a write_lock with irqs 746 * disabled. Since copy_from_user can fault and may sleep we 747 * need to copy to temporary buffer first 748 */ 749 len = min(count, (size_t)(NAME_MAX_LEN - 1)); 750 if (copy_from_user(buf, userbuf, len)) 751 return -EFAULT; 752 753 buf[len] = 0; 754 755 write_lock_irqsave(&driver_name_lock, flags); 756 757 /* 758 * Now handle the string we got from userspace very carefully. 759 * The rules are: 760 * - only use the first token we got 761 * - token delimiter is everything looking like a space 762 * character (' ', '\n', '\t' ...) 763 * 764 */ 765 if (!isalnum(buf[0])) { 766 /* 767 * If the first character userspace gave us is not 768 * alphanumerical then assume the filter should be 769 * switched off. 770 */ 771 if (current_driver_name[0]) 772 pr_info("switching off dma-debug driver filter\n"); 773 current_driver_name[0] = 0; 774 current_driver = NULL; 775 goto out_unlock; 776 } 777 778 /* 779 * Now parse out the first token and use it as the name for the 780 * driver to filter for. 
781 */ 782 for (i = 0; i < NAME_MAX_LEN - 1; ++i) { 783 current_driver_name[i] = buf[i]; 784 if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0) 785 break; 786 } 787 current_driver_name[i] = 0; 788 current_driver = NULL; 789 790 pr_info("enable driver filter for driver [%s]\n", 791 current_driver_name); 792 793 out_unlock: 794 write_unlock_irqrestore(&driver_name_lock, flags); 795 796 return count; 797 } 798 799 static const struct file_operations filter_fops = { 800 .read = filter_read, 801 .write = filter_write, 802 .llseek = default_llseek, 803 }; 804 805 static int __init dma_debug_fs_init(void) 806 { 807 struct dentry *dentry = debugfs_create_dir("dma-api", NULL); 808 809 debugfs_create_bool("disabled", 0444, dentry, &global_disable); 810 debugfs_create_u32("error_count", 0444, dentry, &error_count); 811 debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors); 812 debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors); 813 debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries); 814 debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries); 815 debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries); 816 debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops); 817 debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops); 818 819 return 0; 820 } 821 core_initcall_sync(dma_debug_fs_init); 822 823 static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry) 824 { 825 struct dma_debug_entry *entry; 826 unsigned long flags; 827 int count = 0, i; 828 829 for (i = 0; i < HASH_SIZE; ++i) { 830 spin_lock_irqsave(&dma_entry_hash[i].lock, flags); 831 list_for_each_entry(entry, &dma_entry_hash[i].list, list) { 832 if (entry->dev == dev) { 833 count += 1; 834 *out_entry = entry; 835 } 836 } 837 spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags); 838 } 839 840 return count; 841 } 842 843 static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data) 844 { 845 struct device *dev = data; 846 struct dma_debug_entry *entry; 847 int count; 848 849 if (dma_debug_disabled()) 850 return 0; 851 852 switch (action) { 853 case BUS_NOTIFY_UNBOUND_DRIVER: 854 count = device_dma_allocations(dev, &entry); 855 if (count == 0) 856 break; 857 err_printk(dev, entry, "device driver has pending " 858 "DMA allocations while released from device " 859 "[count=%d]\n" 860 "One of leaked entries details: " 861 "[device address=0x%016llx] [size=%llu bytes] " 862 "[mapped with %s] [mapped as %s]\n", 863 count, entry->dev_addr, entry->size, 864 dir2name[entry->direction], type2name[entry->type]); 865 break; 866 default: 867 break; 868 } 869 870 return 0; 871 } 872 873 void dma_debug_add_bus(const struct bus_type *bus) 874 { 875 struct notifier_block *nb; 876 877 if (dma_debug_disabled()) 878 return; 879 880 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); 881 if (nb == NULL) { 882 pr_err("dma_debug_add_bus: out of memory\n"); 883 return; 884 } 885 886 nb->notifier_call = dma_debug_device_change; 887 888 bus_register_notifier(bus, nb); 889 } 890 891 static int dma_debug_init(void) 892 { 893 int i, nr_pages; 894 895 /* Do not use dma_debug_initialized here, since we really want to be 896 * called to set dma_debug_initialized 897 */ 898 if (global_disable) 899 return 0; 900 901 for (i = 0; i < HASH_SIZE; ++i) { 902 INIT_LIST_HEAD(&dma_entry_hash[i].list); 903 spin_lock_init(&dma_entry_hash[i].lock); 904 } 905 906 nr_pages = DIV_ROUND_UP(nr_prealloc_entries, 
DMA_DEBUG_DYNAMIC_ENTRIES); 907 for (i = 0; i < nr_pages; ++i) 908 dma_debug_create_entries(GFP_KERNEL); 909 if (num_free_entries >= nr_prealloc_entries) { 910 pr_info("preallocated %d debug entries\n", nr_total_entries); 911 } else if (num_free_entries > 0) { 912 pr_warn("%d debug entries requested but only %d allocated\n", 913 nr_prealloc_entries, nr_total_entries); 914 } else { 915 pr_err("debugging out of memory error - disabled\n"); 916 global_disable = true; 917 918 return 0; 919 } 920 min_free_entries = num_free_entries; 921 922 dma_debug_initialized = true; 923 924 pr_info("debugging enabled by kernel config\n"); 925 return 0; 926 } 927 core_initcall(dma_debug_init); 928 929 static __init int dma_debug_cmdline(char *str) 930 { 931 if (!str) 932 return -EINVAL; 933 934 if (strncmp(str, "off", 3) == 0) { 935 pr_info("debugging disabled on kernel command line\n"); 936 global_disable = true; 937 } 938 939 return 1; 940 } 941 942 static __init int dma_debug_entries_cmdline(char *str) 943 { 944 if (!str) 945 return -EINVAL; 946 if (!get_option(&str, &nr_prealloc_entries)) 947 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES; 948 return 1; 949 } 950 951 __setup("dma_debug=", dma_debug_cmdline); 952 __setup("dma_debug_entries=", dma_debug_entries_cmdline); 953 954 static void check_unmap(struct dma_debug_entry *ref) 955 { 956 struct dma_debug_entry *entry; 957 struct hash_bucket *bucket; 958 unsigned long flags; 959 960 bucket = get_hash_bucket(ref, &flags); 961 entry = bucket_find_exact(bucket, ref); 962 963 if (!entry) { 964 /* must drop lock before calling dma_mapping_error */ 965 put_hash_bucket(bucket, flags); 966 967 if (dma_mapping_error(ref->dev, ref->dev_addr)) { 968 err_printk(ref->dev, NULL, 969 "device driver tries to free an " 970 "invalid DMA memory address\n"); 971 } else { 972 err_printk(ref->dev, NULL, 973 "device driver tries to free DMA " 974 "memory it has not allocated [device " 975 "address=0x%016llx] [size=%llu bytes]\n", 976 ref->dev_addr, ref->size); 977 } 978 return; 979 } 980 981 if (ref->size != entry->size) { 982 err_printk(ref->dev, entry, "device driver frees " 983 "DMA memory with different size " 984 "[device address=0x%016llx] [map size=%llu bytes] " 985 "[unmap size=%llu bytes]\n", 986 ref->dev_addr, entry->size, ref->size); 987 } 988 989 if (ref->type != entry->type) { 990 err_printk(ref->dev, entry, "device driver frees " 991 "DMA memory with wrong function " 992 "[device address=0x%016llx] [size=%llu bytes] " 993 "[mapped as %s] [unmapped as %s]\n", 994 ref->dev_addr, ref->size, 995 type2name[entry->type], type2name[ref->type]); 996 } else if (entry->type == dma_debug_coherent && 997 ref->paddr != entry->paddr) { 998 err_printk(ref->dev, entry, "device driver frees " 999 "DMA memory with different CPU address " 1000 "[device address=0x%016llx] [size=%llu bytes] " 1001 "[cpu alloc address=0x%pa] " 1002 "[cpu free address=0x%pa]", 1003 ref->dev_addr, ref->size, 1004 &entry->paddr, 1005 &ref->paddr); 1006 } 1007 1008 if (ref->sg_call_ents && ref->type == dma_debug_sg && 1009 ref->sg_call_ents != entry->sg_call_ents) { 1010 err_printk(ref->dev, entry, "device driver frees " 1011 "DMA sg list with different entry count " 1012 "[map count=%d] [unmap count=%d]\n", 1013 entry->sg_call_ents, ref->sg_call_ents); 1014 } 1015 1016 /* 1017 * This may be no bug in reality - but most implementations of the 1018 * DMA API don't handle this properly, so check for it here 1019 */ 1020 if (ref->direction != entry->direction) { 1021 err_printk(ref->dev, entry, "device 
driver frees " 1022 "DMA memory with different direction " 1023 "[device address=0x%016llx] [size=%llu bytes] " 1024 "[mapped with %s] [unmapped with %s]\n", 1025 ref->dev_addr, ref->size, 1026 dir2name[entry->direction], 1027 dir2name[ref->direction]); 1028 } 1029 1030 /* 1031 * Drivers should use dma_mapping_error() to check the returned 1032 * addresses of dma_map_single() and dma_map_page(). 1033 * If not, print this warning message. See Documentation/core-api/dma-api.rst. 1034 */ 1035 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { 1036 err_printk(ref->dev, entry, 1037 "device driver failed to check map error" 1038 "[device address=0x%016llx] [size=%llu bytes] " 1039 "[mapped as %s]", 1040 ref->dev_addr, ref->size, 1041 type2name[entry->type]); 1042 } 1043 1044 hash_bucket_del(entry); 1045 put_hash_bucket(bucket, flags); 1046 1047 /* 1048 * Free the entry outside of bucket_lock to avoid ABBA deadlocks 1049 * between that and radix_lock. 1050 */ 1051 dma_entry_free(entry); 1052 } 1053 1054 static void check_for_stack(struct device *dev, 1055 struct page *page, size_t offset) 1056 { 1057 void *addr; 1058 struct vm_struct *stack_vm_area = task_stack_vm_area(current); 1059 1060 if (!stack_vm_area) { 1061 /* Stack is direct-mapped. */ 1062 if (PageHighMem(page)) 1063 return; 1064 addr = page_address(page) + offset; 1065 if (object_is_on_stack(addr)) 1066 err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr); 1067 } else { 1068 /* Stack is vmalloced. */ 1069 int i; 1070 1071 for (i = 0; i < stack_vm_area->nr_pages; i++) { 1072 if (page != stack_vm_area->pages[i]) 1073 continue; 1074 1075 addr = (u8 *)current->stack + i * PAGE_SIZE + offset; 1076 err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr); 1077 break; 1078 } 1079 } 1080 } 1081 1082 static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len) 1083 { 1084 if (memory_intersects(_stext, _etext, addr, len) || 1085 memory_intersects(__start_rodata, __end_rodata, addr, len)) 1086 err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len); 1087 } 1088 1089 static void check_sync(struct device *dev, 1090 struct dma_debug_entry *ref, 1091 bool to_cpu) 1092 { 1093 struct dma_debug_entry *entry; 1094 struct hash_bucket *bucket; 1095 unsigned long flags; 1096 1097 bucket = get_hash_bucket(ref, &flags); 1098 1099 entry = bucket_find_contain(&bucket, ref, &flags); 1100 1101 if (!entry) { 1102 err_printk(dev, NULL, "device driver tries " 1103 "to sync DMA memory it has not allocated " 1104 "[device address=0x%016llx] [size=%llu bytes]\n", 1105 (unsigned long long)ref->dev_addr, ref->size); 1106 goto out; 1107 } 1108 1109 if (ref->size > entry->size) { 1110 err_printk(dev, entry, "device driver syncs" 1111 " DMA memory outside allocated range " 1112 "[device address=0x%016llx] " 1113 "[allocation size=%llu bytes] " 1114 "[sync offset+size=%llu]\n", 1115 entry->dev_addr, entry->size, 1116 ref->size); 1117 } 1118 1119 if (entry->direction == DMA_BIDIRECTIONAL) 1120 goto out; 1121 1122 if (ref->direction != entry->direction) { 1123 err_printk(dev, entry, "device driver syncs " 1124 "DMA memory with different direction " 1125 "[device address=0x%016llx] [size=%llu bytes] " 1126 "[mapped with %s] [synced with %s]\n", 1127 (unsigned long long)ref->dev_addr, entry->size, 1128 dir2name[entry->direction], 1129 dir2name[ref->direction]); 1130 } 1131 1132 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && 1133 
!(ref->direction == DMA_TO_DEVICE)) 1134 err_printk(dev, entry, "device driver syncs " 1135 "device read-only DMA memory for cpu " 1136 "[device address=0x%016llx] [size=%llu bytes] " 1137 "[mapped with %s] [synced with %s]\n", 1138 (unsigned long long)ref->dev_addr, entry->size, 1139 dir2name[entry->direction], 1140 dir2name[ref->direction]); 1141 1142 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && 1143 !(ref->direction == DMA_FROM_DEVICE)) 1144 err_printk(dev, entry, "device driver syncs " 1145 "device write-only DMA memory to device " 1146 "[device address=0x%016llx] [size=%llu bytes] " 1147 "[mapped with %s] [synced with %s]\n", 1148 (unsigned long long)ref->dev_addr, entry->size, 1149 dir2name[entry->direction], 1150 dir2name[ref->direction]); 1151 1152 if (ref->sg_call_ents && ref->type == dma_debug_sg && 1153 ref->sg_call_ents != entry->sg_call_ents) { 1154 err_printk(ref->dev, entry, "device driver syncs " 1155 "DMA sg list with different entry count " 1156 "[map count=%d] [sync count=%d]\n", 1157 entry->sg_call_ents, ref->sg_call_ents); 1158 } 1159 1160 out: 1161 put_hash_bucket(bucket, flags); 1162 } 1163 1164 static void check_sg_segment(struct device *dev, struct scatterlist *sg) 1165 { 1166 unsigned int max_seg = dma_get_max_seg_size(dev); 1167 u64 start, end, boundary = dma_get_seg_boundary(dev); 1168 1169 /* 1170 * Either the driver forgot to set dma_parms appropriately, or 1171 * whoever generated the list forgot to check them. 1172 */ 1173 if (sg->length > max_seg) 1174 err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n", 1175 sg->length, max_seg); 1176 /* 1177 * In some cases this could potentially be the DMA API 1178 * implementation's fault, but it would usually imply that 1179 * the scatterlist was built inappropriately to begin with. 
1180 */ 1181 start = sg_dma_address(sg); 1182 end = start + sg_dma_len(sg) - 1; 1183 if ((start ^ end) & ~boundary) 1184 err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n", 1185 start, end, boundary); 1186 } 1187 1188 void debug_dma_map_single(struct device *dev, const void *addr, 1189 unsigned long len) 1190 { 1191 if (unlikely(dma_debug_disabled())) 1192 return; 1193 1194 if (!virt_addr_valid(addr)) 1195 err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n", 1196 addr, len); 1197 1198 if (is_vmalloc_addr(addr)) 1199 err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n", 1200 addr, len); 1201 } 1202 EXPORT_SYMBOL(debug_dma_map_single); 1203 1204 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, 1205 size_t size, int direction, dma_addr_t dma_addr, 1206 unsigned long attrs) 1207 { 1208 struct dma_debug_entry *entry; 1209 1210 if (unlikely(dma_debug_disabled())) 1211 return; 1212 1213 if (dma_mapping_error(dev, dma_addr)) 1214 return; 1215 1216 entry = dma_entry_alloc(); 1217 if (!entry) 1218 return; 1219 1220 entry->dev = dev; 1221 entry->type = dma_debug_single; 1222 entry->paddr = page_to_phys(page) + offset; 1223 entry->dev_addr = dma_addr; 1224 entry->size = size; 1225 entry->direction = direction; 1226 entry->map_err_type = MAP_ERR_NOT_CHECKED; 1227 1228 check_for_stack(dev, page, offset); 1229 1230 if (!PageHighMem(page)) { 1231 void *addr = page_address(page) + offset; 1232 1233 check_for_illegal_area(dev, addr, size); 1234 } 1235 1236 add_dma_entry(entry, attrs); 1237 } 1238 1239 void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 1240 { 1241 struct dma_debug_entry ref; 1242 struct dma_debug_entry *entry; 1243 struct hash_bucket *bucket; 1244 unsigned long flags; 1245 1246 if (unlikely(dma_debug_disabled())) 1247 return; 1248 1249 ref.dev = dev; 1250 ref.dev_addr = dma_addr; 1251 bucket = get_hash_bucket(&ref, &flags); 1252 1253 list_for_each_entry(entry, &bucket->list, list) { 1254 if (!exact_match(&ref, entry)) 1255 continue; 1256 1257 /* 1258 * The same physical address can be mapped multiple 1259 * times. Without a hardware IOMMU this results in the 1260 * same device addresses being put into the dma-debug 1261 * hash multiple times too. This can result in false 1262 * positives being reported. Therefore we implement a 1263 * best-fit algorithm here which updates the first entry 1264 * from the hash which fits the reference value and is 1265 * not currently listed as being checked. 
1266 */ 1267 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { 1268 entry->map_err_type = MAP_ERR_CHECKED; 1269 break; 1270 } 1271 } 1272 1273 put_hash_bucket(bucket, flags); 1274 } 1275 EXPORT_SYMBOL(debug_dma_mapping_error); 1276 1277 void debug_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, 1278 size_t size, int direction) 1279 { 1280 struct dma_debug_entry ref = { 1281 .type = dma_debug_single, 1282 .dev = dev, 1283 .dev_addr = dma_addr, 1284 .size = size, 1285 .direction = direction, 1286 }; 1287 1288 if (unlikely(dma_debug_disabled())) 1289 return; 1290 check_unmap(&ref); 1291 } 1292 1293 void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, 1294 int nents, int mapped_ents, int direction, 1295 unsigned long attrs) 1296 { 1297 struct dma_debug_entry *entry; 1298 struct scatterlist *s; 1299 int i; 1300 1301 if (unlikely(dma_debug_disabled())) 1302 return; 1303 1304 for_each_sg(sg, s, nents, i) { 1305 check_for_stack(dev, sg_page(s), s->offset); 1306 if (!PageHighMem(sg_page(s))) 1307 check_for_illegal_area(dev, sg_virt(s), s->length); 1308 } 1309 1310 for_each_sg(sg, s, mapped_ents, i) { 1311 entry = dma_entry_alloc(); 1312 if (!entry) 1313 return; 1314 1315 entry->type = dma_debug_sg; 1316 entry->dev = dev; 1317 entry->paddr = sg_phys(s); 1318 entry->size = sg_dma_len(s); 1319 entry->dev_addr = sg_dma_address(s); 1320 entry->direction = direction; 1321 entry->sg_call_ents = nents; 1322 entry->sg_mapped_ents = mapped_ents; 1323 1324 check_sg_segment(dev, s); 1325 1326 add_dma_entry(entry, attrs); 1327 } 1328 } 1329 1330 static int get_nr_mapped_entries(struct device *dev, 1331 struct dma_debug_entry *ref) 1332 { 1333 struct dma_debug_entry *entry; 1334 struct hash_bucket *bucket; 1335 unsigned long flags; 1336 int mapped_ents; 1337 1338 bucket = get_hash_bucket(ref, &flags); 1339 entry = bucket_find_exact(bucket, ref); 1340 mapped_ents = 0; 1341 1342 if (entry) 1343 mapped_ents = entry->sg_mapped_ents; 1344 put_hash_bucket(bucket, flags); 1345 1346 return mapped_ents; 1347 } 1348 1349 void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, 1350 int nelems, int dir) 1351 { 1352 struct scatterlist *s; 1353 int mapped_ents = 0, i; 1354 1355 if (unlikely(dma_debug_disabled())) 1356 return; 1357 1358 for_each_sg(sglist, s, nelems, i) { 1359 1360 struct dma_debug_entry ref = { 1361 .type = dma_debug_sg, 1362 .dev = dev, 1363 .paddr = sg_phys(s), 1364 .dev_addr = sg_dma_address(s), 1365 .size = sg_dma_len(s), 1366 .direction = dir, 1367 .sg_call_ents = nelems, 1368 }; 1369 1370 if (mapped_ents && i >= mapped_ents) 1371 break; 1372 1373 if (!i) 1374 mapped_ents = get_nr_mapped_entries(dev, &ref); 1375 1376 check_unmap(&ref); 1377 } 1378 } 1379 1380 static phys_addr_t virt_to_paddr(void *virt) 1381 { 1382 struct page *page; 1383 1384 if (is_vmalloc_addr(virt)) 1385 page = vmalloc_to_page(virt); 1386 else 1387 page = virt_to_page(virt); 1388 1389 return page_to_phys(page) + offset_in_page(virt); 1390 } 1391 1392 void debug_dma_alloc_coherent(struct device *dev, size_t size, 1393 dma_addr_t dma_addr, void *virt, 1394 unsigned long attrs) 1395 { 1396 struct dma_debug_entry *entry; 1397 1398 if (unlikely(dma_debug_disabled())) 1399 return; 1400 1401 if (unlikely(virt == NULL)) 1402 return; 1403 1404 /* handle vmalloc and linear addresses */ 1405 if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt)) 1406 return; 1407 1408 entry = dma_entry_alloc(); 1409 if (!entry) 1410 return; 1411 1412 entry->type = dma_debug_coherent; 1413 entry->dev = dev; 1414 entry->paddr = 
virt_to_paddr(virt); 1415 entry->size = size; 1416 entry->dev_addr = dma_addr; 1417 entry->direction = DMA_BIDIRECTIONAL; 1418 1419 add_dma_entry(entry, attrs); 1420 } 1421 1422 void debug_dma_free_coherent(struct device *dev, size_t size, 1423 void *virt, dma_addr_t dma_addr) 1424 { 1425 struct dma_debug_entry ref = { 1426 .type = dma_debug_coherent, 1427 .dev = dev, 1428 .dev_addr = dma_addr, 1429 .size = size, 1430 .direction = DMA_BIDIRECTIONAL, 1431 }; 1432 1433 /* handle vmalloc and linear addresses */ 1434 if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt)) 1435 return; 1436 1437 ref.paddr = virt_to_paddr(virt); 1438 1439 if (unlikely(dma_debug_disabled())) 1440 return; 1441 1442 check_unmap(&ref); 1443 } 1444 1445 void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size, 1446 int direction, dma_addr_t dma_addr, 1447 unsigned long attrs) 1448 { 1449 struct dma_debug_entry *entry; 1450 1451 if (unlikely(dma_debug_disabled())) 1452 return; 1453 1454 entry = dma_entry_alloc(); 1455 if (!entry) 1456 return; 1457 1458 entry->type = dma_debug_resource; 1459 entry->dev = dev; 1460 entry->paddr = addr; 1461 entry->size = size; 1462 entry->dev_addr = dma_addr; 1463 entry->direction = direction; 1464 entry->map_err_type = MAP_ERR_NOT_CHECKED; 1465 1466 add_dma_entry(entry, attrs); 1467 } 1468 1469 void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr, 1470 size_t size, int direction) 1471 { 1472 struct dma_debug_entry ref = { 1473 .type = dma_debug_resource, 1474 .dev = dev, 1475 .dev_addr = dma_addr, 1476 .size = size, 1477 .direction = direction, 1478 }; 1479 1480 if (unlikely(dma_debug_disabled())) 1481 return; 1482 1483 check_unmap(&ref); 1484 } 1485 1486 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, 1487 size_t size, int direction) 1488 { 1489 struct dma_debug_entry ref; 1490 1491 if (unlikely(dma_debug_disabled())) 1492 return; 1493 1494 ref.type = dma_debug_single; 1495 ref.dev = dev; 1496 ref.dev_addr = dma_handle; 1497 ref.size = size; 1498 ref.direction = direction; 1499 ref.sg_call_ents = 0; 1500 1501 check_sync(dev, &ref, true); 1502 } 1503 1504 void debug_dma_sync_single_for_device(struct device *dev, 1505 dma_addr_t dma_handle, size_t size, 1506 int direction) 1507 { 1508 struct dma_debug_entry ref; 1509 1510 if (unlikely(dma_debug_disabled())) 1511 return; 1512 1513 ref.type = dma_debug_single; 1514 ref.dev = dev; 1515 ref.dev_addr = dma_handle; 1516 ref.size = size; 1517 ref.direction = direction; 1518 ref.sg_call_ents = 0; 1519 1520 check_sync(dev, &ref, false); 1521 } 1522 1523 void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 1524 int nelems, int direction) 1525 { 1526 struct scatterlist *s; 1527 int mapped_ents = 0, i; 1528 1529 if (unlikely(dma_debug_disabled())) 1530 return; 1531 1532 for_each_sg(sg, s, nelems, i) { 1533 1534 struct dma_debug_entry ref = { 1535 .type = dma_debug_sg, 1536 .dev = dev, 1537 .paddr = sg_phys(s), 1538 .dev_addr = sg_dma_address(s), 1539 .size = sg_dma_len(s), 1540 .direction = direction, 1541 .sg_call_ents = nelems, 1542 }; 1543 1544 if (!i) 1545 mapped_ents = get_nr_mapped_entries(dev, &ref); 1546 1547 if (i >= mapped_ents) 1548 break; 1549 1550 check_sync(dev, &ref, true); 1551 } 1552 } 1553 1554 void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 1555 int nelems, int direction) 1556 { 1557 struct scatterlist *s; 1558 int mapped_ents = 0, i; 1559 1560 if (unlikely(dma_debug_disabled())) 1561 return; 1562 1563 
	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type		= dma_debug_sg,
			.dev		= dev,
			.paddr		= sg_phys(s),
			.dev_addr	= sg_dma_address(s),
			.size		= sg_dma_len(s),
			.direction	= direction,
			.sg_call_ents	= nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, false);
	}
}

static int __init dma_debug_driver_setup(char *str)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	if (current_driver_name[0])
		pr_info("enable driver filter for driver [%s]\n",
			current_driver_name);

	return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);
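
/*
 * Example (illustrative sketch only; the helper name, device and buffer
 * below are hypothetical and not referenced anywhere else in this file):
 * the mapping discipline that keeps the checks above quiet is
 * map -> test the result with dma_mapping_error() -> sync with the
 * direction the region was mapped with -> unmap with the same size,
 * type and direction.
 */
static inline int dma_debug_example_usage(struct device *dev, void *buf,
					  size_t len)
{
	dma_addr_t dma_addr;

	/* debug_dma_map_page() records this mapping in dma_entry_hash */
	dma_addr = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);

	/* clears MAP_ERR_NOT_CHECKED so check_unmap() does not warn */
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	/* hand ownership back to the CPU before inspecting the data ... */
	dma_sync_single_for_cpu(dev, dma_addr, len, DMA_BIDIRECTIONAL);
	/* ... and back to the device before it may touch the buffer again */
	dma_sync_single_for_device(dev, dma_addr, len, DMA_BIDIRECTIONAL);

	/* size and direction must match the mapping, or check_unmap() warns */
	dma_unmap_single(dev, dma_addr, len, DMA_BIDIRECTIONAL);
	return 0;
}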