xref: /linux/kernel/dma/debug.c (revision 0b8061c340b643e01da431dd60c75a41bb1d31ec)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2008 Advanced Micro Devices, Inc.
4  *
5  * Author: Joerg Roedel <joerg.roedel@amd.com>
6  */
7 
8 #define pr_fmt(fmt)	"DMA-API: " fmt
9 
10 #include <linux/sched/task_stack.h>
11 #include <linux/scatterlist.h>
12 #include <linux/dma-map-ops.h>
13 #include <linux/sched/task.h>
14 #include <linux/stacktrace.h>
15 #include <linux/spinlock.h>
16 #include <linux/vmalloc.h>
17 #include <linux/debugfs.h>
18 #include <linux/uaccess.h>
19 #include <linux/export.h>
20 #include <linux/device.h>
21 #include <linux/types.h>
22 #include <linux/sched.h>
23 #include <linux/ctype.h>
24 #include <linux/list.h>
25 #include <linux/slab.h>
26 #include <asm/sections.h>
27 #include "debug.h"
28 
29 #define HASH_SIZE       16384ULL
30 #define HASH_FN_SHIFT   13
31 #define HASH_FN_MASK    (HASH_SIZE - 1)
32 
33 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
34 /* If the pool runs out, add this many new entries at once */
35 #define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))
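/*
 * Rough sizing example (illustrative; the exact entry size depends on
 * CONFIG_STACKTRACE and on the cacheline alignment of the struct below):
 * with 4 KiB pages and a dma_debug_entry of about 128 bytes, each dynamic
 * allocation adds roughly 32 entries to the pool.
 */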
36 
37 enum {
38 	dma_debug_single,
39 	dma_debug_sg,
40 	dma_debug_coherent,
41 	dma_debug_resource,
42 };
43 
44 enum map_err_types {
45 	MAP_ERR_CHECK_NOT_APPLICABLE,
46 	MAP_ERR_NOT_CHECKED,
47 	MAP_ERR_CHECKED,
48 };
49 
50 #define DMA_DEBUG_STACKTRACE_ENTRIES 5
51 
52 /**
53  * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
54  * @list: node on pre-allocated free_entries list
55  * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
56  * @size: length of the mapping
57  * @type: single, sg, coherent, resource
58  * @direction: enum dma_data_direction
59  * @sg_call_ents: 'nents' from dma_map_sg
60  * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
61  * @pfn: page frame of the start address
62  * @offset: offset of mapping relative to pfn
63  * @map_err_type: track whether dma_mapping_error() was checked
64  * @stacktrace: support backtraces when a violation is detected
65  */
66 struct dma_debug_entry {
67 	struct list_head list;
68 	struct device    *dev;
69 	u64              dev_addr;
70 	u64              size;
71 	int              type;
72 	int              direction;
73 	int		 sg_call_ents;
74 	int		 sg_mapped_ents;
75 	unsigned long	 pfn;
76 	size_t		 offset;
77 	enum map_err_types  map_err_type;
78 #ifdef CONFIG_STACKTRACE
79 	unsigned int	stack_len;
80 	unsigned long	stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
81 #endif
82 } ____cacheline_aligned_in_smp;
83 
84 typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
85 
86 struct hash_bucket {
87 	struct list_head list;
88 	spinlock_t lock;
89 };
90 
91 /* Hash list to save the allocated dma addresses */
92 static struct hash_bucket dma_entry_hash[HASH_SIZE];
93 /* List of pre-allocated dma_debug_entry structures */
94 static LIST_HEAD(free_entries);
95 /* Lock for the list above */
96 static DEFINE_SPINLOCK(free_entries_lock);
97 
98 /* Global disable flag - will be set in case of an error */
99 static bool global_disable __read_mostly;
100 
101 /* Early initialization disable flag, set at the end of dma_debug_init */
102 static bool dma_debug_initialized __read_mostly;
103 
104 static inline bool dma_debug_disabled(void)
105 {
106 	return global_disable || !dma_debug_initialized;
107 }
108 
109 /* Global error count */
110 static u32 error_count;
111 
112 /* Global error show enable */
113 static u32 show_all_errors __read_mostly;
114 /* Number of errors to show */
115 static u32 show_num_errors = 1;
116 
117 static u32 num_free_entries;
118 static u32 min_free_entries;
119 static u32 nr_total_entries;
120 
121 /* number of preallocated entries requested by kernel cmdline */
122 static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
123 
124 /* per-driver filter related state */
125 
126 #define NAME_MAX_LEN	64
127 
128 static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
129 static struct device_driver *current_driver                    __read_mostly;
130 
131 static DEFINE_RWLOCK(driver_name_lock);
132 
133 static const char *const maperr2str[] = {
134 	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
135 	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
136 	[MAP_ERR_CHECKED] = "dma map error checked",
137 };
138 
139 static const char *type2name[] = {
140 	[dma_debug_single] = "single",
141 	[dma_debug_sg] = "scatter-gather",
142 	[dma_debug_coherent] = "coherent",
143 	[dma_debug_resource] = "resource",
144 };
145 
146 static const char *dir2name[] = {
147 	[DMA_BIDIRECTIONAL]	= "DMA_BIDIRECTIONAL",
148 	[DMA_TO_DEVICE]		= "DMA_TO_DEVICE",
149 	[DMA_FROM_DEVICE]	= "DMA_FROM_DEVICE",
150 	[DMA_NONE]		= "DMA_NONE",
151 };
152 
153 /*
154  * The access to some variables in the err_printk() macro below is racy. We
155  * can't use atomic_t here because all these variables are exported to
156  * debugfs, and some of them are even writeable. This is also the reason why
157  * a lock won't help much. But anyway, the races are no big deal. Here is why:
158  *
159  *   error_count: the addition is racy, but the worst thing that can happen is
160  *                that we don't count some errors
161  *   show_num_errors: the subtraction is racy. Also no big deal because in
162  *                    the worst case this will result in one more warning in the
163  *                    system log than the user configured. This variable is
164  *                    writeable via debugfs.
165  */
166 static inline void dump_entry_trace(struct dma_debug_entry *entry)
167 {
168 #ifdef CONFIG_STACKTRACE
169 	if (entry) {
170 		pr_warn("Mapped at:\n");
171 		stack_trace_print(entry->stack_entries, entry->stack_len, 0);
172 	}
173 #endif
174 }
175 
176 static bool driver_filter(struct device *dev)
177 {
178 	struct device_driver *drv;
179 	unsigned long flags;
180 	bool ret;
181 
182 	/* driver filter off */
183 	if (likely(!current_driver_name[0]))
184 		return true;
185 
186 	/* driver filter on and initialized */
187 	if (current_driver && dev && dev->driver == current_driver)
188 		return true;
189 
190 	/* driver filter on, but we can't filter on a NULL device... */
191 	if (!dev)
192 		return false;
193 
194 	if (current_driver || !current_driver_name[0])
195 		return false;
196 
197 	/* driver filter on but not yet initialized */
198 	drv = dev->driver;
199 	if (!drv)
200 		return false;
201 
202 	/* lock to protect against change of current_driver_name */
203 	read_lock_irqsave(&driver_name_lock, flags);
204 
205 	ret = false;
206 	if (drv->name &&
207 	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
208 		current_driver = drv;
209 		ret = true;
210 	}
211 
212 	read_unlock_irqrestore(&driver_name_lock, flags);
213 
214 	return ret;
215 }
216 
217 #define err_printk(dev, entry, format, arg...) do {			\
218 		error_count += 1;					\
219 		if (driver_filter(dev) &&				\
220 		    (show_all_errors || show_num_errors > 0)) {		\
221 			WARN(1, pr_fmt("%s %s: ") format,		\
222 			     dev ? dev_driver_string(dev) : "NULL",	\
223 			     dev ? dev_name(dev) : "NULL", ## arg);	\
224 			dump_entry_trace(entry);			\
225 		}							\
226 		if (!show_all_errors && show_num_errors > 0)		\
227 			show_num_errors -= 1;				\
228 	} while (0);
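
/*
 * Illustrative call (a sketch of how err_printk() is used further down in
 * this file, not an additional check): the entry argument may be NULL when
 * no dma_debug_entry is available for dump_entry_trace().
 *
 *	err_printk(dev, NULL,
 *		   "device driver tries to free an invalid DMA memory address\n");
 *
 * Note that error_count is bumped unconditionally; the WARN() itself is
 * emitted only when the driver filter matches and either all errors are
 * shown or the show_num_errors budget has not been used up yet.
 */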
229 
230 /*
231  * Hash related functions
232  *
233  * Every DMA-API request is saved into a struct dma_debug_entry. To
234  * have quick access to these structs they are stored into a hash.
235  */
236 static int hash_fn(struct dma_debug_entry *entry)
237 {
238 	/*
239 	 * Hash function is based on the dma address.
240 	 * We use bits 13-26 here as the index into the hash
241 	 */
242 	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
243 }
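
/*
 * Worked example (illustrative values): for a dma address of 0x12345000,
 * hash_fn() computes (0x12345000 >> 13) & 0x3fff = 0x91a2 & 0x3fff = 0x11a2,
 * i.e. bucket 4514 of the HASH_SIZE (16384) buckets. Each bucket therefore
 * covers an 8 KiB (1 << HASH_FN_SHIFT) window of dma address space.
 */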
244 
245 /*
246  * Request exclusive access to a hash bucket for a given dma_debug_entry.
247  */
248 static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
249 					   unsigned long *flags)
250 	__acquires(&dma_entry_hash[idx].lock)
251 {
252 	int idx = hash_fn(entry);
253 	unsigned long __flags;
254 
255 	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
256 	*flags = __flags;
257 	return &dma_entry_hash[idx];
258 }
259 
260 /*
261  * Give up exclusive access to the hash bucket
262  */
263 static void put_hash_bucket(struct hash_bucket *bucket,
264 			    unsigned long flags)
265 	__releases(&bucket->lock)
266 {
267 	spin_unlock_irqrestore(&bucket->lock, flags);
268 }
269 
270 static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
271 {
272 	return ((a->dev_addr == b->dev_addr) &&
273 		(a->dev == b->dev)) ? true : false;
274 }
275 
276 static bool containing_match(struct dma_debug_entry *a,
277 			     struct dma_debug_entry *b)
278 {
279 	if (a->dev != b->dev)
280 		return false;
281 
282 	if ((b->dev_addr <= a->dev_addr) &&
283 	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
284 		return true;
285 
286 	return false;
287 }
288 
289 /*
290  * Search a given entry in the hash bucket list
291  */
292 static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
293 						  struct dma_debug_entry *ref,
294 						  match_fn match)
295 {
296 	struct dma_debug_entry *entry, *ret = NULL;
297 	int matches = 0, match_lvl, last_lvl = -1;
298 
299 	list_for_each_entry(entry, &bucket->list, list) {
300 		if (!match(ref, entry))
301 			continue;
302 
303 		/*
304 		 * Some drivers map the same physical address multiple
305 		 * times. Without a hardware IOMMU this results in the
306 		 * same device addresses being put into the dma-debug
307 		 * hash multiple times too. This can result in false
308 		 * positives being reported. Therefore we implement a
309 		 * best-fit algorithm here which returns the entry from
310 		 * the hash which fits best to the reference value
311 		 * instead of the first-fit.
312 		 */
313 		matches += 1;
314 		match_lvl = 0;
315 		entry->size         == ref->size         ? ++match_lvl : 0;
316 		entry->type         == ref->type         ? ++match_lvl : 0;
317 		entry->direction    == ref->direction    ? ++match_lvl : 0;
318 		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
319 
320 		if (match_lvl == 4) {
321 			/* perfect-fit - return the result */
322 			return entry;
323 		} else if (match_lvl > last_lvl) {
324 			/*
325 			 * We found an entry that fits better than the
326 			 * previous one, or it is the first match.
327 			 */
328 			last_lvl = match_lvl;
329 			ret      = entry;
330 		}
331 	}
332 
333 	/*
334 	 * If we have multiple matches but no perfect-fit, just return
335 	 * NULL.
336 	 */
337 	ret = (matches == 1) ? ret : NULL;
338 
339 	return ret;
340 }
341 
342 static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
343 						 struct dma_debug_entry *ref)
344 {
345 	return __hash_bucket_find(bucket, ref, exact_match);
346 }
347 
348 static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
349 						   struct dma_debug_entry *ref,
350 						   unsigned long *flags)
351 {
352 
353 	unsigned int max_range = dma_get_max_seg_size(ref->dev);
354 	struct dma_debug_entry *entry, index = *ref;
355 	unsigned int range = 0;
356 
357 	while (range <= max_range) {
358 		entry = __hash_bucket_find(*bucket, ref, containing_match);
359 
360 		if (entry)
361 			return entry;
362 
363 		/*
364 		 * Nothing found, go back a hash bucket
365 		 */
366 		put_hash_bucket(*bucket, *flags);
367 		range          += (1 << HASH_FN_SHIFT);
368 		index.dev_addr -= (1 << HASH_FN_SHIFT);
369 		*bucket = get_hash_bucket(&index, flags);
370 	}
371 
372 	return NULL;
373 }
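
/*
 * Since hash_fn() keys on bits 13-26 of the dma address, a sync or unmap
 * address pointing into the middle of a large mapping can hash to a later
 * bucket than the mapping's start address. bucket_find_contain() therefore
 * walks backwards one bucket (8 KiB of dma address space) at a time, up to
 * dma_get_max_seg_size() bytes, re-taking the bucket lock as it goes.
 */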
374 
375 /*
376  * Add an entry to a hash bucket
377  */
378 static void hash_bucket_add(struct hash_bucket *bucket,
379 			    struct dma_debug_entry *entry)
380 {
381 	list_add_tail(&entry->list, &bucket->list);
382 }
383 
384 /*
385  * Remove entry from a hash bucket list
386  */
387 static void hash_bucket_del(struct dma_debug_entry *entry)
388 {
389 	list_del(&entry->list);
390 }
391 
392 static unsigned long long phys_addr(struct dma_debug_entry *entry)
393 {
394 	if (entry->type == dma_debug_resource)
395 		return __pfn_to_phys(entry->pfn) + entry->offset;
396 
397 	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
398 }
399 
400 /*
401  * Dump mapping entries for debugging purposes
402  */
403 void debug_dma_dump_mappings(struct device *dev)
404 {
405 	int idx;
406 
407 	for (idx = 0; idx < HASH_SIZE; idx++) {
408 		struct hash_bucket *bucket = &dma_entry_hash[idx];
409 		struct dma_debug_entry *entry;
410 		unsigned long flags;
411 
412 		spin_lock_irqsave(&bucket->lock, flags);
413 
414 		list_for_each_entry(entry, &bucket->list, list) {
415 			if (!dev || dev == entry->dev) {
416 				dev_info(entry->dev,
417 					 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
418 					 type2name[entry->type], idx,
419 					 phys_addr(entry), entry->pfn,
420 					 entry->dev_addr, entry->size,
421 					 dir2name[entry->direction],
422 					 maperr2str[entry->map_err_type]);
423 			}
424 		}
425 
426 		spin_unlock_irqrestore(&bucket->lock, flags);
427 		cond_resched();
428 	}
429 }
430 
431 /*
432  * For each mapping (initial cacheline in the case of
433  * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
434  * scatterlist, or the cacheline specified in dma_map_single) insert
435  * into this tree using the cacheline as the key. At
436  * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry.  If
437  * the entry already exists at insertion time add a tag as a reference
438  * count for the overlapping mappings.  For now, the overlap tracking
439  * just ensures that 'unmaps' balance 'maps' before marking the
440  * cacheline idle, but we should also be flagging overlaps as an API
441  * violation.
442  *
443  * Memory usage is mostly constrained by the maximum number of available
444  * dma-debug entries in that we need a free dma_debug_entry before
445  * inserting into the tree.  In the case of dma_map_page and
446  * dma_alloc_coherent there is only one dma_debug_entry and one
447  * dma_active_cacheline entry to track per event.  dma_map_sg(), on the
448  * other hand, consumes a single dma_debug_entry, but inserts 'nents'
449  * entries into the tree.
450  */
451 static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
452 static DEFINE_SPINLOCK(radix_lock);
453 #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
454 #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
455 #define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
456 
457 static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
458 {
459 	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
460 		(entry->offset >> L1_CACHE_SHIFT);
461 }
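
/*
 * Worked example (assuming 4 KiB pages and 64-byte cachelines, i.e.
 * PAGE_SHIFT == 12 and L1_CACHE_SHIFT == 6): CACHELINES_PER_PAGE is 64, and
 * a mapping starting at pfn 0x100, offset 0x80 yields a cacheline number of
 * (0x100 << 6) + (0x80 >> 6) = 0x4000 + 0x2 = 0x4002.
 */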
462 
463 static int active_cacheline_read_overlap(phys_addr_t cln)
464 {
465 	int overlap = 0, i;
466 
467 	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
468 		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
469 			overlap |= 1 << i;
470 	return overlap;
471 }
472 
473 static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
474 {
475 	int i;
476 
477 	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
478 		return overlap;
479 
480 	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
481 		if (overlap & 1 << i)
482 			radix_tree_tag_set(&dma_active_cacheline, cln, i);
483 		else
484 			radix_tree_tag_clear(&dma_active_cacheline, cln, i);
485 
486 	return overlap;
487 }
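
/*
 * The radix tree slot itself holds the dma_debug_entry pointer, so the
 * overlap count cannot live there. Instead it is encoded as a small binary
 * number across the per-entry tag bits: with RADIX_TREE_MAX_TAGS tags the
 * count saturates at ACTIVE_CACHELINE_MAX_OVERLAP concurrent overlaps, at
 * which point active_cacheline_inc_overlap() below warns about a likely
 * dma-mapping leak.
 */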
488 
489 static void active_cacheline_inc_overlap(phys_addr_t cln)
490 {
491 	int overlap = active_cacheline_read_overlap(cln);
492 
493 	overlap = active_cacheline_set_overlap(cln, ++overlap);
494 
495 	/* If we overflowed the overlap counter then we're potentially
496 	 * leaking dma-mappings.
497 	 */
498 	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
499 		  pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
500 		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
501 }
502 
503 static int active_cacheline_dec_overlap(phys_addr_t cln)
504 {
505 	int overlap = active_cacheline_read_overlap(cln);
506 
507 	return active_cacheline_set_overlap(cln, --overlap);
508 }
509 
510 static int active_cacheline_insert(struct dma_debug_entry *entry)
511 {
512 	phys_addr_t cln = to_cacheline_number(entry);
513 	unsigned long flags;
514 	int rc;
515 
516 	/* If the device is not writing memory then we don't have any
517 	 * concerns about the cpu consuming stale data.  This mitigates
518 	 * legitimate usages of overlapping mappings.
519 	 */
520 	if (entry->direction == DMA_TO_DEVICE)
521 		return 0;
522 
523 	spin_lock_irqsave(&radix_lock, flags);
524 	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
525 	if (rc == -EEXIST)
526 		active_cacheline_inc_overlap(cln);
527 	spin_unlock_irqrestore(&radix_lock, flags);
528 
529 	return rc;
530 }
531 
532 static void active_cacheline_remove(struct dma_debug_entry *entry)
533 {
534 	phys_addr_t cln = to_cacheline_number(entry);
535 	unsigned long flags;
536 
537 	/* ...mirror the insert case */
538 	if (entry->direction == DMA_TO_DEVICE)
539 		return;
540 
541 	spin_lock_irqsave(&radix_lock, flags);
542 	/* since we are counting overlaps the final put of the
543 	 * cacheline will occur when the overlap count is 0.
544 	 * active_cacheline_dec_overlap() returns -1 in that case
545 	 */
546 	if (active_cacheline_dec_overlap(cln) < 0)
547 		radix_tree_delete(&dma_active_cacheline, cln);
548 	spin_unlock_irqrestore(&radix_lock, flags);
549 }
550 
551 /*
552  * Wrapper function for adding an entry to the hash.
553  * This function takes care of locking itself.
554  */
555 static void add_dma_entry(struct dma_debug_entry *entry)
556 {
557 	struct hash_bucket *bucket;
558 	unsigned long flags;
559 	int rc;
560 
561 	bucket = get_hash_bucket(entry, &flags);
562 	hash_bucket_add(bucket, entry);
563 	put_hash_bucket(bucket, flags);
564 
565 	rc = active_cacheline_insert(entry);
566 	if (rc == -ENOMEM) {
567 		pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
568 		global_disable = true;
569 	}
570 
571 	/* TODO: report -EEXIST errors here as overlapping mappings are
572 	 * not supported by the DMA API
573 	 */
574 }
575 
576 static int dma_debug_create_entries(gfp_t gfp)
577 {
578 	struct dma_debug_entry *entry;
579 	int i;
580 
581 	entry = (void *)get_zeroed_page(gfp);
582 	if (!entry)
583 		return -ENOMEM;
584 
585 	for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
586 		list_add_tail(&entry[i].list, &free_entries);
587 
588 	num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
589 	nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
590 
591 	return 0;
592 }
593 
594 static struct dma_debug_entry *__dma_entry_alloc(void)
595 {
596 	struct dma_debug_entry *entry;
597 
598 	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
599 	list_del(&entry->list);
600 	memset(entry, 0, sizeof(*entry));
601 
602 	num_free_entries -= 1;
603 	if (num_free_entries < min_free_entries)
604 		min_free_entries = num_free_entries;
605 
606 	return entry;
607 }
608 
609 static void __dma_entry_alloc_check_leak(void)
610 {
611 	u32 tmp = nr_total_entries % nr_prealloc_entries;
612 
613 	/* Shout each time we tick over some multiple of the initial pool */
614 	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
615 		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
616 			nr_total_entries,
617 			(nr_total_entries / nr_prealloc_entries));
618 	}
619 }
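
/*
 * Example of the message above (illustrative numbers): with the default
 * preallocation of 65536 entries, the first dynamic growth past 131072
 * total entries reports the new total followed by "(200%)", i.e. the pool
 * has reached twice its preallocated size.
 */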
620 
621 /* struct dma_debug_entry allocator
622  *
623  * The next two functions implement the allocator for
624  * struct dma_debug_entry structures.
625  */
626 static struct dma_debug_entry *dma_entry_alloc(void)
627 {
628 	struct dma_debug_entry *entry;
629 	unsigned long flags;
630 
631 	spin_lock_irqsave(&free_entries_lock, flags);
632 	if (num_free_entries == 0) {
633 		if (dma_debug_create_entries(GFP_ATOMIC)) {
634 			global_disable = true;
635 			spin_unlock_irqrestore(&free_entries_lock, flags);
636 			pr_err("debugging out of memory - disabling\n");
637 			return NULL;
638 		}
639 		__dma_entry_alloc_check_leak();
640 	}
641 
642 	entry = __dma_entry_alloc();
643 
644 	spin_unlock_irqrestore(&free_entries_lock, flags);
645 
646 #ifdef CONFIG_STACKTRACE
647 	entry->stack_len = stack_trace_save(entry->stack_entries,
648 					    ARRAY_SIZE(entry->stack_entries),
649 					    1);
650 #endif
651 	return entry;
652 }
653 
654 static void dma_entry_free(struct dma_debug_entry *entry)
655 {
656 	unsigned long flags;
657 
658 	active_cacheline_remove(entry);
659 
660 	/*
661 	 * add to beginning of the list - this way the entries are
662 	 * more likely cache hot when they are reallocated.
663 	 */
664 	spin_lock_irqsave(&free_entries_lock, flags);
665 	list_add(&entry->list, &free_entries);
666 	num_free_entries += 1;
667 	spin_unlock_irqrestore(&free_entries_lock, flags);
668 }
669 
670 /*
671  * DMA-API debugging init code
672  *
673  * The init code does two things:
674  *   1. Initialize core data structures
675  *   2. Preallocate a given number of dma_debug_entry structs
676  */
677 
678 static ssize_t filter_read(struct file *file, char __user *user_buf,
679 			   size_t count, loff_t *ppos)
680 {
681 	char buf[NAME_MAX_LEN + 1];
682 	unsigned long flags;
683 	int len;
684 
685 	if (!current_driver_name[0])
686 		return 0;
687 
688 	/*
689 	 * We can't copy to userspace directly because current_driver_name can
690 	 * only be read under the driver_name_lock with irqs disabled. So
691 	 * create a temporary copy first.
692 	 */
693 	read_lock_irqsave(&driver_name_lock, flags);
694 	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
695 	read_unlock_irqrestore(&driver_name_lock, flags);
696 
697 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
698 }
699 
700 static ssize_t filter_write(struct file *file, const char __user *userbuf,
701 			    size_t count, loff_t *ppos)
702 {
703 	char buf[NAME_MAX_LEN];
704 	unsigned long flags;
705 	size_t len;
706 	int i;
707 
708 	/*
709 	 * We can't copy from userspace directly. Access to
710 	 * current_driver_name is protected with a write_lock with irqs
711 	 * disabled. Since copy_from_user can fault and may sleep we
712 	 * need to copy into a temporary buffer first
713 	 */
714 	len = min(count, (size_t)(NAME_MAX_LEN - 1));
715 	if (copy_from_user(buf, userbuf, len))
716 		return -EFAULT;
717 
718 	buf[len] = 0;
719 
720 	write_lock_irqsave(&driver_name_lock, flags);
721 
722 	/*
723 	 * Now handle the string we got from userspace very carefully.
724 	 * The rules are:
725 	 *         - only use the first token we got
726 	 *         - token delimiter is everything looking like a space
727 	 *           character (' ', '\n', '\t' ...)
728 	 *
729 	 */
730 	if (!isalnum(buf[0])) {
731 		/*
732 		 * If the first character userspace gave us is not
733 		 * alphanumerical then assume the filter should be
734 		 * switched off.
735 		 */
736 		if (current_driver_name[0])
737 			pr_info("switching off dma-debug driver filter\n");
738 		current_driver_name[0] = 0;
739 		current_driver = NULL;
740 		goto out_unlock;
741 	}
742 
743 	/*
744 	 * Now parse out the first token and use it as the name for the
745 	 * driver to filter for.
746 	 */
747 	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
748 		current_driver_name[i] = buf[i];
749 		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
750 			break;
751 	}
752 	current_driver_name[i] = 0;
753 	current_driver = NULL;
754 
755 	pr_info("enable driver filter for driver [%s]\n",
756 		current_driver_name);
757 
758 out_unlock:
759 	write_unlock_irqrestore(&driver_name_lock, flags);
760 
761 	return count;
762 }
763 
764 static const struct file_operations filter_fops = {
765 	.read  = filter_read,
766 	.write = filter_write,
767 	.llseek = default_llseek,
768 };
769 
770 static int dump_show(struct seq_file *seq, void *v)
771 {
772 	int idx;
773 
774 	for (idx = 0; idx < HASH_SIZE; idx++) {
775 		struct hash_bucket *bucket = &dma_entry_hash[idx];
776 		struct dma_debug_entry *entry;
777 		unsigned long flags;
778 
779 		spin_lock_irqsave(&bucket->lock, flags);
780 		list_for_each_entry(entry, &bucket->list, list) {
781 			seq_printf(seq,
782 				   "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx %s %s\n",
783 				   dev_name(entry->dev),
784 				   dev_driver_string(entry->dev),
785 				   type2name[entry->type], idx,
786 				   phys_addr(entry), entry->pfn,
787 				   entry->dev_addr, entry->size,
788 				   dir2name[entry->direction],
789 				   maperr2str[entry->map_err_type]);
790 		}
791 		spin_unlock_irqrestore(&bucket->lock, flags);
792 	}
793 	return 0;
794 }
795 DEFINE_SHOW_ATTRIBUTE(dump);
796 
797 static void dma_debug_fs_init(void)
798 {
799 	struct dentry *dentry = debugfs_create_dir("dma-api", NULL);
800 
801 	debugfs_create_bool("disabled", 0444, dentry, &global_disable);
802 	debugfs_create_u32("error_count", 0444, dentry, &error_count);
803 	debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
804 	debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
805 	debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
806 	debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
807 	debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
808 	debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
809 	debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
810 }
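
/*
 * Typical interaction with these files from user space, assuming debugfs is
 * mounted at /sys/kernel/debug (the driver name below is only an example):
 *
 *	cat /sys/kernel/debug/dma-api/error_count
 *	echo 1 > /sys/kernel/debug/dma-api/all_errors
 *	echo e1000e > /sys/kernel/debug/dma-api/driver_filter
 *	cat /sys/kernel/debug/dma-api/dump
 */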
811 
812 static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
813 {
814 	struct dma_debug_entry *entry;
815 	unsigned long flags;
816 	int count = 0, i;
817 
818 	for (i = 0; i < HASH_SIZE; ++i) {
819 		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
820 		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
821 			if (entry->dev == dev) {
822 				count += 1;
823 				*out_entry = entry;
824 			}
825 		}
826 		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
827 	}
828 
829 	return count;
830 }
831 
832 static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
833 {
834 	struct device *dev = data;
835 	struct dma_debug_entry *entry;
836 	int count;
837 
838 	if (dma_debug_disabled())
839 		return 0;
840 
841 	switch (action) {
842 	case BUS_NOTIFY_UNBOUND_DRIVER:
843 		count = device_dma_allocations(dev, &entry);
844 		if (count == 0)
845 			break;
846 		err_printk(dev, entry, "device driver has pending "
847 				"DMA allocations while released from device "
848 				"[count=%d]\n"
849 				"One of leaked entries details: "
850 				"[device address=0x%016llx] [size=%llu bytes] "
851 				"[mapped with %s] [mapped as %s]\n",
852 			count, entry->dev_addr, entry->size,
853 			dir2name[entry->direction], type2name[entry->type]);
854 		break;
855 	default:
856 		break;
857 	}
858 
859 	return 0;
860 }
861 
862 void dma_debug_add_bus(struct bus_type *bus)
863 {
864 	struct notifier_block *nb;
865 
866 	if (dma_debug_disabled())
867 		return;
868 
869 	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
870 	if (nb == NULL) {
871 		pr_err("dma_debug_add_bus: out of memory\n");
872 		return;
873 	}
874 
875 	nb->notifier_call = dma_debug_device_change;
876 
877 	bus_register_notifier(bus, nb);
878 }
879 
880 static int dma_debug_init(void)
881 {
882 	int i, nr_pages;
883 
884 	/* Do not use dma_debug_initialized here, since we really want to be
885 	 * called to set dma_debug_initialized
886 	 */
887 	if (global_disable)
888 		return 0;
889 
890 	for (i = 0; i < HASH_SIZE; ++i) {
891 		INIT_LIST_HEAD(&dma_entry_hash[i].list);
892 		spin_lock_init(&dma_entry_hash[i].lock);
893 	}
894 
895 	dma_debug_fs_init();
896 
897 	nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
898 	for (i = 0; i < nr_pages; ++i)
899 		dma_debug_create_entries(GFP_KERNEL);
900 	if (num_free_entries >= nr_prealloc_entries) {
901 		pr_info("preallocated %d debug entries\n", nr_total_entries);
902 	} else if (num_free_entries > 0) {
903 		pr_warn("%d debug entries requested but only %d allocated\n",
904 			nr_prealloc_entries, nr_total_entries);
905 	} else {
906 		pr_err("debugging out of memory error - disabled\n");
907 		global_disable = true;
908 
909 		return 0;
910 	}
911 	min_free_entries = num_free_entries;
912 
913 	dma_debug_initialized = true;
914 
915 	pr_info("debugging enabled by kernel config\n");
916 	return 0;
917 }
918 core_initcall(dma_debug_init);
919 
920 static __init int dma_debug_cmdline(char *str)
921 {
922 	if (!str)
923 		return -EINVAL;
924 
925 	if (strncmp(str, "off", 3) == 0) {
926 		pr_info("debugging disabled on kernel command line\n");
927 		global_disable = true;
928 	}
929 
930 	return 0;
931 }
932 
933 static __init int dma_debug_entries_cmdline(char *str)
934 {
935 	if (!str)
936 		return -EINVAL;
937 	if (!get_option(&str, &nr_prealloc_entries))
938 		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
939 	return 0;
940 }
941 
942 __setup("dma_debug=", dma_debug_cmdline);
943 __setup("dma_debug_entries=", dma_debug_entries_cmdline);
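
/*
 * The corresponding kernel command line usage (values are examples only):
 *
 *	dma_debug=off			disable the checks entirely
 *	dma_debug_entries=131072	preallocate more entries than the
 *					default PREALLOC_DMA_DEBUG_ENTRIES
 */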
944 
945 static void check_unmap(struct dma_debug_entry *ref)
946 {
947 	struct dma_debug_entry *entry;
948 	struct hash_bucket *bucket;
949 	unsigned long flags;
950 
951 	bucket = get_hash_bucket(ref, &flags);
952 	entry = bucket_find_exact(bucket, ref);
953 
954 	if (!entry) {
955 		/* must drop lock before calling dma_mapping_error */
956 		put_hash_bucket(bucket, flags);
957 
958 		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
959 			err_printk(ref->dev, NULL,
960 				   "device driver tries to free an "
961 				   "invalid DMA memory address\n");
962 		} else {
963 			err_printk(ref->dev, NULL,
964 				   "device driver tries to free DMA "
965 				   "memory it has not allocated [device "
966 				   "address=0x%016llx] [size=%llu bytes]\n",
967 				   ref->dev_addr, ref->size);
968 		}
969 		return;
970 	}
971 
972 	if (ref->size != entry->size) {
973 		err_printk(ref->dev, entry, "device driver frees "
974 			   "DMA memory with different size "
975 			   "[device address=0x%016llx] [map size=%llu bytes] "
976 			   "[unmap size=%llu bytes]\n",
977 			   ref->dev_addr, entry->size, ref->size);
978 	}
979 
980 	if (ref->type != entry->type) {
981 		err_printk(ref->dev, entry, "device driver frees "
982 			   "DMA memory with wrong function "
983 			   "[device address=0x%016llx] [size=%llu bytes] "
984 			   "[mapped as %s] [unmapped as %s]\n",
985 			   ref->dev_addr, ref->size,
986 			   type2name[entry->type], type2name[ref->type]);
987 	} else if ((entry->type == dma_debug_coherent) &&
988 		   (phys_addr(ref) != phys_addr(entry))) {
989 		err_printk(ref->dev, entry, "device driver frees "
990 			   "DMA memory with different CPU address "
991 			   "[device address=0x%016llx] [size=%llu bytes] "
992 			   "[cpu alloc address=0x%016llx] "
993 			   "[cpu free address=0x%016llx]",
994 			   ref->dev_addr, ref->size,
995 			   phys_addr(entry),
996 			   phys_addr(ref));
997 	}
998 
999 	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1000 	    ref->sg_call_ents != entry->sg_call_ents) {
1001 		err_printk(ref->dev, entry, "device driver frees "
1002 			   "DMA sg list with different entry count "
1003 			   "[map count=%d] [unmap count=%d]\n",
1004 			   entry->sg_call_ents, ref->sg_call_ents);
1005 	}
1006 
1007 	/*
1008 	 * This may not be a bug in reality - but most implementations of the
1009 	 * DMA API don't handle this properly, so check for it here
1010 	 */
1011 	if (ref->direction != entry->direction) {
1012 		err_printk(ref->dev, entry, "device driver frees "
1013 			   "DMA memory with different direction "
1014 			   "[device address=0x%016llx] [size=%llu bytes] "
1015 			   "[mapped with %s] [unmapped with %s]\n",
1016 			   ref->dev_addr, ref->size,
1017 			   dir2name[entry->direction],
1018 			   dir2name[ref->direction]);
1019 	}
1020 
1021 	/*
1022 	 * Drivers should use dma_mapping_error() to check the returned
1023 	 * addresses of dma_map_single() and dma_map_page().
1024 	 * If not, print this warning message. See Documentation/core-api/dma-api.rst.
1025 	 */
1026 	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1027 		err_printk(ref->dev, entry,
1028 			   "device driver failed to check map error "
1029 			   "[device address=0x%016llx] [size=%llu bytes] "
1030 			   "[mapped as %s]",
1031 			   ref->dev_addr, ref->size,
1032 			   type2name[entry->type]);
1033 	}
1034 
1035 	hash_bucket_del(entry);
1036 	dma_entry_free(entry);
1037 
1038 	put_hash_bucket(bucket, flags);
1039 }
1040 
1041 static void check_for_stack(struct device *dev,
1042 			    struct page *page, size_t offset)
1043 {
1044 	void *addr;
1045 	struct vm_struct *stack_vm_area = task_stack_vm_area(current);
1046 
1047 	if (!stack_vm_area) {
1048 		/* Stack is direct-mapped. */
1049 		if (PageHighMem(page))
1050 			return;
1051 		addr = page_address(page) + offset;
1052 		if (object_is_on_stack(addr))
1053 			err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
1054 	} else {
1055 		/* Stack is vmalloced. */
1056 		int i;
1057 
1058 		for (i = 0; i < stack_vm_area->nr_pages; i++) {
1059 			if (page != stack_vm_area->pages[i])
1060 				continue;
1061 
1062 			addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
1063 			err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
1064 			break;
1065 		}
1066 	}
1067 }
1068 
1069 static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
1070 {
1071 	unsigned long a1 = (unsigned long)addr;
1072 	unsigned long b1 = a1 + len;
1073 	unsigned long a2 = (unsigned long)start;
1074 	unsigned long b2 = (unsigned long)end;
1075 
1076 	return !(b1 <= a2 || a1 >= b2);
1077 }
1078 
1079 static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
1080 {
1081 	if (overlap(addr, len, _stext, _etext) ||
1082 	    overlap(addr, len, __start_rodata, __end_rodata))
1083 		err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
1084 }
1085 
1086 static void check_sync(struct device *dev,
1087 		       struct dma_debug_entry *ref,
1088 		       bool to_cpu)
1089 {
1090 	struct dma_debug_entry *entry;
1091 	struct hash_bucket *bucket;
1092 	unsigned long flags;
1093 
1094 	bucket = get_hash_bucket(ref, &flags);
1095 
1096 	entry = bucket_find_contain(&bucket, ref, &flags);
1097 
1098 	if (!entry) {
1099 		err_printk(dev, NULL, "device driver tries "
1100 				"to sync DMA memory it has not allocated "
1101 				"[device address=0x%016llx] [size=%llu bytes]\n",
1102 				(unsigned long long)ref->dev_addr, ref->size);
1103 		goto out;
1104 	}
1105 
1106 	if (ref->size > entry->size) {
1107 		err_printk(dev, entry, "device driver syncs"
1108 				" DMA memory outside allocated range "
1109 				"[device address=0x%016llx] "
1110 				"[allocation size=%llu bytes] "
1111 				"[sync offset+size=%llu]\n",
1112 				entry->dev_addr, entry->size,
1113 				ref->size);
1114 	}
1115 
1116 	if (entry->direction == DMA_BIDIRECTIONAL)
1117 		goto out;
1118 
1119 	if (ref->direction != entry->direction) {
1120 		err_printk(dev, entry, "device driver syncs "
1121 				"DMA memory with different direction "
1122 				"[device address=0x%016llx] [size=%llu bytes] "
1123 				"[mapped with %s] [synced with %s]\n",
1124 				(unsigned long long)ref->dev_addr, entry->size,
1125 				dir2name[entry->direction],
1126 				dir2name[ref->direction]);
1127 	}
1128 
1129 	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
1130 		      !(ref->direction == DMA_TO_DEVICE))
1131 		err_printk(dev, entry, "device driver syncs "
1132 				"device read-only DMA memory for cpu "
1133 				"[device address=0x%016llx] [size=%llu bytes] "
1134 				"[mapped with %s] [synced with %s]\n",
1135 				(unsigned long long)ref->dev_addr, entry->size,
1136 				dir2name[entry->direction],
1137 				dir2name[ref->direction]);
1138 
1139 	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
1140 		       !(ref->direction == DMA_FROM_DEVICE))
1141 		err_printk(dev, entry, "device driver syncs "
1142 				"device write-only DMA memory to device "
1143 				"[device address=0x%016llx] [size=%llu bytes] "
1144 				"[mapped with %s] [synced with %s]\n",
1145 				(unsigned long long)ref->dev_addr, entry->size,
1146 				dir2name[entry->direction],
1147 				dir2name[ref->direction]);
1148 
1149 	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1150 	    ref->sg_call_ents != entry->sg_call_ents) {
1151 		err_printk(ref->dev, entry, "device driver syncs "
1152 			   "DMA sg list with different entry count "
1153 			   "[map count=%d] [sync count=%d]\n",
1154 			   entry->sg_call_ents, ref->sg_call_ents);
1155 	}
1156 
1157 out:
1158 	put_hash_bucket(bucket, flags);
1159 }
1160 
1161 static void check_sg_segment(struct device *dev, struct scatterlist *sg)
1162 {
1163 #ifdef CONFIG_DMA_API_DEBUG_SG
1164 	unsigned int max_seg = dma_get_max_seg_size(dev);
1165 	u64 start, end, boundary = dma_get_seg_boundary(dev);
1166 
1167 	/*
1168 	 * Either the driver forgot to set dma_parms appropriately, or
1169 	 * whoever generated the list forgot to check them.
1170 	 */
1171 	if (sg->length > max_seg)
1172 		err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
1173 			   sg->length, max_seg);
1174 	/*
1175 	 * In some cases this could potentially be the DMA API
1176 	 * implementation's fault, but it would usually imply that
1177 	 * the scatterlist was built inappropriately to begin with.
1178 	 */
1179 	start = sg_dma_address(sg);
1180 	end = start + sg_dma_len(sg) - 1;
1181 	if ((start ^ end) & ~boundary)
1182 		err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
1183 			   start, end, boundary);
1184 #endif
1185 }
1186 
1187 void debug_dma_map_single(struct device *dev, const void *addr,
1188 			    unsigned long len)
1189 {
1190 	if (unlikely(dma_debug_disabled()))
1191 		return;
1192 
1193 	if (!virt_addr_valid(addr))
1194 		err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
1195 			   addr, len);
1196 
1197 	if (is_vmalloc_addr(addr))
1198 		err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
1199 			   addr, len);
1200 }
1201 EXPORT_SYMBOL(debug_dma_map_single);
1202 
1203 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
1204 			size_t size, int direction, dma_addr_t dma_addr)
1205 {
1206 	struct dma_debug_entry *entry;
1207 
1208 	if (unlikely(dma_debug_disabled()))
1209 		return;
1210 
1211 	if (dma_mapping_error(dev, dma_addr))
1212 		return;
1213 
1214 	entry = dma_entry_alloc();
1215 	if (!entry)
1216 		return;
1217 
1218 	entry->dev       = dev;
1219 	entry->type      = dma_debug_single;
1220 	entry->pfn	 = page_to_pfn(page);
1221 	entry->offset	 = offset;
1222 	entry->dev_addr  = dma_addr;
1223 	entry->size      = size;
1224 	entry->direction = direction;
1225 	entry->map_err_type = MAP_ERR_NOT_CHECKED;
1226 
1227 	check_for_stack(dev, page, offset);
1228 
1229 	if (!PageHighMem(page)) {
1230 		void *addr = page_address(page) + offset;
1231 
1232 		check_for_illegal_area(dev, addr, size);
1233 	}
1234 
1235 	add_dma_entry(entry);
1236 }
1237 
1238 void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1239 {
1240 	struct dma_debug_entry ref;
1241 	struct dma_debug_entry *entry;
1242 	struct hash_bucket *bucket;
1243 	unsigned long flags;
1244 
1245 	if (unlikely(dma_debug_disabled()))
1246 		return;
1247 
1248 	ref.dev = dev;
1249 	ref.dev_addr = dma_addr;
1250 	bucket = get_hash_bucket(&ref, &flags);
1251 
1252 	list_for_each_entry(entry, &bucket->list, list) {
1253 		if (!exact_match(&ref, entry))
1254 			continue;
1255 
1256 		/*
1257 		 * The same physical address can be mapped multiple
1258 		 * times. Without a hardware IOMMU this results in the
1259 		 * same device addresses being put into the dma-debug
1260 		 * hash multiple times too. This can result in false
1261 		 * positives being reported. Therefore we implement a
1262 		 * best-fit algorithm here which updates the first entry
1263 		 * from the hash which fits the reference value and is
1264 		 * not currently listed as being checked.
1265 		 */
1266 		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1267 			entry->map_err_type = MAP_ERR_CHECKED;
1268 			break;
1269 		}
1270 	}
1271 
1272 	put_hash_bucket(bucket, flags);
1273 }
1274 EXPORT_SYMBOL(debug_dma_mapping_error);
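
/*
 * A minimal sketch of the driver-side pattern this hook rewards (not code
 * from this file): calling dma_mapping_error() on the returned handle is
 * what flips map_err_type to MAP_ERR_CHECKED and avoids the "failed to
 * check map error" warning in check_unmap().
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */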
1275 
1276 void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
1277 			  size_t size, int direction)
1278 {
1279 	struct dma_debug_entry ref = {
1280 		.type           = dma_debug_single,
1281 		.dev            = dev,
1282 		.dev_addr       = addr,
1283 		.size           = size,
1284 		.direction      = direction,
1285 	};
1286 
1287 	if (unlikely(dma_debug_disabled()))
1288 		return;
1289 	check_unmap(&ref);
1290 }
1291 
1292 void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
1293 		      int nents, int mapped_ents, int direction)
1294 {
1295 	struct dma_debug_entry *entry;
1296 	struct scatterlist *s;
1297 	int i;
1298 
1299 	if (unlikely(dma_debug_disabled()))
1300 		return;
1301 
1302 	for_each_sg(sg, s, mapped_ents, i) {
1303 		entry = dma_entry_alloc();
1304 		if (!entry)
1305 			return;
1306 
1307 		entry->type           = dma_debug_sg;
1308 		entry->dev            = dev;
1309 		entry->pfn	      = page_to_pfn(sg_page(s));
1310 		entry->offset	      = s->offset;
1311 		entry->size           = sg_dma_len(s);
1312 		entry->dev_addr       = sg_dma_address(s);
1313 		entry->direction      = direction;
1314 		entry->sg_call_ents   = nents;
1315 		entry->sg_mapped_ents = mapped_ents;
1316 
1317 		check_for_stack(dev, sg_page(s), s->offset);
1318 
1319 		if (!PageHighMem(sg_page(s))) {
1320 			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
1321 		}
1322 
1323 		check_sg_segment(dev, s);
1324 
1325 		add_dma_entry(entry);
1326 	}
1327 }
1328 
1329 static int get_nr_mapped_entries(struct device *dev,
1330 				 struct dma_debug_entry *ref)
1331 {
1332 	struct dma_debug_entry *entry;
1333 	struct hash_bucket *bucket;
1334 	unsigned long flags;
1335 	int mapped_ents;
1336 
1337 	bucket       = get_hash_bucket(ref, &flags);
1338 	entry        = bucket_find_exact(bucket, ref);
1339 	mapped_ents  = 0;
1340 
1341 	if (entry)
1342 		mapped_ents = entry->sg_mapped_ents;
1343 	put_hash_bucket(bucket, flags);
1344 
1345 	return mapped_ents;
1346 }
1347 
1348 void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
1349 			int nelems, int dir)
1350 {
1351 	struct scatterlist *s;
1352 	int mapped_ents = 0, i;
1353 
1354 	if (unlikely(dma_debug_disabled()))
1355 		return;
1356 
1357 	for_each_sg(sglist, s, nelems, i) {
1358 
1359 		struct dma_debug_entry ref = {
1360 			.type           = dma_debug_sg,
1361 			.dev            = dev,
1362 			.pfn		= page_to_pfn(sg_page(s)),
1363 			.offset		= s->offset,
1364 			.dev_addr       = sg_dma_address(s),
1365 			.size           = sg_dma_len(s),
1366 			.direction      = dir,
1367 			.sg_call_ents   = nelems,
1368 		};
1369 
1370 		if (mapped_ents && i >= mapped_ents)
1371 			break;
1372 
1373 		if (!i)
1374 			mapped_ents = get_nr_mapped_entries(dev, &ref);
1375 
1376 		check_unmap(&ref);
1377 	}
1378 }
1379 
1380 void debug_dma_alloc_coherent(struct device *dev, size_t size,
1381 			      dma_addr_t dma_addr, void *virt)
1382 {
1383 	struct dma_debug_entry *entry;
1384 
1385 	if (unlikely(dma_debug_disabled()))
1386 		return;
1387 
1388 	if (unlikely(virt == NULL))
1389 		return;
1390 
1391 	/* handle vmalloc and linear addresses */
1392 	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1393 		return;
1394 
1395 	entry = dma_entry_alloc();
1396 	if (!entry)
1397 		return;
1398 
1399 	entry->type      = dma_debug_coherent;
1400 	entry->dev       = dev;
1401 	entry->offset	 = offset_in_page(virt);
1402 	entry->size      = size;
1403 	entry->dev_addr  = dma_addr;
1404 	entry->direction = DMA_BIDIRECTIONAL;
1405 
1406 	if (is_vmalloc_addr(virt))
1407 		entry->pfn = vmalloc_to_pfn(virt);
1408 	else
1409 		entry->pfn = page_to_pfn(virt_to_page(virt));
1410 
1411 	add_dma_entry(entry);
1412 }
1413 
1414 void debug_dma_free_coherent(struct device *dev, size_t size,
1415 			 void *virt, dma_addr_t addr)
1416 {
1417 	struct dma_debug_entry ref = {
1418 		.type           = dma_debug_coherent,
1419 		.dev            = dev,
1420 		.offset		= offset_in_page(virt),
1421 		.dev_addr       = addr,
1422 		.size           = size,
1423 		.direction      = DMA_BIDIRECTIONAL,
1424 	};
1425 
1426 	/* handle vmalloc and linear addresses */
1427 	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1428 		return;
1429 
1430 	if (is_vmalloc_addr(virt))
1431 		ref.pfn = vmalloc_to_pfn(virt);
1432 	else
1433 		ref.pfn = page_to_pfn(virt_to_page(virt));
1434 
1435 	if (unlikely(dma_debug_disabled()))
1436 		return;
1437 
1438 	check_unmap(&ref);
1439 }
1440 
1441 void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
1442 			    int direction, dma_addr_t dma_addr)
1443 {
1444 	struct dma_debug_entry *entry;
1445 
1446 	if (unlikely(dma_debug_disabled()))
1447 		return;
1448 
1449 	entry = dma_entry_alloc();
1450 	if (!entry)
1451 		return;
1452 
1453 	entry->type		= dma_debug_resource;
1454 	entry->dev		= dev;
1455 	entry->pfn		= PHYS_PFN(addr);
1456 	entry->offset		= offset_in_page(addr);
1457 	entry->size		= size;
1458 	entry->dev_addr		= dma_addr;
1459 	entry->direction	= direction;
1460 	entry->map_err_type	= MAP_ERR_NOT_CHECKED;
1461 
1462 	add_dma_entry(entry);
1463 }
1464 
1465 void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
1466 			      size_t size, int direction)
1467 {
1468 	struct dma_debug_entry ref = {
1469 		.type           = dma_debug_resource,
1470 		.dev            = dev,
1471 		.dev_addr       = dma_addr,
1472 		.size           = size,
1473 		.direction      = direction,
1474 	};
1475 
1476 	if (unlikely(dma_debug_disabled()))
1477 		return;
1478 
1479 	check_unmap(&ref);
1480 }
1481 
1482 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1483 				   size_t size, int direction)
1484 {
1485 	struct dma_debug_entry ref;
1486 
1487 	if (unlikely(dma_debug_disabled()))
1488 		return;
1489 
1490 	ref.type         = dma_debug_single;
1491 	ref.dev          = dev;
1492 	ref.dev_addr     = dma_handle;
1493 	ref.size         = size;
1494 	ref.direction    = direction;
1495 	ref.sg_call_ents = 0;
1496 
1497 	check_sync(dev, &ref, true);
1498 }
1499 
1500 void debug_dma_sync_single_for_device(struct device *dev,
1501 				      dma_addr_t dma_handle, size_t size,
1502 				      int direction)
1503 {
1504 	struct dma_debug_entry ref;
1505 
1506 	if (unlikely(dma_debug_disabled()))
1507 		return;
1508 
1509 	ref.type         = dma_debug_single;
1510 	ref.dev          = dev;
1511 	ref.dev_addr     = dma_handle;
1512 	ref.size         = size;
1513 	ref.direction    = direction;
1514 	ref.sg_call_ents = 0;
1515 
1516 	check_sync(dev, &ref, false);
1517 }
1518 
1519 void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1520 			       int nelems, int direction)
1521 {
1522 	struct scatterlist *s;
1523 	int mapped_ents = 0, i;
1524 
1525 	if (unlikely(dma_debug_disabled()))
1526 		return;
1527 
1528 	for_each_sg(sg, s, nelems, i) {
1529 
1530 		struct dma_debug_entry ref = {
1531 			.type           = dma_debug_sg,
1532 			.dev            = dev,
1533 			.pfn		= page_to_pfn(sg_page(s)),
1534 			.offset		= s->offset,
1535 			.dev_addr       = sg_dma_address(s),
1536 			.size           = sg_dma_len(s),
1537 			.direction      = direction,
1538 			.sg_call_ents   = nelems,
1539 		};
1540 
1541 		if (!i)
1542 			mapped_ents = get_nr_mapped_entries(dev, &ref);
1543 
1544 		if (i >= mapped_ents)
1545 			break;
1546 
1547 		check_sync(dev, &ref, true);
1548 	}
1549 }
1550 
1551 void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1552 				  int nelems, int direction)
1553 {
1554 	struct scatterlist *s;
1555 	int mapped_ents = 0, i;
1556 
1557 	if (unlikely(dma_debug_disabled()))
1558 		return;
1559 
1560 	for_each_sg(sg, s, nelems, i) {
1561 
1562 		struct dma_debug_entry ref = {
1563 			.type           = dma_debug_sg,
1564 			.dev            = dev,
1565 			.pfn		= page_to_pfn(sg_page(s)),
1566 			.offset		= s->offset,
1567 			.dev_addr       = sg_dma_address(s),
1568 			.size           = sg_dma_len(s),
1569 			.direction      = direction,
1570 			.sg_call_ents   = nelems,
1571 		};
1572 		if (!i)
1573 			mapped_ents = get_nr_mapped_entries(dev, &ref);
1574 
1575 		if (i >= mapped_ents)
1576 			break;
1577 
1578 		check_sync(dev, &ref, false);
1579 	}
1580 }
1581 
1582 static int __init dma_debug_driver_setup(char *str)
1583 {
1584 	int i;
1585 
1586 	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
1587 		current_driver_name[i] = *str;
1588 		if (*str == 0)
1589 			break;
1590 	}
1591 
1592 	if (current_driver_name[0])
1593 		pr_info("enable driver filter for driver [%s]\n",
1594 			current_driver_name);
1595 
1596 
1597 	return 1;
1598 }
1599 __setup("dma_debug_driver=", dma_debug_driver_setup);
1600