xref: /linux/kernel/dma/debug.c (revision 23b0f90ba871f096474e1c27c3d14f455189d2d9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2008 Advanced Micro Devices, Inc.
4  *
5  * Author: Joerg Roedel <joerg.roedel@amd.com>
6  */
7 
8 #define pr_fmt(fmt)	"DMA-API: " fmt
9 
10 #include <linux/sched/task_stack.h>
11 #include <linux/scatterlist.h>
12 #include <linux/dma-map-ops.h>
13 #include <linux/sched/task.h>
14 #include <linux/stacktrace.h>
15 #include <linux/spinlock.h>
16 #include <linux/vmalloc.h>
17 #include <linux/debugfs.h>
18 #include <linux/uaccess.h>
19 #include <linux/export.h>
20 #include <linux/device.h>
21 #include <linux/types.h>
22 #include <linux/sched.h>
23 #include <linux/ctype.h>
24 #include <linux/list.h>
25 #include <linux/slab.h>
26 #include <linux/swiotlb.h>
27 #include <asm/sections.h>
28 #include "debug.h"
29 
30 #define HASH_SIZE       16384ULL
31 #define HASH_FN_SHIFT   13
32 #define HASH_FN_MASK    (HASH_SIZE - 1)
33 
34 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
35 /* If the pool runs out, add this many new entries at once */
36 #define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))
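/*
 * Rough sizing sketch, assuming 4 KiB pages and a cacheline-aligned entry of
 * roughly 128 bytes: each dynamically allocated page then yields about 32 new
 * entries. The exact count depends on the architecture and kernel config.
 */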
37 
38 enum {
39 	dma_debug_single,
40 	dma_debug_sg,
41 	dma_debug_coherent,
42 	dma_debug_noncoherent,
43 	dma_debug_phy,
44 };
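
/*
 * dma_debug_phy entries are created by debug_dma_map_phys(), dma_debug_sg by
 * debug_dma_map_sg(), dma_debug_coherent by debug_dma_alloc_coherent() and
 * dma_debug_noncoherent by debug_dma_alloc_pages(). dma_debug_single is only
 * used for the reference entries built by the sync_single helpers.
 */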
45 
46 enum map_err_types {
47 	MAP_ERR_CHECK_NOT_APPLICABLE,
48 	MAP_ERR_NOT_CHECKED,
49 	MAP_ERR_CHECKED,
50 };
51 
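/* number of backtrace frames saved per mapping when CONFIG_STACKTRACE is set */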
52 #define DMA_DEBUG_STACKTRACE_ENTRIES 5
53 
54 /**
55  * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
56  * @list: node on pre-allocated free_entries list
57  * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
58  * @dev_addr: dma address
59  * @size: length of the mapping
60  * @type: single, sg, coherent, noncoherent or phy mapping
61  * @direction: enum dma_data_direction
62  * @sg_call_ents: 'nents' from dma_map_sg
63  * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
64  * @paddr: physical start address of the mapping
65  * @map_err_type: track whether dma_mapping_error() was checked
66  * @is_cache_clean: driver promises not to write to buffer while mapped
67  * @stack_len: number of backtrace entries in @stack_entries
68  * @stack_entries: stack of backtrace history
69  */
70 struct dma_debug_entry {
71 	struct list_head list;
72 	struct device    *dev;
73 	u64              dev_addr;
74 	u64              size;
75 	int              type;
76 	int              direction;
77 	int		 sg_call_ents;
78 	int		 sg_mapped_ents;
79 	phys_addr_t	 paddr;
80 	enum map_err_types map_err_type;
81 	bool		 is_cache_clean;
82 #ifdef CONFIG_STACKTRACE
83 	unsigned int	stack_len;
84 	unsigned long	stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
85 #endif
86 } ____cacheline_aligned_in_smp;
87 
88 typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
89 
90 struct hash_bucket {
91 	struct list_head list;
92 	spinlock_t lock;
93 };
94 
95 /* Hash list to save the allocated dma addresses */
96 static struct hash_bucket dma_entry_hash[HASH_SIZE];
97 /* List of pre-allocated dma_debug_entry's */
98 static LIST_HEAD(free_entries);
99 /* Lock for the list above */
100 static DEFINE_SPINLOCK(free_entries_lock);
101 
102 /* Global disable flag - will be set in case of an error */
103 static bool global_disable __read_mostly;
104 
105 /* Set at the end of dma_debug_init(); dma-debug stays disabled until then */
106 static bool dma_debug_initialized __read_mostly;
107 
108 static inline bool dma_debug_disabled(void)
109 {
110 	return global_disable || !dma_debug_initialized;
111 }
112 
113 /* Global error count */
114 static u32 error_count;
115 
116 /* Global error show enable */
117 static u32 show_all_errors __read_mostly;
118 /* Number of errors to show */
119 static u32 show_num_errors = 1;
120 
121 static u32 num_free_entries;
122 static u32 min_free_entries;
123 static u32 nr_total_entries;
124 
125 /* number of preallocated entries requested by kernel cmdline */
126 static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
127 
128 /* per-driver filter related state */
129 
130 #define NAME_MAX_LEN	64
131 
132 static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
133 static struct device_driver *current_driver                    __read_mostly;
134 
135 static DEFINE_RWLOCK(driver_name_lock);
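
/*
 * The filter can be set at boot time with the dma_debug_driver=<name> option
 * or at runtime by writing a driver name to the "driver_filter" debugfs file
 * (see dma_debug_driver_setup() and filter_write() below).
 */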
136 
137 static const char *const maperr2str[] = {
138 	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
139 	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
140 	[MAP_ERR_CHECKED] = "dma map error checked",
141 };
142 
143 static const char *type2name[] = {
144 	[dma_debug_single] = "single",
145 	[dma_debug_sg] = "scatter-gather",
146 	[dma_debug_coherent] = "coherent",
147 	[dma_debug_noncoherent] = "noncoherent",
148 	[dma_debug_phy] = "phy",
149 };
150 
151 static const char *dir2name[] = {
152 	[DMA_BIDIRECTIONAL]	= "DMA_BIDIRECTIONAL",
153 	[DMA_TO_DEVICE]		= "DMA_TO_DEVICE",
154 	[DMA_FROM_DEVICE]	= "DMA_FROM_DEVICE",
155 	[DMA_NONE]		= "DMA_NONE",
156 };
157 
158 /*
159  * The access to some variables in the err_printk() macro below is racy. We
160  * can't use atomic_t here because all these variables are exported to
161  * debugfs, and some of them are even writable. This is also the reason why
162  * a lock won't help much. But anyway, the races are no big deal. Here is why:
163  *
164  *   error_count: the addition is racy, but the worst thing that can happen is
165  *                that we don't count some errors
166  *   show_num_errors: the subtraction is racy. Also no big deal because in
167  *                    worst case this will result in one warning more in the
168  *                    system log than the user configured. This variable is
169  *                    writeable via debugfs.
170  */
171 static inline void dump_entry_trace(struct dma_debug_entry *entry)
172 {
173 #ifdef CONFIG_STACKTRACE
174 	if (entry) {
175 		pr_warn("Mapped at:\n");
176 		stack_trace_print(entry->stack_entries, entry->stack_len, 0);
177 	}
178 #endif
179 }
180 
181 static bool driver_filter(struct device *dev)
182 {
183 	struct device_driver *drv;
184 	unsigned long flags;
185 	bool ret;
186 
187 	/* driver filter off */
188 	if (likely(!current_driver_name[0]))
189 		return true;
190 
191 	/* driver filter on and initialized */
192 	if (current_driver && dev && dev->driver == current_driver)
193 		return true;
194 
195 	/* driver filter on, but we can't filter on a NULL device... */
196 	if (!dev)
197 		return false;
198 
199 	if (current_driver || !current_driver_name[0])
200 		return false;
201 
202 	/* driver filter on but not yet initialized */
203 	drv = dev->driver;
204 	if (!drv)
205 		return false;
206 
207 	/* lock to protect against change of current_driver_name */
208 	read_lock_irqsave(&driver_name_lock, flags);
209 
210 	ret = false;
211 	if (drv->name &&
212 	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
213 		current_driver = drv;
214 		ret = true;
215 	}
216 
217 	read_unlock_irqrestore(&driver_name_lock, flags);
218 
219 	return ret;
220 }
221 
222 #define err_printk(dev, entry, format, arg...) do {			\
223 		error_count += 1;					\
224 		if (driver_filter(dev) &&				\
225 		    (show_all_errors || show_num_errors > 0)) {		\
226 			WARN(1, pr_fmt("%s %s: ") format,		\
227 			     dev ? dev_driver_string(dev) : "NULL",	\
228 			     dev ? dev_name(dev) : "NULL", ## arg);	\
229 			dump_entry_trace(entry);			\
230 		}							\
231 		if (!show_all_errors && show_num_errors > 0)		\
232 			show_num_errors -= 1;				\
233 	} while (0)
234 
235 /*
236  * Hash related functions
237  *
238  * Every DMA-API request is saved into a struct dma_debug_entry. To
239  * have quick access to these structs they are stored into a hash.
240  */
241 static int hash_fn(struct dma_debug_entry *entry)
242 {
243 	/*
244 	 * The hash function is based on the dma address. With HASH_FN_SHIFT set
245 	 * to 13 and 16384 buckets, bits 13-26 of the address form the index.
246 	 */
247 	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
248 }
249 
250 /*
251  * Request exclusive access to a hash bucket for a given dma_debug_entry.
252  */
253 static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
254 					   unsigned long *flags)
255 	__acquires(&dma_entry_hash[idx].lock)
256 {
257 	int idx = hash_fn(entry);
258 	unsigned long __flags;
259 
260 	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
261 	*flags = __flags;
262 	return &dma_entry_hash[idx];
263 }
264 
265 /*
266  * Give up exclusive access to the hash bucket
267  */
268 static void put_hash_bucket(struct hash_bucket *bucket,
269 			    unsigned long flags)
270 	__releases(&bucket->lock)
271 {
272 	spin_unlock_irqrestore(&bucket->lock, flags);
273 }
274 
275 static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
276 {
277 	return ((a->dev_addr == b->dev_addr) &&
278 		(a->dev == b->dev)) ? true : false;
279 }
280 
281 static bool containing_match(struct dma_debug_entry *a,
282 			     struct dma_debug_entry *b)
283 {
284 	if (a->dev != b->dev)
285 		return false;
286 
287 	if ((b->dev_addr <= a->dev_addr) &&
288 	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
289 		return true;
290 
291 	return false;
292 }
293 
294 /*
295  * Search for a given entry in the hash bucket list
296  */
297 static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
298 						  struct dma_debug_entry *ref,
299 						  match_fn match)
300 {
301 	struct dma_debug_entry *entry, *ret = NULL;
302 	int matches = 0, match_lvl, last_lvl = -1;
303 
304 	list_for_each_entry(entry, &bucket->list, list) {
305 		if (!match(ref, entry))
306 			continue;
307 
308 		/*
309 		 * Some drivers map the same physical address multiple
310 		 * times. Without a hardware IOMMU this results in the
311 		 * same device addresses being put into the dma-debug
312 		 * hash multiple times too. This can result in false
313 		 * positives being reported. Therefore we implement a
314 		 * best-fit algorithm here which returns the entry from
315 		 * the hash which fits best to the reference value
316 		 * instead of the first-fit.
317 		 */
318 		matches += 1;
319 		match_lvl = 0;
320 		entry->size         == ref->size         ? ++match_lvl : 0;
321 		entry->type         == ref->type         ? ++match_lvl : 0;
322 		entry->direction    == ref->direction    ? ++match_lvl : 0;
323 		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
324 
325 		if (match_lvl == 4) {
326 			/* perfect-fit - return the result */
327 			return entry;
328 		} else if (match_lvl > last_lvl) {
329 			/*
330 			 * We found an entry that fits better than the
331 			 * previous one, or it is the first match.
332 			 */
333 			last_lvl = match_lvl;
334 			ret      = entry;
335 		}
336 	}
337 
338 	/*
339 	 * If we have multiple matches but no perfect-fit, just return
340 	 * NULL.
341 	 */
342 	ret = (matches == 1) ? ret : NULL;
343 
344 	return ret;
345 }
346 
347 static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
348 						 struct dma_debug_entry *ref)
349 {
350 	return __hash_bucket_find(bucket, ref, exact_match);
351 }
352 
353 static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
354 						   struct dma_debug_entry *ref,
355 						   unsigned long *flags)
356 {
357 
358 	struct dma_debug_entry *entry, index = *ref;
359 	int limit = min(HASH_SIZE, (index.dev_addr >> HASH_FN_SHIFT) + 1);
360 
361 	for (int i = 0; i < limit; i++) {
362 		entry = __hash_bucket_find(*bucket, ref, containing_match);
363 
364 		if (entry)
365 			return entry;
366 
367 		/*
368 		 * Nothing found, go back a hash bucket
369 		 */
370 		put_hash_bucket(*bucket, *flags);
371 		index.dev_addr -= (1 << HASH_FN_SHIFT);
372 		*bucket = get_hash_bucket(&index, flags);
373 	}
374 
375 	return NULL;
376 }
377 
378 /*
379  * Add an entry to a hash bucket
380  */
381 static void hash_bucket_add(struct hash_bucket *bucket,
382 			    struct dma_debug_entry *entry)
383 {
384 	list_add_tail(&entry->list, &bucket->list);
385 }
386 
387 /*
388  * Remove entry from a hash bucket list
389  */
390 static void hash_bucket_del(struct dma_debug_entry *entry)
391 {
392 	list_del(&entry->list);
393 }
394 
395 /*
396  * For each mapping (initial cacheline in the case of
397  * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
398  * scatterlist, or the cacheline specified in dma_map_single) insert
399  * into this tree using the cacheline as the key. At
400  * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry.  If
401  * the entry already exists at insertion time add a tag as a reference
402  * count for the overlapping mappings.  For now, the overlap tracking
403  * just ensures that 'unmaps' balance 'maps' before marking the
404  * cacheline idle, but we should also be flagging overlaps as an API
405  * violation.
406  *
407  * Memory usage is mostly constrained by the maximum number of available
408  * dma-debug entries in that we need a free dma_debug_entry before
409  * inserting into the tree.  In the case of dma_map_page and
410  * dma_alloc_coherent there is only one dma_debug_entry and one
411  * dma_active_cacheline entry to track per event.  dma_map_sg(), on the
412  * other hand, consumes a single dma_debug_entry, but inserts 'nents'
413  * entries into the tree.
414  *
415  * Use __GFP_NOWARN because the printk from an OOM, to netconsole, could end
416  * up right back in the DMA debugging code, leading to a deadlock.
417  */
418 static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC | __GFP_NOWARN);
419 static DEFINE_SPINLOCK(radix_lock);
420 #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
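/*
 * With RADIX_TREE_MAX_TAGS tags available (3 in current kernels) this allows
 * counting up to 7 overlapping mappings of one cacheline before
 * active_cacheline_inc_overlap() warns about a potential leak.
 */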
421 #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
422 #define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
423 
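/*
 * Example, assuming 4 KiB pages and 64-byte cache lines (so
 * CACHELINE_PER_PAGE_SHIFT == 6): paddr 0x1040 lives in page 0x1, cacheline 1
 * within that page, and therefore gets cacheline number (1 << 6) + 1 = 65.
 */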
424 static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
425 {
426 	return ((entry->paddr >> PAGE_SHIFT) << CACHELINE_PER_PAGE_SHIFT) +
427 		(offset_in_page(entry->paddr) >> L1_CACHE_SHIFT);
428 }
429 
430 static int active_cacheline_read_overlap(phys_addr_t cln)
431 {
432 	int overlap = 0, i;
433 
434 	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
435 		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
436 			overlap |= 1 << i;
437 	return overlap;
438 }
439 
440 static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
441 {
442 	int i;
443 
444 	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
445 		return overlap;
446 
447 	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
448 		if (overlap & 1 << i)
449 			radix_tree_tag_set(&dma_active_cacheline, cln, i);
450 		else
451 			radix_tree_tag_clear(&dma_active_cacheline, cln, i);
452 
453 	return overlap;
454 }
455 
456 static void active_cacheline_inc_overlap(phys_addr_t cln)
457 {
458 	int overlap = active_cacheline_read_overlap(cln);
459 
460 	overlap = active_cacheline_set_overlap(cln, ++overlap);
461 
462 	/* If we overflowed the overlap counter then we're potentially
463 	 * leaking dma-mappings.
464 	 */
465 	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
466 		  pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
467 		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
468 }
469 
470 static int active_cacheline_dec_overlap(phys_addr_t cln)
471 {
472 	int overlap = active_cacheline_read_overlap(cln);
473 
474 	return active_cacheline_set_overlap(cln, --overlap);
475 }
476 
477 static int active_cacheline_insert(struct dma_debug_entry *entry,
478 				   bool *overlap_cache_clean)
479 {
480 	phys_addr_t cln = to_cacheline_number(entry);
481 	unsigned long flags;
482 	int rc;
483 
484 	*overlap_cache_clean = false;
485 
486 	/* If the device is not writing memory then we don't have any
487 	 * concerns about the cpu consuming stale data.  This mitigates
488 	 * legitimate usages of overlapping mappings.
489 	 */
490 	if (entry->direction == DMA_TO_DEVICE)
491 		return 0;
492 
493 	spin_lock_irqsave(&radix_lock, flags);
494 	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
495 	if (rc == -EEXIST) {
496 		struct dma_debug_entry *existing;
497 
498 		active_cacheline_inc_overlap(cln);
499 		existing = radix_tree_lookup(&dma_active_cacheline, cln);
500 		/* A lookup failure here after we got -EEXIST is unexpected. */
501 		WARN_ON(!existing);
502 		if (existing)
503 			*overlap_cache_clean = existing->is_cache_clean;
504 	}
505 	spin_unlock_irqrestore(&radix_lock, flags);
506 
507 	return rc;
508 }
509 
510 static void active_cacheline_remove(struct dma_debug_entry *entry)
511 {
512 	phys_addr_t cln = to_cacheline_number(entry);
513 	unsigned long flags;
514 
515 	/* ...mirror the insert case */
516 	if (entry->direction == DMA_TO_DEVICE)
517 		return;
518 
519 	spin_lock_irqsave(&radix_lock, flags);
520 	/* since we are counting overlaps the final put of the
521 	 * cacheline will occur when the overlap count is 0.
522 	 * active_cacheline_dec_overlap() returns -1 in that case
523 	 */
524 	if (active_cacheline_dec_overlap(cln) < 0)
525 		radix_tree_delete(&dma_active_cacheline, cln);
526 	spin_unlock_irqrestore(&radix_lock, flags);
527 }
528 
529 /*
530  * Dump mapping entries to the kernel log for debugging purposes
531  */
532 void debug_dma_dump_mappings(struct device *dev)
533 {
534 	int idx;
535 	phys_addr_t cln;
536 
537 	for (idx = 0; idx < HASH_SIZE; idx++) {
538 		struct hash_bucket *bucket = &dma_entry_hash[idx];
539 		struct dma_debug_entry *entry;
540 		unsigned long flags;
541 
542 		spin_lock_irqsave(&bucket->lock, flags);
543 		list_for_each_entry(entry, &bucket->list, list) {
544 			if (!dev || dev == entry->dev) {
545 				cln = to_cacheline_number(entry);
546 				dev_info(entry->dev,
547 					 "%s idx %d P=%pa D=%llx L=%llx cln=%pa %s %s\n",
548 					 type2name[entry->type], idx,
549 					 &entry->paddr, entry->dev_addr,
550 					 entry->size, &cln,
551 					 dir2name[entry->direction],
552 					 maperr2str[entry->map_err_type]);
553 			}
554 		}
555 		spin_unlock_irqrestore(&bucket->lock, flags);
556 
557 		cond_resched();
558 	}
559 }
560 
561 /*
562  * Dump mapping entries to user space via debugfs
563  */
564 static int dump_show(struct seq_file *seq, void *v)
565 {
566 	int idx;
567 	phys_addr_t cln;
568 
569 	for (idx = 0; idx < HASH_SIZE; idx++) {
570 		struct hash_bucket *bucket = &dma_entry_hash[idx];
571 		struct dma_debug_entry *entry;
572 		unsigned long flags;
573 
574 		spin_lock_irqsave(&bucket->lock, flags);
575 		list_for_each_entry(entry, &bucket->list, list) {
576 			cln = to_cacheline_number(entry);
577 			seq_printf(seq,
578 				   "%s %s %s idx %d P=%pa D=%llx L=%llx cln=%pa %s %s\n",
579 				   dev_driver_string(entry->dev),
580 				   dev_name(entry->dev),
581 				   type2name[entry->type], idx,
582 				   &entry->paddr, entry->dev_addr,
583 				   entry->size, &cln,
584 				   dir2name[entry->direction],
585 				   maperr2str[entry->map_err_type]);
586 		}
587 		spin_unlock_irqrestore(&bucket->lock, flags);
588 	}
589 	return 0;
590 }
591 DEFINE_SHOW_ATTRIBUTE(dump);
592 
593 /*
594  * Wrapper function for adding an entry to the hash.
595  * This function takes care of locking itself.
596  */
597 static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
598 {
599 	bool overlap_cache_clean;
600 	struct hash_bucket *bucket;
601 	unsigned long flags;
602 	int rc;
603 
604 	entry->is_cache_clean = !!(attrs & DMA_ATTR_CPU_CACHE_CLEAN);
605 
606 	bucket = get_hash_bucket(entry, &flags);
607 	hash_bucket_add(bucket, entry);
608 	put_hash_bucket(bucket, flags);
609 
610 	rc = active_cacheline_insert(entry, &overlap_cache_clean);
611 	if (rc == -ENOMEM) {
612 		pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
613 		global_disable = true;
614 	} else if (rc == -EEXIST &&
615 		   !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
616 		   !(entry->is_cache_clean && overlap_cache_clean) &&
617 		   !(IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
618 		     is_swiotlb_active(entry->dev))) {
619 		err_printk(entry->dev, entry,
620 			"cacheline tracking EEXIST, overlapping mappings aren't supported\n");
621 	}
622 }
623 
624 static int dma_debug_create_entries(gfp_t gfp)
625 {
626 	struct dma_debug_entry *entry;
627 	int i;
628 
629 	entry = (void *)get_zeroed_page(gfp);
630 	if (!entry)
631 		return -ENOMEM;
632 
633 	for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
634 		list_add_tail(&entry[i].list, &free_entries);
635 
636 	num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
637 	nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
638 
639 	return 0;
640 }
641 
642 static struct dma_debug_entry *__dma_entry_alloc(void)
643 {
644 	struct dma_debug_entry *entry;
645 
646 	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
647 	list_del(&entry->list);
648 	memset(entry, 0, sizeof(*entry));
649 
650 	num_free_entries -= 1;
651 	if (num_free_entries < min_free_entries)
652 		min_free_entries = num_free_entries;
653 
654 	return entry;
655 }
656 
657 /*
658  * This should be called outside of free_entries_lock scope to avoid potential
659  * deadlocks with serial consoles that use DMA.
660  */
661 static void __dma_entry_alloc_check_leak(u32 nr_entries)
662 {
663 	u32 tmp = nr_entries % nr_prealloc_entries;
664 
665 	/* Shout each time we tick over some multiple of the initial pool */
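	/*
	 * E.g. when the pool has roughly doubled from the default 65536
	 * entries, this prints something like "... grown to 131072 (200%)".
	 */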
666 	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
667 		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
668 			nr_entries,
669 			(nr_entries / nr_prealloc_entries));
670 	}
671 }
672 
673 /* struct dma_debug_entry allocator
674  *
675  * The next two functions implement the allocator for
676  * struct dma_debug_entry objects.
677  */
678 static struct dma_debug_entry *dma_entry_alloc(void)
679 {
680 	bool alloc_check_leak = false;
681 	struct dma_debug_entry *entry;
682 	unsigned long flags;
683 	u32 nr_entries;
684 
685 	spin_lock_irqsave(&free_entries_lock, flags);
686 	if (num_free_entries == 0) {
687 		if (dma_debug_create_entries(GFP_ATOMIC)) {
688 			global_disable = true;
689 			spin_unlock_irqrestore(&free_entries_lock, flags);
690 			pr_err("debugging out of memory - disabling\n");
691 			return NULL;
692 		}
693 		alloc_check_leak = true;
694 		nr_entries = nr_total_entries;
695 	}
696 
697 	entry = __dma_entry_alloc();
698 
699 	spin_unlock_irqrestore(&free_entries_lock, flags);
700 
701 	if (alloc_check_leak)
702 		__dma_entry_alloc_check_leak(nr_entries);
703 
704 #ifdef CONFIG_STACKTRACE
705 	entry->stack_len = stack_trace_save(entry->stack_entries,
706 					    ARRAY_SIZE(entry->stack_entries),
707 					    1);
708 #endif
709 	return entry;
710 }
711 
712 static void dma_entry_free(struct dma_debug_entry *entry)
713 {
714 	unsigned long flags;
715 
716 	active_cacheline_remove(entry);
717 
718 	/*
719 	 * add to beginning of the list - this way the entries are
720 	 * more likely cache hot when they are reallocated.
721 	 */
722 	spin_lock_irqsave(&free_entries_lock, flags);
723 	list_add(&entry->list, &free_entries);
724 	num_free_entries += 1;
725 	spin_unlock_irqrestore(&free_entries_lock, flags);
726 }
727 
728 /*
729  * DMA-API debugging init code
730  *
731  * The init code does two things:
732  *   1. Initialize core data structures
733  *   2. Preallocate a given number of dma_debug_entry structs
734  */
735 
736 static ssize_t filter_read(struct file *file, char __user *user_buf,
737 			   size_t count, loff_t *ppos)
738 {
739 	char buf[NAME_MAX_LEN + 1];
740 	unsigned long flags;
741 	int len;
742 
743 	if (!current_driver_name[0])
744 		return 0;
745 
746 	/*
747 	 * We can't copy to userspace directly because current_driver_name can
748 	 * only be read under the driver_name_lock with irqs disabled. So
749 	 * create a temporary copy first.
750 	 */
751 	read_lock_irqsave(&driver_name_lock, flags);
752 	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
753 	read_unlock_irqrestore(&driver_name_lock, flags);
754 
755 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
756 }
757 
758 static ssize_t filter_write(struct file *file, const char __user *userbuf,
759 			    size_t count, loff_t *ppos)
760 {
761 	char buf[NAME_MAX_LEN];
762 	unsigned long flags;
763 	size_t len;
764 	int i;
765 
766 	/*
767 	 * We can't copy from userspace directly. Access to
768 	 * current_driver_name is protected with a write_lock with irqs
769 	 * disabled. Since copy_from_user can fault and may sleep we
770 	 * need to copy to a temporary buffer first
771 	 */
772 	len = min(count, (size_t)(NAME_MAX_LEN - 1));
773 	if (copy_from_user(buf, userbuf, len))
774 		return -EFAULT;
775 
776 	buf[len] = 0;
777 
778 	write_lock_irqsave(&driver_name_lock, flags);
779 
780 	/*
781 	 * Now handle the string we got from userspace very carefully.
782 	 * The rules are:
783 	 *         - only use the first token we got
784 	 *         - token delimiter is everything looking like a space
785 	 *           character (' ', '\n', '\t' ...)
786 	 *
787 	 */
788 	if (!isalnum(buf[0])) {
789 		/*
790 		 * If the first character userspace gave us is not
791 		 * alphanumerical then assume the filter should be
792 		 * switched off.
793 		 */
794 		if (current_driver_name[0])
795 			pr_info("switching off dma-debug driver filter\n");
796 		current_driver_name[0] = 0;
797 		current_driver = NULL;
798 		goto out_unlock;
799 	}
800 
801 	/*
802 	 * Now parse out the first token and use it as the name for the
803 	 * driver to filter for.
804 	 */
805 	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
806 		current_driver_name[i] = buf[i];
807 		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
808 			break;
809 	}
810 	current_driver_name[i] = 0;
811 	current_driver = NULL;
812 
813 	pr_info("enable driver filter for driver [%s]\n",
814 		current_driver_name);
815 
816 out_unlock:
817 	write_unlock_irqrestore(&driver_name_lock, flags);
818 
819 	return count;
820 }
821 
822 static const struct file_operations filter_fops = {
823 	.read  = filter_read,
824 	.write = filter_write,
825 	.llseek = default_llseek,
826 };
827 
828 static int __init dma_debug_fs_init(void)
829 {
830 	struct dentry *dentry = debugfs_create_dir("dma-api", NULL);
831 
832 	debugfs_create_bool("disabled", 0444, dentry, &global_disable);
833 	debugfs_create_u32("error_count", 0444, dentry, &error_count);
834 	debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
835 	debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
836 	debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
837 	debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
838 	debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
839 	debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
840 	debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
841 
842 	return 0;
843 }
844 core_initcall_sync(dma_debug_fs_init);
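
/*
 * With debugfs mounted in its usual place the files created above appear
 * under /sys/kernel/debug/dma-api/, for example:
 *
 *	cat /sys/kernel/debug/dma-api/error_count
 *	echo 1 > /sys/kernel/debug/dma-api/all_errors
 *	cat /sys/kernel/debug/dma-api/dump
 */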
845 
846 static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
847 {
848 	struct dma_debug_entry *entry;
849 	unsigned long flags;
850 	int count = 0, i;
851 
852 	for (i = 0; i < HASH_SIZE; ++i) {
853 		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
854 		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
855 			if (entry->dev == dev) {
856 				count += 1;
857 				*out_entry = entry;
858 			}
859 		}
860 		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
861 	}
862 
863 	return count;
864 }
865 
866 static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
867 {
868 	struct device *dev = data;
869 	struct dma_debug_entry *entry;
870 	int count;
871 
872 	if (dma_debug_disabled())
873 		return 0;
874 
875 	switch (action) {
876 	case BUS_NOTIFY_UNBOUND_DRIVER:
877 		count = device_dma_allocations(dev, &entry);
878 		if (count == 0)
879 			break;
880 		err_printk(dev, entry, "device driver has pending "
881 				"DMA allocations while released from device "
882 				"[count=%d]\n"
883 				"One of leaked entries details: "
884 				"[device address=0x%016llx] [size=%llu bytes] "
885 				"[mapped with %s] [mapped as %s]\n",
886 			count, entry->dev_addr, entry->size,
887 			dir2name[entry->direction], type2name[entry->type]);
888 		break;
889 	default:
890 		break;
891 	}
892 
893 	return 0;
894 }
895 
896 void dma_debug_add_bus(const struct bus_type *bus)
897 {
898 	struct notifier_block *nb;
899 
900 	if (dma_debug_disabled())
901 		return;
902 
903 	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
904 	if (nb == NULL) {
905 		pr_err("dma_debug_add_bus: out of memory\n");
906 		return;
907 	}
908 
909 	nb->notifier_call = dma_debug_device_change;
910 
911 	bus_register_notifier(bus, nb);
912 }
913 
914 static int dma_debug_init(void)
915 {
916 	int i, nr_pages;
917 
918 	/* Do not use dma_debug_initialized here, since we really want to be
919 	 * called to set dma_debug_initialized
920 	 */
921 	if (global_disable)
922 		return 0;
923 
924 	for (i = 0; i < HASH_SIZE; ++i) {
925 		INIT_LIST_HEAD(&dma_entry_hash[i].list);
926 		spin_lock_init(&dma_entry_hash[i].lock);
927 	}
928 
929 	nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
930 	for (i = 0; i < nr_pages; ++i)
931 		dma_debug_create_entries(GFP_KERNEL);
932 	if (num_free_entries >= nr_prealloc_entries) {
933 		pr_info("preallocated %d debug entries\n", nr_total_entries);
934 	} else if (num_free_entries > 0) {
935 		pr_warn("%d debug entries requested but only %d allocated\n",
936 			nr_prealloc_entries, nr_total_entries);
937 	} else {
938 		pr_err("debugging out of memory error - disabled\n");
939 		global_disable = true;
940 
941 		return 0;
942 	}
943 	min_free_entries = num_free_entries;
944 
945 	dma_debug_initialized = true;
946 
947 	pr_info("debugging enabled by kernel config\n");
948 	return 0;
949 }
950 core_initcall(dma_debug_init);
951 
952 static __init int dma_debug_cmdline(char *str)
953 {
954 	if (!str)
955 		return -EINVAL;
956 
957 	if (strncmp(str, "off", 3) == 0) {
958 		pr_info("debugging disabled on kernel command line\n");
959 		global_disable = true;
960 	}
961 
962 	return 1;
963 }
964 
965 static __init int dma_debug_entries_cmdline(char *str)
966 {
967 	if (!str)
968 		return -EINVAL;
969 	if (!get_option(&str, &nr_prealloc_entries))
970 		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
971 	return 1;
972 }
973 
974 __setup("dma_debug=", dma_debug_cmdline);
975 __setup("dma_debug_entries=", dma_debug_entries_cmdline);
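
/*
 * Example command line usage (the values are only illustrative):
 *
 *	dma_debug=off			disable all dma-debug checks
 *	dma_debug_entries=131072	preallocate twice the default 65536
 *	dma_debug_driver=e1000e		restrict reports to one driver
 */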
976 
977 static void check_unmap(struct dma_debug_entry *ref)
978 {
979 	struct dma_debug_entry *entry;
980 	struct hash_bucket *bucket;
981 	unsigned long flags;
982 
983 	bucket = get_hash_bucket(ref, &flags);
984 	entry = bucket_find_exact(bucket, ref);
985 
986 	if (!entry) {
987 		/* must drop lock before calling dma_mapping_error */
988 		put_hash_bucket(bucket, flags);
989 
990 		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
991 			err_printk(ref->dev, NULL,
992 				   "device driver tries to free an "
993 				   "invalid DMA memory address\n");
994 		} else {
995 			err_printk(ref->dev, NULL,
996 				   "device driver tries to free DMA "
997 				   "memory it has not allocated [device "
998 				   "address=0x%016llx] [size=%llu bytes]\n",
999 				   ref->dev_addr, ref->size);
1000 		}
1001 		return;
1002 	}
1003 
1004 	if (ref->size != entry->size) {
1005 		err_printk(ref->dev, entry, "device driver frees "
1006 			   "DMA memory with different size "
1007 			   "[device address=0x%016llx] [map size=%llu bytes] "
1008 			   "[unmap size=%llu bytes]\n",
1009 			   ref->dev_addr, entry->size, ref->size);
1010 	}
1011 
1012 	if (ref->type != entry->type) {
1013 		err_printk(ref->dev, entry, "device driver frees "
1014 			   "DMA memory with wrong function "
1015 			   "[device address=0x%016llx] [size=%llu bytes] "
1016 			   "[mapped as %s] [unmapped as %s]\n",
1017 			   ref->dev_addr, ref->size,
1018 			   type2name[entry->type], type2name[ref->type]);
1019 	} else if ((entry->type == dma_debug_coherent ||
1020 		    entry->type == dma_debug_noncoherent) &&
1021 		   ref->paddr != entry->paddr) {
1022 		err_printk(ref->dev, entry, "device driver frees "
1023 			   "DMA memory with different CPU address "
1024 			   "[device address=0x%016llx] [size=%llu bytes] "
1025 			   "[cpu alloc address=0x%pa] "
1026 			   "[cpu free address=0x%pa]",
1027 			   ref->dev_addr, ref->size,
1028 			   &entry->paddr,
1029 			   &ref->paddr);
1030 	}
1031 
1032 	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1033 	    ref->sg_call_ents != entry->sg_call_ents) {
1034 		err_printk(ref->dev, entry, "device driver frees "
1035 			   "DMA sg list with different entry count "
1036 			   "[map count=%d] [unmap count=%d]\n",
1037 			   entry->sg_call_ents, ref->sg_call_ents);
1038 	}
1039 
1040 	/*
1041 	 * This may not be a bug in reality - but most implementations of the
1042 	 * DMA API don't handle this properly, so check for it here.
1043 	 */
1044 	if (ref->direction != entry->direction) {
1045 		err_printk(ref->dev, entry, "device driver frees "
1046 			   "DMA memory with different direction "
1047 			   "[device address=0x%016llx] [size=%llu bytes] "
1048 			   "[mapped with %s] [unmapped with %s]\n",
1049 			   ref->dev_addr, ref->size,
1050 			   dir2name[entry->direction],
1051 			   dir2name[ref->direction]);
1052 	}
1053 
1054 	/*
1055 	 * Drivers should use dma_mapping_error() to check the returned
1056 	 * addresses of dma_map_single() and dma_map_page().
1057 	 * If not, print this warning message. See Documentation/core-api/dma-api.rst.
1058 	 */
1059 	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1060 		err_printk(ref->dev, entry,
1061 			   "device driver failed to check map error "
1062 			   "[device address=0x%016llx] [size=%llu bytes] "
1063 			   "[mapped as %s]",
1064 			   ref->dev_addr, ref->size,
1065 			   type2name[entry->type]);
1066 	}
1067 
1068 	hash_bucket_del(entry);
1069 	put_hash_bucket(bucket, flags);
1070 
1071 	/*
1072 	 * Free the entry outside of bucket_lock to avoid ABBA deadlocks
1073 	 * between that and radix_lock.
1074 	 */
1075 	dma_entry_free(entry);
1076 }
1077 
1078 static void check_for_stack(struct device *dev, phys_addr_t phys)
1079 {
1080 	void *addr;
1081 	struct vm_struct *stack_vm_area = task_stack_vm_area(current);
1082 
1083 	if (!stack_vm_area) {
1084 		/* Stack is direct-mapped. */
1085 		if (PhysHighMem(phys))
1086 			return;
1087 		addr = phys_to_virt(phys);
1088 		if (object_is_on_stack(addr))
1089 			err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
1090 	} else {
1091 		/* Stack is vmalloced. */
1092 		int i;
1093 
1094 		for (i = 0; i < stack_vm_area->nr_pages; i++) {
1095 			if (__phys_to_pfn(phys) !=
1096 			    page_to_pfn(stack_vm_area->pages[i]))
1097 				continue;
1098 
1099 			addr = (u8 *)current->stack + i * PAGE_SIZE +
1100 			       (phys % PAGE_SIZE);
1101 			err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
1102 			break;
1103 		}
1104 	}
1105 }
1106 
1107 static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
1108 {
1109 	if (memory_intersects(_stext, _etext, addr, len) ||
1110 	    memory_intersects(__start_rodata, __end_rodata, addr, len))
1111 		err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
1112 }
1113 
1114 static void check_sync(struct device *dev,
1115 		       struct dma_debug_entry *ref,
1116 		       bool to_cpu)
1117 {
1118 	struct dma_debug_entry *entry;
1119 	struct hash_bucket *bucket;
1120 	unsigned long flags;
1121 
1122 	bucket = get_hash_bucket(ref, &flags);
1123 
1124 	entry = bucket_find_contain(&bucket, ref, &flags);
1125 
1126 	if (!entry) {
1127 		err_printk(dev, NULL, "device driver tries "
1128 				"to sync DMA memory it has not allocated "
1129 				"[device address=0x%016llx] [size=%llu bytes]\n",
1130 				(unsigned long long)ref->dev_addr, ref->size);
1131 		goto out;
1132 	}
1133 
1134 	if (ref->size > entry->size) {
1135 		err_printk(dev, entry, "device driver syncs"
1136 				" DMA memory outside allocated range "
1137 				"[device address=0x%016llx] "
1138 				"[allocation size=%llu bytes] "
1139 				"[sync offset+size=%llu]\n",
1140 				entry->dev_addr, entry->size,
1141 				ref->size);
1142 	}
1143 
1144 	if (entry->direction == DMA_BIDIRECTIONAL)
1145 		goto out;
1146 
1147 	if (ref->direction != entry->direction) {
1148 		err_printk(dev, entry, "device driver syncs "
1149 				"DMA memory with different direction "
1150 				"[device address=0x%016llx] [size=%llu bytes] "
1151 				"[mapped with %s] [synced with %s]\n",
1152 				(unsigned long long)ref->dev_addr, entry->size,
1153 				dir2name[entry->direction],
1154 				dir2name[ref->direction]);
1155 	}
1156 
1157 	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
1158 		      !(ref->direction == DMA_TO_DEVICE))
1159 		err_printk(dev, entry, "device driver syncs "
1160 				"device read-only DMA memory for cpu "
1161 				"[device address=0x%016llx] [size=%llu bytes] "
1162 				"[mapped with %s] [synced with %s]\n",
1163 				(unsigned long long)ref->dev_addr, entry->size,
1164 				dir2name[entry->direction],
1165 				dir2name[ref->direction]);
1166 
1167 	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
1168 		       !(ref->direction == DMA_FROM_DEVICE))
1169 		err_printk(dev, entry, "device driver syncs "
1170 				"device write-only DMA memory to device "
1171 				"[device address=0x%016llx] [size=%llu bytes] "
1172 				"[mapped with %s] [synced with %s]\n",
1173 				(unsigned long long)ref->dev_addr, entry->size,
1174 				dir2name[entry->direction],
1175 				dir2name[ref->direction]);
1176 
1177 	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1178 	    ref->sg_call_ents != entry->sg_call_ents) {
1179 		err_printk(ref->dev, entry, "device driver syncs "
1180 			   "DMA sg list with different entry count "
1181 			   "[map count=%d] [sync count=%d]\n",
1182 			   entry->sg_call_ents, ref->sg_call_ents);
1183 	}
1184 
1185 out:
1186 	put_hash_bucket(bucket, flags);
1187 }
1188 
1189 static void check_sg_segment(struct device *dev, struct scatterlist *sg)
1190 {
1191 	unsigned int max_seg = dma_get_max_seg_size(dev);
1192 	u64 start, end, boundary = dma_get_seg_boundary(dev);
1193 
1194 	/*
1195 	 * Either the driver forgot to set dma_parms appropriately, or
1196 	 * whoever generated the list forgot to check them.
1197 	 */
1198 	if (sg->length > max_seg)
1199 		err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
1200 			   sg->length, max_seg);
1201 	/*
1202 	 * In some cases this could potentially be the DMA API
1203 	 * implementation's fault, but it would usually imply that
1204 	 * the scatterlist was built inappropriately to begin with.
1205 	 */
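	/*
	 * dma_get_seg_boundary() returns a mask of the form 2^n - 1 (e.g.
	 * 0xffffffff for a 4 GiB boundary); if start and end differ in any
	 * bit above that mask, the segment straddles a boundary the device
	 * cannot cross.
	 */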
1206 	start = sg_dma_address(sg);
1207 	end = start + sg_dma_len(sg) - 1;
1208 	if ((start ^ end) & ~boundary)
1209 		err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
1210 			   start, end, boundary);
1211 }
1212 
1213 void debug_dma_map_single(struct device *dev, const void *addr,
1214 			    unsigned long len)
1215 {
1216 	if (unlikely(dma_debug_disabled()))
1217 		return;
1218 
1219 	if (!virt_addr_valid(addr))
1220 		err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
1221 			   addr, len);
1222 
1223 	if (is_vmalloc_addr(addr))
1224 		err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
1225 			   addr, len);
1226 }
1227 EXPORT_SYMBOL(debug_dma_map_single);
1228 
1229 void debug_dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
1230 		int direction, dma_addr_t dma_addr, unsigned long attrs)
1231 {
1232 	struct dma_debug_entry *entry;
1233 
1234 	if (unlikely(dma_debug_disabled()))
1235 		return;
1236 
1237 	if (dma_mapping_error(dev, dma_addr))
1238 		return;
1239 
1240 	entry = dma_entry_alloc();
1241 	if (!entry)
1242 		return;
1243 
1244 	entry->dev       = dev;
1245 	entry->type      = dma_debug_phy;
1246 	entry->paddr	 = phys;
1247 	entry->dev_addr  = dma_addr;
1248 	entry->size      = size;
1249 	entry->direction = direction;
1250 	entry->map_err_type = MAP_ERR_NOT_CHECKED;
1251 
1252 	if (!(attrs & DMA_ATTR_MMIO)) {
1253 		check_for_stack(dev, phys);
1254 
1255 		if (!PhysHighMem(phys))
1256 			check_for_illegal_area(dev, phys_to_virt(phys), size);
1257 	}
1258 
1259 	add_dma_entry(entry, attrs);
1260 }
1261 
1262 void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1263 {
1264 	struct dma_debug_entry ref;
1265 	struct dma_debug_entry *entry;
1266 	struct hash_bucket *bucket;
1267 	unsigned long flags;
1268 
1269 	if (unlikely(dma_debug_disabled()))
1270 		return;
1271 
1272 	ref.dev = dev;
1273 	ref.dev_addr = dma_addr;
1274 	bucket = get_hash_bucket(&ref, &flags);
1275 
1276 	list_for_each_entry(entry, &bucket->list, list) {
1277 		if (!exact_match(&ref, entry))
1278 			continue;
1279 
1280 		/*
1281 		 * The same physical address can be mapped multiple
1282 		 * times. Without a hardware IOMMU this results in the
1283 		 * same device addresses being put into the dma-debug
1284 		 * hash multiple times too. This can result in false
1285 		 * positives being reported. Therefore we implement a
1286 		 * best-fit algorithm here which updates the first entry
1287 		 * from the hash which fits the reference value and is
1288 		 * not currently listed as being checked.
1289 		 */
1290 		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1291 			entry->map_err_type = MAP_ERR_CHECKED;
1292 			break;
1293 		}
1294 	}
1295 
1296 	put_hash_bucket(bucket, flags);
1297 }
1298 EXPORT_SYMBOL(debug_dma_mapping_error);
1299 
1300 void debug_dma_unmap_phys(struct device *dev, dma_addr_t dma_addr,
1301 			  size_t size, int direction)
1302 {
1303 	struct dma_debug_entry ref = {
1304 		.type           = dma_debug_phy,
1305 		.dev            = dev,
1306 		.dev_addr       = dma_addr,
1307 		.size           = size,
1308 		.direction      = direction,
1309 	};
1310 
1311 	if (unlikely(dma_debug_disabled()))
1312 		return;
1313 	check_unmap(&ref);
1314 }
1315 
1316 void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
1317 		      int nents, int mapped_ents, int direction,
1318 		      unsigned long attrs)
1319 {
1320 	struct dma_debug_entry *entry;
1321 	struct scatterlist *s;
1322 	int i;
1323 
1324 	if (unlikely(dma_debug_disabled()))
1325 		return;
1326 
1327 	for_each_sg(sg, s, nents, i) {
1328 		check_for_stack(dev, sg_phys(s));
1329 		if (!PageHighMem(sg_page(s)))
1330 			check_for_illegal_area(dev, sg_virt(s), s->length);
1331 	}
1332 
1333 	for_each_sg(sg, s, mapped_ents, i) {
1334 		entry = dma_entry_alloc();
1335 		if (!entry)
1336 			return;
1337 
1338 		entry->type           = dma_debug_sg;
1339 		entry->dev            = dev;
1340 		entry->paddr	      = sg_phys(s);
1341 		entry->size           = sg_dma_len(s);
1342 		entry->dev_addr       = sg_dma_address(s);
1343 		entry->direction      = direction;
1344 		entry->sg_call_ents   = nents;
1345 		entry->sg_mapped_ents = mapped_ents;
1346 
1347 		check_sg_segment(dev, s);
1348 
1349 		add_dma_entry(entry, attrs);
1350 	}
1351 }
1352 
1353 static int get_nr_mapped_entries(struct device *dev,
1354 				 struct dma_debug_entry *ref)
1355 {
1356 	struct dma_debug_entry *entry;
1357 	struct hash_bucket *bucket;
1358 	unsigned long flags;
1359 	int mapped_ents;
1360 
1361 	bucket       = get_hash_bucket(ref, &flags);
1362 	entry        = bucket_find_exact(bucket, ref);
1363 	mapped_ents  = 0;
1364 
1365 	if (entry)
1366 		mapped_ents = entry->sg_mapped_ents;
1367 	put_hash_bucket(bucket, flags);
1368 
1369 	return mapped_ents;
1370 }
1371 
1372 void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
1373 			int nelems, int dir)
1374 {
1375 	struct scatterlist *s;
1376 	int mapped_ents = 0, i;
1377 
1378 	if (unlikely(dma_debug_disabled()))
1379 		return;
1380 
1381 	for_each_sg(sglist, s, nelems, i) {
1382 
1383 		struct dma_debug_entry ref = {
1384 			.type           = dma_debug_sg,
1385 			.dev            = dev,
1386 			.paddr		= sg_phys(s),
1387 			.dev_addr       = sg_dma_address(s),
1388 			.size           = sg_dma_len(s),
1389 			.direction      = dir,
1390 			.sg_call_ents   = nelems,
1391 		};
1392 
1393 		if (mapped_ents && i >= mapped_ents)
1394 			break;
1395 
1396 		if (!i)
1397 			mapped_ents = get_nr_mapped_entries(dev, &ref);
1398 
1399 		check_unmap(&ref);
1400 	}
1401 }
1402 
1403 static phys_addr_t virt_to_paddr(void *virt)
1404 {
1405 	struct page *page;
1406 
1407 	if (is_vmalloc_addr(virt))
1408 		page = vmalloc_to_page(virt);
1409 	else
1410 		page = virt_to_page(virt);
1411 
1412 	return page_to_phys(page) + offset_in_page(virt);
1413 }
1414 
1415 void debug_dma_alloc_coherent(struct device *dev, size_t size,
1416 			      dma_addr_t dma_addr, void *virt,
1417 			      unsigned long attrs)
1418 {
1419 	struct dma_debug_entry *entry;
1420 
1421 	if (unlikely(dma_debug_disabled()))
1422 		return;
1423 
1424 	if (unlikely(virt == NULL))
1425 		return;
1426 
1427 	/* handle vmalloc and linear addresses */
1428 	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1429 		return;
1430 
1431 	entry = dma_entry_alloc();
1432 	if (!entry)
1433 		return;
1434 
1435 	entry->type      = dma_debug_coherent;
1436 	entry->dev       = dev;
1437 	entry->paddr	 = virt_to_paddr(virt);
1438 	entry->size      = size;
1439 	entry->dev_addr  = dma_addr;
1440 	entry->direction = DMA_BIDIRECTIONAL;
1441 
1442 	add_dma_entry(entry, attrs);
1443 }
1444 
1445 void debug_dma_free_coherent(struct device *dev, size_t size,
1446 			 void *virt, dma_addr_t dma_addr)
1447 {
1448 	struct dma_debug_entry ref = {
1449 		.type           = dma_debug_coherent,
1450 		.dev            = dev,
1451 		.dev_addr       = dma_addr,
1452 		.size           = size,
1453 		.direction      = DMA_BIDIRECTIONAL,
1454 	};
1455 
1456 	/* handle vmalloc and linear addresses */
1457 	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1458 		return;
1459 
1460 	ref.paddr = virt_to_paddr(virt);
1461 
1462 	if (unlikely(dma_debug_disabled()))
1463 		return;
1464 
1465 	check_unmap(&ref);
1466 }
1467 
1468 void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1469 				   size_t size, int direction)
1470 {
1471 	struct dma_debug_entry ref;
1472 
1473 	if (unlikely(dma_debug_disabled()))
1474 		return;
1475 
1476 	ref.type         = dma_debug_single;
1477 	ref.dev          = dev;
1478 	ref.dev_addr     = dma_handle;
1479 	ref.size         = size;
1480 	ref.direction    = direction;
1481 	ref.sg_call_ents = 0;
1482 
1483 	check_sync(dev, &ref, true);
1484 }
1485 
1486 void debug_dma_sync_single_for_device(struct device *dev,
1487 				      dma_addr_t dma_handle, size_t size,
1488 				      int direction)
1489 {
1490 	struct dma_debug_entry ref;
1491 
1492 	if (unlikely(dma_debug_disabled()))
1493 		return;
1494 
1495 	ref.type         = dma_debug_single;
1496 	ref.dev          = dev;
1497 	ref.dev_addr     = dma_handle;
1498 	ref.size         = size;
1499 	ref.direction    = direction;
1500 	ref.sg_call_ents = 0;
1501 
1502 	check_sync(dev, &ref, false);
1503 }
1504 
1505 void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1506 			       int nelems, int direction)
1507 {
1508 	struct scatterlist *s;
1509 	int mapped_ents = 0, i;
1510 
1511 	if (unlikely(dma_debug_disabled()))
1512 		return;
1513 
1514 	for_each_sg(sg, s, nelems, i) {
1515 
1516 		struct dma_debug_entry ref = {
1517 			.type           = dma_debug_sg,
1518 			.dev            = dev,
1519 			.paddr		= sg_phys(s),
1520 			.dev_addr       = sg_dma_address(s),
1521 			.size           = sg_dma_len(s),
1522 			.direction      = direction,
1523 			.sg_call_ents   = nelems,
1524 		};
1525 
1526 		if (!i)
1527 			mapped_ents = get_nr_mapped_entries(dev, &ref);
1528 
1529 		if (i >= mapped_ents)
1530 			break;
1531 
1532 		check_sync(dev, &ref, true);
1533 	}
1534 }
1535 
1536 void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1537 				  int nelems, int direction)
1538 {
1539 	struct scatterlist *s;
1540 	int mapped_ents = 0, i;
1541 
1542 	if (unlikely(dma_debug_disabled()))
1543 		return;
1544 
1545 	for_each_sg(sg, s, nelems, i) {
1546 
1547 		struct dma_debug_entry ref = {
1548 			.type           = dma_debug_sg,
1549 			.dev            = dev,
1550 			.paddr		= sg_phys(s),
1551 			.dev_addr       = sg_dma_address(s),
1552 			.size           = sg_dma_len(s),
1553 			.direction      = direction,
1554 			.sg_call_ents   = nelems,
1555 		};
1556 		if (!i)
1557 			mapped_ents = get_nr_mapped_entries(dev, &ref);
1558 
1559 		if (i >= mapped_ents)
1560 			break;
1561 
1562 		check_sync(dev, &ref, false);
1563 	}
1564 }
1565 
1566 void debug_dma_alloc_pages(struct device *dev, struct page *page,
1567 			   size_t size, int direction,
1568 			   dma_addr_t dma_addr,
1569 			   unsigned long attrs)
1570 {
1571 	struct dma_debug_entry *entry;
1572 
1573 	if (unlikely(dma_debug_disabled()))
1574 		return;
1575 
1576 	entry = dma_entry_alloc();
1577 	if (!entry)
1578 		return;
1579 
1580 	entry->type      = dma_debug_noncoherent;
1581 	entry->dev       = dev;
1582 	entry->paddr	 = page_to_phys(page);
1583 	entry->size      = size;
1584 	entry->dev_addr  = dma_addr;
1585 	entry->direction = direction;
1586 
1587 	add_dma_entry(entry, attrs);
1588 }
1589 
1590 void debug_dma_free_pages(struct device *dev, struct page *page,
1591 			  size_t size, int direction,
1592 			  dma_addr_t dma_addr)
1593 {
1594 	struct dma_debug_entry ref = {
1595 		.type           = dma_debug_noncoherent,
1596 		.dev            = dev,
1597 		.paddr		= page_to_phys(page),
1598 		.dev_addr       = dma_addr,
1599 		.size           = size,
1600 		.direction      = direction,
1601 	};
1602 
1603 	if (unlikely(dma_debug_disabled()))
1604 		return;
1605 
1606 	check_unmap(&ref);
1607 }
1608 
1609 static int __init dma_debug_driver_setup(char *str)
1610 {
1611 	int i;
1612 
1613 	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
1614 		current_driver_name[i] = *str;
1615 		if (*str == 0)
1616 			break;
1617 	}
1618 
1619 	if (current_driver_name[0])
1620 		pr_info("enable driver filter for driver [%s]\n",
1621 			current_driver_name);
1622 
1623 
1624 	return 1;
1625 }
1626 __setup("dma_debug_driver=", dma_debug_driver_setup);
1627