// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/workqueue.h>

/* The anchor node sits above the top of the usable address space */
#define IOVA_ANCHOR	~0UL

#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */

static bool iova_rcache_insert(struct iova_domain *iovad,
			       unsigned long pfn,
			       unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn);
static void free_iova_rcaches(struct iova_domain *iovad);
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_global_cached_iovas(struct iova_domain *iovad);

static struct iova *to_iova(struct rb_node *node)
{
	return rb_entry(node, struct iova, node);
}

void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached_node = &iovad->anchor.node;
	iovad->cached32_node = &iovad->anchor.node;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
	iovad->max32_alloc_size = iovad->dma_32bit_pfn;
	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
}
EXPORT_SYMBOL_GPL(init_iova_domain);

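/*
 * Example (illustrative sketch, not part of this file): a typical user
 * pairs iova_cache_get() and init_iova_domain() at setup time with
 * put_iova_domain() and iova_cache_put() at teardown. The 4K granule and
 * start_pfn of 1 below are assumptions for the example only.
 *
 *	static struct iova_domain iovad;
 *	int err;
 *
 *	err = iova_cache_get();
 *	if (err)
 *		return err;
 *	init_iova_domain(&iovad, SZ_4K, 1);
 *	...
 *	put_iova_domain(&iovad);
 *	iova_cache_put();
 */
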
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
{
	if (limit_pfn <= iovad->dma_32bit_pfn)
		return iovad->cached32_node;

	return iovad->cached_node;
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
{
	if (new->pfn_hi < iovad->dma_32bit_pfn)
		iovad->cached32_node = &new->node;
	else
		iovad->cached_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;

	cached_iova = to_iova(iovad->cached32_node);
	if (free == cached_iova ||
	    (free->pfn_hi < iovad->dma_32bit_pfn &&
	     free->pfn_lo >= cached_iova->pfn_lo))
		iovad->cached32_node = rb_next(&free->node);

	if (free->pfn_lo < iovad->dma_32bit_pfn)
		iovad->max32_alloc_size = iovad->dma_32bit_pfn;

	cached_iova = to_iova(iovad->cached_node);
	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached_node = rb_next(&free->node);
}

static struct rb_node *iova_find_limit(struct iova_domain *iovad, unsigned long limit_pfn)
{
	struct rb_node *node, *next;
	/*
	 * Ideally what we'd like to judge here is whether limit_pfn is close
	 * enough to the highest-allocated IOVA that starting the allocation
	 * walk from the anchor node will be quicker than this initial work to
	 * find an exact starting point (especially if that ends up being the
	 * anchor node anyway). This is an incredibly crude approximation which
	 * only really helps the most likely case, but is at least trivially easy.
	 */
	if (limit_pfn > iovad->dma_32bit_pfn)
		return &iovad->anchor.node;

	node = iovad->rbroot.rb_node;
	while (to_iova(node)->pfn_hi < limit_pfn)
		node = node->rb_right;

search_left:
	while (node->rb_left && to_iova(node->rb_left)->pfn_lo >= limit_pfn)
		node = node->rb_left;

	if (!node->rb_left)
		return node;

	next = node->rb_left;
	while (next->rb_right) {
		next = next->rb_right;
		if (to_iova(next)->pfn_lo >= limit_pfn) {
			node = next;
			goto search_left;
		}
	}

	return node;
}

/* Insert the iova into domain rbtree by holding writer lock */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
		   struct rb_node *start)
{
	struct rb_node **new, *parent = NULL;

	new = (start) ? &start : &(root->rb_node);
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = to_iova(*new);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else {
			WARN_ON(1); /* this should not happen */
			return;
		}
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
			struct iova *new, bool size_aligned)
{
	struct rb_node *curr, *prev;
	struct iova *curr_iova;
	unsigned long flags;
	unsigned long new_pfn, retry_pfn;
	unsigned long align_mask = ~0UL;
	unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;

	if (size_aligned)
		align_mask <<= fls_long(size - 1);

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (limit_pfn <= iovad->dma_32bit_pfn &&
			size >= iovad->max32_alloc_size)
		goto iova32_full;

	curr = __get_cached_rbnode(iovad, limit_pfn);
	curr_iova = to_iova(curr);
	retry_pfn = curr_iova->pfn_hi;

retry:
	do {
		high_pfn = min(high_pfn, curr_iova->pfn_lo);
		new_pfn = (high_pfn - size) & align_mask;
		prev = curr;
		curr = rb_prev(curr);
		curr_iova = to_iova(curr);
	} while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);

	if (high_pfn < size || new_pfn < low_pfn) {
		if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
			high_pfn = limit_pfn;
			low_pfn = retry_pfn + 1;
			curr = iova_find_limit(iovad, limit_pfn);
			curr_iova = to_iova(curr);
			goto retry;
		}
		iovad->max32_alloc_size = size;
		goto iova32_full;
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = new_pfn;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* If we have 'prev', it's a valid place to start the insertion. */
	iova_insert_rbtree(&iovad->rbroot, new, prev);
	__cached_rbnode_insert_update(iovad, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return 0;

iova32_full:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return -ENOMEM;
}

static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

static struct iova *alloc_iova_mem(void)
{
	return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
}

static void free_iova_mem(struct iova *iova)
{
	if (iova->pfn_lo != IOVA_ANCHOR)
		kmem_cache_free(iova_cache, iova);
}

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);

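/*
 * Example (illustrative sketch, not part of this file): allocating a
 * size-aligned two-page range below the 32-bit boundary and converting it
 * to a bus address. The size is an assumption for the example only; note
 * that limit_pfn is inclusive.
 *
 *	struct iova *iova;
 *
 *	iova = alloc_iova(&iovad, 2, iovad.dma_32bit_pfn - 1, true);
 *	if (!iova)
 *		return -ENOMEM;
 *	dma_addr = (dma_addr_t)iova->pfn_lo << iova_shift(&iovad);
 *	...
 *	__free_iova(&iovad, iova);
 */
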
static struct iova *
private_find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct rb_node *node = iovad->rbroot.rb_node;

	assert_spin_locked(&iovad->iova_rbtree_lock);

	while (node) {
		struct iova *iova = to_iova(node);

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_hi)
			node = node->rb_right;
		else
			return iova;	/* pfn falls within iova's range */
	}

	return NULL;
}

static void remove_iova(struct iova_domain *iovad, struct iova *iova)
{
	assert_spin_locked(&iovad->iova_rbtree_lock);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
}

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(find_iova);

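/*
 * Example (illustrative sketch, not part of this file): an unmap path can
 * use find_iova() to recover the extent of a previously allocated range
 * from any pfn inside it before releasing it.
 *
 *	struct iova *iova = find_iova(&iovad, iova_pfn(&iovad, dma_addr));
 *
 *	if (WARN_ON(!iova))
 *		return;
 *	size = iova_size(iova) << iova_shift(&iovad);
 *	...
 *	__free_iova(&iovad, iova);
 */
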
/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	remove_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(__free_iova);

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	if (!iova) {
		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
		return;
	}
	remove_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(free_iova);

/**
 * alloc_iova_fast - allocates an iova from rcache
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @flush_rcache: - set to flush rcache on regular allocation failure
 * This function tries to satisfy an iova allocation from the rcache,
 * and falls back to regular allocation on failure. If regular allocation
 * fails too and the flush_rcache flag is set then the rcache will be flushed.
 */
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
		unsigned long limit_pfn, bool flush_rcache)
{
	unsigned long iova_pfn;
	struct iova *new_iova;

	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (size < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		size = roundup_pow_of_two(size);

	iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
	if (iova_pfn)
		return iova_pfn;

retry:
	new_iova = alloc_iova(iovad, size, limit_pfn, true);
	if (!new_iova) {
		unsigned int cpu;

		if (!flush_rcache)
			return 0;

		/* Try replenishing IOVAs by flushing rcache. */
		flush_rcache = false;
		for_each_online_cpu(cpu)
			free_cpu_cached_iovas(cpu, iovad);
		free_global_cached_iovas(iovad);
		goto retry;
	}

	return new_iova->pfn_lo;
}
EXPORT_SYMBOL_GPL(alloc_iova_fast);

/**
 * free_iova_fast - free iova pfn range into rcache
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * @size: - # of pages in range
 * This function frees an iova range by trying to put it into the rcache,
 * falling back to regular iova deallocation via free_iova() if this fails.
 */
void
free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
{
	if (iova_rcache_insert(iovad, pfn, size))
		return;

	free_iova(iovad, pfn);
}
EXPORT_SYMBOL_GPL(free_iova_fast);

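/*
 * Example (illustrative sketch, not part of this file): the cached fast
 * path pairs alloc_iova_fast() with free_iova_fast(), passing the same
 * page count both ways so the freed range lands back in the right-sized
 * cache. The DMA-mask derivation is an assumption for the example only.
 *
 *	unsigned long pages = iova_align(&iovad, size) >> iova_shift(&iovad);
 *	unsigned long limit = dma_get_mask(dev) >> iova_shift(&iovad);
 *	unsigned long pfn;
 *
 *	pfn = alloc_iova_fast(&iovad, pages, limit, true);
 *	if (!pfn)
 *		return DMA_MAPPING_ERROR;
 *	...
 *	free_iova_fast(&iovad, pfn, pages);
 */
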
static void iova_domain_free_rcaches(struct iova_domain *iovad)
{
	cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
					    &iovad->cpuhp_dead);
	free_iova_rcaches(iovad);
}

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct iova *iova, *tmp;

	if (iovad->rcaches)
		iova_domain_free_rcaches(iovad);

	rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
		free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(put_iova_domain);

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = to_iova(node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova, NULL);

	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher page frame address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this range is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	/* Don't allow nonsensical pfns */
	if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad))))
		return NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = to_iova(node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
				break;
	}

	/* We are here either because this is the first reserved node
	 * or because we need to insert the remaining non-overlapping range.
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);

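/*
 * Example (illustrative sketch, not part of this file): carving a hole out
 * of the allocatable space so alloc_iova() can never hand it out, e.g. for
 * a platform MSI window. The address range is an assumption for the example.
 *
 *	unsigned long lo = iova_pfn(&iovad, 0xfee00000);
 *	unsigned long hi = iova_pfn(&iovad, 0xfeefffff);
 *
 *	if (!reserve_iova(&iovad, lo, hi))
 *		return -ENOMEM;
 */
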
/*
 * Magazine caches for IOVA ranges.  For an introduction to magazines,
 * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
 * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
 * For simplicity, we use a static magazine size and don't implement the
 * dynamic size tuning described in the paper.
 */

/*
 * As kmalloc's buffer size is fixed to power of 2, 127 is chosen to
 * assure size of 'iova_magazine' to be 1024 bytes, so that no memory
 * will be wasted. Since only full magazines are inserted into the depot,
 * we don't need to waste PFN capacity on a separate list head either.
 */
#define IOVA_MAG_SIZE 127

#define IOVA_DEPOT_DELAY msecs_to_jiffies(100)

struct iova_magazine {
	union {
		unsigned long size;
		struct iova_magazine *next;
	};
	unsigned long pfns[IOVA_MAG_SIZE];
};
static_assert(!(sizeof(struct iova_magazine) & (sizeof(struct iova_magazine) - 1)));

struct iova_cpu_rcache {
	spinlock_t lock;
	struct iova_magazine *loaded;
	struct iova_magazine *prev;
};

struct iova_rcache {
	spinlock_t lock;
	unsigned int depot_size;
	struct iova_magazine *depot;
	struct iova_cpu_rcache __percpu *cpu_rcaches;
	struct iova_domain *iovad;
	struct delayed_work work;
};

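/*
 * Overview of the caching hierarchy: each CPU holds two magazines per
 * range size ("loaded" and "prev"). Frees push into "loaded", swapping
 * with "prev" when it fills; only when both are full does a complete
 * magazine move to the global depot list. A delayed work item then trims
 * the depot back to roughly one spare magazine per online CPU, so cached
 * IOVA space drains back to the rbtree over time.
 */
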
static struct kmem_cache *iova_magazine_cache;

unsigned long iova_rcache_range(void)
{
	return PAGE_SIZE << (IOVA_RANGE_CACHE_MAX_SIZE - 1);
}

static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
	struct iova_magazine *mag;

	mag = kmem_cache_alloc(iova_magazine_cache, flags);
	if (mag)
		mag->size = 0;

	return mag;
}

static void iova_magazine_free(struct iova_magazine *mag)
{
	kmem_cache_free(iova_magazine_cache, mag);
}

static void
iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

	for (i = 0 ; i < mag->size; ++i) {
		struct iova *iova = private_find_iova(iovad, mag->pfns[i]);

		if (WARN_ON(!iova))
			continue;

		remove_iova(iovad, iova);
		free_iova_mem(iova);
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	mag->size = 0;
}

static bool iova_magazine_full(struct iova_magazine *mag)
{
	return mag->size == IOVA_MAG_SIZE;
}

static bool iova_magazine_empty(struct iova_magazine *mag)
{
	return mag->size == 0;
}

static unsigned long iova_magazine_pop(struct iova_magazine *mag,
				       unsigned long limit_pfn)
{
	int i;
	unsigned long pfn;

	/* Only fall back to the rbtree if we have no suitable pfns at all */
	for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
		if (i == 0)
			return 0;

	/* Swap it to pop it */
	pfn = mag->pfns[i];
	mag->pfns[i] = mag->pfns[--mag->size];

	return pfn;
}

static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
	mag->pfns[mag->size++] = pfn;
}

static struct iova_magazine *iova_depot_pop(struct iova_rcache *rcache)
{
	struct iova_magazine *mag = rcache->depot;

	rcache->depot = mag->next;
	mag->size = IOVA_MAG_SIZE;
	rcache->depot_size--;
	return mag;
}

static void iova_depot_push(struct iova_rcache *rcache, struct iova_magazine *mag)
{
	mag->next = rcache->depot;
	rcache->depot = mag;
	rcache->depot_size++;
}

static void iova_depot_work_func(struct work_struct *work)
{
	struct iova_rcache *rcache = container_of(work, typeof(*rcache), work.work);
	struct iova_magazine *mag = NULL;
	unsigned long flags;

	spin_lock_irqsave(&rcache->lock, flags);
	if (rcache->depot_size > num_online_cpus())
		mag = iova_depot_pop(rcache);
	spin_unlock_irqrestore(&rcache->lock, flags);

	if (mag) {
		iova_magazine_free_pfns(mag, rcache->iovad);
		iova_magazine_free(mag);
		schedule_delayed_work(&rcache->work, IOVA_DEPOT_DELAY);
	}
}

int iova_domain_init_rcaches(struct iova_domain *iovad)
{
	unsigned int cpu;
	int i, ret;

	iovad->rcaches = kcalloc(IOVA_RANGE_CACHE_MAX_SIZE,
				 sizeof(struct iova_rcache),
				 GFP_KERNEL);
	if (!iovad->rcaches)
		return -ENOMEM;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		struct iova_cpu_rcache *cpu_rcache;
		struct iova_rcache *rcache;

		rcache = &iovad->rcaches[i];
		spin_lock_init(&rcache->lock);
		rcache->iovad = iovad;
		INIT_DELAYED_WORK(&rcache->work, iova_depot_work_func);
		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache),
						     cache_line_size());
		if (!rcache->cpu_rcaches) {
			ret = -ENOMEM;
			goto out_err;
		}
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);

			spin_lock_init(&cpu_rcache->lock);
			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
			if (!cpu_rcache->loaded || !cpu_rcache->prev) {
				ret = -ENOMEM;
				goto out_err;
			}
		}
	}

	ret = cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
					       &iovad->cpuhp_dead);
	if (ret)
		goto out_err;
	return 0;

out_err:
	free_iova_rcaches(iovad);
	return ret;
}
EXPORT_SYMBOL_GPL(iova_domain_init_rcaches);

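/*
 * Example (illustrative sketch, not part of this file): a user that wants
 * the alloc_iova_fast()/free_iova_fast() path enables the caches right
 * after initialising the domain, as the DMA-IOMMU layer does.
 *
 *	init_iova_domain(&iovad, SZ_4K, 1);
 *	ret = iova_domain_init_rcaches(&iovad);
 *	if (ret)
 *		return ret;
 *	...
 *	put_iova_domain(&iovad);	(also frees the rcaches)
 */
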
/*
 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
 * return true on success.  Can fail if rcache is full and we can't free
 * space, and free_iova() (our only caller) will then return the IOVA
 * range to the rbtree instead.
 */
static bool __iova_rcache_insert(struct iova_domain *iovad,
				 struct iova_rcache *rcache,
				 unsigned long iova_pfn)
{
	struct iova_cpu_rcache *cpu_rcache;
	bool can_insert = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_full(cpu_rcache->loaded)) {
		can_insert = true;
	} else if (!iova_magazine_full(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		can_insert = true;
	} else {
		struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);

		if (new_mag) {
			spin_lock(&rcache->lock);
			iova_depot_push(rcache, cpu_rcache->loaded);
			spin_unlock(&rcache->lock);
			schedule_delayed_work(&rcache->work, IOVA_DEPOT_DELAY);

			cpu_rcache->loaded = new_mag;
			can_insert = true;
		}
	}

	if (can_insert)
		iova_magazine_push(cpu_rcache->loaded, iova_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	return can_insert;
}

static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
			       unsigned long size)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return false;

	return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
}

/*
 * Caller wants to allocate a new IOVA range from 'rcache'.  If we can
 * satisfy the request, return a matching non-NULL range and remove
 * it from the 'rcache'.
 */
static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
				       unsigned long limit_pfn)
{
	struct iova_cpu_rcache *cpu_rcache;
	unsigned long iova_pfn = 0;
	bool has_pfn = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_empty(cpu_rcache->loaded)) {
		has_pfn = true;
	} else if (!iova_magazine_empty(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		has_pfn = true;
	} else {
		spin_lock(&rcache->lock);
		if (rcache->depot) {
			iova_magazine_free(cpu_rcache->loaded);
			cpu_rcache->loaded = iova_depot_pop(rcache);
			has_pfn = true;
		}
		spin_unlock(&rcache->lock);
	}

	if (has_pfn)
		iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	return iova_pfn;
}

/*
 * Try to satisfy IOVA allocation range from rcache.  Fail if requested
 * size is too big or the DMA limit we are given isn't satisfied by the
 * top element in the magazine.
 */
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return 0;

	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
}

/*
 * free rcache data structures.
 */
static void free_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	struct iova_cpu_rcache *cpu_rcache;
	unsigned int cpu;

	for (int i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		if (!rcache->cpu_rcaches)
			break;
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			iova_magazine_free(cpu_rcache->loaded);
			iova_magazine_free(cpu_rcache->prev);
		}
		free_percpu(rcache->cpu_rcaches);
		cancel_delayed_work_sync(&rcache->work);
		while (rcache->depot)
			iova_magazine_free(iova_depot_pop(rcache));
	}

	kfree(iovad->rcaches);
	iovad->rcaches = NULL;
}

/*
 * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
 */
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned long flags;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
		spin_lock_irqsave(&cpu_rcache->lock, flags);
		iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
		iova_magazine_free_pfns(cpu_rcache->prev, iovad);
		spin_unlock_irqrestore(&cpu_rcache->lock, flags);
	}
}

/*
 * free all the IOVA ranges of global cache
 */
static void free_global_cached_iovas(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	unsigned long flags;

	for (int i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_irqsave(&rcache->lock, flags);
		while (rcache->depot) {
			struct iova_magazine *mag = iova_depot_pop(rcache);

			iova_magazine_free_pfns(mag, iovad);
			iova_magazine_free(mag);
		}
		spin_unlock_irqrestore(&rcache->lock, flags);
	}
}

static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct iova_domain *iovad;

	iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);

	free_cpu_cached_iovas(cpu, iovad);
	return 0;
}

int iova_cache_get(void)
{
	int err = -ENOMEM;

	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		iova_cache = kmem_cache_create("iommu_iova", sizeof(struct iova), 0,
					       SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache)
			goto out_err;

		iova_magazine_cache = kmem_cache_create("iommu_iova_magazine",
							sizeof(struct iova_magazine),
							0, SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_magazine_cache)
			goto out_err;

		err = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead",
					      NULL, iova_cpuhp_dead);
		if (err) {
			pr_err("IOVA: Couldn't register cpuhp handler: %pe\n", ERR_PTR(err));
			goto out_err;
		}
	}

	iova_cache_users++;
	mutex_unlock(&iova_cache_mutex);

	return 0;

out_err:
	kmem_cache_destroy(iova_cache);
	kmem_cache_destroy(iova_magazine_cache);
	mutex_unlock(&iova_cache_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
	mutex_lock(&iova_cache_mutex);
	if (WARN_ON(!iova_cache_users)) {
		mutex_unlock(&iova_cache_mutex);
		return;
	}
	iova_cache_users--;
	if (!iova_cache_users) {
		cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
		kmem_cache_destroy(iova_cache);
		kmem_cache_destroy(iova_magazine_cache);
	}
	mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);

MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_DESCRIPTION("IOMMU I/O Virtual Address management");
MODULE_LICENSE("GPL");