// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

/* The anchor node sits above the top of the usable address space */
#define IOVA_ANCHOR	~0UL

static bool iova_rcache_insert(struct iova_domain *iovad,
			       unsigned long pfn,
			       unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);

static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct iova_domain *iovad;

	iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);

	free_cpu_cached_iovas(cpu, iovad);
	return 0;
}

static void free_global_cached_iovas(struct iova_domain *iovad);

static struct iova *to_iova(struct rb_node *node)
{
	return rb_entry(node, struct iova, node);
}

void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached_node = &iovad->anchor.node;
	iovad->cached32_node = &iovad->anchor.node;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
	iovad->max32_alloc_size = iovad->dma_32bit_pfn;
	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
	cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead);
	init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
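
/*
 * Example: a minimal bring-up sketch.  The wrapper function, the SZ_4K
 * granule and the start PFN of 1 are illustrative assumptions, not
 * requirements; iova_cache_get() must succeed first so that the iova
 * kmem cache and the CPUHP_IOMMU_IOVA_DEAD state exist.
 *
 *	static struct iova_domain my_iovad;
 *
 *	static int my_iova_setup(void)
 *	{
 *		int ret = iova_cache_get();
 *
 *		if (ret)
 *			return ret;
 *		init_iova_domain(&my_iovad, SZ_4K, 1);
 *		return 0;
 *	}
 */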

static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
{
	if (limit_pfn <= iovad->dma_32bit_pfn)
		return iovad->cached32_node;

	return iovad->cached_node;
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
{
	if (new->pfn_hi < iovad->dma_32bit_pfn)
		iovad->cached32_node = &new->node;
	else
		iovad->cached_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;

	cached_iova = to_iova(iovad->cached32_node);
	if (free == cached_iova ||
	    (free->pfn_hi < iovad->dma_32bit_pfn &&
	     free->pfn_lo >= cached_iova->pfn_lo)) {
		iovad->cached32_node = rb_next(&free->node);
		iovad->max32_alloc_size = iovad->dma_32bit_pfn;
	}

	cached_iova = to_iova(iovad->cached_node);
	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached_node = rb_next(&free->node);
}

static struct rb_node *iova_find_limit(struct iova_domain *iovad, unsigned long limit_pfn)
{
	struct rb_node *node, *next;
	/*
	 * Ideally what we'd like to judge here is whether limit_pfn is close
	 * enough to the highest-allocated IOVA that starting the allocation
	 * walk from the anchor node will be quicker than this initial work to
	 * find an exact starting point (especially if that ends up being the
	 * anchor node anyway). This is an incredibly crude approximation which
	 * only really helps the most likely case, but is at least trivially easy.
	 */
	if (limit_pfn > iovad->dma_32bit_pfn)
		return &iovad->anchor.node;

	node = iovad->rbroot.rb_node;
	while (to_iova(node)->pfn_hi < limit_pfn)
		node = node->rb_right;

search_left:
	while (node->rb_left && to_iova(node->rb_left)->pfn_lo >= limit_pfn)
		node = node->rb_left;

	if (!node->rb_left)
		return node;

	next = node->rb_left;
	while (next->rb_right) {
		next = next->rb_right;
		if (to_iova(next)->pfn_lo >= limit_pfn) {
			node = next;
			goto search_left;
		}
	}

	return node;
}

/* Insert the iova into the domain rbtree; the caller must hold iovad->iova_rbtree_lock */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
		   struct rb_node *start)
{
	struct rb_node **new, *parent = NULL;

	new = (start) ? &start : &(root->rb_node);
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = to_iova(*new);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else {
			WARN_ON(1); /* this should not happen */
			return;
		}
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
			struct iova *new, bool size_aligned)
{
	struct rb_node *curr, *prev;
	struct iova *curr_iova;
	unsigned long flags;
	unsigned long new_pfn, retry_pfn;
	unsigned long align_mask = ~0UL;
	unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;

	if (size_aligned)
		align_mask <<= fls_long(size - 1);

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (limit_pfn <= iovad->dma_32bit_pfn &&
			size >= iovad->max32_alloc_size)
		goto iova32_full;

	curr = __get_cached_rbnode(iovad, limit_pfn);
	curr_iova = to_iova(curr);
	retry_pfn = curr_iova->pfn_hi + 1;

retry:
	do {
		high_pfn = min(high_pfn, curr_iova->pfn_lo);
		new_pfn = (high_pfn - size) & align_mask;
		prev = curr;
		curr = rb_prev(curr);
		curr_iova = to_iova(curr);
	} while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);

	if (high_pfn < size || new_pfn < low_pfn) {
		if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
			high_pfn = limit_pfn;
			low_pfn = retry_pfn;
			curr = iova_find_limit(iovad, limit_pfn);
			curr_iova = to_iova(curr);
			goto retry;
		}
		iovad->max32_alloc_size = size;
		goto iova32_full;
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = new_pfn;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* If we have 'prev', it's a valid place to start the insertion. */
	iova_insert_rbtree(&iovad->rbroot, new, prev);
	__cached_rbnode_insert_update(iovad, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return 0;

iova32_full:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return -ENOMEM;
}
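
/*
 * Worked example of the size-aligned, top-down calculation above (numbers
 * are illustrative): for size = 5, fls_long(4) = 3, so align_mask becomes
 * ~0UL << 3 and candidates are 8-page aligned.  With high_pfn = 0x10000,
 * new_pfn = (0x10000 - 5) & ~7UL = 0xfff8, the highest 8-aligned PFN whose
 * 5-page range [0xfff8, 0xfffc] still ends below high_pfn.
 */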

static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

static struct iova *alloc_iova_mem(void)
{
	return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
}

static void free_iova_mem(struct iova *iova)
{
	if (iova->pfn_lo != IOVA_ANCHOR)
		kmem_cache_free(iova_cache, iova);
}

int iova_cache_get(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		int ret;

		ret = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead", NULL,
					iova_cpuhp_dead);
		if (ret) {
			mutex_unlock(&iova_cache_mutex);
			pr_err("Couldn't register cpuhp handler\n");
			return ret;
		}

		iova_cache = kmem_cache_create(
			"iommu_iova", sizeof(struct iova), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache) {
			cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
			mutex_unlock(&iova_cache_mutex);
			pr_err("Couldn't create iova cache\n");
			return -ENOMEM;
		}
	}

	iova_cache_users++;
	mutex_unlock(&iova_cache_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
	mutex_lock(&iova_cache_mutex);
	if (WARN_ON(!iova_cache_users)) {
		mutex_unlock(&iova_cache_mutex);
		return;
	}
	iova_cache_users--;
	if (!iova_cache_users) {
		cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
		kmem_cache_destroy(iova_cache);
	}
	mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);
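
/*
 * The iova kmem cache and the CPU hotplug callback above are shared by all
 * IOVA domain users and refcounted via iova_cache_users.  A minimal usage
 * sketch, with illustrative probe/remove names:
 *
 *	static int my_probe(void)
 *	{
 *		return iova_cache_get();
 *	}
 *
 *	static void my_remove(void)
 *	{
 *		iova_cache_put();
 *	}
 *
 * The first get creates the "iommu_iova" cache and registers the hotplug
 * handler; the last put tears both down again.
 */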

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of the allocation, in page frames
 * @limit_pfn: - highest pfn that may be allocated (inclusive)
 * @size_aligned: - set if a size-aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_pow_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);
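
/*
 * Example: allocating a size-aligned range directly from the rbtree (no
 * rcache involved).  The domain, the 16-page size and the 32-bit limit are
 * illustrative assumptions; limit_pfn is inclusive.
 *
 *	struct iova *iova;
 *	unsigned long limit = DMA_BIT_MASK(32) >> iova_shift(&my_iovad);
 *
 *	iova = alloc_iova(&my_iovad, 16, limit, true);
 *	if (!iova)
 *		return -ENOMEM;
 *
 * The returned range is [iova->pfn_lo, iova->pfn_hi] and is released with
 * __free_iova().
 */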

static struct iova *
private_find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct rb_node *node = iovad->rbroot.rb_node;

	assert_spin_locked(&iovad->iova_rbtree_lock);

	while (node) {
		struct iova *iova = to_iova(node);

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_hi)
			node = node->rb_right;
		else
			return iova;	/* pfn falls within iova's range */
	}

	return NULL;
}

static void remove_iova(struct iova_domain *iovad, struct iova *iova)
{
	assert_spin_locked(&iovad->iova_rbtree_lock);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
}

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(find_iova);

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	remove_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(__free_iova);
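
/*
 * Example: looking up the iova covering a known PFN and releasing it (the
 * pfn variable is whatever the caller saved from an earlier allocation).
 * Note that find_iova() drops the rbtree lock before returning, so this
 * pair is only safe if the caller guarantees nobody else can free the same
 * range; free_iova() below does both steps under a single lock acquisition
 * instead.
 *
 *	struct iova *iova = find_iova(&my_iovad, pfn);
 *
 *	if (iova)
 *		__free_iova(&my_iovad, iova);
 */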

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that was previously allocated
 * This function finds the iova for a given pfn and then
 * frees that iova from the domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	if (!iova) {
		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
		return;
	}
	remove_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(free_iova);

/**
 * alloc_iova_fast - allocates an iova from rcache
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @flush_rcache: - set to flush rcache on regular allocation failure
 * This function tries to satisfy an iova allocation from the rcache,
 * and falls back to regular allocation on failure. If regular allocation
 * fails too and the flush_rcache flag is set then the rcache will be flushed.
 */
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
		unsigned long limit_pfn, bool flush_rcache)
{
	unsigned long iova_pfn;
	struct iova *new_iova;

	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (size < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		size = roundup_pow_of_two(size);

	iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
	if (iova_pfn)
		return iova_pfn;

retry:
	new_iova = alloc_iova(iovad, size, limit_pfn, true);
	if (!new_iova) {
		unsigned int cpu;

		if (!flush_rcache)
			return 0;

		/* Try replenishing IOVAs by flushing rcache. */
		flush_rcache = false;
		for_each_online_cpu(cpu)
			free_cpu_cached_iovas(cpu, iovad);
		free_global_cached_iovas(iovad);
		goto retry;
	}

	return new_iova->pfn_lo;
}
EXPORT_SYMBOL_GPL(alloc_iova_fast);

/**
 * free_iova_fast - free iova pfn range into rcache
 * @iovad: - iova domain in question.
 * @pfn: - pfn that was previously allocated
 * @size: - # of pages in range
 * This function frees an iova range by trying to put it into the rcache,
 * falling back to regular iova deallocation via free_iova() if this fails.
 */
void
free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
{
	if (iova_rcache_insert(iovad, pfn, size))
		return;

	free_iova(iovad, pfn);
}
EXPORT_SYMBOL_GPL(free_iova_fast);
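
/*
 * Example: the fast-path pairing used by callers such as the DMA-IOMMU
 * layer (names and sizes illustrative).  A 5-page request is rounded up to
 * 8 pages by alloc_iova_fast(); freeing with the same unrounded size still
 * hits the same rcache bucket because order_base_2(5) == order_base_2(8).
 *
 *	unsigned long limit = DMA_BIT_MASK(32) >> iova_shift(&my_iovad);
 *	unsigned long pfn;
 *
 *	pfn = alloc_iova_fast(&my_iovad, 5, limit, true);
 *	if (pfn)
 *		free_iova_fast(&my_iovad, pfn, 5);
 */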

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct iova *iova, *tmp;

	cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
					    &iovad->cpuhp_dead);
	free_iova_rcaches(iovad);
	rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
		free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(put_iova_domain);
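
/*
 * Example teardown ordering for the setup sketched above init_iova_domain()
 * (names illustrative): destroy the domain before dropping the iova_cache
 * reference, since put_iova_domain() still frees iovas back into the cache.
 *
 *	static void my_iova_teardown(void)
 *	{
 *		put_iova_domain(&my_iovad);
 *		iova_cache_put();
 *	}
 */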

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = to_iova(node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova, NULL);

	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that it is not handed out as part of alloc_iova().
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	/* Don't allow nonsensical pfns */
	if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad))))
		return NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = to_iova(node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
				break;
	}

	/*
	 * We are here either because this is the first reserved range,
	 * or because the remaining non-overlapping part of the range
	 * still needs to be inserted.
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);
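
/*
 * Example: carving out a range that alloc_iova() must never hand out, such
 * as an MSI doorbell window.  The 0xfee00000-0xfeefffff address range and
 * the error handling are illustrative assumptions.
 *
 *	unsigned long lo = iova_pfn(&my_iovad, 0xfee00000);
 *	unsigned long hi = iova_pfn(&my_iovad, 0xfeefffff);
 *
 *	if (!reserve_iova(&my_iovad, lo, hi))
 *		return -ENOMEM;
 */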

/*
 * Magazine caches for IOVA ranges.  For an introduction to magazines,
 * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
 * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
 * For simplicity, we use a static magazine size and don't implement the
 * dynamic size tuning described in the paper.
 */

#define IOVA_MAG_SIZE 128

struct iova_magazine {
	unsigned long size;
	unsigned long pfns[IOVA_MAG_SIZE];
};

struct iova_cpu_rcache {
	spinlock_t lock;
	struct iova_magazine *loaded;
	struct iova_magazine *prev;
};
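
/*
 * How the two per-CPU magazines above interact: allocations pop pfns from
 * 'loaded' and frees push onto it, while 'prev' acts as a one-deep victim
 * cache.  When 'loaded' runs empty on allocation but 'prev' still has
 * entries, the two are swapped rather than going back to the rbtree; the
 * same swap happens the other way around when 'loaded' fills up on free.
 * Only when both magazines are empty (or both full) does the code fall
 * back to the per-domain depot under rcache->lock.
 */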

static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
	return kzalloc(sizeof(struct iova_magazine), flags);
}

static void iova_magazine_free(struct iova_magazine *mag)
{
	kfree(mag);
}

static void
iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
{
	unsigned long flags;
	int i;

	if (!mag)
		return;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

	for (i = 0; i < mag->size; ++i) {
		struct iova *iova = private_find_iova(iovad, mag->pfns[i]);

		if (WARN_ON(!iova))
			continue;

		remove_iova(iovad, iova);
		free_iova_mem(iova);
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	mag->size = 0;
}

static bool iova_magazine_full(struct iova_magazine *mag)
{
	return (mag && mag->size == IOVA_MAG_SIZE);
}

static bool iova_magazine_empty(struct iova_magazine *mag)
{
	return (!mag || mag->size == 0);
}

static unsigned long iova_magazine_pop(struct iova_magazine *mag,
				       unsigned long limit_pfn)
{
	int i;
	unsigned long pfn;

	BUG_ON(iova_magazine_empty(mag));

	/* Only fall back to the rbtree if we have no suitable pfns at all */
	for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
		if (i == 0)
			return 0;

	/* Swap it to pop it */
	pfn = mag->pfns[i];
	mag->pfns[i] = mag->pfns[--mag->size];

	return pfn;
}
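
/*
 * Worked example of the pop above (values illustrative): with
 * mag->pfns = { 0x100, 0x200, 0x80000 }, mag->size = 3 and
 * limit_pfn = 0x1000, the scan starts at index 2, skips 0x80000 (over the
 * limit) and stops at index 1.  0x200 is returned, the last entry is
 * swapped into its slot, and the magazine becomes { 0x100, 0x80000 } with
 * size = 2.
 */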

static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
	BUG_ON(iova_magazine_full(mag));

	mag->pfns[mag->size++] = pfn;
}

static void init_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned int cpu;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_init(&rcache->lock);
		rcache->depot_size = 0;
		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
		if (WARN_ON(!rcache->cpu_rcaches))
			continue;
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			spin_lock_init(&cpu_rcache->lock);
			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
		}
	}
}

/*
 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
 * return true on success.  Can fail if rcache is full and we can't free
 * space, in which case free_iova_fast() (our only caller) falls back to
 * free_iova() and returns the IOVA range to the rbtree instead.
 */
static bool __iova_rcache_insert(struct iova_domain *iovad,
				 struct iova_rcache *rcache,
				 unsigned long iova_pfn)
{
	struct iova_magazine *mag_to_free = NULL;
	struct iova_cpu_rcache *cpu_rcache;
	bool can_insert = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_full(cpu_rcache->loaded)) {
		can_insert = true;
	} else if (!iova_magazine_full(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		can_insert = true;
	} else {
		struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);

		if (new_mag) {
			spin_lock(&rcache->lock);
			if (rcache->depot_size < MAX_GLOBAL_MAGS) {
				rcache->depot[rcache->depot_size++] =
						cpu_rcache->loaded;
			} else {
				mag_to_free = cpu_rcache->loaded;
			}
			spin_unlock(&rcache->lock);

			cpu_rcache->loaded = new_mag;
			can_insert = true;
		}
	}

	if (can_insert)
		iova_magazine_push(cpu_rcache->loaded, iova_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	if (mag_to_free) {
		iova_magazine_free_pfns(mag_to_free, iovad);
		iova_magazine_free(mag_to_free);
	}

	return can_insert;
}

static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
			       unsigned long size)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return false;

	return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
}

/*
 * Caller wants to allocate a new IOVA range from 'rcache'.  If we can
 * satisfy the request, remove a matching range from the 'rcache' and
 * return its base pfn; return 0 on failure.
 */
static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
				       unsigned long limit_pfn)
{
	struct iova_cpu_rcache *cpu_rcache;
	unsigned long iova_pfn = 0;
	bool has_pfn = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_empty(cpu_rcache->loaded)) {
		has_pfn = true;
	} else if (!iova_magazine_empty(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		has_pfn = true;
	} else {
		spin_lock(&rcache->lock);
		if (rcache->depot_size > 0) {
			iova_magazine_free(cpu_rcache->loaded);
			cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
			has_pfn = true;
		}
		spin_unlock(&rcache->lock);
	}

	if (has_pfn)
		iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	return iova_pfn;
}

/*
 * Try to satisfy IOVA allocation range from rcache.  Fail if the requested
 * size is too big, or if no cached entry satisfies the DMA limit we are
 * given.
 */
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return 0;

	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
}
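
/*
 * Note the 'limit_pfn - size' passed above: magazines store pfn_lo values,
 * while the limit from alloc_iova_fast() is exclusive for the whole range.
 * For example (illustrative numbers), with size = 8 and limit_pfn =
 * 0x100000, a cached pfn_lo of 0xffff8 is still acceptable (its range ends
 * at 0xfffff), whereas any larger pfn_lo would spill past the limit.
 */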

/*
 * free rcache data structures.
 */
static void free_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	struct iova_cpu_rcache *cpu_rcache;
	unsigned int cpu;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			iova_magazine_free(cpu_rcache->loaded);
			iova_magazine_free(cpu_rcache->prev);
		}
		free_percpu(rcache->cpu_rcaches);
		for (j = 0; j < rcache->depot_size; ++j)
			iova_magazine_free(rcache->depot[j]);
	}
}

/*
 * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
 */
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned long flags;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
		spin_lock_irqsave(&cpu_rcache->lock, flags);
		iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
		iova_magazine_free_pfns(cpu_rcache->prev, iovad);
		spin_unlock_irqrestore(&cpu_rcache->lock, flags);
	}
}

/*
 * free all the IOVA ranges in the global (depot) caches
 */
static void free_global_cached_iovas(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	unsigned long flags;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_irqsave(&rcache->lock, flags);
		for (j = 0; j < rcache->depot_size; ++j) {
			iova_magazine_free_pfns(rcache->depot[j], iovad);
			iova_magazine_free(rcache->depot[j]);
		}
		rcache->depot_size = 0;
		spin_unlock_irqrestore(&rcache->lock, flags);
	}
}
MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");