Lines Matching +full:pool +full:- +full:long

// SPDX-License-Identifier: GPL-2.0
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <asm/iommu-common.h>

static unsigned long iommu_large_alloc = 15;

/* in need_flush() */
	return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);

/* in set_flush() */
	iommu->flags |= IOMMU_NEED_FLUSH;

/* in clear_flush() */
	iommu->flags &= ~IOMMU_NEED_FLUSH;

 * the top 1/4 of the table will be set aside for pool allocations

/* in iommu_tbl_pool_init() */
			 unsigned long num_entries,
	struct iommu_pool *p = &(iommu->large_pool);
		iommu->nr_pools = IOMMU_NR_POOLS;
		iommu->nr_pools = npools;
	iommu->table_shift = table_shift;
	iommu->lazy_flush = lazy_flush;
		iommu->flags |= IOMMU_NO_SPAN_BOUND;
		iommu->flags |= IOMMU_HAS_LARGE_POOL;
		iommu->poolsize = num_entries/iommu->nr_pools;
		iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
	for (i = 0; i < iommu->nr_pools; i++) {
		spin_lock_init(&(iommu->pools[i].lock));
		iommu->pools[i].start = start;
		iommu->pools[i].hint = start;
		start += iommu->poolsize; /* start for next pool */
		iommu->pools[i].end = start - 1;
	spin_lock_init(&(p->lock));
	p->start = start;
	p->hint = p->start;
	p->end = num_entries;
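
Taken together, these fragments (which appear to come from the kernel's shared IOMMU pool allocator, iommu-common.c) show how iommu_tbl_pool_init() slices the table: with no large pool every regular pool gets num_entries / nr_pools entries, while with IOMMU_HAS_LARGE_POOL set the regular pools share only the bottom 3/4 of the table and the large pool runs from the end of the last regular pool up to num_entries. A standalone sketch of that arithmetic, using hypothetical numbers that are not taken from the kernel source:

#include <stdio.h>

/* Hypothetical example: 1024 table entries, 16 regular pools, large pool
 * enabled, so the top quarter of the table is reserved for large allocations.
 */
int main(void)
{
	unsigned long num_entries = 1024, nr_pools = 16;
	unsigned long poolsize = (num_entries * 3 / 4) / nr_pools; /* 768 / 16 = 48 */
	unsigned long start = 0;
	unsigned long i;

	for (i = 0; i < nr_pools; i++) {
		printf("pool %2lu: entries [%4lu, %4lu]\n",
		       i, start, start + poolsize - 1);
		start += poolsize;
	}
	/* The large pool takes the remaining top quarter: [768, 1024). */
	printf("large pool: entries [%4lu, %4lu)\n", start, num_entries);
	return 0;
}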
/* in iommu_tbl_range_alloc() */
unsigned long iommu_tbl_range_alloc(struct device *dev,
				unsigned long npages,
				unsigned long *handle,
				unsigned long mask,
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_pool *pool;
	unsigned int npools = iommu->nr_pools;
	unsigned long flags;
	bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
	unsigned long shift;
	unsigned long align_mask = 0;

		align_mask = ~0ul >> (BITS_PER_LONG - align_order);

		pool = &(iommu->large_pool);
		pool_nr = pool_hash & (npools - 1);
		pool = &(iommu->pools[pool_nr]);
	spin_lock_irqsave(&pool->lock, flags);

	    (*handle >= pool->start) && (*handle < pool->end))
		start = pool->hint;

	limit = pool->end;

		start = pool->start;
	shift = iommu->table_map_base >> iommu->table_shift;
		limit = mask - shift + 1;
		 * but on second pass, start at 0 in pool 0.
			spin_unlock(&(pool->lock));
			pool = &(iommu->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;

	if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
		boundary_size = iommu->poolsize * iommu->nr_pools;
				      iommu->table_shift);
	n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
	if (n == -1) {
			pool->hint = pool->start;
		} else if (!largealloc && pass <= iommu->nr_pools) {
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
			pool = &(iommu->pools[pool_nr]);
			spin_lock(&(pool->lock));
			pool->hint = pool->start;

	if (iommu->lazy_flush &&
	    (n < pool->hint || need_flush(iommu))) {
		iommu->lazy_flush(iommu);

	pool->hint = end;

	spin_unlock_irqrestore(&(pool->lock), flags);
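
The allocation path visible above picks a pool (the large pool for requests bigger than iommu_large_alloc pages, otherwise a hash-selected regular pool), takes its lock, and searches the bitmap with iommu_area_alloc(); on failure it first rescans the same pool from its start, then walks the remaining pools for up to nr_pools passes before giving up. As a usage sketch, the helper below is hypothetical and assumes the prototype iommu_tbl_range_alloc(dev, tbl, npages, handle, mask, align_order) as recalled from the iommu-common header, so verify it against your tree:

#include <linux/dma-mapping.h>
#include <asm/iommu-common.h>

/* Hypothetical driver helper (not from this file): allocate @npages
 * contiguous table entries and convert the returned entry number into
 * a DMA address using the table's base and shift.
 */
static dma_addr_t example_map_pages(struct device *dev,
				    struct iommu_map_table *tbl,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
				      ~0ul /* no addressing limit */,
				      0 /* no extra alignment */);
	if (entry == IOMMU_ERROR_CODE)
		return DMA_MAPPING_ERROR;

	return tbl->table_map_base + (entry << tbl->table_shift);
}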
/* in get_pool() */
			      unsigned long entry)
	unsigned long largepool_start = tbl->large_pool.start;
	bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);

	/* The large pool is the last pool at the top of the table */
		p = &tbl->large_pool;
		unsigned int pool_nr = entry / tbl->poolsize;
		BUG_ON(pool_nr >= tbl->nr_pools);
		p = &tbl->pools[pool_nr];
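
With the hypothetical numbers from the earlier sketch (16 regular pools of 48 entries and a large pool starting at entry 768), get_pool() would map entry 500 to pool 500 / 48 = 10, while entry 900 is at or above largepool_start and resolves to the large pool.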
 * default addr->entry mapping below.

/* in iommu_tbl_range_free() */
			  unsigned long npages, unsigned long entry)
	struct iommu_pool *pool;
	unsigned long flags;
	unsigned long shift = iommu->table_shift;

	if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */
		entry = (dma_addr - iommu->table_map_base) >> shift;
	pool = get_pool(iommu, entry);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(iommu->map, entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
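
The free path mirrors the allocation path: when the caller passes IOMMU_ERROR_CODE as the entry, the entry number is recomputed from the DMA address and table_map_base, then the owning pool's lock is taken while the bitmap bits are cleared. A hypothetical counterpart to the map helper sketched earlier, again assuming the prototype recalled from the iommu-common header:

#include <linux/types.h>
#include <asm/iommu-common.h>

/* Hypothetical unmap helper: recover the entry from the DMA address by
 * passing IOMMU_ERROR_CODE, and release @npages entries back to the pool.
 */
static void example_unmap_pages(struct iommu_map_table *tbl,
				dma_addr_t dma_addr, unsigned long npages)
{
	iommu_tbl_range_free(tbl, dma_addr, npages, IOMMU_ERROR_CODE);
}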