Lines Matching full:iommu
/*
 * IOMMU mmap management and range allocation functions.
 * Based almost entirely upon the powerpc iommu allocator.
 */
#include <linux/iommu-helper.h>
#include <asm/iommu-common.h>
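Of these headers, linux/iommu-helper.h supplies the generic bitmap-search primitive that does the actual range finding below. For reference, its declaration (quoted from memory; the header itself is authoritative):

	extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
					      unsigned long start, unsigned int nr,
					      unsigned long shift,
					      unsigned long boundary_size,
					      unsigned long align_mask);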
static inline bool need_flush(struct iommu_map_table *iommu)
{
	return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
}

static inline void set_flush(struct iommu_map_table *iommu)
{
	iommu->flags |= IOMMU_NEED_FLUSH;
}

static inline void clear_flush(struct iommu_map_table *iommu)
{
	iommu->flags &= ~IOMMU_NEED_FLUSH;
}
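These three helpers track whether a lazy IOTLB flush is still pending: the flag is set whenever an allocation rescans space that may have been freed, and it is consumed in iommu_tbl_range_alloc() before a recycled range is handed out. For orientation, a sketch of the structures they operate on, paraphrased from asm/iommu-common.h (field comments added here; check the header for the authoritative layout):

struct iommu_pool {
	unsigned long	start;	/* first map-table index owned by the pool */
	unsigned long	end;	/* last map-table index owned by the pool */
	unsigned long	hint;	/* where the next search begins */
	spinlock_t	lock;
};

struct iommu_map_table {
	unsigned long		table_map_base;	/* IOVA corresponding to entry 0 */
	unsigned long		table_shift;	/* log2 of the IOMMU page size */
	unsigned long		nr_pools;
	void			(*lazy_flush)(struct iommu_map_table *);
	unsigned long		poolsize;
	struct iommu_pool	pools[IOMMU_NR_POOLS];
	u32			flags;		/* IOMMU_NEED_FLUSH and friends */
	struct iommu_pool	large_pool;
	unsigned long		*map;		/* the allocation bitmap */
};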
void iommu_tbl_pool_init(struct iommu_map_table *iommu,
			 unsigned long num_entries,
			 u32 table_shift,
			 void (*lazy_flush)(struct iommu_map_table *),
			 bool large_pool, u32 npools,
			 bool skip_span_boundary_check)
{
	unsigned int start = 0, i;
	struct iommu_pool *p = &(iommu->large_pool);

	if (npools == 0)
		iommu->nr_pools = IOMMU_NR_POOLS;
	else
		iommu->nr_pools = npools;

	iommu->table_shift = table_shift;
	iommu->lazy_flush = lazy_flush;
	if (skip_span_boundary_check)
		iommu->flags |= IOMMU_NO_SPAN_BOUND;
	if (large_pool)
		iommu->flags |= IOMMU_HAS_LARGE_POOL;

	if (!large_pool)
		iommu->poolsize = num_entries/iommu->nr_pools;
	else
		iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
	for (i = 0; i < iommu->nr_pools; i++) {
		spin_lock_init(&(iommu->pools[i].lock));
		iommu->pools[i].start = start;
		iommu->pools[i].hint = start;
		start += iommu->poolsize; /* start for next pool */
		iommu->pools[i].end = start - 1;
	}
	/* ... if present, the large pool (p) covering [start, num_entries) is set up here ... */
}
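A minimal sketch of how a driver might initialize such a table; the names (my_tbl, my_lazy_flush), the IOVA base, and the 8 KB page size are illustrative assumptions, not taken from this file:

#include <linux/slab.h>
#include <asm/iommu-common.h>

#define MY_TABLE_SHIFT	13			/* assumed: 8 KB IOMMU pages */

static struct iommu_map_table my_tbl;		/* hypothetical table */

static void my_lazy_flush(struct iommu_map_table *tbl)
{
	/* a real driver would flush the hardware IOTLB here */
}

static int my_iommu_setup(unsigned long num_entries)
{
	my_tbl.table_map_base = 0x80000000UL;	/* assumed IOVA base */
	my_tbl.map = kzalloc(BITS_TO_LONGS(num_entries) * sizeof(long),
			     GFP_KERNEL);
	if (!my_tbl.map)
		return -ENOMEM;
	iommu_tbl_pool_init(&my_tbl, num_entries, MY_TABLE_SHIFT,
			    my_lazy_flush, true /* large pool */,
			    0 /* default pool count */,
			    false /* keep span-boundary checks */);
	return 0;
}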
unsigned long iommu_tbl_range_alloc(struct device *dev,
				struct iommu_map_table *iommu,
				unsigned long npages,
				unsigned long *handle,
				unsigned long mask,
				unsigned int align_order)
{
	unsigned int npools = iommu->nr_pools;
	bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
	bool largealloc = (large_pool && npages > iommu_large_alloc);
	/* ... remaining declarations and the npages sanity check elided ... */

	if (largealloc)
		pool = &(iommu->large_pool);
	else
		pool = &(iommu->pools[pool_nr]); /* pool_nr hashed from the CPU */
	/* ... spin_lock_irqsave(&pool->lock, flags) ... */
again:
	/* ... choose start from a valid *handle or pool->hint; limit = pool->end ... */
	shift = iommu->table_map_base >> iommu->table_shift;
	if (limit + shift > mask) {
		/* ... the DMA mask constrains the range; on a retry pass
		 * restart from the bottom of the table ... */
		pool = &(iommu->pools[0]);
	}
	if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
		shift = 0;
		boundary_size = iommu->poolsize * iommu->nr_pools;
	} else {
		boundary_size = dma_get_seg_boundary_nr_pages(dev,
					iommu->table_shift);
	}
	n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
			     boundary_size, align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First failure: rescan this pool from its start. */
			pool->hint = pool->start;
			set_flush(iommu);
			pass++;
			goto again;
		} else if (!largealloc && pass <= iommu->nr_pools) {
			/* Still failing: move on to the next pool. */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
			pool = &(iommu->pools[pool_nr]);
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			set_flush(iommu);
			pass++;
			goto again;
		}
		/* ... all pools tried: give up and return IOMMU_ERROR_CODE ... */
	}
	if (iommu->lazy_flush &&
	    (n < pool->hint || need_flush(iommu))) {
		clear_flush(iommu);
		iommu->lazy_flush(iommu);
	}
	/* ... advance pool->hint past the range, update *handle, unlock, return n ... */
}
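Continuing the hypothetical my_tbl sketch above, the calling convention might look like this; IOMMU_ERROR_CODE is the all-ones failure value defined alongside the map table:

static dma_addr_t my_map_range(struct device *dev, unsigned long npages)
{
	unsigned long entry;

	entry = iommu_tbl_range_alloc(dev, &my_tbl, npages,
				      NULL /* no scatterlist handle */,
				      ~0UL /* unrestricted DMA mask */,
				      0 /* no extra alignment */);
	if (entry == IOMMU_ERROR_CODE)
		return 0; /* table exhausted; caller treats 0 as failure */

	/* A bitmap index maps linearly to an IO virtual address. */
	/* ... program the hardware translation entries for the range ... */
	return my_tbl.table_map_base + (entry << my_tbl.table_shift);
}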
/* Caller supplies the index of the first entry into the iommu map
 * table and the number of pages to free the range for.
 */
void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
			  unsigned long npages, unsigned long entry)
{
	struct iommu_pool *pool;
	unsigned long flags;
	unsigned long shift = iommu->table_shift;

	if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */
		entry = (dma_addr - iommu->table_map_base) >> shift;
	pool = get_pool(iommu, entry);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(iommu->map, entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}
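And the matching hypothetical unmap path. Passing IOMMU_ERROR_CODE as entry asks the allocator to recompute the bitmap index from dma_addr, exactly as in the default branch above:

static void my_unmap_range(dma_addr_t dma_addr, unsigned long npages)
{
	/* invalidate the hardware translation entries first, then
	 * release the bitmap range back to its pool */
	iommu_tbl_range_free(&my_tbl, dma_addr, npages, IOMMU_ERROR_CODE);
}

Splitting the bitmap into CPU-hashed pools, each with its own spinlock, keeps concurrent mappings on different CPUs from serializing on a single lock, while the lazy-flush flag defers the expensive IOTLB flush until an allocation could actually hand out a previously freed range.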