Lines Matching defs:pool

148 * Adjust the default number of areas in a memory pool.
149 * The default size of the memory pool may also change to meet minimum area
168 * limit_nareas() - get the maximum number of areas for a given memory pool size
170 * @nslots: Total number of slots in the memory pool.
173 * a memory pool of the given size.
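The fragments at lines 148-173 describe clamping the default number of areas to what a pool of a given size can actually hold. Below is a minimal userspace sketch of that clamping, assuming the kernel's IO_TLB_SEGSIZE of 128 slots per segment; the helper mirrors the name limit_nareas() but its body is illustrative, not the kernel implementation.

#include <stdio.h>

#define IO_TLB_SEGSIZE 128    /* slots per contiguous segment */

/* Cap the area count so each area holds at least one full segment. */
static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
{
    if (nslots < (unsigned long)nareas * IO_TLB_SEGSIZE)
        return nslots / IO_TLB_SEGSIZE;
    return nareas;
}

int main(void)
{
    /* a pool of 512 slots cannot usefully be split into 16 areas */
    printf("%u\n", limit_nareas(16, 512));    /* prints 4 */
    return 0;
}

The point is simply that a pool too small for the requested area count falls back to one area per segment's worth of slots.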
302 * add_mem_pool() - add a memory pool to the allocator
304 * @pool: Memory pool to be added.
306 static void add_mem_pool(struct io_tlb_mem *mem, struct io_tlb_pool *pool)
310 list_add_rcu(&pool->node, &mem->pools);
311 mem->nslabs += pool->nslabs;
314 mem->nslabs = pool->nslabs;
538 pr_info("tearing down default memory pool\n");
605 * @dev: Device for which a memory pool is allocated.
669 * swiotlb_alloc_pool() - allocate a new IO TLB memory pool
670 * @dev: Device for which a memory pool is allocated.
677 * Allocate and initialize a new IO TLB memory pool. The actual number of
681 * Return: New memory pool, or %NULL on allocation failure.
687 struct io_tlb_pool *pool;
698 pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas);
699 pool = kzalloc(pool_size, gfp);
700 if (!pool)
702 pool->areas = (void *)pool + sizeof(*pool);
713 slot_order = get_order(array_size(sizeof(*pool->slots), nslabs));
714 pool->slots = (struct io_tlb_slot *)
716 if (!pool->slots)
719 swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, nareas);
720 return pool;
725 kfree(pool);
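Lines 698-716 show swiotlb_alloc_pool() sizing one allocation to hold both the pool descriptor and its areas[] array, then pointing pool->areas just past the struct, while the slots array comes from a separate page allocation. A standalone C sketch of the descriptor-plus-trailing-array idiom; the slots allocation is omitted, and `pool + 1` stands in for the kernel's `(void *)pool + sizeof(*pool)`.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel structures referenced above. */
struct area { unsigned int index, used; };
struct pool { unsigned int nareas; struct area *areas; };

/*
 * Allocate the pool descriptor and its areas[] array in one block, then
 * point pool->areas just past the struct, as swiotlb_alloc_pool() does.
 */
static struct pool *alloc_pool(unsigned int nareas)
{
    size_t size = sizeof(struct pool) + nareas * sizeof(struct area);
    struct pool *pool = calloc(1, size);

    if (!pool)
        return NULL;
    pool->nareas = nareas;
    pool->areas = (struct area *)(pool + 1);
    return pool;
}

int main(void)
{
    struct pool *pool = alloc_pool(4);

    if (pool) {
        printf("%u areas at %p\n", pool->nareas, (void *)pool->areas);
        free(pool);    /* one free() releases descriptor and areas */
    }
    return 0;
}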
731 * swiotlb_dyn_alloc() - dynamic memory pool allocation worker
738 struct io_tlb_pool *pool;
740 pool = swiotlb_alloc_pool(NULL, IO_TLB_MIN_SLABS, default_nslabs,
742 if (!pool) {
743 pr_warn_ratelimited("Failed to allocate new pool");
747 add_mem_pool(mem, pool);
751 * swiotlb_dyn_free() - RCU callback to free a memory pool
756 struct io_tlb_pool *pool = container_of(rcu, struct io_tlb_pool, rcu);
757 size_t slots_size = array_size(sizeof(*pool->slots), pool->nslabs);
758 size_t tlb_size = pool->end - pool->start;
760 free_pages((unsigned long)pool->slots, get_order(slots_size));
761 swiotlb_free_tlb(pool->vaddr, tlb_size);
762 kfree(pool);
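Lines 756-762 free a dynamically added pool from an RCU callback: the callback receives only the embedded rcu_head, and container_of() recovers the enclosing io_tlb_pool before its slots, TLB memory, and descriptor are released. A plain-C model of that pattern, using a hypothetical io_tlb_pool_model structure and invoking the callback directly instead of deferring it through call_rcu().

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Plain-C equivalent of the kernel's container_of(). */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { void (*func)(struct rcu_head *); };

struct io_tlb_pool_model {
    unsigned long start, end;
    struct rcu_head rcu;    /* embedded callback head, as in struct io_tlb_pool */
};

/* Recover the enclosing pool from the embedded rcu_head and free it. */
static void dyn_free(struct rcu_head *rcu)
{
    struct io_tlb_pool_model *pool =
        container_of(rcu, struct io_tlb_pool_model, rcu);

    printf("freeing pool [%#lx, %#lx)\n", pool->start, pool->end);
    free(pool);
}

int main(void)
{
    struct io_tlb_pool_model *pool = calloc(1, sizeof(*pool));

    if (!pool)
        return 1;
    pool->start = 0x1000;
    pool->end = 0x5000;
    /* The kernel defers this via call_rcu(); here we call it directly. */
    dyn_free(&pool->rcu);
    return 0;
}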
766 * __swiotlb_find_pool() - find the IO TLB pool for a physical address
770 * Find the IO TLB memory pool descriptor which contains the given physical
775 * Return: Memory pool which contains @paddr, or %NULL if none.
780 struct io_tlb_pool *pool;
783 list_for_each_entry_rcu(pool, &mem->pools, node) {
784 if (paddr >= pool->start && paddr < pool->end)
788 list_for_each_entry_rcu(pool, &dev->dma_io_tlb_pools, node) {
789 if (paddr >= pool->start && paddr < pool->end)
792 pool = NULL;
795 return pool;
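Lines 780-795 walk two RCU-protected lists, the allocator-wide mem->pools and the device-local dma_io_tlb_pools holding transient pools, and pick the pool whose [start, end) range contains the physical address. A sketch of just the containment test, with a plain array standing in for the RCU list walk:

#include <stdio.h>

typedef unsigned long phys_addr_t;

/* Minimal pool descriptor: only the [start, end) range matters here. */
struct pool { phys_addr_t start, end; };

/* Return the pool whose range contains @paddr, or NULL (cf. __swiotlb_find_pool()). */
static struct pool *find_pool(struct pool *pools, int npools, phys_addr_t paddr)
{
    for (int i = 0; i < npools; i++)
        if (paddr >= pools[i].start && paddr < pools[i].end)
            return &pools[i];
    return NULL;
}

int main(void)
{
    struct pool pools[] = { { 0x1000, 0x2000 }, { 0x8000, 0xa000 } };
    struct pool *p = find_pool(pools, 2, 0x9000);

    printf("%s\n", p ? "hit" : "miss");    /* hit */
    return 0;
}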
799 * swiotlb_del_pool() - remove an IO TLB pool from a device
801 * @pool: Memory pool to be removed.
803 static void swiotlb_del_pool(struct device *dev, struct io_tlb_pool *pool)
808 list_del_rcu(&pool->node);
811 call_rcu(&pool->rcu, swiotlb_dyn_free);
1001 * swiotlb_search_pool_area() - search one memory area in one pool
1003 * @pool: Memory pool to be searched.
1016 static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool,
1020 struct io_tlb_area *area = pool->areas + area_index;
1023 phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
1034 BUG_ON(area_index >= pool->nareas);
1062 if (unlikely(nslots > pool->area_nslabs - area->used))
1065 slot_base = area_index * pool->area_nslabs;
1068 for (slots_checked = 0; slots_checked < pool->area_nslabs; ) {
1077 index = wrap_area_index(pool, index + 1);
1085 if (pool->slots[slot_index].list >= nslots)
1088 index = wrap_area_index(pool, index + stride);
1103 pool->slots[i].list = 0;
1104 pool->slots[i].alloc_size = alloc_size - (offset +
1109 pool->slots[i].list; i--)
1110 pool->slots[i].list = ++count;
1115 area->index = wrap_area_index(pool, index + nslots);
1134 * @retpool: Used memory pool, updated on return.
1146 struct io_tlb_pool *pool;
1151 list_for_each_entry_rcu(pool, &mem->pools, node) {
1152 if (cpu_offset >= pool->nareas)
1154 area_index = (start_cpu + cpu_offset) & (pool->nareas - 1);
1155 index = swiotlb_search_pool_area(dev, pool, area_index,
1159 *retpool = pool;
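Lines 1146-1159 spread concurrent mappings across areas by starting each CPU at a different area index; nareas is a power of two, so masking with (nareas - 1) acts as a cheap modulo. A one-line illustration of that index computation (the cpu/offset values are hypothetical):

#include <stdio.h>

/* Pick the starting area for a CPU, as in the search loop above. */
static unsigned int start_area(unsigned int cpu, unsigned int offset,
                               unsigned int nareas)
{
    return (cpu + offset) & (nareas - 1);
}

int main(void)
{
    /* hypothetical: CPU 5, second pool in the walk (offset 1), 4 areas */
    printf("area %u\n", start_area(5, 1, 4));    /* area 2 */
    return 0;
}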
1174 * @retpool: Used memory pool, updated on return.
1186 struct io_tlb_pool *pool;
1199 alloc_align_mask, &pool);
1211 pool = swiotlb_alloc_pool(dev, nslabs, nslabs, 1, phys_limit,
1213 if (!pool)
1216 index = swiotlb_search_pool_area(dev, pool, 0, orig_addr,
1219 swiotlb_dyn_free(&pool->rcu);
1223 pool->transient = true;
1225 list_add_rcu(&pool->node, &dev->dma_io_tlb_pools);
1227 inc_transient_used(mem, pool->nslabs);
1251 *retpool = pool;
1261 struct io_tlb_pool *pool;
1265 *retpool = pool = &dev->dma_io_tlb_mem->defpool;
1266 i = start = raw_smp_processor_id() & (pool->nareas - 1);
1268 index = swiotlb_search_pool_area(dev, pool, i, orig_addr,
1272 if (++i >= pool->nareas)
1299 * mem_pool_used() - get number of used slots in a memory pool
1300 * @pool: Software IO TLB memory pool.
1306 static unsigned long mem_pool_used(struct io_tlb_pool *pool)
1311 for (i = 0; i < pool->nareas; i++)
1312 used += pool->areas[i].used;
1328 struct io_tlb_pool *pool;
1332 list_for_each_entry_rcu(pool, &mem->pools, node)
1333 used += mem_pool_used(pool);
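Lines 1306-1333 compute occupancy by summing the per-area used counters of one pool and then summing over all pools on the RCU list. A standalone sketch of the per-pool summation, with simplified stand-in structures:

#include <stdio.h>

/* Simplified stand-ins for the kernel structures referenced above. */
struct io_tlb_area { unsigned long used; };
struct io_tlb_pool { unsigned int nareas; struct io_tlb_area *areas; };

/* Sum the per-area counters of one pool, as mem_pool_used() does. */
static unsigned long mem_pool_used(struct io_tlb_pool *pool)
{
    unsigned long used = 0;

    for (unsigned int i = 0; i < pool->nareas; i++)
        used += pool->areas[i].used;
    return used;
}

int main(void)
{
    struct io_tlb_area areas[4] = { {3}, {0}, {7}, {1} };
    struct io_tlb_pool pool = { .nareas = 4, .areas = areas };

    printf("%lu slots used\n", mem_pool_used(&pool));    /* 11 */
    return 0;
}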
1375 struct io_tlb_pool *pool;
1392 * The default swiotlb memory pool is allocated with PAGE_SIZE
1403 index = swiotlb_find_slots(dev, orig_addr, size, alloc_align_mask, &pool);
1426 pool->slots[index].pad_slots = pad_slots;
1428 pool->slots[index + i].orig_addr = slot_addr(orig_addr, i);
1429 tlb_addr = slot_addr(pool->start, index) + offset;
1439 swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE, pool);
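Lines 1403-1439 turn the slot index returned by the search into a bounce-buffer address: slot_addr() scales the index by the 2 KiB slot size (IO_TLB_SHIFT == 11), and the sub-slot offset of the original buffer is preserved so the bounce copy keeps its alignment. A sketch of that arithmetic; the real offset comes from swiotlb_align_offset(), which also honors the device's min_align_mask, and the addresses below are hypothetical.

#include <stdio.h>

#define IO_TLB_SHIFT 11                 /* 2 KiB bounce slots */
#define IO_TLB_SIZE  (1UL << IO_TLB_SHIFT)

/* slot_addr(): physical address of slot @idx inside a pool starting at @start. */
static unsigned long slot_addr(unsigned long start, unsigned long idx)
{
    return start + (idx << IO_TLB_SHIFT);
}

int main(void)
{
    unsigned long pool_start = 0x80000000UL;    /* hypothetical pool base */
    unsigned long orig_addr  = 0x12345678UL;    /* hypothetical buffer */
    unsigned long index = 3;                    /* slot found by the search */
    /* keep the sub-slot offset so the bounce buffer mirrors the original alignment */
    unsigned long offset = orig_addr & (IO_TLB_SIZE - 1);

    printf("tlb_addr = %#lx\n", slot_addr(pool_start, index) + offset);
    return 0;
}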
1462 * with slots below and above the pool being returned.
1500 * swiotlb_del_transient() - delete a transient memory pool
1503 * @pool: Pointer to the transient memory pool to be checked and deleted.
1505 * Check whether the address belongs to a transient SWIOTLB memory pool.
1506 * If yes, then delete the pool.
1508 * Return: %true if @tlb_addr belonged to a transient pool that was released.
1511 struct io_tlb_pool *pool)
1513 if (!pool->transient)
1516 dec_used(dev->dma_io_tlb_mem, pool->nslabs);
1517 swiotlb_del_pool(dev, pool);
1518 dec_transient_used(dev->dma_io_tlb_mem, pool->nslabs);
1525 phys_addr_t tlb_addr, struct io_tlb_pool *pool)
1537 unsigned long attrs, struct io_tlb_pool *pool)
1545 DMA_FROM_DEVICE, pool);
1547 if (swiotlb_del_transient(dev, tlb_addr, pool))
1549 swiotlb_release_slots(dev, tlb_addr, pool);
1554 struct io_tlb_pool *pool)
1557 swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE, pool);
1564 struct io_tlb_pool *pool)
1567 swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE, pool);
1639 * Get the lowest physical address used by the default software IO TLB pool.
1652 * Get the highest physical address used by the default software IO TLB pool.
1754 struct io_tlb_pool *pool;
1763 index = swiotlb_find_slots(dev, 0, size, align, &pool);
1767 tlb_addr = slot_addr(pool->start, index);
1771 swiotlb_release_slots(dev, tlb_addr, pool);
1781 struct io_tlb_pool *pool;
1783 pool = swiotlb_find_pool(dev, tlb_addr);
1784 if (!pool)
1787 swiotlb_release_slots(dev, tlb_addr, pool);
1802 dev_err(dev, "Restricted DMA pool must be accessible within the linear mapping.");
1807 * Since multiple devices can share the same pool, the private data,
1812 struct io_tlb_pool *pool;
1817 pool = &mem->defpool;
1819 pool->slots = kcalloc(nslabs, sizeof(*pool->slots), GFP_KERNEL);
1820 if (!pool->slots) {
1825 pool->areas = kcalloc(nareas, sizeof(*pool->areas),
1827 if (!pool->areas) {
1828 kfree(pool->slots);
1835 swiotlb_init_io_tlb_pool(pool, rmem->base, nslabs,
1843 add_mem_pool(mem, pool);
1877 pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
1882 RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);