Lines matching +full:pool +full:- +full:long in kernel/dma/swiotlb.c

1 // SPDX-License-Identifier: GPL-2.0-only
9 * Copyright (C) 2000, 2003 Hewlett-Packard Co
10 * David Mosberger-Tang <davidm@hpl.hp.com>
12 * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
14 * unnecessary i-cache flushing.
27 #include <linux/dma-direct.h>
28 #include <linux/dma-map-ops.h>
33 #include <linux/iommu-helper.h>
55 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
67 * struct io_tlb_slot - IO TLB slot descriptor
73 * allocated non-padding slot.
102 static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
103 static unsigned long default_nareas;
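
The sizing constants used above come from include/linux/swiotlb.h, which is not among the matched lines; the values assumed below (2 KiB slots, 128-slot segments, a 64 MiB default pool, 4 KiB pages) are the usual defaults rather than something this listing proves. A standalone sketch of how default_nslabs falls out of them:

#include <stdio.h>

#define PAGE_SHIFT		12
#define IO_TLB_SHIFT		11			/* 2 KiB slots ("slabs") */
#define IO_TLB_SIZE		(1UL << IO_TLB_SHIFT)
#define IO_TLB_SEGSIZE		128
#define IO_TLB_DEFAULT_SIZE	(64UL << 20)		/* 64 MiB */
#define SLABS_PER_PAGE		(1UL << (PAGE_SHIFT - IO_TLB_SHIFT))

int main(void)
{
	printf("slot size:       %lu bytes\n", IO_TLB_SIZE);
	printf("slots per page:  %lu\n", SLABS_PER_PAGE);
	printf("default nslabs:  %lu\n", IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT);
	return 0;
}
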
106 * struct io_tlb_area - IO TLB memory area descriptor
116 unsigned long used;
145 * swiotlb_adjust_nareas() - adjust the number of areas and slots
148 * Adjust the default number of areas in a memory pool.
149 * The default size of the memory pool may also change to meet minimum area
168 * limit_nareas() - get the maximum number of areas for a given memory pool size
170 * @nslots: Total number of slots in the memory pool.
173 * a memory pool of the given size.
177 static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
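
Only the kernel-doc and the signature of limit_nareas() appear in the matches. The userspace model below follows that description (each area must be able to hold at least one full IO_TLB_SEGSIZE segment) and is a reconstruction, not the verbatim kernel body:

#include <stdio.h>

#define IO_TLB_SEGSIZE	128

static unsigned int limit_nareas_model(unsigned int nareas,
				       unsigned long nslots)
{
	/* Cap the area count so every area holds at least one full segment. */
	if (nslots < nareas * IO_TLB_SEGSIZE)
		return nslots / IO_TLB_SEGSIZE;
	return nareas;
}

int main(void)
{
	/* The 64 MiB default pool (32768 slots) supports 64 areas as-is. */
	printf("%u\n", limit_nareas_model(64, 32768));
	/* A 1 MiB pool (512 slots) is clamped to 4 areas. */
	printf("%u\n", limit_nareas_model(64, 512));
	return 0;
}
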
207 unsigned long swiotlb_size_or_default(void)
212 void __init swiotlb_adjust_size(unsigned long size)
233 if (!mem->nslabs) {
238 pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
239 (mem->nslabs << IO_TLB_SHIFT) >> 20);
242 static inline unsigned long io_tlb_offset(unsigned long val)
244 return val & (IO_TLB_SEGSIZE - 1);
247 static inline unsigned long nr_slots(u64 val)
261 unsigned long bytes;
263 if (!mem->nslabs || mem->late_alloc)
265 bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
266 set_memory_decrypted((unsigned long)mem->vaddr, bytes >> PAGE_SHIFT);
270 unsigned long nslabs, bool late_alloc, unsigned int nareas)
273 unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
275 mem->nslabs = nslabs;
276 mem->start = start;
277 mem->end = mem->start + bytes;
278 mem->late_alloc = late_alloc;
279 mem->nareas = nareas;
280 mem->area_nslabs = nslabs / mem->nareas;
282 for (i = 0; i < mem->nareas; i++) {
283 spin_lock_init(&mem->areas[i].lock);
284 mem->areas[i].index = 0;
285 mem->areas[i].used = 0;
288 for (i = 0; i < mem->nslabs; i++) {
289 mem->slots[i].list = min(IO_TLB_SEGSIZE - io_tlb_offset(i),
290 mem->nslabs - i);
291 mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
292 mem->slots[i].alloc_size = 0;
293 mem->slots[i].pad_slots = 0;
297 mem->vaddr = vaddr;
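
The initialization loop ending here establishes the free-list invariant: slots[i].list is the number of contiguous free slots starting at i, never crossing an IO_TLB_SEGSIZE boundary. A runnable userspace model of that invariant (IO_TLB_SEGSIZE == 128 assumed):

#include <stdio.h>

#define IO_TLB_SEGSIZE	128

static unsigned long io_tlb_offset(unsigned long val)
{
	return val & (IO_TLB_SEGSIZE - 1);
}

int main(void)
{
	enum { NSLABS = 256 };		/* two segments, for illustration */
	unsigned int list[NSLABS];
	unsigned long i;

	for (i = 0; i < NSLABS; i++) {
		unsigned long to_seg_end = IO_TLB_SEGSIZE - io_tlb_offset(i);
		unsigned long to_pool_end = NSLABS - i;

		list[i] = to_seg_end < to_pool_end ? to_seg_end : to_pool_end;
	}

	/* Prints 128 127 1 128: each segment restarts the countdown. */
	printf("%u %u %u %u\n", list[0], list[1], list[127], list[128]);
	return 0;
}
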
302 * add_mem_pool() - add a memory pool to the allocator
304 * @pool: Memory pool to be added.
306 static void add_mem_pool(struct io_tlb_mem *mem, struct io_tlb_pool *pool)
309 spin_lock(&mem->lock);
310 list_add_rcu(&pool->node, &mem->pools);
311 mem->nslabs += pool->nslabs;
312 spin_unlock(&mem->lock);
314 mem->nslabs = pool->nslabs;
318 static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
320 int (*remap)(void *tlb, unsigned long nslabs))
355 int (*remap)(void *tlb, unsigned long nslabs))
358 unsigned long nslabs;
375 io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
393 pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs",
398 alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
399 mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
400 if (!mem->slots) {
406 mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
408 if (!mem->areas) {
409 pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
431 int (*remap)(void *tlb, unsigned long nslabs))
434 unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
457 io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
472 order--;
478 return -ENOMEM;
483 free_pages((unsigned long)vstart, order);
498 area_order = get_order(array_size(sizeof(*mem->areas), nareas));
499 mem->areas = (struct io_tlb_area *)
501 if (!mem->areas)
504 mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
505 get_order(array_size(sizeof(*mem->slots), nslabs)));
506 if (!mem->slots)
509 set_memory_decrypted((unsigned long)vstart,
519 free_pages((unsigned long)mem->areas, area_order);
521 free_pages((unsigned long)vstart, order);
522 return -ENOMEM;
528 unsigned long tbl_vaddr;
535 if (!mem->nslabs)
538 pr_info("tearing down default memory pool\n");
539 tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
540 tbl_size = PAGE_ALIGN(mem->end - mem->start);
541 slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));
544 if (mem->late_alloc) {
545 area_order = get_order(array_size(sizeof(*mem->areas),
546 mem->nareas));
547 free_pages((unsigned long)mem->areas, area_order);
549 free_pages((unsigned long)mem->slots, get_order(slots_size));
551 memblock_free_late(__pa(mem->areas),
552 array_size(sizeof(*mem->areas), mem->nareas));
553 memblock_free_late(mem->start, tbl_size);
554 memblock_free_late(__pa(mem->slots), slots_size);
563 * alloc_dma_pages() - allocate pages to be used for DMA
571 * Return: Decrypted pages, %NULL on allocation failure, or ERR_PTR(-EAGAIN)
586 if (paddr + bytes - 1 > phys_limit) {
588 return ERR_PTR(-EAGAIN);
592 if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes)))
598 if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
604 * swiotlb_alloc_tlb() - allocate a dynamic IO TLB buffer
605 * @dev: Device for which a memory pool is allocated.
653 * swiotlb_free_tlb() - free a dynamically allocated IO TLB buffer
664 if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
669 * swiotlb_alloc_pool() - allocate a new IO TLB memory pool
670 * @dev: Device for which a memory pool is allocated.
677 * Allocate and initialize a new IO TLB memory pool. The actual number of
681 * Return: New memory pool, or %NULL on allocation failure.
684 unsigned long minslabs, unsigned long nslabs,
687 struct io_tlb_pool *pool;
698 pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas);
699 pool = kzalloc(pool_size, gfp);
700 if (!pool)
702 pool->areas = (void *)pool + sizeof(*pool);
713 slot_order = get_order(array_size(sizeof(*pool->slots), nslabs));
714 pool->slots = (struct io_tlb_slot *)
716 if (!pool->slots)
719 swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, nareas);
720 return pool;
725 kfree(pool);
731 * swiotlb_dyn_alloc() - dynamic memory pool allocation worker
738 struct io_tlb_pool *pool;
740 pool = swiotlb_alloc_pool(NULL, IO_TLB_MIN_SLABS, default_nslabs,
741 default_nareas, mem->phys_limit, GFP_KERNEL);
742 if (!pool) {
743 pr_warn_ratelimited("Failed to allocate new pool");
747 add_mem_pool(mem, pool);
751 * swiotlb_dyn_free() - RCU callback to free a memory pool
756 struct io_tlb_pool *pool = container_of(rcu, struct io_tlb_pool, rcu);
757 size_t slots_size = array_size(sizeof(*pool->slots), pool->nslabs);
758 size_t tlb_size = pool->end - pool->start;
760 free_pages((unsigned long)pool->slots, get_order(slots_size));
761 swiotlb_free_tlb(pool->vaddr, tlb_size);
762 kfree(pool);
766 * __swiotlb_find_pool() - find the IO TLB pool for a physical address
770 * Find the IO TLB memory pool descriptor which contains the given physical
775 * Return: Memory pool which contains @paddr, or %NULL if none.
779 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
780 struct io_tlb_pool *pool;
783 list_for_each_entry_rcu(pool, &mem->pools, node) {
784 if (paddr >= pool->start && paddr < pool->end)
788 list_for_each_entry_rcu(pool, &dev->dma_io_tlb_pools, node) {
789 if (paddr >= pool->start && paddr < pool->end)
792 pool = NULL;
795 return pool;
799 * swiotlb_del_pool() - remove an IO TLB pool from a device
801 * @pool: Memory pool to be removed.
803 static void swiotlb_del_pool(struct device *dev, struct io_tlb_pool *pool)
805 unsigned long flags;
807 spin_lock_irqsave(&dev->dma_io_tlb_lock, flags);
808 list_del_rcu(&pool->node);
809 spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
811 call_rcu(&pool->rcu, swiotlb_dyn_free);
817 * swiotlb_dev_init() - initialize swiotlb fields in &struct device
822 dev->dma_io_tlb_mem = &io_tlb_default_mem;
824 INIT_LIST_HEAD(&dev->dma_io_tlb_pools);
825 spin_lock_init(&dev->dma_io_tlb_lock);
826 dev->dma_uses_io_tlb = false;
831 * swiotlb_align_offset() - Get required offset into an IO TLB allocation.
853 (align_mask | (IO_TLB_SIZE - 1));
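
swiotlb_align_offset() boils down to the bit expression ending here. The userspace model below reproduces that arithmetic only, with a plain parameter standing in for dma_get_min_align_mask(dev) and IO_TLB_SIZE assumed to be 2048; how the resulting offset is consumed is left to swiotlb_tbl_map_single():

#include <stdio.h>
#include <stdint.h>

#define IO_TLB_SIZE	2048u

static unsigned int align_offset_model(uint64_t min_align_mask,
				       unsigned int alloc_align_mask,
				       uint64_t addr)
{
	return addr & min_align_mask & (alloc_align_mask | (IO_TLB_SIZE - 1));
}

int main(void)
{
	uint64_t orig = 0x12345abcULL;

	/* No min_align_mask: the bounce copy may start at a slot boundary. */
	printf("%#x\n", align_offset_model(0, 0, orig));
	/* 4 KiB - 1 min_align_mask: low address bits become an in-slot offset. */
	printf("%#x\n", align_offset_model(0xfff, 0, orig));
	return 0;
}
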
862 int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
863 phys_addr_t orig_addr = mem->slots[index].orig_addr;
864 size_t alloc_size = mem->slots[index].alloc_size;
865 unsigned long pfn = PFN_DOWN(orig_addr);
866 unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
874 * "offset" returned by swiotlb_align_offset() is non-zero, and the
882 tlb_offset = (tlb_addr & (IO_TLB_SIZE - 1)) -
886 alloc_size -= tlb_offset;
899 unsigned long flags;
902 sz = min_t(size_t, PAGE_SIZE - offset, size);
912 size -= sz;
932 static inline unsigned long get_max_slots(unsigned long boundary_mask)
939 if (index >= mem->area_nslabs)
953 unsigned long old_hiwater, new_used;
955 new_used = atomic_long_add_return(nslots, &mem->total_used);
956 old_hiwater = atomic_long_read(&mem->used_hiwater);
960 } while (!atomic_long_try_cmpxchg(&mem->used_hiwater,
966 atomic_long_sub(nslots, &mem->total_used);
982 atomic_long_add(nslots, &mem->transient_nslabs);
987 atomic_long_sub(nslots, &mem->transient_nslabs);
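
The counters above feed the debugfs statistics further down in the file. A userspace model of the lock-free high-water update, using C11 atomics in place of the kernel's atomic_long_* helpers:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long total_used;
static atomic_long used_hiwater;

static void inc_used_and_hiwater_model(long nslots)
{
	long new_used = atomic_fetch_add(&total_used, nslots) + nslots;
	long old_hiwater = atomic_load(&used_hiwater);

	/* Raise the high-water mark only if this allocation exceeded it. */
	do {
		if (new_used <= old_hiwater)
			break;
	} while (!atomic_compare_exchange_weak(&used_hiwater, &old_hiwater,
					       new_used));
}

int main(void)
{
	inc_used_and_hiwater_model(64);
	atomic_fetch_sub(&total_used, 32);	/* models dec_used() */
	inc_used_and_hiwater_model(16);
	printf("used=%ld hiwater=%ld\n", atomic_load(&total_used),
	       atomic_load(&used_hiwater));	/* used=48 hiwater=64 */
	return 0;
}
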
1001 * swiotlb_search_pool_area() - search one memory area in one pool
1003 * @pool: Memory pool to be searched.
1005 * @orig_addr: Original (non-bounced) IO buffer address.
1014 * Return: Index of the first allocated slot, or -1 on error.
1016 static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool,
1020 struct io_tlb_area *area = pool->areas + area_index;
1021 unsigned long boundary_mask = dma_get_seg_boundary(dev);
1023 phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
1024 unsigned long max_slots = get_max_slots(boundary_mask);
1029 unsigned long flags;
1034 BUG_ON(area_index >= pool->nareas);
1038 * page-aligned in the absence of any other alignment requirements.
1045 alloc_align_mask = PAGE_SIZE - 1;
1048 * Ensure that the allocation is at least slot-aligned and update
1052 alloc_align_mask |= (IO_TLB_SIZE - 1);
1061 spin_lock_irqsave(&area->lock, flags);
1062 if (unlikely(nslots > pool->area_nslabs - area->used))
1065 slot_base = area_index * pool->area_nslabs;
1066 index = area->index;
1068 for (slots_checked = 0; slots_checked < pool->area_nslabs; ) {
1077 index = wrap_area_index(pool, index + 1);
1085 if (pool->slots[slot_index].list >= nslots)
1088 index = wrap_area_index(pool, index + stride);
1093 spin_unlock_irqrestore(&area->lock, flags);
1094 return -1;
1103 pool->slots[i].list = 0;
1104 pool->slots[i].alloc_size = alloc_size - (offset +
1105 ((i - slot_index) << IO_TLB_SHIFT));
1107 for (i = slot_index - 1;
1108 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
1109 pool->slots[i].list; i--)
1110 pool->slots[i].list = ++count;
1115 area->index = wrap_area_index(pool, index + nslots);
1116 area->used += nslots;
1117 spin_unlock_irqrestore(&area->lock, flags);
1119 inc_used_and_hiwater(dev->dma_io_tlb_mem, nslots);
1126 * swiotlb_search_area() - search one memory area in all pools
1130 * @orig_addr: Original (non-bounced) IO buffer address.
1134 * @retpool: Used memory pool, updated on return.
1139 * Return: Index of the first allocated slot, or -1 on error.
1145 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1146 struct io_tlb_pool *pool;
1148 int index = -1;
1151 list_for_each_entry_rcu(pool, &mem->pools, node) {
1152 if (cpu_offset >= pool->nareas)
1154 area_index = (start_cpu + cpu_offset) & (pool->nareas - 1);
1155 index = swiotlb_search_pool_area(dev, pool, area_index,
1159 *retpool = pool;
1168 * swiotlb_find_slots() - search for slots in the whole swiotlb
1170 * @orig_addr: Original (non-bounced) IO buffer address.
1174 * @retpool: Used memory pool, updated on return.
1179 * Return: Index of the first allocated slot, or -1 on error.
1185 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1186 struct io_tlb_pool *pool;
1187 unsigned long nslabs;
1188 unsigned long flags;
1194 return -1;
1199 alloc_align_mask, &pool);
1204 if (!mem->can_grow)
1205 return -1;
1207 schedule_work(&mem->dyn_alloc);
1210 phys_limit = min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
1211 pool = swiotlb_alloc_pool(dev, nslabs, nslabs, 1, phys_limit,
1213 if (!pool)
1214 return -1;
1216 index = swiotlb_search_pool_area(dev, pool, 0, orig_addr,
1219 swiotlb_dyn_free(&pool->rcu);
1220 return -1;
1223 pool->transient = true;
1224 spin_lock_irqsave(&dev->dma_io_tlb_lock, flags);
1225 list_add_rcu(&pool->node, &dev->dma_io_tlb_pools);
1226 spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
1227 inc_transient_used(mem, pool->nslabs);
1230 WRITE_ONCE(dev->dma_uses_io_tlb, true);
1237 * First, the store to dev->dma_uses_io_tlb must be ordered before the
1239 * cannot be passed to another CPU before updating dev->dma_uses_io_tlb.
1241 * Second, the load from mem->pools must be ordered before the same
1251 *retpool = pool;
1261 struct io_tlb_pool *pool;
1265 *retpool = pool = &dev->dma_io_tlb_mem->defpool;
1266 i = start = raw_smp_processor_id() & (pool->nareas - 1);
1268 index = swiotlb_search_pool_area(dev, pool, i, orig_addr,
1272 if (++i >= pool->nareas)
1275 return -1;
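
This non-dynamic variant of swiotlb_find_slots() starts at the area indexed by the current CPU and wraps around until every area has been tried once. A trivial userspace model of that iteration (nareas kept a power of two so the mask covers all areas):

#include <stdio.h>

int main(void)
{
	unsigned int nareas = 8;	/* stand-in for pool->nareas */
	unsigned int cpu = 5;		/* stand-in for raw_smp_processor_id() */
	unsigned int i, start;

	i = start = cpu & (nareas - 1);
	do {
		printf("search area %u\n", i);	/* would scan this area's slots */
		if (++i >= nareas)
			i = 0;
	} while (i != start);
	return 0;
}
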
1283 * mem_used() - get number of used slots in an allocator
1291 static unsigned long mem_used(struct io_tlb_mem *mem)
1293 return atomic_long_read(&mem->total_used);
1299 * mem_pool_used() - get number of used slots in a memory pool
1300 * @pool: Software IO TLB memory pool.
1306 static unsigned long mem_pool_used(struct io_tlb_pool *pool)
1309 unsigned long used = 0;
1311 for (i = 0; i < pool->nareas; i++)
1312 used += pool->areas[i].used;
1317 * mem_used() - get number of used slots in an allocator
1325 static unsigned long mem_used(struct io_tlb_mem *mem)
1328 struct io_tlb_pool *pool;
1329 unsigned long used = 0;
1332 list_for_each_entry_rcu(pool, &mem->pools, node)
1333 used += mem_pool_used(pool);
1338 return mem_pool_used(&mem->defpool);
1345 * swiotlb_tbl_map_single() - bounce buffer map a single contiguous physical area
1347 * @orig_addr: Original (non-bounced) physical IO buffer address
1349 * any pre- or post-padding for alignment
1366 * area. Any pre-padding (due to an offset) and any post-padding (due to
1367 * rounding up the size) are not initialized.
1371 enum dma_data_direction dir, unsigned long attrs)
1373 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1375 struct io_tlb_pool *pool;
1382 if (!mem || !mem->nslabs) {
1392 * The default swiotlb memory pool is allocated with PAGE_SIZE
1403 index = swiotlb_find_slots(dev, orig_addr, size, alloc_align_mask, &pool);
1404 if (index == -1) {
1408 size, mem->nslabs, mem_used(mem));
1424 offset &= (IO_TLB_SIZE - 1);
1426 pool->slots[index].pad_slots = pad_slots;
1427 for (i = 0; i < (nr_slots(size) - pad_slots); i++)
1428 pool->slots[index + i].orig_addr = slot_addr(orig_addr, i);
1429 tlb_addr = slot_addr(pool->start, index) + offset;
1439 swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE, pool);
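
For context: drivers do not call swiotlb_tbl_map_single() directly; dma-direct bounces through it when a streaming mapping cannot otherwise reach the device. A hedged kernel-style sketch using only the generic DMA API; the driver function itself is invented for illustration:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;		/* may mean the bounce pool is exhausted */

	/* ... program the hardware with "dma", wait for completion ... */

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}
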
1446 unsigned long flags;
1452 index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
1453 index -= mem->slots[index].pad_slots;
1454 nslots = nr_slots(mem->slots[index].alloc_size + offset);
1455 aindex = index / mem->area_nslabs;
1456 area = &mem->areas[aindex];
1462 * with slots below and above the pool being returned.
1464 BUG_ON(aindex >= mem->nareas);
1466 spin_lock_irqsave(&area->lock, flags);
1468 count = mem->slots[index + nslots].list;
1476 for (i = index + nslots - 1; i >= index; i--) {
1477 mem->slots[i].list = ++count;
1478 mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
1479 mem->slots[i].alloc_size = 0;
1480 mem->slots[i].pad_slots = 0;
1487 for (i = index - 1;
1488 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
1489 i--)
1490 mem->slots[i].list = ++count;
1491 area->used -= nslots;
1492 spin_unlock_irqrestore(&area->lock, flags);
1494 dec_used(dev->dma_io_tlb_mem, nslots);
1500 * swiotlb_del_transient() - delete a transient memory pool
1503 * @pool: Pointer to the transient memory pool to be checked and deleted.
1505 * Check whether the address belongs to a transient SWIOTLB memory pool.
1506 * If yes, then delete the pool.
1508 * Return: %true if @tlb_addr belonged to a transient pool that was released.
1511 struct io_tlb_pool *pool)
1513 if (!pool->transient)
1516 dec_used(dev->dma_io_tlb_mem, pool->nslabs);
1517 swiotlb_del_pool(dev, pool);
1518 dec_transient_used(dev->dma_io_tlb_mem, pool->nslabs);
1525 phys_addr_t tlb_addr, struct io_tlb_pool *pool)
1537 unsigned long attrs, struct io_tlb_pool *pool)
1545 DMA_FROM_DEVICE, pool);
1547 if (swiotlb_del_transient(dev, tlb_addr, pool))
1549 swiotlb_release_slots(dev, tlb_addr, pool);
1554 struct io_tlb_pool *pool)
1557 swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE, pool);
1564 struct io_tlb_pool *pool)
1567 swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE, pool);
1577 enum dma_data_direction dir, unsigned long attrs)
1596 &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
1618 return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align;
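
A single bounce mapping never spans an IO_TLB_SEGSIZE segment, so the bound returned here is IO_TLB_SIZE * IO_TLB_SEGSIZE minus an alignment reserve derived from the device's min_align_mask. With the usual constants (2 KiB slots, 128-slot segments, assumed rather than shown in the matches) the unreserved bound works out to 256 KiB:

#include <stdio.h>

int main(void)
{
	unsigned long io_tlb_size = 2048, io_tlb_segsize = 128;

	/* Upper bound when the device sets no min_align_mask. */
	printf("max mapping size: %lu KiB\n",
	       io_tlb_size * io_tlb_segsize / 1024);
	return 0;
}
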
1622 * is_swiotlb_allocated() - check if the default software IO TLB is initialized
1631 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1633 return mem && mem->nslabs;
1637 * default_swiotlb_base() - get the base address of the default SWIOTLB
1639 * Get the lowest physical address used by the default software IO TLB pool.
1650 * default_swiotlb_limit() - get the address limit of the default SWIOTLB
1652 * Get the highest physical address used by the default software IO TLB pool.
1659 return io_tlb_default_mem.defpool.end - 1;
1665 static unsigned long mem_transient_used(struct io_tlb_mem *mem)
1667 return atomic_long_read(&mem->transient_nslabs);
1694 *val = atomic_long_read(&mem->used_hiwater);
1704 return -EINVAL;
1706 atomic_long_set(&mem->used_hiwater, val);
1717 mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
1718 if (!mem->nslabs)
1721 debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
1722 debugfs_create_file("io_tlb_used", 0400, mem->debugfs, mem,
1724 debugfs_create_file("io_tlb_used_hiwater", 0600, mem->debugfs, mem,
1727 debugfs_create_file("io_tlb_transient_nslabs", 0400, mem->debugfs,
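
These counters end up under debugfs; for the default allocator the directory is assumed to be /sys/kernel/debug/swiotlb (the dirname is passed by swiotlb_create_default_debugfs(), which is not among the matched lines). A minimal userspace reader, requiring a mounted debugfs and sufficient privileges:

#include <stdio.h>

int main(void)
{
	static const char *files[] = {
		"/sys/kernel/debug/swiotlb/io_tlb_nslabs",
		"/sys/kernel/debug/swiotlb/io_tlb_used",
		"/sys/kernel/debug/swiotlb/io_tlb_used_hiwater",
	};
	char buf[64];

	for (size_t i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
		FILE *f = fopen(files[i], "r");

		if (!f)
			continue;
		if (fgets(buf, sizeof(buf), f))
			printf("%s: %s", files[i], buf);
		fclose(f);
	}
	return 0;
}
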
1753 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1754 struct io_tlb_pool *pool;
1762 align = (1 << (get_order(size) + PAGE_SHIFT)) - 1;
1763 index = swiotlb_find_slots(dev, 0, size, align, &pool);
1764 if (index == -1)
1767 tlb_addr = slot_addr(pool->start, index);
1769 dev_WARN_ONCE(dev, 1, "Cannot allocate pages from non page-aligned swiotlb addr 0x%pa.\n",
1771 swiotlb_release_slots(dev, tlb_addr, pool);
1781 struct io_tlb_pool *pool;
1783 pool = swiotlb_find_pool(dev, tlb_addr);
1784 if (!pool)
1787 swiotlb_release_slots(dev, tlb_addr, pool);
1795 struct io_tlb_mem *mem = rmem->priv;
1796 unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;
1798 /* Set the per-device IO TLB area count to one */
1801 if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
1802 dev_err(dev, "Restricted DMA pool must be accessible within the linear mapping.");
1803 return -EINVAL;
1807 * Since multiple devices can share the same pool, the private data,
1812 struct io_tlb_pool *pool;
1816 return -ENOMEM;
1817 pool = &mem->defpool;
1819 pool->slots = kcalloc(nslabs, sizeof(*pool->slots), GFP_KERNEL);
1820 if (!pool->slots) {
1822 return -ENOMEM;
1825 pool->areas = kcalloc(nareas, sizeof(*pool->areas),
1827 if (!pool->areas) {
1828 kfree(pool->slots);
1830 return -ENOMEM;
1833 set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
1834 rmem->size >> PAGE_SHIFT);
1835 swiotlb_init_io_tlb_pool(pool, rmem->base, nslabs,
1837 mem->force_bounce = true;
1838 mem->for_alloc = true;
1840 spin_lock_init(&mem->lock);
1841 INIT_LIST_HEAD_RCU(&mem->pools);
1843 add_mem_pool(mem, pool);
1845 rmem->priv = mem;
1847 swiotlb_create_debugfs_files(mem, rmem->name);
1850 dev->dma_io_tlb_mem = mem;
1858 dev->dma_io_tlb_mem = &io_tlb_default_mem;
1868 unsigned long node = rmem->fdt_node;
1871 of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
1872 of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
1873 of_get_flat_dt_prop(node, "no-map", NULL))
1874 return -EINVAL;
1876 rmem->ops = &rmem_swiotlb_ops;
1877 pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
1878 &rmem->base, (unsigned long)rmem->size / SZ_1M);
1882 RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
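
rmem_swiotlb_setup() binds to the "restricted-dma-pool" compatible in a /reserved-memory node, and a device opts in through a memory-region phandle. A minimal device-tree sketch, kept inside a C comment to match the listing; node names, unit addresses and sizes are illustrative only:

/*
 * reserved-memory {
 *	#address-cells = <2>;
 *	#size-cells = <2>;
 *	ranges;
 *
 *	restricted_dma: restricted-dma@50000000 {
 *		compatible = "restricted-dma-pool";
 *		reg = <0x0 0x50000000 0x0 0x4000000>;	// 64 MiB, example only
 *	};
 * };
 *
 * &some_dma_capable_device {
 *	memory-region = <&restricted_dma>;
 * };
 */
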