Lines Matching +full:dma +full:- +full:mem in kernel/dma/swiotlb.c

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Dynamic DMA mapping support.
6 * I/O TLBs (aka DMA address translation hardware).
9 * Copyright (C) 2000, 2003 Hewlett-Packard Co
10 * David Mosberger-Tang <davidm@hpl.hp.com>
12 * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
14 * unnecessary i-cache flushing.
27 #include <linux/dma-direct.h>
28 #include <linux/dma-map-ops.h>
33 #include <linux/iommu-helper.h>
55 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
67 * struct io_tlb_slot - IO TLB slot descriptor
73 * allocated non-padding slot.
106 * struct io_tlb_area - IO TLB memory area descriptor
145 * swiotlb_adjust_nareas() - adjust the number of areas and slots
168 * limit_nareas() - get the maximum number of areas for a given memory pool size
231 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool; in swiotlb_print_info() local
233 if (!mem->nslabs) { in swiotlb_print_info()
234 pr_warn("No low mem\n"); in swiotlb_print_info()
238 pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end, in swiotlb_print_info()
239 (mem->nslabs << IO_TLB_SHIFT) >> 20); in swiotlb_print_info()
244 return val & (IO_TLB_SEGSIZE - 1); in io_tlb_offset()
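
A quick way to see what SLABS_PER_PAGE and io_tlb_offset() above compute is a standalone userspace sketch. The constant values below (2 KiB slots via IO_TLB_SHIFT 11, 128 slots per segment, 4 KiB pages) are the usual defaults and are assumed here; the matched lines only show how they are combined:

    #include <stdio.h>

    /* Assumed defaults; the matched lines only show how they are combined. */
    #define PAGE_SHIFT     12                      /* 4 KiB pages */
    #define IO_TLB_SHIFT   11                      /* 2 KiB per slot */
    #define IO_TLB_SIZE    (1UL << IO_TLB_SHIFT)
    #define IO_TLB_SEGSIZE 128                     /* slots per segment */
    #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

    /* Mirrors io_tlb_offset(): position of a slot within its segment. */
    static unsigned int io_tlb_offset(unsigned int val)
    {
        return val & (IO_TLB_SEGSIZE - 1);
    }

    int main(void)
    {
        printf("slot size      : %lu bytes\n", IO_TLB_SIZE);    /* 2048 */
        printf("slots per page : %d\n", SLABS_PER_PAGE);         /* 2 */
        printf("slot 130 offset in its segment: %u\n",
               io_tlb_offset(130));                              /* 2 */
        return 0;
    }
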
260 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool; in swiotlb_update_mem_attributes() local
263 if (!mem->nslabs || mem->late_alloc) in swiotlb_update_mem_attributes()
265 bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT); in swiotlb_update_mem_attributes()
266 set_memory_decrypted((unsigned long)mem->vaddr, bytes >> PAGE_SHIFT); in swiotlb_update_mem_attributes()
269 static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start, in swiotlb_init_io_tlb_pool() argument
275 mem->nslabs = nslabs; in swiotlb_init_io_tlb_pool()
276 mem->start = start; in swiotlb_init_io_tlb_pool()
277 mem->end = mem->start + bytes; in swiotlb_init_io_tlb_pool()
278 mem->late_alloc = late_alloc; in swiotlb_init_io_tlb_pool()
279 mem->nareas = nareas; in swiotlb_init_io_tlb_pool()
280 mem->area_nslabs = nslabs / mem->nareas; in swiotlb_init_io_tlb_pool()
282 for (i = 0; i < mem->nareas; i++) { in swiotlb_init_io_tlb_pool()
283 spin_lock_init(&mem->areas[i].lock); in swiotlb_init_io_tlb_pool()
284 mem->areas[i].index = 0; in swiotlb_init_io_tlb_pool()
285 mem->areas[i].used = 0; in swiotlb_init_io_tlb_pool()
288 for (i = 0; i < mem->nslabs; i++) { in swiotlb_init_io_tlb_pool()
289 mem->slots[i].list = min(IO_TLB_SEGSIZE - io_tlb_offset(i), in swiotlb_init_io_tlb_pool()
290 mem->nslabs - i); in swiotlb_init_io_tlb_pool()
291 mem->slots[i].orig_addr = INVALID_PHYS_ADDR; in swiotlb_init_io_tlb_pool()
292 mem->slots[i].alloc_size = 0; in swiotlb_init_io_tlb_pool()
293 mem->slots[i].pad_slots = 0; in swiotlb_init_io_tlb_pool()
297 mem->vaddr = vaddr; in swiotlb_init_io_tlb_pool()
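
The slots[i].list assignment above builds a per-segment countdown: each free slot records how many free slots (itself included) follow it before its segment, or the pool, ends. A minimal sketch of just that rule, again assuming a 128-slot segment size:

    #include <stdio.h>

    #define IO_TLB_SEGSIZE 128   /* assumed, as in the previous sketch */
    #define NSLABS         256   /* a hypothetical two-segment pool */

    static unsigned int io_tlb_offset(unsigned int i)
    {
        return i & (IO_TLB_SEGSIZE - 1);
    }

    int main(void)
    {
        unsigned int list[NSLABS], i, seg, left;

        /* Same rule as the loop in swiotlb_init_io_tlb_pool() above. */
        for (i = 0; i < NSLABS; i++) {
            seg  = IO_TLB_SEGSIZE - io_tlb_offset(i);
            left = NSLABS - i;
            list[i] = seg < left ? seg : left;
        }

        printf("slot   0: %u\n", list[0]);     /* 128: whole segment free  */
        printf("slot 100: %u\n", list[100]);   /* 28 slots left in segment */
        printf("slot 128: %u\n", list[128]);   /* 128: next segment starts */
        printf("slot 255: %u\n", list[255]);   /* 1: last slot of the pool */
        return 0;
    }

The allocator only ever has to compare this countdown against the number of slots it needs; the sketch after swiotlb_release_slots() further down walks through that.
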
302 * add_mem_pool() - add a memory pool to the allocator
303 * @mem: Software IO TLB allocator.
306 static void add_mem_pool(struct io_tlb_mem *mem, struct io_tlb_pool *pool) in add_mem_pool() argument
309 spin_lock(&mem->lock); in add_mem_pool()
310 list_add_rcu(&pool->node, &mem->pools); in add_mem_pool()
311 mem->nslabs += pool->nslabs; in add_mem_pool()
312 spin_unlock(&mem->lock); in add_mem_pool()
314 mem->nslabs = pool->nslabs; in add_mem_pool()
352 * structures for the software IO TLB used to implement the DMA API.
357 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool; in swiotlb_init_remap() local
375 io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1); in swiotlb_init_remap()
393 pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs", in swiotlb_init_remap()
398 alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs)); in swiotlb_init_remap()
399 mem->slots = memblock_alloc(alloc_size, PAGE_SIZE); in swiotlb_init_remap()
400 if (!mem->slots) { in swiotlb_init_remap()
406 mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area), in swiotlb_init_remap()
408 if (!mem->areas) { in swiotlb_init_remap()
409 pr_warn("%s: Failed to allocate mem->areas.\n", __func__); in swiotlb_init_remap()
413 swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false, nareas); in swiotlb_init_remap()
414 add_mem_pool(&io_tlb_default_mem, mem); in swiotlb_init_remap()
426 * Systems with larger DMA zones (those that don't support ISA) can
433 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool; in swiotlb_init_late() local
457 io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1); in swiotlb_init_late()
472 order--; in swiotlb_init_late()
478 return -ENOMEM; in swiotlb_init_late()
498 area_order = get_order(array_size(sizeof(*mem->areas), nareas)); in swiotlb_init_late()
499 mem->areas = (struct io_tlb_area *) in swiotlb_init_late()
501 if (!mem->areas) in swiotlb_init_late()
504 mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, in swiotlb_init_late()
505 get_order(array_size(sizeof(*mem->slots), nslabs))); in swiotlb_init_late()
506 if (!mem->slots) in swiotlb_init_late()
511 swiotlb_init_io_tlb_pool(mem, virt_to_phys(vstart), nslabs, true, in swiotlb_init_late()
513 add_mem_pool(&io_tlb_default_mem, mem); in swiotlb_init_late()
519 free_pages((unsigned long)mem->areas, area_order); in swiotlb_init_late()
522 return -ENOMEM; in swiotlb_init_late()
527 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool; in swiotlb_exit() local
535 if (!mem->nslabs) in swiotlb_exit()
539 tbl_vaddr = (unsigned long)phys_to_virt(mem->start); in swiotlb_exit()
540 tbl_size = PAGE_ALIGN(mem->end - mem->start); in swiotlb_exit()
541 slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs)); in swiotlb_exit()
544 if (mem->late_alloc) { in swiotlb_exit()
545 area_order = get_order(array_size(sizeof(*mem->areas), in swiotlb_exit()
546 mem->nareas)); in swiotlb_exit()
547 free_pages((unsigned long)mem->areas, area_order); in swiotlb_exit()
549 free_pages((unsigned long)mem->slots, get_order(slots_size)); in swiotlb_exit()
551 memblock_free_late(__pa(mem->areas), in swiotlb_exit()
552 array_size(sizeof(*mem->areas), mem->nareas)); in swiotlb_exit()
553 memblock_free_late(mem->start, tbl_size); in swiotlb_exit()
554 memblock_free_late(__pa(mem->slots), slots_size); in swiotlb_exit()
557 memset(mem, 0, sizeof(*mem)); in swiotlb_exit()
563 * alloc_dma_pages() - allocate pages to be used for DMA
569 * pages decrypted that they can be used for DMA.
571 * Return: Decrypted pages, %NULL on allocation failure, or ERR_PTR(-EAGAIN)
586 if (paddr + bytes - 1 > phys_limit) { in alloc_dma_pages()
588 return ERR_PTR(-EAGAIN); in alloc_dma_pages()
604 * swiotlb_alloc_tlb() - allocate a dynamic IO TLB buffer
653 * swiotlb_free_tlb() - free a dynamically allocated IO TLB buffer
669 * swiotlb_alloc_pool() - allocate a new IO TLB memory pool
674 * @phys_limit: Maximum DMA buffer physical address.
698 pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas); in swiotlb_alloc_pool()
702 pool->areas = (void *)pool + sizeof(*pool); in swiotlb_alloc_pool()
713 slot_order = get_order(array_size(sizeof(*pool->slots), nslabs)); in swiotlb_alloc_pool()
714 pool->slots = (struct io_tlb_slot *) in swiotlb_alloc_pool()
716 if (!pool->slots) in swiotlb_alloc_pool()
731 * swiotlb_dyn_alloc() - dynamic memory pool allocation worker
736 struct io_tlb_mem *mem = in swiotlb_dyn_alloc() local
741 default_nareas, mem->phys_limit, GFP_KERNEL); in swiotlb_dyn_alloc()
747 add_mem_pool(mem, pool); in swiotlb_dyn_alloc()
751 * swiotlb_dyn_free() - RCU callback to free a memory pool
757 size_t slots_size = array_size(sizeof(*pool->slots), pool->nslabs); in swiotlb_dyn_free()
758 size_t tlb_size = pool->end - pool->start; in swiotlb_dyn_free()
760 free_pages((unsigned long)pool->slots, get_order(slots_size)); in swiotlb_dyn_free()
761 swiotlb_free_tlb(pool->vaddr, tlb_size); in swiotlb_dyn_free()
766 * __swiotlb_find_pool() - find the IO TLB pool for a physical address
767 * @dev: Device which has mapped the DMA buffer.
768 * @paddr: Physical address within the DMA buffer.
779 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in __swiotlb_find_pool() local
783 list_for_each_entry_rcu(pool, &mem->pools, node) { in __swiotlb_find_pool()
784 if (paddr >= pool->start && paddr < pool->end) in __swiotlb_find_pool()
788 list_for_each_entry_rcu(pool, &dev->dma_io_tlb_pools, node) { in __swiotlb_find_pool()
789 if (paddr >= pool->start && paddr < pool->end) in __swiotlb_find_pool()
799 * swiotlb_del_pool() - remove an IO TLB pool from a device
807 spin_lock_irqsave(&dev->dma_io_tlb_lock, flags); in swiotlb_del_pool()
808 list_del_rcu(&pool->node); in swiotlb_del_pool()
809 spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags); in swiotlb_del_pool()
811 call_rcu(&pool->rcu, swiotlb_dyn_free); in swiotlb_del_pool()
817 * swiotlb_dev_init() - initialize swiotlb fields in &struct device
822 dev->dma_io_tlb_mem = &io_tlb_default_mem; in swiotlb_dev_init()
824 INIT_LIST_HEAD(&dev->dma_io_tlb_pools); in swiotlb_dev_init()
825 spin_lock_init(&dev->dma_io_tlb_lock); in swiotlb_dev_init()
826 dev->dma_uses_io_tlb = false; in swiotlb_dev_init()
831 * swiotlb_align_offset() - Get required offset into an IO TLB allocation.
834 * @addr: DMA address.
853 (align_mask | (IO_TLB_SIZE - 1)); in swiotlb_align_offset()
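
swiotlb_align_offset() reduces to the single expression matched above: the bits of the original address that must be preserved inside the bounce slot are those under the device's min_align_mask, but never fewer than the offset within one slot. A hedged worked example (2 KiB slots assumed; the 0xfff mask stands in for a hypothetical device attribute):

    #include <stdio.h>

    #define IO_TLB_SHIFT 11                      /* assumed 2 KiB slots */
    #define IO_TLB_SIZE  (1UL << IO_TLB_SHIFT)

    /* Mirrors the expression in swiotlb_align_offset(). */
    static unsigned long align_offset(unsigned long addr, unsigned long mask)
    {
        return addr & (mask | (IO_TLB_SIZE - 1));
    }

    int main(void)
    {
        /* No min_align_mask: only the in-slot offset survives. */
        printf("0x12b45, mask 0x000 -> 0x%lx\n",
               align_offset(0x12b45, 0x000));    /* 0x345 */

        /* Hypothetical device requiring 4 KiB alignment (mask 0xfff):
         * the whole page offset of the original buffer is kept. */
        printf("0x12b45, mask 0xfff -> 0x%lx\n",
               align_offset(0x12b45, 0xfff));    /* 0xb45 */
        return 0;
    }
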
857 * Bounce: copy the swiotlb buffer from or back to the original dma location
860 enum dma_data_direction dir, struct io_tlb_pool *mem) in swiotlb_bounce() argument
862 int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT; in swiotlb_bounce()
863 phys_addr_t orig_addr = mem->slots[index].orig_addr; in swiotlb_bounce()
864 size_t alloc_size = mem->slots[index].alloc_size; in swiotlb_bounce()
866 unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start; in swiotlb_bounce()
874 * "offset" returned by swiotlb_align_offset() is non-zero, and the in swiotlb_bounce()
882 tlb_offset = (tlb_addr & (IO_TLB_SIZE - 1)) - in swiotlb_bounce()
886 alloc_size -= tlb_offset; in swiotlb_bounce()
902 sz = min_t(size_t, PAGE_SIZE - offset, size); in swiotlb_bounce()
912 size -= sz; in swiotlb_bounce()
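
The copy loop matched above (sz = min(PAGE_SIZE - offset, size), then size -= sz) walks the original buffer page by page so that each page can be mapped individually, since the original buffer may live in highmem. A simplified model of that chunking, one copy direction only and no kmap, with a 4 KiB page size assumed:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL   /* assumed */

    /* Copy 'size' bytes starting at byte 'offset' of 'buf' into 'dst',
     * never crossing a page boundary of 'buf' in a single memcpy; this is
     * the same chunking swiotlb_bounce() uses, minus the per-page kmap. */
    static void bounce_copy(unsigned char *dst, const unsigned char *buf,
                            unsigned long offset, unsigned long size)
    {
        unsigned long sz;

        while (size) {
            sz = PAGE_SIZE - (offset & (PAGE_SIZE - 1));
            if (sz > size)
                sz = size;
            memcpy(dst, buf + offset, sz);
            dst += sz;
            offset += sz;
            size -= sz;
        }
    }

    int main(void)
    {
        static unsigned char src[3 * 4096], dst[3 * 4096];

        memset(src, 0xab, sizeof(src));
        /* 8000 bytes starting at offset 100: copied as 3996 + 4004. */
        bounce_copy(dst, src, 100, 8000);
        printf("last byte copied: 0x%x\n", dst[7999]);   /* 0xab */
        return 0;
    }
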
937 static unsigned int wrap_area_index(struct io_tlb_pool *mem, unsigned int index) in wrap_area_index() argument
939 if (index >= mem->area_nslabs) in wrap_area_index()
951 static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots) in inc_used_and_hiwater() argument
955 new_used = atomic_long_add_return(nslots, &mem->total_used); in inc_used_and_hiwater()
956 old_hiwater = atomic_long_read(&mem->used_hiwater); in inc_used_and_hiwater()
960 } while (!atomic_long_try_cmpxchg(&mem->used_hiwater, in inc_used_and_hiwater()
964 static void dec_used(struct io_tlb_mem *mem, unsigned int nslots) in dec_used() argument
966 atomic_long_sub(nslots, &mem->total_used); in dec_used()
970 static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots) in inc_used_and_hiwater() argument
973 static void dec_used(struct io_tlb_mem *mem, unsigned int nslots) in dec_used() argument
980 static void inc_transient_used(struct io_tlb_mem *mem, unsigned int nslots) in inc_transient_used() argument
982 atomic_long_add(nslots, &mem->transient_nslabs); in inc_transient_used()
985 static void dec_transient_used(struct io_tlb_mem *mem, unsigned int nslots) in dec_transient_used() argument
987 atomic_long_sub(nslots, &mem->transient_nslabs); in dec_transient_used()
991 static void inc_transient_used(struct io_tlb_mem *mem, unsigned int nslots) in inc_transient_used() argument
994 static void dec_transient_used(struct io_tlb_mem *mem, unsigned int nslots) in dec_transient_used() argument
1001 * swiotlb_search_pool_area() - search one memory area in one pool
1005 * @orig_addr: Original (non-bounced) IO buffer address.
1014 * Return: Index of the first allocated slot, or -1 on error.
1020 struct io_tlb_area *area = pool->areas + area_index; in swiotlb_search_pool_area()
1023 phys_to_dma_unencrypted(dev, pool->start) & boundary_mask; in swiotlb_search_pool_area()
1034 BUG_ON(area_index >= pool->nareas); in swiotlb_search_pool_area()
1038 * page-aligned in the absence of any other alignment requirements. in swiotlb_search_pool_area()
1045 alloc_align_mask = PAGE_SIZE - 1; in swiotlb_search_pool_area()
1048 * Ensure that the allocation is at least slot-aligned and update in swiotlb_search_pool_area()
1052 alloc_align_mask |= (IO_TLB_SIZE - 1); in swiotlb_search_pool_area()
1061 spin_lock_irqsave(&area->lock, flags); in swiotlb_search_pool_area()
1062 if (unlikely(nslots > pool->area_nslabs - area->used)) in swiotlb_search_pool_area()
1065 slot_base = area_index * pool->area_nslabs; in swiotlb_search_pool_area()
1066 index = area->index; in swiotlb_search_pool_area()
1068 for (slots_checked = 0; slots_checked < pool->area_nslabs; ) { in swiotlb_search_pool_area()
1085 if (pool->slots[slot_index].list >= nslots) in swiotlb_search_pool_area()
1093 spin_unlock_irqrestore(&area->lock, flags); in swiotlb_search_pool_area()
1094 return -1; in swiotlb_search_pool_area()
1103 pool->slots[i].list = 0; in swiotlb_search_pool_area()
1104 pool->slots[i].alloc_size = alloc_size - (offset + in swiotlb_search_pool_area()
1105 ((i - slot_index) << IO_TLB_SHIFT)); in swiotlb_search_pool_area()
1107 for (i = slot_index - 1; in swiotlb_search_pool_area()
1108 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && in swiotlb_search_pool_area()
1109 pool->slots[i].list; i--) in swiotlb_search_pool_area()
1110 pool->slots[i].list = ++count; in swiotlb_search_pool_area()
1115 area->index = wrap_area_index(pool, index + nslots); in swiotlb_search_pool_area()
1116 area->used += nslots; in swiotlb_search_pool_area()
1117 spin_unlock_irqrestore(&area->lock, flags); in swiotlb_search_pool_area()
1119 inc_used_and_hiwater(dev->dma_io_tlb_mem, nslots); in swiotlb_search_pool_area()
1126 * swiotlb_search_area() - search one memory area in all pools
1130 * @orig_addr: Original (non-bounced) IO buffer address.
1139 * Return: Index of the first allocated slot, or -1 on error.
1145 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_search_area() local
1148 int index = -1; in swiotlb_search_area()
1151 list_for_each_entry_rcu(pool, &mem->pools, node) { in swiotlb_search_area()
1152 if (cpu_offset >= pool->nareas) in swiotlb_search_area()
1154 area_index = (start_cpu + cpu_offset) & (pool->nareas - 1); in swiotlb_search_area()
1168 * swiotlb_find_slots() - search for slots in the whole swiotlb
1170 * @orig_addr: Original (non-bounced) IO buffer address.
1179 * Return: Index of the first allocated slot, or -1 on error.
1185 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_find_slots() local
1194 return -1; in swiotlb_find_slots()
1204 if (!mem->can_grow) in swiotlb_find_slots()
1205 return -1; in swiotlb_find_slots()
1207 schedule_work(&mem->dyn_alloc); in swiotlb_find_slots()
1210 phys_limit = min_not_zero(*dev->dma_mask, dev->bus_dma_limit); in swiotlb_find_slots()
1214 return -1; in swiotlb_find_slots()
1219 swiotlb_dyn_free(&pool->rcu); in swiotlb_find_slots()
1220 return -1; in swiotlb_find_slots()
1223 pool->transient = true; in swiotlb_find_slots()
1224 spin_lock_irqsave(&dev->dma_io_tlb_lock, flags); in swiotlb_find_slots()
1225 list_add_rcu(&pool->node, &dev->dma_io_tlb_pools); in swiotlb_find_slots()
1226 spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags); in swiotlb_find_slots()
1227 inc_transient_used(mem, pool->nslabs); in swiotlb_find_slots()
1230 WRITE_ONCE(dev->dma_uses_io_tlb, true); in swiotlb_find_slots()
1237 * First, the store to dev->dma_uses_io_tlb must be ordered before the in swiotlb_find_slots()
1239 * cannot be passed to another CPU before updating dev->dma_uses_io_tlb. in swiotlb_find_slots()
1241 * Second, the load from mem->pools must be ordered before the same in swiotlb_find_slots()
1265 *retpool = pool = &dev->dma_io_tlb_mem->defpool; in swiotlb_find_slots()
1266 i = start = raw_smp_processor_id() & (pool->nareas - 1); in swiotlb_find_slots()
1272 if (++i >= pool->nareas) in swiotlb_find_slots()
1275 return -1; in swiotlb_find_slots()
1283 * mem_used() - get number of used slots in an allocator
1284 * @mem: Software IO TLB allocator.
1291 static unsigned long mem_used(struct io_tlb_mem *mem) in mem_used() argument
1293 return atomic_long_read(&mem->total_used); in mem_used()
1299 * mem_pool_used() - get number of used slots in a memory pool
1311 for (i = 0; i < pool->nareas; i++) in mem_pool_used()
1312 used += pool->areas[i].used; in mem_pool_used()
1317 * mem_used() - get number of used slots in an allocator
1318 * @mem: Software IO TLB allocator.
1325 static unsigned long mem_used(struct io_tlb_mem *mem) in mem_used() argument
1332 list_for_each_entry_rcu(pool, &mem->pools, node) in mem_used()
1338 return mem_pool_used(&mem->defpool); in mem_used()
1345 * swiotlb_tbl_map_single() - bounce buffer map a single contiguous physical area
1347 * @orig_addr: Original (non-bounced) physical IO buffer address
1349 * any pre- or post-padding for alignment
1351 * @dir: DMA direction
1352 * @attrs: Optional DMA attributes for the map operation
1362 * of orig_addr that are specified in the DMA min_align_mask for the device. As
1366 * area. Any pre-padding (due to an offset) and any post-padding (due to
1367 * rounding-up the size) is not initialized.
1373 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_tbl_map_single() local
1382 if (!mem || !mem->nslabs) { in swiotlb_tbl_map_single()
1384 "Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer"); in swiotlb_tbl_map_single()
1389 pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n"); in swiotlb_tbl_map_single()
1404 if (index == -1) { in swiotlb_tbl_map_single()
1408 size, mem->nslabs, mem_used(mem)); in swiotlb_tbl_map_single()
1419 * Save away the mapping from the original address to the DMA address. in swiotlb_tbl_map_single()
1424 offset &= (IO_TLB_SIZE - 1); in swiotlb_tbl_map_single()
1426 pool->slots[index].pad_slots = pad_slots; in swiotlb_tbl_map_single()
1427 for (i = 0; i < (nr_slots(size) - pad_slots); i++) in swiotlb_tbl_map_single()
1428 pool->slots[index + i].orig_addr = slot_addr(orig_addr, i); in swiotlb_tbl_map_single()
1429 tlb_addr = slot_addr(pool->start, index) + offset; in swiotlb_tbl_map_single()
1432 * the original buffer to the TLB buffer before initiating DMA in order in swiotlb_tbl_map_single()
1444 struct io_tlb_pool *mem) in swiotlb_release_slots() argument
1452 index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT; in swiotlb_release_slots()
1453 index -= mem->slots[index].pad_slots; in swiotlb_release_slots()
1454 nslots = nr_slots(mem->slots[index].alloc_size + offset); in swiotlb_release_slots()
1455 aindex = index / mem->area_nslabs; in swiotlb_release_slots()
1456 area = &mem->areas[aindex]; in swiotlb_release_slots()
1464 BUG_ON(aindex >= mem->nareas); in swiotlb_release_slots()
1466 spin_lock_irqsave(&area->lock, flags); in swiotlb_release_slots()
1468 count = mem->slots[index + nslots].list; in swiotlb_release_slots()
1476 for (i = index + nslots - 1; i >= index; i--) { in swiotlb_release_slots()
1477 mem->slots[i].list = ++count; in swiotlb_release_slots()
1478 mem->slots[i].orig_addr = INVALID_PHYS_ADDR; in swiotlb_release_slots()
1479 mem->slots[i].alloc_size = 0; in swiotlb_release_slots()
1480 mem->slots[i].pad_slots = 0; in swiotlb_release_slots()
1487 for (i = index - 1; in swiotlb_release_slots()
1488 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list; in swiotlb_release_slots()
1489 i--) in swiotlb_release_slots()
1490 mem->slots[i].list = ++count; in swiotlb_release_slots()
1491 area->used -= nslots; in swiotlb_release_slots()
1492 spin_unlock_irqrestore(&area->lock, flags); in swiotlb_release_slots()
1494 dec_used(dev->dma_io_tlb_mem, nslots); in swiotlb_release_slots()
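
swiotlb_search_pool_area() and swiotlb_release_slots() above are the two halves of the same bookkeeping on the countdown list built at init time: allocation looks for a slot whose countdown covers the request, zeroes the claimed run and shortens the counts just before it; release rebuilds the countdown and lets it grow back into any free run that follows. A standalone toy model of both steps (segment size assumed; the real code's alignment masks, per-area index wrapping, used counters and locking are all omitted):

    #include <stdio.h>

    /* Assumed defaults, as in the earlier sketches. */
    #define IO_TLB_SEGSIZE 128
    #define NSLABS         256

    static unsigned int list[NSLABS];   /* per-slot countdown of free slots */

    static unsigned int io_tlb_offset(unsigned int i)
    {
        return i & (IO_TLB_SEGSIZE - 1);
    }

    /* Same countdown rule as swiotlb_init_io_tlb_pool(). */
    static void init_pool(void)
    {
        unsigned int i, seg, left;

        for (i = 0; i < NSLABS; i++) {
            seg  = IO_TLB_SEGSIZE - io_tlb_offset(i);
            left = NSLABS - i;
            list[i] = seg < left ? seg : left;
        }
    }

    /* Toy claim step from swiotlb_search_pool_area(): a run of nslots free
     * slots starts at s exactly when list[s] >= nslots.  The real code also
     * honours alignment masks and wraps around a per-area start index. */
    static int alloc_slots(int nslots)
    {
        unsigned int count = 0;
        int s, i;

        for (s = 0; s < NSLABS; s++)
            if (list[s] >= (unsigned int)nslots)
                break;
        if (s == NSLABS)
            return -1;

        for (i = s; i < s + nslots; i++)
            list[i] = 0;                      /* claimed: no longer free */
        for (i = s - 1;
             i >= 0 && io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && list[i];
             i--)
            list[i] = ++count;                /* free run before us shrank */
        return s;
    }

    /* Toy merge step from swiotlb_release_slots(). */
    static void free_slots(int index, int nslots)
    {
        unsigned int count = 0;
        int i;

        /* Continue the countdown of the free run that follows, if it is
         * still inside the same segment. */
        if (index + nslots < NSLABS && io_tlb_offset(index + nslots) != 0)
            count = list[index + nslots];

        for (i = index + nslots - 1; i >= index; i--)
            list[i] = ++count;                /* freed run rejoins the list */
        for (i = index - 1;
             i >= 0 && io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && list[i];
             i--)
            list[i] = ++count;                /* and extends backwards */
    }

    int main(void)
    {
        int a;

        init_pool();
        a = alloc_slots(4);
        printf("allocated 4 slots at index %d, countdown at slot 4: %u\n",
               a, list[4]);                   /* index 0, countdown 124 */
        free_slots(a, 4);
        printf("after release, countdown at slot 0: %u\n", list[0]);  /* 128 */
        return 0;
    }
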
1500 * swiotlb_del_transient() - delete a transient memory pool
1513 if (!pool->transient) in swiotlb_del_transient()
1516 dec_used(dev->dma_io_tlb_mem, pool->nslabs); in swiotlb_del_transient()
1518 dec_transient_used(dev->dma_io_tlb_mem, pool->nslabs); in swiotlb_del_transient()
1588 /* Ensure that the address returned is DMA'ble */ in swiotlb_map()
1596 &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); in swiotlb_map()
1618 return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align; in swiotlb_max_mapping_size()
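
The return matched above caps a single bounce-buffered mapping at one segment. With the usual defaults assumed here (2 KiB slots, 128 slots per segment) that is 256 KiB, minus an alignment reserve (min_align, not part of the matched lines) that the function derives from the device's DMA min_align_mask:

    #include <stdio.h>

    #define IO_TLB_SHIFT   11    /* assumed: 2 KiB slots */
    #define IO_TLB_SIZE    (1UL << IO_TLB_SHIFT)
    #define IO_TLB_SEGSIZE 128   /* assumed: slots per segment */

    int main(void)
    {
        unsigned long min_align = 0;

        /* No min_align_mask: the full segment, 256 KiB. */
        printf("max mapping size: %lu bytes\n",
               IO_TLB_SIZE * IO_TLB_SEGSIZE - min_align);

        /* Hypothetical device with a 4 KiB min_align_mask: up to one
         * page of the segment is reserved for alignment padding. */
        min_align = 4096;
        printf("with 4 KiB alignment reserve: %lu bytes\n",
               IO_TLB_SIZE * IO_TLB_SEGSIZE - min_align);
        return 0;
    }
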
1622 * is_swiotlb_allocated() - check if the default software IO TLB is initialized
1631 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in is_swiotlb_active() local
1633 return mem && mem->nslabs; in is_swiotlb_active()
1637 * default_swiotlb_base() - get the base address of the default SWIOTLB
1650 * default_swiotlb_limit() - get the address limit of the default SWIOTLB
1659 return io_tlb_default_mem.defpool.end - 1; in default_swiotlb_limit()
1665 static unsigned long mem_transient_used(struct io_tlb_mem *mem) in mem_transient_used() argument
1667 return atomic_long_read(&mem->transient_nslabs); in mem_transient_used()
1672 struct io_tlb_mem *mem = data; in io_tlb_transient_used_get() local
1674 *val = mem_transient_used(mem); in io_tlb_transient_used_get()
1684 struct io_tlb_mem *mem = data; in io_tlb_used_get() local
1686 *val = mem_used(mem); in io_tlb_used_get()
1692 struct io_tlb_mem *mem = data; in io_tlb_hiwater_get() local
1694 *val = atomic_long_read(&mem->used_hiwater); in io_tlb_hiwater_get()
1700 struct io_tlb_mem *mem = data; in io_tlb_hiwater_set() local
1704 return -EINVAL; in io_tlb_hiwater_set()
1706 atomic_long_set(&mem->used_hiwater, val); in io_tlb_hiwater_set()
1714 static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem, in swiotlb_create_debugfs_files() argument
1717 mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs); in swiotlb_create_debugfs_files()
1718 if (!mem->nslabs) in swiotlb_create_debugfs_files()
1721 debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs); in swiotlb_create_debugfs_files()
1722 debugfs_create_file("io_tlb_used", 0400, mem->debugfs, mem, in swiotlb_create_debugfs_files()
1724 debugfs_create_file("io_tlb_used_hiwater", 0600, mem->debugfs, mem, in swiotlb_create_debugfs_files()
1727 debugfs_create_file("io_tlb_transient_nslabs", 0400, mem->debugfs, in swiotlb_create_debugfs_files()
1728 mem, &fops_io_tlb_transient_used); in swiotlb_create_debugfs_files()
1742 static inline void swiotlb_create_debugfs_files(struct io_tlb_mem *mem, in swiotlb_create_debugfs_files() argument
1753 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_alloc() local
1759 if (!mem) in swiotlb_alloc()
1762 align = (1 << (get_order(size) + PAGE_SHIFT)) - 1; in swiotlb_alloc()
1764 if (index == -1) in swiotlb_alloc()
1767 tlb_addr = slot_addr(pool->start, index); in swiotlb_alloc()
1769 dev_WARN_ONCE(dev, 1, "Cannot allocate pages from non page-aligned swiotlb addr 0x%pa.\n", in swiotlb_alloc()
1795 struct io_tlb_mem *mem = rmem->priv; in rmem_swiotlb_device_init() local
1796 unsigned long nslabs = rmem->size >> IO_TLB_SHIFT; in rmem_swiotlb_device_init()
1798 /* Set Per-device io tlb area to one */ in rmem_swiotlb_device_init()
1801 if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) { in rmem_swiotlb_device_init()
1802 dev_err(dev, "Restricted DMA pool must be accessible within the linear mapping."); in rmem_swiotlb_device_init()
1803 return -EINVAL; in rmem_swiotlb_device_init()
1811 if (!mem) { in rmem_swiotlb_device_init()
1814 mem = kzalloc(sizeof(*mem), GFP_KERNEL); in rmem_swiotlb_device_init()
1815 if (!mem) in rmem_swiotlb_device_init()
1816 return -ENOMEM; in rmem_swiotlb_device_init()
1817 pool = &mem->defpool; in rmem_swiotlb_device_init()
1819 pool->slots = kcalloc(nslabs, sizeof(*pool->slots), GFP_KERNEL); in rmem_swiotlb_device_init()
1820 if (!pool->slots) { in rmem_swiotlb_device_init()
1821 kfree(mem); in rmem_swiotlb_device_init()
1822 return -ENOMEM; in rmem_swiotlb_device_init()
1825 pool->areas = kcalloc(nareas, sizeof(*pool->areas), in rmem_swiotlb_device_init()
1827 if (!pool->areas) { in rmem_swiotlb_device_init()
1828 kfree(pool->slots); in rmem_swiotlb_device_init()
1829 kfree(mem); in rmem_swiotlb_device_init()
1830 return -ENOMEM; in rmem_swiotlb_device_init()
1833 set_memory_decrypted((unsigned long)phys_to_virt(rmem->base), in rmem_swiotlb_device_init()
1834 rmem->size >> PAGE_SHIFT); in rmem_swiotlb_device_init()
1835 swiotlb_init_io_tlb_pool(pool, rmem->base, nslabs, in rmem_swiotlb_device_init()
1837 mem->force_bounce = true; in rmem_swiotlb_device_init()
1838 mem->for_alloc = true; in rmem_swiotlb_device_init()
1840 spin_lock_init(&mem->lock); in rmem_swiotlb_device_init()
1841 INIT_LIST_HEAD_RCU(&mem->pools); in rmem_swiotlb_device_init()
1843 add_mem_pool(mem, pool); in rmem_swiotlb_device_init()
1845 rmem->priv = mem; in rmem_swiotlb_device_init()
1847 swiotlb_create_debugfs_files(mem, rmem->name); in rmem_swiotlb_device_init()
1850 dev->dma_io_tlb_mem = mem; in rmem_swiotlb_device_init()
1858 dev->dma_io_tlb_mem = &io_tlb_default_mem; in rmem_swiotlb_device_release()
1868 unsigned long node = rmem->fdt_node; in rmem_swiotlb_setup()
1871 of_get_flat_dt_prop(node, "linux,cma-default", NULL) || in rmem_swiotlb_setup()
1872 of_get_flat_dt_prop(node, "linux,dma-default", NULL) || in rmem_swiotlb_setup()
1873 of_get_flat_dt_prop(node, "no-map", NULL)) in rmem_swiotlb_setup()
1874 return -EINVAL; in rmem_swiotlb_setup()
1876 rmem->ops = &rmem_swiotlb_ops; in rmem_swiotlb_setup()
1877 pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n", in rmem_swiotlb_setup()
1878 &rmem->base, (unsigned long)rmem->size / SZ_1M); in rmem_swiotlb_setup()
1882 RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);