Lines matching refs:arena in arch/alpha/kernel/pci_iommu.c (Linux kernel, Alpha PCI IOMMU)
64 struct pci_iommu_arena *arena; in iommu_arena_new_node() local
75 arena = memblock_alloc_or_panic(sizeof(*arena), SMP_CACHE_BYTES); in iommu_arena_new_node()
76 arena->ptes = memblock_alloc_or_panic(mem_size, align); in iommu_arena_new_node()
78 spin_lock_init(&arena->lock); in iommu_arena_new_node()
79 arena->hose = hose; in iommu_arena_new_node()
80 arena->dma_base = base; in iommu_arena_new_node()
81 arena->size = window_size; in iommu_arena_new_node()
82 arena->next_entry = 0; in iommu_arena_new_node()
86 arena->align_entry = 1; in iommu_arena_new_node()
88 return arena; in iommu_arena_new_node()
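
The lines above show iommu_arena_new_node() allocating the arena descriptor and its PTE array from memblock, then filling in the bookkeeping fields. A minimal userspace sketch of that state, with field names taken from the listing; the types, the pthread mutex standing in for the kernel spinlock, and the Alpha 8 KB page size are assumptions:

#include <stdlib.h>
#include <pthread.h>

#define MODEL_PAGE_SHIFT 13                     /* assumed: Alpha PAGE_SHIFT, 8 KB pages */
#define MODEL_PAGE_SIZE  (1UL << MODEL_PAGE_SHIFT)

struct arena_model {
	pthread_mutex_t lock;   /* stands in for spin_lock_init(&arena->lock) */
	unsigned long *ptes;    /* one translation entry per page of the window */
	unsigned long dma_base; /* bus address where the window starts */
	unsigned long size;     /* window size in bytes */
	long next_entry;        /* next-fit scan position */
	long align_entry;       /* minimum allocation alignment, in entries */
};

static struct arena_model *arena_model_new(unsigned long base,
					   unsigned long window_size)
{
	struct arena_model *a = calloc(1, sizeof(*a));

	if (!a)
		abort();        /* memblock_alloc_or_panic() panics on failure instead */
	a->ptes = calloc(window_size >> MODEL_PAGE_SHIFT, sizeof(*a->ptes));
	if (!a->ptes)
		abort();
	pthread_mutex_init(&a->lock, NULL);
	a->dma_base = base;
	a->size = window_size;
	a->next_entry = 0;
	a->align_entry = 1;     /* raised for hardware quirks in the real code (line 86) */
	return a;
}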
100 iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena, in iommu_arena_find_pages() argument
109 base = arena->dma_base >> PAGE_SHIFT; in iommu_arena_find_pages()
113 ptes = arena->ptes; in iommu_arena_find_pages()
114 nent = arena->size >> PAGE_SHIFT; in iommu_arena_find_pages()
115 p = ALIGN(arena->next_entry, mask + 1); in iommu_arena_find_pages()
139 alpha_mv.mv_pci_tbi(arena->hose, 0, -1); in iommu_arena_find_pages()
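
iommu_arena_find_pages() is a next-fit search: start at next_entry rounded up to the alignment, scan the PTE array for n consecutive free (zero) entries, and if the end of the window is reached, wrap to the start once, flushing the whole IOMMU TLB via alpha_mv.mv_pci_tbi() first so that stale cached translations of freed entries cannot survive their reuse. A condensed sketch against the model above; the per-device segment-boundary check implied by the dev argument at line 100 is elided, and mask + 1 is assumed to be a power of two as in the kernel:

/* Next-fit scan: return the first index of n consecutive free entries,
   or -1 if the window is exhausted.  'mask' is alignment - 1. */
static long model_find_pages(struct arena_model *a, long n, unsigned long mask)
{
	long nent = a->size >> MODEL_PAGE_SHIFT;
	long p = (a->next_entry + mask) & ~mask;   /* ALIGN(next_entry, mask + 1) */
	int wrapped = 0;

	for (;;) {
		long i;

		if (p + n > nent) {
			if (wrapped++)
				return -1;         /* second pass failed too */
			/* The kernel flushes the IOMMU TLB here:
			   alpha_mv.mv_pci_tbi(arena->hose, 0, -1).  */
			p = 0;
			continue;
		}
		for (i = 0; i < n && !a->ptes[p + i]; i++)
			;                          /* count consecutive free slots */
		if (i == n)
			return p;
		p = (p + i + 1 + mask) & ~mask;    /* skip past the busy entry, realign */
	}
}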
155 iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n, in iommu_arena_alloc() argument
162 spin_lock_irqsave(&arena->lock, flags); in iommu_arena_alloc()
165 ptes = arena->ptes; in iommu_arena_alloc()
166 mask = max(align, arena->align_entry) - 1; in iommu_arena_alloc()
167 p = iommu_arena_find_pages(dev, arena, n, mask); in iommu_arena_alloc()
169 spin_unlock_irqrestore(&arena->lock, flags); in iommu_arena_alloc()
180 arena->next_entry = p + n; in iommu_arena_alloc()
181 spin_unlock_irqrestore(&arena->lock, flags); in iommu_arena_alloc()
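
iommu_arena_alloc() is the locking wrapper around that search: take the arena lock, derive the mask from the caller's alignment and the arena minimum (line 166), find the pages, mark them in use, and bump next_entry past the block. The kernel marks slots with a nonzero IOMMU_INVALID_PTE sentinel so an entry counts as allocated before any real translation is written; the sentinel's exact value below is a placeholder. A sketch:

#define MODEL_INVALID_PTE 0x2UL   /* assumed sentinel: allocated, not yet mapped */

static long model_arena_alloc(struct arena_model *a, long n, long align)
{
	unsigned long mask = (align > a->align_entry ? align : a->align_entry) - 1;
	long p, i;

	pthread_mutex_lock(&a->lock);
	p = model_find_pages(a, n, mask);
	if (p < 0) {
		pthread_mutex_unlock(&a->lock);
		return -1;
	}
	/* Claim the slots under the lock; translations are written later. */
	for (i = 0; i < n; ++i)
		a->ptes[p + i] = MODEL_INVALID_PTE;
	a->next_entry = p + n;
	pthread_mutex_unlock(&a->lock);
	return p;
}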
187 iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n) in iommu_arena_free() argument
192 p = arena->ptes + ofs; in iommu_arena_free()
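
Freeing is just zeroing the entries, since a zero PTE is what the scan above treats as free. Note that iommu_arena_free() itself takes no lock; the unmap paths below hold arena->lock around the call. The model equivalent:

/* Caller must hold a->lock, mirroring the kernel's convention. */
static void model_arena_free(struct arena_model *a, long ofs, long n)
{
	long i;

	for (i = 0; i < n; ++i)
		a->ptes[ofs + i] = 0;      /* zero == free for the next-fit scan */
}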
232 struct pci_iommu_arena *arena; in pci_map_single_1() local
272 arena = hose->sg_pci; in pci_map_single_1()
273 if (!arena || arena->dma_base + arena->size - 1 > max_dma) in pci_map_single_1()
274 arena = hose->sg_isa; in pci_map_single_1()
281 dma_ofs = iommu_arena_alloc(dev, arena, npages, align); in pci_map_single_1()
290 arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr); in pci_map_single_1()
292 ret = arena->dma_base + dma_ofs * PAGE_SIZE; in pci_map_single_1()
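
pci_map_single_1() picks a scatter-gather window (lines 272-274: the PCI window only if it is entirely addressable under the device's DMA limit, else the ISA window), allocates enough entries to cover the buffer, points each PTE at a physical page, and returns a bus address that preserves the in-page offset. A sketch of the mapping arithmetic; the PTE encoding (frame number at bit 1, valid bit at bit 0) is an assumption about mk_iommu_pte(), and 0 stands in for the kernel's mapping-error value:

/* Assumed mk_iommu_pte() shape: PFN in bits 1.., valid bit 0. */
static unsigned long model_mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (MODEL_PAGE_SHIFT - 1)) | 1;
}

static unsigned long model_map_single(struct arena_model *a,
				      unsigned long paddr, unsigned long size)
{
	unsigned long in_page = paddr & (MODEL_PAGE_SIZE - 1);
	long npages = (in_page + size + MODEL_PAGE_SIZE - 1) >> MODEL_PAGE_SHIFT;
	long dma_ofs = model_arena_alloc(a, npages, 0);
	long i;

	if (dma_ofs < 0)
		return 0;                          /* mapping error */
	paddr &= ~(MODEL_PAGE_SIZE - 1);
	for (i = 0; i < npages; ++i, paddr += MODEL_PAGE_SIZE)
		a->ptes[i + dma_ofs] = model_mk_iommu_pte(paddr);

	/* Bus address = window base + entry offset + offset within page. */
	return a->dma_base + dma_ofs * MODEL_PAGE_SIZE + in_page;
}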
353 struct pci_iommu_arena *arena; in alpha_pci_unmap_page() local
374 arena = hose->sg_pci; in alpha_pci_unmap_page()
375 if (!arena || dma_addr < arena->dma_base) in alpha_pci_unmap_page()
376 arena = hose->sg_isa; in alpha_pci_unmap_page()
378 dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT; in alpha_pci_unmap_page()
379 if (dma_ofs * PAGE_SIZE >= arena->size) { in alpha_pci_unmap_page()
382 dma_addr, arena->dma_base, arena->size); in alpha_pci_unmap_page()
389 spin_lock_irqsave(&arena->lock, flags); in alpha_pci_unmap_page()
391 iommu_arena_free(arena, dma_ofs, npages); in alpha_pci_unmap_page()
396 if (dma_ofs >= arena->next_entry) in alpha_pci_unmap_page()
399 spin_unlock_irqrestore(&arena->lock, flags); in alpha_pci_unmap_page()
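
The unmap path reverses the arithmetic: recover the window from the hose (falling back to the ISA window when the address lies below the PCI window, lines 374-376), convert the bus address back to a PTE index, bounds-check it against the window (the printk at 382 reports a bad unmap), and free under the lock. The check at line 396 exists because entries freed at or above next_entry can be handed out again before the next wraparound flush, so their TLB entries must be invalidated immediately; entries below next_entry can wait for the full flush done when the scan wraps. A sketch of the index math:

static int model_unmap_single(struct arena_model *a,
			      unsigned long dma_addr, unsigned long size)
{
	unsigned long in_page = dma_addr & (MODEL_PAGE_SIZE - 1);
	long dma_ofs = (dma_addr - a->dma_base) >> MODEL_PAGE_SHIFT;
	long npages = (in_page + size + MODEL_PAGE_SIZE - 1) >> MODEL_PAGE_SHIFT;

	if (dma_ofs * MODEL_PAGE_SIZE >= a->size)
		return -1;                 /* address outside the window: bad unmap */

	pthread_mutex_lock(&a->lock);
	model_arena_free(a, dma_ofs, npages);
	/* Kernel: if (dma_ofs >= arena->next_entry) flush this range's
	   IOMMU TLB entries now (mv_pci_tbi), since they may be
	   reallocated before the next wraparound flush.  */
	pthread_mutex_unlock(&a->lock);
	return 0;
}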
531 struct scatterlist *out, struct pci_iommu_arena *arena, in sg_fill() argument
572 dma_ofs = iommu_arena_alloc(dev, arena, npages, 0); in sg_fill()
581 return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed); in sg_fill()
584 out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr; in sg_fill()
592 ptes = &arena->ptes[dma_ofs]; in sg_fill()
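
sg_fill() maps one coalesced run of scatterlist entries with a single arena allocation; if that allocation fails, the kernel breaks the run into smaller segments and recurses (line 581). A structural sketch under the assumption that a run is summarized by its leading physical address and total length; the scatterlist walking that fills ptes[dma_ofs..] (line 592) is elided:

struct sg_model {
	unsigned long paddr;       /* physical address of the run */
	unsigned long len;         /* total length of the run */
	unsigned long dma_address; /* out: bus address */
	unsigned long dma_length;  /* out: mapped length */
};

static int model_sg_fill(struct arena_model *a, struct sg_model *run)
{
	unsigned long in_page = run->paddr & (MODEL_PAGE_SIZE - 1);
	long npages = (in_page + run->len + MODEL_PAGE_SIZE - 1) >> MODEL_PAGE_SHIFT;
	long dma_ofs = model_arena_alloc(a, npages, 0);

	if (dma_ofs < 0)
		return -1;         /* caller splits the run and retries */

	/* Same address shape as the single-page path (line 584). */
	run->dma_address = a->dma_base + dma_ofs * MODEL_PAGE_SIZE + in_page;
	run->dma_length = run->len;
	/* ...then each member's pages are written into &a->ptes[dma_ofs]. */
	return 0;
}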
635 struct pci_iommu_arena *arena; in alpha_pci_map_sg() local
664 arena = hose->sg_pci; in alpha_pci_map_sg()
665 if (!arena || arena->dma_base + arena->size - 1 > max_dma) in alpha_pci_map_sg()
666 arena = hose->sg_isa; in alpha_pci_map_sg()
669 arena = NULL; in alpha_pci_map_sg()
678 if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0) in alpha_pci_map_sg()
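
The window-selection pattern repeated at lines 272-274, 664-666 and 729-731 is worth isolating: use the PCI SG window only when its entire bus-address range fits under the device's DMA limit, otherwise fall back to the ISA window. As a helper:

/* Pick the SG window: PCI if fully addressable under max_dma, else ISA. */
static struct arena_model *model_pick_window(struct arena_model *sg_pci,
					     struct arena_model *sg_isa,
					     unsigned long max_dma)
{
	if (!sg_pci || sg_pci->dma_base + sg_pci->size - 1 > max_dma)
		return sg_isa;
	return sg_pci;
}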
717 struct pci_iommu_arena *arena; in alpha_pci_unmap_sg() local
729 arena = hose->sg_pci; in alpha_pci_unmap_sg()
730 if (!arena || arena->dma_base + arena->size - 1 > max_dma) in alpha_pci_unmap_sg()
731 arena = hose->sg_isa; in alpha_pci_unmap_sg()
735 spin_lock_irqsave(&arena->lock, flags); in alpha_pci_unmap_sg()
767 ofs = (addr - arena->dma_base) >> PAGE_SHIFT; in alpha_pci_unmap_sg()
768 iommu_arena_free(arena, ofs, npages); in alpha_pci_unmap_sg()
778 if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry) in alpha_pci_unmap_sg()
781 spin_unlock_irqrestore(&arena->lock, flags); in alpha_pci_unmap_sg()
793 struct pci_iommu_arena *arena; in alpha_pci_supported() local
805 arena = hose->sg_isa; in alpha_pci_supported()
806 if (arena && arena->dma_base + arena->size - 1 <= mask) in alpha_pci_supported()
808 arena = hose->sg_pci; in alpha_pci_supported()
809 if (arena && arena->dma_base + arena->size - 1 <= mask) in alpha_pci_supported()
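
alpha_pci_supported() answers "can this device DMA with this mask?". Leaving the direct and DAC windows aside, it is sufficient that either SG window lies entirely below the mask, since every bus address the allocator hands out falls inside a window. The predicate, mirroring lines 805-809:

static int model_dma_supported(struct arena_model *sg_isa,
			       struct arena_model *sg_pci, unsigned long mask)
{
	if (sg_isa && sg_isa->dma_base + sg_isa->size - 1 <= mask)
		return 1;
	if (sg_pci && sg_pci->dma_base + sg_pci->size - 1 <= mask)
		return 1;
	return 0;
}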
824 iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask) in iommu_reserve() argument
830 if (!arena) return -EINVAL; in iommu_reserve()
832 spin_lock_irqsave(&arena->lock, flags); in iommu_reserve()
835 ptes = arena->ptes; in iommu_reserve()
836 p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask); in iommu_reserve()
838 spin_unlock_irqrestore(&arena->lock, flags); in iommu_reserve()
848 arena->next_entry = p + pg_count; in iommu_reserve()
849 spin_unlock_irqrestore(&arena->lock, flags); in iommu_reserve()
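
iommu_reserve() is the AGP-facing half of the allocator: it grabs a block exactly as iommu_arena_alloc() does, but marks the entries with a distinct "reserved" sentinel, so they are neither free nor live translations until iommu_bind() fills them (and iommu_release() at lines 855-869 checks for that same sentinel before freeing). A sketch; the sentinel value is an assumption standing in for the kernel's IOMMU_RESERVED_PTE define:

#define MODEL_RESERVED_PTE 0xfaceUL   /* assumed value for IOMMU_RESERVED_PTE */

static long model_iommu_reserve(struct arena_model *a, long pg_count,
				long align_mask)
{
	long p, i;

	if (!a)
		return -1;                 /* -EINVAL in the kernel */
	pthread_mutex_lock(&a->lock);
	p = model_find_pages(a, pg_count, align_mask);
	if (p < 0) {
		pthread_mutex_unlock(&a->lock);
		return -1;
	}
	for (i = 0; i < pg_count; i++)
		a->ptes[p + i] = MODEL_RESERVED_PTE;   /* reserved, not yet bound */
	a->next_entry = p + pg_count;
	pthread_mutex_unlock(&a->lock);
	return p;
}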
855 iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count) in iommu_release() argument
860 if (!arena) return -EINVAL; in iommu_release()
862 ptes = arena->ptes; in iommu_release()
869 iommu_arena_free(arena, pg_start, pg_count); in iommu_release()
874 iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count, in iommu_bind() argument
881 if (!arena) return -EINVAL; in iommu_bind()
883 spin_lock_irqsave(&arena->lock, flags); in iommu_bind()
885 ptes = arena->ptes; in iommu_bind()
889 spin_unlock_irqrestore(&arena->lock, flags); in iommu_bind()
897 spin_unlock_irqrestore(&arena->lock, flags); in iommu_bind()
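
iommu_bind() installs real translations into a previously reserved block, and it is all-or-nothing: under the lock it first verifies that every target entry still carries the reserved sentinel, and a single non-reserved entry aborts the whole bind (the unlock at 889 is the failure exit, 897 the success exit). A sketch, taking an array of physical page addresses in place of the kernel's struct page **:

static int model_iommu_bind(struct arena_model *a, long pg_start, long pg_count,
			    const unsigned long *page_paddrs)
{
	long i, j;

	if (!a)
		return -1;                 /* -EINVAL */
	pthread_mutex_lock(&a->lock);
	/* All-or-nothing: every slot must still be reserved. */
	for (j = pg_start; j < pg_start + pg_count; j++) {
		if (a->ptes[j] != MODEL_RESERVED_PTE) {
			pthread_mutex_unlock(&a->lock);
			return -1;         /* -EBUSY */
		}
	}
	for (i = 0, j = pg_start; i < pg_count; i++, j++)
		a->ptes[j] = model_mk_iommu_pte(page_paddrs[i]);
	pthread_mutex_unlock(&a->lock);
	return 0;
}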
903 iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count) in iommu_unbind() argument
908 if (!arena) return -EINVAL; in iommu_unbind()
910 p = arena->ptes + pg_start; in iommu_unbind()
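
iommu_unbind() is the inverse of bind: it writes the reserved sentinel back over every entry in the block, returning it to the state iommu_reserve() left it in, so that iommu_release() can later verify and free it. Like iommu_arena_free(), it takes no lock in the listing above. A sketch:

static int model_iommu_unbind(struct arena_model *a, long pg_start, long pg_count)
{
	long i;

	if (!a)
		return -1;                 /* -EINVAL */
	for (i = 0; i < pg_count; i++)
		a->ptes[pg_start + i] = MODEL_RESERVED_PTE;   /* back to 'reserved' */
	return 0;
}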