// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32-bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-API-HOWTO.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
#include <linux/dma-direct.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/set_memory.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. When it is set, the GART is
 * flushed for every mapping. The problem is that the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;
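/*
 * Each GART PTE is a 32-bit word: bit 0 is the valid bit, bit 1 the
 * coherent bit, bits 31:12 carry physical address bits 31:12, and bits
 * 11:4 carry physical address bits 39:32 -- which is why the GART can
 * only target the first 1TB of physical memory (see GART_MAX_PHYS_ADDR
 * below).
 */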
#define GPTE_VALID 1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* GART can only remap to physical addresses < 1TB */
#define GART_MAX_PHYS_ADDR	(1ULL << 40)

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state. set for each GART wrap */

static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}
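/*
 * Note that next_bit is pushed past a freed range rather than back to it:
 * in the lazy-flush scheme a GART TLB flush is only guaranteed once the
 * allocator wraps around, so freed pages are skipped rather than reused
 * until a wrap -- and therefore a flush -- has happened.
 */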
static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	bitmap_clear(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		amd_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static void dump_leak(void)
{
	static int dump;

	if (dump)
		return;
	dump = 1;

	show_stack(NULL, NULL, KERN_ERR);
	debug_dma_dump_mappings(NULL);
}
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the I/O operation. When the size exceeds the prereserved
	 * space, memory corruption will occur or random memory will be
	 * DMAed out. Hopefully no network devices use single mappings
	 * that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}
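/*
 * need_iommu() is true when a mapping must go through the GART: either
 * the user forced the IOMMU on, or the device cannot reach the physical
 * address directly. nonforced_iommu() ignores the force_iommu override
 * and is used on fallback paths where handing back the plain physical
 * address is still acceptable.
 */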
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu || !dma_capable(dev, addr, size, true);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !dma_capable(dev, addr, size, true);
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page;
	int i;

	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
		return DMA_MAPPING_ERROR;

	iommu_page = alloc_iommu(dev, npages, align_mask);
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return DMA_MAPPING_ERROR;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    unsigned long attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (WARN_ON_ONCE(dma_addr == DMA_MAPPING_ERROR))
		return;

	/*
	 * This driver will not always use a GART mapping, but might have
	 * created a direct mapping instead. If that is the case there is
	 * nothing to unmap here.
	 */
	if (dma_addr < iommu_bus_base ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
	}
}
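/*
 * Note on list termination: gart_map_sg() marks the end of a partially
 * merged list with a dma_length of 0, which is why the unmap loop above
 * stops at the first entry without a dma_length.
 */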
/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	pr_debug("dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == DMA_MAPPING_ERROR) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, 0);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}
/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}
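/*
 * dma_map_cont() is the common entry point: when no remapping is needed
 * the (single) entry keeps its physical address, otherwise the run of
 * entries is merged into one GART mapping by __dma_map_cont().
 */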
static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	out = 0;
	start = 0;
	start_sg = sg;
	sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */

	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;

				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir, 0);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = DMA_MAPPING_ERROR;
	return 0;
}

/*
 * Allocate and map a coherent mapping. The buffer itself comes from the
 * direct-mapping allocator; a GART mapping is layered on top of it only
 * when force_iommu is set and the device's coherent mask is wider than
 * 24 bits.
 */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag, unsigned long attrs)
{
	void *vaddr;

	vaddr = dma_direct_alloc_pages(dev, size, dma_addr, flag, attrs);
	if (!vaddr ||
	    !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
		return vaddr;

	*dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
				 DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1);
	flush_gart();
	if (unlikely(*dma_addr == DMA_MAPPING_ERROR))
		goto out_free;
	return vaddr;
out_free:
	dma_direct_free_pages(dev, size, vaddr, *dma_addr, attrs);
	return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr, unsigned long attrs)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
	dma_direct_free_pages(dev, size, vaddr, dma_addr, attrs);
}

static int no_agp;
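/*
 * Decide how much of the aperture to use as the IOMMU remapping window:
 * by default the whole aperture, or only half of it when the AGP driver
 * is sharing the aperture. A window below 64MB triggers a warning.
 */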
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		pr_warn("PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
			iommu_size >> 20);
	}

	return iommu_size;
}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	/* The base register holds bits 39:25 of the physical aperture base: */
	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
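/*
 * Point every northbridge at the (single, shared) GATT and turn
 * translation on, then flush so no stale GART-TLB entries survive.
 */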
static void enable_gart_translations(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}

	/* Flush the GART-TLB to remove stale entries */
	amd_flush_garts();
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static void gart_fixup_northbridges(void)
{
	int i;

	if (!fix_up_north_bridges)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	pr_info("PCI-DMA: Restoring GART aperture settings\n");

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		/*
		 * Don't enable translations just yet. That is the next
		 * step. Restore the pre-suspend aperture settings.
		 */
		gart_set_size_and_enable(dev, aperture_order);
		pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
	}
}

static void gart_resume(void)
{
	pr_info("PCI-DMA: Resuming GART IOMMU\n");

	gart_fixup_northbridges();

	enable_gart_translations();
}

static struct syscore_ops gart_syscore_ops = {
	.resume = gart_resume,
};
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_amd_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;

	pr_info("PCI-DMA: Disabling AGP.\n");

	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < amd_nb_num(); i++) {
		dev = node_to_amd_nb(i)->misc;
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		/* All northbridges must expose the same aperture: */
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	register_syscore_ops(&gart_syscore_ops);

	flush_gart();

	pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
		aper_base, aper_size>>10);

	return 0;

 nommu:
	/* Should not happen anymore */
	pr_warn("PCI-DMA: More than 4GB of RAM and no IOMMU - falling back to iommu=soft.\n");
	return -1;
}
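/*
 * The dma_map_ops instance handed to the DMA API core. Mapping and
 * unmapping go through the GART helpers above; mmap, get_sgtable,
 * dma_supported and get_required_mask are satisfied by the generic
 * direct-mapping implementations.
 */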
static const struct dma_map_ops gart_dma_ops = {
	.map_sg = gart_map_sg,
	.unmap_sg = gart_unmap_sg,
	.map_page = gart_map_page,
	.unmap_page = gart_unmap_page,
	.alloc = gart_alloc_coherent,
	.free = gart_free_coherent,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.dma_supported = dma_direct_supported,
	.get_required_mask = dma_direct_get_required_mask,
};

static void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	/* don't shut it down if there is AGP installed */
	if (!no_agp)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		u32 ctl;

		dev = node_to_amd_nb(i)->misc;
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

int __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other AMD AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_amd_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			pr_warn("More than 4GB of memory but GART IOMMU not available.\n");
			pr_warn("falling back to iommu=soft.\n");
		}
		return 0;
	}

	/* need to map that range */
	aper_size = info.aper_size << 20;
	aper_base = info.aper_base;
	end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);

	start_pfn = PFN_DOWN(aper_base);
	if (!pfn_range_is_mapped(start_pfn, end_pfn))
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT,
				    PAGE_KERNEL);

	pr_info("PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
		iommu_size >> 20);

	agp_memory_reserved = iommu_size;
	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Now all caches are flushed and we can safely enable
	 * GART hardware. Doing it early leaves the possibility
	 * of stale cache entries that can lead to GART PTE
	 * errors.
	 */
	enable_gart_translations();

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't cause a bus
	 * abort then. (A P2P bridge may be prefetching on DMA reads.)
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));

	flush_gart();
	dma_ops = &gart_dma_ops;
	x86_platform.iommu_shutdown = gart_iommu_shutdown;
	swiotlb = 0;

	return 0;
}

void __init gart_parse_options(char *p)
{
	int arg;

	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
IOMMU_INIT_POST(gart_iommu_hole_init);