/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This makes it possible to use PCI devices that only support 32-bit
 * addresses on systems with more than 4GB.
 *
 * See Documentation/DMA-API-HOWTO.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
#include <linux/dma-direct.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/set_memory.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

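/*
 * Error cookie returned for failed mappings; gart_iommu_init() points it
 * at the start of the remap area, within the reserved emergency pages.
 */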
static dma_addr_t bad_dma_addr;

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. When it is set the GART is
 * flushed for every mapping. The problem is that the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

/*
 * A GART PTE packs a sub-1TB physical address into 32 bits: bits 12-31 of
 * the address stay in place and bits 32-39 are folded into PTE bits 4-11.
 */
#define GPTE_VALID	1
#define GPTE_COHERENT	2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* GART can only remap to physical addresses < 1TB */
#define GART_MAX_PHYS_ADDR	(1ULL << 40)

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state. set for each gart wrap */

static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

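/* Return a range of GART pages to the bitmap allocator. */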
static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	bitmap_clear(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		amd_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	static int dump;

	if (dump)
		return;
	dump = 1;

	show_stack(NULL, NULL);
	debug_dma_dump_mappings(NULL);
}
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic(KERN_ERR
				"PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu || !dma_capable(dev, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !dma_capable(dev, addr, size);
}

/* Map a single continuous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page;
	int i;

	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
		return bad_dma_addr;

	iommu_page = alloc_iommu(dev, npages, align_mask);
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_addr;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!dev)
		dev = &x86_dma_fallback_dev;

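	/*
	 * Fast path: if the device can address the memory directly and
	 * the IOMMU is not forced, hand back the physical address and
	 * skip the GART entirely.
	 */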
	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    unsigned long attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	pr_debug("dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == bad_dma_addr) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, 0);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
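	/*
	 * One flush covers everything mapped above; flush_gart() only
	 * touches the hardware when need_flush was set.
	 */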
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

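/*
 * Illustrative example: three page-aligned 4K chunks that all need
 * remapping can come out as a single 12K entry backed by three
 * consecutive GART pages.
 */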
/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a continuous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	out = 0;
	start = 0;
	start_sg = sg;
	sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */

	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;

				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir, 0);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
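		/*
		 * dma_map_sg_nonforce() maps every entry or fails
		 * completely, so any positive count means success.
		 */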
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_addr;
	return 0;
}

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag, unsigned long attrs)
{
	dma_addr_t paddr;
	unsigned long align_mask;
	struct page *page;

	if (force_iommu && !(flag & GFP_DMA)) {
		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
		if (!page)
			return NULL;

		align_mask = (1UL << get_order(size)) - 1;
		paddr = dma_map_area(dev, page_to_phys(page), size,
				     DMA_BIDIRECTIONAL, align_mask);

		flush_gart();
		if (paddr != bad_dma_addr) {
			*dma_addr = paddr;
			return page_address(page);
		}
		__free_pages(page, get_order(size));
	} else
		return dma_generic_alloc_coherent(dev, size, dma_addr, flag,
						  attrs);

	return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr, unsigned long attrs)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
	dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
}

static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return (dma_addr == bad_dma_addr);
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		pr_warning(
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
			iommu_size >> 20);
	}

	return iommu_size;
}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

static void enable_gart_translations(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}

	/* Flush the GART-TLB to remove stale entries */
	amd_flush_garts();
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static void gart_fixup_northbridges(void)
{
	int i;

	if (!fix_up_north_bridges)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	pr_info("PCI-DMA: Restoring GART aperture settings\n");

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		/*
		 * Don't enable translations just yet. That is the next
		 * step. Restore the pre-suspend aperture settings.
		 */
		gart_set_size_and_enable(dev, aperture_order);
		pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
	}
}

static void gart_resume(void)
{
	pr_info("PCI-DMA: Resuming GART IOMMU\n");

	gart_fixup_northbridges();

	enable_gart_translations();
}

static struct syscore_ops gart_syscore_ops = {
	.resume		= gart_resume,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_amd_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;

	pr_info("PCI-DMA: Disabling AGP.\n");

	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < amd_nb_num(); i++) {
		dev = node_to_amd_nb(i)->misc;
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	register_syscore_ops(&gart_syscore_ops);

	flush_gart();

	pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);

	return 0;

 nommu:
	/* Should not happen anymore */
	pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       "falling back to iommu=soft.\n");
	return -1;
}

static const struct dma_map_ops gart_dma_ops = {
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.map_page			= gart_map_page,
	.unmap_page			= gart_unmap_page,
	.alloc				= gart_alloc_coherent,
	.free				= gart_free_coherent,
	.mapping_error			= gart_mapping_error,
	.dma_supported			= x86_dma_supported,
};

static void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	/* Don't shut it down if AGP is installed */
	if (!no_agp)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		u32 ctl;

		dev = node_to_amd_nb(i)->misc;
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

int __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;
	long i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other AMD AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_amd_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
			pr_warning("falling back to iommu=soft.\n");
		}
		return 0;
	}

	/* need to map that range */
	aper_size	= info.aper_size << 20;
	aper_base	= info.aper_base;
	end_pfn		= (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);

	start_pfn = PFN_DOWN(aper_base);
	if (!pfn_range_is_mapped(start_pfn, end_pfn))
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);

	pr_info("PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		int ret;

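		/* Grow the dma-debug pool so every GART page can be tracked. */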
		ret = dma_debug_resize_entries(iommu_pages);
		if (ret)
			pr_debug("PCI-DMA: Cannot trace all the entries\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	agp_memory_reserved	= iommu_size;
	iommu_start		= aper_size - iommu_size;
	iommu_bus_base		= info.aper_base + iommu_start;
	bad_dma_addr		= iommu_bus_base;
	iommu_gatt_base		= agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Now all caches are flushed and we can safely enable
	 * GART hardware. Doing it early leaves the possibility
	 * of stale cache entries that can lead to GART PTE
	 * errors.
	 */
	enable_gart_translations();

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
	x86_platform.iommu_shutdown = gart_iommu_shutdown;
	swiotlb = 0;

	return 0;
}

void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
IOMMU_INIT_POST(gart_iommu_hole_init);