/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32bit addresses to be used on
 * systems with more than 4GB.
 *
 * See Documentation/DMA-API-HOWTO.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/set_memory.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

static dma_addr_t bad_dma_addr;

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

/*
 * GART PTE layout: bits 31:12 hold physical address bits 31:12, bits 11:4
 * hold physical address bits 39:32, and the low bits carry the flags.
 */
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* GART can only remap to physical addresses < 1TB */
#define GART_MAX_PHYS_ADDR	(1ULL << 40)

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state, set on each GART wrap */

static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}
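/*
 * Illustrative note (not from the original source): alloc_iommu() is a
 * next-fit allocator over the aperture bitmap.  With the default segment
 * boundary mask of 0xffffffff and 4K pages, boundary_size works out to
 * 0x100000000 >> 12 = 0x100000 pages, i.e. a single mapping is never
 * allowed to straddle a 4GB boundary.  When the search from next_bit
 * fails, the allocator wraps to bit 0 and sets need_flush, because bits
 * freed earlier may still be cached as stale entries in the GART TLB.
 */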
static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	bitmap_clear(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		amd_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	static int dump;

	if (dump)
		return;
	dump = 1;

	show_stack(NULL, NULL);
	debug_dma_dump_mappings(NULL);
}
#endif
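/*
 * Illustrative note (not from the original source): testing and clearing
 * need_flush under iommu_bitmap_lock makes the flush atomic with respect
 * to allocations.  Example of the lost-update race the lock prevents:
 * CPU A wraps the allocator and sets need_flush while CPU B is already
 * inside flush_gart(); without the lock, B could clear the flag after
 * A set it, dropping A's pending flush request.
 */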
static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space,
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu || !dma_capable(dev, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !dma_capable(dev, addr, size);
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page;
	int i;

	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
		return bad_dma_addr;

	iommu_page = alloc_iommu(dev, npages, align_mask);
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_addr;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
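/*
 * Illustrative example (not from the original source): mapping phys_mem =
 * 0x10002345 with size = 0x2000 covers bytes 0x10002345..0x10004344, so
 * iommu_num_pages() returns 3 GART pages.  The fill loop only advances
 * phys_mem by whole pages, so (phys_mem & ~PAGE_MASK) is still the
 * original in-page offset 0x345, and the returned bus address is
 * iommu_bus_base + iommu_page*PAGE_SIZE + 0x345.
 */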
/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    unsigned long attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
	}
}
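/*
 * Illustrative note (not from the original source): gart_unmap_page()
 * silently ignores addresses outside the remapped window.  That covers
 * identity-mapped buffers that never went through the GART, and also the
 * bad_dma_addr cookie returned on overflow, which falls inside the
 * reserved EMERGENCY_PAGES area.  Likewise, gart_unmap_sg() stops at the
 * first entry with dma_length == 0, the terminator that gart_map_sg()
 * writes when merging leaves fewer mapped entries than nents.
 */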
/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	pr_debug("dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == bad_dma_addr) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, 0);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}
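/*
 * Illustrative example (not from the original source): three scatterlist
 * entries of 4K/4K/4K, the first at offset 0, merge via __dma_map_cont()
 * into one region: pages = 3 contiguous GART entries are allocated,
 * sout->dma_address points at the first, and sout->dma_length accumulates
 * to 12288.  The BUG_ON(s != start && s->offset) enforces the caller's
 * merge rule that only the first entry may start mid-page.
 */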
/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	out		= 0;
	start		= 0;
	start_sg	= sg;
	sgmap		= sg;
	seg_size	= 0;
	max_seg_size	= dma_get_max_seg_size(dev);
	ps		= NULL; /* shut up gcc */

	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;

				seg_size	= 0;
				sgmap		= sg_next(sgmap);
				pages		= 0;
				start		= i;
				start_sg	= s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir, 0);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_addr;
	return 0;
}
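/*
 * Illustrative example (not from the original source): with iommu_merge
 * enabled and both entries needing remapping, (offset 0, len 4096)
 * followed by (offset 0, len 8192) passes the merge test, since the first
 * ends exactly on a page boundary; they become one 12K mapping.  But
 * (offset 0, len 2048) followed by (offset 0, len 4096) does not, because
 * (ps->offset + ps->length) % PAGE_SIZE is 2048, so the second entry
 * starts a new mapping segment.
 */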
/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag, unsigned long attrs)
{
	dma_addr_t paddr;
	unsigned long align_mask;
	struct page *page;

	if (force_iommu && !(flag & GFP_DMA)) {
		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
		if (!page)
			return NULL;

		align_mask = (1UL << get_order(size)) - 1;
		paddr = dma_map_area(dev, page_to_phys(page), size,
				     DMA_BIDIRECTIONAL, align_mask);

		flush_gart();
		if (paddr != bad_dma_addr) {
			*dma_addr = paddr;
			return page_address(page);
		}
		__free_pages(page, get_order(size));
	} else
		return dma_generic_alloc_coherent(dev, size, dma_addr, flag,
						  attrs);

	return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr, unsigned long attrs)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
	dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
}

static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return (dma_addr == bad_dma_addr);
}

static int no_agp;
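/*
 * Illustrative example (not from the original source): for a 16K coherent
 * allocation, get_order(16384) = 2, so align_mask = (1UL << 2) - 1 = 3
 * pages.  alloc_iommu() then returns a GART page index that is a multiple
 * of 4, keeping the bus address 16K-aligned, which matches the natural
 * alignment of the backing alloc_pages() allocation.
 */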
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		pr_warning(
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
			iommu_size >> 20);
	}

	return iommu_size;
}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

static void enable_gart_translations(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}

	/* Flush the GART-TLB to remove stale entries */
	amd_flush_garts();
}
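/*
 * Illustrative example (not from the original source): the aperture base
 * register holds bits 39:25 of the physical base, so a register value of
 * 0x0040 decodes to 0x40 << 25 = 0x80000000 (2GB).  An order field of 1
 * in the control register gives aper_size = 32MB << 1 = 64MB.  Apertures
 * that would extend past 4GB are rejected by returning base 0.
 */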
/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static void gart_fixup_northbridges(void)
{
	int i;

	if (!fix_up_north_bridges)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	pr_info("PCI-DMA: Restoring GART aperture settings\n");

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		/*
		 * Don't enable translations just yet. That is the next
		 * step. Restore the pre-suspend aperture settings.
		 */
		gart_set_size_and_enable(dev, aperture_order);
		pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
	}
}

static void gart_resume(void)
{
	pr_info("PCI-DMA: Resuming GART IOMMU\n");

	gart_fixup_northbridges();

	enable_gart_translations();
}

static struct syscore_ops gart_syscore_ops = {
	.resume		= gart_resume,
};
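/*
 * Illustrative note (not from the original source): the resume path is
 * the mirror image of read_aperture().  aperture_alloc >> 25 converts the
 * physical base back into the register format (bits 39:25), and the
 * aperture size and base are restored before enable_gart_translations()
 * turns translation back on, so the GART is never enabled against a
 * stale aperture window.
 */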
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_amd_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;

	pr_info("PCI-DMA: Disabling AGP.\n");

	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < amd_nb_num(); i++) {
		dev = node_to_amd_nb(i)->misc;
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	register_syscore_ops(&gart_syscore_ops);

	flush_gart();

	pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
		aper_base, aper_size>>10);

	return 0;

 nommu:
	/* Should not happen anymore */
	pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
		   "falling back to iommu=soft.\n");
	return -1;
}
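/*
 * Illustrative example (not from the original source): each GATT entry is
 * a u32 covering one 4K page of aperture, so a 64MB aperture needs
 * (64MB >> 12) = 16384 entries = 64KB of table, i.e. an order-4 page
 * allocation.  The table is switched to uncacheable because the GART
 * hardware may read it without snooping the CPU caches.
 */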
static const struct dma_map_ops gart_dma_ops = {
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.map_page			= gart_map_page,
	.unmap_page			= gart_unmap_page,
	.alloc				= gart_alloc_coherent,
	.free				= gart_free_coherent,
	.mapping_error			= gart_mapping_error,
};

static void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	/* don't shutdown it if there is AGP installed */
	if (!no_agp)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		u32 ctl;

		dev = node_to_amd_nb(i)->misc;
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

int __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;
	long i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other AMD AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_amd_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
			pr_warning("falling back to iommu=soft.\n");
		}
		return 0;
	}

	/* need to map that range */
	aper_size	= info.aper_size << 20;
	aper_base	= info.aper_base;
	end_pfn		= (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);

	start_pfn = PFN_DOWN(aper_base);
	if (!pfn_range_is_mapped(start_pfn, end_pfn))
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);

	pr_info("PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
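	/*
	 * Illustrative example (not from the original source): with a
	 * 512MB IOMMU window, iommu_pages = 512MB >> 12 = 131072, so the
	 * bitmap needs 131072/8 = 16KB, an order-2 page allocation.
	 */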
#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		int ret;

		ret = dma_debug_resize_entries(iommu_pages);
		if (ret)
			pr_debug("PCI-DMA: Cannot trace all the entries\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
		iommu_size >> 20);

	agp_memory_reserved	= iommu_size;
	iommu_start		= aper_size - iommu_size;
	iommu_bus_base		= info.aper_base + iommu_start;
	bad_dma_addr		= iommu_bus_base;
	iommu_gatt_base		= agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Now all caches are flushed and we can safely enable
	 * GART hardware. Doing it early leaves the possibility
	 * of stale cache entries that can lead to GART PTE
	 * errors.
	 */
	enable_gart_translations();
	/*
	 * Try to workaround a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
	x86_platform.iommu_shutdown = gart_iommu_shutdown;
	swiotlb = 0;

	return 0;
}

void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
IOMMU_INIT_POST(gart_iommu_hole_init);
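/*
 * Illustrative usage (not from the original source): these options are
 * parsed from the iommu= kernel command line parameter, e.g.
 *
 *	iommu=fullflush		flush the GART TLB on every mapping
 *	iommu=noagp		don't use the AGP driver's GATT
 *	iommu=memaper=3		force a fallback aperture of 32MB << 3
 *
 * See Documentation/x86/x86_64/boot-options.txt for the full list.
 */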