xref: /linux/arch/x86/kernel/amd_gart_64.c (revision d6e4b3e326d8b44675b9e19534347d97073826aa)
/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32-bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-API-HOWTO.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
#include <linux/dma-direct.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/set_memory.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it set the GART is
 * flushed for every mapping. The problem is that the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with at least Qlogic).
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

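/*
 * Each GATT entry is a 32-bit GART PTE describing one 4K page.
 * GPTE_ENCODE() keeps bits 12-31 of the physical address in place,
 * stores address bits 32-39 in PTE bits 4-11 and sets the valid and
 * coherent flags; GPTE_DECODE() reverses the transformation.  This is
 * why the GART can only remap physical addresses below 1TB.
 */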
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* GART can only remap to physical addresses < 1TB */
#define GART_MAX_PHYS_ADDR	(1ULL << 40)

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state. set for each gart wrap */

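/*
 * Allocate a run of 'size' pages in the remapping area.  The allocation
 * honours the device's segment boundary and the requested alignment and
 * returns the page index into the aperture, or -1 if the area is full.
 * Wrapping around to the start of the bitmap schedules a GART flush so
 * that stale TLB entries are not reused.
 */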
static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

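/*
 * Release a run of pages back to the allocation bitmap.  If the freed
 * range lies at or after the current scan position, the scan position
 * is advanced past it so the entries are not handed out again before
 * the next GART flush.
 */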
static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	bitmap_clear(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		amd_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static void dump_leak(void)
{
	static int dump;

	if (dump)
		return;
	dump = 1;

	show_stack(NULL, NULL);
	debug_dma_dump_mappings(NULL);
}
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped, pre-reserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the I/O operation. When the size exceeds the pre-reserved
	 * space, memory corruption will occur or random memory will be
	 * DMAed out. Hopefully no network devices use single mappings
	 * that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

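/*
 * need_iommu: the mapping must go through the GART, either because
 * iommu=force was given or because the device cannot reach the address
 * directly.  nonforced_iommu ignores the force flag and is used on the
 * overflow/fallback paths.
 */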
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu || !dma_capable(dev, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !dma_capable(dev, addr, size);
}

/*
 * Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page;
	int i;

	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
		return DMA_MAPPING_ERROR;

	iommu_page = alloc_iommu(dev, npages, align_mask);
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return DMA_MAPPING_ERROR;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    unsigned long attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr == DMA_MAPPING_ERROR ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	pr_debug("dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == DMA_MAPPING_ERROR) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, 0);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	out		= 0;
	start		= 0;
	start_sg	= sg;
	sgmap		= sg;
	seg_size	= 0;
	max_seg_size	= dma_get_max_seg_size(dev);
	ps		= NULL; /* shut up gcc */

	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;

				seg_size	= 0;
				sgmap		= sg_next(sgmap);
				pages		= 0;
				start		= i;
				start_sg	= s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir, 0);

	/* When it was forced or merged, try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = DMA_MAPPING_ERROR;
	return 0;
}

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag, unsigned long attrs)
{
	void *vaddr;

	vaddr = dma_direct_alloc_pages(dev, size, dma_addr, flag, attrs);
	if (!vaddr ||
	    !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
		return vaddr;

	*dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
			DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1);
	flush_gart();
	if (unlikely(*dma_addr == DMA_MAPPING_ERROR))
		goto out_free;
	return vaddr;
out_free:
	dma_direct_free_pages(dev, size, vaddr, *dma_addr, attrs);
	return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr, unsigned long attrs)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
	dma_direct_free_pages(dev, size, vaddr, dma_addr, attrs);
}

static int no_agp;

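/*
 * Decide how much of the aperture to use for remapping: the user-given
 * size if one was set, otherwise the whole aperture when AGP is out of
 * the picture and half of it when AGP still needs the rest.  Warn when
 * the result is uncomfortably small.
 */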
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		pr_warning(
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
				iommu_size >> 20);
	}

	return iommu_size;
}

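/*
 * Read the aperture base and size from a northbridge's GART registers.
 * The base register holds physical address bits 39..25, the control
 * register encodes the size as 32MB << order.  Returns 0 when there is
 * no usable aperture or it (bogusly) lies above 4GB.
 */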
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

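/*
 * Point every northbridge at the shared GATT and enable GART
 * translation, then flush the GART TLBs to get rid of stale entries.
 */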
static void enable_gart_translations(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}

	/* Flush the GART-TLB to remove stale entries */
	amd_flush_garts();
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static void gart_fixup_northbridges(void)
{
	int i;

	if (!fix_up_north_bridges)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	pr_info("PCI-DMA: Restoring GART aperture settings\n");

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		/*
		 * Don't enable translations just yet.  That is the next
		 * step.  Restore the pre-suspend aperture settings.
		 */
		gart_set_size_and_enable(dev, aperture_order);
		pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
	}
}

static void gart_resume(void)
{
	pr_info("PCI-DMA: Resuming GART IOMMU\n");

	gart_fixup_northbridges();

	enable_gart_translations();
}

static struct syscore_ops gart_syscore_ops = {
	.resume		= gart_resume,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_amd_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;

	pr_info("PCI-DMA: Disabling AGP.\n");

	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < amd_nb_num(); i++) {
		dev = node_to_amd_nb(i)->misc;
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	register_syscore_ops(&gart_syscore_ops);

	flush_gart();

	pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);

	return 0;

 nommu:
	/* Should not happen anymore */
	pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       "falling back to iommu=soft.\n");
	return -1;
}

static const struct dma_map_ops gart_dma_ops = {
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.map_page			= gart_map_page,
	.unmap_page			= gart_unmap_page,
	.alloc				= gart_alloc_coherent,
	.free				= gart_free_coherent,
	.dma_supported			= dma_direct_supported,
};

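/*
 * Shutdown hook (installed below as x86_platform.iommu_shutdown):
 * disable GART translation on every northbridge, unless the GART is
 * still owned by AGP.
 */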
static void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	/* don't shut it down if AGP is installed */
	if (!no_agp)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		u32 ctl;

		dev = node_to_amd_nb(i)->misc;
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

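/*
 * Set up the GART IOMMU: decide whether it can be used at all, map the
 * aperture range, allocate the allocation bitmap, reserve the upper
 * part of the aperture for remapping, unmap that part from the kernel
 * direct mapping, enable translation on the northbridges, point unused
 * entries at a scratch page and finally install gart_dma_ops.
 */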
int __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other AMD AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_amd_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
			pr_warning("falling back to iommu=soft.\n");
		}
		return 0;
	}

	/* need to map that range */
	aper_size	= info.aper_size << 20;
	aper_base	= info.aper_base;
	end_pfn		= (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);

	start_pfn = PFN_DOWN(aper_base);
	if (!pfn_range_is_mapped(start_pfn, end_pfn))
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);

	pr_info("PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	agp_memory_reserved	= iommu_size;
	iommu_start		= aper_size - iommu_size;
	iommu_bus_base		= info.aper_base + iommu_start;
	iommu_gatt_base		= agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
				iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Now all caches are flushed and we can safely enable
	 * GART hardware.  Doing it early leaves the possibility
	 * of stale cache entries that can lead to GART PTE
	 * errors.
	 */
	enable_gart_translations();

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));

	flush_gart();
	dma_ops = &gart_dma_ops;
	x86_platform.iommu_shutdown = gart_iommu_shutdown;
	swiotlb = 0;

	return 0;
}

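/*
 * Parse the GART-specific suboptions of the iommu= boot parameter:
 * a leading number sets the IOMMU size, "fullflush"/"nofullflush"
 * toggle flushing on every mapping, "noagp" skips the AGP driver,
 * "noaperture" disables the aperture fixup, "force"/"allowed" permit
 * use of the aperture and "memaper[=order]" forces a fallback aperture
 * in RAM.
 */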
void __init gart_parse_options(char *p)
{
	int arg;

	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
IOMMU_INIT_POST(gart_iommu_hole_init);