xref: /linux/arch/x86/kernel/pci-dma.c (revision 8fa5723aa7e053d498336b48448b292fc2e0458b)
#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>

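/*
 * Forbid DAC (64-bit double address cycle) addressing: >0 forbids it,
 * 0 leaves the decision to quirks, and <0 means the user explicitly
 * asked for DAC, so quirks must not disable it.
 */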
static int forbid_dac __read_mostly;

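/* The DMA mapping operations currently in use; installed by the IOMMU init code. */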
struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

/* Dummy device used for NULL arguments (normally ISA). A smaller DMA
   mask would probably be better, but this stays bug-for-bug compatible
   with older i386. */
struct device x86_dma_fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

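/*
 * Set the device's DMA mask, but only after checking that the new mask
 * can actually be supported.
 */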
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

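/* "dma32_size=" boot option: override the default 128MB low-memory reservation. */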
static int __init parse_dma32_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	dma32_bootmem_size = memparse(p, &p);
	return 0;
}
early_param("dma32_size", parse_dma32_size_opt);

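/*
 * With memory above 4GB present, reserve a chunk of bootmem below 4GB
 * early so that later boot-time allocations do not exhaust that range;
 * the reservation is freed again from pci_iommu_alloc() for the IOMMU
 * code to use.
 */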
void __init dma32_reserve_bootmem(void)
{
	unsigned long size, align;

	if (max_pfn <= MAX_DMA32_PFN)
		return;

	/*
	 * See allocate_aperture() in aperture_64.c for why 512M is used
	 * as the allocation goal.
	 */
	align = 64ULL<<20;
	size = roundup(dma32_bootmem_size, align);
	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
				 512ULL<<20);
	if (dma32_bootmem_ptr)
		dma32_bootmem_size = size;
	else
		dma32_bootmem_size = 0;
}
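/* Release the early reservation made by dma32_reserve_bootmem(). */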
static void __init dma32_free_bootmem(void)
{
	if (max_pfn <= MAX_DMA32_PFN)
		return;

	if (!dma32_bootmem_ptr)
		return;

	free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

	dma32_bootmem_ptr = NULL;
	dma32_bootmem_size = 0;
}

void __init pci_iommu_alloc(void)
{
	/* Free the reserved range so the IOMMU can allocate below 4G. */
	dma32_free_bootmem();
	/*
	 * The order of these calls is important for
	 * fall-back/fail-over reasons.
	 */
	gart_iommu_hole_init();

	detect_calgary();

	detect_intel_iommu();

	amd_iommu_detect();

	pci_swiotlb_init();
}

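/*
 * Number of pages needed to map len bytes starting at addr, accounting
 * for the offset within the first page.
 */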
unsigned long iommu_nr_pages(unsigned long addr, unsigned long len)
{
	unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);

	return size >> PAGE_SHIFT;
}
EXPORT_SYMBOL(iommu_nr_pages);
#endif

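/*
 * Generic coherent-memory allocator: grab zeroed pages on the device's
 * node and, if the buffer lands above the device's coherent DMA mask,
 * retry the allocation from ZONE_DMA.
 */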
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long dma_mask;
	struct page *page;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag |= __GFP_ZERO;
again:
	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (!is_buffer_dma_capable(dma_mask, addr, size)) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_32BIT_MASK && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}

	*dma_addr = addr;
	return page_address(page);
}

/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
 * documentation.
 */
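/*
 * Options handled below (e.g. "iommu=noforce,nomerge"): off, force,
 * noforce, biomerge, panic, nopanic, merge, nomerge, forcesac,
 * allowdac, nodac, usedac, soft, calgary, plus anything understood by
 * gart_parse_options().
 */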
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_bio_merge = 4096;
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_24BIT_MASK)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   The problem with this is that if we overflow the IOMMU area
	   and return DAC as a fallback address the device may not
	   handle it correctly.

	   As a special case some controllers have a 39-bit address
	   mode that is as efficient as 32-bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);

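/*
 * Initialize whichever IOMMU implementation was detected earlier; each
 * init call typically does nothing unless its hardware was found, and
 * no_iommu_init() installs the fallback operations if nothing else has
 * claimed dma_ops.
 */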
static int __init pci_iommu_init(void)
{
	calgary_iommu_init();

	intel_iommu_init();

	amd_iommu_init();

	gart_iommu_init();

	no_iommu_init();
	return 0;
}

void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		printk(KERN_INFO "PCI: VIA PCI bridge detected. "
				 "Disabling DAC.\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif