xref: /linux/arch/x86/kernel/pci-dma.c (revision 2277ab4a1df50e05bc732fe9488d4e902bb8399a)
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>

static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

int iommu_pass_through;

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

/* Dummy device used for NULL arguments (normally ISA). A smaller DMA
   mask would probably be better, but this is bug-for-bug compatible
   with older i386. */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = DMA_BIT_MASK(32),
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       32768

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
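
/*
 * Example (hypothetical driver code, for illustration only): a PCI
 * driver typically negotiates its DMA mask with this helper, falling
 * back from 64-bit to 32-bit addressing when dma_supported() rejects
 * the wider mask:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */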

#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	dma32_bootmem_size = memparse(p, &p);
	return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
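
/*
 * Example (illustrative): booting with "dma32_size=256M" lets
 * memparse() above raise the reservation below 4G from the default
 * 128MB to 256MB before dma32_reserve_bootmem() runs.
 */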

void __init dma32_reserve_bootmem(void)
{
	unsigned long size, align;
	if (max_pfn <= MAX_DMA32_PFN)
		return;

	/*
	 * See allocate_aperture() in aperture_64.c for the reason
	 * 512M is used as the allocation goal.
	 */
	align = 64ULL<<20;
	size = roundup(dma32_bootmem_size, align);
	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
				 512ULL<<20);
	if (dma32_bootmem_ptr)
		dma32_bootmem_size = size;
	else
		dma32_bootmem_size = 0;
}
static void __init dma32_free_bootmem(void)
{

	if (max_pfn <= MAX_DMA32_PFN)
		return;

	if (!dma32_bootmem_ptr)
		return;

	free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

	dma32_bootmem_ptr = NULL;
	dma32_bootmem_size = 0;
}
#endif

void __init pci_iommu_alloc(void)
{
#ifdef CONFIG_X86_64
	/* free the range so the IOMMU can get some range below 4G */
	dma32_free_bootmem();
#endif

	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons
	 */
	gart_iommu_hole_init();

	detect_calgary();

	detect_intel_iommu();

	amd_iommu_detect();

	pci_swiotlb_init();
}

void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long dma_mask;
	struct page *page;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag |= __GFP_ZERO;
again:
	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (!is_buffer_dma_capable(dma_mask, addr, size)) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}

	*dma_addr = addr;
	return page_address(page);
}
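
/*
 * Example (hypothetical driver code, for illustration only): drivers do
 * not call dma_generic_alloc_coherent() directly; they use the generic
 * DMA API, which dispatches through dma_ops and may end up here:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
 *				       &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, handle);
 */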

/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
 * documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = -1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2)) {
			iommu_pass_through = 1;
			return 1;
		}

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
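
/*
 * Example boot parameters handled by iommu_setup() above (illustrative):
 *
 *	iommu=off		disable IOMMU use entirely
 *	iommu=force,panic	force IOMMU use and panic on overflow
 *	iommu=soft		use the software bounce buffer (swiotlb)
 *	iommu=pt		hardware IOMMU in pass-through mode
 *
 * Multiple options may be combined with commas, as the parsing loop
 * above steps from one comma-separated token to the next.
 */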

int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   The problem with this is that if we overflow the IOMMU area
	   and return DAC as a fallback address, the device may not
	   handle it correctly.

	   As a special case, some controllers have a 39-bit address
	   mode that is as efficient as 32-bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but it gives
	   more graceful handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);

static int __init pci_iommu_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif

	calgary_iommu_init();

	intel_iommu_init();

	amd_iommu_init();

	gart_iommu_init();

	no_iommu_init();
	return 0;
}

void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();

	amd_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data when DAC (dual-address cycle)
   addressing is used. Disable it here. */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif