/* xref: /linux/arch/x86/kernel/pci-dma.c (revision b8bb76713ec50df2f11efee386e16f93d51e1076) */
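/*
 * Common x86 DMA-mapping glue: detects and initializes the platform
 * IOMMU (GART, Calgary, VT-d, AMD IOMMU, or the swiotlb/nommu
 * fallback) and provides generic DMA-API helpers.
 */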
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>

/* > 0: DAC disabled; < 0: DAC explicitly allowed by the user (quirks won't override) */
static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

/*
 * Dummy device used for NULL arguments (normally ISA). A smaller DMA
 * mask would probably be better, but this is bug-for-bug compatible
 * with older i386 behavior.
 */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       32768

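/*
 * Update the device's streaming DMA mask, but only after dma_supported()
 * confirms the platform (and any active IOMMU) can honor it.
 */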
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

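/*
 * On 64-bit machines with memory above 4GB, a chunk of low memory is
 * reserved early in boot (128MB by default, tunable with "dma32_size=")
 * so that later bootmem allocations cannot exhaust the DMA32 zone.
 * The reservation is released again from pci_iommu_alloc() once the
 * IOMMU setup needs memory below 4GB.
 */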
#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	dma32_bootmem_size = memparse(p, &p);
	return 0;
}
early_param("dma32_size", parse_dma32_size_opt);

void __init dma32_reserve_bootmem(void)
{
	unsigned long size, align;

	/* Nothing to do if all memory already sits below 4GB */
	if (max_pfn <= MAX_DMA32_PFN)
		return;

	/*
	 * See allocate_aperture() in aperture_64.c for why 512M is
	 * used as the allocation goal here.
	 */
	align = 64ULL<<20;
	size = roundup(dma32_bootmem_size, align);
	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
				 512ULL<<20);
	if (dma32_bootmem_ptr)
		dma32_bootmem_size = size;
	else
		dma32_bootmem_size = 0;
}

static void __init dma32_free_bootmem(void)
{
	if (max_pfn <= MAX_DMA32_PFN)
		return;

	if (!dma32_bootmem_ptr)
		return;

	free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

	dma32_bootmem_ptr = NULL;
	dma32_bootmem_size = 0;
}
#endif

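/*
 * Run the IOMMU detection routines early in boot.  Each routine that
 * finds its hardware flags it as detected (and may reserve memory for
 * it); the actual initialization happens later, in pci_iommu_init().
 * swiotlb is set up last, as the software fallback.
 */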
void __init pci_iommu_alloc(void)
{
#ifdef CONFIG_X86_64
	/* Free the reserved range so the IOMMU can allocate it below 4GB */
	dma32_free_bootmem();
#endif

	/*
	 * The order of these calls is important for
	 * fall-back/fail-over reasons.
	 */
	gart_iommu_hole_init();

	detect_calgary();

	detect_intel_iommu();

	amd_iommu_detect();

	pci_swiotlb_init();
}

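/*
 * Generic coherent allocator: grab zeroed pages on the device's node
 * and verify they fall within its DMA mask.  If the first attempt lands
 * too high, retry once from ZONE_DMA before giving up.
 */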
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long dma_mask;
	struct page *page;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag |= __GFP_ZERO;
again:
	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (!is_buffer_dma_capable(dma_mask, addr, size)) {
		__free_pages(page, get_order(size));

		/* Fall back to ZONE_DMA for small masks, but only once */
		if (dma_mask < DMA_32BIT_MASK && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}

	*dma_addr = addr;
	return page_address(page);
}

/*
 * See Documentation/x86_64/boot-options.txt for the documentation of the
 * "iommu=" kernel parameter.  Options are comma-separated; each token is
 * also handed to gart_parse_options() for GART-specific handling.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = -1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		/* advance to the next comma-separated option */
		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

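/*
 * Check whether a device can reach memory described by @mask, deferring
 * to the active dma_map_ops implementation when one is registered.
 */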
int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/*
	 * Copied from i386.  Doesn't make much sense, because it will
	 * only work for pci_alloc_coherent.  The caller just has to use
	 * GFP_DMA in this case.
	 */
	if (mask < DMA_24BIT_MASK)
		return 0;

	/*
	 * Tell the device to use SAC when IOMMU force is on.  This
	 * allows the driver to use cheaper accesses in some cases.
	 *
	 * The problem is that if we overflow the IOMMU area and return
	 * DAC as the fallback address, the device may not handle it
	 * correctly.
	 *
	 * As a special case, some controllers have a 39-bit address
	 * mode that is as efficient as 32-bit (aic79xx).  Don't force
	 * SAC for these.  Assume all masks <= 40 bits are of this type.
	 * Normally this doesn't make any difference, but gives more
	 * gentle handling of IOMMU overflow.
	 */
	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);

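/*
 * Late initialization: set up DMA-API debugging, then let each IOMMU
 * driver initialize itself.  Routines whose hardware was not detected
 * earlier return without doing anything; no_iommu_init() installs the
 * trivial nommu operations if nothing else has claimed dma_ops.
 */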
static int __init pci_iommu_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif

	calgary_iommu_init();

	intel_iommu_init();

	amd_iommu_init();

	gart_iommu_init();

	no_iommu_init();
	return 0;
}

void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data on DAC cycles; disable DAC here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	/* Respect an explicit "allowdac"/"nodac" choice (forbid_dac != 0) */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif