/* arch/x86/kernel/pci-dma.c */
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole of physical memory.
 * This is useful if a user wants to use an IOMMU only for KVM device
 * assignment to guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;
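
/*
 * Illustrative sketch (not part of the original file): an IOMMU driver
 * would typically consult this flag when choosing its default domain,
 * e.g. after booting with "iommu=pt" on the kernel command line a
 * hypothetical driver might do:
 *
 *	if (iommu_pass_through)
 *		domain_type = IDENTITY_MAP;	// hypothetical: 1:1 map, no translation
 */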

extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       65536

void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}
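
/*
 * Entries in __iommu_table are registered elsewhere via the IOMMU_INIT*
 * macros from <asm/iommu_table.h>. As a rough, illustrative example, the
 * swiotlb code wires up its detection routine along these lines:
 *
 *	IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
 *			  pci_xen_swiotlb_detect,
 *			  pci_swiotlb_init,
 *			  pci_swiotlb_late_init);
 *
 * pci_iommu_alloc() above sorts the table by dependency, runs each
 * ->detect() hook, and stops early when a finishing entry matches.
 */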
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs)
{
	unsigned long dma_mask;
	struct page *page;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag &= ~__GFP_ZERO;
again:
	page = NULL;
	/* CMA can be used only in the context which permits sleeping */
	if (flag & __GFP_WAIT) {
		page = dma_alloc_from_contiguous(dev, count, get_order(size));
		if (page && page_to_phys(page) + size > dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	/* fallback */
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}
	memset(page_address(page), 0, size);
	*dma_addr = addr;
	return page_address(page);
}
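
/*
 * Illustrative usage sketch (assumes a hypothetical PCI driver "pdev"):
 * callers do not invoke this helper directly; it is reached through
 * dma_ops when a driver asks for coherent memory, e.g.:
 *
 *	void *buf;
 *	dma_addr_t handle;
 *
 *	buf = dma_alloc_coherent(&pdev->dev, size, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, size, buf, handle);
 *
 * The matching release path, dma_generic_free_coherent() below, undoes
 * either the CMA or the page-allocator allocation.
 */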

void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_addr, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = virt_to_page(vaddr);

	if (!dma_release_from_contiguous(dev, page, count))
		free_pages((unsigned long)vaddr, get_order(size));
}

bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
{
	*gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);
	*gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (!*dev)
		*dev = &x86_dma_fallback_dev;
	if (!is_device_dma_capable(*dev))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_dma_alloc_attrs);
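
/*
 * Note (illustrative, based on the generic dma-mapping code of this era):
 * dma_alloc_attrs() is expected to call this hook before dispatching to
 * ops->alloc(), roughly:
 *
 *	if (!arch_dma_alloc_attrs(&dev, &flag))
 *		return NULL;
 *	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
 */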

/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
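
/*
 * Example invocations (illustrative; see boot-options.txt for the full
 * list): "iommu=off" disables IOMMU handling entirely, "iommu=pt" keeps
 * the IOMMU for device assignment but skips DMA translation, and
 * "iommu=soft" forces the software bounce-buffer path (swiotlb). The
 * parser above walks comma-separated options, so they can be combined,
 * e.g. "iommu=nomerge,nopanic".
 */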

int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/*
	 * Copied from i386. Doesn't make much sense, because it will
	 * only work for pci_alloc_coherent. The caller just has to use
	 * GFP_DMA in this case.
	 */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/*
	 * Tell the device to use SAC when IOMMU force is on. This
	 * allows the driver to use cheaper accesses in some cases.
	 *
	 * The problem with this is that if we overflow the IOMMU area
	 * and return DAC as a fallback address, the device may not
	 * handle it correctly.
	 *
	 * As a special case, some controllers have a 39-bit address
	 * mode that is as efficient as 32-bit (aic79xx). Don't force
	 * SAC for these. Assume all masks <= 40 bits are of this type.
	 * Normally this doesn't make any difference, but gives more
	 * gentle handling of IOMMU overflow.
	 */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);
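
/*
 * Illustrative sketch (hypothetical driver code): dma_supported() backs
 * the mask-setting helpers, so a driver typically probes for the widest
 * usable mask and falls back, e.g.:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;	// no usable DMA addressing mode
 */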

static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;

	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here. */

static void via_no_dac(struct pci_dev *dev)
{
	if (forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
			      PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif