/* arch/x86/kernel/pci-dma.c (revision f26e8817b235d8764363bffcc9cbfc61867371f2) */
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This is
 * useful if a user wants to use an IOMMU only for KVM device assignment to
 * guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;

extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       65536

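/*
 * Boot-time IOMMU selection: sort the detection table built by the
 * IOMMU_INIT_* macros into dependency order, then run each entry's
 * detect() routine, doing early init for whichever implementation
 * claims the hardware.
 */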
void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}

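/*
 * Generic coherent allocation: prefer the CMA area when the caller can
 * sleep, fall back to the page allocator, and retry from ZONE_DMA if
 * the buffer lands above the device's coherent DMA mask.
 */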
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 unsigned long attrs)
{
	unsigned long dma_mask;
	struct page *page;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag &= ~__GFP_ZERO;
again:
	page = NULL;
	/* CMA can only be used in a context that permits sleeping */
	if (gfpflags_allow_blocking(flag)) {
		page = dma_alloc_from_contiguous(dev, count, get_order(size));
		if (page && page_to_phys(page) + size > dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	/* Fall back to the normal page allocator. */
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}
	memset(page_address(page), 0, size);
	*dma_addr = addr;
	return page_address(page);
}

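/*
 * Counterpart to dma_generic_alloc_coherent(): give CMA first chance to
 * reclaim the pages, otherwise hand them back to the page allocator.
 */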
void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = virt_to_page(vaddr);

	if (!dma_release_from_contiguous(dev, page, count))
		free_pages((unsigned long)vaddr, get_order(size));
}

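/*
 * Hook used by the common dma_alloc_attrs() path: substitute the
 * fallback device for a NULL dev, drop any zone modifiers the caller
 * passed in, and choose GFP flags that fit the device's coherent DMA
 * mask.  Returns false if the device is not DMA capable.
 */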
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
{
	if (!*dev)
		*dev = &x86_dma_fallback_dev;

	*gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
	*gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);

	if (!is_device_dma_capable(*dev))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_dma_alloc_attrs);

/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
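/*
 * Options are comma separated; e.g. "iommu=pt" selects pass-through
 * mode, and "iommu=soft" selects the software bounce-buffering
 * (SWIOTLB) implementation.
 */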
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

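/*
 * Report whether the device can address memory described by @mask:
 * reject 64-bit (DAC) masks when DAC is forbidden, defer to the
 * installed dma_map_ops if it supplies its own check, and otherwise
 * fall through to the generic SAC/DAC heuristics below.
 */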
int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/*
	 * Copied from i386.  Doesn't make much sense, because it will
	 * only work for pci_alloc_coherent.  The caller just has to use
	 * GFP_DMA in this case.
	 */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/*
	 * Tell the device to use SAC when IOMMU force is on.  This
	 * allows the driver to use cheaper accesses in some cases.
	 *
	 * The problem with this is that if we overflow the IOMMU area
	 * and return DAC as a fallback address the device may not
	 * handle it correctly.
	 *
	 * As a special case some controllers have a 39-bit address
	 * mode that is as efficient as 32-bit (aic79xx).  Don't force
	 * SAC for these.  Assume all masks <= 40 bits are of this
	 * type.  Normally this doesn't make any difference, but gives
	 * more gentle handling of IOMMU overflow.
	 */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);

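/*
 * Late initialization, run after the PCI subsystem: set up DMA-API
 * debugging, call the chosen implementation's iommu_init() hook, then
 * the late_init() hook of every table entry that detected hardware in
 * pci_iommu_alloc().
 */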
static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;

	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static void via_no_dac(struct pci_dev *dev)
{
	if (forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
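/*
 * Apply the quirk to every VIA device whose class identifies a
 * PCI-to-PCI bridge; the class_shift of 8 makes the match ignore the
 * low (programming interface) byte of the class word.
 */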
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
				PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif