xref: /linux/arch/x86/kernel/pci-dma.c (revision c6380ecd8e9bee7aba3d9a5a94b58168244c4a61)
// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-direct.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static int forbid_dac __read_mostly;

const struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This
 * is useful if a user wants to use an IOMMU only for KVM device assignment
 * to guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;
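
/*
 * Illustrative sketch (not part of the original file): an IOMMU driver
 * would typically consult this flag when it sets up translation for a
 * device, along the lines of:
 *
 *	if (iommu_pass_through)
 *		attach_identity_domain(dev);	// hypothetical helper
 *	else
 *		attach_dma_domain(dev);		// hypothetical helper
 *
 * Both helpers above are made-up names; only the flag itself is real.
 */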

extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       65536

void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}
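
/*
 * Illustrative sketch (not part of the original file): how an IOMMU driver
 * hooks into the table walked above.  IOMMU_INIT_FINISH() is the real macro
 * from <asm/iommu_table.h>; the my_iommu_* names are made up:
 *
 *	static int __init my_iommu_detect(void)
 *	{
 *		return my_hw_present() ? 1 : 0;	// > 0 sets IOMMU_DETECTED
 *	}
 *
 *	IOMMU_INIT_FINISH(my_iommu_detect,	// ->detect, pci_iommu_alloc()
 *			  NULL,			// no detect-order dependency
 *			  my_iommu_early_init,	// ->early_init, at detect time
 *			  my_iommu_late_init);	// ->late_init, pci_iommu_init()
 */
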
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 unsigned long attrs)
{
	unsigned long dma_mask;
	struct page *page;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

again:
	page = NULL;
	/* CMA can only be used in a context that permits sleeping */
	if (gfpflags_allow_blocking(flag)) {
		page = dma_alloc_from_contiguous(dev, count, get_order(size),
						 flag);
		if (page) {
			addr = phys_to_dma(dev, page_to_phys(page));
			if (addr + size > dma_mask) {
				dma_release_from_contiguous(dev, page, count);
				page = NULL;
			}
		}
	}
	/* fallback */
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = phys_to_dma(dev, page_to_phys(page));
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}
	memset(page_address(page), 0, size);
	*dma_addr = addr;
	return page_address(page);
}
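
/*
 * Usage sketch (not part of the original file): a driver normally reaches
 * the allocator above through the generic DMA API rather than calling it
 * directly.  The pdev and PAGE_SIZE length here are assumptions for
 * illustration:
 *
 *	dma_addr_t bus_addr;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &bus_addr,
 *				      GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	// ... hand bus_addr to the device, use cpu_addr from the CPU ...
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, cpu_addr, bus_addr);
 */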

void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = virt_to_page(vaddr);

	if (!dma_release_from_contiguous(dev, page, count))
		free_pages((unsigned long)vaddr, get_order(size));
}

bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
{
	if (!*dev)
		*dev = &x86_dma_fallback_dev;

	*gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);

	if (!is_device_dma_capable(*dev))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_dma_alloc_attrs);
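
/*
 * Call-path sketch (an assumption based on include/linux/dma-mapping.h of
 * this era, not stated in this file): dma_alloc_attrs() invokes
 * arch_dma_alloc_attrs() before dispatching to the dma_map_ops, which is
 * what lets legacy ISA-style callers pass a NULL device:
 *
 *	void *buf = dma_alloc_coherent(NULL, size, &handle, GFP_KERNEL);
 *	// *dev is rewritten above to &x86_dma_fallback_dev
 */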

/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
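
/*
 * Example command lines (illustrative, matching the parser above):
 *
 *	iommu=off		disable IOMMU handling entirely
 *	iommu=pt		run a detected hardware IOMMU in pass-through
 *	iommu=soft		force software bounce buffering (SWIOTLB)
 *	iommu=force,merge	force IOMMU usage and enable sg merging
 *
 * Comma-separated combinations are handled by the strcspn() loop above.
 */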

int arch_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	/*
	 * Tell the device to use SAC when IOMMU force is on.  This
	 * allows the driver to use cheaper accesses in some cases.
	 *
	 * The problem with this is that if we overflow the IOMMU area
	 * and return DAC as a fallback address the device may not
	 * handle it correctly.
	 *
	 * As a special case some controllers have a 39-bit address
	 * mode that is as efficient as 32-bit (aic79xx).  Don't force
	 * SAC for these.  Assume all masks <= 40 bits are of this
	 * type.  Normally this doesn't make any difference, but gives
	 * more gentle handling of IOMMU overflow.
	 */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(arch_dma_supported);
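
/*
 * Usage sketch (not part of the original file): a PCI driver probing for
 * 64-bit DMA falls back to a 32-bit mask when the DAC check above refuses
 * the wider mask:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
 *		dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 *
 * dma_set_mask() fails with -EIO when the mask is not supported; on x86
 * that check includes arch_dma_supported() above (an assumption based on
 * include/linux/dma-mapping.h of this era).
 */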

int x86_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * Copied from i386.  Doesn't make much sense, because it will
	 * only work for pci_alloc_coherent.  The caller just has to
	 * use GFP_DMA in this case.
	 */
	if (mask < DMA_BIT_MASK(24))
		return 0;
	return 1;
}

static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;

	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static void via_no_dac(struct pci_dev *dev)
{
	if (forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
				PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
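
/*
 * Note (explanatory, not in the original): the trailing 8 is the class
 * shift, so the fixup matches any VIA device whose (class >> 8) equals
 * PCI_CLASS_BRIDGE_PCI, i.e. every VIA PCI-to-PCI bridge regardless of
 * its programming-interface byte.
 */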
#endif