xref: /linux/arch/x86/kernel/pci-dma.c (revision 0d456bad36d42d16022be045c8a53ddbb59ee478)
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole of physical memory.
 * This is useful if a user wants to use an IOMMU only for KVM device
 * assignment to guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;

extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       32768

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
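
/*
 * Usage sketch (illustrative, not part of the original file): a PCI
 * driver would typically negotiate its DMA mask from its probe()
 * routine, falling back from 64 to 32 bits; "pdev" below stands for
 * the driver's hypothetical struct pci_dev:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */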

void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

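	/*
	 * Walk the sorted detector table: a detect() hit marks the entry,
	 * runs its optional early_init() hook, and ends the scan early
	 * when IOMMU_FINISH_IF_DETECTED is set.
	 */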
	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}

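/*
 * Generic coherent allocation: prefer the CMA area when the request may
 * block, fall back to the page allocator, and if the pages land above
 * the device's coherent mask, retry once from ZONE_DMA (for masks below
 * 32 bits) before failing.
 */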
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs)
{
	unsigned long dma_mask;
	struct page *page;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag |= __GFP_ZERO;
again:
	page = NULL;
	if (!(flag & GFP_ATOMIC))
		page = dma_alloc_from_contiguous(dev, count, get_order(size));
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

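		/* For masks below 32 bits, a ZONE_DMA retry may still fit. */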
		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}

	*dma_addr = addr;
	return page_address(page);
}

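/*
 * Free-path mirror of dma_generic_alloc_coherent(): give the pages back
 * to the CMA area if they came from there, else to the page allocator.
 */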
void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_addr, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = virt_to_page(vaddr);

	if (!dma_release_from_contiguous(dev, page, count))
		free_pages((unsigned long)vaddr, get_order(size));
}

/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
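
/*
 * Example (hypothetical) invocation: booting with
 * "iommu=force,nomerge,nodac" walks the comma-separated list above and
 * sets force_iommu, clears iommu_merge, and sets forbid_dac in turn.
 */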

int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

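	/*
	 * DAC (dual-address cycle) carries 64-bit addresses over 32-bit
	 * PCI, while SAC (single-address cycle) is plain 32-bit
	 * addressing; some bridges mishandle DAC, hence forbid_dac.
	 */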
#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/*
	 * Copied from i386. Doesn't make much sense, because it will
	 * only work for pci_alloc_coherent.
	 * The caller just has to use GFP_DMA in this case.
	 */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/*
	 * Tell the device to use SAC when IOMMU force is on.  This
	 * allows the driver to use cheaper accesses in some cases.
	 *
	 * Problem with this is that if we overflow the IOMMU area and
	 * return DAC as fallback address the device may not handle it
	 * correctly.
	 *
	 * As a special case some controllers have a 39-bit address
	 * mode that is as efficient as 32-bit (aic79xx).  Don't force
	 * SAC for these.  Assume all masks <= 40 bits are of this
	 * type.  Normally this doesn't make any difference, but gives
	 * more gentle handling of IOMMU overflow.
	 */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);

static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;

	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
	x86_init.iommu.iommu_init();

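	/* Let every detected implementation finish with its late_init(). */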
	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if (forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
				PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif