xref: /linux/arch/x86/kernel/pci-dma.c (revision a1c613ae4c322ddd58d5a8539dbfba2a0380a8c0)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/dma-map-ops.h>
3 #include <linux/dma-direct.h>
4 #include <linux/iommu.h>
5 #include <linux/dmar.h>
6 #include <linux/export.h>
7 #include <linux/memblock.h>
8 #include <linux/gfp.h>
9 #include <linux/pci.h>
10 #include <linux/amd-iommu.h>
11 
12 #include <asm/proto.h>
13 #include <asm/dma.h>
14 #include <asm/iommu.h>
15 #include <asm/gart.h>
16 #include <asm/x86_init.h>
17 
18 #include <xen/xen.h>
19 #include <xen/swiotlb-xen.h>
20 
/* Set by "iommu=usedac"; suppresses the VIA no-DAC quirk below. */
static bool disable_dac_quirk __read_mostly;

/* Architecture-wide default DMA operations; may be overridden (e.g. Xen). */
const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

/* Debug builds default to panicking on IOMMU overflow and forcing IOMMU use. */
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

/* Allow merging of scatter-gather entries; toggled by iommu= options below. */
int iommu_merge __read_mostly = 0;

/* Set by "iommu=off"; also suppresses swiotlb setup in pci_swiotlb_detect(). */
int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

#ifdef CONFIG_SWIOTLB
/* Whether swiotlb bounce buffering should be initialized on this system. */
bool x86_swiotlb_enable;
/* Flags passed to swiotlb_init(); e.g. SWIOTLB_FORCE for encrypted guests. */
static unsigned int x86_swiotlb_flags;
43 
pci_swiotlb_detect(void)44 static void __init pci_swiotlb_detect(void)
45 {
46 	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
47 	if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
48 		x86_swiotlb_enable = true;
49 
50 	/*
51 	 * Set swiotlb to 1 so that bounce buffers are allocated and used for
52 	 * devices that can't support DMA to encrypted memory.
53 	 */
54 	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
55 		x86_swiotlb_enable = true;
56 
57 	/*
58 	 * Guest with guest memory encryption currently perform all DMA through
59 	 * bounce buffers as the hypervisor can't access arbitrary VM memory
60 	 * that is not explicitly shared with it.
61 	 */
62 	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
63 		x86_swiotlb_enable = true;
64 		x86_swiotlb_flags |= SWIOTLB_FORCE;
65 	}
66 }
67 #else
/* swiotlb support compiled out (!CONFIG_SWIOTLB): nothing to detect. */
static inline void __init pci_swiotlb_detect(void)
{
}
71 #define x86_swiotlb_flags 0
72 #endif /* CONFIG_SWIOTLB */
73 
74 #ifdef CONFIG_SWIOTLB_XEN
xen_swiotlb_enabled(void)75 static bool xen_swiotlb_enabled(void)
76 {
77 	return xen_initial_domain() || x86_swiotlb_enable ||
78 		(IS_ENABLED(CONFIG_XEN_PCIDEV_FRONTEND) && xen_pv_pci_possible);
79 }
80 
/*
 * Set up swiotlb-xen as the system DMA backend when running as a Xen PV
 * domain that needs it (see xen_swiotlb_enabled()).  Called instead of
 * the regular detection path from pci_iommu_alloc().
 */
static void __init pci_xen_swiotlb_init(void)
{
	if (!xen_swiotlb_enabled())
		return;
	x86_swiotlb_enable = true;
	/* NOTE(review): SWIOTLB_ANY presumably lifts the address restriction
	 * on the bounce buffer pool since Xen remaps it -- confirm against
	 * swiotlb_init_remap() semantics. */
	x86_swiotlb_flags |= SWIOTLB_ANY;
	/* Initialize the pool first, with Xen's remap fixup, ... */
	swiotlb_init_remap(true, x86_swiotlb_flags, xen_swiotlb_fixup);
	/* ... then route all DMA mapping through the Xen swiotlb ops. */
	dma_ops = &xen_swiotlb_dma_ops;
	if (IS_ENABLED(CONFIG_PCI))
		pci_request_acs();
}
92 #else
/* swiotlb-xen compiled out (!CONFIG_SWIOTLB_XEN): nothing to initialize. */
static inline void __init pci_xen_swiotlb_init(void)
{
}
96 #endif /* CONFIG_SWIOTLB_XEN */
97 
/*
 * Early-boot DMA setup entry point.  Xen PV domains take the dedicated
 * swiotlb-xen path; everyone else runs swiotlb/IOMMU detection and then
 * initializes swiotlb with whatever the detection decided.
 */
void __init pci_iommu_alloc(void)
{
	if (xen_pv_domain()) {
		/* PV guests use swiotlb-xen exclusively; skip HW IOMMU probing. */
		pci_xen_swiotlb_init();
		return;
	}
	pci_swiotlb_detect();
	/*
	 * Probe the hardware IOMMUs before committing to swiotlb; the
	 * final decision is revisited in pci_iommu_init() (see "An IOMMU
	 * turned us off" there).
	 */
	gart_iommu_hole_init();
	amd_iommu_detect();
	detect_intel_iommu();
	swiotlb_init(x86_swiotlb_enable, x86_swiotlb_flags);
}
110 
111 /*
112  * See <Documentation/arch/x86/x86_64/boot-options.rst> for the iommu kernel
113  * parameter documentation.
114  */
/*
 * Parse the "iommu=" kernel command-line parameter: a comma-separated
 * list of options, each matched by prefix with strncmp().  Note the
 * matching is prefix-based and order-sensitive: e.g. "forcesac" first
 * matches "force" above before its own pr_warn() fires.  Unrecognized
 * tokens are also handed to gart_parse_options() for GART-specific
 * handling.  Returns 0 on success, -EINVAL if no argument was given.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		/* Obsolete DAC/SAC tuning knobs: warn and ignore. */
		if (!strncmp(p, "forcesac", 8))
			pr_warn("forcesac option ignored.\n");
		if (!strncmp(p, "allowdac", 8))
			pr_warn("allowdac option ignored.\n");
		if (!strncmp(p, "nodac", 5))
			pr_warn("nodac option ignored.\n");
		if (!strncmp(p, "usedac", 6)) {
			/* Keep DAC enabled on VIA bridges (see via_no_dac()).
			 * Note: returns immediately, skipping any later options. */
			disable_dac_quirk = true;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			x86_swiotlb_enable = true;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_set_default_passthrough(true);
		if (!strncmp(p, "nopt", 4))
			iommu_set_default_translated(true);

		gart_parse_options(p);

		/* Advance to the next comma-separated option. */
		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
174 early_param("iommu", iommu_setup);
175 
pci_iommu_init(void)176 static int __init pci_iommu_init(void)
177 {
178 	x86_init.iommu.iommu_init();
179 
180 #ifdef CONFIG_SWIOTLB
181 	/* An IOMMU turned us off. */
182 	if (x86_swiotlb_enable) {
183 		pr_info("PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
184 		swiotlb_print_info();
185 	} else {
186 		swiotlb_exit();
187 	}
188 #endif
189 
190 	return 0;
191 }
192 /* Must execute after PCI subsystem */
193 rootfs_initcall(pci_iommu_init);
194 
195 #ifdef CONFIG_PCI
196 /* Many VIA bridges seem to corrupt data for DAC. Disable it here */
197 
/*
 * pci_walk_bus() callback: cap the device's bus DMA addressing to 32 bits,
 * so dual-address-cycle (64-bit) transactions are never used behind the
 * affected VIA bridge.  Always returns 0 to keep the bus walk going.
 */
static int via_no_dac_cb(struct pci_dev *pdev, void *data)
{
	pdev->dev.bus_dma_limit = DMA_BIT_MASK(32);
	return 0;
}
203 
via_no_dac(struct pci_dev * dev)204 static void via_no_dac(struct pci_dev *dev)
205 {
206 	if (!disable_dac_quirk) {
207 		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
208 		pci_walk_bus(dev->subordinate, via_no_dac_cb, NULL);
209 	}
210 }
211 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
212 				PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
213 #endif
214