// SPDX-License-Identifier: GPL-2.0
/*
 * x86 PCI DMA setup: chooses between no IOMMU, a hardware IOMMU
 * (GART / AMD / Intel / Xen) and the swiotlb software bounce buffer.
 */
#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/iommu.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/amd-iommu.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>

#include <xen/xen.h>
#include <xen/swiotlb-xen.h>

/* Set by "iommu=usedac"; suppresses the VIA 32-bit DAC quirk below. */
static bool disable_dac_quirk __read_mostly;

/* Architecture-wide DMA ops; installed here for the Xen swiotlb case. */
const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

/* Set by "iommu=off"; disables both HW IOMMU detection and swiotlb. */
int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

#ifdef CONFIG_SWIOTLB
bool x86_swiotlb_enable;
static unsigned int x86_swiotlb_flags;

/*
 * Decide at boot whether swiotlb bounce buffering is required:
 * either because memory extends beyond the 32-bit DMA reachable
 * range, or because memory encryption prevents direct DMA.
 * Only sets x86_swiotlb_enable/x86_swiotlb_flags; the actual
 * allocation happens later in pci_iommu_alloc().
 */
static void __init pci_swiotlb_detect(void)
{
	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
	if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
		x86_swiotlb_enable = true;

	/*
	 * Set swiotlb to 1 so that bounce buffers are allocated and used for
	 * devices that can't support DMA to encrypted memory.
	 */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		x86_swiotlb_enable = true;

	/*
	 * Guest with guest memory encryption currently perform all DMA through
	 * bounce buffers as the hypervisor can't access arbitrary VM memory
	 * that is not explicitly shared with it.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		x86_swiotlb_enable = true;
		x86_swiotlb_flags |= SWIOTLB_FORCE;
	}
}
#else
static inline void __init pci_swiotlb_detect(void)
{
}
/* Without CONFIG_SWIOTLB there are no flags to pass to swiotlb_init(). */
#define x86_swiotlb_flags 0
#endif /* CONFIG_SWIOTLB */

#ifdef CONFIG_SWIOTLB_XEN
/*
 * Early swiotlb setup for Xen PV domains: always enabled for the
 * initial domain, otherwise only if pci_swiotlb_detect() asked for
 * it.  Remaps the buffer via xen_swiotlb_fixup and installs the Xen
 * swiotlb DMA ops.
 */
static void __init pci_xen_swiotlb_init(void)
{
	if (!xen_initial_domain() && !x86_swiotlb_enable)
		return;
	x86_swiotlb_enable = true;
	x86_swiotlb_flags |= SWIOTLB_ANY;
	swiotlb_init_remap(true, x86_swiotlb_flags, xen_swiotlb_fixup);
	dma_ops = &xen_swiotlb_dma_ops;
	if (IS_ENABLED(CONFIG_PCI))
		pci_request_acs();
}

/*
 * Late switch to the Xen swiotlb DMA ops, for callers that discover
 * the need after early boot.  Allocates a default swiotlb if none
 * exists yet.  Returns 0 on success or a negative errno from
 * swiotlb_init_late().
 */
int pci_xen_swiotlb_init_late(void)
{
	/* Already using the Xen ops — nothing to do. */
	if (dma_ops == &xen_swiotlb_dma_ops)
		return 0;

	/* we can work with the default swiotlb */
	if (!io_tlb_default_mem.nslabs) {
		int rc = swiotlb_init_late(swiotlb_size_or_default(),
					   GFP_KERNEL, xen_swiotlb_fixup);
		if (rc < 0)
			return rc;
	}

	/* XXX: this switches the dma ops under live devices! */
	dma_ops = &xen_swiotlb_dma_ops;
	if (IS_ENABLED(CONFIG_PCI))
		pci_request_acs();
	return 0;
}
EXPORT_SYMBOL_GPL(pci_xen_swiotlb_init_late);
#else
static inline void __init pci_xen_swiotlb_init(void)
{
}
#endif /* CONFIG_SWIOTLB_XEN */

/*
 * Boot-time DMA setup entry point.  Xen PV domains take the Xen
 * swiotlb path and return early; everyone else runs swiotlb
 * detection and the HW IOMMU probes (GART, AMD, Intel) before the
 * swiotlb buffer is (conditionally) allocated.
 */
void __init pci_iommu_alloc(void)
{
	if (xen_pv_domain()) {
		pci_xen_swiotlb_init();
		return;
	}
	pci_swiotlb_detect();
	gart_iommu_hole_init();
	amd_iommu_detect();
	detect_intel_iommu();
	swiotlb_init(x86_swiotlb_enable, x86_swiotlb_flags);
}

/*
 * See <Documentation/x86/x86_64/boot-options.rst> for the iommu kernel
 * parameter documentation.
 */
/*
 * Parser for the "iommu=" boot parameter.  @p is the comma-separated
 * option string; tokens are matched by prefix (strncmp), and every
 * token is additionally handed to gart_parse_options().  Returns
 * -EINVAL for a missing argument; "usedac" stops parsing immediately.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		/* Historical DAC options are accepted but no longer acted on. */
		if (!strncmp(p, "forcesac", 8))
			pr_warn("forcesac option ignored.\n");
		if (!strncmp(p, "allowdac", 8))
			pr_warn("allowdac option ignored.\n");
		if (!strncmp(p, "nodac", 5))
			pr_warn("nodac option ignored.\n");
		if (!strncmp(p, "usedac", 6)) {
			disable_dac_quirk = true;
			/* NOTE: returns 1, skipping any remaining tokens. */
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			x86_swiotlb_enable = true;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_set_default_passthrough(true);
		if (!strncmp(p, "nopt", 4))
			iommu_set_default_translated(true);

		gart_parse_options(p);

		/* Advance to the next comma-separated token. */
		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

/*
 * Late initcall: run the selected IOMMU driver's init hook, then
 * either report that swiotlb stays active or free its bounce buffer
 * if an IOMMU made it unnecessary.
 */
static int __init pci_iommu_init(void)
{
	x86_init.iommu.iommu_init();

#ifdef CONFIG_SWIOTLB
	/* An IOMMU turned us off. */
	if (x86_swiotlb_enable) {
		pr_info("PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
		swiotlb_print_info();
	} else {
		swiotlb_exit();
	}
#endif

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC.
Disable it here */ 212 213 static int via_no_dac_cb(struct pci_dev *pdev, void *data) 214 { 215 pdev->dev.bus_dma_limit = DMA_BIT_MASK(32); 216 return 0; 217 } 218 219 static void via_no_dac(struct pci_dev *dev) 220 { 221 if (!disable_dac_quirk) { 222 dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n"); 223 pci_walk_bus(dev->subordinate, via_no_dac_cb, NULL); 224 } 225 } 226 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, 227 PCI_CLASS_BRIDGE_PCI, 8, via_no_dac); 228 #endif 229