// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-direct.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

/*
 * DAC (double-address-cycle, i.e. >32-bit PCI addressing) policy:
 *   > 0: DAC forbidden (set by "iommu=nodac" or the VIA bridge quirk below),
 *     0: default, DAC allowed,
 *   < 0: DAC explicitly forced on via "iommu=usedac" (quirk won't override).
 * Consumed by arch_dma_supported().
 */
static int forbid_dac __read_mostly;

/* Default to direct (no-IOMMU) DMA ops; IOMMU drivers replace this. */
const struct dma_map_ops *dma_ops = &dma_direct_ops;
EXPORT_SYMBOL(dma_ops);

/* Nonzero ("iommu=forcesac"): steer capable devices to 32-bit SAC addressing. */
static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
/* Debug builds default to panicking on IOMMU overflow and forcing the IOMMU. */
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access to whole physical memory. This is
 * useful if a user wants to use an IOMMU only for KVM device assignment to
 * guests and not for driver dma translation.
 */
int iommu_pass_through __read_mostly;

/* Linker-provided table of IOMMU detect/init hooks, walked below. */
extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/* Dummy device used for NULL arguments (normally ISA).
*/ 52 struct device x86_dma_fallback_dev = { 53 .init_name = "fallback device", 54 .coherent_dma_mask = ISA_DMA_BIT_MASK, 55 .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask, 56 }; 57 EXPORT_SYMBOL(x86_dma_fallback_dev); 58 59 /* Number of entries preallocated for DMA-API debugging */ 60 #define PREALLOC_DMA_DEBUG_ENTRIES 65536 61 62 void __init pci_iommu_alloc(void) 63 { 64 struct iommu_table_entry *p; 65 66 sort_iommu_table(__iommu_table, __iommu_table_end); 67 check_iommu_entries(__iommu_table, __iommu_table_end); 68 69 for (p = __iommu_table; p < __iommu_table_end; p++) { 70 if (p && p->detect && p->detect() > 0) { 71 p->flags |= IOMMU_DETECTED; 72 if (p->early_init) 73 p->early_init(); 74 if (p->flags & IOMMU_FINISH_IF_DETECTED) 75 break; 76 } 77 } 78 } 79 80 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp) 81 { 82 if (!*dev) 83 *dev = &x86_dma_fallback_dev; 84 85 if (!is_device_dma_capable(*dev)) 86 return false; 87 return true; 88 89 } 90 EXPORT_SYMBOL(arch_dma_alloc_attrs); 91 92 /* 93 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel 94 * parameter documentation. 
*/
/*
 * Parse the "iommu=" boot parameter.  The string is a comma-separated list
 * of options; each token is tested against every known option with strncmp
 * prefix matches, so overlapping prefixes can fire together (e.g. "forcesac"
 * also satisfies the "force" check).  Every token is additionally handed to
 * gart_parse_options() for GART-specific keywords.
 */
static __init int iommu_setup(char *p)
{
	/* Presence of "iommu=" at all enables merging by default. */
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			/* -1 means "DAC forced on"; stop parsing further options. */
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		/* Advance to the next comma-separated token. */
		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

/*
 * Arch hook for dma_supported(): returns 0 to veto the given DMA mask,
 * 1 to let the generic code continue checking it.
 */
int arch_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	/* Veto >32-bit (DAC) masks when DAC has been forbidden (see forbid_dac). */
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(arch_dma_supported);

/*
 * Late IOMMU bring-up: set up DMA-API debugging, run the selected
 * implementation's iommu_init(), then the late_init() hook of every
 * table entry that was flagged IOMMU_DETECTED in pci_iommu_alloc().
 */
static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

/* PCI fixup: forbid DAC system-wide when a VIA PCI bridge is present,
 * unless "iommu=usedac" made forbid_dac negative. */
static void via_no_dac(struct pci_dev *dev)
{
	if (forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
				PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif