// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * PV guests under Xen run in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice versa, and
 * also a mechanism to obtain contiguous pages for device driver operations
 * (say DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * or PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>

#include <trace/events/swiotlb.h>
#define MAX_DMA_BITS 32

/*
 * Quick lookup value of the bus address of the IOTLB.
 */

static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;

	baddr |= paddr & ~XEN_PAGE_MASK;
	return baddr;
}

static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
}

static inline phys_addr_t xen_bus_to_phys(struct device *dev,
					  phys_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
			    (baddr & ~XEN_PAGE_MASK);

	return paddr;
}

static inline phys_addr_t xen_dma_to_phys(struct device *dev,
					  dma_addr_t dma_addr)
{
	return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}

static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);

	next_bfn = pfn_to_bfn(xen_pfn);

	for (i = 1; i < nr_pages; i++)
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 1;

	return 0;
}

static struct io_tlb_pool *xen_swiotlb_find_pool(struct device *dev,
						 dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check addresses within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr)))
		return swiotlb_find_pool(dev, paddr);
	return NULL;
}
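
/*
 * Illustrative sketch (not part of the driver): how the helpers above split
 * and recombine an address. Only the frame number is remapped through
 * pfn_to_bfn(); the in-page offset is carried over untouched. The guard
 * macro and function name are hypothetical, and the round trip shown holds
 * only for a page owned by this domain.
 */
#ifdef XEN_SWIOTLB_ADDR_EXAMPLE /* hypothetical guard; illustration only */
static void xen_swiotlb_addr_example(struct device *dev)
{
	/* Frame 0x12345 plus an in-page offset of 0x678. */
	phys_addr_t paddr = ((phys_addr_t)0x12345 << XEN_PAGE_SHIFT) | 0x678;
	dma_addr_t handle = xen_phys_to_dma(dev, paddr);

	/* The offset (0x678) survives the PFN->BFN->PFN round trip. */
	WARN_ON(xen_dma_to_phys(dev, handle) != paddr);
}
#endif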

#ifdef CONFIG_X86
int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
	int rc;
	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
	unsigned int i, dma_bits = order + PAGE_SHIFT;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
	BUG_ON(nslabs % IO_TLB_SEGSIZE);

	i = 0;
	do {
		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT), order,
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < MAX_DMA_BITS);
		if (rc)
			return rc;

		i += IO_TLB_SEGSIZE;
	} while (i < nslabs);
	return 0;
}

static void *
xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
	u64 dma_mask = dev->coherent_dma_mask;
	int order = get_order(size);
	phys_addr_t phys;
	void *ret;

	/* Align the allocation to the Xen page size */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	ret = (void *)__get_free_pages(flags, get_order(size));
	if (!ret)
		return ret;
	phys = virt_to_phys(ret);

	*dma_handle = xen_phys_to_dma(dev, phys);
	if (*dma_handle + size - 1 > dma_mask ||
	    range_straddles_page_boundary(phys, size)) {
		if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
				dma_handle) != 0)
			goto out_free_pages;
		SetPageXenRemapped(virt_to_page(ret));
	}

	memset(ret, 0, size);
	return ret;

out_free_pages:
	free_pages((unsigned long)ret, get_order(size));
	return NULL;
}

static void
xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t phys = virt_to_phys(vaddr);
	int order = get_order(size);

	/* Convert the size to what was actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
	    WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
		return;

	if (TestClearPageXenRemapped(virt_to_page(vaddr)))
		xen_destroy_contiguous_region(phys, order);
	free_pages((unsigned long)vaddr, get_order(size));
}
#endif /* CONFIG_X86 */

/*
 * Map a single buffer of the indicated size for DMA in streaming mode. The
 * DMA address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_sync_single_for_cpu is
 * performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size, true) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    !is_swiotlb_force_bounce(dev))
		goto done;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size);

	map = swiotlb_tbl_map_single(dev, phys, size, 0, dir, attrs);
	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	phys = map;
	dev_addr = xen_phys_to_dma(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
		__swiotlb_tbl_unmap_single(dev, map, size, dir,
				attrs | DMA_ATTR_SKIP_CPU_SYNC,
				swiotlb_find_pool(dev, map));
		return DMA_MAPPING_ERROR;
	}

done:
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
			arch_sync_dma_for_device(phys, size, dir);
		else
			xen_dma_sync_for_device(dev, dev_addr, size, dir);
	}
	return dev_addr;
}
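
/*
 * Illustrative sketch (not part of the driver): how a driver reaches
 * xen_swiotlb_map_page(). Drivers never call it directly; they use the
 * generic DMA API (<linux/dma-mapping.h>), which dispatches through
 * xen_swiotlb_dma_ops. The guard macro and function name are hypothetical.
 */
#ifdef XEN_SWIOTLB_MAP_EXAMPLE /* hypothetical guard; illustration only */
static int xen_swiotlb_map_example(struct device *dev, struct page *page)
{
	/* Lands in xen_swiotlb_map_page() when xen_swiotlb_dma_ops is set. */
	dma_addr_t handle = dma_map_page(dev, page, 0, PAGE_SIZE,
					 DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with @handle, wait for completion ... */

	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
	return 0;
}
#endif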

/*
 * Unmap a single streaming mode DMA translation. The dma_addr and size must
 * match what was provided in a previous xen_swiotlb_map_page call. All
 * other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);
	struct io_tlb_pool *pool;

	BUG_ON(dir == DMA_NONE);

	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
	}

	/* NOTE: We use dev_addr here, not paddr! */
	pool = xen_swiotlb_find_pool(hwdev, dev_addr);
	if (pool)
		__swiotlb_tbl_unmap_single(hwdev, paddr, size, dir,
					   attrs, pool);
}

static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
	struct io_tlb_pool *pool;

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
	}

	pool = xen_swiotlb_find_pool(dev, dma_addr);
	if (pool)
		__swiotlb_sync_single_for_cpu(dev, paddr, size, dir, pool);
}

static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
	struct io_tlb_pool *pool;

	pool = xen_swiotlb_find_pool(dev, dma_addr);
	if (pool)
		__swiotlb_sync_single_for_device(dev, paddr, size, dir, pool);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_device(paddr, size, dir);
		else
			xen_dma_sync_for_device(dev, dma_addr, size, dir);
	}
}
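
/*
 * Illustrative sketch (not part of the driver): the ownership protocol the
 * sync helpers above implement. A driver that inspects a long-lived
 * streaming mapping between transfers brackets the CPU access with the
 * generic dma_sync_single_*() calls, which dispatch to the
 * xen_swiotlb_sync_single_* functions. The guard macro and function name
 * are hypothetical.
 */
#ifdef XEN_SWIOTLB_SYNC_EXAMPLE /* hypothetical guard; illustration only */
static void xen_swiotlb_sync_example(struct device *dev, dma_addr_t handle,
				     void *cpu_buf, size_t len)
{
	/* Hand the buffer to the CPU: bounce-copy back and/or cache-sync. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... the CPU may now read cpu_buf safely ... */

	/* Return ownership to the device before the next transfer. */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}
#endif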

/*
 * Unmap a set of streaming mode DMA translations. Again, CPU read rules
 * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
				dir, attrs);
}

static int
xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nelems;
out_unmap:
	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sgl) = 0;
	return -EIO;
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
			    int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
				sg->length, dir);
	}
}

static void
xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
			       int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
				sg->length, dir);
	}
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24 bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_phys_to_dma(hwdev, default_swiotlb_limit()) <= mask;
}

const struct dma_map_ops xen_swiotlb_dma_ops = {
#ifdef CONFIG_X86
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
#else
	.alloc = dma_direct_alloc,
	.free = dma_direct_free,
#endif
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg,
	.unmap_sg = xen_swiotlb_unmap_sg,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.alloc_pages_op = dma_common_alloc_pages,
	.free_pages = dma_common_free_pages,
	.max_mapping_size = swiotlb_max_mapping_size,
};
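
/*
 * Illustrative sketch (not part of the driver): how the ops table above
 * takes effect. Platform setup for Xen domains points a device's dma_ops
 * at xen_swiotlb_dma_ops; from then on every dma_map_*()/dma_sync_*() call
 * on that device is routed through the xen_swiotlb_* functions in this
 * file. The guard macro and helper name are hypothetical; set_dma_ops()
 * comes from <linux/dma-map-ops.h>, already included above.
 */
#ifdef XEN_SWIOTLB_OPS_EXAMPLE /* hypothetical guard; illustration only */
static void xen_swiotlb_install_example(struct device *dev)
{
	/* Roughly what arch/platform code does for a Xen PV domain. */
	set_dma_ops(dev, &xen_swiotlb_dma_ops);
}
#endif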