// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * PV guests under Xen run on a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice versa, and
 * also provides a mechanism to obtain contiguous pages for device driver
 * operations (say DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */

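/*
 * Illustrative (hypothetical) example: if pfn_to_mfn(0x100) == 0x8ff00,
 * then the machine address of guest-physical 0x100123 is
 * (0x8ff00 << PAGE_SHIFT) | 0x123, and nothing guarantees that
 * pfn_to_mfn(0x101) == 0x8ff01 -- hence the translation and contiguity
 * helpers below.
 */
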
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>

#include <trace/events/swiotlb.h>

#define MAX_DMA_BITS 32

/*
 * Translate a guest physical address into the corresponding bus (machine)
 * address, and back again.
 */
static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;

	baddr |= paddr & ~XEN_PAGE_MASK;
	return baddr;
}

static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
}

static inline phys_addr_t xen_bus_to_phys(struct device *dev,
					  phys_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
			    (baddr & ~XEN_PAGE_MASK);

	return paddr;
}

static inline phys_addr_t xen_dma_to_phys(struct device *dev,
					  dma_addr_t dma_addr)
{
	return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}

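/*
 * A buffer whose guest-physical address is naturally aligned for its
 * allocation order can still be backed by a machine address that is not.
 * Report that case so callers can rebuild the region machine-contiguously.
 */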
static inline bool range_requires_alignment(phys_addr_t p, size_t size)
{
	phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
	phys_addr_t bus_addr = pfn_to_bfn(XEN_PFN_DOWN(p)) << XEN_PAGE_SHIFT;

	return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn);
}

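/*
 * Return 1 if the Xen page frames backing [p, p + size) are not contiguous
 * in machine memory, i.e. the range cannot be handed to a device as a
 * single DMA region without bounce buffering.
 */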
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);

	next_bfn = pfn_to_bfn(xen_pfn);

	for (i = 1; i < nr_pages; i++)
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 1;

	return 0;
}

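/*
 * Find the swiotlb pool that backs @dma_addr, or return NULL if the address
 * does not fall into one of this domain's bounce buffers.
 */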
static struct io_tlb_pool *xen_swiotlb_find_pool(struct device *dev,
						 dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;

	/*
	 * If the address is outside our domain, it CAN have the same virtual
	 * address as another address in our domain. Therefore _only_ check
	 * addresses within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr)))
		return swiotlb_find_pool(dev, paddr);
	return NULL;
}

#ifdef CONFIG_X86
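/*
 * Exchange the pages backing the early SWIOTLB buffer, one IO_TLB_SEGSIZE
 * chunk at a time, for machine-contiguous ones, widening the requested DMA
 * mask up to MAX_DMA_BITS until the hypervisor can satisfy it.
 */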
int __init xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
	int rc;
	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
	unsigned int i, dma_bits = order + PAGE_SHIFT;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
	BUG_ON(nslabs % IO_TLB_SEGSIZE);

	i = 0;
	do {
		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT), order,
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < MAX_DMA_BITS);
		if (rc)
			return rc;

		i += IO_TLB_SEGSIZE;
	} while (i < nslabs);
	return 0;
}

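/*
 * Coherent allocations must be machine-contiguous and reachable within the
 * device's coherent DMA mask; when the pages returned by the allocator do
 * not qualify, rebuild the region through the hypervisor.
 */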
static void *
xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
	u64 dma_mask = dev->coherent_dma_mask;
	int order = get_order(size);
	phys_addr_t phys;
	void *ret;

	/* Align the allocation to the Xen page size */
	size = ALIGN(size, XEN_PAGE_SIZE);

	ret = (void *)__get_free_pages(flags, get_order(size));
	if (!ret)
		return ret;
	phys = virt_to_phys(ret);

	*dma_handle = xen_phys_to_dma(dev, phys);
	if (*dma_handle + size - 1 > dma_mask ||
	    range_straddles_page_boundary(phys, size) ||
	    range_requires_alignment(phys, size)) {
		if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
				dma_handle) != 0)
			goto out_free_pages;
		SetPageXenRemapped(virt_to_page(ret));
	}

	memset(ret, 0, size);
	return ret;

out_free_pages:
	free_pages((unsigned long)ret, get_order(size));
	return NULL;
}

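/*
 * Undo xen_swiotlb_alloc_coherent(); apply the same size rounding before
 * deciding whether a machine-contiguous region has to be torn down.
 */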
static void
xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t phys = virt_to_phys(vaddr);
	int order = get_order(size);

	/* Convert the size to the size actually allocated. */
	size = ALIGN(size, XEN_PAGE_SIZE);

	if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
	    WARN_ON_ONCE(range_straddles_page_boundary(phys, size) ||
			 range_requires_alignment(phys, size)))
		return;

	if (TestClearPageXenRemapped(virt_to_page(vaddr)))
		xen_destroy_contiguous_region(phys, order);
	free_pages((unsigned long)vaddr, get_order(size));
}
#endif /* CONFIG_X86 */

/*
 * Map a single buffer of the indicated size for DMA in streaming mode. The
 * DMA address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or one of the xen_swiotlb_sync_single_for_*
 * calls is performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size, true) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    !is_swiotlb_force_bounce(dev))
		goto done;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size);

	map = swiotlb_tbl_map_single(dev, phys, size, 0, dir, attrs);
	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	phys = map;
	dev_addr = xen_phys_to_dma(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
		__swiotlb_tbl_unmap_single(dev, map, size, dir,
				attrs | DMA_ATTR_SKIP_CPU_SYNC,
				swiotlb_find_pool(dev, map));
		return DMA_MAPPING_ERROR;
	}

done:
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
			arch_sync_dma_for_device(phys, size, dir);
		else
			xen_dma_sync_for_device(dev, dev_addr, size, dir);
	}
	return dev_addr;
}

/*
 * Unmap a single streaming mode DMA translation. The dma_addr and size must
 * match what was provided in a previous xen_swiotlb_map_page call. All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);
	struct io_tlb_pool *pool;

	BUG_ON(dir == DMA_NONE);

	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
	}

	/* NOTE: We use dev_addr here, not paddr! */
	pool = xen_swiotlb_find_pool(hwdev, dev_addr);
	if (pool)
		__swiotlb_tbl_unmap_single(hwdev, paddr, size, dir,
					   attrs, pool);
}

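/*
 * Make a streaming mapping visible to the CPU: perform any architecture
 * cache maintenance for the mapped address first, then, if the mapping was
 * bounced, copy the bounce buffer contents back to the original pages.
 */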
static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
	struct io_tlb_pool *pool;

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
	}

	pool = xen_swiotlb_find_pool(dev, dma_addr);
	if (pool)
		__swiotlb_sync_single_for_cpu(dev, paddr, size, dir, pool);
}

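/*
 * Hand a streaming mapping back to the device: if the mapping was bounced,
 * copy the CPU's updates into the bounce buffer first, then perform any
 * cache maintenance the architecture requires.
 */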
static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
	struct io_tlb_pool *pool;

	pool = xen_swiotlb_find_pool(dev, dma_addr);
	if (pool)
		__swiotlb_sync_single_for_device(dev, paddr, size, dir, pool);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_device(paddr, size, dir);
		else
			xen_dma_sync_for_device(dev, dma_addr, size, dir);
	}
}

/*
 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
 * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
				dir, attrs);
}

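/*
 * Map a scatter/gather list one element at a time; each element may be
 * bounced independently. On failure the elements mapped so far are unwound
 * and -EIO is returned.
 */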
static int
xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nelems;
out_unmap:
	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sgl) = 0;
	return -EIO;
}

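/*
 * The scatter/gather sync variants simply apply the single-entry sync
 * helpers to every element of the list.
 */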
static void
xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
		int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
				sg->length, dir);
	}
}

static void
xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
		int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
				sg->length, dir);
	}
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_phys_to_dma(hwdev, default_swiotlb_limit()) <= mask;
}

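/*
 * The DMA operations handed to devices in a PV domain. Coherent allocations
 * are only overridden on x86; other architectures use the dma-direct
 * implementations.
 */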
const struct dma_map_ops xen_swiotlb_dma_ops = {
#ifdef CONFIG_X86
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
#else
	.alloc = dma_direct_alloc,
	.free = dma_direct_free,
#endif
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg,
	.unmap_sg = xen_swiotlb_unmap_sg,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.alloc_pages_op = dma_common_alloc_pages,
	.free_pages = dma_common_free_pages,
	.max_mapping_size = swiotlb_max_mapping_size,
};