xref: /linux/kernel/dma/direct.c (revision c060f8168bdf22aa986970955af99702d142dfbe)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2018-2020 Christoph Hellwig.
4  *
5  * DMA operations that map physical memory directly without using an IOMMU.
6  */
7 #include <linux/memblock.h> /* for max_pfn */
8 #include <linux/export.h>
9 #include <linux/mm.h>
10 #include <linux/dma-map-ops.h>
11 #include <linux/scatterlist.h>
12 #include <linux/pfn.h>
13 #include <linux/vmalloc.h>
14 #include <linux/set_memory.h>
15 #include <linux/slab.h>
16 #include "direct.h"
17 
18 /*
19  * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
20  * it for entirely different regions. In that case the arch code needs to
21  * override the variable below for dma-direct to work properly.
22  */
23 u64 zone_dma_limit __ro_after_init = DMA_BIT_MASK(24);
24 
25 static inline dma_addr_t phys_to_dma_direct(struct device *dev,
26 		phys_addr_t phys)
27 {
28 	if (force_dma_unencrypted(dev))
29 		return phys_to_dma_unencrypted(dev, phys);
30 	return phys_to_dma(dev, phys);
31 }
32 
33 static inline struct page *dma_direct_to_page(struct device *dev,
34 		dma_addr_t dma_addr)
35 {
36 	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
37 }
38 
39 u64 dma_direct_get_required_mask(struct device *dev)
40 {
41 	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
42 	u64 max_dma = phys_to_dma_direct(dev, phys);
43 
44 	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
45 }
46 
47 static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
48 {
49 	u64 dma_limit = min_not_zero(
50 		dev->coherent_dma_mask,
51 		dev->bus_dma_limit);
52 
53 	/*
54 	 * Optimistically try the zone that the physical address mask falls
55 	 * into first.  If that returns memory that isn't actually addressable
56 	 * we will fallback to the next lower zone and try again.
57 	 *
58 	 * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
59 	 * zones.
60 	 */
61 	*phys_limit = dma_to_phys(dev, dma_limit);
62 	if (*phys_limit <= zone_dma_limit)
63 		return GFP_DMA;
64 	if (*phys_limit <= DMA_BIT_MASK(32))
65 		return GFP_DMA32;
66 	return 0;
67 }
68 
69 bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
70 {
71 	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);
72 
73 	if (dma_addr == DMA_MAPPING_ERROR)
74 		return false;
75 	return dma_addr + size - 1 <=
76 		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
77 }
78 
79 static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
80 {
81 	if (!force_dma_unencrypted(dev))
82 		return 0;
83 	return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
84 }
85 
86 static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
87 {
88 	int ret;
89 
90 	if (!force_dma_unencrypted(dev))
91 		return 0;
92 	ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
93 	if (ret)
94 		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
95 	return ret;
96 }
97 
98 static void __dma_direct_free_pages(struct device *dev, struct page *page,
99 				    size_t size)
100 {
101 	if (swiotlb_free(dev, page, size))
102 		return;
103 	dma_free_contiguous(dev, page, size);
104 }
105 
106 static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
107 {
108 	struct page *page = swiotlb_alloc(dev, size);
109 
110 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
111 		swiotlb_free(dev, page, size);
112 		return NULL;
113 	}
114 
115 	return page;
116 }
117 
118 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
119 		gfp_t gfp, bool allow_highmem)
120 {
121 	int node = dev_to_node(dev);
122 	struct page *page = NULL;
123 	u64 phys_limit;
124 
125 	WARN_ON_ONCE(!PAGE_ALIGNED(size));
126 
127 	if (is_swiotlb_for_alloc(dev))
128 		return dma_direct_alloc_swiotlb(dev, size);
129 
130 	gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
131 	page = dma_alloc_contiguous(dev, size, gfp);
132 	if (page) {
133 		if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
134 		    (!allow_highmem && PageHighMem(page))) {
135 			dma_free_contiguous(dev, page, size);
136 			page = NULL;
137 		}
138 	}
139 again:
140 	if (!page)
141 		page = alloc_pages_node(node, gfp, get_order(size));
142 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
143 		__free_pages(page, get_order(size));
144 		page = NULL;
145 
146 		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
147 		    phys_limit < DMA_BIT_MASK(64) &&
148 		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
149 			gfp |= GFP_DMA32;
150 			goto again;
151 		}
152 
153 		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
154 			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
155 			goto again;
156 		}
157 	}
158 
159 	return page;
160 }
161 
162 /*
163  * Check if a potentially blocking operations needs to dip into the atomic
164  * pools for the given device/gfp.
165  */
166 static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
167 {
168 	return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
169 }
170 
171 static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
172 		dma_addr_t *dma_handle, gfp_t gfp)
173 {
174 	struct page *page;
175 	u64 phys_limit;
176 	void *ret;
177 
178 	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
179 		return NULL;
180 
181 	gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
182 	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
183 	if (!page)
184 		return NULL;
185 	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
186 	return ret;
187 }
188 
189 static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
190 		dma_addr_t *dma_handle, gfp_t gfp)
191 {
192 	struct page *page;
193 
194 	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
195 	if (!page)
196 		return NULL;
197 
198 	/* remove any dirty cache lines on the kernel alias */
199 	if (!PageHighMem(page))
200 		arch_dma_prep_coherent(page, size);
201 
202 	/* return the page pointer as the opaque cookie */
203 	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
204 	return page;
205 }
206 
207 void *dma_direct_alloc(struct device *dev, size_t size,
208 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
209 {
210 	bool remap = false, set_uncached = false;
211 	struct page *page;
212 	void *ret;
213 
214 	size = PAGE_ALIGN(size);
215 	if (attrs & DMA_ATTR_NO_WARN)
216 		gfp |= __GFP_NOWARN;
217 
218 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
219 	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
220 		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);
221 
222 	if (!dev_is_dma_coherent(dev)) {
223 		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
224 		    !is_swiotlb_for_alloc(dev))
225 			return arch_dma_alloc(dev, size, dma_handle, gfp,
226 					      attrs);
227 
228 		/*
229 		 * If there is a global pool, always allocate from it for
230 		 * non-coherent devices.
231 		 */
232 		if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
233 			return dma_alloc_from_global_coherent(dev, size,
234 					dma_handle);
235 
236 		/*
237 		 * Otherwise we require the architecture to either be able to
238 		 * mark arbitrary parts of the kernel direct mapping uncached,
239 		 * or remapped it uncached.
240 		 */
241 		set_uncached = IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED);
242 		remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
243 		if (!set_uncached && !remap) {
244 			pr_warn_once("coherent DMA allocations not supported on this platform.\n");
245 			return NULL;
246 		}
247 	}
248 
249 	/*
250 	 * Remapping or decrypting memory may block, allocate the memory from
251 	 * the atomic pools instead if we aren't allowed block.
252 	 */
253 	if ((remap || force_dma_unencrypted(dev)) &&
254 	    dma_direct_use_pool(dev, gfp))
255 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
256 
257 	/* we always manually zero the memory once we are done */
258 	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
259 	if (!page)
260 		return NULL;
261 
262 	/*
263 	 * dma_alloc_contiguous can return highmem pages depending on a
264 	 * combination the cma= arguments and per-arch setup.  These need to be
265 	 * remapped to return a kernel virtual address.
266 	 */
267 	if (PageHighMem(page)) {
268 		remap = true;
269 		set_uncached = false;
270 	}
271 
272 	if (remap) {
273 		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
274 
275 		if (force_dma_unencrypted(dev))
276 			prot = pgprot_decrypted(prot);
277 
278 		/* remove any dirty cache lines on the kernel alias */
279 		arch_dma_prep_coherent(page, size);
280 
281 		/* create a coherent mapping */
282 		ret = dma_common_contiguous_remap(page, size, prot,
283 				__builtin_return_address(0));
284 		if (!ret)
285 			goto out_free_pages;
286 	} else {
287 		ret = page_address(page);
288 		if (dma_set_decrypted(dev, ret, size))
289 			goto out_leak_pages;
290 	}
291 
292 	memset(ret, 0, size);
293 
294 	if (set_uncached) {
295 		arch_dma_prep_coherent(page, size);
296 		ret = arch_dma_set_uncached(ret, size);
297 		if (IS_ERR(ret))
298 			goto out_encrypt_pages;
299 	}
300 
301 	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
302 	return ret;
303 
304 out_encrypt_pages:
305 	if (dma_set_encrypted(dev, page_address(page), size))
306 		return NULL;
307 out_free_pages:
308 	__dma_direct_free_pages(dev, page, size);
309 	return NULL;
310 out_leak_pages:
311 	return NULL;
312 }
313 
314 void dma_direct_free(struct device *dev, size_t size,
315 		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
316 {
317 	unsigned int page_order = get_order(size);
318 
319 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
320 	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
321 		/* cpu_addr is a struct page cookie, not a kernel address */
322 		dma_free_contiguous(dev, cpu_addr, size);
323 		return;
324 	}
325 
326 	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
327 	    !dev_is_dma_coherent(dev) &&
328 	    !is_swiotlb_for_alloc(dev)) {
329 		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
330 		return;
331 	}
332 
333 	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
334 	    !dev_is_dma_coherent(dev)) {
335 		if (!dma_release_from_global_coherent(page_order, cpu_addr))
336 			WARN_ON_ONCE(1);
337 		return;
338 	}
339 
340 	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
341 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
342 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
343 		return;
344 
345 	if (is_vmalloc_addr(cpu_addr)) {
346 		vunmap(cpu_addr);
347 	} else {
348 		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
349 			arch_dma_clear_uncached(cpu_addr, size);
350 		if (dma_set_encrypted(dev, cpu_addr, size))
351 			return;
352 	}
353 
354 	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
355 }
356 
357 struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
358 		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
359 {
360 	struct page *page;
361 	void *ret;
362 
363 	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
364 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
365 
366 	page = __dma_direct_alloc_pages(dev, size, gfp, false);
367 	if (!page)
368 		return NULL;
369 
370 	ret = page_address(page);
371 	if (dma_set_decrypted(dev, ret, size))
372 		goto out_leak_pages;
373 	memset(ret, 0, size);
374 	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
375 	return page;
376 out_leak_pages:
377 	return NULL;
378 }
379 
380 void dma_direct_free_pages(struct device *dev, size_t size,
381 		struct page *page, dma_addr_t dma_addr,
382 		enum dma_data_direction dir)
383 {
384 	void *vaddr = page_address(page);
385 
386 	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
387 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
388 	    dma_free_from_pool(dev, vaddr, size))
389 		return;
390 
391 	if (dma_set_encrypted(dev, vaddr, size))
392 		return;
393 	__dma_direct_free_pages(dev, page, size);
394 }
395 
396 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
397     defined(CONFIG_SWIOTLB)
398 void dma_direct_sync_sg_for_device(struct device *dev,
399 		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
400 {
401 	struct scatterlist *sg;
402 	int i;
403 
404 	for_each_sg(sgl, sg, nents, i) {
405 		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
406 
407 		swiotlb_sync_single_for_device(dev, paddr, sg->length, dir);
408 
409 		if (!dev_is_dma_coherent(dev))
410 			arch_sync_dma_for_device(paddr, sg->length,
411 					dir);
412 	}
413 }
414 #endif
415 
416 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
417     defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
418     defined(CONFIG_SWIOTLB)
419 void dma_direct_sync_sg_for_cpu(struct device *dev,
420 		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
421 {
422 	struct scatterlist *sg;
423 	int i;
424 
425 	for_each_sg(sgl, sg, nents, i) {
426 		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
427 
428 		if (!dev_is_dma_coherent(dev))
429 			arch_sync_dma_for_cpu(paddr, sg->length, dir);
430 
431 		swiotlb_sync_single_for_cpu(dev, paddr, sg->length, dir);
432 
433 		if (dir == DMA_FROM_DEVICE)
434 			arch_dma_mark_clean(paddr, sg->length);
435 	}
436 
437 	if (!dev_is_dma_coherent(dev))
438 		arch_sync_dma_for_cpu_all();
439 }
440 
441 /*
442  * Unmaps segments, except for ones marked as pci_p2pdma which do not
443  * require any further action as they contain a bus address.
444  */
445 void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
446 		int nents, enum dma_data_direction dir, unsigned long attrs)
447 {
448 	struct scatterlist *sg;
449 	int i;
450 
451 	for_each_sg(sgl,  sg, nents, i) {
452 		if (sg_dma_is_bus_address(sg))
453 			sg_dma_unmark_bus_address(sg);
454 		else
455 			dma_direct_unmap_page(dev, sg->dma_address,
456 					      sg_dma_len(sg), dir, attrs);
457 	}
458 }
459 #endif
460 
461 int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
462 		enum dma_data_direction dir, unsigned long attrs)
463 {
464 	struct pci_p2pdma_map_state p2pdma_state = {};
465 	enum pci_p2pdma_map_type map;
466 	struct scatterlist *sg;
467 	int i, ret;
468 
469 	for_each_sg(sgl, sg, nents, i) {
470 		if (is_pci_p2pdma_page(sg_page(sg))) {
471 			map = pci_p2pdma_map_segment(&p2pdma_state, dev, sg);
472 			switch (map) {
473 			case PCI_P2PDMA_MAP_BUS_ADDR:
474 				continue;
475 			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
476 				/*
477 				 * Any P2P mapping that traverses the PCI
478 				 * host bridge must be mapped with CPU physical
479 				 * address and not PCI bus addresses. This is
480 				 * done with dma_direct_map_page() below.
481 				 */
482 				break;
483 			default:
484 				ret = -EREMOTEIO;
485 				goto out_unmap;
486 			}
487 		}
488 
489 		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
490 				sg->offset, sg->length, dir, attrs);
491 		if (sg->dma_address == DMA_MAPPING_ERROR) {
492 			ret = -EIO;
493 			goto out_unmap;
494 		}
495 		sg_dma_len(sg) = sg->length;
496 	}
497 
498 	return nents;
499 
500 out_unmap:
501 	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
502 	return ret;
503 }
504 
505 dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
506 		size_t size, enum dma_data_direction dir, unsigned long attrs)
507 {
508 	dma_addr_t dma_addr = paddr;
509 
510 	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
511 		dev_err_once(dev,
512 			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
513 			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
514 		WARN_ON_ONCE(1);
515 		return DMA_MAPPING_ERROR;
516 	}
517 
518 	return dma_addr;
519 }
520 
521 int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
522 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
523 		unsigned long attrs)
524 {
525 	struct page *page = dma_direct_to_page(dev, dma_addr);
526 	int ret;
527 
528 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
529 	if (!ret)
530 		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
531 	return ret;
532 }
533 
534 bool dma_direct_can_mmap(struct device *dev)
535 {
536 	return dev_is_dma_coherent(dev) ||
537 		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
538 }
539 
540 int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
541 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
542 		unsigned long attrs)
543 {
544 	unsigned long user_count = vma_pages(vma);
545 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
546 	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
547 	int ret = -ENXIO;
548 
549 	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
550 	if (force_dma_unencrypted(dev))
551 		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
552 
553 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
554 		return ret;
555 	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
556 		return ret;
557 
558 	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
559 		return -ENXIO;
560 	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
561 			user_count << PAGE_SHIFT, vma->vm_page_prot);
562 }
563 
564 int dma_direct_supported(struct device *dev, u64 mask)
565 {
566 	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;
567 
568 	/*
569 	 * Because 32-bit DMA masks are so common we expect every architecture
570 	 * to be able to satisfy them - either by not supporting more physical
571 	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
572 	 * architecture needs to use an IOMMU instead of the direct mapping.
573 	 */
574 	if (mask >= DMA_BIT_MASK(32))
575 		return 1;
576 
577 	/*
578 	 * This check needs to be against the actual bit mask value, so use
579 	 * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
580 	 * part of the check.
581 	 */
582 	if (IS_ENABLED(CONFIG_ZONE_DMA))
583 		min_mask = min_t(u64, min_mask, zone_dma_limit);
584 	return mask >= phys_to_dma_unencrypted(dev, min_mask);
585 }
586 
587 /*
588  * To check whether all ram resource ranges are covered by dma range map
589  * Returns 0 when further check is needed
590  * Returns 1 if there is some RAM range can't be covered by dma_range_map
591  */
592 static int check_ram_in_range_map(unsigned long start_pfn,
593 				  unsigned long nr_pages, void *data)
594 {
595 	unsigned long end_pfn = start_pfn + nr_pages;
596 	const struct bus_dma_region *bdr = NULL;
597 	const struct bus_dma_region *m;
598 	struct device *dev = data;
599 
600 	while (start_pfn < end_pfn) {
601 		for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
602 			unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);
603 
604 			if (start_pfn >= cpu_start_pfn &&
605 			    start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
606 				bdr = m;
607 				break;
608 			}
609 		}
610 		if (!bdr)
611 			return 1;
612 
613 		start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size);
614 	}
615 
616 	return 0;
617 }
618 
619 bool dma_direct_all_ram_mapped(struct device *dev)
620 {
621 	if (!dev->dma_range_map)
622 		return true;
623 	return !walk_system_ram_range(0, PFN_DOWN(ULONG_MAX) + 1, dev,
624 				      check_ram_in_range_map);
625 }
626 
627 size_t dma_direct_max_mapping_size(struct device *dev)
628 {
629 	/* If SWIOTLB is active, use its maximum mapping size */
630 	if (is_swiotlb_active(dev) &&
631 	    (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
632 		return swiotlb_max_mapping_size(dev);
633 	return SIZE_MAX;
634 }
635 
636 bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
637 {
638 	return !dev_is_dma_coherent(dev) ||
639 	       swiotlb_find_pool(dev, dma_to_phys(dev, dma_addr));
640 }
641 
642 /**
643  * dma_direct_set_offset - Assign scalar offset for a single DMA range.
644  * @dev:	device pointer; needed to "own" the alloced memory.
645  * @cpu_start:  beginning of memory region covered by this offset.
646  * @dma_start:  beginning of DMA/PCI region covered by this offset.
647  * @size:	size of the region.
648  *
649  * This is for the simple case of a uniform offset which cannot
650  * be discovered by "dma-ranges".
651  *
652  * It returns -ENOMEM if out of memory, -EINVAL if a map
653  * already exists, 0 otherwise.
654  *
655  * Note: any call to this from a driver is a bug.  The mapping needs
656  * to be described by the device tree or other firmware interfaces.
657  */
658 int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
659 			 dma_addr_t dma_start, u64 size)
660 {
661 	struct bus_dma_region *map;
662 	u64 offset = (u64)cpu_start - (u64)dma_start;
663 
664 	if (dev->dma_range_map) {
665 		dev_err(dev, "attempt to add DMA range to existing map\n");
666 		return -EINVAL;
667 	}
668 
669 	if (!offset)
670 		return 0;
671 
672 	map = kcalloc(2, sizeof(*map), GFP_KERNEL);
673 	if (!map)
674 		return -ENOMEM;
675 	map[0].cpu_start = cpu_start;
676 	map[0].dma_start = dma_start;
677 	map[0].size = size;
678 	dev->dma_range_map = map;
679 	return 0;
680 }
681