// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "direct.h"

bool dma_default_coherent;

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non-coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
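
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * probe routine using the managed allocator above.  Because the devres
 * entry added by dmam_alloc_attrs() triggers dmam_release() on driver
 * detach, no explicit free is needed in ->remove() or on error paths.
 */
static int example_probe(struct device *dev)
{
	dma_addr_t dma_handle;
	void *vaddr;

	vaddr = dmam_alloc_attrs(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL, 0);
	if (!vaddr)
		return -ENOMEM;
	/* ... program dma_handle into the (hypothetical) device ... */
	return 0;
}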

static bool dma_go_direct(struct device *dev, dma_addr_t mask,
		const struct dma_map_ops *ops)
{
	if (likely(!ops))
		return true;
#ifdef CONFIG_DMA_OPS_BYPASS
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			    dma_direct_get_required_mask(dev);
#endif
	return false;
}
/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

static inline bool dma_map_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, *dev->dma_mask, ops);
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr);

	return addr;
}
EXPORT_SYMBOL(dma_map_page_attrs);

void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_page_direct(dev, addr + size))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);
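
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): a streaming mapping of one page.  Failure is signalled by
 * DMA_MAPPING_ERROR rather than 0 or NULL, so the result must be
 * checked with dma_mapping_error() before use.
 */
static int example_map_one_page(struct device *dev, struct page *page)
{
	dma_addr_t addr;

	addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE, 0);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;
	/* ... start the transfer and wait for it to complete ... */
	dma_unmap_page_attrs(dev, addr, PAGE_SIZE, DMA_TO_DEVICE, 0);
	return 0;
}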

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return 0;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_sg_direct(dev, sg, nents))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}
EXPORT_SYMBOL(dma_map_sg_attrs);

void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_sg_direct(dev, sg, nents))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);
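
/*
 * Illustrative sketch (hypothetical helper): honouring the return
 * convention above.  The mapped count may be smaller than nents when
 * entries get merged, so it bounds the for_each_sg() walk, while
 * dma_unmap_sg_attrs() is always passed the original nents.
 */
static int example_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents)
{
	struct scatterlist *sg;
	int mapped, i;

	mapped = dma_map_sg_attrs(dev, sgl, nents, DMA_BIDIRECTIONAL, 0);
	if (mapped == 0)
		return -EIO;
	for_each_sg(sgl, sg, mapped, i) {
		/* ... program sg_dma_address(sg) / sg_dma_len(sg) ... */
	}
	dma_unmap_sg_attrs(dev, sgl, nents, DMA_BIDIRECTIONAL, 0);
	return 0;
}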

dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	/* Don't allow RAM to be mapped */
	if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
	return addr;
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
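
/*
 * Illustrative sketch (hypothetical helper): CPU access to a live
 * streaming mapping.  Ownership is handed back to the CPU before the
 * read and returned to the device afterwards.
 */
static void example_peek_rx(struct device *dev, dma_addr_t addr,
		void *vaddr, size_t len)
{
	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
	/* ... inspect the device-written data at vaddr ... */
	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
}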

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scatter/gather table.  This presents a couple of problems:
 * 1. Not all memory allocated via the coherent DMA APIs is backed by
 *    a struct page
 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
 *    as we will try to flush the memory through a different alias to that
 *    actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (force_dma_unencrypted(dev))
		prot = pgprot_decrypted(prot);
	if (dev_is_dma_coherent(dev))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_can_mmap(dev);
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
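
/*
 * Illustrative sketch (hypothetical helper): wiring a coherent buffer
 * into a driver's ->mmap() file operation, probing with dma_can_mmap()
 * first.  The buffer must stay allocated until the VMA is torn down.
 */
static int example_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	if (!dma_can_mmap(dev))
		return -ENXIO;
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, 0);
}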

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware).  As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs, and at least not actively
	 * harmful in general.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_alloc_direct(dev, ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);
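
/*
 * Illustrative sketch (hypothetical helper): the unmanaged counterpart
 * to the dmam_* example earlier.  The driver owns the buffer and, per
 * the WARN_ON(irqs_disabled()) above, must not free it with interrupts
 * disabled.
 */
static int example_use_coherent(struct device *dev, size_t size)
{
	dma_addr_t dma;
	void *ring;

	ring = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL, 0);
	if (!ring)
		return -ENOMEM;
	/* ... use ring (CPU view) and dma (device view) ... */
	dma_free_attrs(dev, size, ring, dma, 0);
	return 0;
}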

static struct page *__dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
		return NULL;
	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
		return NULL;

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
	if (!ops->alloc_pages)
		return NULL;
	return ops->alloc_pages(dev, size, dma_handle, dir, gfp);
}

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	if (page)
		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle);
	return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);

static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free_pages(dev, size, page, dma_handle, dir);
	else if (ops->free_pages)
		ops->free_pages(dev, size, page, dma_handle, dir);
}

void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, dma_handle, size, dir);
	__dma_free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);

int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start,
			       page_to_pfn(page) + vma->vm_pgoff,
			       vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_pages);
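
/*
 * Illustrative sketch (hypothetical helper): non-coherent pages follow
 * streaming ownership rules, so CPU reads of device-written data need a
 * dma_sync_single_for_cpu() call first.
 */
static int example_pages(struct device *dev, size_t size)
{
	struct page *page;
	dma_addr_t dma;

	page = dma_alloc_pages(dev, size, &dma, DMA_FROM_DEVICE, GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	/* ... let the device fill the buffer ... */
	dma_sync_single_for_cpu(dev, dma, size, DMA_FROM_DEVICE);
	/* ... read via page_address(page) ... */
	dma_free_pages(dev, size, page, dma, DMA_FROM_DEVICE);
	return 0;
}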

static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp)
{
	struct sg_table *sgt;
	struct page *page;

	sgt = kmalloc(sizeof(*sgt), gfp);
	if (!sgt)
		return NULL;
	if (sg_alloc_table(sgt, 1, gfp))
		goto out_free_sgt;
	page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
	if (!page)
		goto out_free_table;
	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_len(sgt->sgl) = sgt->sgl->length;
	return sgt;
out_free_table:
	sg_free_table(sgt);
out_free_sgt:
	kfree(sgt);
	return NULL;
}

struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct sg_table *sgt;

	if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
		return NULL;

	if (ops && ops->alloc_noncontiguous)
		sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
	else
		sgt = alloc_single_sgt(dev, size, dir, gfp);

	if (sgt) {
		sgt->nents = 1;
		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir);
	}
	return sgt;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);

static void free_single_sgt(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	__dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
			 dir);
	sg_free_table(sgt);
	kfree(sgt);
}

void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
	if (ops && ops->free_noncontiguous)
		ops->free_noncontiguous(dev, size, sgt, dir);
	else
		free_single_sgt(dev, size, sgt, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncontiguous);

void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (ops && ops->alloc_noncontiguous)
		return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
	return page_address(sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);

void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->alloc_noncontiguous)
		vunmap(vaddr);
}
EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);

int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->alloc_noncontiguous) {
		unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		if (vma->vm_pgoff >= count ||
		    vma_pages(vma) > count - vma->vm_pgoff)
			return -ENXIO;
		return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
	}
	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
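
/*
 * Illustrative sketch (hypothetical helper): allocate noncontiguous
 * memory, map it into the kernel to fill it, then flush the whole
 * sg_table to the device with dma_sync_sgtable_for_device().
 */
static int example_noncontiguous(struct device *dev, size_t size)
{
	struct sg_table *sgt;
	void *vaddr;

	sgt = dma_alloc_noncontiguous(dev, size, DMA_TO_DEVICE, GFP_KERNEL, 0);
	if (!sgt)
		return -ENOMEM;
	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
	if (!vaddr) {
		dma_free_noncontiguous(dev, size, sgt, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	memset(vaddr, 0, size);
	dma_sync_sgtable_for_device(dev, sgt, DMA_TO_DEVICE);
	dma_vunmap_noncontiguous(dev, vaddr);
	dma_free_noncontiguous(dev, size, sgt, DMA_TO_DEVICE);
	return 0;
}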

int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/*
	 * ->dma_supported sets the bypass flag, so we must always call
	 * into the method here unless the device is truly direct mapped.
	 */
	if (!ops)
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
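
/*
 * Illustrative sketch (hypothetical helper): typical probe-time mask
 * setup.  Most drivers use the dma_set_mask_and_coherent() wrapper,
 * which combines the two setters above, and fall back to 32 bits if a
 * wider mask is rejected.
 */
static int example_set_masks(struct device *dev)
{
	int ret;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	return ret;
}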

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_map_direct(dev, ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		return dma_direct_need_sync(dev, dma_addr);
	return ops->sync_single_for_cpu || ops->sync_single_for_device;
}
EXPORT_SYMBOL_GPL(dma_need_sync);
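
/*
 * Illustrative sketch (hypothetical helper): callers such as the XDP
 * buffer pool cache the dma_need_sync() result at map time so that
 * per-buffer sync calls can be skipped when the mapping needs none.
 */
static void example_rx_complete(struct device *dev, dma_addr_t addr,
		size_t len, bool need_sync)
{
	/* need_sync was saved from dma_need_sync(dev, addr) at map time */
	if (need_sync)
		dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
	/* ... process the received data ... */
}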

unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);
739