xref: /linux/kernel/dma/direct.h (revision dabb83ecf404c74a75469e7694a0b891e71f61b7)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#ifndef _KERNEL_DMA_DIRECT_H
#define _KERNEL_DMA_DIRECT_H

#include <linux/dma-direct.h>
#include <linux/memremap.h>

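/* Out-of-line helpers; these are implemented in kernel/dma/direct.c. */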
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_can_mmap(struct device *dev);
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
bool dma_direct_all_ram_mapped(struct device *dev);
size_t dma_direct_max_mapping_size(struct device *dev);

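/*
 * When neither the architecture nor swiotlb has any work to do for a
 * scatterlist sync, the hook compiles away to an empty inline.
 */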
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

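/*
 * Likewise, unmapping and sync-for-cpu only need real implementations when
 * the architecture maintains caches around DMA or swiotlb may have bounced
 * the buffer.
 */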
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

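/*
 * Make CPU-written data visible to the device: copy the data into any
 * swiotlb bounce buffer first, then perform architecture cache maintenance
 * on the (possibly bounced) physical address for non-coherent devices.
 */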
static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	swiotlb_sync_single_for_device(dev, paddr, size, dir);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
}

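/*
 * Make device-written data visible to the CPU: invalidate the CPU caches
 * first on non-coherent systems, then copy any swiotlb bounce buffer back
 * to the original buffer.
 */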
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(paddr, size, dir);
		arch_sync_dma_for_cpu_all();
	}

	swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
}

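/*
 * Create a streaming mapping for @size bytes at @phys.  Three outcomes are
 * possible: the address is used directly (translated through phys_to_dma()
 * unless DMA_ATTR_MMIO is set), the buffer is bounced through swiotlb, or
 * DMA_MAPPING_ERROR is returned.  A minimal caller sketch (illustrative
 * only, not code from this file):
 *
 *	dma_addr_t dma = dma_direct_map_phys(dev, phys, len, dir, 0);
 *	if (dma == DMA_MAPPING_ERROR)
 *		return -EIO;
 */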
static inline dma_addr_t dma_direct_map_phys(struct device *dev,
		phys_addr_t phys, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dma_addr;

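	/*
	 * Forced bouncing (e.g. "swiotlb=force", memory-encrypted guests or
	 * restricted DMA pools): everything must go through swiotlb.  MMIO
	 * and must-be-coherent mappings cannot be satisfied by a bounce
	 * buffer, so they fail outright.
	 */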
	if (is_swiotlb_force_bounce(dev)) {
		if (attrs & (DMA_ATTR_MMIO | DMA_ATTR_REQUIRE_COHERENT))
			return DMA_MAPPING_ERROR;

		return swiotlb_map(dev, phys, size, dir, attrs);
	}

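	/*
	 * With DMA_ATTR_MMIO, @phys is a bus address (e.g. peer-to-peer
	 * MMIO) and is used as the DMA address as-is; it only has to fit
	 * the device's addressing limits.  Normal RAM is translated via
	 * phys_to_dma() and falls back to swiotlb bouncing when the device
	 * cannot reach the address or kmalloc alignment requires it.
	 */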
	if (attrs & DMA_ATTR_MMIO) {
		dma_addr = phys;
		if (unlikely(!dma_capable(dev, dma_addr, size, false)))
			goto err_overflow;
	} else {
		dma_addr = phys_to_dma(dev, phys);
		if (unlikely(!dma_capable(dev, dma_addr, size, true)) ||
		    dma_kmalloc_needs_bounce(dev, size, dir)) {
			if (is_swiotlb_active(dev) &&
			    !(attrs & DMA_ATTR_REQUIRE_COHERENT))
				return swiotlb_map(dev, phys, size, dir, attrs);

			goto err_overflow;
		}
	}

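	/*
	 * Non-coherent devices need cache maintenance before the device
	 * touches the buffer; MMIO is uncached, and DMA_ATTR_SKIP_CPU_SYNC
	 * callers handle it themselves.
	 */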
	if (!dev_is_dma_coherent(dev) &&
	    !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
		arch_sync_dma_for_device(phys, size, dir);
	return dma_addr;

err_overflow:
	dev_WARN_ONCE(
		dev, 1,
		"DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
		&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
	return DMA_MAPPING_ERROR;
}

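/*
 * Tear down a mapping created by dma_direct_map_phys(): sync the buffer back
 * for the CPU, then release any swiotlb bounce buffer.  Calling
 * swiotlb_tbl_unmap_single() unconditionally is assumed safe here because it
 * is a no-op for addresses outside the bounce-buffer pools.
 */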
static inline void dma_direct_unmap_phys(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys;

	if (attrs & (DMA_ATTR_MMIO | DMA_ATTR_REQUIRE_COHERENT))
		/* nothing to do: uncached and no swiotlb */
		return;

	phys = dma_to_phys(dev, addr);
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	swiotlb_tbl_unmap_single(dev, phys, size, dir,
					 attrs | DMA_ATTR_SKIP_CPU_SYNC);
}
#endif /* _KERNEL_DMA_DIRECT_H */