/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#ifndef _KERNEL_DMA_DIRECT_H
#define _KERNEL_DMA_DIRECT_H

#include <linux/dma-direct.h>
#include <linux/memremap.h>

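/*
 * Internal dma-direct helpers shared with the core DMA mapping code in
 * kernel/dma/; this header is not meant to be included by drivers.
 */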
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_can_mmap(struct device *dev);
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
bool dma_direct_all_ram_mapped(struct device *dev);
size_t dma_direct_max_mapping_size(struct device *dev);

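/*
 * The scatter-gather sync and unmap helpers below only do real work when
 * the architecture implements DMA cache maintenance or SWIOTLB is built
 * in; otherwise they compile to empty stubs.
 */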
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

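/*
 * Make @size bytes at @addr visible to the device: copy the CPU's data
 * into the SWIOTLB bounce buffer if the address is bounced, then perform
 * the architecture cache maintenance (and flush) for non-coherent devices.
 */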
static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	swiotlb_sync_single_for_device(dev, paddr, size, dir);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_device(paddr, size, dir);
		arch_sync_dma_flush();
	}
}

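/*
 * Make device writes at @addr visible to the CPU: architecture cache
 * maintenance first (with an explicit arch_sync_dma_flush() only when
 * @flush is set), then copy any SWIOTLB bounce buffer contents back to
 * the original buffer.
 */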
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir,
		bool flush)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(paddr, size, dir);
		if (flush)
			arch_sync_dma_flush();
		arch_sync_dma_for_cpu_all();
	}

	swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
}

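/*
 * Map @size bytes at physical address @phys for direct DMA.
 *
 * Order of decisions:
 *  - when SWIOTLB bouncing is forced for the device, the buffer is
 *    bounced unless it is already device-shared (DMA_ATTR_CC_SHARED);
 *    MMIO and DMA_ATTR_REQUIRE_COHERENT mappings cannot be bounced and
 *    fail.  DMA_ATTR_CC_SHARED without forced bouncing also fails.
 *  - DMA_ATTR_MMIO uses @phys as the bus address directly and
 *    DMA_ATTR_CC_SHARED translates it without the encryption bit;
 *    everything else goes through phys_to_dma() and may still be
 *    bounced when the device cannot address the memory or kmalloc
 *    alignment requires it.
 *  - non-coherent devices then get cache maintenance unless the caller
 *    skips it or the target is MMIO; @flush additionally issues
 *    arch_sync_dma_flush().
 *
 * A hypothetical caller (e.g. a dma_direct_map_page()-style wrapper)
 * would look roughly like:
 *
 *	dma_addr_t dma = dma_direct_map_phys(dev, page_to_phys(page) + offset,
 *					     size, dir, attrs, true);
 *	if (dma == DMA_MAPPING_ERROR)
 *		return DMA_MAPPING_ERROR;
 */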
static inline dma_addr_t dma_direct_map_phys(struct device *dev,
		phys_addr_t phys, size_t size, enum dma_data_direction dir,
		unsigned long attrs, bool flush)
{
	dma_addr_t dma_addr;

	if (is_swiotlb_force_bounce(dev)) {
		if (!(attrs & DMA_ATTR_CC_SHARED)) {
			if (attrs & (DMA_ATTR_MMIO | DMA_ATTR_REQUIRE_COHERENT))
				return DMA_MAPPING_ERROR;

			return swiotlb_map(dev, phys, size, dir, attrs);
		}
	} else if (attrs & DMA_ATTR_CC_SHARED) {
		return DMA_MAPPING_ERROR;
	}

	if (attrs & DMA_ATTR_MMIO) {
		dma_addr = phys;
		if (unlikely(!dma_capable(dev, dma_addr, size, false)))
			goto err_overflow;
	} else if (attrs & DMA_ATTR_CC_SHARED) {
		dma_addr = phys_to_dma_unencrypted(dev, phys);
		if (unlikely(!dma_capable(dev, dma_addr, size, false)))
			goto err_overflow;
	} else {
		dma_addr = phys_to_dma(dev, phys);
		if (unlikely(!dma_capable(dev, dma_addr, size, true)) ||
		    dma_kmalloc_needs_bounce(dev, size, dir)) {
			if (is_swiotlb_active(dev) &&
			    !(attrs & DMA_ATTR_REQUIRE_COHERENT))
				return swiotlb_map(dev, phys, size, dir, attrs);

			goto err_overflow;
		}
	}

	if (!dev_is_dma_coherent(dev) &&
	    !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO))) {
		arch_sync_dma_for_device(phys, size, dir);
		if (flush)
			arch_sync_dma_flush();
	}
	return dma_addr;

err_overflow:
	dev_WARN_ONCE(
		dev, 1,
		"DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
		&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
	return DMA_MAPPING_ERROR;
}

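/*
 * Undo dma_direct_map_phys().  Nothing needs to be done for MMIO or
 * DMA_ATTR_REQUIRE_COHERENT mappings; everything else is synced for the
 * CPU (unless the caller skips it) and any SWIOTLB bounce buffer is
 * released.
 */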
static inline void dma_direct_unmap_phys(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs,
		bool flush)
{
	phys_addr_t phys;

	if (attrs & (DMA_ATTR_MMIO | DMA_ATTR_REQUIRE_COHERENT))
		/* nothing to do: uncached and no swiotlb */
		return;

	phys = dma_to_phys(dev, addr);
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir, flush);

	swiotlb_tbl_unmap_single(dev, phys, size, dir,
				 attrs | DMA_ATTR_SKIP_CPU_SYNC);
}
#endif /* _KERNEL_DMA_DIRECT_H */