/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/cache.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)

/*
 * DMA_ATTR_MMIO - Indicates a memory-mapped I/O (MMIO) region for DMA mapping
 *
 * This attribute indicates the physical address is not normal system
 * memory. It may not be used with kmap*()/phys_to_virt()/phys_to_page()
 * functions, it may not be cacheable, and access using CPU load/store
 * instructions may not be allowed.
 *
 * Usually this will be used to describe MMIO addresses, or other non-cacheable
 * register addresses. When DMA mapping this sort of address we call
 * the operation Peer to Peer, as one device is DMA'ing to another device.
 * For PCI devices the p2pdma APIs must be used to determine if DMA_ATTR_MMIO
 * is appropriate.
 *
 * For architectures that require cache flushing for DMA coherence,
 * DMA_ATTR_MMIO will not perform any cache flushing. The address
 * provided must never be mapped cacheable into the CPU.
 */
#define DMA_ATTR_MMIO		(1UL << 10)

/*
 * DMA_ATTR_DEBUGGING_IGNORE_CACHELINES: Indicates that the mapping may share
 * a CPU cache line with other mappings. All mappings sharing a cacheline must
 * have this attribute for this to be considered safe.
 */
#define DMA_ATTR_DEBUGGING_IGNORE_CACHELINES	(1UL << 11)

/*
 * DMA_ATTR_REQUIRE_COHERENT: Indicates that DMA coherency is required.
 * Mappings that carry this attribute can't work with SWIOTLB or cache
 * flushing.
 */
#define DMA_ATTR_REQUIRE_COHERENT	(1UL << 12)
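
/*
 * Example: attributes are OR'ed together and passed via the "attrs" argument
 * of the *_attrs() interfaces declared below. A minimal sketch, assuming a
 * driver-owned "dev" and "size":
 *
 *	dma_addr_t dma_handle;
 *	void *buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
 *				    DMA_ATTR_NO_KERNEL_MAPPING |
 *				    DMA_ATTR_NO_WARN);
 */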

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.  It can
 * be given to a device to use as a DMA source or target.  It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed.  It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)
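
/*
 * Example: a returned dma_addr_t must be checked with dma_mapping_error()
 * before use. A minimal sketch, assuming a driver-owned "dev", "buf" and
 * "len":
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */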

#define DMA_BIT_MASK(n)	GENMASK_ULL((n) - 1, 0)

struct dma_iova_state {
	dma_addr_t addr;
	u64 __size;
};

/*
 * Use the high bit to mark if we used swiotlb for one or more ranges.
 */
#define DMA_IOVA_USE_SWIOTLB		(1ULL << 63)

static inline size_t dma_iova_size(struct dma_iova_state *state)
{
	/* Casting is needed for 32-bit systems */
	return (size_t)(state->__size & ~DMA_IOVA_USE_SWIOTLB);
}

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
		return -ENOMEM;
	return 0;
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
bool dma_addressing_limited(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
size_t dma_opt_mapping_size(struct device *dev);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_phys(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
	return false;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline bool dma_addressing_limited(struct device *dev)
{
	return false;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline size_t dma_opt_mapping_size(struct device *dev)
{
	return 0;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
		struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
	return -EINVAL;
}
#endif /* CONFIG_HAS_DMA */

#ifdef CONFIG_IOMMU_DMA
/**
 * dma_use_iova - check if the IOVA API is used for this state
 * @state: IOVA state
 *
 * Return %true if the DMA transfer uses the dma_iova_*() calls or %false if
 * they can't be used.
 */
static inline bool dma_use_iova(struct dma_iova_state *state)
{
	return state->__size != 0;
}

bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
		phys_addr_t phys, size_t size);
void dma_iova_free(struct device *dev, struct dma_iova_state *state);
void dma_iova_destroy(struct device *dev, struct dma_iova_state *state,
		size_t mapped_len, enum dma_data_direction dir,
		unsigned long attrs);
int dma_iova_sync(struct device *dev, struct dma_iova_state *state,
		size_t offset, size_t size);
int dma_iova_link(struct device *dev, struct dma_iova_state *state,
		phys_addr_t phys, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_iova_unlink(struct device *dev, struct dma_iova_state *state,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
#else /* CONFIG_IOMMU_DMA */
static inline bool dma_use_iova(struct dma_iova_state *state)
{
	return false;
}
static inline bool dma_iova_try_alloc(struct device *dev,
		struct dma_iova_state *state, phys_addr_t phys, size_t size)
{
	return false;
}
static inline void dma_iova_free(struct device *dev,
		struct dma_iova_state *state)
{
}
static inline void dma_iova_destroy(struct device *dev,
		struct dma_iova_state *state, size_t mapped_len,
		enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_iova_sync(struct device *dev,
		struct dma_iova_state *state, size_t offset, size_t size)
{
	return -EOPNOTSUPP;
}
static inline int dma_iova_link(struct device *dev,
		struct dma_iova_state *state, phys_addr_t phys, size_t offset,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline void dma_iova_unlink(struct device *dev,
		struct dma_iova_state *state, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
}
#endif /* CONFIG_IOMMU_DMA */
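
/*
 * Example: rough flow for the IOVA API above (a sketch only; error handling
 * is trimmed, and "dev", "phys", "len" and "dir" are assumed to be supplied
 * by the caller):
 *
 *	struct dma_iova_state state = {};
 *
 *	if (dma_iova_try_alloc(dev, &state, phys, len)) {
 *		if (dma_iova_link(dev, &state, phys, 0, len, dir, 0)) {
 *			dma_iova_free(dev, &state);
 *			return -EIO;
 *		}
 *		dma_iova_sync(dev, &state, 0, len);
 *		... perform the DMA ...
 *		dma_iova_destroy(dev, &state, len, dir, 0);
 *	} else {
 *		... dma_use_iova(&state) is false; map with dma_map_phys() ...
 *	}
 */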

#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr);

static inline bool dma_dev_need_sync(const struct device *dev)
{
	/* Always call DMA sync operations when debugging is enabled */
	return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_device(dev, sg, nelems, dir);
}

static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false;
}
bool dma_need_unmap(struct device *dev);
#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
static inline bool dma_dev_need_sync(const struct device *dev)
{
	return false;
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
static inline bool dma_need_unmap(struct device *dev)
{
	return false;
}
#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
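
/*
 * Example: ownership hand-off for a long-lived streaming mapping using the
 * wrappers above. A minimal sketch, assuming "dma" and "len" describe a
 * buffer previously mapped with dma_map_single(..., DMA_FROM_DEVICE):
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the data written by the device ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	... the device may write to the buffer again ...
 */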

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page);

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
	return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}
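
/*
 * Example: non-coherent allocations need explicit synchronization around CPU
 * and device accesses. A minimal sketch, assuming a driver-owned "dev" and
 * "size":
 *
 *	dma_addr_t dma;
 *	void *buf = dma_alloc_noncoherent(dev, size, &dma,
 *					  DMA_BIDIRECTIONAL, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	... CPU fills buf ...
 *	dma_sync_single_for_device(dev, dma, size, DMA_BIDIRECTIONAL);
 *	... device accesses the buffer ...
 *	dma_free_noncoherent(dev, size, buf, dma, DMA_BIDIRECTIONAL);
 */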

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
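
/*
 * Example: the full map/sync/unmap cycle for an sg_table. A minimal sketch,
 * assuming "sgt" was populated by the driver (e.g. via sg_alloc_table()):
 *
 *	int ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
 *
 *	if (ret)
 *		return ret;
 *	... device writes into the buffer ...
 *	dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);
 *	... CPU reads the data ...
 *	dma_sync_sgtable_for_device(dev, sgt, DMA_FROM_DEVICE);
 *	... device may write again ...
 *	dma_unmap_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
 */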

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
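
/*
 * Example: coherent allocations need no explicit synchronization. A minimal
 * sketch, assuming a driver-owned "dev" and "size":
 *
 *	dma_addr_t dma_handle;
 *	void *vaddr = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *
 *	if (!vaddr)
 *		return -ENOMEM;
 *	... share vaddr/dma_handle with the device ...
 *	dma_free_coherent(dev, size, vaddr, dma_handle);
 */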


static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
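
/*
 * Example: setting both masks in a driver's probe path. A minimal sketch,
 * assuming the device can address 48 bits (an illustrative limit):
 *
 *	int ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
 *
 *	if (ret)
 *		return ret;
 */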

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->max_segment_size = size;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog2() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL, a boundary of U32_MAX is assumed; this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}

static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->segment_boundary_mask = mask;
}

static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
	if (dev->dma_parms)
		return dev->dma_parms->min_align_mask;
	return 0;
}

static inline void dma_set_min_align_mask(struct device *dev,
		unsigned int min_align_mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->min_align_mask = min_align_mask;
}

#ifndef dma_get_cache_alignment
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_HAS_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

#ifdef ARCH_HAS_DMA_MINALIGN
#define ____dma_from_device_aligned __aligned(ARCH_DMA_MINALIGN)
#else
#define ____dma_from_device_aligned
#endif
/* Mark start of DMA buffer */
#define __dma_from_device_group_begin(GROUP)			\
	__cacheline_group_begin(GROUP) ____dma_from_device_aligned
/* Mark end of DMA buffer */
#define __dma_from_device_group_end(GROUP)			\
	__cacheline_group_end(GROUP) ____dma_from_device_aligned

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)           \
	({ typeof(PTR) __p __maybe_unused = PTR; 0; })
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  \
	do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#define dma_unmap_len(PTR, LEN_NAME)             \
	({ typeof(PTR) __p __maybe_unused = PTR; 0; })
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    \
	do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#endif
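
/*
 * Example: how a driver typically embeds the unmap state. A minimal sketch;
 * "struct foo_buf" and its fields are illustrative names. The fields compile
 * away when CONFIG_NEED_DMA_MAP_STATE is not set:
 *
 *	struct foo_buf {
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(buf, mapping, dma);
 *	dma_unmap_len_set(buf, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(buf, mapping),
 *			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
 */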

#endif /* _LINUX_DMA_MAPPING_H */