/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/cache.h>

/*
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED	(1UL << 9)

/*
 * DMA_ATTR_MMIO - Indicates a memory-mapped I/O (MMIO) region for DMA mapping
 *
 * This attribute indicates the physical address is not normal system
 * memory. It may not be used with the kmap*()/phys_to_virt()/phys_to_page()
 * functions, it may not be cacheable, and access using CPU load/store
 * instructions may not be allowed.
 *
 * Usually this will be used to describe MMIO addresses, or other non-cacheable
 * register addresses. When DMA mapping this sort of address we call
 * the operation Peer to Peer, as one device is DMA'ing to another device.
 * For PCI devices the p2pdma APIs must be used to determine if DMA_ATTR_MMIO
 * is appropriate.
 *
 * For architectures that require cache flushing for DMA coherence,
 * DMA_ATTR_MMIO will not perform any cache flushing. The address
 * provided must never be mapped cacheable into the CPU.
 */
#define DMA_ATTR_MMIO		(1UL << 10)

/*
 * DMA_ATTR_DEBUGGING_IGNORE_CACHELINES: Indicates that mappings may overlap
 * within a CPU cacheline. All mappings sharing a cacheline must carry this
 * attribute for the overlap to be considered safe.
 */
#define DMA_ATTR_DEBUGGING_IGNORE_CACHELINES	(1UL << 11)

/*
 * DMA_ATTR_REQUIRE_COHERENT: Indicates that DMA coherency is required.
 * Mappings that carry this attribute cannot work with SWIOTLB bounce
 * buffering or CPU cache flushing.
 */
#define DMA_ATTR_REQUIRE_COHERENT	(1UL << 12)
/*
 * DMA_ATTR_CC_SHARED: Indicates the DMA mapping is shared (decrypted) for
 * confidential computing guests. For normal system memory the caller must have
 * called set_memory_decrypted(), and pgprot_decrypted must be used when
 * creating CPU PTEs for the mapping. The same shared semantic may be passed
 * to the vIOMMU when it sets up the IOPTE. For MMIO use together with
 * DMA_ATTR_MMIO to indicate shared MMIO. Unless DMA_ATTR_MMIO is provided
 * a struct page is required.
 */
#define DMA_ATTR_CC_SHARED	(1UL << 13)
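
/*
 * Attributes are a bitmask and may be combined before being handed to the
 * *_attrs() entry points. A minimal sketch ("dev", "size" and "dma" are
 * hypothetical), mirroring what dma_alloc_wc() below does:
 *
 *	void *vaddr = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL,
 *				      DMA_ATTR_WRITE_COMBINE |
 *				      DMA_ATTR_NO_WARN);
 */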

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
 * be given to a device to use as a DMA source or target. It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code returned when a mapping fails.
 * Drivers should not test for it directly, but check for it using
 * dma_mapping_error() instead.
 */
#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

#define DMA_BIT_MASK(n)	GENMASK_ULL((n) - 1, 0)

struct dma_iova_state {
	dma_addr_t addr;
	u64 __size;
};

/*
 * Use the high bit to mark if we used swiotlb for one or more ranges.
 */
#define DMA_IOVA_USE_SWIOTLB		(1ULL << 63)

static inline size_t dma_iova_size(struct dma_iova_state *state)
{
	/* Casting is needed for 32-bit systems */
	return (size_t)(state->__size & ~DMA_IOVA_USE_SWIOTLB);
}

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
		return -ENOMEM;
	return 0;
}
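
/*
 * Every streaming mapping must be checked before the address is handed to
 * hardware. A minimal sketch ("dev", "buf" and "len" are hypothetical):
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */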

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
bool dma_addressing_limited(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
size_t dma_opt_mapping_size(struct device *dev);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt);
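
/*
 * A minimal sketch of the noncontiguous allocator ("dev" and "size" are
 * hypothetical; error handling abbreviated):
 *
 *	struct sg_table *sgt = dma_alloc_noncontiguous(dev, size,
 *			DMA_BIDIRECTIONAL, GFP_KERNEL, 0);
 *	void *vaddr;
 *
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
 *	...
 *	dma_vunmap_noncontiguous(dev, vaddr);
 *	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
 */
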
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_phys(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
	return false;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline bool dma_addressing_limited(struct device *dev)
{
	return false;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline size_t dma_opt_mapping_size(struct device *dev)
{
	return 0;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
		struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
	return -EINVAL;
}
#endif /* CONFIG_HAS_DMA */

#ifdef CONFIG_IOMMU_DMA
/**
 * dma_use_iova - check if the IOVA API is used for this state
 * @state: IOVA state
 *
 * Return %true if the DMA transfer uses the dma_iova_*() calls or %false if
 * they can't be used.
 */
static inline bool dma_use_iova(struct dma_iova_state *state)
{
	return state->__size != 0;
}

bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
		phys_addr_t phys, size_t size);
void dma_iova_free(struct device *dev, struct dma_iova_state *state);
void dma_iova_destroy(struct device *dev, struct dma_iova_state *state,
		size_t mapped_len, enum dma_data_direction dir,
		unsigned long attrs);
int dma_iova_sync(struct device *dev, struct dma_iova_state *state,
		size_t offset, size_t size);
int dma_iova_link(struct device *dev, struct dma_iova_state *state,
		phys_addr_t phys, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_iova_unlink(struct device *dev, struct dma_iova_state *state,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
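
/*
 * A minimal sketch of the IOVA flow ("dev", "phys" and "size" are
 * hypothetical; error handling abbreviated). Callers that fail
 * dma_iova_try_alloc() are expected to fall back to the regular
 * dma_map_*() interfaces:
 *
 *	struct dma_iova_state state = {};
 *	int ret;
 *
 *	if (!dma_iova_try_alloc(dev, &state, phys, size))
 *		return -EIO;
 *
 *	ret = dma_iova_link(dev, &state, phys, 0, size, DMA_TO_DEVICE, 0);
 *	if (!ret)
 *		ret = dma_iova_sync(dev, &state, 0, size);
 *	...
 *	dma_iova_destroy(dev, &state, size, DMA_TO_DEVICE, 0);
 */
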
#else /* CONFIG_IOMMU_DMA */
static inline bool dma_use_iova(struct dma_iova_state *state)
{
	return false;
}
static inline bool dma_iova_try_alloc(struct device *dev,
		struct dma_iova_state *state, phys_addr_t phys, size_t size)
{
	return false;
}
static inline void dma_iova_free(struct device *dev,
		struct dma_iova_state *state)
{
}
static inline void dma_iova_destroy(struct device *dev,
		struct dma_iova_state *state, size_t mapped_len,
		enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_iova_sync(struct device *dev,
		struct dma_iova_state *state, size_t offset, size_t size)
{
	return -EOPNOTSUPP;
}
static inline int dma_iova_link(struct device *dev,
		struct dma_iova_state *state, phys_addr_t phys, size_t offset,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline void dma_iova_unlink(struct device *dev,
		struct dma_iova_state *state, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
}
#endif /* CONFIG_IOMMU_DMA */

#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr);

static inline bool dma_dev_need_sync(const struct device *dev)
{
	/* Always call DMA sync operations when debugging is enabled */
	return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_device(dev, sg, nelems, dir);
}

static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false;
}
bool dma_need_unmap(struct device *dev);
#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
static inline bool dma_dev_need_sync(const struct device *dev)
{
	return false;
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
static inline bool dma_need_unmap(struct device *dev)
{
	return false;
}
#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page);

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
	return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
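
/*
 * A minimal sketch of reusing a streaming mapping across transfers instead
 * of remapping it ("dev", "dma" and "len" are hypothetical; the buffer was
 * mapped DMA_FROM_DEVICE earlier):
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the received data ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	... the device may now DMA into the buffer again ...
 */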

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
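
/*
 * A minimal sketch of the full sg_table cycle ("dev" and "sgt" are
 * hypothetical; the table was populated beforehand, e.g. via
 * sg_alloc_table_from_pages()):
 *
 *	int ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
 *
 *	if (ret)
 *		return ret;
 *	...
 *	dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);
 *	... CPU accesses the data ...
 *	dma_sync_sgtable_for_device(dev, sgt, DMA_FROM_DEVICE);
 *	...
 *	dma_unmap_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
 */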

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
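
/*
 * A minimal sketch of a coherent allocation for a descriptor ring ("dev"
 * and "ring_size" are hypothetical):
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, ring_size, &ring_dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	... program ring_dma into the device, access "ring" from the CPU ...
 *	dma_free_coherent(dev, ring_size, ring, ring_dma);
 */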

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
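
/*
 * A minimal sketch of setting the masks at probe time ("pdev" is
 * hypothetical); DMA_BIT_MASK(32), for example, expands to 0xffffffff:
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
 *		return -EIO;
 */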

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->max_segment_size = size;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog2() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed; this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}

static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->segment_boundary_mask = mask;
}

static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
	if (dev->dma_parms)
		return dev->dma_parms->min_align_mask;
	return 0;
}

static inline void dma_set_min_align_mask(struct device *dev,
		unsigned int min_align_mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->min_align_mask = min_align_mask;
}

#ifndef dma_get_cache_alignment
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_HAS_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

#ifdef ARCH_HAS_DMA_MINALIGN
#define ____dma_from_device_aligned	__aligned(ARCH_DMA_MINALIGN)
#else
#define ____dma_from_device_aligned
#endif
/* Mark start of DMA buffer */
#define __dma_from_device_group_begin(GROUP) \
	__cacheline_group_begin(GROUP) ____dma_from_device_aligned
/* Mark end of DMA buffer */
#define __dma_from_device_group_end(GROUP) \
	__cacheline_group_end(GROUP) ____dma_from_device_aligned
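
/*
 * A minimal sketch of fencing off a device-written buffer inside a larger
 * structure with the group markers above ("struct my_priv" and its fields
 * are hypothetical):
 *
 *	struct my_priv {
 *		spinlock_t lock;
 *		__dma_from_device_group_begin(rx_buf);
 *		u8 rx_buf[SZ_2K];
 *		__dma_from_device_group_end(rx_buf);
 *		int stats;
 *	};
 */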

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
		dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr,
		size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			DMA_ATTR_WRITE_COMBINE);
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME) \
	({ typeof(PTR) __p __maybe_unused = PTR; 0; })
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) \
	do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#define dma_unmap_len(PTR, LEN_NAME) \
	({ typeof(PTR) __p __maybe_unused = PTR; 0; })
#define dma_unmap_len_set(PTR, LEN_NAME, VAL) \
	do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#endif
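
/*
 * A minimal sketch of the map-state helpers, which compile away when
 * CONFIG_NEED_DMA_MAP_STATE is not set ("struct my_ring_entry" and "entry"
 * are hypothetical):
 *
 *	struct my_ring_entry {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(entry, mapping, dma);
 *	dma_unmap_len_set(entry, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(entry, mapping),
 *			 dma_unmap_len(entry, len), DMA_TO_DEVICE);
 */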

#endif /* _LINUX_DMA_MAPPING_H */