// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/iommu-dma.h>
#include <linux/kmsan.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "direct.h"

#define CREATE_TRACE_POINTS
#include <trace/events/dma.h>

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
bool dma_default_coherent = IS_ENABLED(CONFIG_ARCH_DMA_DEFAULT_COHERENT);
#endif

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};
static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
	dma_free_coherent(dev, size, vaddr, dma_handle);
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non-coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs(). Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
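
/*
 * Example: a minimal usage sketch of the managed API above, for a
 * hypothetical driver (foo_probe(), foo_hw_init() and the SZ_4K size are
 * illustrative, not taken from this file). Because the allocation is
 * devres-managed, the error and remove paths need no explicit
 * dma_free_attrs():
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		dma_addr_t dma;
 *		void *cpu;
 *
 *		cpu = dmam_alloc_attrs(dev, SZ_4K, &dma, GFP_KERNEL, 0);
 *		if (!cpu)
 *			return -ENOMEM;
 *		return foo_hw_init(dev, cpu, dma);
 *	}
 *
 * dmam_free_coherent() is only needed when the buffer has to go away
 * before driver detach.
 */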

static bool dma_go_direct(struct device *dev, dma_addr_t mask,
		const struct dma_map_ops *ops)
{
	if (use_dma_iommu(dev))
		return false;

	if (likely(!ops))
		return true;

#ifdef CONFIG_DMA_OPS_BYPASS
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			    dma_direct_get_required_mask(dev);
#endif
	return false;
}


/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

static inline bool dma_map_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, *dev->dma_mask, ops);
}

dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	bool is_mmio = attrs & DMA_ATTR_MMIO;
	bool is_cc_shared = attrs & DMA_ATTR_CC_SHARED;
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (!dev_is_dma_coherent(dev) && (attrs & DMA_ATTR_REQUIRE_COHERENT))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops) ||
	    (!is_mmio && !is_cc_shared &&
	     arch_dma_map_phys_direct(dev, phys + size)))
		addr = dma_direct_map_phys(dev, phys, size, dir, attrs, true);
	else if (is_cc_shared)
		return DMA_MAPPING_ERROR;
	else if (use_dma_iommu(dev))
		addr = iommu_dma_map_phys(dev, phys, size, dir, attrs);
	else if (ops->map_phys)
		addr = ops->map_phys(dev, phys, size, dir, attrs);

	if (!is_mmio)
		kmsan_handle_dma(phys, size, dir);
	trace_dma_map_phys(dev, phys, addr, size, dir, attrs);
	debug_dma_map_phys(dev, phys, size, dir, addr, attrs);

	return addr;
}
EXPORT_SYMBOL_GPL(dma_map_phys);
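
/*
 * Example: a hedged sketch of the physical-address based API above
 * (hypothetical caller; "paddr" and "len" stand for a physical range the
 * driver already owns). DMA_ATTR_MMIO must be set when the range is MMIO
 * rather than kmappable RAM:
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_phys(dev, paddr, len, DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_phys(dev, dma, len, DMA_TO_DEVICE, 0);
 */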

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;

	if (unlikely(attrs & DMA_ATTR_MMIO))
		return DMA_MAPPING_ERROR;

	if (IS_ENABLED(CONFIG_DMA_API_DEBUG) &&
	    WARN_ON_ONCE(is_zone_device_page(page)))
		return DMA_MAPPING_ERROR;

	return dma_map_phys(dev, phys, size, dir, attrs);
}
EXPORT_SYMBOL(dma_map_page_attrs);

void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	bool is_mmio = attrs & DMA_ATTR_MMIO;
	bool is_cc_shared = attrs & DMA_ATTR_CC_SHARED;

	BUG_ON(!valid_dma_direction(dir));

	if (dma_map_direct(dev, ops) ||
	    (!is_mmio && !is_cc_shared &&
	     arch_dma_unmap_phys_direct(dev, addr + size)))
		dma_direct_unmap_phys(dev, addr, size, dir, attrs, true);
	else if (is_cc_shared)
		return;
	else if (use_dma_iommu(dev))
		iommu_dma_unmap_phys(dev, addr, size, dir, attrs);
	else if (ops->unmap_phys)
		ops->unmap_phys(dev, addr, size, dir, attrs);
	trace_dma_unmap_phys(dev, addr, size, dir, attrs);
	debug_dma_unmap_phys(dev, addr, size, dir);
}
EXPORT_SYMBOL_GPL(dma_unmap_phys);

void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	if (unlikely(attrs & DMA_ATTR_MMIO))
		return;

	dma_unmap_phys(dev, addr, size, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);
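
/*
 * Example: the classic streaming pattern on top of the page-based
 * wrappers above (a sketch; "page", "offset" and "len" are whatever the
 * driver is transferring). Size, direction and attributes must match
 * between the map and unmap calls:
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_page_attrs(dev, page, offset, len,
 *				 DMA_FROM_DEVICE, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...			(hardware writes, completion is signalled)
 *	dma_unmap_page_attrs(dev, dma, len, DMA_FROM_DEVICE, 0);
 */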

static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
	 int nents, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));

	if (!dev_is_dma_coherent(dev) && (attrs & DMA_ATTR_REQUIRE_COHERENT))
		return -EOPNOTSUPP;

	if (WARN_ON_ONCE(!dev->dma_mask))
		return 0;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_sg_direct(dev, sg, nents))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else if (use_dma_iommu(dev))
		ents = iommu_dma_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);

	if (ents > 0) {
		kmsan_handle_dma_sg(sg, nents, dir);
		trace_dma_map_sg(dev, sg, nents, ents, dir, attrs);
		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
	} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
				ents != -EIO && ents != -EREMOTEIO)) {
		trace_dma_map_sg_err(dev, sg, nents, ents, dir, attrs);
		return -EIO;
	}

	return ents;
}

/**
 * dma_map_sg_attrs - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sg: The scatterlist describing the buffer
 * @nents: Number of entries to map
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist passed in the sg argument with
 * nents segments for the @dir DMA operation by the @dev device.
 *
 * Returns the number of mapped entries (which can be less than nents)
 * on success. Zero is returned for any error.
 *
 * dma_unmap_sg_attrs() should be used to unmap the buffer with the
 * original sg and original nents (not the value returned by this function).
 */
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		    int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int ret;

	ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
	if (ret < 0)
		return 0;
	return ret;
}
EXPORT_SYMBOL(dma_map_sg_attrs);
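
/*
 * Example: a usage sketch for dma_map_sg_attrs() (hypothetical driver;
 * foo_hw_set_seg() stands in for whatever programs one hardware
 * descriptor). The hardware is programmed with the *returned* count,
 * while the unmap side takes the original nents:
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, s, count, i)
 *		foo_hw_set_seg(hw, i, sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);
 */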

/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success, the
 * ownership for the buffer is transferred to the DMA domain. One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before touching the
 * buffer by the CPU.
 *
 * Returns 0 on success or a negative error code on error. The following
 * error codes are supported with the given meaning:
 *
 *   -EINVAL		An invalid argument, unaligned access or other error
 *			in usage. Will not succeed if retried.
 *   -ENOMEM		Insufficient resources (like memory or IOVA space) to
 *			complete the mapping. Should succeed if retried later.
 *   -EIO		Legacy error code with an unknown meaning, e.g. this is
 *			returned if a lower level call returned
 *			DMA_MAPPING_ERROR.
 *   -EREMOTEIO		The DMA device cannot access P2PDMA memory specified
 *			in the sg_table. This will not succeed if retried.
 */
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents < 0)
		return nents;
	sgt->nents = nents;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_map_sgtable);
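
/*
 * Example: the sg_table based variant, which reports real error codes
 * (a sketch; the table is assumed to be populated already, e.g. via
 * sg_alloc_table()). dma_unmap_sgtable() and for_each_sgtable_dma_sg()
 * are helpers from <linux/dma-mapping.h> and <linux/scatterlist.h>:
 *
 *	struct scatterlist *s;
 *	int i, ret;
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 *	if (ret)
 *		return ret;	(only -ENOMEM is worth retrying later)
 *	for_each_sgtable_dma_sg(sgt, s, i)
 *		...		(use sg_dma_address(s) / sg_dma_len(s))
 *	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 */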

void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	trace_dma_unmap_sg(dev, sg, nents, dir, attrs);
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_sg_direct(dev, sg, nents))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (use_dma_iommu(dev))
		iommu_dma_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);

dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (IS_ENABLED(CONFIG_DMA_API_DEBUG) &&
	    WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
		return DMA_MAPPING_ERROR;

	return dma_map_phys(dev, phys_addr, size, dir, attrs | DMA_ATTR_MMIO);
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_phys(dev, addr, size, dir, attrs | DMA_ATTR_MMIO);
}
EXPORT_SYMBOL(dma_unmap_resource);
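
/*
 * Example: mapping MMIO, typically another device's PCI BAR, for
 * device-to-device transfers (a sketch; "peer" is a hypothetical
 * struct pci_dev). Unlike dma_map_page_attrs(), the physical address
 * must not be RAM, which the debug check above enforces:
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_resource(dev, pci_resource_start(peer, 0), len,
 *			       DMA_BIDIRECTIONAL, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_resource(dev, dma, len, DMA_BIDIRECTIONAL, 0);
 */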

#ifdef CONFIG_DMA_NEED_SYNC
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir, true);
	else if (use_dma_iommu(dev))
		iommu_dma_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	trace_dma_sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(__dma_sync_single_for_cpu);

void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (use_dma_iommu(dev))
		iommu_dma_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	trace_dma_sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(__dma_sync_single_for_device);

void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (use_dma_iommu(dev))
		iommu_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	trace_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(__dma_sync_sg_for_cpu);

void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (use_dma_iommu(dev))
		iommu_dma_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	trace_dma_sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(__dma_sync_sg_for_device);

bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		/*
		 * dma_skip_sync could've been reset on first SWIOTLB buffer
		 * mapping, but @dma_addr is not necessarily an SWIOTLB buffer.
		 * In this case, fall back to a more granular check.
		 */
		return dma_direct_need_sync(dev, dma_addr);
	return true;
}
EXPORT_SYMBOL_GPL(__dma_need_sync);

/**
 * dma_need_unmap - does this device need dma_unmap_* operations
 * @dev: device to check
 *
 * If this function returns %false, drivers can skip calling dma_unmap_* after
 * finishing an I/O. This function must be called after all mappings that might
 * need to be unmapped have been performed.
 */
bool dma_need_unmap(struct device *dev)
{
	if (!dma_map_direct(dev, get_dma_ops(dev)))
		return true;
	if (!dev->dma_skip_sync)
		return true;
	return IS_ENABLED(CONFIG_DMA_API_DEBUG);
}
EXPORT_SYMBOL_GPL(dma_need_unmap);
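
/*
 * Example: how a driver might use dma_need_unmap() to avoid storing
 * per-request DMA addresses (a sketch; "req" and its fields are
 * hypothetical). The check happens after mapping, and the same condition
 * guards the unmap on the completion path:
 *
 *	if (dma_need_unmap(dev)) {
 *		req->dma = dma;
 *		req->len = len;
 *	}
 *	...
 *	if (dma_need_unmap(dev))
 *		dma_unmap_single(dev, req->dma, req->len, DMA_TO_DEVICE);
 */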

static void dma_setup_need_sync(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops) || use_dma_iommu(dev))
		/*
		 * dma_skip_sync will be reset to %false on first SWIOTLB buffer
		 * mapping, if any. During the device initialization, it's
		 * enough to check only for DMA coherence.
		 */
		dev->dma_skip_sync = dev_is_dma_coherent(dev);
	else if (!ops->sync_single_for_device && !ops->sync_single_for_cpu &&
		 !ops->sync_sg_for_device && !ops->sync_sg_for_cpu)
		/*
		 * Synchronization is not possible when none of the DMA sync
		 * ops is set.
		 */
		dev->dma_skip_sync = true;
	else
		dev->dma_skip_sync = false;
}
#else /* !CONFIG_DMA_NEED_SYNC */
static inline void dma_setup_need_sync(struct device *dev) { }
#endif /* !CONFIG_DMA_NEED_SYNC */

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scatterlist. This presents a couple of problems:
 * 1. Not all memory allocated via the coherent DMA APIs is backed by
 *    a struct page
 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
 *    as we will try to flush the memory through a different alias to that
 *    actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (use_dma_iommu(dev))
		return iommu_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (dev_is_dma_coherent(dev))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_can_mmap(dev);
	if (use_dma_iommu(dev))
		return true;
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space. The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (use_dma_iommu(dev))
		return iommu_dma_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
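
/*
 * Example: a file_operations ->mmap handler exporting a coherent buffer
 * saved at allocation time (a sketch; "struct foo" holding the dev,
 * cpu_addr, dma_addr and size from dma_alloc_attrs() is hypothetical):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo *foo = file->private_data;
 *
 *		if (!dma_can_mmap(foo->dev))
 *			return -ENXIO;
 *		return dma_mmap_attrs(foo->dev, vma, foo->cpu_addr,
 *				      foo->dma_addr, foo->size, 0);
 *	}
 */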

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_required_mask(dev);

	if (use_dma_iommu(dev))
		return DMA_BIT_MASK(32);

	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware). As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs, and at least not actively
	 * harmful in general.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	/*
	 * DMA allocations can never be turned back into a page pointer, so
	 * requesting compound pages doesn't make sense (and can't even be
	 * supported at all by various backends).
	 */
	if (WARN_ON_ONCE(flag & __GFP_COMP))
		return NULL;

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) {
		trace_dma_alloc(dev, cpu_addr, *dma_handle, size,
				DMA_BIDIRECTIONAL, flag, attrs);
		return cpu_addr;
	}

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_alloc_direct(dev, ops) || arch_dma_alloc_direct(dev)) {
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	} else if (use_dma_iommu(dev)) {
		cpu_addr = iommu_dma_alloc(dev, size, dma_handle, flag, attrs);
	} else if (ops->alloc) {
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	} else {
		trace_dma_alloc(dev, NULL, 0, size, DMA_BIDIRECTIONAL, flag,
				attrs);
		return NULL;
	}

	trace_dma_alloc(dev, cpu_addr, *dma_handle, size, DMA_BIDIRECTIONAL,
			flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	trace_dma_free(dev, cpu_addr, dma_handle, size, DMA_BIDIRECTIONAL,
		       attrs);
	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_alloc_direct(dev, ops) || arch_dma_free_direct(dev, dma_handle))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (use_dma_iommu(dev))
		iommu_dma_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);
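
/*
 * Example: the unmanaged allocate/free pair, e.g. for a descriptor ring
 * (a sketch; "ring" and its fields are hypothetical). dma_free_attrs()
 * may end up in vunmap() and therefore must not be called from interrupt
 * context, as the WARN_ON above checks:
 *
 *	ring->descs = dma_alloc_attrs(dev, ring->size, &ring->dma,
 *				      GFP_KERNEL, 0);
 *	if (!ring->descs)
 *		return -ENOMEM;
 *	...
 *	dma_free_attrs(dev, ring->size, ring->descs, ring->dma, 0);
 */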

static struct page *__dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
		return NULL;
	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
		return NULL;
	if (WARN_ON_ONCE(gfp & __GFP_COMP))
		return NULL;

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
	if (use_dma_iommu(dev))
		return dma_common_alloc_pages(dev, size, dma_handle, dir, gfp);
	if (!ops->alloc_pages_op)
		return NULL;
	return ops->alloc_pages_op(dev, size, dma_handle, dir, gfp);
}

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	if (page) {
		trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle,
				      size, dir, gfp, 0);
		debug_dma_alloc_pages(dev, page, size, dir, *dma_handle, 0);
	} else {
		trace_dma_alloc_pages(dev, NULL, 0, size, dir, gfp, 0);
	}
	return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);

static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free_pages(dev, size, page, dma_handle, dir);
	else if (use_dma_iommu(dev))
		dma_common_free_pages(dev, size, page, dma_handle, dir);
	else if (ops->free_pages)
		ops->free_pages(dev, size, page, dma_handle, dir);
}

void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	trace_dma_free_pages(dev, page_to_virt(page), dma_handle, size, dir, 0);
	debug_dma_free_pages(dev, page, size, dir, dma_handle);
	__dma_free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);

int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start,
			       page_to_pfn(page) + vma->vm_pgoff,
			       vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_pages);

static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp)
{
	struct sg_table *sgt;
	struct page *page;

	sgt = kmalloc_obj(*sgt, gfp);
	if (!sgt)
		return NULL;
	if (sg_alloc_table(sgt, 1, gfp))
		goto out_free_sgt;
	page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
	if (!page)
		goto out_free_table;
	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_len(sgt->sgl) = sgt->sgl->length;
	return sgt;
out_free_table:
	sg_free_table(sgt);
out_free_sgt:
	kfree(sgt);
	return NULL;
}

struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
	struct sg_table *sgt;

	if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
		return NULL;
	if (WARN_ON_ONCE(gfp & __GFP_COMP))
		return NULL;

	if (use_dma_iommu(dev))
		sgt = iommu_dma_alloc_noncontiguous(dev, size, dir, gfp, attrs);
	else
		sgt = alloc_single_sgt(dev, size, dir, gfp);

	if (sgt) {
		sgt->nents = 1;
		trace_dma_alloc_sgt(dev, sgt, size, dir, gfp, attrs);
		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
	} else {
		trace_dma_alloc_sgt_err(dev, NULL, 0, size, dir, gfp, attrs);
	}
	return sgt;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);

static void free_single_sgt(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	__dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
			 dir);
	sg_free_table(sgt);
	kfree(sgt);
}

void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	trace_dma_free_sgt(dev, sgt, size, dir);
	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);

	if (use_dma_iommu(dev))
		iommu_dma_free_noncontiguous(dev, size, sgt, dir);
	else
		free_single_sgt(dev, size, sgt, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncontiguous);

void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	if (use_dma_iommu(dev))
		return iommu_dma_vmap_noncontiguous(dev, size, sgt);

	return page_address(sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);

void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
	if (use_dma_iommu(dev))
		iommu_dma_vunmap_noncontiguous(dev, vaddr);
}
EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);

int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt)
{
	if (use_dma_iommu(dev))
		return iommu_dma_mmap_noncontiguous(dev, vma, size, sgt);
	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
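
/*
 * Example: the non-contiguous allocator family above from a driver's
 * point of view (a sketch; error handling abbreviated). The memory is
 * mapped for @dir at allocation time, so only ownership transfers need
 * the dma_sync_sgtable_*() helpers from <linux/dma-mapping.h>:
 *
 *	struct sg_table *sgt;
 *	void *vaddr;
 *
 *	sgt = dma_alloc_noncontiguous(dev, size, DMA_FROM_DEVICE,
 *				      GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
 *	if (!vaddr)
 *		goto out_free;
 *	...
 *	dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);
 *	...			(read the data through vaddr)
 *	dma_vunmap_noncontiguous(dev, vaddr);
 * out_free:
 *	dma_free_noncontiguous(dev, size, sgt, DMA_FROM_DEVICE);
 */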

static int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (use_dma_iommu(dev)) {
		if (WARN_ON(ops))
			return false;
		return true;
	}

	/*
	 * ->dma_supported sets and clears the bypass flag, so ignore it here
	 * and always call into the method if there is one.
	 */
	if (ops) {
		if (!ops->dma_supported)
			return true;
		return ops->dma_supported(dev, mask);
	}

	return dma_direct_supported(dev, mask);
}

bool dma_pci_p2pdma_supported(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/*
	 * Note: dma_ops_bypass is not checked here because P2PDMA should
	 * not be used with dma mapping ops that do not have support even
	 * if the specific device is bypassing them.
	 */

	/* if ops is not set, dma direct and default IOMMU support P2PDMA */
	return !ops;
}
EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	dma_setup_need_sync(dev);

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
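
/*
 * Example: typical probe-time mask setup. Most drivers use the
 * dma_set_mask_and_coherent() wrapper from <linux/dma-mapping.h>, which
 * combines the two functions above, and fall back to 32 bits if the
 * wider mask is rejected:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */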

static bool __dma_addressing_limited(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
	    dma_get_required_mask(dev))
		return true;

	if (unlikely(ops) || use_dma_iommu(dev))
		return false;
	return !dma_direct_all_ram_mapped(dev);
}

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev: device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false. Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
bool dma_addressing_limited(struct device *dev)
{
	if (!__dma_addressing_limited(dev))
		return false;

	dev_dbg(dev, "device is DMA addressing limited\n");
	return true;
}
EXPORT_SYMBOL_GPL(dma_addressing_limited);

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_map_direct(dev, ops))
		size = dma_direct_max_mapping_size(dev);
	else if (use_dma_iommu(dev))
		size = iommu_dma_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

size_t dma_opt_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (use_dma_iommu(dev))
		size = iommu_dma_opt_mapping_size();
	else if (ops && ops->opt_mapping_size)
		size = ops->opt_mapping_size();

	return min(dma_max_mapping_size(dev), size);
}
EXPORT_SYMBOL_GPL(dma_opt_mapping_size);
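
/*
 * Example: sizing per-request mappings against the two limits above (a
 * sketch; FOO_HW_LIMIT is a hypothetical hardware cap). Staying at or
 * below dma_opt_mapping_size() helps fast devices avoid contention in
 * the IOVA allocator:
 *
 *	size_t max_bytes = min_t(size_t, FOO_HW_LIMIT,
 *				 dma_opt_mapping_size(dev));
 */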

unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (use_dma_iommu(dev))
		return iommu_dma_get_merge_boundary(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);