/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This header is for implementations of dma_map_ops and related code.
 * It should not be included in drivers just using the DMA API.
 */
#ifndef _LINUX_DMA_MAP_OPS_H
#define _LINUX_DMA_MAP_OPS_H

#include <linux/dma-mapping.h>
#include <linux/pgtable.h>
#include <linux/slab.h>

struct cma;
struct iommu_ops;

/*
 * Values for struct dma_map_ops.flags:
 *
 * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can
 * handle PCI P2PDMA pages in the map_sg/unmap_sg operation.
 * DMA_F_CAN_SKIP_SYNC: DMA sync operations can be skipped if the device is
 * coherent and it's not an SWIOTLB buffer.
 */
#define DMA_F_PCI_P2PDMA_SUPPORTED	(1 << 0)
#define DMA_F_CAN_SKIP_SYNC		(1 << 1)

struct dma_map_ops {
	unsigned int flags;

	void *(*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle, unsigned long attrs);
	struct page *(*alloc_pages_op)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, enum dma_data_direction dir,
			gfp_t gfp);
	void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
			dma_addr_t dma_handle, enum dma_data_direction dir);
	struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
			enum dma_data_direction dir, gfp_t gfp,
			unsigned long attrs);
	void (*free_noncontiguous)(struct device *dev, size_t size,
			struct sg_table *sgt, enum dma_data_direction dir);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t, unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg should return a negative error code on error. See
	 * dma_map_sgtable() for a list of appropriate error codes
	 * and their meanings.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
	size_t (*max_mapping_size)(struct device *dev);
	size_t (*opt_mapping_size)(void);
	unsigned long (*get_merge_boundary)(struct device *dev);
};

#ifdef CONFIG_DMA_OPS
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops();
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else /* CONFIG_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
}
#endif /* CONFIG_DMA_OPS */
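/*
 * Example (illustrative sketch only, not part of this header): an ops
 * provider typically fills in a static const table with the callbacks it
 * supports, leaves the rest NULL, and installs it on a device with
 * set_dma_ops() from its bus or IOMMU attach path.  The mybus_* names
 * below are hypothetical:
 *
 *	static const struct dma_map_ops mybus_dma_ops = {
 *		.alloc		= mybus_dma_alloc,
 *		.free		= mybus_dma_free,
 *		.map_page	= mybus_map_page,
 *		.unmap_page	= mybus_unmap_page,
 *		.map_sg		= mybus_map_sg,
 *		.unmap_sg	= mybus_unmap_sg,
 *	};
 *
 *	static int mybus_dma_configure(struct device *dev)
 *	{
 *		set_dma_ops(dev, &mybus_dma_ops);
 *		return 0;
 *	}
 */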
#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;

static inline struct cma *dev_get_cma_area(struct device *dev)
{
	if (dev && dev->cma_area)
		return dev->cma_area;
	return dma_contiguous_default_area;
}

void dma_contiguous_reserve(phys_addr_t addr_limit);
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
		phys_addr_t limit, struct cma **res_cma, bool fixed);

struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
		unsigned int order, bool no_warn);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
		int count);
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
void dma_free_contiguous(struct device *dev, struct page *page, size_t size);

void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
#else /* CONFIG_DMA_CMA */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
	return NULL;
}
static inline void dma_contiguous_reserve(phys_addr_t limit)
{
}
static inline int dma_contiguous_reserve_area(phys_addr_t size,
		phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
		bool fixed)
{
	return -ENOSYS;
}
static inline struct page *dma_alloc_from_contiguous(struct device *dev,
		size_t count, unsigned int order, bool no_warn)
{
	return NULL;
}
static inline bool dma_release_from_contiguous(struct device *dev,
		struct page *pages, int count)
{
	return false;
}
/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
		gfp_t gfp)
{
	return NULL;
}
static inline void dma_free_contiguous(struct device *dev, struct page *page,
		size_t size)
{
	__free_pages(page, get_order(size));
}
#endif /* CONFIG_DMA_CMA */
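/*
 * Example (illustrative sketch only): dma_alloc_contiguous() returns NULL
 * when no CMA area applies (or when CONFIG_DMA_CMA=n), so an ops ->alloc
 * implementation typically pairs it with a plain page allocation as a
 * fallback, and releases through dma_free_contiguous(), which handles both
 * cases:
 *
 *	struct page *page;
 *
 *	page = dma_alloc_contiguous(dev, size, gfp);
 *	if (!page)
 *		page = alloc_pages(gfp, get_order(size));
 *	...
 *	dma_free_contiguous(dev, page, size);
 */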
#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size);
void dma_release_coherent_memory(struct device *dev);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret);
#else
static inline int dma_declare_coherent_memory(struct device *dev,
		phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
{
	return -ENOSYS;
}

#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
static inline void dma_release_coherent_memory(struct device *dev) { }
#endif /* CONFIG_DMA_DECLARE_COHERENT */

#ifdef CONFIG_DMA_GLOBAL_POOL
void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
		size_t size, int *ret);
int dma_init_global_coherent(phys_addr_t phys_addr, size_t size);
#else
static inline void *dma_alloc_from_global_coherent(struct device *dev,
		ssize_t size, dma_addr_t *dma_handle)
{
	return NULL;
}
static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}
static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret)
{
	return 0;
}
#endif /* CONFIG_DMA_GLOBAL_POOL */
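/*
 * Example (illustrative sketch only): the per-device coherent pool is
 * consulted before any other allocator.  A non-zero return from
 * dma_alloc_from_dev_coherent() means the device owns a dedicated pool and
 * the request was handled there (*cpu_addr may still be NULL if the pool
 * was exhausted), so the caller must not fall back to other allocators:
 *
 *	void *cpu_addr;
 *
 *	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
 *		return cpu_addr;
 *	// otherwise allocate through the regular path
 */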
/*
 * This is the actual return value from the ->alloc_noncontiguous method.
 * The users of the DMA API should only care about the sg_table, but to make
 * the DMA-API internal vmapping and freeing easier we stash away the page
 * array as well (except for the fallback case). This can go away any time,
 * e.g. when a vmap variant that takes a scatterlist comes along.
 */
struct dma_sgt_handle {
	struct sg_table sgt;
	struct page **pages;
};
#define sgt_handle(sgt) \
	container_of((sgt), struct dma_sgt_handle, sgt)
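/*
 * Example (illustrative sketch only): an ->alloc_noncontiguous
 * implementation hands &handle->sgt back to its caller; the matching
 * ->free_noncontiguous can then recover the enclosing handle with the
 * sgt_handle() container_of() wrapper.  The mydev_* name below is
 * hypothetical:
 *
 *	static void mydev_free_noncontiguous(struct device *dev, size_t size,
 *			struct sg_table *sgt, enum dma_data_direction dir)
 *	{
 *		struct dma_sgt_handle *sh = sgt_handle(sgt);
 *
 *		// release sh->pages and sh->sgt, then free sh itself
 *	}
 */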
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
struct page *dma_common_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot,
		const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
		const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t flags,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);

int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
		dma_addr_t dma_start, u64 size);

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
extern bool dma_default_coherent;
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return dev->dma_coherent;
}
#else
#define dma_default_coherent true

static inline bool dev_is_dma_coherent(struct device *dev)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_{DEVICE,CPU,CPU_ALL} */

static inline void dma_reset_need_sync(struct device *dev)
{
#ifdef CONFIG_DMA_NEED_SYNC
	/* Reset it only once so that the function can be called on hotpath */
	if (unlikely(dev->dma_skip_sync))
		dev->dma_skip_sync = false;
#endif
}

/*
 * Check whether potential kmalloc() buffers are safe for non-coherent DMA.
 */
static inline bool dma_kmalloc_safe(struct device *dev,
				    enum dma_data_direction dir)
{
	/*
	 * If DMA bouncing of kmalloc() buffers is disabled, the kmalloc()
	 * caches have already been aligned to a DMA-safe size.
	 */
	if (!IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
		return true;

	/*
	 * kmalloc() buffers are DMA-safe irrespective of size if the device
	 * is coherent or the direction is DMA_TO_DEVICE (non-destructive
	 * cache maintenance and benign cache line evictions).
	 */
	if (dev_is_dma_coherent(dev) || dir == DMA_TO_DEVICE)
		return true;

	return false;
}

/*
 * Check whether the given size, assuming it is for a kmalloc()'ed buffer, is
 * sufficiently aligned for non-coherent DMA.
 */
static inline bool dma_kmalloc_size_aligned(size_t size)
{
	/*
	 * Larger kmalloc() sizes are guaranteed to be aligned to
	 * ARCH_DMA_MINALIGN.
	 */
	if (size >= 2 * ARCH_DMA_MINALIGN ||
	    IS_ALIGNED(kmalloc_size_roundup(size), dma_get_cache_alignment()))
		return true;

	return false;
}

/*
 * Check whether the given object size may have originated from a kmalloc()
 * buffer with a slab alignment below the DMA-safe alignment and needs
 * bouncing for non-coherent DMA. The pointer alignment is not considered and
 * in-structure DMA-safe offsets are the responsibility of the caller. Such
 * code should use the static ARCH_DMA_MINALIGN for compiler annotations.
 *
 * The heuristics can have false positives, bouncing unnecessarily, though the
 * buffers would be small. False negatives are theoretically possible if, for
 * example, multiple small kmalloc() buffers are coalesced into a larger
 * buffer that passes the alignment check. There are no such known constructs
 * in the kernel.
 */
static inline bool dma_kmalloc_needs_bounce(struct device *dev, size_t size,
					    enum dma_data_direction dir)
{
	return !dma_kmalloc_safe(dev, dir) && !dma_kmalloc_size_aligned(size);
}
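/*
 * Example (illustrative sketch only): mapping code typically uses this
 * helper when deciding whether a streaming mapping must be bounced through
 * SWIOTLB because the underlying kmalloc() slab may share a cache line
 * with unrelated data:
 *
 *	if (dma_kmalloc_needs_bounce(dev, size, dir)) {
 *		// bounce through SWIOTLB instead of mapping in place
 *	}
 */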
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask) do { } while (0)
#endif

#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently. We default to pgprot_noncached which is usually used
 * for ioremap as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot)	pgprot_noncached(prot)
#endif

pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	return prot;	/* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */
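/*
 * Example (illustrative sketch only): allocation and mmap paths derive the
 * final page protection from dma_pgprot() so that non-coherent devices get
 * an uncached (or otherwise suitable) mapping.  Roughly:
 *
 *	// remapping an allocation into the vmalloc area for the CPU
 *	void *vaddr = dma_common_contiguous_remap(page, size,
 *			dma_pgprot(dev, PAGE_KERNEL, attrs),
 *			__builtin_return_address(0));
 *
 *	// fixing up a userspace mapping
 *	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
 */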
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */

#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */

#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
#else
static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
}
#endif /* ARCH_HAS_DMA_MARK_CLEAN */

void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);
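/*
 * Example (illustrative sketch only): for a device that is not cache
 * coherent, a streaming mapping brackets the hardware access with the arch
 * cache maintenance hooks above, keyed off dev_is_dma_coherent():
 *
 *	// before handing the buffer to the device
 *	if (!dev_is_dma_coherent(dev))
 *		arch_sync_dma_for_device(phys, size, dir);
 *
 *	// after the device is done, before the CPU reads the data
 *	if (!dev_is_dma_coherent(dev))
 *		arch_sync_dma_for_cpu(phys, size, dir);
 */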
#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
#else
#define arch_dma_map_page_direct(d, a)		(false)
#define arch_dma_unmap_page_direct(d, a)	(false)
#define arch_dma_map_sg_direct(d, s, n)		(false)
#define arch_dma_unmap_sg_direct(d, s, n)	(false)
#endif

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

#ifdef CONFIG_DMA_API_DEBUG
void dma_debug_add_bus(const struct bus_type *bus);
void debug_dma_dump_mappings(struct device *dev);
#else
static inline void dma_debug_add_bus(const struct bus_type *bus)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

extern const struct dma_map_ops dma_dummy_ops;

enum pci_p2pdma_map_type {
	/*
	 * PCI_P2PDMA_MAP_UNKNOWN: Used internally for indicating the mapping
	 * type hasn't been calculated yet. Functions that return this enum
	 * never return this value.
	 */
	PCI_P2PDMA_MAP_UNKNOWN = 0,

	/*
	 * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
	 * traverse the host bridge and the host bridge is not in the
	 * allowlist. DMA Mapping routines should return an error when
	 * this is returned.
	 */
	PCI_P2PDMA_MAP_NOT_SUPPORTED,

	/*
	 * PCI_P2PDMA_MAP_BUS_ADDR: Indicates that two devices can talk to
	 * each other directly through a PCI switch and the transaction will
	 * not traverse the host bridge. Such a mapping should program
	 * the DMA engine with PCI bus addresses.
	 */
	PCI_P2PDMA_MAP_BUS_ADDR,

	/*
	 * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
	 * to each other, but the transaction traverses a host bridge on the
	 * allowlist. In this case, a normal mapping either with CPU physical
	 * addresses (in the case of dma-direct) or IOVA addresses (in the
	 * case of IOMMUs) should be used to program the DMA engine.
	 */
	PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
};

struct pci_p2pdma_map_state {
	struct dev_pagemap *pgmap;
	int map;
	u64 bus_off;
};
#ifdef CONFIG_PCI_P2PDMA
enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
		struct scatterlist *sg);
#else /* CONFIG_PCI_P2PDMA */
static inline enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
		struct scatterlist *sg)
{
	return PCI_P2PDMA_MAP_NOT_SUPPORTED;
}
#endif /* CONFIG_PCI_P2PDMA */

#endif /* _LINUX_DMA_MAP_OPS_H */