/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#ifndef _KERNEL_DMA_DIRECT_H
#define _KERNEL_DMA_DIRECT_H

#include <linux/dma-direct.h>
#include <linux/memremap.h>

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_can_mmap(struct device *dev);
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
size_t dma_direct_max_mapping_size(struct device *dev);

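/*
 * The scatterlist sync and unmap helpers only have real work to do when
 * the architecture requires explicit cache maintenance or swiotlb may be
 * bouncing buffers; otherwise the stubs below compile away so that
 * callers need no #ifdefs of their own.
 */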
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

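/*
 * Make a single streaming mapping ready for device access: sync the
 * swiotlb bounce buffer if the address falls inside the swiotlb pool,
 * then do the cache maintenance non-coherent architectures require.
 */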
static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(dev, paddr)))
		swiotlb_sync_single_for_device(dev, paddr, size, dir);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
}

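/*
 * Make a single streaming mapping visible to the CPU again: perform the
 * architecture cache maintenance for non-coherent devices, sync the
 * swiotlb bounce buffer if one backs the address, and finally give the
 * architecture a chance to mark DMA_FROM_DEVICE pages clean.
 */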
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(paddr, size, dir);
		arch_sync_dma_for_cpu_all();
	}

	if (unlikely(is_swiotlb_buffer(dev, paddr)))
		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);

	if (dir == DMA_FROM_DEVICE)
		arch_dma_mark_clean(paddr, size);
}

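/*
 * Map a single page for streaming DMA.  Addresses the device cannot
 * reach directly are bounced through swiotlb where possible; PCI P2PDMA
 * pages can never be bounced and fail with DMA_MAPPING_ERROR instead.
 */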
static inline dma_addr_t dma_direct_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (is_swiotlb_force_bounce(dev)) {
		if (is_pci_p2pdma_page(page))
			return DMA_MAPPING_ERROR;
		return swiotlb_map(dev, phys, size, dir, attrs);
	}

	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		if (is_pci_p2pdma_page(page))
			return DMA_MAPPING_ERROR;
		if (is_swiotlb_active(dev))
			return swiotlb_map(dev, phys, size, dir, attrs);

		dev_WARN_ONCE(dev, 1,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);
	return dma_addr;
}

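/*
 * Tear down a mapping created by dma_direct_map_page(): sync for the CPU
 * unless the caller asked to skip that, and release the swiotlb bounce
 * buffer if one was used for the mapping.
 */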
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(dev, phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, dir,
					 attrs | DMA_ATTR_SKIP_CPU_SYNC);
}
#endif /* _KERNEL_DMA_DIRECT_H */