xref: /linux/kernel/dma/direct.h (revision 07410559f38360885e91cff1b800168681ac515c)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#ifndef _KERNEL_DMA_DIRECT_H
#define _KERNEL_DMA_DIRECT_H

#include <linux/dma-direct.h>

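/*
 * Out-of-line helpers used by the generic dma-mapping code; the
 * implementations live in kernel/dma/direct.c.
 */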
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_can_mmap(struct device *dev);
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
size_t dma_direct_max_mapping_size(struct device *dev);

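/*
 * Syncing a scatterlist for the device only needs a real implementation when
 * the architecture does cache maintenance for device access or SWIOTLB may
 * have to copy data into bounce buffers; otherwise it compiles down to an
 * empty stub so callers need no #ifdefs of their own.
 */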
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

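/*
 * Likewise, unmapping a scatterlist and syncing it back to the CPU only do
 * real work when CPU-side cache maintenance is needed or SWIOTLB bounce
 * buffers may have to be copied back and released; the stubs keep callers
 * free of #ifdefs.
 */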
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

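/*
 * Make a single mapping visible to the device: copy data into the SWIOTLB
 * bounce buffer if one is in use for this address, then perform any
 * architecture cache maintenance required for non-coherent devices.
 */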
static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(dev, paddr)))
		swiotlb_sync_single_for_device(dev, paddr, size, dir);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
}

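/*
 * Make a single mapping visible to the CPU: perform CPU-side cache
 * maintenance for non-coherent devices, copy data back out of a SWIOTLB
 * bounce buffer if one was used, and tell architectures that track page
 * cleanliness (arch_dma_mark_clean()) that the pages are clean after a
 * device-to-memory transfer.
 */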
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(paddr, size, dir);
		arch_sync_dma_for_cpu_all();
	}

	if (unlikely(is_swiotlb_buffer(dev, paddr)))
		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);

	if (dir == DMA_FROM_DEVICE)
		arch_dma_mark_clean(paddr, size);
}

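/*
 * Map a single page for streaming DMA.  The physical address is translated
 * with phys_to_dma(); devices that must always bounce (e.g. swiotlb=force or
 * a restricted DMA pool) go straight through SWIOTLB, and addresses the
 * device cannot reach (DMA mask or bus limit) are bounced when SWIOTLB is
 * available, otherwise the mapping fails.  Cache maintenance for
 * non-coherent devices is skipped when the caller passes
 * DMA_ATTR_SKIP_CPU_SYNC.
 */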
static inline dma_addr_t dma_direct_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (is_swiotlb_force_bounce(dev))
		return swiotlb_map(dev, phys, size, dir, attrs);

	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		if (is_swiotlb_active(dev))
			return swiotlb_map(dev, phys, size, dir, attrs);

		dev_WARN_ONCE(dev, 1,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);
	return dma_addr;
}

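/*
 * Tear down a mapping created by dma_direct_map_page(): sync the buffer back
 * to the CPU unless the caller asked to skip that, and release the SWIOTLB
 * bounce slot if one was used.  DMA_ATTR_SKIP_CPU_SYNC is forced on the
 * SWIOTLB unmap so that any required copy-back is not done a second time.
 */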
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(dev, phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, dir,
					 attrs | DMA_ATTR_SKIP_CPU_SYNC);
}
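
/*
 * Illustrative call sequence (a simplified sketch, not lifted from
 * kernel/dma/mapping.c): for a device without an IOMMU the generic DMA API
 * ends up in these helpers, roughly:
 *
 *	dma_addr_t dma = dma_direct_map_page(dev, page, offset, len, dir, 0);
 *
 *	if (dma == DMA_MAPPING_ERROR)
 *		return -EIO;
 *	... device performs DMA, with dma_direct_sync_single_for_{cpu,device}()
 *	    around CPU accesses to a non-coherent buffer ...
 *	dma_direct_unmap_page(dev, dma, len, dir, 0);
 */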
#endif /* _KERNEL_DMA_DIRECT_H */