/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#ifndef _KERNEL_DMA_DIRECT_H
#define _KERNEL_DMA_DIRECT_H

#include <linux/dma-direct.h>
#include <linux/memremap.h>

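/* Out-of-line helpers; the implementations live in kernel/dma/direct.c. */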
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_can_mmap(struct device *dev);
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
bool dma_direct_all_ram_mapped(struct device *dev);
size_t dma_direct_max_mapping_size(struct device *dev);

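/*
 * The scatter-gather sync helpers only have real implementations when an
 * architecture needs cache maintenance or swiotlb bouncing may be involved;
 * otherwise they compile away to empty stubs so callers need no #ifdefs.
 */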
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

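/* The unmap and sync-for-cpu side of the SG helpers follows the same pattern. */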
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

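/*
 * Sync a single mapping before the device accesses it: update the swiotlb
 * bounce slot if this address was bounced, then write back CPU caches on
 * non-coherent platforms so the device sees current data.
 */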
static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	swiotlb_sync_single_for_device(dev, paddr, size, dir);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
}

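/*
 * Sync a single mapping after the device has written it: invalidate CPU
 * caches on non-coherent platforms, copy swiotlb bounce data back to the
 * original buffer, and let the architecture mark the pages clean for
 * DMA_FROM_DEVICE transfers.
 */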
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(paddr, size, dir);
		arch_sync_dma_for_cpu_all();
	}

	swiotlb_sync_single_for_cpu(dev, paddr, size, dir);

	if (dir == DMA_FROM_DEVICE)
		arch_dma_mark_clean(paddr, size);
}

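/*
 * Map a single page for streaming DMA.  The fast path is a plain
 * phys_to_dma() translation plus cache maintenance for non-coherent devices;
 * memory the device cannot address directly is bounced through swiotlb.
 */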
static inline dma_addr_t dma_direct_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

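	/*
	 * When bouncing through swiotlb is forced for this device, always map
	 * via the bounce buffer; P2PDMA pages cannot be bounced, so fail the
	 * mapping instead.
	 */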
	if (is_swiotlb_force_bounce(dev)) {
		if (is_pci_p2pdma_page(page))
			return DMA_MAPPING_ERROR;
		return swiotlb_map(dev, phys, size, dir, attrs);
	}

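	/*
	 * Bounce if the device cannot address the memory directly, or if a
	 * small kmalloc() buffer could share a cache line with unrelated data
	 * on a non-coherent device.
	 */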
	if (unlikely(!dma_capable(dev, dma_addr, size, true)) ||
	    dma_kmalloc_needs_bounce(dev, size, dir)) {
		if (is_pci_p2pdma_page(page))
			return DMA_MAPPING_ERROR;
		if (is_swiotlb_active(dev))
			return swiotlb_map(dev, phys, size, dir, attrs);

		dev_WARN_ONCE(dev, 1,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

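	/*
	 * Directly mappable: write back CPU caches for non-coherent devices
	 * unless the caller asked to skip the sync.
	 */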
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);
	return dma_addr;
}

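/*
 * Tear down a single-page mapping: sync the buffer for the CPU unless the
 * caller asked to skip it, then release any swiotlb bounce buffer that was
 * used for this address.
 */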
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

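	/*
	 * Any required CPU sync was already done above (or explicitly skipped
	 * by the caller), so tell swiotlb not to sync again when releasing
	 * the bounce buffer.
	 */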
	swiotlb_tbl_unmap_single(dev, phys, size, dir,
					 attrs | DMA_ATTR_SKIP_CPU_SYNC);
}
#endif /* _KERNEL_DMA_DIRECT_H */