/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _LINUXKPI_LINUX_DMA_MAPPING_H_
#define _LINUXKPI_LINUX_DMA_MAPPING_H_

#include <linux/types.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-attrs.h>
#include <linux/scatterlist.h>
#include <linux/mm.h>
#include <linux/page.h>
#include <linux/sizes.h>

#include <sys/systm.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

#include <machine/bus.h>

enum dma_data_direction {
        DMA_BIDIRECTIONAL = 0,
        DMA_TO_DEVICE = 1,
        DMA_FROM_DEVICE = 2,
        DMA_NONE = 3,
};

struct dma_map_ops {
        void* (*alloc_coherent)(struct device *dev, size_t size,
            dma_addr_t *dma_handle, gfp_t gfp);
        void (*free_coherent)(struct device *dev, size_t size,
            void *vaddr, dma_addr_t dma_handle);
        dma_addr_t (*map_page)(struct device *dev, struct page *page,
            unsigned long offset, size_t size, enum dma_data_direction dir,
            unsigned long attrs);
        void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
            size_t size, enum dma_data_direction dir, unsigned long attrs);
        int (*map_sg)(struct device *dev, struct scatterlist *sg,
            int nents, enum dma_data_direction dir, unsigned long attrs);
        void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
            enum dma_data_direction dir, unsigned long attrs);
        void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
            size_t size, enum dma_data_direction dir);
        void (*sync_single_for_device)(struct device *dev,
            dma_addr_t dma_handle, size_t size, enum dma_data_direction dir);
        void (*sync_single_range_for_cpu)(struct device *dev,
            dma_addr_t dma_handle, unsigned long offset, size_t size,
            enum dma_data_direction dir);
        void (*sync_single_range_for_device)(struct device *dev,
            dma_addr_t dma_handle, unsigned long offset, size_t size,
            enum dma_data_direction dir);
        void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
            int nents, enum dma_data_direction dir);
        void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
            int nents, enum dma_data_direction dir);
        int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
        int (*dma_supported)(struct device *dev, u64 mask);
        int is_phys;
};

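/*
 * DMA_BIT_MASK(n) expands to a mask with the low n bits set, e.g.
 * DMA_BIT_MASK(32) == 0xffffffffULL.  It is written as
 * ((2ULL << ((n) - 1)) - 1ULL) rather than ((1ULL << (n)) - 1) so that
 * DMA_BIT_MASK(64) does not shift a 64-bit value by 64 bits, which would
 * be undefined behaviour.
 */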
#define DMA_BIT_MASK(n) ((2ULL << ((n) - 1)) - 1ULL)

int linux_dma_tag_init(struct device *, u64);
int linux_dma_tag_init_coherent(struct device *, u64);
void *linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag);
void *linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag);
dma_addr_t linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len);
void linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t size);
int linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents, enum dma_data_direction dir __unused,
    unsigned long attrs __unused);
void linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
    int nents __unused, enum dma_data_direction dir __unused,
    unsigned long attrs __unused);
void linuxkpi_dma_sync(struct device *, dma_addr_t, size_t, bus_dmasync_op_t);

static inline int
dma_supported(struct device *dev, u64 dma_mask)
{

        /* XXX busdma takes care of this elsewhere. */
        return (1);
}

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{

        if (!dev->dma_priv || !dma_supported(dev, dma_mask))
                return -EIO;

        return (linux_dma_tag_init(dev, dma_mask));
}

static inline int
dma_set_coherent_mask(struct device *dev, u64 dma_mask)
{

        if (!dev->dma_priv || !dma_supported(dev, dma_mask))
                return -EIO;

        return (linux_dma_tag_init_coherent(dev, dma_mask));
}

static inline int
dma_set_mask_and_coherent(struct device *dev, u64 dma_mask)
{
        int r;

        r = dma_set_mask(dev, dma_mask);
        if (r == 0)
                dma_set_coherent_mask(dev, dma_mask);
        return (r);
}

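/*
 * Illustrative usage only (not part of this header's API): a driver
 * probe/attach path typically requests the widest DMA mask it supports
 * and falls back to a narrower one, e.g.:
 *
 *      if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
 *          dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0)
 *              return (-ENODEV);
 */
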
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    gfp_t flag)
{
        return (linux_dma_alloc_coherent(dev, size, dma_handle, flag));
}

static inline void *
dma_zalloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    gfp_t flag)
{

        return (dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO));
}

static inline void *
dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    gfp_t flag)
{

        return (linuxkpi_dmam_alloc_coherent(dev, size, dma_handle, flag));
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
    dma_addr_t dma_addr)
{

        linux_dma_unmap(dev, dma_addr, size);
        kmem_free(cpu_addr, size);
}

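/*
 * Illustrative usage only: a coherent ("consistent") buffer is allocated
 * and released as a pair, with the bus address returned through the
 * dma_handle pointer, e.g.:
 *
 *      void *ring;
 *      dma_addr_t ring_dma;
 *
 *      ring = dma_alloc_coherent(dev, ring_size, &ring_dma, GFP_KERNEL);
 *      if (ring == NULL)
 *              return (-ENOMEM);
 *      ...
 *      dma_free_coherent(dev, ring_size, ring, ring_dma);
 *
 * ring and ring_size above are placeholders, not symbols defined by this
 * header.
 */
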
static inline dma_addr_t
dma_map_page_attrs(struct device *dev, struct page *page, size_t offset,
    size_t size, enum dma_data_direction dir, unsigned long attrs)
{

        return (linux_dma_map_phys(dev, VM_PAGE_TO_PHYS(page) + offset, size));
}

/* linux_dma_(un)map_sg_attrs does not support attrs yet */
#define dma_map_sg_attrs(dev, sgl, nents, dir, attrs)   \
        linux_dma_map_sg_attrs(dev, sgl, nents, dir, 0)

#define dma_unmap_sg_attrs(dev, sg, nents, dir, attrs)  \
        linux_dma_unmap_sg_attrs(dev, sg, nents, dir, 0)

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
    unsigned long offset, size_t size, enum dma_data_direction direction)
{

        return (linux_dma_map_phys(dev, VM_PAGE_TO_PHYS(page) + offset, size));
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
    enum dma_data_direction direction)
{

        linux_dma_unmap(dev, dma_address, size);
}

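/*
 * The streaming sync helpers below translate the Linux DMA direction into
 * bus_dmamap_sync(9) operations and forward them to linuxkpi_dma_sync();
 * DMA_BIDIRECTIONAL on the CPU side is split into two sync calls.
 */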
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma, size_t size,
    enum dma_data_direction direction)
{
        bus_dmasync_op_t op;

        switch (direction) {
        case DMA_BIDIRECTIONAL:
                op = BUS_DMASYNC_POSTREAD;
                linuxkpi_dma_sync(dev, dma, size, op);
                op = BUS_DMASYNC_PREREAD;
                break;
        case DMA_TO_DEVICE:
                op = BUS_DMASYNC_POSTWRITE;
                break;
        case DMA_FROM_DEVICE:
                op = BUS_DMASYNC_POSTREAD;
                break;
        default:
                return;
        }

        linuxkpi_dma_sync(dev, dma, size, op);
}

static inline void
dma_sync_single(struct device *dev, dma_addr_t addr, size_t size,
    enum dma_data_direction dir)
{
        dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma,
    size_t size, enum dma_data_direction direction)
{
        bus_dmasync_op_t op;

        switch (direction) {
        case DMA_BIDIRECTIONAL:
                op = BUS_DMASYNC_PREWRITE;
                break;
        case DMA_TO_DEVICE:
                op = BUS_DMASYNC_PREREAD;
                break;
        case DMA_FROM_DEVICE:
                op = BUS_DMASYNC_PREWRITE;
                break;
        default:
                return;
        }

        linuxkpi_dma_sync(dev, dma, size, op);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
    enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
    unsigned long offset, size_t size, int direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
    unsigned long offset, size_t size, int direction)
{
}

#define DMA_MAPPING_ERROR       (~(dma_addr_t)0)

static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{

        if (dma_addr == 0 || dma_addr == DMA_MAPPING_ERROR)
                return (-ENOMEM);
        return (0);
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
    unsigned int size)
{
        return (0);
}

static inline dma_addr_t
_dma_map_single_attrs(struct device *dev, void *ptr, size_t size,
    enum dma_data_direction direction, unsigned long attrs __unused)
{
        dma_addr_t dma;

        dma = linux_dma_map_phys(dev, vtophys(ptr), size);
        if (!dma_mapping_error(dev, dma))
                dma_sync_single_for_device(dev, dma, size, direction);

        return (dma);
}

static inline void
_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma, size_t size,
    enum dma_data_direction direction, unsigned long attrs __unused)
{

        dma_sync_single_for_cpu(dev, dma, size, direction);
        linux_dma_unmap(dev, dma, size);
}

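/*
 * Illustrative usage only: a streaming mapping is created, checked and torn
 * down through the dma_map_single()/dma_unmap_single() wrappers defined
 * below, e.g. for a buffer handed to the device for transmit:
 *
 *      dma_addr_t busaddr;
 *
 *      busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, busaddr))
 *              return (-ENOMEM);
 *      ...
 *      dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
 *
 * buf and len above are placeholders, not symbols defined by this header.
 */
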
static inline size_t
dma_max_mapping_size(struct device *dev)
{

        return (SCATTERLIST_MAX_SEGMENT);
}

#define dma_map_single_attrs(dev, ptr, size, dir, attrs)        \
        _dma_map_single_attrs(dev, ptr, size, dir, 0)

#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \
        _dma_unmap_single_attrs(dev, dma_addr, size, dir, 0)

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)

#define DEFINE_DMA_UNMAP_ADDR(name)     dma_addr_t name
#define DEFINE_DMA_UNMAP_LEN(name)      __u32 name
#define dma_unmap_addr(p, name)         ((p)->name)
#define dma_unmap_addr_set(p, name, v)  (((p)->name) = (v))
#define dma_unmap_len(p, name)          ((p)->name)
#define dma_unmap_len_set(p, name, v)   (((p)->name) = (v))

extern int uma_align_cache;
#define dma_get_cache_alignment()       uma_align_cache

static inline int
dma_map_sgtable(struct device *dev, struct sg_table *sgt,
    enum dma_data_direction dir,
    unsigned long attrs)
{
        int nents;

        nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->nents, dir, attrs);
        if (nents < 0)
                return (nents);
        sgt->nents = nents;
        return (0);
}

static inline void
dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
    enum dma_data_direction dir,
    unsigned long attrs)
{

        dma_unmap_sg_attrs(dev, sgt->sgl, sgt->nents, dir, attrs);
}

#endif /* _LINUXKPI_LINUX_DMA_MAPPING_H_ */