#ifndef ___ASM_SPARC_DMA_MAPPING_H
#define ___ASM_SPARC_DMA_MAPPING_H

#include <linux/scatterlist.h>
#include <linux/mm.h>
#include <linux/dma-debug.h>

/* All-ones is never a valid DMA address on sparc, so it marks a failed mapping. */
#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)

extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);

/*
 * Coherent allocations are always consistent on sparc, so the
 * noncoherent variants simply alias them.
 */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern struct dma_map_ops *dma_ops, pci32_dma_ops;
extern struct bus_type pci_bus_type;

/*
 * sparc32 PCI devices use a dedicated ops table; everything else goes
 * through the platform-selected dma_ops pointer.
 */
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
	if (dev->bus == &pci_bus_type)
		return &pci32_dma_ops;
#endif
	return dma_ops;
}

#include <asm-generic/dma-mapping-common.h>

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return (dma_addr == DMA_ERROR_CODE);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe
	 */
	return (1 << INTERNODE_CACHE_SHIFT);
}

#endif
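
/*
 * Usage sketch (illustrative only, guarded by #if 0 so it is never
 * compiled): a hypothetical driver exercising the API declared above.
 * The function name, device, buffer, and length are invented for
 * illustration; the calls themselves (dma_set_mask, dma_alloc_coherent,
 * dma_map_single, dma_mapping_error, dma_unmap_single,
 * dma_free_coherent) come from this header and the included
 * asm-generic/dma-mapping-common.h. DMA_BIT_MASK, GFP_KERNEL, and
 * DMA_TO_DEVICE are assumed to be available via <linux/dma-mapping.h>.
 */
#if 0	/* example, not part of the header */
static int example_setup_dma(struct device *dev, void *data, size_t len)
{
	dma_addr_t ring_bus, data_bus;
	void *ring;

	/* Claim a 32-bit DMA mask; fail if the platform cannot honor it. */
	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
		return -EIO;

	/* Coherent allocation: CPU and device see consistent contents. */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_bus, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* Streaming mapping: must be checked with dma_mapping_error(). */
	data_bus = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, data_bus)) {
		dma_free_coherent(dev, PAGE_SIZE, ring, ring_bus);
		return -ENOMEM;
	}

	/* ... program the device with ring_bus and data_bus ... */

	dma_unmap_single(dev, data_bus, len, DMA_TO_DEVICE);
	dma_free_coherent(dev, PAGE_SIZE, ring, ring_bus);
	return 0;
}
#endif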