/* Copyright (C) 2002 by James.Bottomley@HansenPartnership.com
 *
 * Implements the generic device dma API via the existing pci_ one
 * for unconverted architectures
 */

#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H


#ifdef CONFIG_PCI

/* we implement the API below in terms of the existing PCI one,
 * so include it */
#include <linux/pci.h>
/* need struct page definitions */
#include <linux/mm.h>

static inline int
dma_supported(struct device *dev, u64 mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_dma_supported(to_pci_dev(dev), mask);
}

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
}

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t flag)
{
	BUG_ON(dev->bus != &pci_bus_type);

	/* the gfp flag cannot be passed through pci_alloc_consistent(),
	 * so it is ignored here */
	return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t dma_handle)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
				    size, (int)direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
				       size, (int)direction);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
}

static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return pci_dma_mapping_error(dma_addr);
}


#else

static inline int
dma_supported(struct device *dev, u64 mask)
{
	return 0;
}

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
	BUG();
	return 0;
}

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t flag)
{
	BUG();
	return NULL;
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t dma_handle)
{
	BUG();
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction direction)
{
	BUG();
	return 0;
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG();
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG();
	return 0;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG();
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	BUG();
	return 0;
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG();
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	BUG();
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	BUG();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
	BUG();
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	BUG();
}

/* named to match the CONFIG_PCI variant above, so callers compile
 * either way */
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

#endif

/* Now for the API extensions over the pci_ one */

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d)	(1)

static inline int
dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	/* just sync everything, that's all the pci API can do */
	dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	/* just sync everything, that's all the pci API can do */
	dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
}

static inline void
dma_cache_sync(void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	/* could define this in terms of the dma_cache ... operations,
	 * but if you get this on a platform, you should convert the platform
	 * to using the generic device DMA API */
	BUG();
}

#endif
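/*
 * Illustrative usage (not part of this header): a minimal sketch of how a
 * PCI driver sitting on top of these wrappers might stream a buffer to its
 * device.  The function, buffer, and length names below are hypothetical;
 * the point is only that each dma_* call above resolves to the matching
 * pci_* call on to_pci_dev(dev).
 *
 *	static int example_send(struct pci_dev *pdev, void *buf, size_t len)
 *	{
 *		struct device *dev = &pdev->dev;
 *		dma_addr_t handle;
 *
 *		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *		if (dma_mapping_error(handle))
 *			return -ENOMEM;
 *
 *		... program the hardware with `handle', wait for completion ...
 *
 *		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *		return 0;
 *	}
 */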