/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA Coherent API Notes
 *
 * I/O is inherently non-coherent on ARC, so a coherent DMA buffer is
 * implemented by accessing it through a kernel virtual address with
 * the Cache bit off in the TLB entry.
 *
 * The default DMA address == physical address, which is 0x8000_0000 based.
 */

#include <linux/dma-mapping.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

static void *arc_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	void *paddr, *kvaddr;

	/* This is the linear addr (0x8000_0000 based) */
	paddr = alloc_pages_exact(size, gfp);
	if (!paddr)
		return NULL;

	/* This is the bus address, platform dependent */
	*dma_handle = (dma_addr_t)paddr;

	/*
	 * IOC relies on all data (even coherent DMA data) being in cache,
	 * thus allocate normal cached memory.
	 *
	 * The gains with IOC are two pronged:
	 *   -For streaming data, it elides the need for cache maintenance,
	 *    saving cycles in flush code and bus bandwidth, as otherwise all
	 *    the lines of a buffer would need to be flushed out to memory.
	 *   -For coherent data, reads/writes to buffers terminate early in
	 *    cache (vs. always going to memory), thus they are faster.
	 */
	if ((is_isa_arcv2() && ioc_exists) ||
	    dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
		return paddr;

	/* This is the kernel virtual address (0x7000_0000 based) */
	kvaddr = ioremap_nocache((unsigned long)paddr, size);
	if (kvaddr == NULL) {
		/* Don't leak the backing pages if the uncached remap fails */
		free_pages_exact(paddr, size);
		return NULL;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although the core does call flush_cache_vmap(), it gets the kvaddr
	 * and hence can't be used to efficiently flush L1 and/or L2, which
	 * need the paddr. Currently flush_cache_vmap() nukes the L1 cache
	 * completely; this will be optimized in a separate commit.
	 */
	dma_cache_wback_inv((unsigned long)paddr, size);

	return kvaddr;
}

static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) &&
	    !(is_isa_arcv2() && ioc_exists))
		iounmap((void __force __iomem *)vaddr);

	free_pages_exact((void *)dma_handle, size);
}
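
/*
 * Example (illustrative sketch only, not part of this file): a hypothetical
 * driver reaches arc_dma_alloc()/arc_dma_free() above through the generic
 * DMA API. The function name, @dev and @ring_size below are placeholders,
 * not part of the ARC port.
 */
static int __maybe_unused example_setup_coherent_ring(struct device *dev,
		size_t ring_size)
{
	dma_addr_t ring_dma;
	void *ring;

	/* Lands in arc_dma_alloc(): uncached kvaddr unless IOC is present */
	ring = dma_alloc_coherent(dev, ring_size, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program @ring_dma into the device, access @ring from the CPU ... */

	dma_free_coherent(dev, ring_size, ring, ring_dma);
	return 0;
}
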
/*
 * Streaming DMA Mapping API...
 *
 * The CPU accesses the page via its normal (cached) paddr, thus it needs to
 * be explicitly made consistent before each use.
 */
static void _dma_cache_sync(unsigned long paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;
	default:
		pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
	}
}

static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	unsigned long paddr = page_to_phys(page) + offset;

	_dma_cache_sync(paddr, size, dir);
	return (dma_addr_t)paddr;
}

static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
					      s->length, dir);

	return nents;
}

static void arc_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
}

static void arc_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
}

static void arc_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
}

static void arc_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
}

static int arc_dma_supported(struct device *dev, u64 dma_mask)
{
	/* Support 32 bit DMA mask exclusively */
	return dma_mask == DMA_BIT_MASK(32);
}

struct dma_map_ops arc_dma_ops = {
	.alloc			= arc_dma_alloc,
	.free			= arc_dma_free,
	.map_page		= arc_dma_map_page,
	.map_sg			= arc_dma_map_sg,
	.sync_single_for_device	= arc_dma_sync_single_for_device,
	.sync_single_for_cpu	= arc_dma_sync_single_for_cpu,
	.sync_sg_for_cpu	= arc_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arc_dma_sync_sg_for_device,
	.dma_supported		= arc_dma_supported,
};
EXPORT_SYMBOL(arc_dma_ops);
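
/*
 * Example (illustrative sketch only, not part of this file): streaming DMA as
 * a hypothetical driver would issue it, exercising arc_dma_map_page() and the
 * sync hooks above via the generic wrappers. @dev, @buf, @len and the function
 * name are placeholders.
 */
static int __maybe_unused example_tx_buffer(struct device *dev, void *buf,
		size_t len)
{
	dma_addr_t dma;

	/* Writes back CPU-side dirty lines before the device reads @buf */
	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... hand @dma to the device and wait for completion ... */

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}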