#include <linux/prefetch.h>

/**
 * iommu_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir.
 * @ioc: The I/O Controller.
 * @startsg: The scatter/gather list of coalesced chunks.
 * @nents: The number of entries in the scatter/gather list.
 * @hint: The DMA Hint.
 *
 * This function inserts the coalesced scatter/gather list chunks into the
 * I/O Controller's I/O Pdir.
 */
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
                unsigned long hint,
                void (*iommu_io_pdir_entry)(u64 *, space_t, unsigned long,
                                            unsigned long))
{
        struct scatterlist *dma_sg = startsg;   /* pointer to current DMA */
        unsigned int n_mappings = 0;
        unsigned long dma_offset = 0, dma_len = 0;
        u64 *pdirp = NULL;

        /* Horrible hack. For efficiency's sake, dma_sg starts one
         * entry below the true start (it is immediately incremented
         * in the loop). */
        dma_sg--;

        while (nents-- > 0) {
                unsigned long vaddr;
                long size;

                DBG_RUN_SG(" %d : %08lx %p/%05x\n", nents,
                           (unsigned long)sg_dma_address(startsg),
                           sg_virt(startsg), startsg->length);

                /*
                ** Look for the start of a new DMA stream
                */
                if (sg_dma_address(startsg) & PIDE_FLAG) {
                        u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;

                        BUG_ON(pdirp && (dma_len != sg_dma_len(dma_sg)));

                        dma_sg++;

                        dma_len = sg_dma_len(startsg);
                        sg_dma_len(startsg) = 0;
                        dma_offset = (unsigned long) pide & ~IOVP_MASK;
                        n_mappings++;
#if defined(ZX1_SUPPORT)
                        /* Pluto IOMMU IO Virt Address is not zero based */
                        sg_dma_address(dma_sg) = pide | ioc->ibase;
#else
                        /* SBA, ccio, and dino are zero based.
                         * Trying to save a few CPU cycles for most users.
                         */
                        sg_dma_address(dma_sg) = pide;
#endif
                        pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
                        prefetchw(pdirp);
                }

                BUG_ON(pdirp == NULL);

                vaddr = (unsigned long)sg_virt(startsg);
                sg_dma_len(dma_sg) += startsg->length;
                size = startsg->length + dma_offset;
                dma_offset = 0;
#ifdef IOMMU_MAP_STATS
                ioc->msg_pages += startsg->length >> IOVP_SHIFT;
#endif
                do {
                        iommu_io_pdir_entry(pdirp, KERNEL_SPACE,
                                            vaddr, hint);
                        vaddr += IOVP_SIZE;
                        size -= IOVP_SIZE;
                        pdirp++;
                } while (unlikely(size > 0));
                startsg++;
        }
        return n_mappings;
}
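
/*
 * Usage sketch (illustrative only, never compiled): the two helpers in
 * this file are meant to be chained by a driver's map_sg path, as in
 * sba_iommu.c or ccio-dma.c. The skeleton below assumes the sba flavor;
 * GET_IOC(), sba_alloc_range() and sba_io_pdir_entry() belong to the
 * caller, and error handling is elided.
 */
#if 0
static int example_map_sg(struct device *dev, struct scatterlist *sglist,
                          int nents, enum dma_data_direction direction)
{
        struct ioc *ioc = GET_IOC(dev);
        unsigned long flags;
        int coalesced, filled;

        spin_lock_irqsave(&ioc->res_lock, flags);

        /* Pass 1: find the DMA stream breaks and allocate pdir ranges
         * (iommu_coalesce_chunks(), defined below). */
        coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents,
                                          sba_alloc_range);

        /* Pass 2: write a pdir entry for every IOVP-sized page of each
         * stream found in pass 1. */
        filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);

        spin_unlock_irqrestore(&ioc->res_lock, flags);

        /* Both passes must agree on how many streams exist. */
        BUG_ON(coalesced != filled);

        return filled;  /* number of DMA streams mapped */
}
#endif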

/*
** First pass is to walk the SG list and determine where the breaks are
** in the DMA stream. Allocates PDIR entries but does not fill them.
** Returns the number of DMA chunks.
**
** Doing the fill separate from the coalescing/allocation keeps the
** code simpler. Future enhancement could make one pass through
** the sglist do both.
*/
static inline unsigned int
iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
                struct scatterlist *startsg, int nents,
                int (*iommu_alloc_range)(struct ioc *, struct device *, size_t))
{
        struct scatterlist *contig_sg;     /* contig chunk head */
        unsigned long dma_offset, dma_len; /* start/len of DMA stream */
        unsigned int n_mappings = 0;
        unsigned int max_seg_size = min(dma_get_max_seg_size(dev),
                                        (unsigned)DMA_CHUNK_SIZE);
        unsigned int max_seg_boundary = dma_get_seg_boundary(dev) + 1;

        if (max_seg_boundary)   /* check if the addition above didn't overflow */
                max_seg_size = min(max_seg_size, max_seg_boundary);

        while (nents > 0) {

                /*
                ** Prepare for first/next DMA stream
                */
                contig_sg = startsg;
                dma_len = startsg->length;
                dma_offset = startsg->offset;

                /* PARANOID: clear entries */
                sg_dma_address(startsg) = 0;
                sg_dma_len(startsg) = 0;

                /*
                ** This loop terminates one iteration "early" since
                ** it's always looking one "ahead".
                */
                while (--nents > 0) {
                        unsigned long prev_end, sg_start;

                        prev_end = (unsigned long)sg_virt(startsg) +
                                startsg->length;

                        startsg++;
                        sg_start = (unsigned long)sg_virt(startsg);

                        /* PARANOID: clear entries */
                        sg_dma_address(startsg) = 0;
                        sg_dma_len(startsg) = 0;

                        /*
                        ** First make sure current dma stream won't
                        ** exceed max_seg_size if we coalesce the
                        ** next entry.
                        */
                        if (unlikely(ALIGN(dma_len + dma_offset + startsg->length,
                                           IOVP_SIZE) > max_seg_size))
                                break;

                        /*
                        * Next see if we can append the next chunk: it
                        * must start at the address where the previous
                        * entry ended, and that shared boundary must
                        * fall exactly on a page boundary.
                        */
                        if (unlikely((prev_end != sg_start) ||
                                     ((prev_end | sg_start) & ~PAGE_MASK)))
                                break;

                        dma_len += startsg->length;
                }

                /*
                ** End of DMA Stream
                ** Terminate last VCONTIG block.
                ** Allocate space for DMA stream.
                */
                sg_dma_len(contig_sg) = dma_len;
                dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
                sg_dma_address(contig_sg) =
                        PIDE_FLAG
                        | (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT)
                        | dma_offset;
                n_mappings++;
        }

        return n_mappings;
}
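
/*
 * Hedged sketch (never compiled): a standalone restatement of the
 * coalescing predicate in the inner loop above, with a worked example.
 * can_coalesce() is a hypothetical name, not part of this header.
 */
#if 0
static bool can_coalesce(unsigned long prev_end, unsigned long sg_start)
{
        /* Append only when the next chunk starts exactly where the
         * previous one ended AND that shared boundary is page-aligned. */
        return (prev_end == sg_start) &&
               (((prev_end | sg_start) & ~PAGE_MASK) == 0);
}

/* With 4 KiB pages (PAGE_MASK == ~0xfffUL):
 *   can_coalesce(0x2000, 0x2000) -> true    page-aligned join
 *   can_coalesce(0x2800, 0x2800) -> false   join not page-aligned
 *   can_coalesce(0x2000, 0x3000) -> false   chunks not contiguous
 */
#endif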