// SPDX-License-Identifier: GPL-2.0-or-later
/*
    User DMA

    Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004 Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>

 */

#include "ivtv-driver.h"
#include "ivtv-udma.h"
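
/*
 * Rough usage sketch (modelled on how ivtvfb drives this API; error
 * handling and the wait for completion are simplified here):
 *
 *	ivtv_udma_alloc(itv);			// once, before first use
 *	ret = ivtv_udma_setup(itv, dest_addr, userbuf, size);
 *	if (ret >= 0) {
 *		ivtv_udma_prepare(itv);		// starts now, or when idle
 *		// ... wait for the DMA-complete interrupt ...
 *		ivtv_udma_unmap(itv);
 *	}
 */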
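/*
 * Compute the page span of a user buffer: the first and last page frame
 * numbers, the byte offset into the first page, and the number of bytes
 * used in the last page (the tail).  For a single-page buffer the leading
 * offset is subtracted so that the tail equals the buffer size.
 */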
void ivtv_udma_get_page_info(struct ivtv_dma_page_info *dma_page, unsigned long first, unsigned long size)
{
	dma_page->uaddr = first & PAGE_MASK;
	dma_page->offset = first & ~PAGE_MASK;
	dma_page->tail = 1 + ((first + size - 1) & ~PAGE_MASK);
	dma_page->first = (first & PAGE_MASK) >> PAGE_SHIFT;
	dma_page->last = ((first + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	dma_page->page_count = dma_page->last - dma_page->first + 1;
	if (dma_page->page_count == 1)
		dma_page->tail -= dma_page->offset;
}

int ivtv_udma_fill_sg_list(struct ivtv_user_dma *dma, struct ivtv_dma_page_info *dma_page, int map_offset)
{
	int i, offset;
	unsigned long flags;

	if (map_offset < 0)
		return map_offset;

	offset = dma_page->offset;

	/* Fill SG Array with new values */
	for (i = 0; i < dma_page->page_count; i++) {
		unsigned int len = (i == dma_page->page_count - 1) ?
			dma_page->tail : PAGE_SIZE - offset;

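		/*
		 * Highmem pages have no permanent kernel mapping, so copy
		 * the data into a lowmem bounce page and hand that page to
		 * the scatterlist instead.
		 */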
		if (PageHighMem(dma->map[map_offset])) {
			void *src;

			if (dma->bouncemap[map_offset] == NULL)
				dma->bouncemap[map_offset] = alloc_page(GFP_KERNEL);
			if (dma->bouncemap[map_offset] == NULL)
				return -1;
			local_irq_save(flags);
			src = kmap_atomic(dma->map[map_offset]) + offset;
			memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
			kunmap_atomic(src);
			local_irq_restore(flags);
			sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset);
		} else {
			sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset], len, offset);
		}
		offset = 0;
		map_offset++;
	}
	return map_offset;
}

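/*
 * Translate the DMA-mapped scatterlist into the card's own descriptor
 * array: a little-endian size, source bus address and destination offset
 * per entry.  Once 'split' bytes have been emitted, the destination jumps
 * to buffer_offset_2; passing split = -1 (as ivtv_udma_setup() below
 * does) effectively disables the wrap.
 */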
void ivtv_udma_fill_sg_array(struct ivtv_user_dma *dma, u32 buffer_offset, u32 buffer_offset_2, u32 split)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(dma->SGlist, sg, dma->SG_length, i) {
		dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg));
		dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg));
		dma->SGarray[i].dst = cpu_to_le32(buffer_offset);
		buffer_offset += sg_dma_len(sg);

		split -= sg_dma_len(sg);
		if (split == 0)
			buffer_offset = buffer_offset_2;
	}
}

/* User DMA Buffers */
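/*
 * Map the SG descriptor array for the device once; the mapping is kept
 * until ivtv_udma_free() tears it down.
 */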
void ivtv_udma_alloc(struct ivtv *itv)
{
	if (itv->udma.SG_handle == 0) {
		/* Map DMA Page Array Buffer */
		itv->udma.SG_handle = dma_map_single(&itv->pdev->dev,
						     itv->udma.SGarray,
						     sizeof(itv->udma.SGarray),
						     DMA_TO_DEVICE);
		ivtv_udma_sync_for_cpu(itv);
	}
}
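/*
 * Prepare a user buffer for DMA to the card: pin the user pages, build
 * and map the scatterlist (bouncing any highmem pages), fill the card's
 * SG descriptors and tag the last one to raise an interrupt when the
 * transfer completes.  Returns the number of pinned pages on success or
 * a negative error code.
 */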
int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
		    void __user *userbuf, int size_in_bytes)
{
	struct ivtv_dma_page_info user_dma;
	struct ivtv_user_dma *dma = &itv->udma;
	int err;

	IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);

	/* Still in USE */
	if (dma->SG_length || dma->page_count) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: SG_length %d page_count %d still full?\n",
				dma->SG_length, dma->page_count);
		return -EBUSY;
	}

	ivtv_udma_get_page_info(&user_dma, (unsigned long)userbuf, size_in_bytes);

	if (user_dma.page_count <= 0) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: Error %d page_count from %d bytes %d offset\n",
				user_dma.page_count, size_in_bytes, user_dma.offset);
		return -EINVAL;
	}

	/* Pin user pages for DMA Xfer */
	err = pin_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
				      dma->map, 0);

	if (user_dma.page_count != err) {
		IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
				err, user_dma.page_count);
		if (err >= 0) {
			unpin_user_pages(dma->map, err);
			return -EINVAL;
		}
		return err;
	}

	dma->page_count = user_dma.page_count;

	/* Fill SG List with new values */
	if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
		IVTV_DEBUG_WARN("%s: could not allocate bounce buffers for highmem userspace buffers\n",
				__func__);
		unpin_user_pages(dma->map, dma->page_count);
		dma->page_count = 0;
		return -ENOMEM;
	}

	/* Map SG List */
	dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist,
				    dma->page_count, DMA_TO_DEVICE);
	if (!dma->SG_length) {
		IVTV_DEBUG_WARN("%s: DMA map error, SG_length is 0\n", __func__);
		unpin_user_pages(dma->map, dma->page_count);
		dma->page_count = 0;
		return -EINVAL;
	}

	/* Fill SG Array with new values */
	ivtv_udma_fill_sg_array(dma, ivtv_dest_addr, 0, -1);

	/* Tag SG Array with Interrupt Bit */
	dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

	ivtv_udma_sync_for_device(itv);
	return dma->page_count;
}
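/*
 * Undo ivtv_udma_setup(): unmap the scatterlist and release the pinned
 * user pages.  Safe to call when no transfer is set up.
 */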
void ivtv_udma_unmap(struct ivtv *itv)
{
	struct ivtv_user_dma *dma = &itv->udma;

	IVTV_DEBUG_INFO("ivtv_unmap_user_dma\n");

	/* Nothing to free */
	if (dma->page_count == 0)
		return;

	/* Unmap Scatterlist */
	if (dma->SG_length) {
		dma_unmap_sg(&itv->pdev->dev, dma->SGlist, dma->page_count,
			     DMA_TO_DEVICE);
		dma->SG_length = 0;
	}
	/* sync DMA */
	ivtv_udma_sync_for_cpu(itv);

	unpin_user_pages(dma->map, dma->page_count);
	dma->page_count = 0;
}
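/*
 * Final teardown: release the descriptor array mapping, any scatterlist
 * mapping that is still live, and the highmem bounce pages.
 */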
void ivtv_udma_free(struct ivtv *itv)
{
	int i;

	/* Unmap SG Array */
	if (itv->udma.SG_handle) {
		dma_unmap_single(&itv->pdev->dev, itv->udma.SG_handle,
				 sizeof(itv->udma.SGarray), DMA_TO_DEVICE);
	}

	/* Unmap Scatterlist */
	if (itv->udma.SG_length) {
		dma_unmap_sg(&itv->pdev->dev, itv->udma.SGlist,
			     itv->udma.page_count, DMA_TO_DEVICE);
	}

	for (i = 0; i < IVTV_DMA_SG_OSD_ENT; i++) {
		if (itv->udma.bouncemap[i])
			__free_page(itv->udma.bouncemap[i]);
	}
}
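/*
 * Kick off the transfer: point the card at the descriptor list and set
 * the transfer-start bit.  Called with itv->dma_reg_lock held.
 */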
void ivtv_udma_start(struct ivtv *itv)
{
	IVTV_DEBUG_DMA("start UDMA\n");
	write_reg(itv->udma.SG_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	set_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
}
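/*
 * Start the transfer now if the DMA engine is idle, otherwise mark it
 * pending so the IRQ handler starts it once the current transfer ends.
 */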
void ivtv_udma_prepare(struct ivtv *itv)
{
	unsigned long flags;

	spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		ivtv_udma_start(itv);
	else
		set_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
	spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}