// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Omnitek Scatter-Gather DMA Controller
 *
 *  Copyright 2012-2015 Cisco Systems, Inc. and/or its affiliates.
 *  All rights reserved.
 */

#include <linux/string.h>
#include <linux/io.h>
#include <linux/pci_regs.h>
#include <linux/spinlock.h>

#include "cobalt-driver.h"
#include "cobalt-omnitek.h"

/* descriptor */
#define END_OF_CHAIN		(1 << 1)
#define INTERRUPT_ENABLE	(1 << 2)
#define WRITE_TO_PCI		(1 << 3)
#define READ_FROM_PCI		(0 << 3)
#define DESCRIPTOR_FLAG_MSK	(END_OF_CHAIN | INTERRUPT_ENABLE | WRITE_TO_PCI)
#define NEXT_ADRS_MSK		0xffffffe0

/* control/status register */
#define ENABLE			(1 << 0)
#define START			(1 << 1)
#define ABORT			(1 << 2)
#define DONE			(1 << 4)
#define SG_INTERRUPT		(1 << 5)
#define EVENT_INTERRUPT		(1 << 6)
#define SCATTER_GATHER_MODE	(1 << 8)
#define DISABLE_VIDEO_RESYNC	(1 << 9)
#define EVENT_INTERRUPT_ENABLE	(1 << 10)
#define DIRECTIONAL_MSK		(3 << 16)
#define INPUT_ONLY		(0 << 16)
#define OUTPUT_ONLY		(1 << 16)
#define BIDIRECTIONAL		(2 << 16)
#define DMA_TYPE_MEMORY		(0 << 18)
#define DMA_TYPE_FIFO		(1 << 18)

#define BASE			(cobalt->bar0)
#define CAPABILITY_HEADER	(BASE)
#define CAPABILITY_REGISTER	(BASE + 0x04)
#define PCI_64BIT		(1 << 8)
#define LOCAL_64BIT		(1 << 9)
#define INTERRUPT_STATUS	(BASE + 0x08)
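/* Per-channel registers: channel c occupies a 0x40-byte window at BASE + 0x40 */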
#define PCI(c)			(BASE + 0x40 + ((c) * 0x40))
#define SIZE(c)			(BASE + 0x58 + ((c) * 0x40))
#define DESCRIPTOR(c)		(BASE + 0x50 + ((c) * 0x40))
#define CS_REG(c)		(BASE + 0x60 + ((c) * 0x40))
#define BYTES_TRANSFERRED(c)	(BASE + 0x64 + ((c) * 0x40))

static char *get_dma_direction(u32 status)
{
	switch (status & DIRECTIONAL_MSK) {
	case INPUT_ONLY: return "Input";
	case OUTPUT_ONLY: return "Output";
	case BIDIRECTIONAL: return "Bidirectional";
	}
	return "";
}

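/*
 * Log the DMA capability header (ID, version, next capability, size),
 * the PCIe/local bus widths and the type and direction of each channel.
 */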
static void show_dma_capability(struct cobalt *cobalt)
{
	u32 header = ioread32(CAPABILITY_HEADER);
	u32 capa = ioread32(CAPABILITY_REGISTER);
	u32 i;

	cobalt_info("Omnitek DMA capability: ID 0x%02x Version 0x%02x Next 0x%x Size 0x%x\n",
		    header & 0xff, (header >> 8) & 0xff,
		    (header >> 16) & 0xffff, (capa >> 24) & 0xff);

	switch ((capa >> 8) & 0x3) {
	case 0:
		cobalt_info("Omnitek DMA: 32 bits PCIe and Local\n");
		break;
	case 1:
		cobalt_info("Omnitek DMA: 64 bits PCIe, 32 bits Local\n");
		break;
	case 3:
		cobalt_info("Omnitek DMA: 64 bits PCIe and Local\n");
		break;
	}

	for (i = 0; i < (capa & 0xf); i++) {
		u32 status = ioread32(CS_REG(i));

		cobalt_info("Omnitek DMA channel #%d: %s %s\n", i,
			    status & DMA_TYPE_FIFO ? "FIFO" : "MEMORY",
			    get_dma_direction(status));
	}
}

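/*
 * Point the channel at the 64-bit bus address of the first descriptor
 * and kick it off in scatter-gather mode.
 */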
void omni_sg_dma_start(struct cobalt_stream *s, struct sg_dma_desc_info *desc)
{
	struct cobalt *cobalt = s->cobalt;

	iowrite32((u32)((u64)desc->bus >> 32), DESCRIPTOR(s->dma_channel) + 4);
	iowrite32((u32)desc->bus & NEXT_ADRS_MSK, DESCRIPTOR(s->dma_channel));
	iowrite32(ENABLE | SCATTER_GATHER_MODE | START, CS_REG(s->dma_channel));
}

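/* True once the channel reports DONE in its control/status register */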
bool is_dma_done(struct cobalt_stream *s)
{
	struct cobalt *cobalt = s->cobalt;

	if (ioread32(CS_REG(s->dma_channel)) & DONE)
		return true;

	return false;
}

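/* Request an abort only if a transfer is still in flight */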
void omni_sg_dma_abort_channel(struct cobalt_stream *s)
{
	struct cobalt *cobalt = s->cobalt;

	if (!is_dma_done(s))
		iowrite32(ABORT, CS_REG(s->dma_channel));
}

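/*
 * Read the channel count and PCIe address width from the capability
 * register, abort any channel that is still busy, and derive the index
 * of the first FIFO channel by counting the memory channels (this
 * assumes memory channels precede FIFO channels).
 */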
int omni_sg_dma_init(struct cobalt *cobalt)
{
	u32 capa = ioread32(CAPABILITY_REGISTER);
	int i;

	cobalt->first_fifo_channel = 0;
	cobalt->dma_channels = capa & 0xf;
	cobalt->pci_32_bit = !(capa & PCI_64BIT);

	for (i = 0; i < cobalt->dma_channels; i++) {
		u32 status = ioread32(CS_REG(i));

		if (!(status & DONE))
			iowrite32(ABORT, CS_REG(i));

		if (!(status & DMA_TYPE_FIFO))
			cobalt->first_fifo_channel++;
	}
	show_dma_capability(cobalt);
	return 0;
}

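/*
 * Build a circular descriptor chain for one frame.  Each descriptor
 * covers at most one scatterlist segment; once 'width' bytes of a line
 * have been mapped, the remainder of the 'stride' is skipped.  With a
 * single-segment scatterlist the first descriptor is split in two so
 * the chain always holds at least two entries, and the last descriptor
 * links back to the first with INTERRUPT_ENABLE set.
 */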
int descriptor_list_create(struct cobalt *cobalt,
		struct scatterlist *scatter_list, bool to_pci, unsigned sglen,
		unsigned size, unsigned width, unsigned stride,
		struct sg_dma_desc_info *desc)
{
	struct sg_dma_descriptor *d = (struct sg_dma_descriptor *)desc->virt;
	dma_addr_t next = desc->bus;
	unsigned offset = 0;
	unsigned copy_bytes = width;
	unsigned copied = 0;
	bool first = true;

	/* Must be 4-byte aligned */
	WARN_ON(sg_dma_address(scatter_list) & 3);
	WARN_ON(size & 3);
	WARN_ON(next & 3);
	WARN_ON(stride & 3);
	WARN_ON(stride < width);
	if (width >= stride)
		copy_bytes = stride = size;

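	/*
	 * Emit one descriptor per contiguous run: each run is bounded by
	 * the current scatterlist segment and by the bytes still owed for
	 * the current line; stride padding is skipped afterwards.
	 */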
	while (size) {
		dma_addr_t addr = sg_dma_address(scatter_list) + offset;
		unsigned bytes;

		if (addr == 0)
			return -EFAULT;
		if (cobalt->pci_32_bit) {
			WARN_ON((u64)addr >> 32);
			if ((u64)addr >> 32)
				return -EFAULT;
		}

		/* PCIe address */
		d->pci_l = addr & 0xffffffff;
		/*
		 * If dma_addr_t is only 32 bits wide, gcc evaluates
		 * addr >> 32 as addr >> 0, so cast to u64 first.
		 */
		d->pci_h = (u64)addr >> 32;

		/* Sync to start of streaming frame */
		d->local = 0;
		d->reserved0 = 0;

		/* Transfer bytes */
		bytes = min(sg_dma_len(scatter_list) - offset,
				copy_bytes - copied);

		if (first) {
			if (to_pci)
				d->local = 0x11111111;
			first = false;
			if (sglen == 1) {
				/*
				 * Make sure there are always at least two
				 * descriptors.
				 */
				d->bytes = (bytes / 2) & ~3;
				d->reserved1 = 0;
				size -= d->bytes;
				copied += d->bytes;
				offset += d->bytes;
				addr += d->bytes;
				next += sizeof(struct sg_dma_descriptor);
				d->next_h = (u32)((u64)next >> 32);
				d->next_l = (u32)next |
					(to_pci ? WRITE_TO_PCI : 0);
				bytes -= d->bytes;
				d++;
				/* PCIe address */
				d->pci_l = addr & 0xffffffff;
				/*
				 * If dma_addr_t is only 32 bits wide, gcc
				 * evaluates addr >> 32 as addr >> 0, so cast
				 * to u64 first.
				 */
				d->pci_h = (u64)addr >> 32;

				/* Sync to start of streaming frame */
				d->local = 0;
				d->reserved0 = 0;
			}
		}

		d->bytes = bytes;
		d->reserved1 = 0;
		size -= bytes;
		copied += bytes;
		offset += bytes;

		if (copied == copy_bytes) {
			while (copied < stride) {
				bytes = min(sg_dma_len(scatter_list) - offset,
						stride - copied);
				copied += bytes;
				offset += bytes;
				size -= bytes;
				if (sg_dma_len(scatter_list) == offset) {
					offset = 0;
					scatter_list = sg_next(scatter_list);
				}
			}
			copied = 0;
		} else {
			offset = 0;
			scatter_list = sg_next(scatter_list);
		}

		/* Next descriptor + control bits */
		next += sizeof(struct sg_dma_descriptor);
		if (size == 0) {
			/* Loop back to the first descriptor */
			d->next_h = (u32)((u64)desc->bus >> 32);
			d->next_l = (u32)desc->bus |
				(to_pci ? WRITE_TO_PCI : 0) | INTERRUPT_ENABLE;
			if (!to_pci)
				d->local = 0x22222222;
			desc->last_desc_virt = d;
		} else {
			d->next_h = (u32)((u64)next >> 32);
			d->next_l = (u32)next | (to_pci ? WRITE_TO_PCI : 0);
		}
		d++;
	}
	return 0;
}

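/*
 * Link the last descriptor of 'this' list to the first descriptor of
 * 'next', preserving the transfer direction and enabling the interrupt;
 * with no successor the chain is terminated via END_OF_CHAIN.
 */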
void descriptor_list_chain(struct sg_dma_desc_info *this,
			   struct sg_dma_desc_info *next)
{
	struct sg_dma_descriptor *d = this->last_desc_virt;
	u32 direction = d->next_l & WRITE_TO_PCI;

	if (next == NULL) {
		d->next_h = 0;
		d->next_l = direction | INTERRUPT_ENABLE | END_OF_CHAIN;
	} else {
		d->next_h = (u32)((u64)next->bus >> 32);
		d->next_l = (u32)next->bus | direction | INTERRUPT_ENABLE;
	}
}

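/* Allocate/free the coherent DMA memory backing a descriptor list */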
void *descriptor_list_allocate(struct sg_dma_desc_info *desc, size_t bytes)
{
	desc->size = bytes;
	desc->virt = dma_alloc_coherent(desc->dev, bytes,
					&desc->bus, GFP_KERNEL);
	return desc->virt;
}

void descriptor_list_free(struct sg_dma_desc_info *desc)
{
	if (desc->virt)
		dma_free_coherent(desc->dev, desc->size,
				  desc->virt, desc->bus);
	desc->virt = NULL;
}

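/*
 * The helpers below patch the link word of the last descriptor in
 * place: toggle its interrupt, loop it back to the first descriptor,
 * or mark it end-of-chain.
 */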
void descriptor_list_interrupt_enable(struct sg_dma_desc_info *desc)
{
	struct sg_dma_descriptor *d = desc->last_desc_virt;

	d->next_l |= INTERRUPT_ENABLE;
}

void descriptor_list_interrupt_disable(struct sg_dma_desc_info *desc)
{
	struct sg_dma_descriptor *d = desc->last_desc_virt;

	d->next_l &= ~INTERRUPT_ENABLE;
}

void descriptor_list_loopback(struct sg_dma_desc_info *desc)
{
	struct sg_dma_descriptor *d = desc->last_desc_virt;

	d->next_h = (u32)((u64)desc->bus >> 32);
	d->next_l = (u32)desc->bus | (d->next_l & DESCRIPTOR_FLAG_MSK);
}

void descriptor_list_end_of_chain(struct sg_dma_desc_info *desc)
{
	struct sg_dma_descriptor *d = desc->last_desc_virt;

	d->next_l |= END_OF_CHAIN;
}