/*
 * cx88-vbi.c: VBI capture support for cx2388x based TV cards
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>

#include "cx88.h"

static unsigned int vbi_debug;
module_param(vbi_debug, int, 0644);
MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");
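
/*
 * With 0644 permissions the parameter is also writable at runtime via
 * sysfs, e.g. (module name assumed to be cx8800, which this file is
 * built into):
 *	echo 2 > /sys/module/cx8800/parameters/vbi_debug
 */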

/* wrapped in do { } while (0) so the macro is safe in unbraced if/else */
#define dprintk(level, fmt, arg...) do {				\
	if (vbi_debug >= level)						\
		printk(KERN_DEBUG "%s: " fmt, dev->core->name, ## arg);	\
} while (0)

/* ------------------------------------------------------------------ */

int cx8800_vbi_fmt(struct file *file, void *priv,
		   struct v4l2_format *f)
{
	struct cx8800_dev *dev = video_drvdata(file);

	f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
	f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
	f->fmt.vbi.offset = 244;

	if (dev->core->tvnorm & V4L2_STD_525_60) {
		/* NTSC */
		f->fmt.vbi.sampling_rate = 28636363;
		f->fmt.vbi.start[0] = 10;
		f->fmt.vbi.start[1] = 273;
		f->fmt.vbi.count[0] = VBI_LINE_NTSC_COUNT;
		f->fmt.vbi.count[1] = VBI_LINE_NTSC_COUNT;
	} else if (dev->core->tvnorm & V4L2_STD_625_50) {
		/* PAL */
		f->fmt.vbi.sampling_rate = 35468950;
		f->fmt.vbi.start[0] = V4L2_VBI_ITU_625_F1_START + 5;
		f->fmt.vbi.start[1] = V4L2_VBI_ITU_625_F2_START + 5;
		f->fmt.vbi.count[0] = VBI_LINE_PAL_COUNT;
		f->fmt.vbi.count[1] = VBI_LINE_PAL_COUNT;
	}
	return 0;
}
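
/*
 * Worked size example, consistent with queue_setup() below: each field
 * carries count[n] lines of samples_per_line greyscale bytes, so an NTSC
 * buffer needs 2 * VBI_LINE_NTSC_COUNT * VBI_LINE_LENGTH bytes.
 */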
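/*
 * The register masks below are kept as in the original driver.
 * Comparing with the video capture path in cx88-video.c suggests,
 * without the datasheet at hand, that 0x18 in VID_CAPTURE_CONTROL and
 * 0x88 in MO_VID_DMACNTRL are the VBI counterparts of the video enable
 * bits 0x06 and 0x11.
 */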
static int cx8800_start_vbi_dma(struct cx8800_dev    *dev,
				struct cx88_dmaqueue *q,
				struct cx88_buffer   *buf)
{
	struct cx88_core *core = dev->core;

	/* setup fifo + format */
	cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH24],
				VBI_LINE_LENGTH, buf->risc.dma);

	cx_write(MO_VBOS_CONTROL, ((1 << 18) |	/* comb filter delay fixup */
				   (1 << 15) |	/* enable vbi capture */
				   (1 << 11)));

	/* reset counter */
	cx_write(MO_VBI_GPCNTRL, GP_COUNT_CONTROL_RESET);
	q->count = 1;

	/* enable irqs */
	cx_set(MO_PCI_INTMSK, core->pci_irqmask | PCI_INT_VIDINT);
	cx_set(MO_VID_INTMSK, 0x0f0088);

	/* enable capture */
	cx_set(VID_CAPTURE_CONTROL, 0x18);

	/* start dma */
	cx_set(MO_DEV_CNTRL2, (1 << 5));
	cx_set(MO_VID_DMACNTRL, 0x88);

	return 0;
}

void cx8800_stop_vbi_dma(struct cx8800_dev *dev)
{
	struct cx88_core *core = dev->core;

	/* stop dma */
	cx_clear(MO_VID_DMACNTRL, 0x88);

	/* disable capture */
	cx_clear(VID_CAPTURE_CONTROL, 0x18);

	/* disable irqs */
	cx_clear(MO_PCI_INTMSK, PCI_INT_VIDINT);
	cx_clear(MO_VID_INTMSK, 0x0f0088);
}
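
/*
 * Presumably called from the cx8800 IRQ handling in cx88-video.c to
 * re-arm VBI DMA after a FIFO overflow or RISC error.
 */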
int cx8800_restart_vbi_queue(struct cx8800_dev    *dev,
			     struct cx88_dmaqueue *q)
{
	struct cx88_buffer *buf;

	if (list_empty(&q->active))
		return 0;

	buf = list_entry(q->active.next, struct cx88_buffer, list);
	dprintk(2, "restart_queue [%p/%d]: restart dma\n",
		buf, buf->vb.v4l2_buf.index);
	cx8800_start_vbi_dma(dev, q, buf);
	list_for_each_entry(buf, &q->active, list)
		buf->count = q->count++;
	return 0;
}

/* ------------------------------------------------------------------ */

static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
		       unsigned int *num_buffers, unsigned int *num_planes,
		       unsigned int sizes[], void *alloc_ctxs[])
{
	struct cx8800_dev *dev = q->drv_priv;

	*num_planes = 1;
	/* two fields of VBI_LINE_*_COUNT lines, VBI_LINE_LENGTH bytes each */
	if (dev->core->tvnorm & V4L2_STD_525_60)
		sizes[0] = VBI_LINE_NTSC_COUNT * VBI_LINE_LENGTH * 2;
	else
		sizes[0] = VBI_LINE_PAL_COUNT * VBI_LINE_LENGTH * 2;
	return 0;
}

static int buffer_prepare(struct vb2_buffer *vb)
{
	struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
	struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
	unsigned int lines;
	unsigned int size;
	int rc;

	if (dev->core->tvnorm & V4L2_STD_525_60)
		lines = VBI_LINE_NTSC_COUNT;
	else
		lines = VBI_LINE_PAL_COUNT;
	size = lines * VBI_LINE_LENGTH * 2;
	if (vb2_plane_size(vb, 0) < size)
		return -EINVAL;
	vb2_set_plane_payload(vb, 0, size);

	rc = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
	if (!rc)
		return -EIO;

	/* top field at offset 0, bottom field right behind it */
	cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl,
			 0, VBI_LINE_LENGTH * lines,
			 VBI_LINE_LENGTH, 0,
			 lines);
	return 0;
}

static void buffer_finish(struct vb2_buffer *vb)
{
	struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
	struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
	struct cx88_riscmem *risc = &buf->risc;

	/* free the RISC program memory and drop the DMA mapping */
	if (risc->cpu)
		pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
	memset(risc, 0, sizeof(*risc));

	dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
}
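/*
 * Each buffer's RISC program ends in a jump.  On an empty queue DMA is
 * started immediately; otherwise the previous buffer's jump is re-pointed
 * at the new buffer, chaining the programs without stopping the engine.
 */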
static void buffer_queue(struct vb2_buffer *vb)
{
	struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
	struct cx88_buffer *buf = container_of(vb, struct cx88_buffer, vb);
	struct cx88_buffer *prev;
	struct cx88_dmaqueue *q = &dev->vbiq;

	/* add jump to start */
	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 8);

	if (list_empty(&q->active)) {
		list_add_tail(&buf->list, &q->active);
		cx8800_start_vbi_dma(dev, q, buf);
		buf->count = q->count++;
		dprintk(2, "[%p/%d] vbi_queue - first active\n",
			buf, buf->vb.v4l2_buf.index);
	} else {
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(q->active.prev, struct cx88_buffer, list);
		list_add_tail(&buf->list, &q->active);
		buf->count = q->count++;
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(2, "[%p/%d] buffer_queue - append to active\n",
			buf, buf->vb.v4l2_buf.index);
	}
}

static int start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct cx8800_dev *dev = q->drv_priv;
	struct cx88_dmaqueue *dmaq = &dev->vbiq;
	struct cx88_buffer *buf = list_entry(dmaq->active.next,
			struct cx88_buffer, list);

	cx8800_start_vbi_dma(dev, dmaq, buf);
	return 0;
}

static void stop_streaming(struct vb2_queue *q)
{
	struct cx8800_dev *dev = q->drv_priv;
	struct cx88_core *core = dev->core;
	struct cx88_dmaqueue *dmaq = &dev->vbiq;
	unsigned long flags;

	/*
	 * 0x11 and 0x06 look like the video-path enable bits (cf. the
	 * video capture code); the VBI bits proper (0x88 and 0x18) are
	 * cleared by cx8800_stop_vbi_dma() below.
	 */
	cx_clear(MO_VID_DMACNTRL, 0x11);
	cx_clear(VID_CAPTURE_CONTROL, 0x06);
	cx8800_stop_vbi_dma(dev);

	/* hand every still-queued buffer back to vb2 in the error state */
	spin_lock_irqsave(&dev->slock, flags);
	while (!list_empty(&dmaq->active)) {
		struct cx88_buffer *buf = list_entry(dmaq->active.next,
			struct cx88_buffer, list);

		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}
231 
232 const struct vb2_ops cx8800_vbi_qops = {
233 	.queue_setup    = queue_setup,
234 	.buf_prepare  = buffer_prepare,
235 	.buf_finish = buffer_finish,
236 	.buf_queue    = buffer_queue,
237 	.wait_prepare = vb2_ops_wait_prepare,
238 	.wait_finish = vb2_ops_wait_finish,
239 	.start_streaming = start_streaming,
240 	.stop_streaming = stop_streaming,
241 };
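
/*
 * Usage sketch (not part of this file): roughly how the owning driver
 * (cx88-video.c) is expected to wire these ops into a vb2 queue.  The
 * field values below are illustrative assumptions, not copied from
 * cx88-video.c:
 *
 *	struct vb2_queue *q = &dev->vb2_vbiq;
 *
 *	q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
 *	q->gfp_flags = GFP_DMA32;
 *	q->min_buffers_needed = 2;
 *	q->drv_priv = dev;
 *	q->buf_struct_size = sizeof(struct cx88_buffer);
 *	q->ops = &cx8800_vbi_qops;
 *	q->mem_ops = &vb2_dma_sg_memops;
 *	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *	q->lock = &core->lock;
 *	err = vb2_queue_init(q);
 */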