xref: /linux/drivers/media/pci/cx23885/cx23885-core.c (revision 08ec212c0f92cbf30e3ecc7349f18151714041d6)
1 /*
2  *  Driver for the Conexant CX23885 PCIe bridge
3  *
4  *  Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *
17  *  You should have received a copy of the GNU General Public License
18  *  along with this program; if not, write to the Free Software
19  *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  */
21 
22 #include <linux/init.h>
23 #include <linux/list.h>
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <linux/kmod.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/interrupt.h>
30 #include <linux/delay.h>
31 #include <asm/div64.h>
32 #include <linux/firmware.h>
33 
34 #include "cx23885.h"
35 #include "cimax2.h"
36 #include "altera-ci.h"
37 #include "cx23888-ir.h"
38 #include "cx23885-ir.h"
39 #include "cx23885-av.h"
40 #include "cx23885-input.h"
41 
42 MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
43 MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
44 MODULE_LICENSE("GPL");
45 MODULE_VERSION(CX23885_VERSION);
46 
47 static unsigned int debug;
48 module_param(debug, int, 0644);
49 MODULE_PARM_DESC(debug, "enable debug messages");
50 
51 static unsigned int card[]  = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
52 module_param_array(card,  int, NULL, 0444);
53 MODULE_PARM_DESC(card, "card type");
54 
55 #define dprintk(level, fmt, arg...)\
56 	do { if (debug >= level)\
57 		printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
58 	} while (0)
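/* Note: dprintk() relies on a 'struct cx23885_dev *dev' being in scope at
 * the call site and compares 'level' against the 'debug' module parameter. */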
59 
60 static unsigned int cx23885_devcount;
61 
62 #define NO_SYNC_LINE (-1U)
63 
64 /* FIXME, these allocations will change when
65  * analog arrives. To be reviewed.
66  * CX23887 Assumptions
67  * 1 line = 16 bytes of CDT
68  * cmds size = 80
69  * cdt size = 16 * linesize
70  * iqsize = 64
71  * maxlines = 6
72  *
73  * Address Space:
74  * 0x00000000 0x00008fff FIFO clusters
75  * 0x00010000 0x000104af Channel Management Data Structures
76  * 0x000104b0 0x000104ff Free
77  * 0x00010500 0x000108bf 15 channels * iqsize
78  * 0x000108c0 0x000108ff Free
79  * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
80  *                       15 channels * (iqsize + (maxlines * linesize))
81  * 0x00010ea0 0x00010xxx Free
82  */
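/* Worked example of the sizing above: each channel's CMDS block is 80
 * (0x50) bytes and each CDT is maxlines * linesize = 6 * 16 = 0x60 bytes,
 * which matches the spacing of adjacent entries in the tables below
 * (e.g. .cmds_start 0x10000/0x10050/0x100A0 and .cdt
 * 0x107b0/0x10810/0x10870 in the CX23887 table). */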
83 
84 static struct sram_channel cx23885_sram_channels[] = {
85 	[SRAM_CH01] = {
86 		.name		= "VID A",
87 		.cmds_start	= 0x10000,
88 		.ctrl_start	= 0x10380,
89 		.cdt		= 0x104c0,
90 		.fifo_start	= 0x40,
91 		.fifo_size	= 0x2800,
92 		.ptr1_reg	= DMA1_PTR1,
93 		.ptr2_reg	= DMA1_PTR2,
94 		.cnt1_reg	= DMA1_CNT1,
95 		.cnt2_reg	= DMA1_CNT2,
96 	},
97 	[SRAM_CH02] = {
98 		.name		= "ch2",
99 		.cmds_start	= 0x0,
100 		.ctrl_start	= 0x0,
101 		.cdt		= 0x0,
102 		.fifo_start	= 0x0,
103 		.fifo_size	= 0x0,
104 		.ptr1_reg	= DMA2_PTR1,
105 		.ptr2_reg	= DMA2_PTR2,
106 		.cnt1_reg	= DMA2_CNT1,
107 		.cnt2_reg	= DMA2_CNT2,
108 	},
109 	[SRAM_CH03] = {
110 		.name		= "TS1 B",
111 		.cmds_start	= 0x100A0,
112 		.ctrl_start	= 0x10400,
113 		.cdt		= 0x10580,
114 		.fifo_start	= 0x5000,
115 		.fifo_size	= 0x1000,
116 		.ptr1_reg	= DMA3_PTR1,
117 		.ptr2_reg	= DMA3_PTR2,
118 		.cnt1_reg	= DMA3_CNT1,
119 		.cnt2_reg	= DMA3_CNT2,
120 	},
121 	[SRAM_CH04] = {
122 		.name		= "ch4",
123 		.cmds_start	= 0x0,
124 		.ctrl_start	= 0x0,
125 		.cdt		= 0x0,
126 		.fifo_start	= 0x0,
127 		.fifo_size	= 0x0,
128 		.ptr1_reg	= DMA4_PTR1,
129 		.ptr2_reg	= DMA4_PTR2,
130 		.cnt1_reg	= DMA4_CNT1,
131 		.cnt2_reg	= DMA4_CNT2,
132 	},
133 	[SRAM_CH05] = {
134 		.name		= "ch5",
135 		.cmds_start	= 0x0,
136 		.ctrl_start	= 0x0,
137 		.cdt		= 0x0,
138 		.fifo_start	= 0x0,
139 		.fifo_size	= 0x0,
140 		.ptr1_reg	= DMA5_PTR1,
141 		.ptr2_reg	= DMA5_PTR2,
142 		.cnt1_reg	= DMA5_CNT1,
143 		.cnt2_reg	= DMA5_CNT2,
144 	},
145 	[SRAM_CH06] = {
146 		.name		= "TS2 C",
147 		.cmds_start	= 0x10140,
148 		.ctrl_start	= 0x10440,
149 		.cdt		= 0x105e0,
150 		.fifo_start	= 0x6000,
151 		.fifo_size	= 0x1000,
152 		.ptr1_reg	= DMA5_PTR1,
153 		.ptr2_reg	= DMA5_PTR2,
154 		.cnt1_reg	= DMA5_CNT1,
155 		.cnt2_reg	= DMA5_CNT2,
156 	},
157 	[SRAM_CH07] = {
158 		.name		= "TV Audio",
159 		.cmds_start	= 0x10190,
160 		.ctrl_start	= 0x10480,
161 		.cdt		= 0x10a00,
162 		.fifo_start	= 0x7000,
163 		.fifo_size	= 0x1000,
164 		.ptr1_reg	= DMA6_PTR1,
165 		.ptr2_reg	= DMA6_PTR2,
166 		.cnt1_reg	= DMA6_CNT1,
167 		.cnt2_reg	= DMA6_CNT2,
168 	},
169 	[SRAM_CH08] = {
170 		.name		= "ch8",
171 		.cmds_start	= 0x0,
172 		.ctrl_start	= 0x0,
173 		.cdt		= 0x0,
174 		.fifo_start	= 0x0,
175 		.fifo_size	= 0x0,
176 		.ptr1_reg	= DMA7_PTR1,
177 		.ptr2_reg	= DMA7_PTR2,
178 		.cnt1_reg	= DMA7_CNT1,
179 		.cnt2_reg	= DMA7_CNT2,
180 	},
181 	[SRAM_CH09] = {
182 		.name		= "ch9",
183 		.cmds_start	= 0x0,
184 		.ctrl_start	= 0x0,
185 		.cdt		= 0x0,
186 		.fifo_start	= 0x0,
187 		.fifo_size	= 0x0,
188 		.ptr1_reg	= DMA8_PTR1,
189 		.ptr2_reg	= DMA8_PTR2,
190 		.cnt1_reg	= DMA8_CNT1,
191 		.cnt2_reg	= DMA8_CNT2,
192 	},
193 };
194 
195 static struct sram_channel cx23887_sram_channels[] = {
196 	[SRAM_CH01] = {
197 		.name		= "VID A",
198 		.cmds_start	= 0x10000,
199 		.ctrl_start	= 0x105b0,
200 		.cdt		= 0x107b0,
201 		.fifo_start	= 0x40,
202 		.fifo_size	= 0x2800,
203 		.ptr1_reg	= DMA1_PTR1,
204 		.ptr2_reg	= DMA1_PTR2,
205 		.cnt1_reg	= DMA1_CNT1,
206 		.cnt2_reg	= DMA1_CNT2,
207 	},
208 	[SRAM_CH02] = {
209 		.name		= "VID A (VBI)",
210 		.cmds_start	= 0x10050,
211 		.ctrl_start	= 0x105F0,
212 		.cdt		= 0x10810,
213 		.fifo_start	= 0x3000,
214 		.fifo_size	= 0x1000,
215 		.ptr1_reg	= DMA2_PTR1,
216 		.ptr2_reg	= DMA2_PTR2,
217 		.cnt1_reg	= DMA2_CNT1,
218 		.cnt2_reg	= DMA2_CNT2,
219 	},
220 	[SRAM_CH03] = {
221 		.name		= "TS1 B",
222 		.cmds_start	= 0x100A0,
223 		.ctrl_start	= 0x10630,
224 		.cdt		= 0x10870,
225 		.fifo_start	= 0x5000,
226 		.fifo_size	= 0x1000,
227 		.ptr1_reg	= DMA3_PTR1,
228 		.ptr2_reg	= DMA3_PTR2,
229 		.cnt1_reg	= DMA3_CNT1,
230 		.cnt2_reg	= DMA3_CNT2,
231 	},
232 	[SRAM_CH04] = {
233 		.name		= "ch4",
234 		.cmds_start	= 0x0,
235 		.ctrl_start	= 0x0,
236 		.cdt		= 0x0,
237 		.fifo_start	= 0x0,
238 		.fifo_size	= 0x0,
239 		.ptr1_reg	= DMA4_PTR1,
240 		.ptr2_reg	= DMA4_PTR2,
241 		.cnt1_reg	= DMA4_CNT1,
242 		.cnt2_reg	= DMA4_CNT2,
243 	},
244 	[SRAM_CH05] = {
245 		.name		= "ch5",
246 		.cmds_start	= 0x0,
247 		.ctrl_start	= 0x0,
248 		.cdt		= 0x0,
249 		.fifo_start	= 0x0,
250 		.fifo_size	= 0x0,
251 		.ptr1_reg	= DMA5_PTR1,
252 		.ptr2_reg	= DMA5_PTR2,
253 		.cnt1_reg	= DMA5_CNT1,
254 		.cnt2_reg	= DMA5_CNT2,
255 	},
256 	[SRAM_CH06] = {
257 		.name		= "TS2 C",
258 		.cmds_start	= 0x10140,
259 		.ctrl_start	= 0x10670,
260 		.cdt		= 0x108d0,
261 		.fifo_start	= 0x6000,
262 		.fifo_size	= 0x1000,
263 		.ptr1_reg	= DMA5_PTR1,
264 		.ptr2_reg	= DMA5_PTR2,
265 		.cnt1_reg	= DMA5_CNT1,
266 		.cnt2_reg	= DMA5_CNT2,
267 	},
268 	[SRAM_CH07] = {
269 		.name		= "TV Audio",
270 		.cmds_start	= 0x10190,
271 		.ctrl_start	= 0x106B0,
272 		.cdt		= 0x10930,
273 		.fifo_start	= 0x7000,
274 		.fifo_size	= 0x1000,
275 		.ptr1_reg	= DMA6_PTR1,
276 		.ptr2_reg	= DMA6_PTR2,
277 		.cnt1_reg	= DMA6_CNT1,
278 		.cnt2_reg	= DMA6_CNT2,
279 	},
280 	[SRAM_CH08] = {
281 		.name		= "ch8",
282 		.cmds_start	= 0x0,
283 		.ctrl_start	= 0x0,
284 		.cdt		= 0x0,
285 		.fifo_start	= 0x0,
286 		.fifo_size	= 0x0,
287 		.ptr1_reg	= DMA7_PTR1,
288 		.ptr2_reg	= DMA7_PTR2,
289 		.cnt1_reg	= DMA7_CNT1,
290 		.cnt2_reg	= DMA7_CNT2,
291 	},
292 	[SRAM_CH09] = {
293 		.name		= "ch9",
294 		.cmds_start	= 0x0,
295 		.ctrl_start	= 0x0,
296 		.cdt		= 0x0,
297 		.fifo_start	= 0x0,
298 		.fifo_size	= 0x0,
299 		.ptr1_reg	= DMA8_PTR1,
300 		.ptr2_reg	= DMA8_PTR2,
301 		.cnt1_reg	= DMA8_CNT1,
302 		.cnt2_reg	= DMA8_CNT2,
303 	},
304 };
305 
306 void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
307 {
308 	unsigned long flags;
309 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
310 
311 	dev->pci_irqmask |= mask;
312 
313 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
314 }
315 
316 void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
317 {
318 	unsigned long flags;
319 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
320 
321 	dev->pci_irqmask |= mask;
322 	cx_set(PCI_INT_MSK, mask);
323 
324 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
325 }
326 
327 void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
328 {
329 	u32 v;
330 	unsigned long flags;
331 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
332 
333 	v = mask & dev->pci_irqmask;
334 	if (v)
335 		cx_set(PCI_INT_MSK, v);
336 
337 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
338 }
339 
340 static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
341 {
342 	cx23885_irq_enable(dev, 0xffffffff);
343 }
344 
345 void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
346 {
347 	unsigned long flags;
348 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
349 
350 	cx_clear(PCI_INT_MSK, mask);
351 
352 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
353 }
354 
355 static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
356 {
357 	cx23885_irq_disable(dev, 0xffffffff);
358 }
359 
360 void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
361 {
362 	unsigned long flags;
363 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
364 
365 	dev->pci_irqmask &= ~mask;
366 	cx_clear(PCI_INT_MSK, mask);
367 
368 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
369 }
370 
371 static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
372 {
373 	u32 v;
374 	unsigned long flags;
375 	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
376 
377 	v = cx_read(PCI_INT_MSK);
378 
379 	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
380 	return v;
381 }
382 
383 static int cx23885_risc_decode(u32 risc)
384 {
385 	static char *instr[16] = {
386 		[RISC_SYNC    >> 28] = "sync",
387 		[RISC_WRITE   >> 28] = "write",
388 		[RISC_WRITEC  >> 28] = "writec",
389 		[RISC_READ    >> 28] = "read",
390 		[RISC_READC   >> 28] = "readc",
391 		[RISC_JUMP    >> 28] = "jump",
392 		[RISC_SKIP    >> 28] = "skip",
393 		[RISC_WRITERM >> 28] = "writerm",
394 		[RISC_WRITECM >> 28] = "writecm",
395 		[RISC_WRITECR >> 28] = "writecr",
396 	};
397 	static int incr[16] = {
398 		[RISC_WRITE   >> 28] = 3,
399 		[RISC_JUMP    >> 28] = 3,
400 		[RISC_SKIP    >> 28] = 1,
401 		[RISC_SYNC    >> 28] = 1,
402 		[RISC_WRITERM >> 28] = 3,
403 		[RISC_WRITECM >> 28] = 3,
404 		[RISC_WRITECR >> 28] = 4,
405 	};
406 	static char *bits[] = {
407 		"12",   "13",   "14",   "resync",
408 		"cnt0", "cnt1", "18",   "19",
409 		"20",   "21",   "22",   "23",
410 		"irq1", "irq2", "eol",  "sol",
411 	};
412 	int i;
413 
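	/* Instruction word layout, as decoded below: opcode in bits 31:28,
	 * per-instruction flag bits in 27:12 (named in bits[] above), and a
	 * 12-bit count in bits 11:0.
	 */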
414 	printk("0x%08x [ %s", risc,
415 	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
416 	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
417 		if (risc & (1 << (i + 12)))
418 			printk(" %s", bits[i]);
419 	printk(" count=%d ]\n", risc & 0xfff);
420 	return incr[risc >> 28] ? incr[risc >> 28] : 1;
421 }
422 
423 void cx23885_wakeup(struct cx23885_tsport *port,
424 			   struct cx23885_dmaqueue *q, u32 count)
425 {
426 	struct cx23885_dev *dev = port->dev;
427 	struct cx23885_buffer *buf;
428 	int bc;
429 
430 	for (bc = 0;; bc++) {
431 		if (list_empty(&q->active))
432 			break;
433 		buf = list_entry(q->active.next,
434 				 struct cx23885_buffer, vb.queue);
435 
436 		/* count comes from the hw and is 16 bits wide --
437 		 * this trick handles wrap-arounds correctly for
438 		 * up to 32767 buffers in flight... */
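		/* e.g. hw count = 0x0001 just after the counter wraps and a
		 * buffer queued at buf->count = 0xffff gives
		 * (s16)(count - buf->count) = 2 >= 0, so that buffer still
		 * completes; a buffer logically ahead of the hardware count
		 * yields a negative result and stops the loop.
		 */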
439 		if ((s16) (count - buf->count) < 0)
440 			break;
441 
442 		do_gettimeofday(&buf->vb.ts);
443 		dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.i,
444 			count, buf->count);
445 		buf->vb.state = VIDEOBUF_DONE;
446 		list_del(&buf->vb.queue);
447 		wake_up(&buf->vb.done);
448 	}
449 	if (list_empty(&q->active))
450 		del_timer(&q->timeout);
451 	else
452 		mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
453 	if (bc != 1)
454 		printk(KERN_WARNING "%s: %d buffers handled (should be 1)\n",
455 		       __func__, bc);
456 }
457 
458 int cx23885_sram_channel_setup(struct cx23885_dev *dev,
459 				      struct sram_channel *ch,
460 				      unsigned int bpl, u32 risc)
461 {
462 	unsigned int i, lines;
463 	u32 cdt;
464 
465 	if (ch->cmds_start == 0) {
466 		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
467 			ch->name);
468 		cx_write(ch->ptr1_reg, 0);
469 		cx_write(ch->ptr2_reg, 0);
470 		cx_write(ch->cnt2_reg, 0);
471 		cx_write(ch->cnt1_reg, 0);
472 		return 0;
473 	} else {
474 		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
475 			ch->name);
476 	}
477 
478 	bpl   = (bpl + 7) & ~7; /* alignment */
479 	cdt   = ch->cdt;
480 	lines = ch->fifo_size / bpl;
481 	if (lines > 6)
482 		lines = 6;
483 	BUG_ON(lines < 2);
484 
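	/* Build a three-dword jump-to-self (target 8, upper address bits 0)
	 * at SRAM address 8; jump-only channels point their initial RISC
	 * address (cmds_start + 0) at it below. */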
485 	cx_write(8 + 0, RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
486 	cx_write(8 + 4, 8);
487 	cx_write(8 + 8, 0);
488 
489 	/* write CDT */
490 	for (i = 0; i < lines; i++) {
491 		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
492 			ch->fifo_start + bpl*i);
493 		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
494 		cx_write(cdt + 16*i +  4, 0);
495 		cx_write(cdt + 16*i +  8, 0);
496 		cx_write(cdt + 16*i + 12, 0);
497 	}
498 
499 	/* write CMDS */
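	/* CMDS layout (matching name[] in cx23885_sram_channel_dump()):
	 * +0/+4 initial RISC address, +8 CDT base, +12 CDT size (in 64-bit
	 * words), +16 IQ base, +20 IQ size (in dwords); the remainder is
	 * cleared. */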
500 	if (ch->jumponly)
501 		cx_write(ch->cmds_start + 0, 8);
502 	else
503 		cx_write(ch->cmds_start + 0, risc);
504 	cx_write(ch->cmds_start +  4, 0); /* 64 bits 63-32 */
505 	cx_write(ch->cmds_start +  8, cdt);
506 	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
507 	cx_write(ch->cmds_start + 16, ch->ctrl_start);
508 	if (ch->jumponly)
509 		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
510 	else
511 		cx_write(ch->cmds_start + 20, 64 >> 2);
512 	for (i = 24; i < 80; i += 4)
513 		cx_write(ch->cmds_start + i, 0);
514 
515 	/* fill registers */
516 	cx_write(ch->ptr1_reg, ch->fifo_start);
517 	cx_write(ch->ptr2_reg, cdt);
518 	cx_write(ch->cnt2_reg, (lines*16) >> 3);
519 	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
520 
521 	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
522 		dev->bridge,
523 		ch->name,
524 		bpl,
525 		lines);
526 
527 	return 0;
528 }
529 
530 void cx23885_sram_channel_dump(struct cx23885_dev *dev,
531 				      struct sram_channel *ch)
532 {
533 	static char *name[] = {
534 		"init risc lo",
535 		"init risc hi",
536 		"cdt base",
537 		"cdt size",
538 		"iq base",
539 		"iq size",
540 		"risc pc lo",
541 		"risc pc hi",
542 		"iq wr ptr",
543 		"iq rd ptr",
544 		"cdt current",
545 		"pci target lo",
546 		"pci target hi",
547 		"line / byte",
548 	};
549 	u32 risc;
550 	unsigned int i, j, n;
551 
552 	printk(KERN_WARNING "%s: %s - dma channel status dump\n",
553 	       dev->name, ch->name);
554 	for (i = 0; i < ARRAY_SIZE(name); i++)
555 		printk(KERN_WARNING "%s:   cmds: %-15s: 0x%08x\n",
556 		       dev->name, name[i],
557 		       cx_read(ch->cmds_start + 4*i));
558 
559 	for (i = 0; i < 4; i++) {
560 		risc = cx_read(ch->cmds_start + 4 * (i + 14));
561 		printk(KERN_WARNING "%s:   risc%d: ", dev->name, i);
562 		cx23885_risc_decode(risc);
563 	}
564 	for (i = 0; i < (64 >> 2); i += n) {
565 		risc = cx_read(ch->ctrl_start + 4 * i);
566 		/* No consideration for bits 63-32 */
567 
568 		printk(KERN_WARNING "%s:   (0x%08x) iq %x: ", dev->name,
569 		       ch->ctrl_start + 4 * i, i);
570 		n = cx23885_risc_decode(risc);
571 		for (j = 1; j < n; j++) {
572 			risc = cx_read(ch->ctrl_start + 4 * (i + j));
573 			printk(KERN_WARNING "%s:   iq %x: 0x%08x [ arg #%d ]\n",
574 			       dev->name, i+j, risc, j);
575 		}
576 	}
577 
578 	printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
579 	       dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
580 	printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
581 	       dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
582 	printk(KERN_WARNING "%s:   ptr1_reg: 0x%08x\n",
583 	       dev->name, cx_read(ch->ptr1_reg));
584 	printk(KERN_WARNING "%s:   ptr2_reg: 0x%08x\n",
585 	       dev->name, cx_read(ch->ptr2_reg));
586 	printk(KERN_WARNING "%s:   cnt1_reg: 0x%08x\n",
587 	       dev->name, cx_read(ch->cnt1_reg));
588 	printk(KERN_WARNING "%s:   cnt2_reg: 0x%08x\n",
589 	       dev->name, cx_read(ch->cnt2_reg));
590 }
591 
592 static void cx23885_risc_disasm(struct cx23885_tsport *port,
593 				struct btcx_riscmem *risc)
594 {
595 	struct cx23885_dev *dev = port->dev;
596 	unsigned int i, j, n;
597 
598 	printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
599 	       dev->name, risc->cpu, (unsigned long)risc->dma);
600 	for (i = 0; i < (risc->size >> 2); i += n) {
601 		printk(KERN_INFO "%s:   %04d: ", dev->name, i);
602 		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
603 		for (j = 1; j < n; j++)
604 			printk(KERN_INFO "%s:   %04d: 0x%08x [ arg #%d ]\n",
605 			       dev->name, i + j, risc->cpu[i + j], j);
606 		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
607 			break;
608 	}
609 }
610 
611 static void cx23885_shutdown(struct cx23885_dev *dev)
612 {
613 	/* disable RISC controller */
614 	cx_write(DEV_CNTRL2, 0);
615 
616 	/* Disable all IR activity */
617 	cx_write(IR_CNTRL_REG, 0);
618 
619 	/* Disable Video A/B activity */
620 	cx_write(VID_A_DMA_CTL, 0);
621 	cx_write(VID_B_DMA_CTL, 0);
622 	cx_write(VID_C_DMA_CTL, 0);
623 
624 	/* Disable Audio activity */
625 	cx_write(AUD_INT_DMA_CTL, 0);
626 	cx_write(AUD_EXT_DMA_CTL, 0);
627 
628 	/* Disable Serial port */
629 	cx_write(UART_CTL, 0);
630 
631 	/* Disable Interrupts */
632 	cx23885_irq_disable_all(dev);
633 	cx_write(VID_A_INT_MSK, 0);
634 	cx_write(VID_B_INT_MSK, 0);
635 	cx_write(VID_C_INT_MSK, 0);
636 	cx_write(AUDIO_INT_INT_MSK, 0);
637 	cx_write(AUDIO_EXT_INT_MSK, 0);
638 
639 }
640 
641 static void cx23885_reset(struct cx23885_dev *dev)
642 {
643 	dprintk(1, "%s()\n", __func__);
644 
645 	cx23885_shutdown(dev);
646 
647 	cx_write(PCI_INT_STAT, 0xffffffff);
648 	cx_write(VID_A_INT_STAT, 0xffffffff);
649 	cx_write(VID_B_INT_STAT, 0xffffffff);
650 	cx_write(VID_C_INT_STAT, 0xffffffff);
651 	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
652 	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
653 	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
654 	cx_write(PAD_CTRL, 0x00500300);
655 
656 	mdelay(100);
657 
658 	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
659 		720*4, 0);
660 	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
661 	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
662 		188*4, 0);
663 	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
664 	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
665 	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
666 		188*4, 0);
667 	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
668 	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
669 	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);
670 
671 	cx23885_gpio_setup(dev);
672 }
673 
674 
675 static int cx23885_pci_quirks(struct cx23885_dev *dev)
676 {
677 	dprintk(1, "%s()\n", __func__);
678 
679 	/* The cx23885 bridge has a weird bug which causes NMI to be asserted
680 	 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
681 	 * occur on the cx23887 bridge.
682 	 */
683 	if (dev->bridge == CX23885_BRIDGE_885)
684 		cx_clear(RDR_TLCTL0, 1 << 4);
685 
686 	return 0;
687 }
688 
689 static int get_resources(struct cx23885_dev *dev)
690 {
691 	if (request_mem_region(pci_resource_start(dev->pci, 0),
692 			       pci_resource_len(dev->pci, 0),
693 			       dev->name))
694 		return 0;
695 
696 	printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
697 		dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
698 
699 	return -EBUSY;
700 }
701 
702 static void cx23885_timeout(unsigned long data);
703 int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
704 				u32 reg, u32 mask, u32 value);
705 
706 static int cx23885_init_tsport(struct cx23885_dev *dev,
707 	struct cx23885_tsport *port, int portno)
708 {
709 	dprintk(1, "%s(portno=%d)\n", __func__, portno);
710 
711 	/* Transport bus init dma queue  - Common settings */
712 	port->dma_ctl_val        = 0x11; /* Enable RISC controller and Fifo */
713 	port->ts_int_msk_val     = 0x1111; /* TS port bits for RISC */
714 	port->vld_misc_val       = 0x0;
715 	port->hw_sop_ctrl_val    = (0x47 << 16 | 188 << 4);
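	/* 0x47 is the MPEG-2 TS sync byte, 188 the TS packet length in bytes */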
716 
717 	spin_lock_init(&port->slock);
718 	port->dev = dev;
719 	port->nr = portno;
720 
721 	INIT_LIST_HEAD(&port->mpegq.active);
722 	INIT_LIST_HEAD(&port->mpegq.queued);
723 	port->mpegq.timeout.function = cx23885_timeout;
724 	port->mpegq.timeout.data = (unsigned long)port;
725 	init_timer(&port->mpegq.timeout);
726 
727 	mutex_init(&port->frontends.lock);
728 	INIT_LIST_HEAD(&port->frontends.felist);
729 	port->frontends.active_fe_id = 0;
730 
731 	/* This should be hardcoded to allow a single frontend
732 	 * attachment to this tsport, keeping the -dvb.c
733 	 * code clean and safe.
734 	 */
735 	if (!port->num_frontends)
736 		port->num_frontends = 1;
737 
738 	switch (portno) {
739 	case 1:
740 		port->reg_gpcnt          = VID_B_GPCNT;
741 		port->reg_gpcnt_ctl      = VID_B_GPCNT_CTL;
742 		port->reg_dma_ctl        = VID_B_DMA_CTL;
743 		port->reg_lngth          = VID_B_LNGTH;
744 		port->reg_hw_sop_ctrl    = VID_B_HW_SOP_CTL;
745 		port->reg_gen_ctrl       = VID_B_GEN_CTL;
746 		port->reg_bd_pkt_status  = VID_B_BD_PKT_STATUS;
747 		port->reg_sop_status     = VID_B_SOP_STATUS;
748 		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
749 		port->reg_vld_misc       = VID_B_VLD_MISC;
750 		port->reg_ts_clk_en      = VID_B_TS_CLK_EN;
751 		port->reg_src_sel        = VID_B_SRC_SEL;
752 		port->reg_ts_int_msk     = VID_B_INT_MSK;
753 		port->reg_ts_int_stat    = VID_B_INT_STAT;
754 		port->sram_chno          = SRAM_CH03; /* VID_B */
755 		port->pci_irqmask        = 0x02; /* VID_B bit1 */
756 		break;
757 	case 2:
758 		port->reg_gpcnt          = VID_C_GPCNT;
759 		port->reg_gpcnt_ctl      = VID_C_GPCNT_CTL;
760 		port->reg_dma_ctl        = VID_C_DMA_CTL;
761 		port->reg_lngth          = VID_C_LNGTH;
762 		port->reg_hw_sop_ctrl    = VID_C_HW_SOP_CTL;
763 		port->reg_gen_ctrl       = VID_C_GEN_CTL;
764 		port->reg_bd_pkt_status  = VID_C_BD_PKT_STATUS;
765 		port->reg_sop_status     = VID_C_SOP_STATUS;
766 		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
767 		port->reg_vld_misc       = VID_C_VLD_MISC;
768 		port->reg_ts_clk_en      = VID_C_TS_CLK_EN;
769 		port->reg_src_sel        = 0;
770 		port->reg_ts_int_msk     = VID_C_INT_MSK;
771 		port->reg_ts_int_stat    = VID_C_INT_STAT;
772 		port->sram_chno          = SRAM_CH06; /* VID_C */
773 		port->pci_irqmask        = 0x04; /* VID_C bit2 */
774 		break;
775 	default:
776 		BUG();
777 	}
778 
779 	cx23885_risc_stopper(dev->pci, &port->mpegq.stopper,
780 		     port->reg_dma_ctl, port->dma_ctl_val, 0x00);
781 
782 	return 0;
783 }
784 
785 static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
786 {
787 	switch (cx_read(RDR_CFG2) & 0xff) {
788 	case 0x00:
789 		/* cx23885 */
790 		dev->hwrevision = 0xa0;
791 		break;
792 	case 0x01:
793 		/* CX23885-12Z */
794 		dev->hwrevision = 0xa1;
795 		break;
796 	case 0x02:
797 		/* CX23885-13Z/14Z */
798 		dev->hwrevision = 0xb0;
799 		break;
800 	case 0x03:
801 		if (dev->pci->device == 0x8880) {
802 			/* CX23888-21Z/22Z */
803 			dev->hwrevision = 0xc0;
804 		} else {
805 			/* CX23885-14Z */
806 			dev->hwrevision = 0xa4;
807 		}
808 		break;
809 	case 0x04:
810 		if (dev->pci->device == 0x8880) {
811 			/* CX23888-31Z */
812 			dev->hwrevision = 0xd0;
813 		} else {
814 			/* CX23885-15Z, CX23888-31Z */
815 			dev->hwrevision = 0xa5;
816 		}
817 		break;
818 	case 0x0e:
819 		/* CX23887-15Z */
820 		dev->hwrevision = 0xc0;
821 		break;
822 	case 0x0f:
823 		/* CX23887-14Z */
824 		dev->hwrevision = 0xb1;
825 		break;
826 	default:
827 		printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
828 			__func__, dev->hwrevision);
829 	}
830 	if (dev->hwrevision)
831 		printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
832 			__func__, dev->hwrevision);
833 	else
834 		printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
835 			__func__, dev->hwrevision);
836 }
837 
838 /* Find the first v4l2_subdev member of the group id in hw */
839 struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
840 {
841 	struct v4l2_subdev *result = NULL;
842 	struct v4l2_subdev *sd;
843 
844 	spin_lock(&dev->v4l2_dev.lock);
845 	v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
846 		if (sd->grp_id == hw) {
847 			result = sd;
848 			break;
849 		}
850 	}
851 	spin_unlock(&dev->v4l2_dev.lock);
852 	return result;
853 }
854 
855 static int cx23885_dev_setup(struct cx23885_dev *dev)
856 {
857 	int i;
858 
859 	spin_lock_init(&dev->pci_irqmask_lock);
860 
861 	mutex_init(&dev->lock);
862 	mutex_init(&dev->gpio_lock);
863 
864 	atomic_inc(&dev->refcount);
865 
866 	dev->nr = cx23885_devcount++;
867 	sprintf(dev->name, "cx23885[%d]", dev->nr);
868 
869 	/* Configure the internal memory */
870 	if (dev->pci->device == 0x8880) {
871 		/* Could be 887 or 888, assume a default */
872 		dev->bridge = CX23885_BRIDGE_887;
873 		/* Apply a sensible clock frequency for the PCIe bridge */
874 		dev->clk_freq = 25000000;
875 		dev->sram_channels = cx23887_sram_channels;
876 	} else
877 	if (dev->pci->device == 0x8852) {
878 		dev->bridge = CX23885_BRIDGE_885;
879 		/* Apply a sensible clock frequency for the PCIe bridge */
880 		dev->clk_freq = 28000000;
881 		dev->sram_channels = cx23885_sram_channels;
882 	} else
883 		BUG();
884 
885 	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
886 		__func__, dev->bridge);
887 
888 	/* board config */
889 	dev->board = UNSET;
890 	if (card[dev->nr] < cx23885_bcount)
891 		dev->board = card[dev->nr];
892 	for (i = 0; UNSET == dev->board  &&  i < cx23885_idcount; i++)
893 		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
894 		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
895 			dev->board = cx23885_subids[i].card;
896 	if (UNSET == dev->board) {
897 		dev->board = CX23885_BOARD_UNKNOWN;
898 		cx23885_card_list(dev);
899 	}
900 
901 	/* If the user specified a clk freq override, apply it */
902 	if (cx23885_boards[dev->board].clk_freq > 0)
903 		dev->clk_freq = cx23885_boards[dev->board].clk_freq;
904 
905 	dev->pci_bus  = dev->pci->bus->number;
906 	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
907 	cx23885_irq_add(dev, 0x001f00);
908 
909 	/* External Master 1 Bus */
910 	dev->i2c_bus[0].nr = 0;
911 	dev->i2c_bus[0].dev = dev;
912 	dev->i2c_bus[0].reg_stat  = I2C1_STAT;
913 	dev->i2c_bus[0].reg_ctrl  = I2C1_CTRL;
914 	dev->i2c_bus[0].reg_addr  = I2C1_ADDR;
915 	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
916 	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
917 	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */
918 
919 	/* External Master 2 Bus */
920 	dev->i2c_bus[1].nr = 1;
921 	dev->i2c_bus[1].dev = dev;
922 	dev->i2c_bus[1].reg_stat  = I2C2_STAT;
923 	dev->i2c_bus[1].reg_ctrl  = I2C2_CTRL;
924 	dev->i2c_bus[1].reg_addr  = I2C2_ADDR;
925 	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
926 	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
927 	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */
928 
929 	/* Internal Master 3 Bus */
930 	dev->i2c_bus[2].nr = 2;
931 	dev->i2c_bus[2].dev = dev;
932 	dev->i2c_bus[2].reg_stat  = I2C3_STAT;
933 	dev->i2c_bus[2].reg_ctrl  = I2C3_CTRL;
934 	dev->i2c_bus[2].reg_addr  = I2C3_ADDR;
935 	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
936 	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
937 	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */
938 
939 	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
940 		(cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
941 		cx23885_init_tsport(dev, &dev->ts1, 1);
942 
943 	if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
944 		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
945 		cx23885_init_tsport(dev, &dev->ts2, 2);
946 
947 	if (get_resources(dev) < 0) {
948 		printk(KERN_ERR "CORE %s No more PCIe resources for "
949 		       "subsystem: %04x:%04x\n",
950 		       dev->name, dev->pci->subsystem_vendor,
951 		       dev->pci->subsystem_device);
952 
953 		cx23885_devcount--;
954 		return -ENODEV;
955 	}
956 
957 	/* PCIe stuff */
958 	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
959 			     pci_resource_len(dev->pci, 0));
960 
961 	dev->bmmio = (u8 __iomem *)dev->lmmio;
962 
963 	printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
964 	       dev->name, dev->pci->subsystem_vendor,
965 	       dev->pci->subsystem_device, cx23885_boards[dev->board].name,
966 	       dev->board, card[dev->nr] == dev->board ?
967 	       "insmod option" : "autodetected");
968 
969 	cx23885_pci_quirks(dev);
970 
971 	/* Assume some sensible defaults */
972 	dev->tuner_type = cx23885_boards[dev->board].tuner_type;
973 	dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
974 	dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
975 	dev->radio_type = cx23885_boards[dev->board].radio_type;
976 	dev->radio_addr = cx23885_boards[dev->board].radio_addr;
977 
978 	dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
979 		__func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
980 	dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
981 		__func__, dev->radio_type, dev->radio_addr);
982 
983 	/* The cx23417 encoder has GPIOs that need to be initialised
984 	 * before DVB, so that demodulators and tuners are out of
985 	 * reset before DVB uses them.
986 	 */
987 	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
988 		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
989 			cx23885_mc417_init(dev);
990 
991 	/* init hardware */
992 	cx23885_reset(dev);
993 
994 	cx23885_i2c_register(&dev->i2c_bus[0]);
995 	cx23885_i2c_register(&dev->i2c_bus[1]);
996 	cx23885_i2c_register(&dev->i2c_bus[2]);
997 	cx23885_card_setup(dev);
998 	call_all(dev, core, s_power, 0);
999 	cx23885_ir_init(dev);
1000 
1001 	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
1002 		if (cx23885_video_register(dev) < 0) {
1003 			printk(KERN_ERR "%s() Failed to register analog "
1004 				"video adapters on VID_A\n", __func__);
1005 		}
1006 	}
1007 
1008 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1009 		if (cx23885_boards[dev->board].num_fds_portb)
1010 			dev->ts1.num_frontends =
1011 				cx23885_boards[dev->board].num_fds_portb;
1012 		if (cx23885_dvb_register(&dev->ts1) < 0) {
1013 			printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
1014 			       __func__);
1015 		}
1016 	} else
1017 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1018 		if (cx23885_417_register(dev) < 0) {
1019 			printk(KERN_ERR
1020 				"%s() Failed to register 417 on VID_B\n",
1021 			       __func__);
1022 		}
1023 	}
1024 
1025 	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1026 		if (cx23885_boards[dev->board].num_fds_portc)
1027 			dev->ts2.num_frontends =
1028 				cx23885_boards[dev->board].num_fds_portc;
1029 		if (cx23885_dvb_register(&dev->ts2) < 0) {
1030 			printk(KERN_ERR
1031 				"%s() Failed to register dvb on VID_C\n",
1032 			       __func__);
1033 		}
1034 	} else
1035 	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
1036 		if (cx23885_417_register(dev) < 0) {
1037 			printk(KERN_ERR
1038 				"%s() Failed to register 417 on VID_C\n",
1039 			       __func__);
1040 		}
1041 	}
1042 
1043 	cx23885_dev_checkrevision(dev);
1044 
1045 	/* disable MSI for NetUP cards, otherwise CI does not work */
1046 	if (cx23885_boards[dev->board].ci_type > 0)
1047 		cx_clear(RDR_RDRCTL1, 1 << 8);
1048 
1049 	switch (dev->board) {
1050 	case CX23885_BOARD_TEVII_S470:
1051 	case CX23885_BOARD_TEVII_S471:
1052 		cx_clear(RDR_RDRCTL1, 1 << 8);
1053 		break;
1054 	}
1055 
1056 	return 0;
1057 }
1058 
1059 static void cx23885_dev_unregister(struct cx23885_dev *dev)
1060 {
1061 	release_mem_region(pci_resource_start(dev->pci, 0),
1062 			   pci_resource_len(dev->pci, 0));
1063 
1064 	if (!atomic_dec_and_test(&dev->refcount))
1065 		return;
1066 
1067 	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
1068 		cx23885_video_unregister(dev);
1069 
1070 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1071 		cx23885_dvb_unregister(&dev->ts1);
1072 
1073 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1074 		cx23885_417_unregister(dev);
1075 
1076 	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1077 		cx23885_dvb_unregister(&dev->ts2);
1078 
1079 	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1080 		cx23885_417_unregister(dev);
1081 
1082 	cx23885_i2c_unregister(&dev->i2c_bus[2]);
1083 	cx23885_i2c_unregister(&dev->i2c_bus[1]);
1084 	cx23885_i2c_unregister(&dev->i2c_bus[0]);
1085 
1086 	iounmap(dev->lmmio);
1087 }
1088 
1089 static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
1090 			       unsigned int offset, u32 sync_line,
1091 			       unsigned int bpl, unsigned int padding,
1092 			       unsigned int lines,  unsigned int lpi)
1093 {
1094 	struct scatterlist *sg;
1095 	unsigned int line, todo, sol;
1096 
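	/* Each write emitted below is three dwords: RISC_WRITE plus SOL/EOL
	 * flags and a byte count, the 32-bit PCI address, and 0 for address
	 * bits 63:32. A line that straddles a scatter-gather chunk is split
	 * into several writes, with SOL only on the first and EOL only on
	 * the last.
	 */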
1097 	/* sync instruction */
1098 	if (sync_line != NO_SYNC_LINE)
1099 		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
1100 
1101 	/* scan lines */
1102 	sg = sglist;
1103 	for (line = 0; line < lines; line++) {
1104 		while (offset && offset >= sg_dma_len(sg)) {
1105 			offset -= sg_dma_len(sg);
1106 			sg++;
1107 		}
1108 
1109 		if (lpi && line > 0 && !(line % lpi))
1110 			sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
1111 		else
1112 			sol = RISC_SOL;
1113 
1114 		if (bpl <= sg_dma_len(sg)-offset) {
1115 			/* fits into current chunk */
1116 			*(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
1117 			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1118 			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
1119 			offset += bpl;
1120 		} else {
1121 			/* scanline needs to be split */
1122 			todo = bpl;
1123 			*(rp++) = cpu_to_le32(RISC_WRITE|sol|
1124 					    (sg_dma_len(sg)-offset));
1125 			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1126 			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
1127 			todo -= (sg_dma_len(sg)-offset);
1128 			offset = 0;
1129 			sg++;
1130 			while (todo > sg_dma_len(sg)) {
1131 				*(rp++) = cpu_to_le32(RISC_WRITE|
1132 						    sg_dma_len(sg));
1133 				*(rp++) = cpu_to_le32(sg_dma_address(sg));
1134 				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
1135 				todo -= sg_dma_len(sg);
1136 				sg++;
1137 			}
1138 			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
1139 			*(rp++) = cpu_to_le32(sg_dma_address(sg));
1140 			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
1141 			offset += todo;
1142 		}
1143 		offset += padding;
1144 	}
1145 
1146 	return rp;
1147 }
1148 
1149 int cx23885_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
1150 			struct scatterlist *sglist, unsigned int top_offset,
1151 			unsigned int bottom_offset, unsigned int bpl,
1152 			unsigned int padding, unsigned int lines)
1153 {
1154 	u32 instructions, fields;
1155 	__le32 *rp;
1156 	int rc;
1157 
1158 	fields = 0;
1159 	if (UNSET != top_offset)
1160 		fields++;
1161 	if (UNSET != bottom_offset)
1162 		fields++;
1163 
1164 	/* estimate risc mem: worst case is one write per page border +
1165 	   one write per scan line + syncs + jump (all 2 dwords).  Padding
1166 	   can cause next bpl to start close to a page border.  First DMA
1167 	   region may be smaller than PAGE_SIZE */
1168 	/* write and jump need an extra dword */
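	/* Illustrative sizing (assuming a 4 KiB PAGE_SIZE): a single field
	 * with bpl = 1440, lines = 288 and no padding needs
	 * 1 + (1440 * 288) / 4096 + 288 = 390 instruction slots, plus the 2
	 * added below, at 12 bytes each. */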
1169 	instructions  = fields * (1 + ((bpl + padding) * lines)
1170 		/ PAGE_SIZE + lines);
1171 	instructions += 2;
1172 	rc = btcx_riscmem_alloc(pci, risc, instructions*12);
1173 	if (rc < 0)
1174 		return rc;
1175 
1176 	/* write risc instructions */
1177 	rp = risc->cpu;
1178 	if (UNSET != top_offset)
1179 		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
1180 					bpl, padding, lines, 0);
1181 	if (UNSET != bottom_offset)
1182 		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
1183 					bpl, padding, lines, 0);
1184 
1185 	/* save pointer to jmp instruction address */
1186 	risc->jmp = rp;
1187 	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1188 	return 0;
1189 }
1190 
1191 int cx23885_risc_databuffer(struct pci_dev *pci,
1192 				   struct btcx_riscmem *risc,
1193 				   struct scatterlist *sglist,
1194 				   unsigned int bpl,
1195 				   unsigned int lines, unsigned int lpi)
1196 {
1197 	u32 instructions;
1198 	__le32 *rp;
1199 	int rc;
1200 
1201 	/* estimate risc mem: worst case is one write per page border +
1202 	   one write per scan line + syncs + jump (all 2 dwords).  Here
1203 	   there is no padding and no sync.  First DMA region may be smaller
1204 	   than PAGE_SIZE */
1205 	/* Jump and write need an extra dword */
1206 	instructions  = 1 + (bpl * lines) / PAGE_SIZE + lines;
1207 	instructions += 1;
1208 
1209 	rc = btcx_riscmem_alloc(pci, risc, instructions*12);
1210 	if (rc < 0)
1211 		return rc;
1212 
1213 	/* write risc instructions */
1214 	rp = risc->cpu;
1215 	rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
1216 				bpl, 0, lines, lpi);
1217 
1218 	/* save pointer to jmp instruction address */
1219 	risc->jmp = rp;
1220 	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1221 	return 0;
1222 }
1223 
1224 int cx23885_risc_vbibuffer(struct pci_dev *pci, struct btcx_riscmem *risc,
1225 			struct scatterlist *sglist, unsigned int top_offset,
1226 			unsigned int bottom_offset, unsigned int bpl,
1227 			unsigned int padding, unsigned int lines)
1228 {
1229 	u32 instructions, fields;
1230 	__le32 *rp;
1231 	int rc;
1232 
1233 	fields = 0;
1234 	if (UNSET != top_offset)
1235 		fields++;
1236 	if (UNSET != bottom_offset)
1237 		fields++;
1238 
1239 	/* estimate risc mem: worst case is one write per page border +
1240 	   one write per scan line + syncs + jump (all 2 dwords).  Padding
1241 	   can cause next bpl to start close to a page border.  First DMA
1242 	   region may be smaller than PAGE_SIZE */
1243 	/* write and jump need an extra dword */
1244 	instructions  = fields * (1 + ((bpl + padding) * lines)
1245 		/ PAGE_SIZE + lines);
1246 	instructions += 2;
1247 	rc = btcx_riscmem_alloc(pci, risc, instructions*12);
1248 	if (rc < 0)
1249 		return rc;
1250 	/* write risc instructions */
1251 	rp = risc->cpu;
1252 
1253 	/* Sync to line 6, so US CC line 21 will appear in line '12'
1254 	 * in the userland vbi payload */
1255 	if (UNSET != top_offset)
1256 		rp = cx23885_risc_field(rp, sglist, top_offset, 6,
1257 					bpl, padding, lines, 0);
1258 
1259 	if (UNSET != bottom_offset)
1260 		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x207,
1261 					bpl, padding, lines, 0);
1262 
1263 
1264 
1265 	/* save pointer to jmp instruction address */
1266 	risc->jmp = rp;
1267 	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1268 	return 0;
1269 }
1270 
1271 
1272 int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
1273 				u32 reg, u32 mask, u32 value)
1274 {
1275 	__le32 *rp;
1276 	int rc;
1277 
1278 	rc = btcx_riscmem_alloc(pci, risc, 4*16);
1279 	if (rc < 0)
1280 		return rc;
1281 
1282 	/* write risc instructions */
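	/* The stopper is a tiny looping program: a WRITECR (with IRQ2 set)
	 * taking the reg/value/mask passed in, followed by a JUMP back to
	 * its own start (risc->dma). Buffer chains point their final jump
	 * here, see cx23885_buf_queue(). */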
1283 	rp = risc->cpu;
1284 	*(rp++) = cpu_to_le32(RISC_WRITECR  | RISC_IRQ2);
1285 	*(rp++) = cpu_to_le32(reg);
1286 	*(rp++) = cpu_to_le32(value);
1287 	*(rp++) = cpu_to_le32(mask);
1288 	*(rp++) = cpu_to_le32(RISC_JUMP);
1289 	*(rp++) = cpu_to_le32(risc->dma);
1290 	*(rp++) = cpu_to_le32(0); /* bits 63-32 */
1291 	return 0;
1292 }
1293 
1294 void cx23885_free_buffer(struct videobuf_queue *q, struct cx23885_buffer *buf)
1295 {
1296 	struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
1297 
1298 	BUG_ON(in_interrupt());
1299 	videobuf_waiton(q, &buf->vb, 0, 0);
1300 	videobuf_dma_unmap(q->dev, dma);
1301 	videobuf_dma_free(dma);
1302 	btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
1303 	buf->vb.state = VIDEOBUF_NEEDS_INIT;
1304 }
1305 
1306 static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
1307 {
1308 	struct cx23885_dev *dev = port->dev;
1309 
1310 	dprintk(1, "%s() Register Dump\n", __func__);
1311 	dprintk(1, "%s() DEV_CNTRL2               0x%08X\n", __func__,
1312 		cx_read(DEV_CNTRL2));
1313 	dprintk(1, "%s() PCI_INT_MSK              0x%08X\n", __func__,
1314 		cx23885_irq_get_mask(dev));
1315 	dprintk(1, "%s() AUD_INT_INT_MSK          0x%08X\n", __func__,
1316 		cx_read(AUDIO_INT_INT_MSK));
1317 	dprintk(1, "%s() AUD_INT_DMA_CTL          0x%08X\n", __func__,
1318 		cx_read(AUD_INT_DMA_CTL));
1319 	dprintk(1, "%s() AUD_EXT_INT_MSK          0x%08X\n", __func__,
1320 		cx_read(AUDIO_EXT_INT_MSK));
1321 	dprintk(1, "%s() AUD_EXT_DMA_CTL          0x%08X\n", __func__,
1322 		cx_read(AUD_EXT_DMA_CTL));
1323 	dprintk(1, "%s() PAD_CTRL                 0x%08X\n", __func__,
1324 		cx_read(PAD_CTRL));
1325 	dprintk(1, "%s() ALT_PIN_OUT_SEL          0x%08X\n", __func__,
1326 		cx_read(ALT_PIN_OUT_SEL));
1327 	dprintk(1, "%s() GPIO2                    0x%08X\n", __func__,
1328 		cx_read(GPIO2));
1329 	dprintk(1, "%s() gpcnt(0x%08X)          0x%08X\n", __func__,
1330 		port->reg_gpcnt, cx_read(port->reg_gpcnt));
1331 	dprintk(1, "%s() gpcnt_ctl(0x%08X)      0x%08x\n", __func__,
1332 		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
1333 	dprintk(1, "%s() dma_ctl(0x%08X)        0x%08x\n", __func__,
1334 		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
1335 	if (port->reg_src_sel)
1336 		dprintk(1, "%s() src_sel(0x%08X)        0x%08x\n", __func__,
1337 			port->reg_src_sel, cx_read(port->reg_src_sel));
1338 	dprintk(1, "%s() lngth(0x%08X)          0x%08x\n", __func__,
1339 		port->reg_lngth, cx_read(port->reg_lngth));
1340 	dprintk(1, "%s() hw_sop_ctrl(0x%08X)    0x%08x\n", __func__,
1341 		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
1342 	dprintk(1, "%s() gen_ctrl(0x%08X)       0x%08x\n", __func__,
1343 		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
1344 	dprintk(1, "%s() bd_pkt_status(0x%08X)  0x%08x\n", __func__,
1345 		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
1346 	dprintk(1, "%s() sop_status(0x%08X)     0x%08x\n", __func__,
1347 		port->reg_sop_status, cx_read(port->reg_sop_status));
1348 	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
1349 		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
1350 	dprintk(1, "%s() vld_misc(0x%08X)       0x%08x\n", __func__,
1351 		port->reg_vld_misc, cx_read(port->reg_vld_misc));
1352 	dprintk(1, "%s() ts_clk_en(0x%08X)      0x%08x\n", __func__,
1353 		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
1354 	dprintk(1, "%s() ts_int_msk(0x%08X)     0x%08x\n", __func__,
1355 		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
1356 }
1357 
1358 static int cx23885_start_dma(struct cx23885_tsport *port,
1359 			     struct cx23885_dmaqueue *q,
1360 			     struct cx23885_buffer   *buf)
1361 {
1362 	struct cx23885_dev *dev = port->dev;
1363 	u32 reg;
1364 
1365 	dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
1366 		buf->vb.width, buf->vb.height, buf->vb.field);
1367 
1368 	/* Stop the fifo and risc engine for this port */
1369 	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1370 
1371 	/* setup fifo + format */
1372 	cx23885_sram_channel_setup(dev,
1373 				   &dev->sram_channels[port->sram_chno],
1374 				   port->ts_packet_size, buf->risc.dma);
1375 	if (debug > 5) {
1376 		cx23885_sram_channel_dump(dev,
1377 			&dev->sram_channels[port->sram_chno]);
1378 		cx23885_risc_disasm(port, &buf->risc);
1379 	}
1380 
1381 	/* write TS length to chip */
1382 	cx_write(port->reg_lngth, buf->vb.width);
1383 
1384 	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
1385 		(!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
1386 		printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
1387 			__func__,
1388 			cx23885_boards[dev->board].portb,
1389 			cx23885_boards[dev->board].portc);
1390 		return -EINVAL;
1391 	}
1392 
1393 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1394 		cx23885_av_clk(dev, 0);
1395 
1396 	udelay(100);
1397 
1398 	/* If the port supports SRC SELECT, configure it */
1399 	if (port->reg_src_sel)
1400 		cx_write(port->reg_src_sel, port->src_sel_val);
1401 
1402 	cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
1403 	cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
1404 	cx_write(port->reg_vld_misc, port->vld_misc_val);
1405 	cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
1406 	udelay(100);
1407 
1408 	/* NOTE: this is 2 (reserved) for portb, does it matter? */
1409 	/* reset counter to zero */
1410 	cx_write(port->reg_gpcnt_ctl, 3);
1411 	q->count = 1;
1412 
1413 	/* Set VIDB pins to input */
1414 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1415 		reg = cx_read(PAD_CTRL);
1416 		reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
1417 		cx_write(PAD_CTRL, reg);
1418 	}
1419 
1420 	/* Set VIDC pins to input */
1421 	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1422 		reg = cx_read(PAD_CTRL);
1423 		reg &= ~0x4; /* Clear TS2_SOP_OE */
1424 		cx_write(PAD_CTRL, reg);
1425 	}
1426 
1427 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1428 
1429 		reg = cx_read(PAD_CTRL);
1430 		reg = reg & ~0x1;    /* Clear TS1_OE */
1431 
1432 		/* FIXME, bit 2 writing here is questionable */
1433 		/* set TS1_SOP_OE and TS1_OE_HI */
1434 		reg = reg | 0xa;
1435 		cx_write(PAD_CTRL, reg);
1436 
1437 		/* FIXME: these two registers should be documented. */
1438 		cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
1439 		cx_write(ALT_PIN_OUT_SEL, 0x10100045);
1440 	}
1441 
1442 	switch (dev->bridge) {
1443 	case CX23885_BRIDGE_885:
1444 	case CX23885_BRIDGE_887:
1445 	case CX23885_BRIDGE_888:
1446 		/* enable irqs */
1447 		dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
1448 		cx_set(port->reg_ts_int_msk,  port->ts_int_msk_val);
1449 		cx_set(port->reg_dma_ctl, port->dma_ctl_val);
1450 		cx23885_irq_add(dev, port->pci_irqmask);
1451 		cx23885_irq_enable_all(dev);
1452 		break;
1453 	default:
1454 		BUG();
1455 	}
1456 
1457 	cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
1458 
1459 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1460 		cx23885_av_clk(dev, 1);
1461 
1462 	if (debug > 4)
1463 		cx23885_tsport_reg_dump(port);
1464 
1465 	return 0;
1466 }
1467 
1468 static int cx23885_stop_dma(struct cx23885_tsport *port)
1469 {
1470 	struct cx23885_dev *dev = port->dev;
1471 	u32 reg;
1472 
1473 	dprintk(1, "%s()\n", __func__);
1474 
1475 	/* Stop interrupts and DMA */
1476 	cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1477 	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1478 
1479 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1480 
1481 		reg = cx_read(PAD_CTRL);
1482 
1483 		/* Set TS1_OE */
1484 		reg = reg | 0x1;
1485 
1486 		/* clear TS1_SOP_OE and TS1_OE_HI */
1487 		reg = reg & ~0xa;
1488 		cx_write(PAD_CTRL, reg);
1489 		cx_write(port->reg_src_sel, 0);
1490 		cx_write(port->reg_gen_ctrl, 8);
1491 
1492 	}
1493 
1494 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1495 		cx23885_av_clk(dev, 0);
1496 
1497 	return 0;
1498 }
1499 
1500 int cx23885_restart_queue(struct cx23885_tsport *port,
1501 				struct cx23885_dmaqueue *q)
1502 {
1503 	struct cx23885_dev *dev = port->dev;
1504 	struct cx23885_buffer *buf;
1505 
1506 	dprintk(5, "%s()\n", __func__);
1507 	if (list_empty(&q->active)) {
1508 		struct cx23885_buffer *prev;
1509 		prev = NULL;
1510 
1511 		dprintk(5, "%s() queue is empty\n", __func__);
1512 
1513 		for (;;) {
1514 			if (list_empty(&q->queued))
1515 				return 0;
1516 			buf = list_entry(q->queued.next, struct cx23885_buffer,
1517 					 vb.queue);
1518 			if (NULL == prev) {
1519 				list_del(&buf->vb.queue);
1520 				list_add_tail(&buf->vb.queue, &q->active);
1521 				cx23885_start_dma(port, q, buf);
1522 				buf->vb.state = VIDEOBUF_ACTIVE;
1523 				buf->count    = q->count++;
1524 				mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
1525 				dprintk(5, "[%p/%d] restart_queue - f/active\n",
1526 					buf, buf->vb.i);
1527 
1528 			} else if (prev->vb.width  == buf->vb.width  &&
1529 				   prev->vb.height == buf->vb.height &&
1530 				   prev->fmt       == buf->fmt) {
1531 				list_del(&buf->vb.queue);
1532 				list_add_tail(&buf->vb.queue, &q->active);
1533 				buf->vb.state = VIDEOBUF_ACTIVE;
1534 				buf->count    = q->count++;
1535 				prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
1536 				/* 64 bit bits 63-32 */
1537 				prev->risc.jmp[2] = cpu_to_le32(0);
1538 				dprintk(5, "[%p/%d] restart_queue - m/active\n",
1539 					buf, buf->vb.i);
1540 			} else {
1541 				return 0;
1542 			}
1543 			prev = buf;
1544 		}
1545 		return 0;
1546 	}
1547 
1548 	buf = list_entry(q->active.next, struct cx23885_buffer, vb.queue);
1549 	dprintk(2, "restart_queue [%p/%d]: restart dma\n",
1550 		buf, buf->vb.i);
1551 	cx23885_start_dma(port, q, buf);
1552 	list_for_each_entry(buf, &q->active, vb.queue)
1553 		buf->count = q->count++;
1554 	mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
1555 	return 0;
1556 }
1557 
1558 /* ------------------------------------------------------------------ */
1559 
1560 int cx23885_buf_prepare(struct videobuf_queue *q, struct cx23885_tsport *port,
1561 			struct cx23885_buffer *buf, enum v4l2_field field)
1562 {
1563 	struct cx23885_dev *dev = port->dev;
1564 	int size = port->ts_packet_size * port->ts_packet_count;
1565 	int rc;
1566 
1567 	dprintk(1, "%s: %p\n", __func__, buf);
1568 	if (0 != buf->vb.baddr  &&  buf->vb.bsize < size)
1569 		return -EINVAL;
1570 
1571 	if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
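		/* vb.width/height are reused to carry the TS packet size and
		 * count; cx23885_start_dma() later writes vb.width into the
		 * port's LNGTH register. */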
1572 		buf->vb.width  = port->ts_packet_size;
1573 		buf->vb.height = port->ts_packet_count;
1574 		buf->vb.size   = size;
1575 		buf->vb.field  = field /*V4L2_FIELD_TOP*/;
1576 
1577 		rc = videobuf_iolock(q, &buf->vb, NULL);
1578 		if (0 != rc)
1579 			goto fail;
1580 		cx23885_risc_databuffer(dev->pci, &buf->risc,
1581 					videobuf_to_dma(&buf->vb)->sglist,
1582 					buf->vb.width, buf->vb.height, 0);
1583 	}
1584 	buf->vb.state = VIDEOBUF_PREPARED;
1585 	return 0;
1586 
1587  fail:
1588 	cx23885_free_buffer(q, buf);
1589 	return rc;
1590 }
1591 
1592 void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
1593 {
1594 	struct cx23885_buffer    *prev;
1595 	struct cx23885_dev *dev = port->dev;
1596 	struct cx23885_dmaqueue  *cx88q = &port->mpegq;
1597 
1598 	/* add jump to stopper */
1599 	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
1600 	buf->risc.jmp[1] = cpu_to_le32(cx88q->stopper.dma);
1601 	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
1602 
1603 	if (list_empty(&cx88q->active)) {
1604 		dprintk(1, "queue is empty - first active\n");
1605 		list_add_tail(&buf->vb.queue, &cx88q->active);
1606 		cx23885_start_dma(port, cx88q, buf);
1607 		buf->vb.state = VIDEOBUF_ACTIVE;
1608 		buf->count    = cx88q->count++;
1609 		mod_timer(&cx88q->timeout, jiffies + BUFFER_TIMEOUT);
1610 		dprintk(1, "[%p/%d] %s - first active\n",
1611 			buf, buf->vb.i, __func__);
1612 	} else {
1613 		dprintk(1, "queue is not empty - append to active\n");
1614 		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
1615 				  vb.queue);
1616 		list_add_tail(&buf->vb.queue, &cx88q->active);
1617 		buf->vb.state = VIDEOBUF_ACTIVE;
1618 		buf->count    = cx88q->count++;
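		/* Re-point the previous buffer's terminating jump at this
		 * buffer's RISC program, so DMA chains straight on instead
		 * of hitting the stopper. */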
1619 		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
1620 		prev->risc.jmp[2] = cpu_to_le32(0); /* 64 bit bits 63-32 */
1621 		dprintk(1, "[%p/%d] %s - append to active\n",
1622 			 buf, buf->vb.i, __func__);
1623 	}
1624 }
1625 
1626 /* ----------------------------------------------------------- */
1627 
1628 static void do_cancel_buffers(struct cx23885_tsport *port, char *reason,
1629 			      int restart)
1630 {
1631 	struct cx23885_dev *dev = port->dev;
1632 	struct cx23885_dmaqueue *q = &port->mpegq;
1633 	struct cx23885_buffer *buf;
1634 	unsigned long flags;
1635 
1636 	spin_lock_irqsave(&port->slock, flags);
1637 	while (!list_empty(&q->active)) {
1638 		buf = list_entry(q->active.next, struct cx23885_buffer,
1639 				 vb.queue);
1640 		list_del(&buf->vb.queue);
1641 		buf->vb.state = VIDEOBUF_ERROR;
1642 		wake_up(&buf->vb.done);
1643 		dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1644 			buf, buf->vb.i, reason, (unsigned long)buf->risc.dma);
1645 	}
1646 	if (restart) {
1647 		dprintk(1, "restarting queue\n");
1648 		cx23885_restart_queue(port, q);
1649 	}
1650 	spin_unlock_irqrestore(&port->slock, flags);
1651 }
1652 
1653 void cx23885_cancel_buffers(struct cx23885_tsport *port)
1654 {
1655 	struct cx23885_dev *dev = port->dev;
1656 	struct cx23885_dmaqueue *q = &port->mpegq;
1657 
1658 	dprintk(1, "%s()\n", __func__);
1659 	del_timer_sync(&q->timeout);
1660 	cx23885_stop_dma(port);
1661 	do_cancel_buffers(port, "cancel", 0);
1662 }
1663 
1664 static void cx23885_timeout(unsigned long data)
1665 {
1666 	struct cx23885_tsport *port = (struct cx23885_tsport *)data;
1667 	struct cx23885_dev *dev = port->dev;
1668 
1669 	dprintk(1, "%s()\n", __func__);
1670 
1671 	if (debug > 5)
1672 		cx23885_sram_channel_dump(dev,
1673 			&dev->sram_channels[port->sram_chno]);
1674 
1675 	cx23885_stop_dma(port);
1676 	do_cancel_buffers(port, "timeout", 1);
1677 }
1678 
1679 int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
1680 {
1681 	/* FIXME: port1 assumption here. */
1682 	struct cx23885_tsport *port = &dev->ts1;
1683 	int count = 0;
1684 	int handled = 0;
1685 
1686 	if (status == 0)
1687 		return handled;
1688 
1689 	count = cx_read(port->reg_gpcnt);
1690 	dprintk(7, "status: 0x%08x  mask: 0x%08x count: 0x%x\n",
1691 		status, cx_read(port->reg_ts_int_msk), count);
1692 
1693 	if ((status & VID_B_MSK_BAD_PKT)         ||
1694 		(status & VID_B_MSK_OPC_ERR)     ||
1695 		(status & VID_B_MSK_VBI_OPC_ERR) ||
1696 		(status & VID_B_MSK_SYNC)        ||
1697 		(status & VID_B_MSK_VBI_SYNC)    ||
1698 		(status & VID_B_MSK_OF)          ||
1699 		(status & VID_B_MSK_VBI_OF)) {
1700 		printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
1701 			"= 0x%x\n", dev->name, status);
1702 		if (status & VID_B_MSK_BAD_PKT)
1703 			dprintk(1, "        VID_B_MSK_BAD_PKT\n");
1704 		if (status & VID_B_MSK_OPC_ERR)
1705 			dprintk(1, "        VID_B_MSK_OPC_ERR\n");
1706 		if (status & VID_B_MSK_VBI_OPC_ERR)
1707 			dprintk(1, "        VID_B_MSK_VBI_OPC_ERR\n");
1708 		if (status & VID_B_MSK_SYNC)
1709 			dprintk(1, "        VID_B_MSK_SYNC\n");
1710 		if (status & VID_B_MSK_VBI_SYNC)
1711 			dprintk(1, "        VID_B_MSK_VBI_SYNC\n");
1712 		if (status & VID_B_MSK_OF)
1713 			dprintk(1, "        VID_B_MSK_OF\n");
1714 		if (status & VID_B_MSK_VBI_OF)
1715 			dprintk(1, "        VID_B_MSK_VBI_OF\n");
1716 
1717 		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1718 		cx23885_sram_channel_dump(dev,
1719 			&dev->sram_channels[port->sram_chno]);
1720 		cx23885_417_check_encoder(dev);
1721 	} else if (status & VID_B_MSK_RISCI1) {
1722 		dprintk(7, "        VID_B_MSK_RISCI1\n");
1723 		spin_lock(&port->slock);
1724 		cx23885_wakeup(port, &port->mpegq, count);
1725 		spin_unlock(&port->slock);
1726 	} else if (status & VID_B_MSK_RISCI2) {
1727 		dprintk(7, "        VID_B_MSK_RISCI2\n");
1728 		spin_lock(&port->slock);
1729 		cx23885_restart_queue(port, &port->mpegq);
1730 		spin_unlock(&port->slock);
1731 	}
1732 	if (status) {
1733 		cx_write(port->reg_ts_int_stat, status);
1734 		handled = 1;
1735 	}
1736 
1737 	return handled;
1738 }
1739 
1740 static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
1741 {
1742 	struct cx23885_dev *dev = port->dev;
1743 	int handled = 0;
1744 	u32 count;
1745 
1746 	if ((status & VID_BC_MSK_OPC_ERR) ||
1747 		(status & VID_BC_MSK_BAD_PKT) ||
1748 		(status & VID_BC_MSK_SYNC) ||
1749 		(status & VID_BC_MSK_OF)) {
1750 
1751 		if (status & VID_BC_MSK_OPC_ERR)
1752 			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
1753 				VID_BC_MSK_OPC_ERR);
1754 
1755 		if (status & VID_BC_MSK_BAD_PKT)
1756 			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
1757 				VID_BC_MSK_BAD_PKT);
1758 
1759 		if (status & VID_BC_MSK_SYNC)
1760 			dprintk(7, " (VID_BC_MSK_SYNC    0x%08x)\n",
1761 				VID_BC_MSK_SYNC);
1762 
1763 		if (status & VID_BC_MSK_OF)
1764 			dprintk(7, " (VID_BC_MSK_OF      0x%08x)\n",
1765 				VID_BC_MSK_OF);
1766 
1767 		printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);
1768 
1769 		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1770 		cx23885_sram_channel_dump(dev,
1771 			&dev->sram_channels[port->sram_chno]);
1772 
1773 	} else if (status & VID_BC_MSK_RISCI1) {
1774 
1775 		dprintk(7, " (RISCI1            0x%08x)\n", VID_BC_MSK_RISCI1);
1776 
1777 		spin_lock(&port->slock);
1778 		count = cx_read(port->reg_gpcnt);
1779 		cx23885_wakeup(port, &port->mpegq, count);
1780 		spin_unlock(&port->slock);
1781 
1782 	} else if (status & VID_BC_MSK_RISCI2) {
1783 
1784 		dprintk(7, " (RISCI2            0x%08x)\n", VID_BC_MSK_RISCI2);
1785 
1786 		spin_lock(&port->slock);
1787 		cx23885_restart_queue(port, &port->mpegq);
1788 		spin_unlock(&port->slock);
1789 
1790 	}
1791 	if (status) {
1792 		cx_write(port->reg_ts_int_stat, status);
1793 		handled = 1;
1794 	}
1795 
1796 	return handled;
1797 }
1798 
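/* Top level (shared) interrupt handler: snapshot the PCI, video, audio and
 * TS port status registers, then dispatch to the per-subsystem handlers
 * and acknowledge whatever was serviced. */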
1799 static irqreturn_t cx23885_irq(int irq, void *dev_id)
1800 {
1801 	struct cx23885_dev *dev = dev_id;
1802 	struct cx23885_tsport *ts1 = &dev->ts1;
1803 	struct cx23885_tsport *ts2 = &dev->ts2;
1804 	u32 pci_status, pci_mask;
1805 	u32 vida_status, vida_mask;
1806 	u32 audint_status, audint_mask;
1807 	u32 ts1_status, ts1_mask;
1808 	u32 ts2_status, ts2_mask;
1809 	int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
1810 	int audint_count = 0;
1811 	bool subdev_handled;
1812 
1813 	pci_status = cx_read(PCI_INT_STAT);
1814 	pci_mask = cx23885_irq_get_mask(dev);
1815 	vida_status = cx_read(VID_A_INT_STAT);
1816 	vida_mask = cx_read(VID_A_INT_MSK);
1817 	audint_status = cx_read(AUDIO_INT_INT_STAT);
1818 	audint_mask = cx_read(AUDIO_INT_INT_MSK);
1819 	ts1_status = cx_read(VID_B_INT_STAT);
1820 	ts1_mask = cx_read(VID_B_INT_MSK);
1821 	ts2_status = cx_read(VID_C_INT_STAT);
1822 	ts2_mask = cx_read(VID_C_INT_MSK);
1823 
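	/* No bridge or TS port interrupts pending; on a shared IRQ line this
	 * was most likely raised by another device. */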
1824 	if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
1825 		goto out;
1826 
1827 	vida_count = cx_read(VID_A_GPCNT);
1828 	audint_count = cx_read(AUD_INT_A_GPCNT);
1829 	ts1_count = cx_read(ts1->reg_gpcnt);
1830 	ts2_count = cx_read(ts2->reg_gpcnt);
1831 	dprintk(7, "pci_status: 0x%08x  pci_mask: 0x%08x\n",
1832 		pci_status, pci_mask);
1833 	dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
1834 		vida_status, vida_mask, vida_count);
1835 	dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
1836 		audint_status, audint_mask, audint_count);
1837 	dprintk(7, "ts1_status: 0x%08x  ts1_mask: 0x%08x count: 0x%x\n",
1838 		ts1_status, ts1_mask, ts1_count);
1839 	dprintk(7, "ts2_status: 0x%08x  ts2_mask: 0x%08x count: 0x%x\n",
1840 		ts2_status, ts2_mask, ts2_count);
1841 
1842 	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
1843 			  PCI_MSK_AL_RD   | PCI_MSK_AL_WR   | PCI_MSK_APB_DMA |
1844 			  PCI_MSK_VID_C   | PCI_MSK_VID_B   | PCI_MSK_VID_A   |
1845 			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
1846 			  PCI_MSK_GPIO0   | PCI_MSK_GPIO1   |
1847 			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {
1848 
1849 		if (pci_status & PCI_MSK_RISC_RD)
1850 			dprintk(7, " (PCI_MSK_RISC_RD   0x%08x)\n",
1851 				PCI_MSK_RISC_RD);
1852 
1853 		if (pci_status & PCI_MSK_RISC_WR)
1854 			dprintk(7, " (PCI_MSK_RISC_WR   0x%08x)\n",
1855 				PCI_MSK_RISC_WR);
1856 
1857 		if (pci_status & PCI_MSK_AL_RD)
1858 			dprintk(7, " (PCI_MSK_AL_RD     0x%08x)\n",
1859 				PCI_MSK_AL_RD);
1860 
1861 		if (pci_status & PCI_MSK_AL_WR)
1862 			dprintk(7, " (PCI_MSK_AL_WR     0x%08x)\n",
1863 				PCI_MSK_AL_WR);
1864 
1865 		if (pci_status & PCI_MSK_APB_DMA)
1866 			dprintk(7, " (PCI_MSK_APB_DMA   0x%08x)\n",
1867 				PCI_MSK_APB_DMA);
1868 
1869 		if (pci_status & PCI_MSK_VID_C)
1870 			dprintk(7, " (PCI_MSK_VID_C     0x%08x)\n",
1871 				PCI_MSK_VID_C);
1872 
1873 		if (pci_status & PCI_MSK_VID_B)
1874 			dprintk(7, " (PCI_MSK_VID_B     0x%08x)\n",
1875 				PCI_MSK_VID_B);
1876 
1877 		if (pci_status & PCI_MSK_VID_A)
1878 			dprintk(7, " (PCI_MSK_VID_A     0x%08x)\n",
1879 				PCI_MSK_VID_A);
1880 
1881 		if (pci_status & PCI_MSK_AUD_INT)
1882 			dprintk(7, " (PCI_MSK_AUD_INT   0x%08x)\n",
1883 				PCI_MSK_AUD_INT);
1884 
1885 		if (pci_status & PCI_MSK_AUD_EXT)
1886 			dprintk(7, " (PCI_MSK_AUD_EXT   0x%08x)\n",
1887 				PCI_MSK_AUD_EXT);
1888 
1889 		if (pci_status & PCI_MSK_GPIO0)
1890 			dprintk(7, " (PCI_MSK_GPIO0     0x%08x)\n",
1891 				PCI_MSK_GPIO0);
1892 
1893 		if (pci_status & PCI_MSK_GPIO1)
1894 			dprintk(7, " (PCI_MSK_GPIO1     0x%08x)\n",
1895 				PCI_MSK_GPIO1);
1896 
1897 		if (pci_status & PCI_MSK_AV_CORE)
1898 			dprintk(7, " (PCI_MSK_AV_CORE   0x%08x)\n",
1899 				PCI_MSK_AV_CORE);
1900 
1901 		if (pci_status & PCI_MSK_IR)
1902 			dprintk(7, " (PCI_MSK_IR        0x%08x)\n",
1903 				PCI_MSK_IR);
1904 	}
1905 
1906 	if (cx23885_boards[dev->board].ci_type == 1 &&
1907 			(pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
1908 		handled += netup_ci_slot_status(dev, pci_status);
1909 
1910 	if (cx23885_boards[dev->board].ci_type == 2 &&
1911 			(pci_status & PCI_MSK_GPIO0))
1912 		handled += altera_ci_irq(dev);
1913 
1914 	if (ts1_status) {
1915 		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1916 			handled += cx23885_irq_ts(ts1, ts1_status);
1917 		else
1918 		if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1919 			handled += cx23885_irq_417(dev, ts1_status);
1920 	}
1921 
1922 	if (ts2_status) {
1923 		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1924 			handled += cx23885_irq_ts(ts2, ts2_status);
1925 		else
1926 		if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1927 			handled += cx23885_irq_417(dev, ts2_status);
1928 	}
1929 
1930 	if (vida_status)
1931 		handled += cx23885_video_irq(dev, vida_status);
1932 
1933 	if (audint_status)
1934 		handled += cx23885_audio_irq(dev, audint_status, audint_mask);
1935 
1936 	if (pci_status & PCI_MSK_IR) {
1937 		subdev_handled = false;
1938 		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
1939 				 pci_status, &subdev_handled);
1940 		if (subdev_handled)
1941 			handled++;
1942 	}
1943 
1944 	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
1945 		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
1946 		if (!schedule_work(&dev->cx25840_work))
1947 			printk(KERN_ERR "%s: failed to set up deferred work for"
1948 			       " AV Core/IR interrupt. Interrupt is disabled"
1949 			       " and won't be re-enabled\n", dev->name);
1950 		handled++;
1951 	}
1952 
1953 	if (handled)
1954 		cx_write(PCI_INT_STAT, pci_status);
1955 out:
1956 	return IRQ_RETVAL(handled);
1957 }
1958 
1959 static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1960 				    unsigned int notification, void *arg)
1961 {
1962 	struct cx23885_dev *dev;
1963 
1964 	if (sd == NULL)
1965 		return;
1966 
1967 	dev = to_cx23885(sd->v4l2_dev);
1968 
1969 	switch (notification) {
1970 	case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
1971 		if (sd == dev->sd_ir)
1972 			cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1973 		break;
1974 	case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
1975 		if (sd == dev->sd_ir)
1976 			cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1977 		break;
1978 	}
1979 }
1980 
1981 static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
1982 {
1983 	INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
1984 	INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
1985 	INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
1986 	dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
1987 }
1988 
1989 static inline int encoder_on_portb(struct cx23885_dev *dev)
1990 {
1991 	return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
1992 }
1993 
1994 static inline int encoder_on_portc(struct cx23885_dev *dev)
1995 {
1996 	return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
1997 }
1998 
1999 /* The mask represents 32 different GPIOs, split across multiple registers
2000  * depending on the board configuration (and on whether the 417 encoder,
2001  * with its own GPIOs, is present). Each GPIO bit is pushed into the correct
2002  * hardware register, regardless of physical location. Certain registers are
2003  * shared, so we sanity check and report an error if we think we're tampering
2004  * with a GPIO that might be assigned to the encoder (and used for the host
2005  * bus). See the usage sketch after cx23885_gpio_enable() below.
2006  *
2007  * GPIO  2 thru  0 - On the cx23885 bridge
2008  * GPIO 18 thru  3 - On the cx23417 host bus interface
2009  * GPIO 23 thru 19 - On the cx25840 a/v core
2010  */
2011 void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
2012 {
2013 	if (mask & 0x7)
2014 		cx_set(GP0_IO, mask & 0x7);
2015 
2016 	if (mask & 0x0007fff8) {
2017 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
2018 			printk(KERN_ERR
2019 				"%s: Setting GPIO on encoder ports\n",
2020 				dev->name);
2021 		cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
2022 	}
2023 
2024 	/* TODO: 23-19 */
2025 	if (mask & 0x00f80000)
2026 		printk(KERN_INFO "%s: Unsupported\n", dev->name);
2027 }
2028 
2029 void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
2030 {
2031 	if (mask & 0x00000007)
2032 		cx_clear(GP0_IO, mask & 0x7);
2033 
2034 	if (mask & 0x0007fff8) {
2035 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
2036 			printk(KERN_ERR
2037 				"%s: Clearing GPIO on encoder ports\n",
2038 				dev->name);
2039 		cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
2040 	}
2041 
2042 	/* TODO: 23-19 */
2043 	if (mask & 0x00f80000)
2044 		printk(KERN_INFO "%s: Unsupported\n", dev->name);
2045 }
2046 
2047 u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
2048 {
2049 	if (mask & 0x00000007)
2050 		return (cx_read(GP0_IO) >> 8) & mask & 0x7;
2051 
2052 	if (mask & 0x0007fff8) {
2053 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
2054 			printk(KERN_ERR
2055 				"%s: Reading GPIO moving on encoder ports\n",
2056 				"%s: Reading GPIO on encoder ports\n",
2057 		return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
2058 	}
2059 
2060 	/* TODO: 23-19 */
2061 	if (mask & 0x00f80000)
2062 		printk(KERN_INFO "%s: Unsupported\n", dev->name);
2063 
2064 	return 0;
2065 }
2066 
2067 void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
2068 {
2069 	if ((mask & 0x00000007) && asoutput)
2070 		cx_set(GP0_IO, (mask & 0x7) << 16);
2071 	else if ((mask & 0x00000007) && !asoutput)
2072 		cx_clear(GP0_IO, (mask & 0x7) << 16);
2073 
2074 	if (mask & 0x0007fff8) {
2075 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
2076 			printk(KERN_ERR
2077 				"%s: Enabling GPIO on encoder ports\n",
2078 				dev->name);
2079 	}
2080 
2081 	/* MC417_OEN is active low for outputs; write 1 to make a pin an input */
2082 	if ((mask & 0x0007fff8) && asoutput)
2083 		cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
2084 
2085 	else if ((mask & 0x0007fff8) && !asoutput)
2086 		cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
2087 
2088 	/* TODO: 23-19 */
2089 }
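
/*
 * Illustrative only: a minimal, hedged sketch of how the GPIO helpers above
 * combine, assuming a hypothetical board with an active-high reset line on
 * bridge GPIO 0 and a status input on host-bus GPIO 3. It is not called
 * anywhere in the driver; board support code would normally do this sort of
 * thing from its card setup hooks.
 */
static void __maybe_unused cx23885_gpio_usage_sketch(struct cx23885_dev *dev)
{
	/* Bit 0 lives in GP0_IO; drive it as an output */
	cx23885_gpio_enable(dev, 0x00000001, 1);

	/* Pulse the (assumed) reset line */
	cx23885_gpio_set(dev, 0x00000001);
	mdelay(10);
	cx23885_gpio_clear(dev, 0x00000001);

	/* Bit 3 lives in MC417_RWD; configure it as an input and read it back */
	cx23885_gpio_enable(dev, 0x00000008, 0);
	dprintk(1, "GPIO3 reads 0x%x\n", cx23885_gpio_get(dev, 0x00000008));
}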
2090 
2091 static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
2092 				     const struct pci_device_id *pci_id)
2093 {
2094 	struct cx23885_dev *dev;
2095 	int err;
2096 
2097 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2098 	if (NULL == dev)
2099 		return -ENOMEM;
2100 
2101 	err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
2102 	if (err < 0)
2103 		goto fail_free;
2104 
2105 	/* Prepare to handle notifications from subdevices */
2106 	cx23885_v4l2_dev_notify_init(dev);
2107 
2108 	/* pci init */
2109 	dev->pci = pci_dev;
2110 	if (pci_enable_device(pci_dev)) {
2111 		err = -EIO;
2112 		goto fail_unreg;
2113 	}
2114 
2115 	if (cx23885_dev_setup(dev) < 0) {
2116 		err = -EINVAL;
2117 		goto fail_unreg;
2118 	}
2119 
2120 	/* print pci info */
2121 	dev->pci_rev = pci_dev->revision;
2122 	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER,  &dev->pci_lat);
2123 	printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
2124 	       "latency: %d, mmio: 0x%llx\n", dev->name,
2125 	       pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
2126 	       dev->pci_lat,
2127 		(unsigned long long)pci_resource_start(pci_dev, 0));
2128 
2129 	pci_set_master(pci_dev);
2130 	if (!pci_dma_supported(pci_dev, 0xffffffff)) {
2131 		printk(KERN_ERR "%s/0: Oops: no 32-bit PCI DMA ???\n", dev->name);
2132 		err = -EIO;
2133 		goto fail_irq;
2134 	}
2135 
2136 	err = request_irq(pci_dev->irq, cx23885_irq,
2137 			  IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
2138 	if (err < 0) {
2139 		printk(KERN_ERR "%s: can't get IRQ %d\n",
2140 		       dev->name, pci_dev->irq);
2141 		goto fail_irq;
2142 	}
2143 
2144 	switch (dev->board) {
2145 	case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
2146 		cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
2147 		break;
2148 	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
2149 		cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
2150 		break;
2151 	}
2152 
2153 	/*
2154 	 * The CX2388[58] IR controller can start firing interrupts when
2155 	 * enabled, so these have to take place after the cx23885_irq() handler
2156 	 * is hooked up by the call to request_irq() above.
2157 	 */
2158 	cx23885_ir_pci_int_enable(dev);
2159 	cx23885_input_init(dev);
2160 
2161 	return 0;
2162 
2163 fail_irq:
2164 	cx23885_dev_unregister(dev);
2165 fail_unreg:
2166 	v4l2_device_unregister(&dev->v4l2_dev);
2167 fail_free:
2168 	kfree(dev);
2169 	return err;
2170 }
2171 
2172 static void __devexit cx23885_finidev(struct pci_dev *pci_dev)
2173 {
2174 	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
2175 	struct cx23885_dev *dev = to_cx23885(v4l2_dev);
2176 
2177 	cx23885_input_fini(dev);
2178 	cx23885_ir_fini(dev);
2179 
2180 	cx23885_shutdown(dev);
2181 
2182 	pci_disable_device(pci_dev);
2183 
2184 	/* unregister stuff */
2185 	free_irq(pci_dev->irq, dev);
2186 
2187 	cx23885_dev_unregister(dev);
2188 	v4l2_device_unregister(v4l2_dev);
2189 	kfree(dev);
2190 }
2191 
2192 static struct pci_device_id cx23885_pci_tbl[] = {
2193 	{
2194 		/* CX23885 */
2195 		.vendor       = 0x14f1,
2196 		.device       = 0x8852,
2197 		.subvendor    = PCI_ANY_ID,
2198 		.subdevice    = PCI_ANY_ID,
2199 	}, {
2200 		/* CX23887 Rev 2 */
2201 		.vendor       = 0x14f1,
2202 		.device       = 0x8880,
2203 		.subvendor    = PCI_ANY_ID,
2204 		.subdevice    = PCI_ANY_ID,
2205 	}, {
2206 		/* --- end of list --- */
2207 	}
2208 };
2209 MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2210 
2211 static struct pci_driver cx23885_pci_driver = {
2212 	.name     = "cx23885",
2213 	.id_table = cx23885_pci_tbl,
2214 	.probe    = cx23885_initdev,
2215 	.remove   = __devexit_p(cx23885_finidev),
2216 	/* TODO */
2217 	.suspend  = NULL,
2218 	.resume   = NULL,
2219 };
2220 
2221 static int __init cx23885_init(void)
2222 {
2223 	printk(KERN_INFO "cx23885 driver version %s loaded\n",
2224 		CX23885_VERSION);
2225 	return pci_register_driver(&cx23885_pci_driver);
2226 }
2227 
2228 static void __exit cx23885_fini(void)
2229 {
2230 	pci_unregister_driver(&cx23885_pci_driver);
2231 }
2232 
2233 module_init(cx23885_init);
2234 module_exit(cx23885_fini);
2235