// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux driver for the digital TV devices equipped with B2C2 FlexcopII(b)/III
 * flexcop-pci.c - covers the PCI part including DMA transfers
 * see flexcop.c for copyright information
 */

#define FC_LOG_PREFIX "flexcop-pci"
#include "flexcop-common.h"

static int enable_pid_filtering = 1;
module_param(enable_pid_filtering, int, 0444);
MODULE_PARM_DESC(enable_pid_filtering,
	"enable hardware pid filtering: supported values: 0 (fullts), 1");

static int irq_chk_intv = 100;
module_param(irq_chk_intv, int, 0644);
MODULE_PARM_DESC(irq_chk_intv, "set the interval for IRQ streaming watchdog.");

#ifdef CONFIG_DVB_B2C2_FLEXCOP_DEBUG
#define dprintk(level,args...) \
	do { if ((debug & level)) printk(args); } while (0)
#define DEBSTATUS ""
#else
#define dprintk(level,args...)
#define DEBSTATUS " (debugging is not enabled)"
#endif

#define deb_info(args...) dprintk(0x01, args)
#define deb_reg(args...) dprintk(0x02, args)
#define deb_ts(args...) dprintk(0x04, args)
#define deb_irq(args...) dprintk(0x08, args)
#define deb_chk(args...) dprintk(0x10, args)

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug,
	"set debug level (1=info,2=regs,4=TS,8=irqdma,16=check (|-able))."
	DEBSTATUS);

#define DRIVER_VERSION "0.1"
#define DRIVER_NAME "flexcop-pci"
#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@posteo.de>"

struct flexcop_pci {
	struct pci_dev *pdev;

#define FC_PCI_INIT     0x01
#define FC_PCI_DMA_INIT 0x02
	int init_state;

	void __iomem *io_mem;
	u32 irq;
	/* the buffer size (at least for DMA1) needs to be a multiple of 188,
	 * as the DMA handling below relies on whole TS packets */
#define FC_DEFAULT_DMA1_BUFSIZE (1280 * 188)
#define FC_DEFAULT_DMA2_BUFSIZE (10 * 188)
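	/* assumption (see flexcop_dma_allocate() in flexcop-dma.c): each DMA
	 * buffer is split into two halves (addr0/addr1), so dma[0].size as
	 * used below refers to one half and dma[0].size * 2 to the whole
	 * DMA1 buffer (1280 * 188 = 240640 bytes) */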
	struct flexcop_dma dma[2];

	int active_dma1_addr; /* 0 = addr0 of dma1; 1 = addr1 of dma1 */
	u32 last_dma1_cur_pos;
	/* position of the pointer last time the timer/packet irq occurred */
	int count;
	int count_prev;
	int stream_problem;

	spinlock_t irq_lock;
	unsigned long last_irq;

	struct delayed_work irq_check_work;
	struct flexcop_device *fc_dev;
};

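/* remember the last register/value pair written and read so that deb_reg()
 * only logs accesses which actually change something */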
static int lastwreg, lastwval, lastrreg, lastrval;

static flexcop_ibi_value flexcop_pci_read_ibi_reg(struct flexcop_device *fc,
		flexcop_ibi_register r)
{
	struct flexcop_pci *fc_pci = fc->bus_specific;
	flexcop_ibi_value v;
	v.raw = readl(fc_pci->io_mem + r);

	if (lastrreg != r || lastrval != v.raw) {
		lastrreg = r; lastrval = v.raw;
		deb_reg("new rd: %3x: %08x\n", r, v.raw);
	}

	return v;
}

static int flexcop_pci_write_ibi_reg(struct flexcop_device *fc,
		flexcop_ibi_register r, flexcop_ibi_value v)
{
	struct flexcop_pci *fc_pci = fc->bus_specific;

	if (lastwreg != r || lastwval != v.raw) {
		lastwreg = r; lastwval = v.raw;
		deb_reg("new wr: %3x: %08x\n", r, v.raw);
	}

	writel(v.raw, fc_pci->io_mem + r);
	return 0;
}

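/* streaming watchdog, rescheduled every irq_chk_intv ms (clamped to at least
 * 100 ms): while feeds are active, if the IRQ counter has not advanced for
 * several consecutive checks, stop and restart all active feeds to reprogram
 * the PID filter and recover the stream */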
static void flexcop_pci_irq_check_work(struct work_struct *work)
{
	struct flexcop_pci *fc_pci =
		container_of(work, struct flexcop_pci, irq_check_work.work);
	struct flexcop_device *fc = fc_pci->fc_dev;

	if (fc->feedcount) {

		if (fc_pci->count == fc_pci->count_prev) {
			deb_chk("no IRQ since the last check\n");
			if (fc_pci->stream_problem++ == 3) {
				struct dvb_demux_feed *feed;
				deb_info("flexcop-pci: stream problem, resetting pid filter\n");

				spin_lock_irq(&fc->demux.lock);
				list_for_each_entry(feed, &fc->demux.feed_list,
						list_head) {
					flexcop_pid_feed_control(fc, feed, 0);
				}

				list_for_each_entry(feed, &fc->demux.feed_list,
						list_head) {
					flexcop_pid_feed_control(fc, feed, 1);
				}
				spin_unlock_irq(&fc->demux.lock);

				fc_pci->stream_problem = 0;
			}
		} else {
			fc_pci->stream_problem = 0;
			fc_pci->count_prev = fc_pci->count;
		}
	}

	schedule_delayed_work(&fc_pci->irq_check_work,
			msecs_to_jiffies(irq_chk_intv < 100 ? 100 : irq_chk_intv));
}

/* When PID filtering is turned on, we use the timer IRQ, because small amounts
 * of data need to be passed to user space immediately as well. When PID
 * filtering is turned off, we use the page-change IRQ. */
static irqreturn_t flexcop_pci_isr(int irq, void *dev_id)
{
	struct flexcop_pci *fc_pci = dev_id;
	struct flexcop_device *fc = fc_pci->fc_dev;
	unsigned long flags;
	flexcop_ibi_value v;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock_irqsave(&fc_pci->irq_lock, flags);
	v = fc->read_ibi_reg(fc, irq_20c);

	/* errors */
	if (v.irq_20c.Data_receiver_error)
		deb_chk("data receiver error\n");
	if (v.irq_20c.Continuity_error_flag)
		deb_chk("Continuity error flag is set\n");
	if (v.irq_20c.LLC_SNAP_FLAG_set)
		deb_chk("LLC_SNAP_FLAG_set is set\n");
	if (v.irq_20c.Transport_Error)
		deb_chk("Transport error\n");

	if ((fc_pci->count % 1000) == 0)
		deb_chk("%d valid irqs have occurred so far\n", fc_pci->count);

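	/* page-change IRQ (full TS mode): the half buffer that has just been
	 * filled contains only whole 188-byte TS packets and is handed to the
	 * demux packet-wise */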
	if (v.irq_20c.DMA1_IRQ_Status == 1) {
		if (fc_pci->active_dma1_addr == 0)
			flexcop_pass_dmx_packets(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr0,
					fc_pci->dma[0].size / 188);
		else
			flexcop_pass_dmx_packets(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr1,
					fc_pci->dma[0].size / 188);

		deb_irq("page change to page: %d\n", !fc_pci->active_dma1_addr);
		fc_pci->active_dma1_addr = !fc_pci->active_dma1_addr;
		/* for the timer IRQ we can only use buffer dmx feeding, because
		 * we don't have complete TS packets when reading from the DMA
		 * memory */
	} else if (v.irq_20c.DMA1_Timer_Status == 1) {
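		/* dma_cur_addr apparently holds the current DMA address in
		 * 32-bit words, hence the shift by 2 to get a byte address */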
		dma_addr_t cur_addr =
			fc->read_ibi_reg(fc,dma1_008).dma_0x8.dma_cur_addr << 2;
		u32 cur_pos = cur_addr - fc_pci->dma[0].dma_addr0;

		deb_irq("%u irq: %08x cur_addr: %llx: cur_pos: %08x, last_cur_pos: %08x ",
				jiffies_to_usecs(jiffies - fc_pci->last_irq),
				v.raw, (unsigned long long)cur_addr, cur_pos,
				fc_pci->last_dma1_cur_pos);
		fc_pci->last_irq = jiffies;

		/* the buffer end was reached and the DMA restarted at the
		 * beginning: pass the data from last_cur_pos up to the buffer
		 * end to the demux
		 */
		if (cur_pos < fc_pci->last_dma1_cur_pos) {
			deb_irq(" end was reached: passing %d bytes ",
				(fc_pci->dma[0].size*2) -
				fc_pci->last_dma1_cur_pos);
			flexcop_pass_dmx_data(fc_pci->fc_dev,
				fc_pci->dma[0].cpu_addr0 +
					fc_pci->last_dma1_cur_pos,
				(fc_pci->dma[0].size*2) -
					fc_pci->last_dma1_cur_pos);
			fc_pci->last_dma1_cur_pos = 0;
		}

		if (cur_pos > fc_pci->last_dma1_cur_pos) {
			deb_irq(" passing %d bytes ",
				cur_pos - fc_pci->last_dma1_cur_pos);
			flexcop_pass_dmx_data(fc_pci->fc_dev,
				fc_pci->dma[0].cpu_addr0 +
					fc_pci->last_dma1_cur_pos,
				cur_pos - fc_pci->last_dma1_cur_pos);
		}
		deb_irq("\n");

		fc_pci->last_dma1_cur_pos = cur_pos;
		fc_pci->count++;
	} else {
		deb_irq("isr for flexcop called, apparently without reason (%08x)\n",
			v.raw);
		ret = IRQ_NONE;
	}

	spin_unlock_irqrestore(&fc_pci->irq_lock, flags);
	return ret;
}

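/* start/stop the TS transfer: on start, (re)configure both DMA channels and
 * the DMA1 timer, enable the transfer on both DMA1 sub-buffers and finally
 * the timer IRQ; on stop, disable in the reverse order */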
static int flexcop_pci_stream_control(struct flexcop_device *fc, int onoff)
{
	struct flexcop_pci *fc_pci = fc->bus_specific;
	if (onoff) {
		flexcop_dma_config(fc, &fc_pci->dma[0], FC_DMA_1);
		flexcop_dma_config(fc, &fc_pci->dma[1], FC_DMA_2);
		flexcop_dma_config_timer(fc, FC_DMA_1, 0);
		flexcop_dma_xfer_control(fc, FC_DMA_1,
				FC_DMA_SUBADDR_0 | FC_DMA_SUBADDR_1, 1);
		deb_irq("DMA xfer enabled\n");

		fc_pci->last_dma1_cur_pos = 0;
		flexcop_dma_control_timer_irq(fc, FC_DMA_1, 1);
		deb_irq("IRQ enabled\n");
		fc_pci->count_prev = fc_pci->count;
	} else {
		flexcop_dma_control_timer_irq(fc, FC_DMA_1, 0);
		deb_irq("IRQ disabled\n");

		flexcop_dma_xfer_control(fc, FC_DMA_1,
			 FC_DMA_SUBADDR_0 | FC_DMA_SUBADDR_1, 0);
		deb_irq("DMA xfer disabled\n");
	}
	return 0;
}

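/* allocate the two DMA buffers and route the SRAM destinations: media and net
 * data (the TS stream) go to DMA1, the CAI/CAO data to DMA2 */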
static int flexcop_pci_dma_init(struct flexcop_pci *fc_pci)
{
	int ret;
	ret = flexcop_dma_allocate(fc_pci->pdev, &fc_pci->dma[0],
			FC_DEFAULT_DMA1_BUFSIZE);
	if (ret != 0)
		return ret;

	ret = flexcop_dma_allocate(fc_pci->pdev, &fc_pci->dma[1],
			FC_DEFAULT_DMA2_BUFSIZE);
	if (ret != 0) {
		flexcop_dma_free(&fc_pci->dma[0]);
		return ret;
	}

	flexcop_sram_set_dest(fc_pci->fc_dev, FC_SRAM_DEST_MEDIA |
			FC_SRAM_DEST_NET, FC_SRAM_DEST_TARGET_DMA1);
	flexcop_sram_set_dest(fc_pci->fc_dev, FC_SRAM_DEST_CAO |
			FC_SRAM_DEST_CAI, FC_SRAM_DEST_TARGET_DMA2);
	fc_pci->init_state |= FC_PCI_DMA_INIT;
	return ret;
}

static void flexcop_pci_dma_exit(struct flexcop_pci *fc_pci)
{
	if (fc_pci->init_state & FC_PCI_DMA_INIT) {
		flexcop_dma_free(&fc_pci->dma[0]);
		flexcop_dma_free(&fc_pci->dma[1]);
	}
	fc_pci->init_state &= ~FC_PCI_DMA_INIT;
}

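/* PCI bring-up: enable the device, become bus master, map 0x800 bytes of
 * BAR 0 for the IBI registers and install the shared interrupt handler */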
static int flexcop_pci_init(struct flexcop_pci *fc_pci)
{
	int ret;

	info("card revision %x", fc_pci->pdev->revision);

	if ((ret = pci_enable_device(fc_pci->pdev)) != 0)
		return ret;
	pci_set_master(fc_pci->pdev);

	if ((ret = pci_request_regions(fc_pci->pdev, DRIVER_NAME)) != 0)
		goto err_pci_disable_device;

	fc_pci->io_mem = pci_iomap(fc_pci->pdev, 0, 0x800);

	if (!fc_pci->io_mem) {
		err("cannot map io memory\n");
		ret = -EIO;
		goto err_pci_release_regions;
	}

	pci_set_drvdata(fc_pci->pdev, fc_pci);
	spin_lock_init(&fc_pci->irq_lock);
	if ((ret = request_irq(fc_pci->pdev->irq, flexcop_pci_isr,
					IRQF_SHARED, DRIVER_NAME, fc_pci)) != 0)
		goto err_pci_iounmap;

	fc_pci->init_state |= FC_PCI_INIT;
	return ret;

err_pci_iounmap:
	pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
err_pci_release_regions:
	pci_release_regions(fc_pci->pdev);
err_pci_disable_device:
	pci_disable_device(fc_pci->pdev);
	return ret;
}

static void flexcop_pci_exit(struct flexcop_pci *fc_pci)
{
	if (fc_pci->init_state & FC_PCI_INIT) {
		free_irq(fc_pci->pdev->irq, fc_pci);
		pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
		pci_release_regions(fc_pci->pdev);
		pci_disable_device(fc_pci->pdev);
	}
	fc_pci->init_state &= ~FC_PCI_INIT;
}

static int flexcop_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	struct flexcop_device *fc;
	struct flexcop_pci *fc_pci;
	int ret = -ENOMEM;

	if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_pci))) == NULL) {
		err("out of memory\n");
		return -ENOMEM;
	}

	/* general flexcop init */
	fc_pci = fc->bus_specific;
	fc_pci->fc_dev = fc;

	fc->read_ibi_reg = flexcop_pci_read_ibi_reg;
	fc->write_ibi_reg = flexcop_pci_write_ibi_reg;
	fc->i2c_request = flexcop_i2c_request;
	fc->get_mac_addr = flexcop_eeprom_check_mac_addr;
	fc->stream_control = flexcop_pci_stream_control;

	if (enable_pid_filtering)
		info("will use the HW PID filter.");
	else
		info("will pass the complete TS to the demuxer.");

	fc->pid_filtering = enable_pid_filtering;
	fc->bus_type = FC_PCI;
	fc->dev = &pdev->dev;
	fc->owner = THIS_MODULE;

	/* bus specific part */
	fc_pci->pdev = pdev;
	if ((ret = flexcop_pci_init(fc_pci)) != 0)
		goto err_kfree;

	/* init flexcop */
	if ((ret = flexcop_device_initialize(fc)) != 0)
		goto err_pci_exit;

	/* init dma */
	if ((ret = flexcop_pci_dma_init(fc_pci)) != 0)
		goto err_fc_exit;

	INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work);

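	/* the IRQ watchdog is only armed when irq_chk_intv is positive; the
	 * interval is clamped to at least 100 ms, matching the reschedule in
	 * flexcop_pci_irq_check_work() */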
	if (irq_chk_intv > 0)
		schedule_delayed_work(&fc_pci->irq_check_work,
				msecs_to_jiffies(irq_chk_intv < 100 ?
					100 :
					irq_chk_intv));
	return ret;

err_fc_exit:
	flexcop_device_exit(fc);
err_pci_exit:
	flexcop_pci_exit(fc_pci);
err_kfree:
	flexcop_device_kfree(fc);
	return ret;
}

/* every _exit function has exactly two call sites: here and the bail-out
 * part of the corresponding _init/_probe path
 */
static void flexcop_pci_remove(struct pci_dev *pdev)
{
	struct flexcop_pci *fc_pci = pci_get_drvdata(pdev);

	if (irq_chk_intv > 0)
		cancel_delayed_work(&fc_pci->irq_check_work);

	flexcop_pci_dma_exit(fc_pci);
	flexcop_device_exit(fc_pci->fc_dev);
	flexcop_pci_exit(fc_pci);
	flexcop_device_kfree(fc_pci->fc_dev);
}

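/* all FlexCop based PCI cards supported by this driver are matched by this
 * single vendor/device ID */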
static const struct pci_device_id flexcop_pci_tbl[] = {
	{ PCI_DEVICE(0x13d0, 0x2103) },
	{ },
};

MODULE_DEVICE_TABLE(pci, flexcop_pci_tbl);

static struct pci_driver flexcop_pci_driver = {
	.name     = "b2c2_flexcop_pci",
	.id_table = flexcop_pci_tbl,
	.probe    = flexcop_pci_probe,
	.remove   = flexcop_pci_remove,
};

module_pci_driver(flexcop_pci_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_NAME);
MODULE_LICENSE("GPL");
438