1 // SPDX-License-Identifier: LGPL-2.1-or-later
2 /*
3 * dmxdev.c - DVB demultiplexer device
4 *
5 * Copyright (C) 2000 Ralph Metzler & Marcus Metzler
6 * for convergence integrated media GmbH
7 */
8
9 #define pr_fmt(fmt) "dmxdev: " fmt
10
11 #include <linux/sched.h>
12 #include <linux/spinlock.h>
13 #include <linux/slab.h>
14 #include <linux/vmalloc.h>
15 #include <linux/module.h>
16 #include <linux/poll.h>
17 #include <linux/ioctl.h>
18 #include <linux/wait.h>
19 #include <linux/uaccess.h>
20 #include <media/dmxdev.h>
21 #include <media/dvb_vb2.h>
22
23 static int debug;
24
25 module_param(debug, int, 0644);
26 MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
27
28 #define dprintk(fmt, arg...) do { \
29 if (debug) \
30 printk(KERN_DEBUG pr_fmt("%s: " fmt), \
31 __func__, ##arg); \
32 } while (0)
33
/*
 * Append @len bytes from @src to the ring buffer.
 *
 * Fails with -EOVERFLOW when the buffer does not have room for the
 * whole chunk; a zero-length write or an unallocated buffer is a
 * silent no-op.  Returns the number of bytes written on success.
 */
static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf,
				   const u8 *src, size_t len)
{
	ssize_t space;

	if (!len || !buf->data)
		return 0;

	space = dvb_ringbuffer_free(buf);
	if (len > space) {
		dprintk("buffer overflow\n");
		return -EOVERFLOW;
	}

	return dvb_ringbuffer_write(buf, src, len);
}
52
/*
 * Copy up to @count bytes from the ring buffer to user space.
 *
 * Blocks until data (or a latched buffer error) is available, unless
 * @non_blocking is set.  Returns the number of bytes copied; when
 * nothing was copied, returns the pending error code instead, or 0
 * for an unallocated buffer.
 */
static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src,
				      int non_blocking, char __user *buf,
				      size_t count, loff_t *ppos)
{
	size_t todo;
	ssize_t avail;
	ssize_t ret = 0;

	if (!src->data)
		return 0;

	if (src->error) {
		/* report and clear a previously latched error */
		ret = src->error;
		dvb_ringbuffer_flush(src);
		return ret;
	}

	for (todo = count; todo > 0; todo -= ret) {
		if (non_blocking && dvb_ringbuffer_empty(src)) {
			ret = -EWOULDBLOCK;
			break;
		}

		/* sleep until data arrives or an error is flagged */
		ret = wait_event_interruptible(src->queue,
					       !dvb_ringbuffer_empty(src) ||
					       (src->error != 0));
		if (ret < 0)
			break;

		if (src->error) {
			ret = src->error;
			dvb_ringbuffer_flush(src);
			break;
		}

		avail = dvb_ringbuffer_avail(src);
		if (avail > todo)
			avail = todo;

		ret = dvb_ringbuffer_read_user(src, buf, avail);
		if (ret < 0)
			break;

		buf += ret;
	}

	/* a partial read succeeds; otherwise propagate the error */
	return (count - todo) ? (count - todo) : ret;
}
101
get_fe(struct dmx_demux * demux,int type)102 static struct dmx_frontend *get_fe(struct dmx_demux *demux, int type)
103 {
104 struct list_head *head, *pos;
105
106 head = demux->get_frontends(demux);
107 if (!head)
108 return NULL;
109 list_for_each(pos, head)
110 if (DMX_FE_ENTRY(pos)->source == type)
111 return DMX_FE_ENTRY(pos);
112
113 return NULL;
114 }
115
/*
 * Open handler for the DVR device node.
 *
 * Readers get a freshly allocated ring buffer (and, with
 * CONFIG_DVB_MMAP, a vb2 context); writers get the demux switched to
 * the memory frontend so dvb_dvr_write() can feed it.
 */
static int dvb_dvr_open(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;
	struct dmx_frontend *front;
	bool need_ringbuffer = false;

	dprintk("%s\n", __func__);

	if (mutex_lock_interruptible(&dmxdev->mutex))
		return -ERESTARTSYS;

	if (dmxdev->exit) {
		/* device is being unregistered */
		mutex_unlock(&dmxdev->mutex);
		return -ENODEV;
	}

	dmxdev->may_do_mmap = 0;

	/*
	 * The logic here is a little tricky due to the ifdef.
	 *
	 * The ringbuffer is used for both read and mmap.
	 *
	 * It is not needed, however, in two situations:
	 *	- Write devices (access with O_WRONLY);
	 *	- For duplex device nodes, opened with O_RDWR.
	 */

	if ((file->f_flags & O_ACCMODE) == O_RDONLY)
		need_ringbuffer = true;
	else if ((file->f_flags & O_ACCMODE) == O_RDWR) {
		if (!(dmxdev->capabilities & DMXDEV_CAP_DUPLEX)) {
#ifdef CONFIG_DVB_MMAP
			dmxdev->may_do_mmap = 1;
			need_ringbuffer = true;
#else
			mutex_unlock(&dmxdev->mutex);
			return -EOPNOTSUPP;
#endif
		}
	}

	if (need_ringbuffer) {
		void *mem;

		/* only a single reader may hold the DVR buffer */
		if (!dvbdev->readers) {
			mutex_unlock(&dmxdev->mutex);
			return -EBUSY;
		}
		mem = vmalloc(DVR_BUFFER_SIZE);
		if (!mem) {
			mutex_unlock(&dmxdev->mutex);
			return -ENOMEM;
		}
		dmxdev->dvr_buffer.data = mem;
		dmxdev->dvr_buffer.size = DVR_BUFFER_SIZE;
		dvb_ringbuffer_reset(&dmxdev->dvr_buffer);
		if (dmxdev->may_do_mmap)
			dvb_vb2_init(&dmxdev->dvr_vb2_ctx, "dvr",
				     &dmxdev->mutex,
				     file->f_flags & O_NONBLOCK);
		dvbdev->readers--;
	}

	if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
		/* remember the current frontend to restore it on release */
		dmxdev->dvr_orig_fe = dmxdev->demux->frontend;

		if (!dmxdev->demux->write) {
			mutex_unlock(&dmxdev->mutex);
			return -EOPNOTSUPP;
		}

		front = get_fe(dmxdev->demux, DMX_MEMORY_FE);

		if (!front) {
			mutex_unlock(&dmxdev->mutex);
			return -EINVAL;
		}
		/* route the demux input to the memory frontend */
		dmxdev->demux->disconnect_frontend(dmxdev->demux);
		dmxdev->demux->connect_frontend(dmxdev->demux, front);
	}
	dvbdev->users++;
	mutex_unlock(&dmxdev->mutex);
	return 0;
}
202
/*
 * Release handler for the DVR device node: undo whatever
 * dvb_dvr_open() set up for this open mode.
 */
static int dvb_dvr_release(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;

	mutex_lock(&dmxdev->mutex);

	if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
		/* restore the frontend that was connected before open */
		dmxdev->demux->disconnect_frontend(dmxdev->demux);
		dmxdev->demux->connect_frontend(dmxdev->demux,
						dmxdev->dvr_orig_fe);
	}

	if (((file->f_flags & O_ACCMODE) == O_RDONLY) ||
	    dmxdev->may_do_mmap) {
		if (dmxdev->may_do_mmap) {
			if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx))
				dvb_vb2_stream_off(&dmxdev->dvr_vb2_ctx);
			dvb_vb2_release(&dmxdev->dvr_vb2_ctx);
		}
		dvbdev->readers++;
		if (dmxdev->dvr_buffer.data) {
			void *mem = dmxdev->dvr_buffer.data;
			/*memory barrier*/
			mb();
			/* clear the pointer under the lock so demux
			 * callbacks see NULL before the memory is freed */
			spin_lock_irq(&dmxdev->lock);
			dmxdev->dvr_buffer.data = NULL;
			spin_unlock_irq(&dmxdev->lock);
			vfree(mem);
		}
	}
	/* TODO */
	dvbdev->users--;
	if (dvbdev->users == 1 && dmxdev->exit == 1) {
		/* only the core holds a reference: wake the unregister path */
		mutex_unlock(&dmxdev->mutex);
		wake_up(&dvbdev->wait_queue);
	} else
		mutex_unlock(&dmxdev->mutex);

	return 0;
}
244
dvb_dvr_write(struct file * file,const char __user * buf,size_t count,loff_t * ppos)245 static ssize_t dvb_dvr_write(struct file *file, const char __user *buf,
246 size_t count, loff_t *ppos)
247 {
248 struct dvb_device *dvbdev = file->private_data;
249 struct dmxdev *dmxdev = dvbdev->priv;
250 int ret;
251
252 if (!dmxdev->demux->write)
253 return -EOPNOTSUPP;
254 if ((file->f_flags & O_ACCMODE) != O_WRONLY)
255 return -EINVAL;
256 if (mutex_lock_interruptible(&dmxdev->mutex))
257 return -ERESTARTSYS;
258
259 if (dmxdev->exit) {
260 mutex_unlock(&dmxdev->mutex);
261 return -ENODEV;
262 }
263 ret = dmxdev->demux->write(dmxdev->demux, buf, count);
264 mutex_unlock(&dmxdev->mutex);
265 return ret;
266 }
267
dvb_dvr_read(struct file * file,char __user * buf,size_t count,loff_t * ppos)268 static ssize_t dvb_dvr_read(struct file *file, char __user *buf, size_t count,
269 loff_t *ppos)
270 {
271 struct dvb_device *dvbdev = file->private_data;
272 struct dmxdev *dmxdev = dvbdev->priv;
273
274 if (dmxdev->exit)
275 return -ENODEV;
276
277 return dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer,
278 file->f_flags & O_NONBLOCK,
279 buf, count, ppos);
280 }
281
dvb_dvr_set_buffer_size(struct dmxdev * dmxdev,unsigned long size)282 static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev,
283 unsigned long size)
284 {
285 struct dvb_ringbuffer *buf = &dmxdev->dvr_buffer;
286 void *newmem;
287 void *oldmem;
288
289 dprintk("%s\n", __func__);
290
291 if (buf->size == size)
292 return 0;
293 if (!size)
294 return -EINVAL;
295
296 newmem = vmalloc(size);
297 if (!newmem)
298 return -ENOMEM;
299
300 oldmem = buf->data;
301
302 spin_lock_irq(&dmxdev->lock);
303 buf->data = newmem;
304 buf->size = size;
305
306 /* reset and not flush in case the buffer shrinks */
307 dvb_ringbuffer_reset(buf);
308 spin_unlock_irq(&dmxdev->lock);
309
310 vfree(oldmem);
311
312 return 0;
313 }
314
dvb_dmxdev_filter_state_set(struct dmxdev_filter * dmxdevfilter,int state)315 static inline void dvb_dmxdev_filter_state_set(struct dmxdev_filter
316 *dmxdevfilter, int state)
317 {
318 spin_lock_irq(&dmxdevfilter->dev->lock);
319 dmxdevfilter->state = state;
320 spin_unlock_irq(&dmxdevfilter->dev->lock);
321 }
322
dvb_dmxdev_set_buffer_size(struct dmxdev_filter * dmxdevfilter,unsigned long size)323 static int dvb_dmxdev_set_buffer_size(struct dmxdev_filter *dmxdevfilter,
324 unsigned long size)
325 {
326 struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
327 void *newmem;
328 void *oldmem;
329
330 if (buf->size == size)
331 return 0;
332 if (!size)
333 return -EINVAL;
334 if (dmxdevfilter->state >= DMXDEV_STATE_GO)
335 return -EBUSY;
336
337 newmem = vmalloc(size);
338 if (!newmem)
339 return -ENOMEM;
340
341 oldmem = buf->data;
342
343 spin_lock_irq(&dmxdevfilter->dev->lock);
344 buf->data = newmem;
345 buf->size = size;
346
347 /* reset and not flush in case the buffer shrinks */
348 dvb_ringbuffer_reset(buf);
349 spin_unlock_irq(&dmxdevfilter->dev->lock);
350
351 vfree(oldmem);
352
353 return 0;
354 }
355
/*
 * Section-filter timeout handler: latch -ETIMEDOUT on the buffer,
 * move the filter to the TIMEDOUT state and wake up any readers.
 */
static void dvb_dmxdev_filter_timeout(struct timer_list *t)
{
	struct dmxdev_filter *dmxdevfilter = timer_container_of(dmxdevfilter,
								t, timer);

	dmxdevfilter->buffer.error = -ETIMEDOUT;
	spin_lock_irq(&dmxdevfilter->dev->lock);
	dmxdevfilter->state = DMXDEV_STATE_TIMEDOUT;
	spin_unlock_irq(&dmxdevfilter->dev->lock);
	wake_up(&dmxdevfilter->buffer.queue);
}
367
dvb_dmxdev_filter_timer(struct dmxdev_filter * dmxdevfilter)368 static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter)
369 {
370 struct dmx_sct_filter_params *para = &dmxdevfilter->params.sec;
371
372 timer_delete(&dmxdevfilter->timer);
373 if (para->timeout) {
374 dmxdevfilter->timer.expires =
375 jiffies + 1 + (HZ / 2 + HZ * para->timeout) / 1000;
376 add_timer(&dmxdevfilter->timer);
377 }
378 }
379
/*
 * Demux section-feed callback, invoked with a completed section for
 * this filter.  Copies the section either into the vb2 buffer (mmap
 * streaming) or the filter ring buffer, and handles DMX_ONESHOT.
 */
static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
				       const u8 *buffer2, size_t buffer2_len,
				       struct dmx_section_filter *filter,
				       u32 *buffer_flags)
{
	struct dmxdev_filter *dmxdevfilter = filter->priv;
	int ret;

	if (!dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx) &&
	    dmxdevfilter->buffer.error) {
		/* an error is already latched: just wake readers */
		wake_up(&dmxdevfilter->buffer.queue);
		return 0;
	}
	spin_lock(&dmxdevfilter->dev->lock);
	if (dmxdevfilter->state != DMXDEV_STATE_GO) {
		spin_unlock(&dmxdevfilter->dev->lock);
		return 0;
	}
	/* data arrived, so no section timeout applies anymore */
	timer_delete(&dmxdevfilter->timer);
	dprintk("section callback %*ph\n", 6, buffer1);
	if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx)) {
		ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx,
					  buffer1, buffer1_len,
					  buffer_flags, true);
		if (ret == buffer1_len)
			ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx,
						  buffer2, buffer2_len,
						  buffer_flags, true);
	} else {
		ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer,
					      buffer1, buffer1_len);
		if (ret == buffer1_len) {
			ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer,
						      buffer2, buffer2_len);
		}
	}
	if (ret < 0)
		dmxdevfilter->buffer.error = ret;
	/* one-shot filters stop after delivering the first section */
	if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
		dmxdevfilter->state = DMXDEV_STATE_DONE;
	spin_unlock(&dmxdevfilter->dev->lock);
	wake_up(&dmxdevfilter->buffer.queue);
	return 0;
}
424
/*
 * Demux TS-feed callback for PES filters.  Routes the payload either
 * to the per-filter buffer (TAP outputs) or to the shared DVR buffer,
 * using the vb2 context instead when userspace streams via mmap.
 *
 * Fix: @ctx is declared only under CONFIG_DVB_MMAP but was assigned
 * and tested unconditionally, breaking the build when the option is
 * disabled.  Guard every use with the same #ifdef; without mmap
 * support the vb2 branch cannot be taken anyway.
 */
static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
				  const u8 *buffer2, size_t buffer2_len,
				  struct dmx_ts_feed *feed,
				  u32 *buffer_flags)
{
	struct dmxdev_filter *dmxdevfilter = feed->priv;
	struct dvb_ringbuffer *buffer;
#ifdef CONFIG_DVB_MMAP
	struct dvb_vb2_ctx *ctx;
#endif
	int ret;

	spin_lock(&dmxdevfilter->dev->lock);
	if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
		/* decoder output is consumed by hardware, nothing to copy */
		spin_unlock(&dmxdevfilter->dev->lock);
		return 0;
	}

	if (dmxdevfilter->params.pes.output == DMX_OUT_TAP ||
	    dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP) {
		buffer = &dmxdevfilter->buffer;
#ifdef CONFIG_DVB_MMAP
		ctx = &dmxdevfilter->vb2_ctx;
#endif
	} else {
		buffer = &dmxdevfilter->dev->dvr_buffer;
#ifdef CONFIG_DVB_MMAP
		ctx = &dmxdevfilter->dev->dvr_vb2_ctx;
#endif
	}

#ifdef CONFIG_DVB_MMAP
	if (dvb_vb2_is_streaming(ctx)) {
		ret = dvb_vb2_fill_buffer(ctx, buffer1, buffer1_len,
					  buffer_flags, false);
		if (ret == buffer1_len)
			ret = dvb_vb2_fill_buffer(ctx, buffer2, buffer2_len,
						  buffer_flags, false);
	} else
#endif
	{
		if (buffer->error) {
			/* error already latched: just wake readers */
			spin_unlock(&dmxdevfilter->dev->lock);
			wake_up(&buffer->queue);
			return 0;
		}
		ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
		if (ret == buffer1_len)
			ret = dvb_dmxdev_buffer_write(buffer,
						      buffer2, buffer2_len);
	}
	if (ret < 0)
		buffer->error = ret;
	spin_unlock(&dmxdevfilter->dev->lock);
	wake_up(&buffer->queue);
	return 0;
}
479
480 /* stop feed but only mark the specified filter as stopped (state set) */
dvb_dmxdev_feed_stop(struct dmxdev_filter * dmxdevfilter)481 static int dvb_dmxdev_feed_stop(struct dmxdev_filter *dmxdevfilter)
482 {
483 struct dmxdev_feed *feed;
484
485 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
486
487 switch (dmxdevfilter->type) {
488 case DMXDEV_TYPE_SEC:
489 timer_delete(&dmxdevfilter->timer);
490 dmxdevfilter->feed.sec->stop_filtering(dmxdevfilter->feed.sec);
491 break;
492 case DMXDEV_TYPE_PES:
493 list_for_each_entry(feed, &dmxdevfilter->feed.ts, next)
494 feed->ts->stop_filtering(feed->ts);
495 break;
496 default:
497 return -EINVAL;
498 }
499 return 0;
500 }
501
502 /* start feed associated with the specified filter */
dvb_dmxdev_feed_start(struct dmxdev_filter * filter)503 static int dvb_dmxdev_feed_start(struct dmxdev_filter *filter)
504 {
505 struct dmxdev_feed *feed;
506 int ret;
507
508 dvb_dmxdev_filter_state_set(filter, DMXDEV_STATE_GO);
509
510 switch (filter->type) {
511 case DMXDEV_TYPE_SEC:
512 return filter->feed.sec->start_filtering(filter->feed.sec);
513 case DMXDEV_TYPE_PES:
514 list_for_each_entry(feed, &filter->feed.ts, next) {
515 ret = feed->ts->start_filtering(feed->ts);
516 if (ret < 0) {
517 dvb_dmxdev_feed_stop(filter);
518 return ret;
519 }
520 }
521 break;
522 default:
523 return -EINVAL;
524 }
525
526 return 0;
527 }
528
529 /* restart section feed if it has filters left associated with it,
530 otherwise release the feed */
dvb_dmxdev_feed_restart(struct dmxdev_filter * filter)531 static int dvb_dmxdev_feed_restart(struct dmxdev_filter *filter)
532 {
533 int i;
534 struct dmxdev *dmxdev = filter->dev;
535 u16 pid = filter->params.sec.pid;
536
537 for (i = 0; i < dmxdev->filternum; i++)
538 if (dmxdev->filter[i].state >= DMXDEV_STATE_GO &&
539 dmxdev->filter[i].type == DMXDEV_TYPE_SEC &&
540 dmxdev->filter[i].params.sec.pid == pid) {
541 dvb_dmxdev_feed_start(&dmxdev->filter[i]);
542 return 0;
543 }
544
545 filter->dev->demux->release_section_feed(dmxdev->demux,
546 filter->feed.sec);
547
548 return 0;
549 }
550
/*
 * Fully stop a running filter: stop its feeds, release section
 * filters/feeds (or TS feeds) back to the demux core, and flush the
 * filter's ring buffer.  No-op for filters below the GO state.
 */
static int dvb_dmxdev_filter_stop(struct dmxdev_filter *dmxdevfilter)
{
	struct dmxdev_feed *feed;
	struct dmx_demux *demux;

	if (dmxdevfilter->state < DMXDEV_STATE_GO)
		return 0;

	switch (dmxdevfilter->type) {
	case DMXDEV_TYPE_SEC:
		if (!dmxdevfilter->feed.sec)
			break;
		dvb_dmxdev_feed_stop(dmxdevfilter);
		if (dmxdevfilter->filter.sec)
			dmxdevfilter->feed.sec->
			    release_filter(dmxdevfilter->feed.sec,
					   dmxdevfilter->filter.sec);
		/* hand the shared feed to another filter on the same PID,
		 * or release it if this was the last user */
		dvb_dmxdev_feed_restart(dmxdevfilter);
		dmxdevfilter->feed.sec = NULL;
		break;
	case DMXDEV_TYPE_PES:
		dvb_dmxdev_feed_stop(dmxdevfilter);
		demux = dmxdevfilter->dev->demux;
		list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
			demux->release_ts_feed(demux, feed->ts);
			feed->ts = NULL;
		}
		break;
	default:
		if (dmxdevfilter->state == DMXDEV_STATE_ALLOCATED)
			return 0;
		return -EINVAL;
	}

	dvb_ringbuffer_flush(&dmxdevfilter->buffer);
	return 0;
}
588
dvb_dmxdev_delete_pids(struct dmxdev_filter * dmxdevfilter)589 static void dvb_dmxdev_delete_pids(struct dmxdev_filter *dmxdevfilter)
590 {
591 struct dmxdev_feed *feed, *tmp;
592
593 /* delete all PIDs */
594 list_for_each_entry_safe(feed, tmp, &dmxdevfilter->feed.ts, next) {
595 list_del(&feed->next);
596 kfree(feed);
597 }
598
599 BUG_ON(!list_empty(&dmxdevfilter->feed.ts));
600 }
601
dvb_dmxdev_filter_reset(struct dmxdev_filter * dmxdevfilter)602 static inline int dvb_dmxdev_filter_reset(struct dmxdev_filter *dmxdevfilter)
603 {
604 if (dmxdevfilter->state < DMXDEV_STATE_SET)
605 return 0;
606
607 if (dmxdevfilter->type == DMXDEV_TYPE_PES)
608 dvb_dmxdev_delete_pids(dmxdevfilter);
609
610 dmxdevfilter->type = DMXDEV_TYPE_NONE;
611 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
612 return 0;
613 }
614
dvb_dmxdev_start_feed(struct dmxdev * dmxdev,struct dmxdev_filter * filter,struct dmxdev_feed * feed)615 static int dvb_dmxdev_start_feed(struct dmxdev *dmxdev,
616 struct dmxdev_filter *filter,
617 struct dmxdev_feed *feed)
618 {
619 ktime_t timeout = ktime_set(0, 0);
620 struct dmx_pes_filter_params *para = &filter->params.pes;
621 enum dmx_output otype;
622 int ret;
623 int ts_type;
624 enum dmx_ts_pes ts_pes;
625 struct dmx_ts_feed *tsfeed;
626
627 feed->ts = NULL;
628 otype = para->output;
629
630 ts_pes = para->pes_type;
631
632 if (ts_pes < DMX_PES_OTHER)
633 ts_type = TS_DECODER;
634 else
635 ts_type = 0;
636
637 if (otype == DMX_OUT_TS_TAP)
638 ts_type |= TS_PACKET;
639 else if (otype == DMX_OUT_TSDEMUX_TAP)
640 ts_type |= TS_PACKET | TS_DEMUX;
641 else if (otype == DMX_OUT_TAP)
642 ts_type |= TS_PACKET | TS_DEMUX | TS_PAYLOAD_ONLY;
643
644 ret = dmxdev->demux->allocate_ts_feed(dmxdev->demux, &feed->ts,
645 dvb_dmxdev_ts_callback);
646 if (ret < 0)
647 return ret;
648
649 tsfeed = feed->ts;
650 tsfeed->priv = filter;
651
652 ret = tsfeed->set(tsfeed, feed->pid, ts_type, ts_pes, timeout);
653 if (ret < 0) {
654 dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
655 return ret;
656 }
657
658 ret = tsfeed->start_filtering(tsfeed);
659 if (ret < 0) {
660 dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
661 return ret;
662 }
663
664 return 0;
665 }
666
dvb_dmxdev_filter_start(struct dmxdev_filter * filter)667 static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
668 {
669 struct dmxdev *dmxdev = filter->dev;
670 struct dmxdev_feed *feed;
671 void *mem;
672 int ret, i;
673
674 if (filter->state < DMXDEV_STATE_SET)
675 return -EINVAL;
676
677 if (filter->state >= DMXDEV_STATE_GO)
678 dvb_dmxdev_filter_stop(filter);
679
680 if (!filter->buffer.data) {
681 mem = vmalloc(filter->buffer.size);
682 if (!mem)
683 return -ENOMEM;
684 spin_lock_irq(&filter->dev->lock);
685 filter->buffer.data = mem;
686 spin_unlock_irq(&filter->dev->lock);
687 }
688
689 dvb_ringbuffer_flush(&filter->buffer);
690
691 switch (filter->type) {
692 case DMXDEV_TYPE_SEC:
693 {
694 struct dmx_sct_filter_params *para = &filter->params.sec;
695 struct dmx_section_filter **secfilter = &filter->filter.sec;
696 struct dmx_section_feed **secfeed = &filter->feed.sec;
697
698 *secfilter = NULL;
699 *secfeed = NULL;
700
701
702 /* find active filter/feed with same PID */
703 for (i = 0; i < dmxdev->filternum; i++) {
704 if (dmxdev->filter[i].state >= DMXDEV_STATE_GO &&
705 dmxdev->filter[i].type == DMXDEV_TYPE_SEC &&
706 dmxdev->filter[i].params.sec.pid == para->pid) {
707 *secfeed = dmxdev->filter[i].feed.sec;
708 break;
709 }
710 }
711
712 /* if no feed found, try to allocate new one */
713 if (!*secfeed) {
714 ret = dmxdev->demux->allocate_section_feed(dmxdev->demux,
715 secfeed,
716 dvb_dmxdev_section_callback);
717 if (!*secfeed) {
718 pr_err("DVB (%s): could not alloc feed\n",
719 __func__);
720 return ret;
721 }
722
723 ret = (*secfeed)->set(*secfeed, para->pid,
724 (para->flags & DMX_CHECK_CRC) ? 1 : 0);
725 if (ret < 0) {
726 pr_err("DVB (%s): could not set feed\n",
727 __func__);
728 dvb_dmxdev_feed_restart(filter);
729 return ret;
730 }
731 } else {
732 dvb_dmxdev_feed_stop(filter);
733 }
734
735 ret = (*secfeed)->allocate_filter(*secfeed, secfilter);
736 if (ret < 0) {
737 dvb_dmxdev_feed_restart(filter);
738 *secfeed = NULL;
739 dprintk("could not get filter\n");
740 return ret;
741 }
742
743 (*secfilter)->priv = filter;
744
745 memcpy(&((*secfilter)->filter_value[3]),
746 &(para->filter.filter[1]), DMX_FILTER_SIZE - 1);
747 memcpy(&(*secfilter)->filter_mask[3],
748 ¶->filter.mask[1], DMX_FILTER_SIZE - 1);
749 memcpy(&(*secfilter)->filter_mode[3],
750 ¶->filter.mode[1], DMX_FILTER_SIZE - 1);
751
752 (*secfilter)->filter_value[0] = para->filter.filter[0];
753 (*secfilter)->filter_mask[0] = para->filter.mask[0];
754 (*secfilter)->filter_mode[0] = para->filter.mode[0];
755 (*secfilter)->filter_mask[1] = 0;
756 (*secfilter)->filter_mask[2] = 0;
757
758 filter->todo = 0;
759
760 ret = filter->feed.sec->start_filtering(filter->feed.sec);
761 if (ret < 0)
762 return ret;
763
764 dvb_dmxdev_filter_timer(filter);
765 break;
766 }
767 case DMXDEV_TYPE_PES:
768 list_for_each_entry(feed, &filter->feed.ts, next) {
769 ret = dvb_dmxdev_start_feed(dmxdev, filter, feed);
770 if (ret < 0) {
771 dvb_dmxdev_filter_stop(filter);
772 return ret;
773 }
774 }
775 break;
776 default:
777 return -EINVAL;
778 }
779
780 dvb_dmxdev_filter_state_set(filter, DMXDEV_STATE_GO);
781 return 0;
782 }
783
/*
 * Open handler for the demux device: claim a free filter slot and
 * initialize its ring buffer, vb2 context, state and timeout timer.
 */
static int dvb_demux_open(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;
	int i;
	struct dmxdev_filter *dmxdevfilter;

	if (!dmxdev->filter)
		return -EINVAL;

	if (mutex_lock_interruptible(&dmxdev->mutex))
		return -ERESTARTSYS;

	if (dmxdev->exit) {
		mutex_unlock(&dmxdev->mutex);
		return -ENODEV;
	}

	/* find the first unused filter slot */
	for (i = 0; i < dmxdev->filternum; i++)
		if (dmxdev->filter[i].state == DMXDEV_STATE_FREE)
			break;

	if (i == dmxdev->filternum) {
		mutex_unlock(&dmxdev->mutex);
		return -EMFILE;
	}

	dmxdevfilter = &dmxdev->filter[i];
	mutex_init(&dmxdevfilter->mutex);
	/* from here on, private_data points at the filter, not the dvbdev */
	file->private_data = dmxdevfilter;

#ifdef CONFIG_DVB_MMAP
	dmxdev->may_do_mmap = 1;
#else
	dmxdev->may_do_mmap = 0;
#endif

	/*
	 * The mutex passed to dvb_vb2_init is unlocked when a buffer
	 * is in a blocking wait. However, dmxdevfilter has two mutexes:
	 * dmxdevfilter->mutex and dmxdev->mutex. So this will not work.
	 * The solution would be to support unlocking two mutexes in vb2,
	 * but since this problem has been here since the beginning and
	 * nobody ever complained, we leave it as-is rather than adding
	 * that second mutex pointer to vb2.
	 *
	 * In the unlikely event that someone complains about this, then
	 * this comment will hopefully help.
	 */
	dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
	dvb_vb2_init(&dmxdevfilter->vb2_ctx, "demux_filter",
		     &dmxdevfilter->mutex, file->f_flags & O_NONBLOCK);
	dmxdevfilter->type = DMXDEV_TYPE_NONE;
	dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
	timer_setup(&dmxdevfilter->timer, dvb_dmxdev_filter_timeout, 0);

	dvbdev->users++;

	mutex_unlock(&dmxdev->mutex);
	return 0;
}
845
/*
 * Tear down a filter slot: stop any vb2 streaming, stop and reset the
 * filter, free its ring buffer and mark the slot FREE again.  Holds
 * both the device and the filter mutex for the whole teardown.
 */
static int dvb_dmxdev_filter_free(struct dmxdev *dmxdev,
				  struct dmxdev_filter *dmxdevfilter)
{
	mutex_lock(&dmxdev->mutex);
	mutex_lock(&dmxdevfilter->mutex);
	if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx))
		dvb_vb2_stream_off(&dmxdevfilter->vb2_ctx);
	dvb_vb2_release(&dmxdevfilter->vb2_ctx);


	dvb_dmxdev_filter_stop(dmxdevfilter);
	dvb_dmxdev_filter_reset(dmxdevfilter);

	if (dmxdevfilter->buffer.data) {
		void *mem = dmxdevfilter->buffer.data;

		/* clear the pointer under the lock before freeing so the
		 * demux callbacks never touch freed memory */
		spin_lock_irq(&dmxdev->lock);
		dmxdevfilter->buffer.data = NULL;
		spin_unlock_irq(&dmxdev->lock);
		vfree(mem);
	}

	dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_FREE);
	wake_up(&dmxdevfilter->buffer.queue);
	mutex_unlock(&dmxdevfilter->mutex);
	mutex_unlock(&dmxdev->mutex);
	return 0;
}
874
invert_mode(struct dmx_filter * filter)875 static inline void invert_mode(struct dmx_filter *filter)
876 {
877 int i;
878
879 for (i = 0; i < DMX_FILTER_SIZE; i++)
880 filter->mode[i] ^= 0xff;
881 }
882
/*
 * Attach an additional PID to a PES filter.  Only DMX_OUT_TSDEMUX_TAP
 * filters may carry more than one PID; if the filter is already
 * running, the new feed is started immediately.
 */
static int dvb_dmxdev_add_pid(struct dmxdev *dmxdev,
			      struct dmxdev_filter *filter, u16 pid)
{
	struct dmxdev_feed *feed;

	if (filter->type != DMXDEV_TYPE_PES ||
	    filter->state < DMXDEV_STATE_SET)
		return -EINVAL;

	/* only TS packet filters may have multiple PIDs */
	if (filter->params.pes.output != DMX_OUT_TSDEMUX_TAP &&
	    !list_empty(&filter->feed.ts))
		return -EINVAL;

	feed = kzalloc_obj(struct dmxdev_feed);
	if (!feed)
		return -ENOMEM;

	feed->pid = pid;
	list_add(&feed->next, &filter->feed.ts);

	if (filter->state < DMXDEV_STATE_GO)
		return 0;

	return dvb_dmxdev_start_feed(dmxdev, filter, feed);
}
909
/*
 * Detach @pid from a PES filter, stopping and releasing its TS feed.
 * Entries whose feed was never started (feed->ts == NULL) are left in
 * place, matching the long-standing behavior.
 */
static int dvb_dmxdev_remove_pid(struct dmxdev *dmxdev,
				 struct dmxdev_filter *filter, u16 pid)
{
	struct dmxdev_feed *feed, *tmp;

	if (filter->type != DMXDEV_TYPE_PES ||
	    filter->state < DMXDEV_STATE_SET)
		return -EINVAL;

	list_for_each_entry_safe(feed, tmp, &filter->feed.ts, next) {
		if (feed->pid != pid || !feed->ts)
			continue;
		feed->ts->stop_filtering(feed->ts);
		filter->dev->demux->release_ts_feed(filter->dev->demux,
						    feed->ts);
		list_del(&feed->next);
		kfree(feed);
	}

	return 0;
}
931
dvb_dmxdev_filter_set(struct dmxdev * dmxdev,struct dmxdev_filter * dmxdevfilter,struct dmx_sct_filter_params * params)932 static int dvb_dmxdev_filter_set(struct dmxdev *dmxdev,
933 struct dmxdev_filter *dmxdevfilter,
934 struct dmx_sct_filter_params *params)
935 {
936 dprintk("%s: PID=0x%04x, flags=%02x, timeout=%d\n",
937 __func__, params->pid, params->flags, params->timeout);
938
939 dvb_dmxdev_filter_stop(dmxdevfilter);
940
941 dmxdevfilter->type = DMXDEV_TYPE_SEC;
942 memcpy(&dmxdevfilter->params.sec,
943 params, sizeof(struct dmx_sct_filter_params));
944 invert_mode(&dmxdevfilter->params.sec.filter);
945 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
946
947 if (params->flags & DMX_IMMEDIATE_START)
948 return dvb_dmxdev_filter_start(dmxdevfilter);
949
950 return 0;
951 }
952
dvb_dmxdev_pes_filter_set(struct dmxdev * dmxdev,struct dmxdev_filter * dmxdevfilter,struct dmx_pes_filter_params * params)953 static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
954 struct dmxdev_filter *dmxdevfilter,
955 struct dmx_pes_filter_params *params)
956 {
957 int ret;
958
959 dvb_dmxdev_filter_stop(dmxdevfilter);
960 dvb_dmxdev_filter_reset(dmxdevfilter);
961
962 if ((unsigned int)params->pes_type > DMX_PES_OTHER)
963 return -EINVAL;
964
965 dmxdevfilter->type = DMXDEV_TYPE_PES;
966 memcpy(&dmxdevfilter->params, params,
967 sizeof(struct dmx_pes_filter_params));
968 INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
969
970 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
971
972 ret = dvb_dmxdev_add_pid(dmxdev, dmxdevfilter,
973 dmxdevfilter->params.pes.pid);
974 if (ret < 0)
975 return ret;
976
977 if (params->flags & DMX_IMMEDIATE_START)
978 return dvb_dmxdev_filter_start(dmxdevfilter);
979
980 return 0;
981 }
982
/*
 * Read from a section filter.
 *
 * Section reads are framed: the 3-byte section header is delivered
 * first (progress tracked via dfil->todo, which is <= 0 while inside
 * the header), then the section payload whose length is parsed out of
 * the header's 12-bit length field.
 */
static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil,
				   struct file *file, char __user *buf,
				   size_t count, loff_t *ppos)
{
	int result, hcount;
	int done = 0;

	if (dfil->todo <= 0) {
		/* still delivering the 3-byte section header */
		hcount = 3 + dfil->todo;
		if (hcount > count)
			hcount = count;
		result = dvb_dmxdev_buffer_read(&dfil->buffer,
						file->f_flags & O_NONBLOCK,
						buf, hcount, ppos);
		if (result < 0) {
			dfil->todo = 0;
			return result;
		}
		/*
		 * NOTE(review): the header bytes are copied back from the
		 * user buffer that was just filled above; a concurrent
		 * userspace writer could alter them before this copy.
		 * Long-standing behavior — worth confirming it is safe.
		 */
		if (copy_from_user(dfil->secheader - dfil->todo, buf, result))
			return -EFAULT;
		buf += result;
		done = result;
		count -= result;
		dfil->todo -= result;
		if (dfil->todo > -3)
			return done;	/* header not complete yet */
		/* header complete: extract the 12-bit section length */
		dfil->todo = ((dfil->secheader[1] << 8) | dfil->secheader[2]) & 0xfff;
		if (!count)
			return done;
	}
	/* deliver at most the remainder of the current section */
	if (count > dfil->todo)
		count = dfil->todo;
	result = dvb_dmxdev_buffer_read(&dfil->buffer,
					file->f_flags & O_NONBLOCK,
					buf, count, ppos);
	if (result < 0)
		return result;
	dfil->todo -= result;
	return (result + done);
}
1023
1024 static ssize_t
dvb_demux_read(struct file * file,char __user * buf,size_t count,loff_t * ppos)1025 dvb_demux_read(struct file *file, char __user *buf, size_t count,
1026 loff_t *ppos)
1027 {
1028 struct dmxdev_filter *dmxdevfilter = file->private_data;
1029 int ret;
1030
1031 if (mutex_lock_interruptible(&dmxdevfilter->mutex))
1032 return -ERESTARTSYS;
1033
1034 if (dmxdevfilter->type == DMXDEV_TYPE_SEC)
1035 ret = dvb_dmxdev_read_sec(dmxdevfilter, file, buf, count, ppos);
1036 else
1037 ret = dvb_dmxdev_buffer_read(&dmxdevfilter->buffer,
1038 file->f_flags & O_NONBLOCK,
1039 buf, count, ppos);
1040
1041 mutex_unlock(&dmxdevfilter->mutex);
1042 return ret;
1043 }
1044
dvb_demux_do_ioctl(struct file * file,unsigned int cmd,void * parg)1045 static int dvb_demux_do_ioctl(struct file *file,
1046 unsigned int cmd, void *parg)
1047 {
1048 struct dmxdev_filter *dmxdevfilter = file->private_data;
1049 struct dmxdev *dmxdev = dmxdevfilter->dev;
1050 unsigned long arg = (unsigned long)parg;
1051 int ret = 0;
1052
1053 if (mutex_lock_interruptible(&dmxdev->mutex))
1054 return -ERESTARTSYS;
1055
1056 switch (cmd) {
1057 case DMX_START:
1058 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
1059 mutex_unlock(&dmxdev->mutex);
1060 return -ERESTARTSYS;
1061 }
1062 if (dmxdevfilter->state < DMXDEV_STATE_SET)
1063 ret = -EINVAL;
1064 else
1065 ret = dvb_dmxdev_filter_start(dmxdevfilter);
1066 mutex_unlock(&dmxdevfilter->mutex);
1067 break;
1068
1069 case DMX_STOP:
1070 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
1071 mutex_unlock(&dmxdev->mutex);
1072 return -ERESTARTSYS;
1073 }
1074 ret = dvb_dmxdev_filter_stop(dmxdevfilter);
1075 mutex_unlock(&dmxdevfilter->mutex);
1076 break;
1077
1078 case DMX_SET_FILTER:
1079 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
1080 mutex_unlock(&dmxdev->mutex);
1081 return -ERESTARTSYS;
1082 }
1083 ret = dvb_dmxdev_filter_set(dmxdev, dmxdevfilter, parg);
1084 mutex_unlock(&dmxdevfilter->mutex);
1085 break;
1086
1087 case DMX_SET_PES_FILTER:
1088 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
1089 mutex_unlock(&dmxdev->mutex);
1090 return -ERESTARTSYS;
1091 }
1092 ret = dvb_dmxdev_pes_filter_set(dmxdev, dmxdevfilter, parg);
1093 mutex_unlock(&dmxdevfilter->mutex);
1094 break;
1095
1096 case DMX_SET_BUFFER_SIZE:
1097 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
1098 mutex_unlock(&dmxdev->mutex);
1099 return -ERESTARTSYS;
1100 }
1101 ret = dvb_dmxdev_set_buffer_size(dmxdevfilter, arg);
1102 mutex_unlock(&dmxdevfilter->mutex);
1103 break;
1104
1105 case DMX_GET_PES_PIDS:
1106 if (!dmxdev->demux->get_pes_pids) {
1107 ret = -EINVAL;
1108 break;
1109 }
1110 dmxdev->demux->get_pes_pids(dmxdev->demux, parg);
1111 break;
1112
1113 case DMX_GET_STC:
1114 if (!dmxdev->demux->get_stc) {
1115 ret = -EINVAL;
1116 break;
1117 }
1118 ret = dmxdev->demux->get_stc(dmxdev->demux,
1119 ((struct dmx_stc *)parg)->num,
1120 &((struct dmx_stc *)parg)->stc,
1121 &((struct dmx_stc *)parg)->base);
1122 break;
1123
1124 case DMX_ADD_PID:
1125 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
1126 ret = -ERESTARTSYS;
1127 break;
1128 }
1129 ret = dvb_dmxdev_add_pid(dmxdev, dmxdevfilter, *(u16 *)parg);
1130 mutex_unlock(&dmxdevfilter->mutex);
1131 break;
1132
1133 case DMX_REMOVE_PID:
1134 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
1135 ret = -ERESTARTSYS;
1136 break;
1137 }
1138 ret = dvb_dmxdev_remove_pid(dmxdev, dmxdevfilter, *(u16 *)parg);
1139 mutex_unlock(&dmxdevfilter->mutex);
1140 break;
1141
1142 #ifdef CONFIG_DVB_MMAP
1143 case DMX_REQBUFS:
1144 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
1145 mutex_unlock(&dmxdev->mutex);
1146 return -ERESTARTSYS;
1147 }
1148 ret = dvb_vb2_reqbufs(&dmxdevfilter->vb2_ctx, parg);
1149 mutex_unlock(&dmxdevfilter->mutex);
1150 break;
1151
1152 case DMX_QUERYBUF:
1153 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
1154 mutex_unlock(&dmxdev->mutex);
1155 return -ERESTARTSYS;
1156 }
1157 ret = dvb_vb2_querybuf(&dmxdevfilter->vb2_ctx, parg);
1158 mutex_unlock(&dmxdevfilter->mutex);
1159 break;
1160
1161 case DMX_EXPBUF:
1162 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
1163 mutex_unlock(&dmxdev->mutex);
1164 return -ERESTARTSYS;
1165 }
1166 ret = dvb_vb2_expbuf(&dmxdevfilter->vb2_ctx, parg);
1167 mutex_unlock(&dmxdevfilter->mutex);
1168 break;
1169
1170 case DMX_QBUF:
1171 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
1172 mutex_unlock(&dmxdev->mutex);
1173 return -ERESTARTSYS;
1174 }
1175 ret = dvb_vb2_qbuf(&dmxdevfilter->vb2_ctx, parg);
1176 if (ret == 0 && !dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx))
1177 ret = dvb_vb2_stream_on(&dmxdevfilter->vb2_ctx);
1178 mutex_unlock(&dmxdevfilter->mutex);
1179 break;
1180
1181 case DMX_DQBUF:
1182 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
1183 mutex_unlock(&dmxdev->mutex);
1184 return -ERESTARTSYS;
1185 }
1186 ret = dvb_vb2_dqbuf(&dmxdevfilter->vb2_ctx, parg);
1187 mutex_unlock(&dmxdevfilter->mutex);
1188 break;
1189 #endif
1190 default:
1191 ret = -ENOTTY;
1192 break;
1193 }
1194 mutex_unlock(&dmxdev->mutex);
1195 return ret;
1196 }
1197
dvb_demux_ioctl(struct file * file,unsigned int cmd,unsigned long arg)1198 static long dvb_demux_ioctl(struct file *file, unsigned int cmd,
1199 unsigned long arg)
1200 {
1201 return dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl);
1202 }
1203
dvb_demux_poll(struct file * file,poll_table * wait)1204 static __poll_t dvb_demux_poll(struct file *file, poll_table *wait)
1205 {
1206 struct dmxdev_filter *dmxdevfilter = file->private_data;
1207 __poll_t mask = 0;
1208
1209 poll_wait(file, &dmxdevfilter->buffer.queue, wait);
1210
1211 if ((!dmxdevfilter) || dmxdevfilter->dev->exit)
1212 return EPOLLERR;
1213 if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx))
1214 return dvb_vb2_poll(&dmxdevfilter->vb2_ctx, file, wait);
1215
1216 if (dmxdevfilter->state != DMXDEV_STATE_GO &&
1217 dmxdevfilter->state != DMXDEV_STATE_DONE &&
1218 dmxdevfilter->state != DMXDEV_STATE_TIMEDOUT)
1219 return 0;
1220
1221 if (dmxdevfilter->buffer.error)
1222 mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR);
1223
1224 if (!dvb_ringbuffer_empty(&dmxdevfilter->buffer))
1225 mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI);
1226
1227 return mask;
1228 }
1229
1230 #ifdef CONFIG_DVB_MMAP
dvb_demux_mmap(struct file * file,struct vm_area_struct * vma)1231 static int dvb_demux_mmap(struct file *file, struct vm_area_struct *vma)
1232 {
1233 struct dmxdev_filter *dmxdevfilter = file->private_data;
1234 struct dmxdev *dmxdev = dmxdevfilter->dev;
1235
1236 if (!dmxdev->may_do_mmap)
1237 return -ENOTTY;
1238
1239 return dvb_vb2_mmap(&dmxdevfilter->vb2_ctx, vma);
1240 }
1241 #endif
1242
dvb_demux_release(struct inode * inode,struct file * file)1243 static int dvb_demux_release(struct inode *inode, struct file *file)
1244 {
1245 struct dmxdev_filter *dmxdevfilter = file->private_data;
1246 struct dmxdev *dmxdev = dmxdevfilter->dev;
1247 int ret;
1248
1249 ret = dvb_dmxdev_filter_free(dmxdev, dmxdevfilter);
1250
1251 mutex_lock(&dmxdev->mutex);
1252 dmxdev->dvbdev->users--;
1253 if (dmxdev->dvbdev->users == 1 && dmxdev->exit == 1) {
1254 mutex_unlock(&dmxdev->mutex);
1255 wake_up(&dmxdev->dvbdev->wait_queue);
1256 } else
1257 mutex_unlock(&dmxdev->mutex);
1258
1259 return ret;
1260 }
1261
/*
 * File operations for the demux device node.  file->private_data holds a
 * struct dmxdev_filter * (see dvb_demux_poll()/dvb_demux_release()).
 * compat_ioctl can reuse the native handler because dvb_usercopy() does
 * the argument marshalling.
 */
static const struct file_operations dvb_demux_fops = {
	.owner = THIS_MODULE,
	.read = dvb_demux_read,
	.unlocked_ioctl = dvb_demux_ioctl,
	.compat_ioctl = dvb_demux_ioctl,
	.open = dvb_demux_open,
	.release = dvb_demux_release,
	.poll = dvb_demux_poll,
	.llseek = default_llseek,
#ifdef CONFIG_DVB_MMAP
	.mmap = dvb_demux_mmap,
#endif
};
1275
/*
 * Template passed to dvb_register_device() for the demux node; .priv is
 * filled in at registration time.  NOTE(review): .users/.writers look
 * like open-count limits enforced by the dvb core — confirm against
 * dvb_register_device() semantics.
 */
static const struct dvb_device dvbdev_demux = {
	.priv = NULL,
	.users = 1,
	.writers = 1,
#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
	.name = "dvb-demux",
#endif
	.fops = &dvb_demux_fops
};
1285
/*
 * dvb_dvr_do_ioctl() - DVR ioctl worker, invoked through dvb_usercopy()
 * with the user argument already copied into kernel memory (@parg).
 * All commands are serialized on dmxdev->mutex.
 */
static int dvb_dvr_do_ioctl(struct file *file,
			    unsigned int cmd, void *parg)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;
	/* DMX_SET_BUFFER_SIZE passes a plain integer, not a pointer. */
	unsigned long arg = (unsigned long)parg;
	int ret;

	if (mutex_lock_interruptible(&dmxdev->mutex))
		return -ERESTARTSYS;

	switch (cmd) {
	case DMX_SET_BUFFER_SIZE:
		ret = dvb_dvr_set_buffer_size(dmxdev, arg);
		break;

#ifdef CONFIG_DVB_MMAP
	case DMX_REQBUFS:
		ret = dvb_vb2_reqbufs(&dmxdev->dvr_vb2_ctx, parg);
		break;

	case DMX_QUERYBUF:
		ret = dvb_vb2_querybuf(&dmxdev->dvr_vb2_ctx, parg);
		break;

	case DMX_EXPBUF:
		ret = dvb_vb2_expbuf(&dmxdev->dvr_vb2_ctx, parg);
		break;

	case DMX_QBUF:
		ret = dvb_vb2_qbuf(&dmxdev->dvr_vb2_ctx, parg);
		/* Queueing the first buffer implicitly starts streaming. */
		if (ret == 0 && !dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx))
			ret = dvb_vb2_stream_on(&dmxdev->dvr_vb2_ctx);
		break;

	case DMX_DQBUF:
		ret = dvb_vb2_dqbuf(&dmxdev->dvr_vb2_ctx, parg);
		break;
#endif
	default:
		ret = -ENOTTY;
		break;
	}
	mutex_unlock(&dmxdev->mutex);
	return ret;
}
1332
dvb_dvr_ioctl(struct file * file,unsigned int cmd,unsigned long arg)1333 static long dvb_dvr_ioctl(struct file *file,
1334 unsigned int cmd, unsigned long arg)
1335 {
1336 return dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl);
1337 }
1338
dvb_dvr_poll(struct file * file,poll_table * wait)1339 static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait)
1340 {
1341 struct dvb_device *dvbdev = file->private_data;
1342 struct dmxdev *dmxdev = dvbdev->priv;
1343 __poll_t mask = 0;
1344
1345 dprintk("%s\n", __func__);
1346
1347 poll_wait(file, &dmxdev->dvr_buffer.queue, wait);
1348
1349 if (dmxdev->exit)
1350 return EPOLLERR;
1351 if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx))
1352 return dvb_vb2_poll(&dmxdev->dvr_vb2_ctx, file, wait);
1353
1354 if (((file->f_flags & O_ACCMODE) == O_RDONLY) ||
1355 dmxdev->may_do_mmap) {
1356 if (dmxdev->dvr_buffer.error)
1357 mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR);
1358
1359 if (!dvb_ringbuffer_empty(&dmxdev->dvr_buffer))
1360 mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI);
1361 } else
1362 mask |= (EPOLLOUT | EPOLLWRNORM | EPOLLPRI);
1363
1364 return mask;
1365 }
1366
1367 #ifdef CONFIG_DVB_MMAP
dvb_dvr_mmap(struct file * file,struct vm_area_struct * vma)1368 static int dvb_dvr_mmap(struct file *file, struct vm_area_struct *vma)
1369 {
1370 struct dvb_device *dvbdev = file->private_data;
1371 struct dmxdev *dmxdev = dvbdev->priv;
1372
1373 if (!dmxdev->may_do_mmap)
1374 return -ENOTTY;
1375
1376 if (dmxdev->exit)
1377 return -ENODEV;
1378
1379 return dvb_vb2_mmap(&dmxdev->dvr_vb2_ctx, vma);
1380 }
1381 #endif
1382
/*
 * File operations for the DVR device node.  file->private_data holds the
 * struct dvb_device * (see dvb_dvr_poll()/dvb_dvr_mmap()); the dmxdev is
 * reached through its ->priv.
 */
static const struct file_operations dvb_dvr_fops = {
	.owner = THIS_MODULE,
	.read = dvb_dvr_read,
	.write = dvb_dvr_write,
	.unlocked_ioctl = dvb_dvr_ioctl,
	.open = dvb_dvr_open,
	.release = dvb_dvr_release,
	.poll = dvb_dvr_poll,
	.llseek = default_llseek,
#ifdef CONFIG_DVB_MMAP
	.mmap = dvb_dvr_mmap,
#endif
};
1396
/*
 * Template passed to dvb_register_device() for the DVR node; .priv is
 * filled in at registration time.  NOTE(review): .readers/.users look
 * like open-count limits enforced by the dvb core — confirm against
 * dvb_register_device() semantics.
 */
static const struct dvb_device dvbdev_dvr = {
	.priv = NULL,
	.readers = 1,
	.users = 1,
#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
	.name = "dvb-dvr",
#endif
	.fops = &dvb_dvr_fops
};
dvb_dmxdev_init(struct dmxdev * dmxdev,struct dvb_adapter * dvb_adapter)1406 int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter)
1407 {
1408 int i, ret;
1409
1410 if (dmxdev->demux->open(dmxdev->demux) < 0)
1411 return -EUSERS;
1412
1413 dmxdev->filter = vmalloc_array(dmxdev->filternum,
1414 sizeof(struct dmxdev_filter));
1415 if (!dmxdev->filter)
1416 return -ENOMEM;
1417
1418 mutex_init(&dmxdev->mutex);
1419 spin_lock_init(&dmxdev->lock);
1420 for (i = 0; i < dmxdev->filternum; i++) {
1421 dmxdev->filter[i].dev = dmxdev;
1422 dmxdev->filter[i].buffer.data = NULL;
1423 dvb_dmxdev_filter_state_set(&dmxdev->filter[i],
1424 DMXDEV_STATE_FREE);
1425 }
1426
1427 ret = dvb_register_device(dvb_adapter, &dmxdev->dvbdev, &dvbdev_demux, dmxdev,
1428 DVB_DEVICE_DEMUX, dmxdev->filternum);
1429 if (ret < 0)
1430 goto err_register_dvbdev;
1431
1432 ret = dvb_register_device(dvb_adapter, &dmxdev->dvr_dvbdev, &dvbdev_dvr,
1433 dmxdev, DVB_DEVICE_DVR, dmxdev->filternum);
1434 if (ret < 0)
1435 goto err_register_dvr_dvbdev;
1436
1437 dvb_ringbuffer_init(&dmxdev->dvr_buffer, NULL, 8192);
1438
1439 return 0;
1440
1441 err_register_dvr_dvbdev:
1442 dvb_unregister_device(dmxdev->dvbdev);
1443 err_register_dvbdev:
1444 vfree(dmxdev->filter);
1445 dmxdev->filter = NULL;
1446 return ret;
1447 }
1448
1449 EXPORT_SYMBOL(dvb_dmxdev_init);
1450
/*
 * dvb_dmxdev_release() - tear down a demux device set up by
 * dvb_dmxdev_init(): flag shutdown, wait for all openers to go away,
 * unregister both nodes, and close the underlying demux.
 */
void dvb_dmxdev_release(struct dmxdev *dmxdev)
{
	/* Set the exit flag under the mutex so handlers that check it
	 * (e.g. dvb_dvr_poll/dvb_dvr_mmap) observe it consistently. */
	mutex_lock(&dmxdev->mutex);
	dmxdev->exit = 1;
	mutex_unlock(&dmxdev->mutex);

	/* Wait until only the registration's own reference remains
	 * (users == 1); dvb_demux_release() issues the matching wake_up
	 * when exit is set.  NOTE(review): the users check here is done
	 * without holding the mutex — presumably safe because wait_event
	 * re-evaluates the condition; confirm against the dvb core's
	 * refcounting rules. */
	if (dmxdev->dvbdev->users > 1) {
		wait_event(dmxdev->dvbdev->wait_queue,
			   dmxdev->dvbdev->users == 1);
	}
	if (dmxdev->dvr_dvbdev->users > 1) {
		wait_event(dmxdev->dvr_dvbdev->wait_queue,
			   dmxdev->dvr_dvbdev->users == 1);
	}

	dvb_unregister_device(dmxdev->dvbdev);
	dvb_unregister_device(dmxdev->dvr_dvbdev);

	vfree(dmxdev->filter);
	dmxdev->filter = NULL;
	/* Balance the demux->open() done in dvb_dmxdev_init(). */
	dmxdev->demux->close(dmxdev->demux);
}

EXPORT_SYMBOL(dvb_dmxdev_release);
1475