// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * dmxdev.c - DVB demultiplexer device
 *
 * Copyright (C) 2000 Ralph Metzler & Marcus Metzler
 *		      for convergence integrated media GmbH
 */

#define pr_fmt(fmt) "dmxdev: " fmt

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/wait.h>
#include <linux/uaccess.h>
#include <media/dmxdev.h>
#include <media/dvb_vb2.h>

static int debug;

module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");

#define dprintk(fmt, arg...) do { \
	if (debug) \
		printk(KERN_DEBUG pr_fmt("%s: " fmt), \
		       __func__, ##arg); \
} while (0)

static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf,
				   const u8 *src, size_t len)
{
	ssize_t free;

	if (!len)
		return 0;
	if (!buf->data)
		return 0;

	free = dvb_ringbuffer_free(buf);
	if (len > free) {
		dprintk("buffer overflow\n");
		return -EOVERFLOW;
	}

	return dvb_ringbuffer_write(buf, src, len);
}

static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src,
				      int non_blocking, char __user *buf,
				      size_t count, loff_t *ppos)
{
	size_t todo;
	ssize_t avail;
	ssize_t ret = 0;

	if (!src->data)
		return 0;

	if (src->error) {
		ret = src->error;
		dvb_ringbuffer_flush(src);
		return ret;
	}

	for (todo = count; todo > 0; todo -= ret) {
		if (non_blocking && dvb_ringbuffer_empty(src)) {
			ret = -EWOULDBLOCK;
			break;
		}

		ret = wait_event_interruptible(src->queue,
					       !dvb_ringbuffer_empty(src) ||
					       (src->error != 0));
		if (ret < 0)
			break;

		if (src->error) {
			ret = src->error;
			dvb_ringbuffer_flush(src);
			break;
		}

		avail = dvb_ringbuffer_avail(src);
		if (avail > todo)
			avail = todo;

		ret = dvb_ringbuffer_read_user(src, buf, avail);
		if (ret < 0)
			break;

		buf += ret;
	}

	return (count - todo) ? (count - todo) : ret;
}

static struct dmx_frontend *get_fe(struct dmx_demux *demux, int type)
{
	struct list_head *head, *pos;

	head = demux->get_frontends(demux);
	if (!head)
		return NULL;
	list_for_each(pos, head)
		if (DMX_FE_ENTRY(pos)->source == type)
			return DMX_FE_ENTRY(pos);

	return NULL;
}

static int dvb_dvr_open(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;
	struct dmx_frontend *front;
	bool need_ringbuffer = false;

	dprintk("%s\n", __func__);

	if (mutex_lock_interruptible(&dmxdev->mutex))
		return -ERESTARTSYS;

	if (dmxdev->exit) {
		mutex_unlock(&dmxdev->mutex);
		return -ENODEV;
	}

	dmxdev->may_do_mmap = 0;

	/*
	 * The logic here is a little tricky due to the ifdef.
	 *
	 * The ringbuffer is used for both read and mmap.
	 *
	 * It is not needed, however, in two situations:
	 *	- Write devices (access with O_WRONLY);
	 *	- For duplex device nodes, opened with O_RDWR.
	 */

	if ((file->f_flags & O_ACCMODE) == O_RDONLY)
		need_ringbuffer = true;
	else if ((file->f_flags & O_ACCMODE) == O_RDWR) {
		if (!(dmxdev->capabilities & DMXDEV_CAP_DUPLEX)) {
#ifdef CONFIG_DVB_MMAP
			dmxdev->may_do_mmap = 1;
			need_ringbuffer = true;
#else
			mutex_unlock(&dmxdev->mutex);
			return -EOPNOTSUPP;
#endif
		}
	}

	if (need_ringbuffer) {
		void *mem;

		if (!dvbdev->readers) {
			mutex_unlock(&dmxdev->mutex);
			return -EBUSY;
		}
		mem = vmalloc(DVR_BUFFER_SIZE);
		if (!mem) {
			mutex_unlock(&dmxdev->mutex);
			return -ENOMEM;
		}
		dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE);
		if (dmxdev->may_do_mmap)
			dvb_vb2_init(&dmxdev->dvr_vb2_ctx, "dvr",
				     &dmxdev->mutex,
				     file->f_flags & O_NONBLOCK);
		dvbdev->readers--;
	}

	if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
		dmxdev->dvr_orig_fe = dmxdev->demux->frontend;

		if (!dmxdev->demux->write) {
			mutex_unlock(&dmxdev->mutex);
			return -EOPNOTSUPP;
		}

		front = get_fe(dmxdev->demux, DMX_MEMORY_FE);

		if (!front) {
			mutex_unlock(&dmxdev->mutex);
			return -EINVAL;
		}
		dmxdev->demux->disconnect_frontend(dmxdev->demux);
		dmxdev->demux->connect_frontend(dmxdev->demux, front);
	}
	dvbdev->users++;
	mutex_unlock(&dmxdev->mutex);
	return 0;
}

static int dvb_dvr_release(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;

	mutex_lock(&dmxdev->mutex);

	if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
		dmxdev->demux->disconnect_frontend(dmxdev->demux);
		dmxdev->demux->connect_frontend(dmxdev->demux,
						dmxdev->dvr_orig_fe);
	}

	if (((file->f_flags & O_ACCMODE) == O_RDONLY) ||
	    dmxdev->may_do_mmap) {
		if (dmxdev->may_do_mmap) {
			if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx))
				dvb_vb2_stream_off(&dmxdev->dvr_vb2_ctx);
			dvb_vb2_release(&dmxdev->dvr_vb2_ctx);
		}
		dvbdev->readers++;
		if (dmxdev->dvr_buffer.data) {
			void *mem = dmxdev->dvr_buffer.data;
			/*memory barrier*/
			mb();
			spin_lock_irq(&dmxdev->lock);
			dmxdev->dvr_buffer.data = NULL;
			spin_unlock_irq(&dmxdev->lock);
			vfree(mem);
		}
	}
	/* TODO */
	dvbdev->users--;
	if (dvbdev->users == 1 && dmxdev->exit == 1) {
		mutex_unlock(&dmxdev->mutex);
		wake_up(&dvbdev->wait_queue);
	} else
		mutex_unlock(&dmxdev->mutex);

	return 0;
}

static ssize_t dvb_dvr_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;
	int ret;

	if (!dmxdev->demux->write)
		return -EOPNOTSUPP;
	if ((file->f_flags & O_ACCMODE) != O_WRONLY)
		return -EINVAL;
	if (mutex_lock_interruptible(&dmxdev->mutex))
		return -ERESTARTSYS;

	if (dmxdev->exit) {
		mutex_unlock(&dmxdev->mutex);
		return -ENODEV;
	}
	ret = dmxdev->demux->write(dmxdev->demux, buf, count);
	mutex_unlock(&dmxdev->mutex);
	return ret;
}

static ssize_t dvb_dvr_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;

	if (dmxdev->exit)
		return -ENODEV;

	return dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer,
				      file->f_flags & O_NONBLOCK,
				      buf, count, ppos);
}

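/*
 * Resize the DVR output ring buffer. Called with dmxdev->mutex held from the
 * DMX_SET_BUFFER_SIZE ioctl; the new buffer is swapped in and reset under the
 * spinlock, so any data still pending in the old buffer is discarded.
 */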
static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev,
				   unsigned long size)
{
	struct dvb_ringbuffer *buf = &dmxdev->dvr_buffer;
	void *newmem;
	void *oldmem;

	dprintk("%s\n", __func__);

	if (buf->size == size)
		return 0;
	if (!size)
		return -EINVAL;

	newmem = vmalloc(size);
	if (!newmem)
		return -ENOMEM;

	oldmem = buf->data;

	spin_lock_irq(&dmxdev->lock);
	buf->data = newmem;
	buf->size = size;

	/* reset and not flush in case the buffer shrinks */
	dvb_ringbuffer_reset(buf);
	spin_unlock_irq(&dmxdev->lock);

	vfree(oldmem);

	return 0;
}

static inline void dvb_dmxdev_filter_state_set(struct dmxdev_filter
					       *dmxdevfilter, int state)
{
	spin_lock_irq(&dmxdevfilter->dev->lock);
	dmxdevfilter->state = state;
	spin_unlock_irq(&dmxdevfilter->dev->lock);
}

static int dvb_dmxdev_set_buffer_size(struct dmxdev_filter *dmxdevfilter,
				      unsigned long size)
{
	struct dvb_ringbuffer *buf = &dmxdevfilter->buffer;
	void *newmem;
	void *oldmem;

	if (buf->size == size)
		return 0;
	if (!size)
		return -EINVAL;
	if (dmxdevfilter->state >= DMXDEV_STATE_GO)
		return -EBUSY;

	newmem = vmalloc(size);
	if (!newmem)
		return -ENOMEM;

	oldmem = buf->data;

	spin_lock_irq(&dmxdevfilter->dev->lock);
	buf->data = newmem;
	buf->size = size;

	/* reset and not flush in case the buffer shrinks */
	dvb_ringbuffer_reset(buf);
	spin_unlock_irq(&dmxdevfilter->dev->lock);

	vfree(oldmem);

	return 0;
}

static void dvb_dmxdev_filter_timeout(struct timer_list *t)
{
	struct dmxdev_filter *dmxdevfilter = timer_container_of(dmxdevfilter,
								t, timer);

	dmxdevfilter->buffer.error = -ETIMEDOUT;
	spin_lock_irq(&dmxdevfilter->dev->lock);
	dmxdevfilter->state = DMXDEV_STATE_TIMEDOUT;
	spin_unlock_irq(&dmxdevfilter->dev->lock);
	wake_up(&dmxdevfilter->buffer.queue);
}

static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter)
{
	struct dmx_sct_filter_params *para = &dmxdevfilter->params.sec;

	timer_delete(&dmxdevfilter->timer);
	if (para->timeout) {
		dmxdevfilter->timer.expires =
			jiffies + 1 + (HZ / 2 + HZ * para->timeout) / 1000;
		add_timer(&dmxdevfilter->timer);
	}
}

static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
				       const u8 *buffer2, size_t buffer2_len,
				       struct dmx_section_filter *filter,
				       u32 *buffer_flags)
{
	struct dmxdev_filter *dmxdevfilter = filter->priv;
	int ret;

	if (!dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx) &&
	    dmxdevfilter->buffer.error) {
		wake_up(&dmxdevfilter->buffer.queue);
		return 0;
	}
	spin_lock(&dmxdevfilter->dev->lock);
	if (dmxdevfilter->state != DMXDEV_STATE_GO) {
		spin_unlock(&dmxdevfilter->dev->lock);
		return 0;
	}
	timer_delete(&dmxdevfilter->timer);
	dprintk("section callback %*ph\n", 6, buffer1);
	if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx)) {
		ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx,
					  buffer1, buffer1_len,
					  buffer_flags, true);
		if (ret == buffer1_len)
			ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx,
						  buffer2, buffer2_len,
						  buffer_flags, true);
	} else {
		ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer,
					      buffer1, buffer1_len);
		if (ret == buffer1_len) {
			ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer,
						      buffer2, buffer2_len);
		}
	}
	if (ret < 0)
		dmxdevfilter->buffer.error = ret;
	if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
		dmxdevfilter->state = DMXDEV_STATE_DONE;
	spin_unlock(&dmxdevfilter->dev->lock);
	wake_up(&dmxdevfilter->buffer.queue);
	return 0;
}

static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
				  const u8 *buffer2, size_t buffer2_len,
				  struct dmx_ts_feed *feed,
				  u32 *buffer_flags)
{
	struct dmxdev_filter *dmxdevfilter = feed->priv;
	struct dvb_ringbuffer *buffer;
#ifdef CONFIG_DVB_MMAP
	struct dvb_vb2_ctx *ctx;
#endif
	int ret;

	spin_lock(&dmxdevfilter->dev->lock);
	if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
		spin_unlock(&dmxdevfilter->dev->lock);
		return 0;
	}

	if (dmxdevfilter->params.pes.output == DMX_OUT_TAP ||
	    dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP) {
		buffer = &dmxdevfilter->buffer;
#ifdef CONFIG_DVB_MMAP
		ctx = &dmxdevfilter->vb2_ctx;
#endif
	} else {
		buffer = &dmxdevfilter->dev->dvr_buffer;
#ifdef CONFIG_DVB_MMAP
		ctx = &dmxdevfilter->dev->dvr_vb2_ctx;
#endif
	}

	if (dvb_vb2_is_streaming(ctx)) {
		ret = dvb_vb2_fill_buffer(ctx, buffer1, buffer1_len,
					  buffer_flags, false);
		if (ret == buffer1_len)
			ret = dvb_vb2_fill_buffer(ctx, buffer2, buffer2_len,
						  buffer_flags, false);
	} else {
		if (buffer->error) {
			spin_unlock(&dmxdevfilter->dev->lock);
			wake_up(&buffer->queue);
			return 0;
		}
		ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
		if (ret == buffer1_len)
			ret = dvb_dmxdev_buffer_write(buffer,
						      buffer2, buffer2_len);
	}
	if (ret < 0)
		buffer->error = ret;
	spin_unlock(&dmxdevfilter->dev->lock);
	wake_up(&buffer->queue);
	return 0;
}

/* stop feed but only mark the specified filter as stopped (state set) */
static int dvb_dmxdev_feed_stop(struct dmxdev_filter *dmxdevfilter)
{
	struct dmxdev_feed *feed;

	dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);

	switch (dmxdevfilter->type) {
	case DMXDEV_TYPE_SEC:
		timer_delete(&dmxdevfilter->timer);
		dmxdevfilter->feed.sec->stop_filtering(dmxdevfilter->feed.sec);
		break;
	case DMXDEV_TYPE_PES:
		list_for_each_entry(feed, &dmxdevfilter->feed.ts, next)
			feed->ts->stop_filtering(feed->ts);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* start feed associated with the specified filter */
static int dvb_dmxdev_feed_start(struct dmxdev_filter *filter)
{
	struct dmxdev_feed *feed;
	int ret;

	dvb_dmxdev_filter_state_set(filter, DMXDEV_STATE_GO);

	switch (filter->type) {
	case DMXDEV_TYPE_SEC:
		return filter->feed.sec->start_filtering(filter->feed.sec);
	case DMXDEV_TYPE_PES:
		list_for_each_entry(feed, &filter->feed.ts, next) {
			ret = feed->ts->start_filtering(feed->ts);
			if (ret < 0) {
				dvb_dmxdev_feed_stop(filter);
				return ret;
			}
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* restart section feed if it has filters left associated with it,
   otherwise release the feed */
static int dvb_dmxdev_feed_restart(struct dmxdev_filter *filter)
{
	int i;
	struct dmxdev *dmxdev = filter->dev;
	u16 pid = filter->params.sec.pid;

	for (i = 0; i < dmxdev->filternum; i++)
		if (dmxdev->filter[i].state >= DMXDEV_STATE_GO &&
		    dmxdev->filter[i].type == DMXDEV_TYPE_SEC &&
		    dmxdev->filter[i].params.sec.pid == pid) {
			dvb_dmxdev_feed_start(&dmxdev->filter[i]);
			return 0;
		}

	filter->dev->demux->release_section_feed(dmxdev->demux,
						 filter->feed.sec);

	return 0;
}

static int dvb_dmxdev_filter_stop(struct dmxdev_filter *dmxdevfilter)
{
	struct dmxdev_feed *feed;
	struct dmx_demux *demux;

	if (dmxdevfilter->state < DMXDEV_STATE_GO)
		return 0;

	switch (dmxdevfilter->type) {
	case DMXDEV_TYPE_SEC:
		if (!dmxdevfilter->feed.sec)
			break;
		dvb_dmxdev_feed_stop(dmxdevfilter);
		if (dmxdevfilter->filter.sec)
			dmxdevfilter->feed.sec->
				release_filter(dmxdevfilter->feed.sec,
					       dmxdevfilter->filter.sec);
		dvb_dmxdev_feed_restart(dmxdevfilter);
		dmxdevfilter->feed.sec = NULL;
		break;
	case DMXDEV_TYPE_PES:
		dvb_dmxdev_feed_stop(dmxdevfilter);
		demux = dmxdevfilter->dev->demux;
		list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
			demux->release_ts_feed(demux, feed->ts);
			feed->ts = NULL;
		}
		break;
	default:
		if (dmxdevfilter->state == DMXDEV_STATE_ALLOCATED)
			return 0;
		return -EINVAL;
	}

	dvb_ringbuffer_flush(&dmxdevfilter->buffer);
	return 0;
}

static void dvb_dmxdev_delete_pids(struct dmxdev_filter *dmxdevfilter)
{
	struct dmxdev_feed *feed, *tmp;

	/* delete all PIDs */
	list_for_each_entry_safe(feed, tmp, &dmxdevfilter->feed.ts, next) {
		list_del(&feed->next);
		kfree(feed);
	}

	BUG_ON(!list_empty(&dmxdevfilter->feed.ts));
}

static inline int dvb_dmxdev_filter_reset(struct dmxdev_filter *dmxdevfilter)
{
	if (dmxdevfilter->state < DMXDEV_STATE_SET)
		return 0;

	if (dmxdevfilter->type == DMXDEV_TYPE_PES)
		dvb_dmxdev_delete_pids(dmxdevfilter);

	dmxdevfilter->type = DMXDEV_TYPE_NONE;
	dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
	return 0;
}

static int dvb_dmxdev_start_feed(struct dmxdev *dmxdev,
				 struct dmxdev_filter *filter,
				 struct dmxdev_feed *feed)
{
	ktime_t timeout = ktime_set(0, 0);
	struct dmx_pes_filter_params *para = &filter->params.pes;
	enum dmx_output otype;
	int ret;
	int ts_type;
	enum dmx_ts_pes ts_pes;
	struct dmx_ts_feed *tsfeed;

	feed->ts = NULL;
	otype = para->output;

	ts_pes = para->pes_type;

	if (ts_pes < DMX_PES_OTHER)
		ts_type = TS_DECODER;
	else
		ts_type = 0;

	if (otype == DMX_OUT_TS_TAP)
		ts_type |= TS_PACKET;
	else if (otype == DMX_OUT_TSDEMUX_TAP)
		ts_type |= TS_PACKET | TS_DEMUX;
	else if (otype == DMX_OUT_TAP)
		ts_type |= TS_PACKET | TS_DEMUX | TS_PAYLOAD_ONLY;

	ret = dmxdev->demux->allocate_ts_feed(dmxdev->demux, &feed->ts,
					      dvb_dmxdev_ts_callback);
	if (ret < 0)
		return ret;

	tsfeed = feed->ts;
	tsfeed->priv = filter;

	ret = tsfeed->set(tsfeed, feed->pid, ts_type, ts_pes, timeout);
	if (ret < 0) {
		dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
		return ret;
	}

	ret = tsfeed->start_filtering(tsfeed);
	if (ret < 0) {
		dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
		return ret;
	}

	return 0;
}

static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
{
	struct dmxdev *dmxdev = filter->dev;
	struct dmxdev_feed *feed;
	void *mem;
	int ret, i;

	if (filter->state < DMXDEV_STATE_SET)
		return -EINVAL;

	if (filter->state >= DMXDEV_STATE_GO)
		dvb_dmxdev_filter_stop(filter);

	if (!filter->buffer.data) {
		mem = vmalloc(filter->buffer.size);
		if (!mem)
			return -ENOMEM;
		spin_lock_irq(&filter->dev->lock);
		filter->buffer.data = mem;
		spin_unlock_irq(&filter->dev->lock);
	}

	dvb_ringbuffer_flush(&filter->buffer);

	switch (filter->type) {
	case DMXDEV_TYPE_SEC:
	{
		struct dmx_sct_filter_params *para = &filter->params.sec;
		struct dmx_section_filter **secfilter = &filter->filter.sec;
		struct dmx_section_feed **secfeed = &filter->feed.sec;

		*secfilter = NULL;
		*secfeed = NULL;


		/* find active filter/feed with same PID */
		for (i = 0; i < dmxdev->filternum; i++) {
			if (dmxdev->filter[i].state >= DMXDEV_STATE_GO &&
			    dmxdev->filter[i].type == DMXDEV_TYPE_SEC &&
			    dmxdev->filter[i].params.sec.pid == para->pid) {
				*secfeed = dmxdev->filter[i].feed.sec;
				break;
			}
		}

		/* if no feed found, try to allocate new one */
		if (!*secfeed) {
			ret = dmxdev->demux->allocate_section_feed(dmxdev->demux,
								   secfeed,
								   dvb_dmxdev_section_callback);
			if (!*secfeed) {
				pr_err("DVB (%s): could not alloc feed\n",
				       __func__);
				return ret;
			}

			ret = (*secfeed)->set(*secfeed, para->pid,
					      (para->flags & DMX_CHECK_CRC) ? 1 : 0);
			if (ret < 0) {
				pr_err("DVB (%s): could not set feed\n",
				       __func__);
				dvb_dmxdev_feed_restart(filter);
				return ret;
			}
		} else {
			dvb_dmxdev_feed_stop(filter);
		}

		ret = (*secfeed)->allocate_filter(*secfeed, secfilter);
		if (ret < 0) {
			dvb_dmxdev_feed_restart(filter);
			*secfeed = NULL;
			dprintk("could not get filter\n");
			return ret;
		}

		(*secfilter)->priv = filter;

		memcpy(&((*secfilter)->filter_value[3]),
		       &(para->filter.filter[1]), DMX_FILTER_SIZE - 1);
		memcpy(&(*secfilter)->filter_mask[3],
		       &para->filter.mask[1], DMX_FILTER_SIZE - 1);
		memcpy(&(*secfilter)->filter_mode[3],
		       &para->filter.mode[1], DMX_FILTER_SIZE - 1);

		(*secfilter)->filter_value[0] = para->filter.filter[0];
		(*secfilter)->filter_mask[0] = para->filter.mask[0];
		(*secfilter)->filter_mode[0] = para->filter.mode[0];
		(*secfilter)->filter_mask[1] = 0;
		(*secfilter)->filter_mask[2] = 0;

		filter->todo = 0;

		ret = filter->feed.sec->start_filtering(filter->feed.sec);
		if (ret < 0)
			return ret;

		dvb_dmxdev_filter_timer(filter);
		break;
	}
	case DMXDEV_TYPE_PES:
		list_for_each_entry(feed, &filter->feed.ts, next) {
			ret = dvb_dmxdev_start_feed(dmxdev, filter, feed);
			if (ret < 0) {
				dvb_dmxdev_filter_stop(filter);
				return ret;
			}
		}
		break;
	default:
		return -EINVAL;
	}

	dvb_dmxdev_filter_state_set(filter, DMXDEV_STATE_GO);
	return 0;
}

static int dvb_demux_open(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;
	int i;
	struct dmxdev_filter *dmxdevfilter;

	if (!dmxdev->filter)
		return -EINVAL;

	if (mutex_lock_interruptible(&dmxdev->mutex))
		return -ERESTARTSYS;

	if (dmxdev->exit) {
		mutex_unlock(&dmxdev->mutex);
		return -ENODEV;
	}

	for (i = 0; i < dmxdev->filternum; i++)
		if (dmxdev->filter[i].state == DMXDEV_STATE_FREE)
			break;

	if (i == dmxdev->filternum) {
		mutex_unlock(&dmxdev->mutex);
		return -EMFILE;
	}

	dmxdevfilter = &dmxdev->filter[i];
	mutex_init(&dmxdevfilter->mutex);
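	/* From now on, this file handle addresses the filter slot, not the dvb_device. */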
	file->private_data = dmxdevfilter;

#ifdef CONFIG_DVB_MMAP
	dmxdev->may_do_mmap = 1;
#else
	dmxdev->may_do_mmap = 0;
#endif

	/*
	 * The mutex passed to dvb_vb2_init is unlocked when a buffer
	 * is in a blocking wait. However, dmxdevfilter has two mutexes:
	 * dmxdevfilter->mutex and dmxdev->mutex. So this will not work.
	 * The solution would be to support unlocking two mutexes in vb2,
	 * but since this problem has been here since the beginning and
	 * nobody ever complained, we leave it as-is rather than adding
	 * that second mutex pointer to vb2.
	 *
	 * In the unlikely event that someone complains about this, then
	 * this comment will hopefully help.
	 */
	dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
	dvb_vb2_init(&dmxdevfilter->vb2_ctx, "demux_filter",
		     &dmxdevfilter->mutex, file->f_flags & O_NONBLOCK);
	dmxdevfilter->type = DMXDEV_TYPE_NONE;
	dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
	timer_setup(&dmxdevfilter->timer, dvb_dmxdev_filter_timeout, 0);

	dvbdev->users++;

	mutex_unlock(&dmxdev->mutex);
	return 0;
}

static int dvb_dmxdev_filter_free(struct dmxdev *dmxdev,
				  struct dmxdev_filter *dmxdevfilter)
{
	mutex_lock(&dmxdev->mutex);
	mutex_lock(&dmxdevfilter->mutex);
	if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx))
		dvb_vb2_stream_off(&dmxdevfilter->vb2_ctx);
	dvb_vb2_release(&dmxdevfilter->vb2_ctx);


	dvb_dmxdev_filter_stop(dmxdevfilter);
	dvb_dmxdev_filter_reset(dmxdevfilter);

	if (dmxdevfilter->buffer.data) {
		void *mem = dmxdevfilter->buffer.data;

		spin_lock_irq(&dmxdev->lock);
		dmxdevfilter->buffer.data = NULL;
		spin_unlock_irq(&dmxdev->lock);
		vfree(mem);
	}

	dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_FREE);
	wake_up(&dmxdevfilter->buffer.queue);
	mutex_unlock(&dmxdevfilter->mutex);
	mutex_unlock(&dmxdev->mutex);
	return 0;
}

static inline void invert_mode(struct dmx_filter *filter)
{
	int i;

	for (i = 0; i < DMX_FILTER_SIZE; i++)
		filter->mode[i] ^= 0xff;
}

static int dvb_dmxdev_add_pid(struct dmxdev *dmxdev,
			      struct dmxdev_filter *filter, u16 pid)
{
	struct dmxdev_feed *feed;

	if ((filter->type != DMXDEV_TYPE_PES) ||
	    (filter->state < DMXDEV_STATE_SET))
		return -EINVAL;

	/* only TS packet filters may have multiple PIDs */
	if ((filter->params.pes.output != DMX_OUT_TSDEMUX_TAP) &&
	    (!list_empty(&filter->feed.ts)))
		return -EINVAL;

	feed = kzalloc(sizeof(struct dmxdev_feed), GFP_KERNEL);
	if (feed == NULL)
		return -ENOMEM;

	feed->pid = pid;
	list_add(&feed->next, &filter->feed.ts);

	if (filter->state >= DMXDEV_STATE_GO)
		return dvb_dmxdev_start_feed(dmxdev, filter, feed);

	return 0;
}

static int dvb_dmxdev_remove_pid(struct dmxdev *dmxdev,
				 struct dmxdev_filter *filter, u16 pid)
{
	struct dmxdev_feed *feed, *tmp;

	if ((filter->type != DMXDEV_TYPE_PES) ||
	    (filter->state < DMXDEV_STATE_SET))
		return -EINVAL;

	list_for_each_entry_safe(feed, tmp, &filter->feed.ts, next) {
		if ((feed->pid == pid) && (feed->ts != NULL)) {
			feed->ts->stop_filtering(feed->ts);
			filter->dev->demux->release_ts_feed(filter->dev->demux,
							    feed->ts);
			list_del(&feed->next);
			kfree(feed);
		}
	}

	return 0;
}

static int dvb_dmxdev_filter_set(struct dmxdev *dmxdev,
				 struct dmxdev_filter *dmxdevfilter,
				 struct dmx_sct_filter_params *params)
{
	dprintk("%s: PID=0x%04x, flags=%02x, timeout=%d\n",
		__func__, params->pid, params->flags, params->timeout);

	dvb_dmxdev_filter_stop(dmxdevfilter);

	dmxdevfilter->type = DMXDEV_TYPE_SEC;
	memcpy(&dmxdevfilter->params.sec,
	       params, sizeof(struct dmx_sct_filter_params));
	invert_mode(&dmxdevfilter->params.sec.filter);
	dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);

	if (params->flags & DMX_IMMEDIATE_START)
		return dvb_dmxdev_filter_start(dmxdevfilter);

	return 0;
}

static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
				     struct dmxdev_filter *dmxdevfilter,
				     struct dmx_pes_filter_params *params)
{
	int ret;

	dvb_dmxdev_filter_stop(dmxdevfilter);
	dvb_dmxdev_filter_reset(dmxdevfilter);

	if ((unsigned int)params->pes_type > DMX_PES_OTHER)
		return -EINVAL;

	dmxdevfilter->type = DMXDEV_TYPE_PES;
	memcpy(&dmxdevfilter->params, params,
	       sizeof(struct dmx_pes_filter_params));
	INIT_LIST_HEAD(&dmxdevfilter->feed.ts);

	dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);

	ret = dvb_dmxdev_add_pid(dmxdev, dmxdevfilter,
				 dmxdevfilter->params.pes.pid);
	if (ret < 0)
		return ret;

	if (params->flags & DMX_IMMEDIATE_START)
		return dvb_dmxdev_filter_start(dmxdevfilter);

	return 0;
}

static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil,
				   struct file *file, char __user *buf,
				   size_t count, loff_t *ppos)
{
	int result, hcount;
	int done = 0;

	if (dfil->todo <= 0) {
		hcount = 3 + dfil->todo;
		if (hcount > count)
			hcount = count;
		result = dvb_dmxdev_buffer_read(&dfil->buffer,
						file->f_flags & O_NONBLOCK,
						buf, hcount, ppos);
		if (result < 0) {
			dfil->todo = 0;
			return result;
		}
		if (copy_from_user(dfil->secheader - dfil->todo, buf, result))
			return -EFAULT;
		buf += result;
		done = result;
		count -= result;
		dfil->todo -= result;
		if (dfil->todo > -3)
			return done;
		dfil->todo = ((dfil->secheader[1] << 8) | dfil->secheader[2]) & 0xfff;
		if (!count)
			return done;
	}
	if (count > dfil->todo)
		count = dfil->todo;
	result = dvb_dmxdev_buffer_read(&dfil->buffer,
					file->f_flags & O_NONBLOCK,
					buf, count, ppos);
	if (result < 0)
		return result;
	dfil->todo -= result;
	return (result + done);
}

static ssize_t
dvb_demux_read(struct file *file, char __user *buf, size_t count,
	       loff_t *ppos)
{
	struct dmxdev_filter *dmxdevfilter = file->private_data;
	int ret;

	if (mutex_lock_interruptible(&dmxdevfilter->mutex))
		return -ERESTARTSYS;

	if (dmxdevfilter->type == DMXDEV_TYPE_SEC)
		ret = dvb_dmxdev_read_sec(dmxdevfilter, file, buf, count, ppos);
	else
		ret = dvb_dmxdev_buffer_read(&dmxdevfilter->buffer,
					     file->f_flags & O_NONBLOCK,
					     buf, count, ppos);

	mutex_unlock(&dmxdevfilter->mutex);
	return ret;
}

static int dvb_demux_do_ioctl(struct file *file,
			      unsigned int cmd, void *parg)
{
	struct dmxdev_filter *dmxdevfilter = file->private_data;
	struct dmxdev *dmxdev = dmxdevfilter->dev;
	unsigned long arg = (unsigned long)parg;
	int ret = 0;

	if (mutex_lock_interruptible(&dmxdev->mutex))
		return -ERESTARTSYS;

	switch (cmd) {
	case DMX_START:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		if (dmxdevfilter->state < DMXDEV_STATE_SET)
			ret = -EINVAL;
		else
			ret = dvb_dmxdev_filter_start(dmxdevfilter);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_STOP:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_filter_stop(dmxdevfilter);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_SET_FILTER:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_filter_set(dmxdev, dmxdevfilter, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_SET_PES_FILTER:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_pes_filter_set(dmxdev, dmxdevfilter, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_SET_BUFFER_SIZE:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_dmxdev_set_buffer_size(dmxdevfilter, arg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_GET_PES_PIDS:
		if (!dmxdev->demux->get_pes_pids) {
			ret = -EINVAL;
			break;
		}
		dmxdev->demux->get_pes_pids(dmxdev->demux, parg);
		break;

	case DMX_GET_STC:
		if (!dmxdev->demux->get_stc) {
			ret = -EINVAL;
			break;
		}
		ret = dmxdev->demux->get_stc(dmxdev->demux,
					     ((struct dmx_stc *)parg)->num,
					     &((struct dmx_stc *)parg)->stc,
					     &((struct dmx_stc *)parg)->base);
		break;

	case DMX_ADD_PID:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			ret = -ERESTARTSYS;
			break;
		}
		ret = dvb_dmxdev_add_pid(dmxdev, dmxdevfilter, *(u16 *)parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_REMOVE_PID:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			ret = -ERESTARTSYS;
			break;
		}
		ret = dvb_dmxdev_remove_pid(dmxdev, dmxdevfilter, *(u16 *)parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

#ifdef CONFIG_DVB_MMAP
	case DMX_REQBUFS:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_vb2_reqbufs(&dmxdevfilter->vb2_ctx, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_QUERYBUF:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_vb2_querybuf(&dmxdevfilter->vb2_ctx, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_EXPBUF:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_vb2_expbuf(&dmxdevfilter->vb2_ctx, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;

	case DMX_QBUF:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_vb2_qbuf(&dmxdevfilter->vb2_ctx, parg);
		if (ret == 0 && !dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx))
			ret = dvb_vb2_stream_on(&dmxdevfilter->vb2_ctx);
		mutex_unlock(&dmxdevfilter->mutex);
		break;
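
	/*
	 * Dequeue a filled buffer; dvb_vb2_dqbuf() blocks for one unless the
	 * device was opened with O_NONBLOCK.
	 */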
	case DMX_DQBUF:
		if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
			mutex_unlock(&dmxdev->mutex);
			return -ERESTARTSYS;
		}
		ret = dvb_vb2_dqbuf(&dmxdevfilter->vb2_ctx, parg);
		mutex_unlock(&dmxdevfilter->mutex);
		break;
#endif
	default:
		ret = -ENOTTY;
		break;
	}
	mutex_unlock(&dmxdev->mutex);
	return ret;
}

static long dvb_demux_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	return dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl);
}

static __poll_t dvb_demux_poll(struct file *file, poll_table *wait)
{
	struct dmxdev_filter *dmxdevfilter = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &dmxdevfilter->buffer.queue, wait);

	if ((!dmxdevfilter) || dmxdevfilter->dev->exit)
		return EPOLLERR;
	if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx))
		return dvb_vb2_poll(&dmxdevfilter->vb2_ctx, file, wait);

	if (dmxdevfilter->state != DMXDEV_STATE_GO &&
	    dmxdevfilter->state != DMXDEV_STATE_DONE &&
	    dmxdevfilter->state != DMXDEV_STATE_TIMEDOUT)
		return 0;

	if (dmxdevfilter->buffer.error)
		mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR);

	if (!dvb_ringbuffer_empty(&dmxdevfilter->buffer))
		mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI);

	return mask;
}

#ifdef CONFIG_DVB_MMAP
static int dvb_demux_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct dmxdev_filter *dmxdevfilter = file->private_data;
	struct dmxdev *dmxdev = dmxdevfilter->dev;

	if (!dmxdev->may_do_mmap)
		return -ENOTTY;

	return dvb_vb2_mmap(&dmxdevfilter->vb2_ctx, vma);
}
#endif

static int dvb_demux_release(struct inode *inode, struct file *file)
{
	struct dmxdev_filter *dmxdevfilter = file->private_data;
	struct dmxdev *dmxdev = dmxdevfilter->dev;
	int ret;

	ret = dvb_dmxdev_filter_free(dmxdev, dmxdevfilter);

	mutex_lock(&dmxdev->mutex);
	dmxdev->dvbdev->users--;
	if (dmxdev->dvbdev->users == 1 && dmxdev->exit == 1) {
		mutex_unlock(&dmxdev->mutex);
		wake_up(&dmxdev->dvbdev->wait_queue);
	} else
		mutex_unlock(&dmxdev->mutex);

	return ret;
}

static const struct file_operations dvb_demux_fops = {
	.owner = THIS_MODULE,
	.read = dvb_demux_read,
	.unlocked_ioctl = dvb_demux_ioctl,
	.compat_ioctl = dvb_demux_ioctl,
	.open = dvb_demux_open,
	.release = dvb_demux_release,
	.poll = dvb_demux_poll,
	.llseek = default_llseek,
#ifdef CONFIG_DVB_MMAP
	.mmap = dvb_demux_mmap,
#endif
};

static const struct dvb_device dvbdev_demux = {
	.priv = NULL,
	.users = 1,
	.writers = 1,
#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
	.name = "dvb-demux",
#endif
	.fops = &dvb_demux_fops
};

static int dvb_dvr_do_ioctl(struct file *file,
			    unsigned int cmd, void *parg)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;
	unsigned long arg = (unsigned long)parg;
	int ret;

	if (mutex_lock_interruptible(&dmxdev->mutex))
		return -ERESTARTSYS;

	switch (cmd) {
	case DMX_SET_BUFFER_SIZE:
		ret = dvb_dvr_set_buffer_size(dmxdev, arg);
		break;

#ifdef CONFIG_DVB_MMAP
	case DMX_REQBUFS:
		ret = dvb_vb2_reqbufs(&dmxdev->dvr_vb2_ctx, parg);
		break;

	case DMX_QUERYBUF:
		ret = dvb_vb2_querybuf(&dmxdev->dvr_vb2_ctx, parg);
		break;

	case DMX_EXPBUF:
		ret = dvb_vb2_expbuf(&dmxdev->dvr_vb2_ctx, parg);
		break;

	case DMX_QBUF:
		ret = dvb_vb2_qbuf(&dmxdev->dvr_vb2_ctx, parg);
		if (ret == 0 && !dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx))
			ret = dvb_vb2_stream_on(&dmxdev->dvr_vb2_ctx);
		break;

	case DMX_DQBUF:
		ret = dvb_vb2_dqbuf(&dmxdev->dvr_vb2_ctx, parg);
		break;
#endif
	default:
		ret = -ENOTTY;
		break;
	}
	mutex_unlock(&dmxdev->mutex);
	return ret;
}

static long dvb_dvr_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	return dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl);
}

static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;
	__poll_t mask = 0;

	dprintk("%s\n", __func__);

	poll_wait(file, &dmxdev->dvr_buffer.queue, wait);

	if (dmxdev->exit)
		return EPOLLERR;
	if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx))
		return dvb_vb2_poll(&dmxdev->dvr_vb2_ctx, file, wait);

	if (((file->f_flags & O_ACCMODE) == O_RDONLY) ||
	    dmxdev->may_do_mmap) {
		if (dmxdev->dvr_buffer.error)
			mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR);

		if (!dvb_ringbuffer_empty(&dmxdev->dvr_buffer))
			mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI);
	} else
		mask |= (EPOLLOUT | EPOLLWRNORM | EPOLLPRI);

	return mask;
}

#ifdef CONFIG_DVB_MMAP
static int dvb_dvr_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dmxdev *dmxdev = dvbdev->priv;

	if (!dmxdev->may_do_mmap)
		return -ENOTTY;

	if (dmxdev->exit)
		return -ENODEV;

	return dvb_vb2_mmap(&dmxdev->dvr_vb2_ctx, vma);
}
#endif

static const struct file_operations dvb_dvr_fops = {
	.owner = THIS_MODULE,
	.read = dvb_dvr_read,
	.write = dvb_dvr_write,
	.unlocked_ioctl = dvb_dvr_ioctl,
	.open = dvb_dvr_open,
	.release = dvb_dvr_release,
	.poll = dvb_dvr_poll,
	.llseek = default_llseek,
#ifdef CONFIG_DVB_MMAP
	.mmap = dvb_dvr_mmap,
#endif
};

static const struct dvb_device dvbdev_dvr = {
	.priv = NULL,
	.readers = 1,
	.users = 1,
#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
	.name = "dvb-dvr",
#endif
	.fops = &dvb_dvr_fops
};

int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter)
{
	int i, ret;

	if (dmxdev->demux->open(dmxdev->demux) < 0)
		return -EUSERS;

	dmxdev->filter = vmalloc_array(dmxdev->filternum,
				       sizeof(struct dmxdev_filter));
	if (!dmxdev->filter)
		return -ENOMEM;

	mutex_init(&dmxdev->mutex);
	spin_lock_init(&dmxdev->lock);
	for (i = 0; i < dmxdev->filternum; i++) {
		dmxdev->filter[i].dev = dmxdev;
		dmxdev->filter[i].buffer.data = NULL;
		dvb_dmxdev_filter_state_set(&dmxdev->filter[i],
					    DMXDEV_STATE_FREE);
	}

	ret = dvb_register_device(dvb_adapter, &dmxdev->dvbdev, &dvbdev_demux, dmxdev,
				  DVB_DEVICE_DEMUX, dmxdev->filternum);
	if (ret < 0)
		goto err_register_dvbdev;

	ret = dvb_register_device(dvb_adapter, &dmxdev->dvr_dvbdev, &dvbdev_dvr,
				  dmxdev, DVB_DEVICE_DVR, dmxdev->filternum);
	if (ret < 0)
		goto err_register_dvr_dvbdev;

	dvb_ringbuffer_init(&dmxdev->dvr_buffer, NULL, 8192);

	return 0;

err_register_dvr_dvbdev:
	dvb_unregister_device(dmxdev->dvbdev);
err_register_dvbdev:
	vfree(dmxdev->filter);
	dmxdev->filter = NULL;
	return ret;
}

EXPORT_SYMBOL(dvb_dmxdev_init);

void dvb_dmxdev_release(struct dmxdev *dmxdev)
{
	mutex_lock(&dmxdev->mutex);
	dmxdev->exit = 1;
	mutex_unlock(&dmxdev->mutex);

	if (dmxdev->dvbdev->users > 1) {
		wait_event(dmxdev->dvbdev->wait_queue,
			   dmxdev->dvbdev->users == 1);
	}
	if (dmxdev->dvr_dvbdev->users > 1) {
		wait_event(dmxdev->dvr_dvbdev->wait_queue,
			   dmxdev->dvr_dvbdev->users == 1);
	}

	dvb_unregister_device(dmxdev->dvbdev);
	dvb_unregister_device(dmxdev->dvr_dvbdev);

	vfree(dmxdev->filter);
	dmxdev->filter = NULL;
	dmxdev->demux->close(dmxdev->demux);
}

EXPORT_SYMBOL(dvb_dmxdev_release);