// SPDX-License-Identifier: GPL-2.0
/*
 * dvb-vb2.c - dvb-vb2
 *
 * Copyright (C) 2015 Samsung Electronics
 *
 * Author: jh1009.sung@samsung.com
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>

#include <media/dvbdev.h>
#include <media/dvb_vb2.h>

#define DVB_V2_MAX_SIZE		(4096 * 188)

static int vb2_debug;
module_param(vb2_debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (vb2_debug >= level)					\
			pr_info("vb2: %s: " fmt, __func__, ## arg);	\
	} while (0)

static int _queue_setup(struct vb2_queue *vq,
			unsigned int *nbuffers, unsigned int *nplanes,
			unsigned int sizes[], struct device *alloc_devs[])
{
	struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vq);

	ctx->buf_cnt = *nbuffers;
	*nplanes = 1;
	sizes[0] = ctx->buf_siz;

	/*
	 * videobuf2-vmalloc allocator is context-less so no need to set
	 * alloc_ctxs array.
	 */

	dprintk(3, "[%s] count=%d, size=%d\n", ctx->name,
		*nbuffers, sizes[0]);

	return 0;
}

static int _buffer_prepare(struct vb2_buffer *vb)
{
	struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long size = ctx->buf_siz;

	if (vb2_plane_size(vb, 0) < size) {
		dprintk(1, "[%s] data will not fit into plane (%lu < %lu)\n",
			ctx->name, vb2_plane_size(vb, 0), size);
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, size);
	dprintk(3, "[%s]\n", ctx->name);

	return 0;
}

static void _buffer_queue(struct vb2_buffer *vb)
{
	struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct dvb_buffer *buf = container_of(vb, struct dvb_buffer, vb);
	unsigned long flags = 0;

	spin_lock_irqsave(&ctx->slock, flags);
	list_add_tail(&buf->list, &ctx->dvb_q);
	spin_unlock_irqrestore(&ctx->slock, flags);

	dprintk(3, "[%s]\n", ctx->name);
}

static int _start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vq);

	dprintk(3, "[%s] count=%d\n", ctx->name, count);
	return 0;
}

static void _stop_streaming(struct vb2_queue *vq)
{
	struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vq);
	struct dvb_buffer *buf;
	unsigned long flags = 0;

	dprintk(3, "[%s]\n", ctx->name);

	spin_lock_irqsave(&ctx->slock, flags);
	while (!list_empty(&ctx->dvb_q)) {
		buf = list_entry(ctx->dvb_q.next,
				 struct dvb_buffer, list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
		list_del(&buf->list);
	}
	spin_unlock_irqrestore(&ctx->slock, flags);
}

static void _dmxdev_lock(struct vb2_queue *vq)
{
	struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vq);

	mutex_lock(&ctx->mutex);
	dprintk(3, "[%s]\n", ctx->name);
}

static void _dmxdev_unlock(struct vb2_queue *vq)
{
	struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vq);

	if (mutex_is_locked(&ctx->mutex))
		mutex_unlock(&ctx->mutex);
	dprintk(3, "[%s]\n", ctx->name);
}

static const struct vb2_ops dvb_vb2_qops = {
	.queue_setup = _queue_setup,
	.buf_prepare = _buffer_prepare,
	.buf_queue = _buffer_queue,
	.start_streaming = _start_streaming,
	.stop_streaming = _stop_streaming,
	.wait_prepare = _dmxdev_unlock,
	.wait_finish = _dmxdev_lock,
};

static void _fill_dmx_buffer(struct vb2_buffer *vb, void *pb)
{
	struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct dmx_buffer *b = pb;

	b->index = vb->index;
	b->length = vb->planes[0].length;
	b->bytesused = vb->planes[0].bytesused;
	b->offset = vb->planes[0].m.offset;
	dprintk(3, "[%s]\n", ctx->name);
}

static int _fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes)
{
	struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	planes[0].bytesused = 0;
	dprintk(3, "[%s]\n", ctx->name);

	return 0;
}

static const struct vb2_buf_ops dvb_vb2_buf_ops = {
	.fill_user_buffer = _fill_dmx_buffer,
	.fill_vb2_buffer = _fill_vb2_buffer,
};

/*
 * Videobuf operations
 */
int dvb_vb2_init(struct dvb_vb2_ctx *ctx, const char *name, int nonblocking)
{
	struct vb2_queue *q = &ctx->vb_q;
	int ret;

	memset(ctx, 0, sizeof(struct dvb_vb2_ctx));
	/* capture type */
	q->type = DVB_BUF_TYPE_CAPTURE;
	q->is_output = 0;
	/* only mmap is supported currently */
	q->io_modes = VB2_MMAP;
	q->drv_priv = ctx;
	q->buf_struct_size = sizeof(struct dvb_buffer);
	q->min_buffers_needed = 1;
	q->ops = &dvb_vb2_qops;
	q->mem_ops = &vb2_vmalloc_memops;
	q->buf_ops = &dvb_vb2_buf_ops;
	q->num_buffers = 0;
	ret = vb2_core_queue_init(q);
	if (ret) {
		ctx->state = DVB_VB2_STATE_NONE;
		dprintk(1, "[%s] errno=%d\n", ctx->name, ret);
		return ret;
	}

	mutex_init(&ctx->mutex);
	spin_lock_init(&ctx->slock);
	INIT_LIST_HEAD(&ctx->dvb_q);

	strscpy(ctx->name, name, DVB_VB2_NAME_MAX);
	ctx->nonblocking = nonblocking;
	ctx->state = DVB_VB2_STATE_INIT;

	dprintk(3, "[%s]\n", ctx->name);

	return 0;
}

int dvb_vb2_release(struct dvb_vb2_ctx *ctx)
{
	struct vb2_queue *q = (struct vb2_queue *)&ctx->vb_q;

	if (ctx->state & DVB_VB2_STATE_INIT)
		vb2_core_queue_release(q);

	ctx->state = DVB_VB2_STATE_NONE;
	dprintk(3, "[%s]\n", ctx->name);

	return 0;
}

int dvb_vb2_stream_on(struct dvb_vb2_ctx *ctx)
{
	struct vb2_queue *q = &ctx->vb_q;
	int ret;

	ret = vb2_core_streamon(q, q->type);
	if (ret) {
		ctx->state = DVB_VB2_STATE_NONE;
		dprintk(1, "[%s] errno=%d\n", ctx->name, ret);
		return ret;
	}
	ctx->state |= DVB_VB2_STATE_STREAMON;
	dprintk(3, "[%s]\n", ctx->name);

	return 0;
}

int dvb_vb2_stream_off(struct dvb_vb2_ctx *ctx)
{
	struct vb2_queue *q = (struct vb2_queue *)&ctx->vb_q;
	int ret;

	ctx->state &= ~DVB_VB2_STATE_STREAMON;
	ret = vb2_core_streamoff(q, q->type);
	if (ret) {
		ctx->state = DVB_VB2_STATE_NONE;
		dprintk(1, "[%s] errno=%d\n", ctx->name, ret);
		return ret;
	}
	dprintk(3, "[%s]\n", ctx->name);

	return 0;
}

int dvb_vb2_is_streaming(struct dvb_vb2_ctx *ctx)
{
	return (ctx->state & DVB_VB2_STATE_STREAMON);
}

int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx,
			const unsigned char *src, int len,
			enum dmx_buffer_flags *buffer_flags)
{
	unsigned long flags = 0;
	void *vbuf = NULL;
	int todo = len;
	unsigned char *psrc = (unsigned char *)src;
	int ll = 0;

	/*
	 * Normal case: this function is called twice by the demux driver,
	 * once with a valid src pointer and a second time with src == NULL.
	 */
	if (!src || !len)
		return 0;
	spin_lock_irqsave(&ctx->slock, flags);
	if (buffer_flags && *buffer_flags) {
		ctx->flags |= *buffer_flags;
		*buffer_flags = 0;
	}
	while (todo) {
		if (!ctx->buf) {
			if (list_empty(&ctx->dvb_q)) {
				dprintk(3, "[%s] Buffer overflow!!!\n",
					ctx->name);
				break;
			}

			ctx->buf = list_entry(ctx->dvb_q.next,
					      struct dvb_buffer, list);
			ctx->remain = vb2_plane_size(&ctx->buf->vb, 0);
			ctx->offset = 0;
		}

		if (!dvb_vb2_is_streaming(ctx)) {
			vb2_buffer_done(&ctx->buf->vb, VB2_BUF_STATE_ERROR);
			list_del(&ctx->buf->list);
			ctx->buf = NULL;
			break;
		}

		/* Fill buffer */
		ll = min(todo, ctx->remain);
		vbuf = vb2_plane_vaddr(&ctx->buf->vb, 0);
		memcpy(vbuf + ctx->offset, psrc, ll);
		todo -= ll;
		psrc += ll;

		ctx->remain -= ll;
		ctx->offset += ll;

		if (ctx->remain == 0) {
			vb2_buffer_done(&ctx->buf->vb, VB2_BUF_STATE_DONE);
			list_del(&ctx->buf->list);
			ctx->buf = NULL;
		}
	}

	if (ctx->nonblocking && ctx->buf) {
		vb2_set_plane_payload(&ctx->buf->vb, 0, ll);
		vb2_buffer_done(&ctx->buf->vb, VB2_BUF_STATE_DONE);
		list_del(&ctx->buf->list);
		ctx->buf = NULL;
	}
	spin_unlock_irqrestore(&ctx->slock, flags);

	if (todo)
		dprintk(1, "[%s] %d bytes are dropped.\n", ctx->name, todo);
	else
		dprintk(3, "[%s]\n", ctx->name);

	dprintk(3, "[%s] %d bytes are copied\n", ctx->name, len - todo);
	return (len - todo);
}

int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req)
{
	int ret;

	/* Adjust size to a sane value */
	if (req->size > DVB_V2_MAX_SIZE)
		req->size = DVB_V2_MAX_SIZE;

	/* FIXME: round req->size to a 188 or 204 multiple */

	ctx->buf_siz = req->size;
	ctx->buf_cnt = req->count;
	ret = vb2_core_reqbufs(&ctx->vb_q, VB2_MEMORY_MMAP, 0, &req->count);
	if (ret) {
		ctx->state = DVB_VB2_STATE_NONE;
		dprintk(1, "[%s] count=%d size=%d errno=%d\n", ctx->name,
			ctx->buf_cnt, ctx->buf_siz, ret);
		return ret;
	}
	ctx->state |= DVB_VB2_STATE_REQBUFS;
	dprintk(3, "[%s] count=%d size=%d\n", ctx->name,
		ctx->buf_cnt, ctx->buf_siz);

	return 0;
}

int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
	struct vb2_queue *q = &ctx->vb_q;

	if (b->index >= q->num_buffers) {
		dprintk(1, "[%s] buffer index out of range\n", ctx->name);
		return -EINVAL;
	}
	vb2_core_querybuf(&ctx->vb_q, b->index, b);
	dprintk(3, "[%s] index=%d\n", ctx->name, b->index);
	return 0;
}

int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp)
{
	struct vb2_queue *q = &ctx->vb_q;
	int ret;

	ret = vb2_core_expbuf(&ctx->vb_q, &exp->fd, q->type, exp->index,
			      0, exp->flags);
	if (ret) {
		dprintk(1, "[%s] index=%d errno=%d\n", ctx->name,
			exp->index, ret);
		return ret;
	}
	dprintk(3, "[%s] index=%d fd=%d\n", ctx->name, exp->index, exp->fd);

	return 0;
}

int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
	struct vb2_queue *q = &ctx->vb_q;
	int ret;

	if (b->index >= q->num_buffers) {
		dprintk(1, "[%s] buffer index out of range\n", ctx->name);
		return -EINVAL;
	}
	ret = vb2_core_qbuf(&ctx->vb_q, b->index, b, NULL);
	if (ret) {
		dprintk(1, "[%s] index=%d errno=%d\n", ctx->name,
			b->index, ret);
		return ret;
	}
	dprintk(5, "[%s] index=%d\n", ctx->name, b->index);

	return 0;
}

int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
	unsigned long flags;
	int ret;

	ret = vb2_core_dqbuf(&ctx->vb_q, &b->index, b, ctx->nonblocking);
	if (ret) {
		dprintk(1, "[%s] errno=%d\n", ctx->name, ret);
		return ret;
	}

	spin_lock_irqsave(&ctx->slock, flags);
	b->count = ctx->count++;
	b->flags = ctx->flags;
	ctx->flags = 0;
	spin_unlock_irqrestore(&ctx->slock, flags);

	dprintk(5, "[%s] index=%d, count=%d, flags=%d\n",
		ctx->name, b->index, ctx->count, b->flags);

	return 0;
}

int dvb_vb2_mmap(struct dvb_vb2_ctx *ctx, struct vm_area_struct *vma)
{
	int ret;

	ret = vb2_mmap(&ctx->vb_q, vma);
	if (ret) {
		dprintk(1, "[%s] errno=%d\n", ctx->name, ret);
		return ret;
	}
	dprintk(3, "[%s] ret=%d\n", ctx->name, ret);

	return 0;
}

__poll_t dvb_vb2_poll(struct dvb_vb2_ctx *ctx, struct file *file,
		      poll_table *wait)
{
	dprintk(3, "[%s]\n", ctx->name);
	return vb2_core_poll(&ctx->vb_q, file, wait);
}
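
/*
 * Editorial usage sketch (comment only, not part of the driver): a rough
 * outline of the order in which the dmxdev layer and a demux feed callback
 * are expected to drive this API, based on the functions defined above.
 * The variable names, buffer sizes and the omitted error handling are
 * illustrative assumptions; the real callers live in the dmxdev code.
 *
 *	struct dvb_vb2_ctx ctx;
 *	struct dmx_requestbuffers req = { .count = 4, .size = 64 * 188 };
 *	struct dmx_buffer b = {};
 *	enum dmx_buffer_flags flags = 0;
 *
 *	dvb_vb2_init(&ctx, "demux0-dvr", 0);	// 0 = blocking context
 *	dvb_vb2_reqbufs(&ctx, &req);		// vmalloc'ed MMAP buffers
 *	dvb_vb2_stream_on(&ctx);
 *
 *	// demux/feed side, called as TS data arrives:
 *	dvb_vb2_fill_buffer(&ctx, ts_buf, ts_len, &flags);
 *
 *	// file-ops side, once poll() reports data:
 *	dvb_vb2_dqbuf(&ctx, &b);		// b.offset/b.bytesused now valid
 *
 *	dvb_vb2_stream_off(&ctx);
 *	dvb_vb2_release(&ctx);
 */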