// SPDX-License-Identifier: GPL-2.0
/*
 * dvb-vb2.c - dvb-vb2
 *
 * Copyright (C) 2015 Samsung Electronics
 *
 * Author: jh1009.sung@samsung.com
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>

#include <media/dvbdev.h>
#include <media/dvb_vb2.h>

#define DVB_V2_MAX_SIZE (4096 * 188)

static int vb2_debug;
module_param(vb2_debug, int, 0644);

#define dprintk(level, fmt, arg...) \
	do { \
		if (vb2_debug >= level) \
			pr_info("vb2: %s: " fmt, __func__, ## arg); \
	} while (0)

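/*
 * vb2_ops.queue_setup: negotiate the buffer layout with the vb2 core.
 * Each buffer has a single vmalloc'ed plane of ctx->buf_siz bytes.
 */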
static int _queue_setup(struct vb2_queue *vq,
			unsigned int *nbuffers, unsigned int *nplanes,
			unsigned int sizes[], struct device *alloc_devs[])
{
	struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vq);

	ctx->buf_cnt = *nbuffers;
	*nplanes = 1;
	sizes[0] = ctx->buf_siz;

	/*
	 * The videobuf2-vmalloc allocator is context-less, so there is no
	 * need to fill the alloc_devs array.
	 */

	dprintk(3, "[%s] count=%d, size=%d\n", ctx->name,
		*nbuffers, sizes[0]);

	return 0;
}

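/*
 * vb2_ops.buf_prepare: reject a buffer whose plane is smaller than the
 * negotiated size and pre-set the payload to the full buffer size.
 */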
static int _buffer_prepare(struct vb2_buffer *vb)
{
	struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long size = ctx->buf_siz;

	if (vb2_plane_size(vb, 0) < size) {
		dprintk(1, "[%s] data will not fit into plane (%lu < %lu)\n",
			ctx->name, vb2_plane_size(vb, 0), size);
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, size);
	dprintk(3, "[%s]\n", ctx->name);

	return 0;
}

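/*
 * vb2_ops.buf_queue: append the buffer to the context's list of empty
 * buffers (ctx->dvb_q) that dvb_vb2_fill_buffer() consumes, under slock.
 */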
static void _buffer_queue(struct vb2_buffer *vb)
{
	struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct dvb_buffer *buf = container_of(vb, struct dvb_buffer, vb);
	unsigned long flags = 0;

	spin_lock_irqsave(&ctx->slock, flags);
	list_add_tail(&buf->list, &ctx->dvb_q);
	spin_unlock_irqrestore(&ctx->slock, flags);

	dprintk(3, "[%s]\n", ctx->name);
}

static int _start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vq);

	dprintk(3, "[%s] count=%d\n", ctx->name, count);
	return 0;
}

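/*
 * vb2_ops.stop_streaming: give back every buffer still on dvb_q to the
 * vb2 core in the ERROR state so the queue can be torn down cleanly.
 */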
static void _stop_streaming(struct vb2_queue *vq)
{
	struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vq);
	struct dvb_buffer *buf;
	unsigned long flags = 0;

	dprintk(3, "[%s]\n", ctx->name);

	spin_lock_irqsave(&ctx->slock, flags);
	while (!list_empty(&ctx->dvb_q)) {
		buf = list_entry(ctx->dvb_q.next,
				 struct dvb_buffer, list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
		list_del(&buf->list);
	}
	spin_unlock_irqrestore(&ctx->slock, flags);
}

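/*
 * wait_prepare/wait_finish callbacks: drop and re-take the demux device
 * mutex around blocking waits inside the vb2 core.
 */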
static void _dmxdev_lock(struct vb2_queue *vq)
{
	struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vq);

	mutex_lock(&ctx->mutex);
	dprintk(3, "[%s]\n", ctx->name);
}

static void _dmxdev_unlock(struct vb2_queue *vq)
{
	struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vq);

	if (mutex_is_locked(&ctx->mutex))
		mutex_unlock(&ctx->mutex);
	dprintk(3, "[%s]\n", ctx->name);
}

static const struct vb2_ops dvb_vb2_qops = {
	.queue_setup		= _queue_setup,
	.buf_prepare		= _buffer_prepare,
	.buf_queue		= _buffer_queue,
	.start_streaming	= _start_streaming,
	.stop_streaming		= _stop_streaming,
	.wait_prepare		= _dmxdev_unlock,
	.wait_finish		= _dmxdev_lock,
};

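/*
 * vb2_buf_ops.fill_user_buffer: copy the vb2 plane state into the
 * struct dmx_buffer that is handed back to user space.
 */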
static void _fill_dmx_buffer(struct vb2_buffer *vb, void *pb)
{
	struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct dmx_buffer *b = pb;

	b->index = vb->index;
	b->length = vb->planes[0].length;
	b->bytesused = vb->planes[0].bytesused;
	b->offset = vb->planes[0].m.offset;
	dprintk(3, "[%s]\n", ctx->name);
}

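/*
 * vb2_buf_ops.fill_vb2_buffer: nothing is copied from user space when a
 * buffer is queued; just reset the plane payload.
 */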
static int _fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes)
{
	struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	planes[0].bytesused = 0;
	dprintk(3, "[%s]\n", ctx->name);

	return 0;
}

static const struct vb2_buf_ops dvb_vb2_buf_ops = {
	.fill_user_buffer	= _fill_dmx_buffer,
	.fill_vb2_buffer	= _fill_vb2_buffer,
};

/*
 * Videobuf operations
 */
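/*
 * dvb_vb2_init() - initialize a DVB vb2 context for one demux device node.
 *
 * Zeroes the context, sets up an mmap-only, vmalloc-backed capture queue
 * driven by dvb_vb2_qops/dvb_vb2_buf_ops, and initializes the lock, the
 * buffer list and the context name.
 *
 * A rough sketch of the call sequence expected from the demux device code
 * (everything other than the dvb_vb2_* helpers below is illustrative only):
 *
 *	dvb_vb2_init(ctx, "demux0", nonblocking);
 *	dvb_vb2_reqbufs(ctx, &req);
 *	dvb_vb2_stream_on(ctx);
 *	...
 *	dvb_vb2_fill_buffer(ctx, data, len, &flags);	// from the demux feed
 *	dvb_vb2_dqbuf(ctx, &b);
 *	...
 *	dvb_vb2_stream_off(ctx);
 *	dvb_vb2_release(ctx);
 */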
int dvb_vb2_init(struct dvb_vb2_ctx *ctx, const char *name, int nonblocking)
{
	struct vb2_queue *q = &ctx->vb_q;
	int ret;

	memset(ctx, 0, sizeof(struct dvb_vb2_ctx));
	q->type = DVB_BUF_TYPE_CAPTURE;
	/* only mmap is supported currently */
	q->io_modes = VB2_MMAP;
	q->drv_priv = ctx;
	q->buf_struct_size = sizeof(struct dvb_buffer);
	q->min_queued_buffers = 1;
	q->ops = &dvb_vb2_qops;
	q->mem_ops = &vb2_vmalloc_memops;
	q->buf_ops = &dvb_vb2_buf_ops;
	ret = vb2_core_queue_init(q);
	if (ret) {
		ctx->state = DVB_VB2_STATE_NONE;
		dprintk(1, "[%s] errno=%d\n", ctx->name, ret);
		return ret;
	}

	mutex_init(&ctx->mutex);
	spin_lock_init(&ctx->slock);
	INIT_LIST_HEAD(&ctx->dvb_q);

	strscpy(ctx->name, name, DVB_VB2_NAME_MAX);
	ctx->nonblocking = nonblocking;
	ctx->state = DVB_VB2_STATE_INIT;

	dprintk(3, "[%s]\n", ctx->name);

	return 0;
}

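/*
 * dvb_vb2_release() - tear down the vb2 queue (if it was initialized) and
 * reset the context state to DVB_VB2_STATE_NONE.
 */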
int dvb_vb2_release(struct dvb_vb2_ctx *ctx)
{
	struct vb2_queue *q = (struct vb2_queue *)&ctx->vb_q;

	if (ctx->state & DVB_VB2_STATE_INIT)
		vb2_core_queue_release(q);

	ctx->state = DVB_VB2_STATE_NONE;
	dprintk(3, "[%s]\n", ctx->name);

	return 0;
}

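/*
 * dvb_vb2_stream_on()/dvb_vb2_stream_off() - wrap vb2_core_streamon() and
 * vb2_core_streamoff(), keeping the STREAMON bit of ctx->state in sync.
 */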
int dvb_vb2_stream_on(struct dvb_vb2_ctx *ctx)
{
	struct vb2_queue *q = &ctx->vb_q;
	int ret;

	ret = vb2_core_streamon(q, q->type);
	if (ret) {
		ctx->state = DVB_VB2_STATE_NONE;
		dprintk(1, "[%s] errno=%d\n", ctx->name, ret);
		return ret;
	}
	ctx->state |= DVB_VB2_STATE_STREAMON;
	dprintk(3, "[%s]\n", ctx->name);

	return 0;
}

int dvb_vb2_stream_off(struct dvb_vb2_ctx *ctx)
{
	struct vb2_queue *q = (struct vb2_queue *)&ctx->vb_q;
	int ret;

	ctx->state &= ~DVB_VB2_STATE_STREAMON;
	ret = vb2_core_streamoff(q, q->type);
	if (ret) {
		ctx->state = DVB_VB2_STATE_NONE;
		dprintk(1, "[%s] errno=%d\n", ctx->name, ret);
		return ret;
	}
	dprintk(3, "[%s]\n", ctx->name);

	return 0;
}

int dvb_vb2_is_streaming(struct dvb_vb2_ctx *ctx)
{
	return (ctx->state & DVB_VB2_STATE_STREAMON);
}

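/*
 * dvb_vb2_fill_buffer() - copy demux output into the queued vb2 buffers.
 *
 * Consumes buffers from ctx->dvb_q, copying up to @len bytes from @src.
 * A buffer is completed (VB2_BUF_STATE_DONE) once it is full, or, in
 * non-blocking mode, as soon as this call ends with a partially filled
 * buffer. If no empty buffer is available the remaining data is dropped.
 * Returns the number of bytes actually copied.
 */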
int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx,
			const unsigned char *src, int len,
			enum dmx_buffer_flags *buffer_flags)
{
	unsigned long flags = 0;
	void *vbuf = NULL;
	int todo = len;
	unsigned char *psrc = (unsigned char *)src;
	int ll = 0;

	/*
	 * Normal case: this function is called twice by the demux driver,
	 * first with a valid src pointer, then a second time with NULL.
	 */
	if (!src || !len)
		return 0;
	spin_lock_irqsave(&ctx->slock, flags);
	if (buffer_flags && *buffer_flags) {
		ctx->flags |= *buffer_flags;
		*buffer_flags = 0;
	}
	while (todo) {
		if (!ctx->buf) {
			if (list_empty(&ctx->dvb_q)) {
				dprintk(3, "[%s] Buffer overflow!!!\n",
					ctx->name);
				break;
			}

			ctx->buf = list_entry(ctx->dvb_q.next,
					      struct dvb_buffer, list);
			ctx->remain = vb2_plane_size(&ctx->buf->vb, 0);
			ctx->offset = 0;
		}

		if (!dvb_vb2_is_streaming(ctx)) {
			vb2_buffer_done(&ctx->buf->vb, VB2_BUF_STATE_ERROR);
			list_del(&ctx->buf->list);
			ctx->buf = NULL;
			break;
		}

		/* Fill buffer */
		ll = min(todo, ctx->remain);
		vbuf = vb2_plane_vaddr(&ctx->buf->vb, 0);
		memcpy(vbuf + ctx->offset, psrc, ll);
		todo -= ll;
		psrc += ll;

		ctx->remain -= ll;
		ctx->offset += ll;

		if (ctx->remain == 0) {
			vb2_buffer_done(&ctx->buf->vb, VB2_BUF_STATE_DONE);
			list_del(&ctx->buf->list);
			ctx->buf = NULL;
		}
	}

	if (ctx->nonblocking && ctx->buf) {
		vb2_set_plane_payload(&ctx->buf->vb, 0, ll);
		vb2_buffer_done(&ctx->buf->vb, VB2_BUF_STATE_DONE);
		list_del(&ctx->buf->list);
		ctx->buf = NULL;
	}
	spin_unlock_irqrestore(&ctx->slock, flags);

	if (todo)
		dprintk(1, "[%s] %d bytes are dropped.\n", ctx->name, todo);
	else
		dprintk(3, "[%s]\n", ctx->name);

	dprintk(3, "[%s] %d bytes are copied\n", ctx->name, len - todo);
	return (len - todo);
}

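/*
 * dvb_vb2_reqbufs() - clamp the requested buffer size to DVB_V2_MAX_SIZE
 * and allocate MMAP buffers through the vb2 core.
 */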
int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req)
{
	int ret;

	/* Adjust size to a sane value */
	if (req->size > DVB_V2_MAX_SIZE)
		req->size = DVB_V2_MAX_SIZE;

	/* FIXME: round req->size to a 188 or 204 multiple */

	ctx->buf_siz = req->size;
	ctx->buf_cnt = req->count;
	ret = vb2_core_reqbufs(&ctx->vb_q, VB2_MEMORY_MMAP, 0, &req->count);
	if (ret) {
		ctx->state = DVB_VB2_STATE_NONE;
		dprintk(1, "[%s] count=%d size=%d errno=%d\n", ctx->name,
			ctx->buf_cnt, ctx->buf_siz, ret);
		return ret;
	}
	ctx->state |= DVB_VB2_STATE_REQBUFS;
	dprintk(3, "[%s] count=%d size=%d\n", ctx->name,
		ctx->buf_cnt, ctx->buf_siz);

	return 0;
}

int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
	struct vb2_queue *q = &ctx->vb_q;
	struct vb2_buffer *vb2 = vb2_get_buffer(q, b->index);

	if (!vb2) {
		dprintk(1, "[%s] invalid buffer index\n", ctx->name);
		return -EINVAL;
	}
	vb2_core_querybuf(&ctx->vb_q, vb2, b);
	dprintk(3, "[%s] index=%d\n", ctx->name, b->index);
	return 0;
}

int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp)
{
	struct vb2_queue *q = &ctx->vb_q;
	int ret;

	ret = vb2_core_expbuf(&ctx->vb_q, &exp->fd, q->type,
			      q->bufs[exp->index], 0, exp->flags);
	if (ret) {
		dprintk(1, "[%s] index=%d errno=%d\n", ctx->name,
			exp->index, ret);
		return ret;
	}
	dprintk(3, "[%s] index=%d fd=%d\n", ctx->name, exp->index, exp->fd);

	return 0;
}

int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
	struct vb2_queue *q = &ctx->vb_q;
	struct vb2_buffer *vb2 = vb2_get_buffer(q, b->index);
	int ret;

	if (!vb2) {
		dprintk(1, "[%s] invalid buffer index\n", ctx->name);
		return -EINVAL;
	}
	ret = vb2_core_qbuf(&ctx->vb_q, vb2, b, NULL);
	if (ret) {
		dprintk(1, "[%s] index=%d errno=%d\n", ctx->name,
			b->index, ret);
		return ret;
	}
	dprintk(5, "[%s] index=%d\n", ctx->name, b->index);

	return 0;
}

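/*
 * dvb_vb2_dqbuf() - dequeue a filled buffer and stamp it with the running
 * sequence count and the flags accumulated by dvb_vb2_fill_buffer().
 */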
int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
	unsigned long flags;
	int ret;

	ret = vb2_core_dqbuf(&ctx->vb_q, &b->index, b, ctx->nonblocking);
	if (ret) {
		dprintk(1, "[%s] errno=%d\n", ctx->name, ret);
		return ret;
	}

	spin_lock_irqsave(&ctx->slock, flags);
	b->count = ctx->count++;
	b->flags = ctx->flags;
	ctx->flags = 0;
	spin_unlock_irqrestore(&ctx->slock, flags);

	dprintk(5, "[%s] index=%d, count=%d, flags=%d\n",
		ctx->name, b->index, ctx->count, b->flags);

	return 0;
}

int dvb_vb2_mmap(struct dvb_vb2_ctx *ctx, struct vm_area_struct *vma)
{
	int ret;

	ret = vb2_mmap(&ctx->vb_q, vma);
	if (ret) {
		dprintk(1, "[%s] errno=%d\n", ctx->name, ret);
		return ret;
	}
	dprintk(3, "[%s] ret=%d\n", ctx->name, ret);

	return 0;
}

__poll_t dvb_vb2_poll(struct dvb_vb2_ctx *ctx, struct file *file,
		      poll_table *wait)
{
	dprintk(3, "[%s]\n", ctx->name);
	return vb2_core_poll(&ctx->vb_q, file, wait);
}