// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)

enum v4l2_m2m_entity_type {
	MEM2MEM_ENT_TYPE_SOURCE,
	MEM2MEM_ENT_TYPE_SINK,
	MEM2MEM_ENT_TYPE_PROC
};

static const char * const m2m_entity_name[] = {
	"source",
	"sink",
	"proc"
};

/**
 * struct v4l2_m2m_dev - per-device context
 * @source:		&struct media_entity pointer with the source entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @source_pad:		&struct media_pad with the source pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink:		&struct media_entity with the sink entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink_pad:		&struct media_pad with the sink pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @proc:		&struct media_entity with the M2M device itself.
 * @proc_pads:		&struct media_pad with the @proc pads.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @intf_devnode:	&struct media_intf devnode pointer with the interface
 *			that controls the M2M device.
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @job_work:		worker to run queued jobs.
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;
#ifdef CONFIG_MEDIA_CONTROLLER
	struct media_entity	*source;
	struct media_pad	source_pad;
	struct media_entity	sink;
	struct media_pad	sink_pad;
	struct media_entity	proc;
	struct media_pad	proc_pads[2];
	struct media_intf_devnode *intf_devnode;
#endif

	struct list_head	job_queue;
	spinlock_t		job_spinlock;
	struct work_struct	job_work;

	const struct v4l2_m2m_ops *m2m_ops;
};

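/*
 * Job lifecycle: once both queues of a context are streaming and buffers are
 * available, __v4l2_m2m_try_queue() adds the context to @job_queue and sets
 * TRANS_QUEUED. v4l2_m2m_try_run() then picks the first queued context, marks
 * it TRANS_RUNNING, points @curr_ctx at it and calls the driver's
 * .device_run. When the driver reports completion, _v4l2_m2m_job_finish()
 * removes the context from the queue and clears both flags. TRANS_ABORT is
 * set while a context is being cancelled so that it is not requeued in the
 * meantime. All of this state is protected by @job_spinlock.
 */
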
static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);

struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);

struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
	struct v4l2_m2m_buffer *b, *tmp;
	struct vb2_v4l2_buffer *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
		if (b->vb.vb2_buf.index == idx) {
			list_del(&b->list);
			q_ctx->num_rdy--;
			ret = &b->vb;
			break;
		}
	}
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);

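/*
 * Usage sketch: a driver's .device_run callback typically peeks at the next
 * ready buffer on each queue via the v4l2_m2m_next_src_buf() /
 * v4l2_m2m_next_dst_buf() wrappers (from <media/v4l2-mem2mem.h>, built on
 * v4l2_m2m_next_buf() above) and hands them to the hardware. The foo_* names
 * are placeholders, not a real API:
 *
 *	static void foo_device_run(void *priv)
 *	{
 *		struct foo_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *
 *		v4l2_m2m_buf_copy_metadata(src, dst, true);
 *		foo_hw_start(ctx, &src->vb2_buf, &dst->vb2_buf);
 *	}
 */
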
/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 *
 * Note that this function can run on a given v4l2_m2m_ctx context,
 * but call .device_run for another context.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/*
 * __v4l2_m2m_try_queue() - queue a job
 * @m2m_dev: m2m device
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job.
 *
 * This function can run in interrupt context.
 */
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags_job;
	struct vb2_v4l2_buffer *dst, *src;

	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		dprintk("Aborted context\n");
		goto job_unlock;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		dprintk("On job queue already\n");
		goto job_unlock;
	}

	src = v4l2_m2m_next_src_buf(m2m_ctx);
	dst = v4l2_m2m_next_dst_buf(m2m_ctx);
	if (!src && !m2m_ctx->out_q_ctx.buffered) {
		dprintk("No input buffers available\n");
		goto job_unlock;
	}
	if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
		dprintk("No output buffers available\n");
		goto job_unlock;
	}

	m2m_ctx->new_frame = true;

	if (src && dst && dst->is_held &&
	    dst->vb2_buf.copied_timestamp &&
	    dst->vb2_buf.timestamp != src->vb2_buf.timestamp) {
		dst->is_held = false;
		v4l2_m2m_dst_buf_remove(m2m_ctx);
		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
		dst = v4l2_m2m_next_dst_buf(m2m_ctx);

		if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
			dprintk("No output buffers available after returning held buffer\n");
			goto job_unlock;
		}
	}

	if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags &
			   VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
		m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp ||
			dst->vb2_buf.timestamp != src->vb2_buf.timestamp;

	if (m2m_dev->m2m_ops->job_ready
		&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		dprintk("Driver not ready\n");
		goto job_unlock;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

job_unlock:
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}

/**
 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job. If suitable,
 * run the next queued job on the mem2mem device.
 *
 * This function shouldn't run in interrupt context.
 *
 * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
 * and then run another job for another context.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;

	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);

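/*
 * Usage sketch: drivers that need more than one buffer per job can gate
 * scheduling with the optional .job_ready callback checked in
 * __v4l2_m2m_try_queue(). The foo_* names and ctx->bufs_per_job are
 * placeholders:
 *
 *	static int foo_job_ready(void *priv)
 *	{
 *		struct foo_ctx *ctx = priv;
 *
 *		return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >=
 *		       ctx->bufs_per_job;
 *	}
 */
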
/**
 * v4l2_m2m_device_run_work() - run pending jobs for the context
 * @work: Work structure used for scheduling the execution of this function.
 */
static void v4l2_m2m_device_run_work(struct work_struct *work)
{
	struct v4l2_m2m_dev *m2m_dev =
		container_of(work, struct v4l2_m2m_dev, job_work);

	v4l2_m2m_try_run(m2m_dev);
}

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		if (m2m_dev->m2m_ops->job_abort)
			m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
			   !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}

/*
 * Schedule the next job, called from v4l2_m2m_job_finish() or
 * v4l2_m2m_buf_done_and_job_finish().
 */
static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev,
				       struct v4l2_m2m_ctx *m2m_ctx)
{
	/*
	 * This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes.
	 */
	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);

	/*
	 * We might be running in atomic context,
	 * but the job must be run in non-atomic context.
	 */
	schedule_work(&m2m_dev->job_work);
}

/*
 * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or
 * v4l2_m2m_buf_done_and_job_finish().
 */
static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		dprintk("Called by an instance not currently running\n");
		return false;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;
	return true;
}

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;
	bool schedule_next;

	/*
	 * This function should not be used for drivers that support
	 * holding capture buffers. Those should use
	 * v4l2_m2m_buf_done_and_job_finish() instead.
	 */
	WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags &
		VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	if (schedule_next)
		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);

void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
				      struct v4l2_m2m_ctx *m2m_ctx,
				      enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	bool schedule_next = false;
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
	dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);

	if (WARN_ON(!src_buf || !dst_buf))
		goto unlock;
	v4l2_m2m_buf_done(src_buf, state);
	dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	if (!dst_buf->is_held) {
		v4l2_m2m_dst_buf_remove(m2m_ctx);
		v4l2_m2m_buf_done(dst_buf, state);
	}
	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
unlock:
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	if (schedule_next)
		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);

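/*
 * Usage sketch: a typical completion path (e.g. an interrupt handler) removes
 * the buffers it was given in .device_run, marks them done and then calls
 * v4l2_m2m_job_finish() so the next queued job can be started. The foo_*
 * names are placeholders:
 *
 *	static irqreturn_t foo_irq(int irq, void *priv)
 *	{
 *		struct foo_dev *dev = priv;
 *		struct foo_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */
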
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	ret = vb2_reqbufs(vq, reqbufs);
	/* If count == 0, then the owner has released all buffers and it is
	   no longer the owner of the queue. Otherwise we have an owner. */
	if (ret == 0)
		vq->owner = reqbufs->count ? file->private_data : NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	if (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
	    (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
		dprintk("requests cannot be used with capture buffers\n");
		return -EPERM;
	}
	ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
	if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

static __poll_t v4l2_m2m_poll_for_data(struct file *file,
				       struct v4l2_m2m_ctx *m2m_ctx,
				       struct poll_table_struct *wait)
{
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	__poll_t rc = 0;
	unsigned long flags;

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || src_q->error ||
	     list_empty(&src_q->queued_list)) &&
	    (!dst_q->streaming || dst_q->error ||
	     list_empty(&dst_q->queued_list)))
		return EPOLLERR;

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (list_empty(&dst_q->done_list)) {
		/*
		 * If the last buffer was dequeued from the capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (dst_q->last_buffer_dequeued) {
			spin_unlock_irqrestore(&dst_q->done_lock, flags);
			return EPOLLIN | EPOLLRDNORM;
		}
	}
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
					  done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
		       || src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
					  done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
		       || dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	return rc;
}

__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	__poll_t req_events = poll_requested_events(wait);
	__poll_t rc = 0;

	if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))
		rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait);

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		poll_wait(file, &fh->wait, wait);
		if (v4l2_event_pending(fh))
			rc |= EPOLLPRI;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
	media_devnode_remove(m2m_dev->intf_devnode);

	media_entity_remove_links(m2m_dev->source);
	media_entity_remove_links(&m2m_dev->sink);
	media_entity_remove_links(&m2m_dev->proc);
	media_device_unregister_entity(m2m_dev->source);
	media_device_unregister_entity(&m2m_dev->sink);
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->source->name);
	kfree(m2m_dev->sink.name);
	kfree(m2m_dev->proc.name);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);

static int v4l2_m2m_register_entity(struct media_device *mdev,
	struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
	struct video_device *vdev, int function)
{
	struct media_entity *entity;
	struct media_pad *pads;
	char *name;
	unsigned int len;
	int num_pads;
	int ret;

	switch (type) {
	case MEM2MEM_ENT_TYPE_SOURCE:
		entity = m2m_dev->source;
		pads = &m2m_dev->source_pad;
		pads[0].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_SINK:
		entity = &m2m_dev->sink;
		pads = &m2m_dev->sink_pad;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_PROC:
		entity = &m2m_dev->proc;
		pads = m2m_dev->proc_pads;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		pads[1].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 2;
		break;
	default:
		return -EINVAL;
	}

	entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
	if (type != MEM2MEM_ENT_TYPE_PROC) {
		entity->info.dev.major = VIDEO_MAJOR;
		entity->info.dev.minor = vdev->minor;
	}
	len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
	name = kmalloc(len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
	entity->name = name;
	entity->function = function;

	ret = media_entity_pads_init(entity, num_pads, pads);
	if (ret)
		return ret;
	ret = media_device_register_entity(mdev, entity);
	if (ret)
		return ret;

	return 0;
}

int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
		struct video_device *vdev, int function)
{
	struct media_device *mdev = vdev->v4l2_dev->mdev;
	struct media_link *link;
	int ret;

	if (!mdev)
		return 0;

	/* A memory-to-memory device consists of two DMA engine entities and
	 * one video processing entity.
	 * The DMA engine entities are linked to a V4L interface.
	 */

	/* Create the three entities with their pads */
	m2m_dev->source = &vdev->entity;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		return ret;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_PROC, vdev, function);
	if (ret)
		goto err_rel_entity0;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		goto err_rel_entity1;

	/* Connect the three entities */
	ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rel_entity2;

	ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rm_links0;

	/* Create video interface */
	m2m_dev->intf_devnode = media_devnode_create(mdev,
			MEDIA_INTF_T_V4L_VIDEO, 0,
			VIDEO_MAJOR, vdev->minor);
	if (!m2m_dev->intf_devnode) {
		ret = -ENOMEM;
		goto err_rm_links1;
	}

	/* Connect the two DMA engines to the interface */
	link = media_create_intf_link(m2m_dev->source,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}

	link = media_create_intf_link(&m2m_dev->sink,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_intf_link;
	}
	return 0;

err_rm_intf_link:
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
err_rm_devnode:
	media_devnode_remove(m2m_dev->intf_devnode);
err_rm_links1:
	media_entity_remove_links(&m2m_dev->sink);
err_rm_links0:
	media_entity_remove_links(&m2m_dev->proc);
	media_entity_remove_links(m2m_dev->source);
err_rel_entity2:
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->proc.name);
err_rel_entity1:
	media_device_unregister_entity(&m2m_dev->sink);
	kfree(m2m_dev->sink.name);
err_rel_entity0:
	media_device_unregister_entity(m2m_dev->source);
	kfree(m2m_dev->source->name);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
#endif

struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);
	INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);

void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

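/*
 * Usage sketch: the per-device context is usually created once at probe time
 * and released on removal. The foo_* names are placeholders:
 *
 *	static const struct v4l2_m2m_ops foo_m2m_ops = {
 *		.device_run	= foo_device_run,
 *		.job_ready	= foo_job_ready,
 *		.job_abort	= foo_job_abort,
 *	};
 *
 *	// in foo_probe():
 *	dev->m2m_dev = v4l2_m2m_init(&foo_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 *
 *	// in foo_remove():
 *	v4l2_m2m_release(dev->m2m_dev);
 */
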
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);

	if (ret)
		goto err;
	/*
	 * Both queues should use the same mutex to lock the m2m context.
	 * This lock is used in some v4l2_m2m_* helpers.
	 */
	if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
		ret = -EINVAL;
		goto err;
	}
	m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);

void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
			struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b = container_of(vbuf,
				struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

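/*
 * Usage sketch: per-file-handle setup in a driver's open() pairs
 * v4l2_m2m_ctx_init() with a queue_init callback that configures both vb2
 * queues, and the driver's vb2 .buf_queue op forwards buffers to the
 * framework with v4l2_m2m_buf_queue(). The foo_* names are placeholders and
 * the queue setup is elided:
 *
 *	static void foo_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct foo_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, to_vb2_v4l2_buffer(vb));
 *	}
 *
 *	static int foo_queue_init(void *priv, struct vb2_queue *src_vq,
 *				  struct vb2_queue *dst_vq)
 *	{
 *		int ret;
 *
 *		// fill in io_modes, ops, mem_ops, lock, ... for both queues;
 *		// both queues must share the same lock, as checked above
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 *	// in foo_open():
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, foo_queue_init);
 */
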
void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
				struct vb2_v4l2_buffer *cap_vb,
				bool copy_frame_flags)
{
	u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

	if (copy_frame_flags)
		mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
			V4L2_BUF_FLAG_BFRAME;

	cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp;

	if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE)
		cap_vb->timecode = out_vb->timecode;
	cap_vb->field = out_vb->field;
	cap_vb->flags &= ~mask;
	cap_vb->flags |= out_vb->flags & mask;
	cap_vb->vb2_buf.copied_timestamp = 1;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata);

void v4l2_m2m_request_queue(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;
	struct v4l2_m2m_ctx *m2m_ctx = NULL;

	/*
	 * Queue all objects. Note that buffer objects are at the end of the
	 * objects list, after all other object types. Once buffer objects
	 * are queued, the driver might delete them immediately (if the driver
	 * processes the buffer at once), so we have to use
	 * list_for_each_entry_safe() to handle the case where the object we
	 * queue is deleted.
	 */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		struct v4l2_m2m_ctx *m2m_ctx_obj;
		struct vb2_buffer *vb;

		if (!obj->ops->queue)
			continue;

		if (vb2_request_object_is_buffer(obj)) {
			/* Sanity checks */
			vb = container_of(obj, struct vb2_buffer, req_obj);
			WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
			m2m_ctx_obj = container_of(vb->vb2_queue,
						   struct v4l2_m2m_ctx,
						   out_q_ctx.q);
			WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
			m2m_ctx = m2m_ctx_obj;
		}

		/*
		 * The buffer we queue here can in theory be immediately
		 * unbound, hence the use of list_for_each_entry_safe()
		 * above and why we call the queue op last.
		 */
		obj->ops->queue(obj);
	}

	WARN_ON(!m2m_ctx);

	if (m2m_ctx)
		v4l2_m2m_try_schedule(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
			   struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
			       struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
			    struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
			struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
			 struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
			       struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
			  struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
			    enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
			     enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);

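/*
 * Usage sketch: the helpers above let a driver's v4l2_ioctl_ops delegate all
 * buffer handling to the framework. They assume file->private_data is a
 * struct v4l2_fh with a valid fh->m2m_ctx. foo_ioctl_ops is a placeholder and
 * format handling is elided:
 *
 *	static const struct v4l2_ioctl_ops foo_ioctl_ops = {
 *		...
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_prepare_buf	= v4l2_m2m_ioctl_prepare_buf,
 *		.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
 *		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 */
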
int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
				   struct v4l2_encoder_cmd *ec)
{
	if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
		return -EINVAL;

	ec->flags = 0;
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd);

int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
				   struct v4l2_decoder_cmd *dc)
{
	if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
		return -EINVAL;

	dc->flags = 0;

	if (dc->cmd == V4L2_DEC_CMD_STOP) {
		dc->stop.pts = 0;
	} else if (dc->cmd == V4L2_DEC_CMD_START) {
		dc->start.speed = 0;
		dc->start.format = V4L2_DEC_START_FMT_NONE;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);

int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
					     struct v4l2_decoder_cmd *dc)
{
	if (dc->cmd != V4L2_DEC_CMD_FLUSH)
		return -EINVAL;

	dc->flags = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd);

int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
					 struct v4l2_decoder_cmd *dc)
{
	struct v4l2_fh *fh = file->private_data;
	struct vb2_v4l2_buffer *out_vb, *cap_vb;
	struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev;
	unsigned long flags;
	int ret;

	ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx);
	cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx);

	/*
	 * If there is an out buffer pending, then clear any HOLD flag.
	 *
	 * By clearing this flag we ensure that when this output
	 * buffer is processed any held capture buffer will be released.
	 */
	if (out_vb) {
		out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	} else if (cap_vb && cap_vb->is_held) {
		/*
		 * If there were no output buffers, but there is a
		 * capture buffer that is held, then release that
		 * buffer.
		 */
		cap_vb->is_held = false;
		v4l2_m2m_dst_buf_remove(fh->m2m_ctx);
		v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd);

/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is used
 * for the output and the capture buffer queue.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	__poll_t ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
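
/*
 * Usage sketch: a driver can plug the fop helpers above straight into its
 * file operations. foo_open()/foo_release() are placeholders; they are
 * expected to create and release the m2m context (v4l2_m2m_ctx_init() /
 * v4l2_m2m_ctx_release()) behind fh->m2m_ctx:
 *
 *	static const struct v4l2_file_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= foo_open,
 *		.release	= foo_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */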