// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Memory-to-memory device framework for Video for Linux 2 and vb2.
 *
 * Helper functions for devices that use vb2 buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for vb2");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)


/* The job queue is not running new jobs */
#define QUEUE_PAUSED		(1 << 0)


/*
 * Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the
 * same offsets but for different queues
 */
#define DST_QUEUE_OFF_BASE	(1 << 30)

enum v4l2_m2m_entity_type {
	MEM2MEM_ENT_TYPE_SOURCE,
	MEM2MEM_ENT_TYPE_SINK,
	MEM2MEM_ENT_TYPE_PROC
};

static const char * const m2m_entity_name[] = {
	"source",
	"sink",
	"proc"
};

/**
 * struct v4l2_m2m_dev - per-device context
 * @source:		&struct media_entity pointer with the source entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @source_pad:		&struct media_pad with the source pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink:		&struct media_entity pointer with the sink entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink_pad:		&struct media_pad with the sink pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @proc:		&struct media_entity pointer with the M2M device itself.
 * @proc_pads:		&struct media_pad with the @proc pads.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @intf_devnode:	&struct media_intf devnode pointer with the interface
 *			which controls the M2M device.
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @job_work:		worker to run queued jobs.
 * @job_queue_flags:	flags of the queue status, %QUEUE_PAUSED.
 * @m2m_ops:		driver callbacks
 * @kref:		device reference count
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;
#ifdef CONFIG_MEDIA_CONTROLLER
	struct media_entity	*source;
	struct media_pad	source_pad;
	struct media_entity	sink;
	struct media_pad	sink_pad;
	struct media_entity	proc;
	struct media_pad	proc_pads[2];
	struct media_intf_devnode *intf_devnode;
#endif

	struct list_head	job_queue;
	spinlock_t		job_spinlock;
	struct work_struct	job_work;
	unsigned long		job_queue_flags;

	const struct v4l2_m2m_ops *m2m_ops;

	struct kref		kref;
};
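/*
 * Example (illustrative sketch, not part of this file): the set of
 * callbacks a driver typically supplies through &struct v4l2_m2m_ops.
 * All "my_*" names below are hypothetical driver functions.
 *
 *	static void my_device_run(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		// Program the hardware for one transaction. Completion is
 *		// usually signalled from the driver's interrupt handler,
 *		// which then calls v4l2_m2m_job_finish().
 *	}
 *
 *	static const struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,
 *		.job_ready	= my_job_ready,		// optional
 *		.job_abort	= my_job_abort,		// optional
 *	};
 */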
static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	return &get_queue_ctx(m2m_ctx, type)->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);

struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);

struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
	struct v4l2_m2m_buffer *b, *tmp;
	struct vb2_v4l2_buffer *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
		if (b->vb.vb2_buf.index == idx) {
			list_del(&b->list);
			q_ctx->num_rdy--;
			ret = &b->vb;
			break;
		}
	}
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);
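/*
 * Example (illustrative sketch): how a hypothetical device_run() would use
 * the ready-queue helpers above. v4l2_m2m_next_src_buf() and
 * v4l2_m2m_next_dst_buf() are the inline wrappers around
 * v4l2_m2m_next_buf() for the OUTPUT and CAPTURE queue respectively;
 * "my_ctx" and "my_hw_run" are made-up driver names.
 *
 *	static void my_device_run(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *
 *		// Program DMA from the src payload into the dst buffer;
 *		// both stay on their rdy_queue until the job completes.
 *		my_hw_run(ctx, &src->vb2_buf, &dst->vb2_buf);
 *	}
 */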
/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 *
 * Note that this function can be called on behalf of a given v4l2_m2m_ctx
 * context, but end up calling .device_run for another context.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	if (m2m_dev->job_queue_flags & QUEUE_PAUSED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Running new jobs is paused\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}
/*
 * __v4l2_m2m_try_queue() - queue a job
 * @m2m_dev: m2m device
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job.
 *
 * This function can run in interrupt context.
 */
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags_job;
	struct vb2_v4l2_buffer *dst, *src;

	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming ||
	    (!m2m_ctx->cap_q_ctx.q.streaming && !m2m_ctx->ignore_cap_streaming)) {
		if (!m2m_ctx->ignore_cap_streaming)
			dprintk("Streaming needs to be on for both queues\n");
		else
			dprintk("Streaming needs to be on for the OUTPUT queue\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		dprintk("Aborted context\n");
		goto job_unlock;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		dprintk("On job queue already\n");
		goto job_unlock;
	}

	src = v4l2_m2m_next_src_buf(m2m_ctx);
	dst = v4l2_m2m_next_dst_buf(m2m_ctx);
	if (!src && !m2m_ctx->out_q_ctx.buffered) {
		dprintk("No input buffers available\n");
		goto job_unlock;
	}
	if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
		dprintk("No output buffers available\n");
		goto job_unlock;
	}

	m2m_ctx->new_frame = true;

	if (src && dst && dst->is_held &&
	    dst->vb2_buf.copied_timestamp &&
	    dst->vb2_buf.timestamp != src->vb2_buf.timestamp) {
		dprintk("Timestamp mismatch, returning held capture buffer\n");
		dst->is_held = false;
		v4l2_m2m_dst_buf_remove(m2m_ctx);
		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
		dst = v4l2_m2m_next_dst_buf(m2m_ctx);

		if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
			dprintk("No output buffers available after returning held buffer\n");
			goto job_unlock;
		}
	}

	if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags &
			   VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
		m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp ||
			dst->vb2_buf.timestamp != src->vb2_buf.timestamp;

	if (m2m_ctx->has_stopped) {
		dprintk("Device has stopped\n");
		goto job_unlock;
	}

	if (m2m_dev->m2m_ops->job_ready &&
	    !m2m_dev->m2m_ops->job_ready(m2m_ctx->priv)) {
		dprintk("Driver not ready\n");
		goto job_unlock;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

job_unlock:
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}

/**
 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job. If suitable,
 * run the next queued job on the mem2mem device.
 *
 * This function shouldn't run in interrupt context.
 *
 * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
 * and then run another job for another context.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;

	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
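/*
 * Example (illustrative sketch): a hypothetical job_ready() callback, as
 * checked by __v4l2_m2m_try_queue() above. Returning 0 keeps the context
 * off the job_queue; since v4l2_m2m_try_schedule() runs again for every
 * buffer queued via v4l2_m2m_qbuf(), the context is re-evaluated as more
 * buffers arrive. "slices_per_frame" is a made-up driver condition.
 *
 *	static int my_job_ready(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		// e.g. a slice decoder that needs a whole frame's worth
 *		// of source buffers before it can start
 *		return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >=
 *		       ctx->slices_per_frame;
 *	}
 */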
/**
 * v4l2_m2m_device_run_work() - run pending jobs for the context
 * @work: Work structure used for scheduling the execution of this function.
 */
static void v4l2_m2m_device_run_work(struct work_struct *work)
{
	struct v4l2_m2m_dev *m2m_dev =
		container_of(work, struct v4l2_m2m_dev, job_work);

	v4l2_m2m_try_run(m2m_dev);
}

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then .job_abort is called and
 *    we wait for the job to finish.
 * 2] If the context is queued, then the context is removed from
 *    the job_queue.
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		if (m2m_dev->m2m_ops->job_abort)
			m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
			   !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}

/*
 * Schedule the next job, called from v4l2_m2m_job_finish() or
 * v4l2_m2m_buf_done_and_job_finish().
 */
static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev,
				       struct v4l2_m2m_ctx *m2m_ctx)
{
	/*
	 * This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes.
	 */
	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);

	/*
	 * We might be running in atomic context,
	 * but the job must be run in non-atomic context.
	 */
	schedule_work(&m2m_dev->job_work);
}
/*
 * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or
 * v4l2_m2m_buf_done_and_job_finish().
 */
static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		dprintk("Called by an instance not currently running\n");
		return false;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;
	return true;
}

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;
	bool schedule_next;

	/*
	 * This function should not be used for drivers that support
	 * holding capture buffers. Those should use
	 * v4l2_m2m_buf_done_and_job_finish() instead.
	 */
	WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags &
		VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	if (schedule_next)
		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);

void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
				      struct v4l2_m2m_ctx *m2m_ctx,
				      enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	bool schedule_next = false;
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
	dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);

	if (WARN_ON(!src_buf || !dst_buf))
		goto unlock;
	dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	if (!dst_buf->is_held) {
		v4l2_m2m_dst_buf_remove(m2m_ctx);
		v4l2_m2m_buf_done(dst_buf, state);
	}
	/*
	 * If the request API is being used, returning the OUTPUT
	 * (src) buffer will wake-up any process waiting on the
	 * request file descriptor.
	 *
	 * Therefore, return the CAPTURE (dst) buffer first,
	 * to avoid signalling the request file descriptor
	 * before the CAPTURE buffer is done.
	 */
	v4l2_m2m_buf_done(src_buf, state);
	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
unlock:
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	if (schedule_next)
		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);
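/*
 * Example (illustrative sketch): finishing a job from a hypothetical
 * interrupt handler. A driver without hold-capture-buffer support removes
 * and returns both buffers itself and then calls v4l2_m2m_job_finish();
 * a driver with that capability would call
 * v4l2_m2m_buf_done_and_job_finish() instead, which performs the
 * equivalent buffer handling shown above on its behalf.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *		v4l2_m2m_buf_copy_metadata(src, dst);
 *		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */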
void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	struct v4l2_m2m_ctx *curr_ctx;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	m2m_dev->job_queue_flags |= QUEUE_PAUSED;
	curr_ctx = m2m_dev->curr_ctx;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	if (curr_ctx)
		wait_event(curr_ctx->finished,
			   !(curr_ctx->job_flags & TRANS_RUNNING));
}
EXPORT_SYMBOL(v4l2_m2m_suspend);

void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	m2m_dev->job_queue_flags &= ~QUEUE_PAUSED;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_resume);
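/*
 * Example (illustrative sketch): wiring the two helpers above into
 * hypothetical driver PM callbacks so that no job is running while the
 * device is powered down. "my_dev" and "mydev->m2m_dev" are assumed
 * driver state, not framework members.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct my_dev *mydev = dev_get_drvdata(dev);
 *
 *		v4l2_m2m_suspend(mydev->m2m_dev); // waits for a running job
 *		// ... save state, gate clocks ...
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		struct my_dev *mydev = dev_get_drvdata(dev);
 *
 *		// ... ungate clocks, restore state ...
 *		v4l2_m2m_resume(mydev->m2m_dev); // unpauses and retries jobs
 *		return 0;
 *	}
 */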
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	ret = vb2_reqbufs(vq, reqbufs);
	/*
	 * If count == 0, then the owner has released all buffers and is
	 * no longer the owner of the queue. Otherwise we have an owner.
	 */
	if (ret == 0)
		vq->owner = reqbufs->count ? file->private_data : NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,
				       struct v4l2_buffer *buf)
{
	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			unsigned int i;

			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}
}

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

/*
 * This will add the LAST flag and mark the buffer management
 * state as stopped.
 * This is called when the last capture buffer must be flagged as LAST
 * in draining mode from the encoder/decoder driver buf_queue() callback
 * or from v4l2_update_last_buf_state() when a capture buffer is available.
 */
void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
			       struct vb2_v4l2_buffer *vbuf)
{
	vbuf->flags |= V4L2_BUF_FLAG_LAST;
	vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);

	v4l2_m2m_mark_stopped(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buffer_done);

/* When stop command is issued, update buffer management state */
static int v4l2_update_last_buf_state(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct vb2_v4l2_buffer *next_dst_buf;

	if (m2m_ctx->is_draining)
		return -EBUSY;

	if (m2m_ctx->has_stopped)
		return 0;

	m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx);
	m2m_ctx->is_draining = true;

	/*
	 * The processing of the last output buffer queued before
	 * the STOP command is expected to mark the buffer management
	 * state as stopped with v4l2_m2m_mark_stopped().
	 */
	if (m2m_ctx->last_src_buf)
		return 0;

	/*
	 * In case the output queue is empty, try to mark the last capture
	 * buffer as LAST.
	 */
	next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
	if (!next_dst_buf) {
		/*
		 * Wait for the next queued one in encoder/decoder driver
		 * buf_queue() callback using the v4l2_m2m_dst_buf_is_last()
		 * helper or in v4l2_m2m_qbuf() if encoder/decoder is not yet
		 * streaming.
		 */
		m2m_ctx->next_buf_last = true;
		return 0;
	}

	v4l2_m2m_last_buffer_done(m2m_ctx, next_dst_buf);

	return 0;
}
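/*
 * Example (illustrative sketch): the buf_queue() side of the drain
 * protocol described in v4l2_update_last_buf_state() above. When the STOP
 * command found no capture buffer to tag, the next one queued by
 * userspace is flagged LAST here. "my_buf_queue" is a hypothetical vb2
 * callback of a stateful codec driver.
 *
 *	static void my_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		if (V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type) &&
 *		    vb2_is_streaming(vb->vb2_queue) &&
 *		    v4l2_m2m_dst_buf_is_last(ctx->fh.m2m_ctx)) {
 *			v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, vbuf);
 *			return;
 *		}
 *		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 *	}
 */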
/*
 * Updates the encoding/decoding buffer management state, should
 * be called from the encoder/decoder driver's start_streaming()
 */
void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					   struct vb2_queue *q)
{
	/* If start streaming again, untag the last output buffer */
	if (V4L2_TYPE_IS_OUTPUT(q->type))
		m2m_ctx->last_src_buf = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_update_start_streaming_state);

/*
 * Updates the encoding/decoding buffer management state, should
 * be called from the encoder/decoder driver's stop_streaming()
 */
void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					  struct vb2_queue *q)
{
	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		/*
		 * If in draining state, either mark next dst buffer as
		 * done or flag next one to be marked as done either
		 * in encoder/decoder driver buf_queue() callback using
		 * the v4l2_m2m_dst_buf_is_last() helper or in v4l2_m2m_qbuf()
		 * if encoder/decoder is not yet streaming
		 */
		if (m2m_ctx->is_draining) {
			struct vb2_v4l2_buffer *next_dst_buf;

			m2m_ctx->last_src_buf = NULL;
			next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
			if (!next_dst_buf)
				m2m_ctx->next_buf_last = true;
			else
				v4l2_m2m_last_buffer_done(m2m_ctx,
							  next_dst_buf);
		}
	} else {
		v4l2_m2m_clear_state(m2m_ctx);
	}
}
EXPORT_SYMBOL_GPL(v4l2_m2m_update_stop_streaming_state);

static void v4l2_m2m_force_last_buf_done(struct v4l2_m2m_ctx *m2m_ctx,
					 struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	struct vb2_v4l2_buffer *vbuf;
	unsigned int i;

	if (WARN_ON(q->is_output))
		return;
	if (list_empty(&q->queued_list))
		return;

	vb = list_first_entry(&q->queued_list, struct vb2_buffer, queued_entry);
	for (i = 0; i < vb->num_planes; i++)
		vb2_set_plane_payload(vb, i, 0);

	/*
	 * Since the buffer hasn't been queued to the ready queue,
	 * mark it active and owned before marking it LAST and DONE
	 */
	vb->state = VB2_BUF_STATE_ACTIVE;
	atomic_inc(&q->owned_by_drv_count);

	vbuf = to_vb2_v4l2_buffer(vb);
	vbuf->field = V4L2_FIELD_NONE;

	v4l2_m2m_last_buffer_done(m2m_ctx, vbuf);
}
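/*
 * Example (illustrative sketch): calling the two state-update helpers
 * above from hypothetical vb2 streaming callbacks, alongside the usual
 * requirement to give every queued buffer back to vb2 on stop.
 *
 *	static int my_start_streaming(struct vb2_queue *q, unsigned int count)
 *	{
 *		struct my_ctx *ctx = vb2_get_drv_priv(q);
 *
 *		v4l2_m2m_update_start_streaming_state(ctx->fh.m2m_ctx, q);
 *		return 0;
 *	}
 *
 *	static void my_stop_streaming(struct vb2_queue *q)
 *	{
 *		struct my_ctx *ctx = vb2_get_drv_priv(q);
 *		struct vb2_v4l2_buffer *vbuf;
 *
 *		v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
 *		for (;;) {
 *			if (V4L2_TYPE_IS_OUTPUT(q->type))
 *				vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *			else
 *				vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *			if (!vbuf)
 *				break;
 *			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
 *		}
 *	}
 */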
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	if (V4L2_TYPE_IS_CAPTURE(vq->type) &&
	    (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
		dprintk("%s: requests cannot be used with capture buffers\n",
			__func__);
		return -EPERM;
	}

	ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	/*
	 * If the capture queue is streaming, but streaming hasn't started
	 * on the device yet and the device was asked to stop, mark the
	 * previously queued buffer as DONE with the LAST flag since it
	 * won't be queued on the device.
	 */
	if (V4L2_TYPE_IS_CAPTURE(vq->type) &&
	    vb2_is_streaming(vq) && !vb2_start_streaming_called(vq) &&
	    (v4l2_m2m_has_stopped(m2m_ctx) || v4l2_m2m_dst_buf_is_last(m2m_ctx)))
		v4l2_m2m_force_last_buf_done(m2m_ctx, vq);
	else if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
		v4l2_m2m_try_schedule(m2m_ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/*
	 * Drop queue, since streamoff returns device to the same state as
	 * after calling reqbufs.
	 */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
static __poll_t v4l2_m2m_poll_for_data(struct file *file,
				       struct v4l2_m2m_ctx *m2m_ctx,
				       struct poll_table_struct *wait)
{
	struct vb2_queue *src_q, *dst_q;
	__poll_t rc = 0;
	unsigned long flags;

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list,
	 * which means either in driver already or waiting for driver to
	 * claim it and start processing.
	 */
	if ((!vb2_is_streaming(src_q) || src_q->error ||
	     list_empty(&src_q->queued_list)) &&
	    (!vb2_is_streaming(dst_q) || dst_q->error ||
	     (list_empty(&dst_q->queued_list) && !dst_q->last_buffer_dequeued)))
		return EPOLLERR;

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		rc |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	/*
	 * If the last buffer was dequeued from the capture queue, signal
	 * userspace. DQBUF(CAPTURE) will return -EPIPE.
	 */
	if (!list_empty(&dst_q->done_list) || dst_q->last_buffer_dequeued)
		rc |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	return rc;
}

__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       struct poll_table_struct *wait)
{
	struct v4l2_fh *fh = file_to_v4l2_fh(file);
	struct vb2_queue *src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
	__poll_t req_events = poll_requested_events(wait);
	__poll_t rc = 0;

	/*
	 * poll_wait() MUST be called on the first invocation on all the
	 * potential queues of interest, even if we are not interested in
	 * their events during this first call. Failure to do so will result
	 * in the queue's events being ignored because the poll_table won't
	 * be capable of adding new wait queues thereafter.
	 */
	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);

	if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))
		rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait);

	poll_wait(file, &fh->wait, wait);
	if (v4l2_event_pending(fh))
		rc |= EPOLLPRI;

	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

#ifndef CONFIG_MMU
unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags)
{
	struct v4l2_fh *fh = file_to_v4l2_fh(file);
	unsigned long offset = pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(fh->m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(fh->m2m_ctx);
		pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_get_unmapped_area(vq, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_get_unmapped_area);
#endif
#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
	media_devnode_remove(m2m_dev->intf_devnode);

	media_entity_remove_links(m2m_dev->source);
	media_entity_remove_links(&m2m_dev->sink);
	media_entity_remove_links(&m2m_dev->proc);
	media_device_unregister_entity(m2m_dev->source);
	media_device_unregister_entity(&m2m_dev->sink);
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->source->name);
	kfree(m2m_dev->sink.name);
	kfree(m2m_dev->proc.name);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);

static int v4l2_m2m_register_entity(struct media_device *mdev,
	struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
	struct video_device *vdev, int function)
{
	struct media_entity *entity;
	struct media_pad *pads;
	int num_pads;
	int ret;

	switch (type) {
	case MEM2MEM_ENT_TYPE_SOURCE:
		entity = m2m_dev->source;
		pads = &m2m_dev->source_pad;
		pads[0].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_SINK:
		entity = &m2m_dev->sink;
		pads = &m2m_dev->sink_pad;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_PROC:
		entity = &m2m_dev->proc;
		pads = m2m_dev->proc_pads;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		pads[1].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 2;
		break;
	default:
		return -EINVAL;
	}

	entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
	if (type != MEM2MEM_ENT_TYPE_PROC) {
		entity->info.dev.major = VIDEO_MAJOR;
		entity->info.dev.minor = vdev->minor;
	}
	entity->name = kasprintf(GFP_KERNEL, "%s-%s", vdev->name,
				 m2m_entity_name[type]);
	if (!entity->name)
		return -ENOMEM;
	entity->function = function;

	ret = media_entity_pads_init(entity, num_pads, pads);
	if (ret) {
		kfree(entity->name);
		entity->name = NULL;
		return ret;
	}
	ret = media_device_register_entity(mdev, entity);
	if (ret) {
		kfree(entity->name);
		entity->name = NULL;
		return ret;
	}

	return 0;
}
int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
		struct video_device *vdev, int function)
{
	struct media_device *mdev = vdev->v4l2_dev->mdev;
	struct media_link *link;
	int ret;

	if (!mdev)
		return 0;

	/*
	 * A memory-to-memory device consists of two DMA engine entities and
	 * one video processing entity. The DMA engine entities are linked to
	 * a V4L interface.
	 */

	/* Create the three entities with their pads */
	m2m_dev->source = &vdev->entity;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		return ret;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_PROC, vdev, function);
	if (ret)
		goto err_rel_entity0;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		goto err_rel_entity1;

	/* Connect the three entities */
	ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rel_entity2;

	ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rm_links0;

	/* Create video interface */
	m2m_dev->intf_devnode = media_devnode_create(mdev,
			MEDIA_INTF_T_V4L_VIDEO, 0,
			VIDEO_MAJOR, vdev->minor);
	if (!m2m_dev->intf_devnode) {
		ret = -ENOMEM;
		goto err_rm_links1;
	}

	/* Connect the two DMA engines to the interface */
	link = media_create_intf_link(m2m_dev->source,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}

	link = media_create_intf_link(&m2m_dev->sink,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_intf_link;
	}
	return 0;

err_rm_intf_link:
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
err_rm_devnode:
	media_devnode_remove(m2m_dev->intf_devnode);
err_rm_links1:
	media_entity_remove_links(&m2m_dev->sink);
err_rm_links0:
	media_entity_remove_links(&m2m_dev->proc);
	media_entity_remove_links(m2m_dev->source);
err_rel_entity2:
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->proc.name);
err_rel_entity1:
	media_device_unregister_entity(&m2m_dev->sink);
	kfree(m2m_dev->sink.name);
err_rel_entity0:
	media_device_unregister_entity(m2m_dev->source);
	kfree(m2m_dev->source->name);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
#endif

struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc_obj(*m2m_dev);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);
	INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);
	kref_init(&m2m_dev->kref);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);

void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

void v4l2_m2m_get(struct v4l2_m2m_dev *m2m_dev)
{
	kref_get(&m2m_dev->kref);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_get);

static void v4l2_m2m_release_from_kref(struct kref *kref)
{
	struct v4l2_m2m_dev *m2m_dev = container_of(kref, struct v4l2_m2m_dev, kref);

	v4l2_m2m_release(m2m_dev);
}

void v4l2_m2m_put(struct v4l2_m2m_dev *m2m_dev)
{
	kref_put(&m2m_dev->kref, v4l2_m2m_release_from_kref);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_put);
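/*
 * Example (illustrative sketch): typical probe()/remove() pairing for the
 * constructors above. "my_probe", "my_remove" and the my_dev layout are
 * hypothetical.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct my_dev *mydev;
 *		// ... allocate mydev, register the v4l2_device, etc. ...
 *
 *		mydev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *		if (IS_ERR(mydev->m2m_dev))
 *			return PTR_ERR(mydev->m2m_dev);
 *		// ... register the video device ...
 *		return 0;
 *	}
 *
 *	static void my_remove(struct platform_device *pdev)
 *	{
 *		struct my_dev *mydev = platform_get_drvdata(pdev);
 *
 *		v4l2_m2m_release(mydev->m2m_dev);
 *		// ... unregister video/v4l2 device ...
 *	}
 */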
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq,
				  struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc_obj(*m2m_ctx);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
	if (ret)
		goto err;
	/*
	 * Both queues should use the same mutex to lock the m2m context.
	 * This lock is used in some v4l2_m2m_* helpers.
	 */
	if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
		ret = -EINVAL;
		goto err;
	}
	m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);

void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
			struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b = container_of(vbuf,
				struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
				struct vb2_v4l2_buffer *cap_vb)
{
	const u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

	cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp;

	if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE)
		cap_vb->timecode = out_vb->timecode;
	cap_vb->field = out_vb->field;
	cap_vb->flags &= ~mask;
	cap_vb->flags |= out_vb->flags & mask;
	cap_vb->vb2_buf.copied_timestamp = 1;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata);
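/*
 * Example (illustrative sketch): creating the per-file m2m context from a
 * hypothetical open() file operation. Note that the queue_init() callback
 * must point both vb2 queues at the same mutex (checked above) and must
 * set buf_struct_size to sizeof(struct v4l2_m2m_buffer) so that
 * v4l2_m2m_buf_queue() can put the buffer on the ready queue.
 *
 *	static int my_queue_init(void *priv, struct vb2_queue *src_vq,
 *				 struct vb2_queue *dst_vq)
 *	{
 *		struct my_ctx *ctx = priv;
 *		int ret;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *		src_vq->drv_priv = ctx;
 *		src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		src_vq->ops = &my_qops;
 *		src_vq->lock = &ctx->dev->mutex;
 *		// ... io_modes, mem_ops, timestamp_flags ...
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *
 *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		dst_vq->drv_priv = ctx;
 *		dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		dst_vq->ops = &my_qops;
 *		dst_vq->lock = &ctx->dev->mutex;	// same lock
 *		// ...
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 *	// in my_open(), after v4l2_fh_init(&ctx->fh, vdev):
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
 *	if (IS_ERR(ctx->fh.m2m_ctx))
 *		return PTR_ERR(ctx->fh.m2m_ctx);
 */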
void v4l2_m2m_request_queue(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;
	struct v4l2_m2m_ctx *m2m_ctx = NULL;

	/*
	 * Queue all objects. Note that buffer objects are at the end of the
	 * objects list, after all other object types. Once buffer objects
	 * are queued, the driver might delete them immediately (if the driver
	 * processes the buffer at once), so we have to use
	 * list_for_each_entry_safe() to handle the case where the object we
	 * queue is deleted.
	 */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		struct v4l2_m2m_ctx *m2m_ctx_obj;
		struct vb2_buffer *vb;

		if (!obj->ops->queue)
			continue;

		if (vb2_request_object_is_buffer(obj)) {
			/* Sanity checks */
			vb = container_of(obj, struct vb2_buffer, req_obj);
			WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
			m2m_ctx_obj = container_of(vb->vb2_queue,
						   struct v4l2_m2m_ctx,
						   out_q_ctx.q);
			WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
			m2m_ctx = m2m_ctx_obj;
		}

		/*
		 * The buffer we queue here can in theory be immediately
		 * unbound, hence the use of list_for_each_entry_safe()
		 * above and why we call the queue op last.
		 */
		obj->ops->queue(obj);
	}

	WARN_ON(!m2m_ctx);

	if (m2m_ctx)
		v4l2_m2m_try_schedule(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
			   struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file_to_v4l2_fh(file);

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
			       struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file_to_v4l2_fh(file);

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_remove_bufs(struct file *file, void *priv,
			       struct v4l2_remove_buffers *remove)
{
	struct v4l2_fh *fh = file_to_v4l2_fh(file);
	struct vb2_queue *q = v4l2_m2m_get_vq(fh->m2m_ctx, remove->type);

	if (q->type != remove->type)
		return -EINVAL;

	return vb2_core_remove_bufs(q, remove->index, remove->count);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_remove_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
			    struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file_to_v4l2_fh(file);

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
			struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file_to_v4l2_fh(file);

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
			 struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file_to_v4l2_fh(file);

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
			       struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file_to_v4l2_fh(file);

	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
			  struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file_to_v4l2_fh(file);

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
			    enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file_to_v4l2_fh(file);

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
			     enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file_to_v4l2_fh(file);

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
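/*
 * Example (illustrative sketch): the helpers above are designed to be
 * plugged directly into a driver's &struct v4l2_ioctl_ops; only the
 * format handling remains driver specific.
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		// ... vidioc_querycap and format ioctls ...
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_prepare_buf	= v4l2_m2m_ioctl_prepare_buf,
 *		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 */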
int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *priv,
				   struct v4l2_encoder_cmd *ec)
{
	if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
		return -EINVAL;

	ec->flags = 0;
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd);

int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *priv,
				   struct v4l2_decoder_cmd *dc)
{
	if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
		return -EINVAL;

	dc->flags = 0;

	if (dc->cmd == V4L2_DEC_CMD_STOP) {
		dc->stop.pts = 0;
	} else if (dc->cmd == V4L2_DEC_CMD_START) {
		dc->start.speed = 0;
		dc->start.format = V4L2_DEC_START_FMT_NONE;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);

/*
 * Updates the encoding state on ENC_CMD_STOP/ENC_CMD_START.
 * Should be called from the encoder driver encoder_cmd() callback.
 */
int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_encoder_cmd *ec)
{
	if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
		return -EINVAL;

	if (ec->cmd == V4L2_ENC_CMD_STOP)
		return v4l2_update_last_buf_state(m2m_ctx);

	if (m2m_ctx->is_draining)
		return -EBUSY;

	if (m2m_ctx->has_stopped)
		m2m_ctx->has_stopped = false;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_encoder_cmd);

/*
 * Updates the decoding state on DEC_CMD_STOP/DEC_CMD_START.
 * Should be called from the decoder driver decoder_cmd() callback.
 */
int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_decoder_cmd *dc)
{
	if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
		return -EINVAL;

	if (dc->cmd == V4L2_DEC_CMD_STOP)
		return v4l2_update_last_buf_state(m2m_ctx);

	if (m2m_ctx->is_draining)
		return -EBUSY;

	if (m2m_ctx->has_stopped)
		m2m_ctx->has_stopped = false;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_decoder_cmd);

int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv,
			       struct v4l2_encoder_cmd *ec)
{
	struct v4l2_fh *fh = file_to_v4l2_fh(file);

	return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_encoder_cmd);

int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv,
			       struct v4l2_decoder_cmd *dc)
{
	struct v4l2_fh *fh = file_to_v4l2_fh(file);

	return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_decoder_cmd);

int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *priv,
					     struct v4l2_decoder_cmd *dc)
{
	if (dc->cmd != V4L2_DEC_CMD_FLUSH)
		return -EINVAL;

	dc->flags = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd);

int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
					 struct v4l2_decoder_cmd *dc)
{
	struct v4l2_fh *fh = file_to_v4l2_fh(file);
	struct vb2_v4l2_buffer *out_vb, *cap_vb;
	struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev;
	unsigned long flags;
	int ret;

	ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx);
	cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx);

	/*
	 * If there is an out buffer pending, then clear any HOLD flag.
	 *
	 * By clearing this flag we ensure that when this output
	 * buffer is processed any held capture buffer will be released.
	 */
	if (out_vb) {
		out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	} else if (cap_vb && cap_vb->is_held) {
		/*
		 * If there were no output buffers, but there is a
		 * capture buffer that is held, then release that
		 * buffer.
		 */
		cap_vb->is_held = false;
		v4l2_m2m_dst_buf_remove(fh->m2m_ctx);
		v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd);

/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queue.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file_to_v4l2_fh(file);

	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file_to_v4l2_fh(file);
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	__poll_t ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
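/*
 * Example (illustrative sketch): the matching &struct v4l2_file_operations
 * hookup for the two helpers above; open()/release() stay driver specific.
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */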