// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)


/* The job queue is not running new jobs */
#define QUEUE_PAUSED		(1 << 0)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)

enum v4l2_m2m_entity_type {
	MEM2MEM_ENT_TYPE_SOURCE,
	MEM2MEM_ENT_TYPE_SINK,
	MEM2MEM_ENT_TYPE_PROC
};

static const char * const m2m_entity_name[] = {
	"source",
	"sink",
	"proc"
};

/**
 * struct v4l2_m2m_dev - per-device context
 * @source:		&struct media_entity pointer with the source entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @source_pad:		&struct media_pad with the source pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink:		&struct media_entity pointer with the sink entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink_pad:		&struct media_pad with the sink pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @proc:		&struct media_entity pointer with the M2M device itself.
 * @proc_pads:		&struct media_pad with the @proc pads.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @intf_devnode:	&struct media_intf devnode pointer with the interface
 *			that controls the M2M device.
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @job_work:		worker to run queued jobs.
 * @job_queue_flags:	flags of the queue status, %QUEUE_PAUSED.
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;
#ifdef CONFIG_MEDIA_CONTROLLER
	struct media_entity	*source;
	struct media_pad	source_pad;
	struct media_entity	sink;
	struct media_pad	sink_pad;
	struct media_entity	proc;
	struct media_pad	proc_pads[2];
	struct media_intf_devnode *intf_devnode;
#endif

	struct list_head	job_queue;
	spinlock_t		job_spinlock;
	struct work_struct	job_work;
	unsigned long		job_queue_flags;

	const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);

struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);
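/*
 * Example (not part of the framework): a driver's device_run() usually
 * peeks at the next ready buffer on each queue with
 * v4l2_m2m_next_src_buf()/v4l2_m2m_next_dst_buf() and then starts the
 * hardware. This is only a hedged sketch; my_ctx, my_hw_start() and the
 * ctx->fh/ctx->dev layout are hypothetical driver-side names.
 *
 *	static void my_device_run(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *
 *		// Propagate timestamp and flags from OUTPUT to CAPTURE.
 *		v4l2_m2m_buf_copy_metadata(src, dst, true);
 *
 *		// Program the hardware; the completion interrupt calls
 *		// v4l2_m2m_job_finish() later.
 *		my_hw_start(ctx->dev, &src->vb2_buf, &dst->vb2_buf);
 *	}
 */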
struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
	struct v4l2_m2m_buffer *b, *tmp;
	struct vb2_v4l2_buffer *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
		if (b->vb.vb2_buf.index == idx) {
			list_del(&b->list);
			q_ctx->num_rdy--;
			ret = &b->vb;
			break;
		}
	}
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);

/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 *
 * Note that this function can run on a given v4l2_m2m_ctx context,
 * but call .device_run for another context.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (NULL != m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	if (m2m_dev->job_queue_flags & QUEUE_PAUSED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Running new jobs is paused\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/*
 * __v4l2_m2m_try_queue() - queue a job
 * @m2m_dev: m2m device
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job.
 *
 * This function can run in interrupt context.
 */
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags_job;
	struct vb2_v4l2_buffer *dst, *src;

	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		dprintk("Aborted context\n");
		goto job_unlock;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		dprintk("On job queue already\n");
		goto job_unlock;
	}

	src = v4l2_m2m_next_src_buf(m2m_ctx);
	dst = v4l2_m2m_next_dst_buf(m2m_ctx);
	if (!src && !m2m_ctx->out_q_ctx.buffered) {
		dprintk("No input buffers available\n");
		goto job_unlock;
	}
	if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
		dprintk("No output buffers available\n");
		goto job_unlock;
	}

	m2m_ctx->new_frame = true;

	if (src && dst && dst->is_held &&
	    dst->vb2_buf.copied_timestamp &&
	    dst->vb2_buf.timestamp != src->vb2_buf.timestamp) {
		dst->is_held = false;
		v4l2_m2m_dst_buf_remove(m2m_ctx);
		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
		dst = v4l2_m2m_next_dst_buf(m2m_ctx);

		if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
			dprintk("No output buffers available after returning held buffer\n");
			goto job_unlock;
		}
	}

	if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags &
			   VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
		m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp ||
			dst->vb2_buf.timestamp != src->vb2_buf.timestamp;

	if (m2m_ctx->has_stopped) {
		dprintk("Device has stopped\n");
		goto job_unlock;
	}

	if (m2m_dev->m2m_ops->job_ready
		&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		dprintk("Driver not ready\n");
		goto job_unlock;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

job_unlock:
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}

/**
 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job. If suitable,
 * run the next queued job on the mem2mem device.
 *
 * This function shouldn't run in interrupt context.
 *
 * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
 * and then run another job for another context.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;

	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
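/*
 * Example (not part of the framework): a driver that needs more than one
 * source buffer per job can veto scheduling from its job_ready() callback;
 * v4l2_m2m_try_schedule() is simply retried on the next qbuf. A hedged
 * sketch, with my_ctx, my_device_run, my_job_abort and MY_MIN_SRC_BUFS as
 * hypothetical names:
 *
 *	static int my_job_ready(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >=
 *		       MY_MIN_SRC_BUFS;
 *	}
 *
 *	static const struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,
 *		.job_ready	= my_job_ready,
 *		.job_abort	= my_job_abort,
 *	};
 */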
/**
 * v4l2_m2m_device_run_work() - run pending jobs for the context
 * @work: Work structure used for scheduling the execution of this function.
 */
static void v4l2_m2m_device_run_work(struct work_struct *work)
{
	struct v4l2_m2m_dev *m2m_dev =
		container_of(work, struct v4l2_m2m_dev, job_work);

	v4l2_m2m_try_run(m2m_dev);
}

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		if (m2m_dev->m2m_ops->job_abort)
			m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
			   !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}

/*
 * Schedule the next job, called from v4l2_m2m_job_finish() or
 * v4l2_m2m_buf_done_and_job_finish().
 */
static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev,
				       struct v4l2_m2m_ctx *m2m_ctx)
{
	/*
	 * This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes.
	 */
	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);

	/*
	 * We might be running in atomic context,
	 * but the job must be run in non-atomic context.
	 */
	schedule_work(&m2m_dev->job_work);
}

/*
 * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or
 * v4l2_m2m_buf_done_and_job_finish().
 */
static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		dprintk("Called by an instance not currently running\n");
		return false;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;
	return true;
}

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;
	bool schedule_next;

	/*
	 * This function should not be used for drivers that support
	 * holding capture buffers. Those should use
	 * v4l2_m2m_buf_done_and_job_finish() instead.
	 */
	WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags &
		VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	if (schedule_next)
		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
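/*
 * Example (not part of the framework): the typical completion path of a
 * driver that does not hold capture buffers removes both buffers, marks
 * them done and then lets the core schedule the next job. A hedged sketch
 * (my_irq_handler, struct my_dev and struct my_ctx are hypothetical):
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *
 *		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *
 *		return IRQ_HANDLED;
 *	}
 */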
void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
				      struct v4l2_m2m_ctx *m2m_ctx,
				      enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	bool schedule_next = false;
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
	dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);

	if (WARN_ON(!src_buf || !dst_buf))
		goto unlock;
	dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	if (!dst_buf->is_held) {
		v4l2_m2m_dst_buf_remove(m2m_ctx);
		v4l2_m2m_buf_done(dst_buf, state);
	}
	/*
	 * If the request API is being used, returning the OUTPUT
	 * (src) buffer will wake-up any process waiting on the
	 * request file descriptor.
	 *
	 * Therefore, return the CAPTURE (dst) buffer first,
	 * to avoid signalling the request file descriptor
	 * before the CAPTURE buffer is done.
	 */
	v4l2_m2m_buf_done(src_buf, state);
	schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
unlock:
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	if (schedule_next)
		v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);

void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	struct v4l2_m2m_ctx *curr_ctx;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	m2m_dev->job_queue_flags |= QUEUE_PAUSED;
	curr_ctx = m2m_dev->curr_ctx;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	if (curr_ctx)
		wait_event(curr_ctx->finished,
			   !(curr_ctx->job_flags & TRANS_RUNNING));
}
EXPORT_SYMBOL(v4l2_m2m_suspend);

void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	m2m_dev->job_queue_flags &= ~QUEUE_PAUSED;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_resume);
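/*
 * Example (not part of the framework): system suspend/resume hooks of an
 * m2m driver can pause and resume the job queue around the hardware power
 * transition. A hedged sketch with hypothetical my_* names:
 *
 *	static int __maybe_unused my_suspend(struct device *dev)
 *	{
 *		struct my_dev *m2mdev = dev_get_drvdata(dev);
 *
 *		v4l2_m2m_suspend(m2mdev->m2m_dev);
 *		return 0;
 *	}
 *
 *	static int __maybe_unused my_resume(struct device *dev)
 *	{
 *		struct my_dev *m2mdev = dev_get_drvdata(dev);
 *
 *		v4l2_m2m_resume(m2mdev->m2m_dev);
 *		return 0;
 *	}
 */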
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	ret = vb2_reqbufs(vq, reqbufs);
	/* If count == 0, then the owner has released all buffers and is
	   no longer the owner of the queue. Otherwise we have an owner. */
	if (ret == 0)
		vq->owner = reqbufs->count ? file->private_data : NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

/*
 * This will add the LAST flag and mark the buffer management
 * state as stopped.
 * This is called when the last capture buffer must be flagged as LAST
 * in draining mode from the encoder/decoder driver buf_queue() callback
 * or from v4l2_update_last_buf_state() when a capture buffer is available.
 */
void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
			       struct vb2_v4l2_buffer *vbuf)
{
	vbuf->flags |= V4L2_BUF_FLAG_LAST;
	vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);

	v4l2_m2m_mark_stopped(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buffer_done);
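/*
 * Example (not part of the framework): a stateful decoder's buf_queue()
 * callback can pair v4l2_m2m_dst_buf_is_last() with
 * v4l2_m2m_last_buffer_done() to tag the first capture buffer queued
 * after a drain request. A hedged sketch (struct my_ctx and the
 * ctx->fh layout are hypothetical):
 *
 *	static void my_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *
 *		if (V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type) &&
 *		    vb2_is_streaming(vb->vb2_queue) &&
 *		    v4l2_m2m_dst_buf_is_last(ctx->fh.m2m_ctx)) {
 *			v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, vbuf);
 *			return;
 *		}
 *
 *		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 *	}
 */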
/* When stop command is issued, update buffer management state */
static int v4l2_update_last_buf_state(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct vb2_v4l2_buffer *next_dst_buf;

	if (m2m_ctx->is_draining)
		return -EBUSY;

	if (m2m_ctx->has_stopped)
		return 0;

	m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx);
	m2m_ctx->is_draining = true;

	/*
	 * The processing of the last output buffer queued before
	 * the STOP command is expected to mark the buffer management
	 * state as stopped with v4l2_m2m_mark_stopped().
	 */
	if (m2m_ctx->last_src_buf)
		return 0;

	/*
	 * In case the output queue is empty, try to mark the last capture
	 * buffer as LAST.
	 */
	next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
	if (!next_dst_buf) {
		/*
		 * Wait for the next queued one in encoder/decoder driver
		 * buf_queue() callback using the v4l2_m2m_dst_buf_is_last()
		 * helper or in v4l2_m2m_qbuf() if encoder/decoder is not yet
		 * streaming.
		 */
		m2m_ctx->next_buf_last = true;
		return 0;
	}

	v4l2_m2m_last_buffer_done(m2m_ctx, next_dst_buf);

	return 0;
}

/*
 * Updates the encoding/decoding buffer management state, should
 * be called from encoder/decoder drivers start_streaming()
 */
void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					   struct vb2_queue *q)
{
	/* If start streaming again, untag the last output buffer */
	if (V4L2_TYPE_IS_OUTPUT(q->type))
		m2m_ctx->last_src_buf = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_update_start_streaming_state);

/*
 * Updates the encoding/decoding buffer management state, should
 * be called from encoder/decoder driver stop_streaming()
 */
void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					  struct vb2_queue *q)
{
	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		/*
		 * If in draining state, either mark next dst buffer as
		 * done or flag next one to be marked as done either
		 * in encoder/decoder driver buf_queue() callback using
		 * the v4l2_m2m_dst_buf_is_last() helper or in v4l2_m2m_qbuf()
		 * if encoder/decoder is not yet streaming
		 */
		if (m2m_ctx->is_draining) {
			struct vb2_v4l2_buffer *next_dst_buf;

			m2m_ctx->last_src_buf = NULL;
			next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
			if (!next_dst_buf)
				m2m_ctx->next_buf_last = true;
			else
				v4l2_m2m_last_buffer_done(m2m_ctx,
							  next_dst_buf);
		}
	} else {
		v4l2_m2m_clear_state(m2m_ctx);
	}
}
EXPORT_SYMBOL_GPL(v4l2_m2m_update_stop_streaming_state);

static void v4l2_m2m_force_last_buf_done(struct v4l2_m2m_ctx *m2m_ctx,
					 struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	struct vb2_v4l2_buffer *vbuf;
	unsigned int i;

	if (WARN_ON(q->is_output))
		return;
	if (list_empty(&q->queued_list))
		return;

	vb = list_first_entry(&q->queued_list, struct vb2_buffer, queued_entry);
	for (i = 0; i < vb->num_planes; i++)
		vb2_set_plane_payload(vb, i, 0);

	/*
	 * Since the buffer hasn't been queued to the ready queue,
	 * mark it active and owned before marking it LAST and DONE
	 */
	vb->state = VB2_BUF_STATE_ACTIVE;
	atomic_inc(&q->owned_by_drv_count);

	vbuf = to_vb2_v4l2_buffer(vb);
	vbuf->field = V4L2_FIELD_NONE;

	v4l2_m2m_last_buffer_done(m2m_ctx, vbuf);
}

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	if (V4L2_TYPE_IS_CAPTURE(vq->type) &&
	    (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
		dprintk("%s: requests cannot be used with capture buffers\n",
			__func__);
		return -EPERM;
	}

	ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
	if (ret)
		return ret;

	/*
	 * If the capture queue is streaming, but streaming hasn't started
	 * on the device, but was asked to stop, mark the previously queued
	 * buffer as DONE with LAST flag since it won't be queued on the
	 * device.
	 */
	if (V4L2_TYPE_IS_CAPTURE(vq->type) &&
	    vb2_is_streaming(vq) && !vb2_start_streaming_called(vq) &&
	    (v4l2_m2m_has_stopped(m2m_ctx) || v4l2_m2m_dst_buf_is_last(m2m_ctx)))
		v4l2_m2m_force_last_buf_done(m2m_ctx, vq);
	else if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
		v4l2_m2m_try_schedule(m2m_ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs.
	 */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

static __poll_t v4l2_m2m_poll_for_data(struct file *file,
				       struct v4l2_m2m_ctx *m2m_ctx,
				       struct poll_table_struct *wait)
{
	struct vb2_queue *src_q, *dst_q;
	__poll_t rc = 0;
	unsigned long flags;

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || src_q->error ||
	     list_empty(&src_q->queued_list)) &&
	    (!dst_q->streaming || dst_q->error ||
	     list_empty(&dst_q->queued_list)))
		return EPOLLERR;

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		rc |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	/*
	 * If the last buffer was dequeued from the capture queue, signal
	 * userspace. DQBUF(CAPTURE) will return -EPIPE.
	 */
	if (!list_empty(&dst_q->done_list) || dst_q->last_buffer_dequeued)
		rc |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	return rc;
}

__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	struct vb2_queue *src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
	__poll_t req_events = poll_requested_events(wait);
	__poll_t rc = 0;

	/*
	 * poll_wait() MUST be called on the first invocation on all the
	 * potential queues of interest, even if we are not interested in their
	 * events during this first call. Failure to do so will result in the
	 * queue's events being ignored because the poll_table won't be capable
	 * of adding new wait queues thereafter.
	 */
	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);

	if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))
		rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait);

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		poll_wait(file, &fh->wait, wait);
		if (v4l2_event_pending(fh))
			rc |= EPOLLPRI;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

#ifndef CONFIG_MMU
unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags)
{
	struct v4l2_fh *fh = file->private_data;
	unsigned long offset = pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(fh->m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(fh->m2m_ctx);
		pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_get_unmapped_area(vq, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_get_unmapped_area);
#endif

#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
	media_devnode_remove(m2m_dev->intf_devnode);

	media_entity_remove_links(m2m_dev->source);
	media_entity_remove_links(&m2m_dev->sink);
	media_entity_remove_links(&m2m_dev->proc);
	media_device_unregister_entity(m2m_dev->source);
	media_device_unregister_entity(&m2m_dev->sink);
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->source->name);
	kfree(m2m_dev->sink.name);
	kfree(m2m_dev->proc.name);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);

static int v4l2_m2m_register_entity(struct media_device *mdev,
	struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
	struct video_device *vdev, int function)
{
	struct media_entity *entity;
	struct media_pad *pads;
	char *name;
	unsigned int len;
	int num_pads;
	int ret;

	switch (type) {
	case MEM2MEM_ENT_TYPE_SOURCE:
		entity = m2m_dev->source;
		pads = &m2m_dev->source_pad;
		pads[0].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_SINK:
		entity = &m2m_dev->sink;
		pads = &m2m_dev->sink_pad;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_PROC:
		entity = &m2m_dev->proc;
		pads = m2m_dev->proc_pads;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		pads[1].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 2;
		break;
	default:
		return -EINVAL;
	}

	entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
	if (type != MEM2MEM_ENT_TYPE_PROC) {
		entity->info.dev.major = VIDEO_MAJOR;
		entity->info.dev.minor = vdev->minor;
	}
	len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
	name = kmalloc(len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
	entity->name = name;
	entity->function = function;

	ret = media_entity_pads_init(entity, num_pads, pads);
	if (ret)
		return ret;
	ret = media_device_register_entity(mdev, entity);
	if (ret)
		return ret;

	return 0;
}

int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
		struct video_device *vdev, int function)
{
	struct media_device *mdev = vdev->v4l2_dev->mdev;
	struct media_link *link;
	int ret;

	if (!mdev)
		return 0;

	/* A memory-to-memory device consists of two DMA engine entities
	 * and one video processing entity.
	 * The DMA engine entities are linked to a V4L interface.
	 */

	/* Create the three entities with their pads */
	m2m_dev->source = &vdev->entity;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		return ret;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_PROC, vdev, function);
	if (ret)
		goto err_rel_entity0;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		goto err_rel_entity1;

	/* Connect the three entities */
	ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rel_entity2;

	ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rm_links0;

	/* Create video interface */
	m2m_dev->intf_devnode = media_devnode_create(mdev,
			MEDIA_INTF_T_V4L_VIDEO, 0,
			VIDEO_MAJOR, vdev->minor);
	if (!m2m_dev->intf_devnode) {
		ret = -ENOMEM;
		goto err_rm_links1;
	}

	/* Connect the two DMA engines to the interface */
	link = media_create_intf_link(m2m_dev->source,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}

	link = media_create_intf_link(&m2m_dev->sink,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_intf_link;
	}
	return 0;

err_rm_intf_link:
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
err_rm_devnode:
	media_devnode_remove(m2m_dev->intf_devnode);
err_rm_links1:
	media_entity_remove_links(&m2m_dev->sink);
err_rm_links0:
	media_entity_remove_links(&m2m_dev->proc);
	media_entity_remove_links(m2m_dev->source);
err_rel_entity2:
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->proc.name);
err_rel_entity1:
	media_device_unregister_entity(&m2m_dev->sink);
	kfree(m2m_dev->sink.name);
err_rel_entity0:
	media_device_unregister_entity(m2m_dev->source);
	kfree(m2m_dev->source->name);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
#endif
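/*
 * Example (not part of the framework): during probe a driver creates the
 * m2m device and, when a media device is present, registers the
 * source -> proc -> sink topology built above. A hedged sketch with
 * hypothetical my_* names and error labels:
 *
 *	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 *
 *	ret = video_register_device(&dev->vfd, VFL_TYPE_VIDEO, -1);
 *	if (ret)
 *		goto err_m2m_release;
 *
 *	ret = v4l2_m2m_register_media_controller(dev->m2m_dev, &dev->vfd,
 *						 MEDIA_ENT_F_PROC_VIDEO_SCALER);
 *	if (ret)
 *		goto err_unreg_video;
 */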
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);
	INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);

void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);

	if (ret)
		goto err;
	/*
	 * Both queues should use the same mutex to lock the m2m context.
	 * This lock is used in some v4l2_m2m_* helpers.
	 */
	if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
		ret = -EINVAL;
		goto err;
	}
	m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
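/*
 * Example (not part of the framework): drivers typically call
 * v4l2_m2m_ctx_init() from their file open() handler and configure both
 * vb2 queues in the queue_init callback, sharing one mutex between the
 * two queues as required above. A hedged sketch; my_* names, the
 * ctx->dev layout and the memops choice are hypothetical:
 *
 *	static int my_queue_init(void *priv, struct vb2_queue *src_vq,
 *				 struct vb2_queue *dst_vq)
 *	{
 *		struct my_ctx *ctx = priv;
 *		int ret;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *		src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *		src_vq->drv_priv = ctx;
 *		src_vq->ops = &my_qops;
 *		src_vq->mem_ops = &vb2_dma_contig_memops;
 *		src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 *		src_vq->lock = &ctx->dev->dev_mutex;
 *		src_vq->dev = ctx->dev->v4l2_dev.dev;
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *
 *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		// ... same fields and, importantly, the same ->lock ...
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 *	// in the driver's open():
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
 *	if (IS_ERR(ctx->fh.m2m_ctx))
 *		return PTR_ERR(ctx->fh.m2m_ctx);
 */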
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
		struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b = container_of(vbuf,
				struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
				struct vb2_v4l2_buffer *cap_vb,
				bool copy_frame_flags)
{
	u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

	if (copy_frame_flags)
		mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
			V4L2_BUF_FLAG_BFRAME;

	cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp;

	if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE)
		cap_vb->timecode = out_vb->timecode;
	cap_vb->field = out_vb->field;
	cap_vb->flags &= ~mask;
	cap_vb->flags |= out_vb->flags & mask;
	cap_vb->vb2_buf.copied_timestamp = 1;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata);

void v4l2_m2m_request_queue(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;
	struct v4l2_m2m_ctx *m2m_ctx = NULL;

	/*
	 * Queue all objects. Note that buffer objects are at the end of the
	 * objects list, after all other object types. Once buffer objects
	 * are queued, the driver might delete them immediately (if the driver
	 * processes the buffer at once), so we have to use
	 * list_for_each_entry_safe() to handle the case where the object we
	 * queue is deleted.
	 */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		struct v4l2_m2m_ctx *m2m_ctx_obj;
		struct vb2_buffer *vb;

		if (!obj->ops->queue)
			continue;

		if (vb2_request_object_is_buffer(obj)) {
			/* Sanity checks */
			vb = container_of(obj, struct vb2_buffer, req_obj);
			WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
			m2m_ctx_obj = container_of(vb->vb2_queue,
						   struct v4l2_m2m_ctx,
						   out_q_ctx.q);
			WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
			m2m_ctx = m2m_ctx_obj;
		}

		/*
		 * The buffer we queue here can in theory be immediately
		 * unbound, hence the use of list_for_each_entry_safe()
		 * above and why we call the queue op last.
		 */
		obj->ops->queue(obj);
	}

	WARN_ON(!m2m_ctx);

	if (m2m_ctx)
		v4l2_m2m_try_schedule(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
			       struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
			       struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
				struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);
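/*
 * Example (not part of the framework): drivers that keep their m2m context
 * in struct v4l2_fh can wire the ioctl helpers above straight into their
 * v4l2_ioctl_ops. A hedged sketch (my_* names and the format handlers are
 * hypothetical):
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		.vidioc_querycap	= my_querycap,
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_prepare_buf	= v4l2_m2m_ioctl_prepare_buf,
 *		.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
 *		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 */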
int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);

int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
				   struct v4l2_encoder_cmd *ec)
{
	if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
		return -EINVAL;

	ec->flags = 0;
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd);

int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
				   struct v4l2_decoder_cmd *dc)
{
	if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
		return -EINVAL;

	dc->flags = 0;

	if (dc->cmd == V4L2_DEC_CMD_STOP) {
		dc->stop.pts = 0;
	} else if (dc->cmd == V4L2_DEC_CMD_START) {
		dc->start.speed = 0;
		dc->start.format = V4L2_DEC_START_FMT_NONE;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);

/*
 * Updates the encoding state on ENC_CMD_STOP/ENC_CMD_START
 * Should be called from the encoder driver encoder_cmd() callback
 */
int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_encoder_cmd *ec)
{
	if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
		return -EINVAL;

	if (ec->cmd == V4L2_ENC_CMD_STOP)
		return v4l2_update_last_buf_state(m2m_ctx);

	if (m2m_ctx->is_draining)
		return -EBUSY;

	if (m2m_ctx->has_stopped)
		m2m_ctx->has_stopped = false;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_encoder_cmd);

/*
 * Updates the decoding state on DEC_CMD_STOP/DEC_CMD_START
 * Should be called from the decoder driver decoder_cmd() callback
 */
int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_decoder_cmd *dc)
{
	if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
		return -EINVAL;

	if (dc->cmd == V4L2_DEC_CMD_STOP)
		return v4l2_update_last_buf_state(m2m_ctx);

	if (m2m_ctx->is_draining)
		return -EBUSY;

	if (m2m_ctx->has_stopped)
		m2m_ctx->has_stopped = false;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_decoder_cmd);

int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv,
			       struct v4l2_encoder_cmd *ec)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_encoder_cmd);

int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv,
			       struct v4l2_decoder_cmd *dc)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_decoder_cmd);

int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
					     struct v4l2_decoder_cmd *dc)
{
	if (dc->cmd != V4L2_DEC_CMD_FLUSH)
		return -EINVAL;

	dc->flags = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd);

int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
					 struct v4l2_decoder_cmd *dc)
{
	struct v4l2_fh *fh = file->private_data;
	struct vb2_v4l2_buffer *out_vb, *cap_vb;
	struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev;
	unsigned long flags;
	int ret;

	ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx);
	cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx);

	/*
	 * If there is an out buffer pending, then clear any HOLD flag.
	 *
	 * By clearing this flag we ensure that when this output
	 * buffer is processed any held capture buffer will be released.
	 */
	if (out_vb) {
		out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	} else if (cap_vb && cap_vb->is_held) {
		/*
		 * If there were no output buffers, but there is a
		 * capture buffer that is held, then release that
		 * buffer.
		 */
		cap_vb->is_held = false;
		v4l2_m2m_dst_buf_remove(fh->m2m_ctx);
		v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd);

/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queue.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	__poll_t ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
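/*
 * Example (not part of the framework): since the fop helpers assume one
 * shared lock for both queues (see the comment above), they are usually
 * plugged into v4l2_file_operations directly. A hedged sketch
 * (my_open/my_release are hypothetical):
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */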