Lines matching +full:wait +full:-queue in drivers/media/v4l2-core/v4l2-mem2mem.c

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Memory-to-memory device framework for Video for Linux 2 and vb2.
8 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
16 #include <media/media-device.h>
17 #include <media/videobuf2-v4l2.h>
18 #include <media/v4l2-mem2mem.h>
19 #include <media/v4l2-dev.h>
20 #include <media/v4l2-device.h>
21 #include <media/v4l2-fh.h>
22 #include <media/v4l2-event.h>
46 /* The job queue is not running new jobs */
50 /* Offset base for buffers on the destination queue - used to distinguish
51 * between source and destination buffers when mmapping - they receive the same
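The destination-queue bias makes a single mmap offset space serve both queues. A minimal sketch of the idea; the value below matches current kernel sources, but treat the define as illustrative, and note that offset_is_capture() is a hypothetical helper, not part of the framework:

	/* Capture (destination) buffers get this base added to their mmap
	 * offsets so the mmap()/get_unmapped_area() handlers can tell the
	 * two queues apart; see v4l2_m2m_adjust_mem_offset() below. */
	#define DST_QUEUE_OFF_BASE	(1 << 30)

	static inline bool offset_is_capture(unsigned long offset)
	{
		return offset >= DST_QUEUE_OFF_BASE;
	}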
68 * struct v4l2_m2m_dev - per-device context
91 * @job_queue_flags: flags of the queue status, %QUEUE_PAUSED.
118 return &m2m_ctx->out_q_ctx; in get_queue_ctx()
120 return &m2m_ctx->cap_q_ctx; in get_queue_ctx()
132 return &q_ctx->q; in v4l2_m2m_get_vq()
141 spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); in v4l2_m2m_next_buf()
143 if (list_empty(&q_ctx->rdy_queue)) { in v4l2_m2m_next_buf()
144 spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); in v4l2_m2m_next_buf()
148 b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list); in v4l2_m2m_next_buf()
149 spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); in v4l2_m2m_next_buf()
150 return &b->vb; in v4l2_m2m_next_buf()
159 spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); in v4l2_m2m_last_buf()
161 if (list_empty(&q_ctx->rdy_queue)) { in v4l2_m2m_last_buf()
162 spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); in v4l2_m2m_last_buf()
166 b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list); in v4l2_m2m_last_buf()
167 spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); in v4l2_m2m_last_buf()
168 return &b->vb; in v4l2_m2m_last_buf()
177 spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); in v4l2_m2m_buf_remove()
178 if (list_empty(&q_ctx->rdy_queue)) { in v4l2_m2m_buf_remove()
179 spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); in v4l2_m2m_buf_remove()
182 b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list); in v4l2_m2m_buf_remove()
183 list_del(&b->list); in v4l2_m2m_buf_remove()
184 q_ctx->num_rdy--; in v4l2_m2m_buf_remove()
185 spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); in v4l2_m2m_buf_remove()
187 return &b->vb; in v4l2_m2m_buf_remove()
197 spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); in v4l2_m2m_buf_remove_by_buf()
199 list_del(&b->list); in v4l2_m2m_buf_remove_by_buf()
200 q_ctx->num_rdy--; in v4l2_m2m_buf_remove_by_buf()
201 spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); in v4l2_m2m_buf_remove_by_buf()
213 spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); in v4l2_m2m_buf_remove_by_idx()
214 list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) { in v4l2_m2m_buf_remove_by_idx()
215 if (b->vb.vb2_buf.index == idx) { in v4l2_m2m_buf_remove_by_idx()
216 list_del(&b->list); in v4l2_m2m_buf_remove_by_idx()
217 q_ctx->num_rdy--; in v4l2_m2m_buf_remove_by_idx()
218 ret = &b->vb; in v4l2_m2m_buf_remove_by_idx()
222 spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); in v4l2_m2m_buf_remove_by_idx()
237 spin_lock_irqsave(&m2m_dev->job_spinlock, flags); in v4l2_m2m_get_curr_priv()
238 if (m2m_dev->curr_ctx) in v4l2_m2m_get_curr_priv()
239 ret = m2m_dev->curr_ctx->priv; in v4l2_m2m_get_curr_priv()
240 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_get_curr_priv()
247 * v4l2_m2m_try_run() - select next job to perform and run it if possible
248 * @m2m_dev: per-device context
259 spin_lock_irqsave(&m2m_dev->job_spinlock, flags); in v4l2_m2m_try_run()
260 if (m2m_dev->curr_ctx) { in v4l2_m2m_try_run()
261 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_try_run()
266 if (list_empty(&m2m_dev->job_queue)) { in v4l2_m2m_try_run()
267 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_try_run()
272 if (m2m_dev->job_queue_flags & QUEUE_PAUSED) { in v4l2_m2m_try_run()
273 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_try_run()
278 m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue, in v4l2_m2m_try_run()
279 struct v4l2_m2m_ctx, queue); in v4l2_m2m_try_run()
280 m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING; in v4l2_m2m_try_run()
281 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_try_run()
283 dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx); in v4l2_m2m_try_run()
284 m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv); in v4l2_m2m_try_run()
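v4l2_m2m_try_run() hands the winning context to the driver's mandatory device_run op. A hedged sketch of what such an op typically looks like; all my_* names are hypothetical:

	#include <media/v4l2-mem2mem.h>

	/* Runs exactly one transaction; completion is reported later,
	 * usually from the interrupt handler, via v4l2_m2m_job_finish(). */
	static void my_device_run(void *priv)
	{
		struct my_ctx *ctx = priv;		/* hypothetical */
		struct vb2_v4l2_buffer *src, *dst;

		src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
		dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

		/* Carry timestamp/timecode/frame flags across, as
		 * v4l2_m2m_buf_copy_metadata() below implements. */
		v4l2_m2m_buf_copy_metadata(src, dst, true);

		my_hw_program_and_start(ctx, src, dst);	/* hypothetical */
	}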
288 * __v4l2_m2m_try_queue() - queue a job
292 * Check if this context is ready to queue a job.
304 if (!m2m_ctx->out_q_ctx.q.streaming || in __v4l2_m2m_try_queue()
305 (!m2m_ctx->cap_q_ctx.q.streaming && !m2m_ctx->ignore_cap_streaming)) { in __v4l2_m2m_try_queue()
306 if (!m2m_ctx->ignore_cap_streaming) in __v4l2_m2m_try_queue()
309 dprintk("Streaming needs to be on for the OUTPUT queue\n"); in __v4l2_m2m_try_queue()
313 spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job); in __v4l2_m2m_try_queue()
316 if (m2m_ctx->job_flags & TRANS_ABORT) { in __v4l2_m2m_try_queue()
321 if (m2m_ctx->job_flags & TRANS_QUEUED) { in __v4l2_m2m_try_queue()
322 dprintk("On job queue already\n"); in __v4l2_m2m_try_queue()
328 if (!src && !m2m_ctx->out_q_ctx.buffered) { in __v4l2_m2m_try_queue()
332 if (!dst && !m2m_ctx->cap_q_ctx.buffered) { in __v4l2_m2m_try_queue()
337 m2m_ctx->new_frame = true; in __v4l2_m2m_try_queue()
339 if (src && dst && dst->is_held && in __v4l2_m2m_try_queue()
340 dst->vb2_buf.copied_timestamp && in __v4l2_m2m_try_queue()
341 dst->vb2_buf.timestamp != src->vb2_buf.timestamp) { in __v4l2_m2m_try_queue()
343 dst->is_held = false; in __v4l2_m2m_try_queue()
348 if (!dst && !m2m_ctx->cap_q_ctx.buffered) { in __v4l2_m2m_try_queue()
354 if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags & in __v4l2_m2m_try_queue()
356 m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp || in __v4l2_m2m_try_queue()
357 dst->vb2_buf.timestamp != src->vb2_buf.timestamp; in __v4l2_m2m_try_queue()
359 if (m2m_ctx->has_stopped) { in __v4l2_m2m_try_queue()
364 if (m2m_dev->m2m_ops->job_ready && in __v4l2_m2m_try_queue()
365     !m2m_dev->m2m_ops->job_ready(m2m_ctx->priv)) { in __v4l2_m2m_try_queue()
370 list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue); in __v4l2_m2m_try_queue()
371 m2m_ctx->job_flags |= TRANS_QUEUED; in __v4l2_m2m_try_queue()
374 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); in __v4l2_m2m_try_queue()
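The optional job_ready op seen above lets a driver veto scheduling until it has everything it needs. A minimal sketch, assuming a hypothetical device that consumes several OUTPUT buffers per job:

	/* Nonzero return means the context may be queued as a job. */
	static int my_job_ready(void *priv)
	{
		struct my_ctx *ctx = priv;	/* hypothetical */

		return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >=
		       MY_SRC_BUFS_PER_JOB;	/* hypothetical constant */
	}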
378 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
381 * Check if this context is ready to queue a job. If suitable,
391 struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev; in v4l2_m2m_try_schedule()
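Drivers rarely call the scheduler directly: the framework invokes v4l2_m2m_try_schedule() from its qbuf/streamon paths, and the usual driver contribution is a vb2 buf_queue op that feeds the ready queue. A hedged sketch with hypothetical my_* names:

	static void my_buf_queue(struct vb2_buffer *vb)
	{
		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

		/* Adds the buffer to the per-queue rdy_queue; the core
		 * attempts scheduling afterwards. */
		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
	}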
399 * v4l2_m2m_device_run_work() - run pending jobs for the context
411 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
424 m2m_dev = m2m_ctx->m2m_dev; in v4l2_m2m_cancel_job()
425 spin_lock_irqsave(&m2m_dev->job_spinlock, flags); in v4l2_m2m_cancel_job()
427 m2m_ctx->job_flags |= TRANS_ABORT; in v4l2_m2m_cancel_job()
428 if (m2m_ctx->job_flags & TRANS_RUNNING) { in v4l2_m2m_cancel_job()
429 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_cancel_job()
430 if (m2m_dev->m2m_ops->job_abort) in v4l2_m2m_cancel_job()
431 m2m_dev->m2m_ops->job_abort(m2m_ctx->priv); in v4l2_m2m_cancel_job()
432 dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx); in v4l2_m2m_cancel_job()
433 wait_event(m2m_ctx->finished, in v4l2_m2m_cancel_job()
434 !(m2m_ctx->job_flags & TRANS_RUNNING)); in v4l2_m2m_cancel_job()
435 } else if (m2m_ctx->job_flags & TRANS_QUEUED) { in v4l2_m2m_cancel_job()
436 list_del(&m2m_ctx->queue); in v4l2_m2m_cancel_job()
437 m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING); in v4l2_m2m_cancel_job()
438 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_cancel_job()
439 dprintk("m2m_ctx: %p had been on queue and was removed\n", in v4l2_m2m_cancel_job()
442 /* Do nothing, was not on queue/running */ in v4l2_m2m_cancel_job()
443 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_cancel_job()
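When the job is already running, cancellation calls the driver's optional job_abort op and then sleeps on ctx->finished. job_abort must only request an early stop; the job still completes through v4l2_m2m_job_finish(). A hedged sketch, my_* names hypothetical:

	/* Must not block and must not call v4l2_m2m_job_finish() itself. */
	static void my_job_abort(void *priv)
	{
		struct my_ctx *ctx = priv;

		ctx->aborting = true;		/* hypothetical flag, checked in IRQ */
		my_hw_request_stop(ctx);	/* hypothetical */
	}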
463 * but the job must be run in non-atomic context. in v4l2_m2m_schedule_next_job()
465 schedule_work(&m2m_dev->job_work); in v4l2_m2m_schedule_next_job()
475 if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) { in _v4l2_m2m_job_finish()
480 list_del(&m2m_dev->curr_ctx->queue); in _v4l2_m2m_job_finish()
481 m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING); in _v4l2_m2m_job_finish()
482 wake_up(&m2m_dev->curr_ctx->finished); in _v4l2_m2m_job_finish()
483 m2m_dev->curr_ctx = NULL; in _v4l2_m2m_job_finish()
498 WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags & in v4l2_m2m_job_finish()
500 spin_lock_irqsave(&m2m_dev->job_spinlock, flags); in v4l2_m2m_job_finish()
502 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_job_finish()
517 spin_lock_irqsave(&m2m_dev->job_spinlock, flags); in v4l2_m2m_buf_done_and_job_finish()
523 dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF; in v4l2_m2m_buf_done_and_job_finish()
524 if (!dst_buf->is_held) { in v4l2_m2m_buf_done_and_job_finish()
530 * (src) buffer will wake up any process waiting on the in v4l2_m2m_buf_done_and_job_finish()
540 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_buf_done_and_job_finish()
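Completion normally happens in the driver's interrupt handler: return both buffers, then release the job so the next one can run. A hedged sketch (a hold-capture-capable driver would use v4l2_m2m_buf_done_and_job_finish() in place of the remove/done pair); my_* names hypothetical:

	#include <linux/interrupt.h>

	static irqreturn_t my_irq_handler(int irq, void *data)
	{
		struct my_dev *dev = data;	/* hypothetical */
		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
		struct vb2_v4l2_buffer *src, *dst;

		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

		vb2_buffer_done(&src->vb2_buf, VB2_BUF_STATE_DONE);
		vb2_buffer_done(&dst->vb2_buf, VB2_BUF_STATE_DONE);

		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
		return IRQ_HANDLED;
	}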
552 spin_lock_irqsave(&m2m_dev->job_spinlock, flags); in v4l2_m2m_suspend()
553 m2m_dev->job_queue_flags |= QUEUE_PAUSED; in v4l2_m2m_suspend()
554 curr_ctx = m2m_dev->curr_ctx; in v4l2_m2m_suspend()
555 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_suspend()
558 wait_event(curr_ctx->finished, in v4l2_m2m_suspend()
559 !(curr_ctx->job_flags & TRANS_RUNNING)); in v4l2_m2m_suspend()
567 spin_lock_irqsave(&m2m_dev->job_spinlock, flags); in v4l2_m2m_resume()
568 m2m_dev->job_queue_flags &= ~QUEUE_PAUSED; in v4l2_m2m_resume()
569 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_resume()
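v4l2_m2m_suspend() pauses the job queue and waits out a running job; v4l2_m2m_resume() clears QUEUE_PAUSED and re-runs scheduling. A hedged sketch of system PM hooks built on them (my_* names hypothetical; wire these into the driver's dev_pm_ops):

	static int my_pm_suspend(struct device *dev)
	{
		struct my_dev *mydev = dev_get_drvdata(dev);

		v4l2_m2m_suspend(mydev->m2m_dev);	/* blocks until idle */
		return 0;
	}

	static int my_pm_resume(struct device *dev)
	{
		struct my_dev *mydev = dev_get_drvdata(dev);

		v4l2_m2m_resume(mydev->m2m_dev);
		return 0;
	}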
581 vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type); in v4l2_m2m_reqbufs()
584 is no longer the owner of the queue. Otherwise we have an owner. */ in v4l2_m2m_reqbufs()
586 vq->owner = reqbufs->count ? file->private_data : NULL; in v4l2_m2m_reqbufs()
595 /* Adjust MMAP memory offsets for the CAPTURE queue */ in v4l2_m2m_adjust_mem_offset()
596 if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) { in v4l2_m2m_adjust_mem_offset()
597 if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) { in v4l2_m2m_adjust_mem_offset()
600 for (i = 0; i < buf->length; ++i) in v4l2_m2m_adjust_mem_offset()
601 buf->m.planes[i].m.mem_offset in v4l2_m2m_adjust_mem_offset()
604 buf->m.offset += DST_QUEUE_OFF_BASE; in v4l2_m2m_adjust_mem_offset()
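From userspace the bias is transparent: the offset QUERYBUF returns for a CAPTURE buffer is passed to mmap() unchanged. A sketch under that assumption:

	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/videodev2.h>

	static void *map_capture_buffer(int fd, unsigned int index, size_t *len)
	{
		struct v4l2_buffer buf = {
			.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
			.memory = V4L2_MEMORY_MMAP,
			.index  = index,
		};

		if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0)
			return MAP_FAILED;

		*len = buf.length;
		/* buf.m.offset already carries the destination-queue bias. */
		return mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, buf.m.offset);
	}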
615 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); in v4l2_m2m_querybuf()
620 /* Adjust MMAP memory offsets for the CAPTURE queue */ in v4l2_m2m_querybuf()
637 vbuf->flags |= V4L2_BUF_FLAG_LAST; in v4l2_m2m_last_buffer_done()
638 vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE); in v4l2_m2m_last_buffer_done()
649 if (m2m_ctx->is_draining) in v4l2_update_last_buf_state()
650 return -EBUSY; in v4l2_update_last_buf_state()
652 if (m2m_ctx->has_stopped) in v4l2_update_last_buf_state()
655 m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx); in v4l2_update_last_buf_state()
656 m2m_ctx->is_draining = true; in v4l2_update_last_buf_state()
663 if (m2m_ctx->last_src_buf) in v4l2_update_last_buf_state()
667 * In case the output queue is empty, try to mark the last capture in v4l2_update_last_buf_state()
673 * Wait for the next queued one in encoder/decoder driver in v4l2_update_last_buf_state()
678 m2m_ctx->next_buf_last = true; in v4l2_update_last_buf_state()
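This drain state machine is what userspace drives with the STOP command: after issuing it, CAPTURE buffers are dequeued until one carries V4L2_BUF_FLAG_LAST, after which DQBUF fails with EPIPE. A hedged userspace sketch of that sequence:

	static void drain_decoder(int fd)
	{
		struct v4l2_decoder_cmd cmd = { .cmd = V4L2_DEC_CMD_STOP };

		ioctl(fd, VIDIOC_DECODER_CMD, &cmd);

		for (;;) {
			struct v4l2_buffer buf = {
				.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
				.memory = V4L2_MEMORY_MMAP,
			};

			if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)
				break;	/* e.g. EPIPE once the LAST buffer is gone */
			if (buf.flags & V4L2_BUF_FLAG_LAST)
				break;	/* drain complete */
		}
	}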
695 if (V4L2_TYPE_IS_OUTPUT(q->type)) in v4l2_m2m_update_start_streaming_state()
696 m2m_ctx->last_src_buf = NULL; in v4l2_m2m_update_start_streaming_state()
707 if (V4L2_TYPE_IS_OUTPUT(q->type)) { in v4l2_m2m_update_stop_streaming_state()
715 if (m2m_ctx->is_draining) { in v4l2_m2m_update_stop_streaming_state()
718 m2m_ctx->last_src_buf = NULL; in v4l2_m2m_update_stop_streaming_state()
721 m2m_ctx->next_buf_last = true; in v4l2_m2m_update_stop_streaming_state()
739 if (WARN_ON(q->is_output)) in v4l2_m2m_force_last_buf_done()
741 if (list_empty(&q->queued_list)) in v4l2_m2m_force_last_buf_done()
744 vb = list_first_entry(&q->queued_list, struct vb2_buffer, queued_entry); in v4l2_m2m_force_last_buf_done()
745 for (i = 0; i < vb->num_planes; i++) in v4l2_m2m_force_last_buf_done()
749 * Since the buffer hasn't been queued to the ready queue, in v4l2_m2m_force_last_buf_done()
752 vb->state = VB2_BUF_STATE_ACTIVE; in v4l2_m2m_force_last_buf_done()
753 atomic_inc(&q->owned_by_drv_count); in v4l2_m2m_force_last_buf_done()
756 vbuf->field = V4L2_FIELD_NONE; in v4l2_m2m_force_last_buf_done()
768 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); in v4l2_m2m_qbuf()
769 if (V4L2_TYPE_IS_CAPTURE(vq->type) && in v4l2_m2m_qbuf()
770 (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) { in v4l2_m2m_qbuf()
773 return -EPERM; in v4l2_m2m_qbuf()
776 ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf); in v4l2_m2m_qbuf()
780 /* Adjust MMAP memory offsets for the CAPTURE queue */ in v4l2_m2m_qbuf()
784 * If the capture queue is streaming, but streaming hasn't started in v4l2_m2m_qbuf()
789 if (V4L2_TYPE_IS_CAPTURE(vq->type) && in v4l2_m2m_qbuf()
793 else if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST)) in v4l2_m2m_qbuf()
806 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); in v4l2_m2m_dqbuf()
807 ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK); in v4l2_m2m_dqbuf()
811 /* Adjust MMAP memory offsets for the CAPTURE queue */ in v4l2_m2m_dqbuf()
825 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); in v4l2_m2m_prepare_buf()
826 ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf); in v4l2_m2m_prepare_buf()
830 /* Adjust MMAP memory offsets for the CAPTURE queue */ in v4l2_m2m_prepare_buf()
842 vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type); in v4l2_m2m_create_bufs()
852 vq = v4l2_m2m_get_vq(m2m_ctx, eb->type); in v4l2_m2m_expbuf()
880 /* wait until the current context is dequeued from job_queue */ in v4l2_m2m_streamoff()
884 ret = vb2_streamoff(&q_ctx->q, type); in v4l2_m2m_streamoff()
888 m2m_dev = m2m_ctx->m2m_dev; in v4l2_m2m_streamoff()
889 spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job); in v4l2_m2m_streamoff()
890 /* We should not be scheduled anymore, since we're dropping a queue. */ in v4l2_m2m_streamoff()
891 if (m2m_ctx->job_flags & TRANS_QUEUED) in v4l2_m2m_streamoff()
892 list_del(&m2m_ctx->queue); in v4l2_m2m_streamoff()
893 m2m_ctx->job_flags = 0; in v4l2_m2m_streamoff()
895 spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); in v4l2_m2m_streamoff()
896 /* Drop queue, since streamoff returns device to the same state as after in v4l2_m2m_streamoff()
898 INIT_LIST_HEAD(&q_ctx->rdy_queue); in v4l2_m2m_streamoff()
899 q_ctx->num_rdy = 0; in v4l2_m2m_streamoff()
900 spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); in v4l2_m2m_streamoff()
902 if (m2m_dev->curr_ctx == m2m_ctx) { in v4l2_m2m_streamoff()
903 m2m_dev->curr_ctx = NULL; in v4l2_m2m_streamoff()
904 wake_up(&m2m_ctx->finished); in v4l2_m2m_streamoff()
906 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); in v4l2_m2m_streamoff()
914 struct poll_table_struct *wait) in v4l2_m2m_poll_for_data() argument
928 if ((!vb2_is_streaming(src_q) || src_q->error || in v4l2_m2m_poll_for_data()
929 list_empty(&src_q->queued_list)) && in v4l2_m2m_poll_for_data()
930 (!vb2_is_streaming(dst_q) || dst_q->error || in v4l2_m2m_poll_for_data()
931 (list_empty(&dst_q->queued_list) && !dst_q->last_buffer_dequeued))) in v4l2_m2m_poll_for_data()
934 spin_lock_irqsave(&src_q->done_lock, flags); in v4l2_m2m_poll_for_data()
935 if (!list_empty(&src_q->done_list)) in v4l2_m2m_poll_for_data()
937 spin_unlock_irqrestore(&src_q->done_lock, flags); in v4l2_m2m_poll_for_data()
939 spin_lock_irqsave(&dst_q->done_lock, flags); in v4l2_m2m_poll_for_data()
941 * If the last buffer was dequeued from the capture queue, signal in v4l2_m2m_poll_for_data()
942 * userspace. DQBUF(CAPTURE) will return -EPIPE. in v4l2_m2m_poll_for_data()
944 if (!list_empty(&dst_q->done_list) || dst_q->last_buffer_dequeued) in v4l2_m2m_poll_for_data()
946 spin_unlock_irqrestore(&dst_q->done_lock, flags); in v4l2_m2m_poll_for_data()
952 struct poll_table_struct *wait) in v4l2_m2m_poll() argument
957 __poll_t req_events = poll_requested_events(wait); in v4l2_m2m_poll()
964 * queue's events to be ignored because the poll_table won't be capable in v4l2_m2m_poll()
965 * of adding new wait queues thereafter. in v4l2_m2m_poll()
967 poll_wait(file, &src_q->done_wq, wait); in v4l2_m2m_poll()
968 poll_wait(file, &dst_q->done_wq, wait); in v4l2_m2m_poll()
971 rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait); in v4l2_m2m_poll()
973 if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) { in v4l2_m2m_poll()
974 struct v4l2_fh *fh = file->private_data; in v4l2_m2m_poll()
976 poll_wait(file, &fh->wait, wait); in v4l2_m2m_poll()
988 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; in v4l2_m2m_mmap()
995 vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT); in v4l2_m2m_mmap()
1007 struct v4l2_fh *fh = file->private_data; in v4l2_m2m_get_unmapped_area()
1012 vq = v4l2_m2m_get_src_vq(fh->m2m_ctx); in v4l2_m2m_get_unmapped_area()
1014 vq = v4l2_m2m_get_dst_vq(fh->m2m_ctx); in v4l2_m2m_get_unmapped_area()
1015 pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT); in v4l2_m2m_get_unmapped_area()
1026 media_remove_intf_links(&m2m_dev->intf_devnode->intf); in v4l2_m2m_unregister_media_controller()
1027 media_devnode_remove(m2m_dev->intf_devnode); in v4l2_m2m_unregister_media_controller()
1029 media_entity_remove_links(m2m_dev->source); in v4l2_m2m_unregister_media_controller()
1030 media_entity_remove_links(&m2m_dev->sink); in v4l2_m2m_unregister_media_controller()
1031 media_entity_remove_links(&m2m_dev->proc); in v4l2_m2m_unregister_media_controller()
1032 media_device_unregister_entity(m2m_dev->source); in v4l2_m2m_unregister_media_controller()
1033 media_device_unregister_entity(&m2m_dev->sink); in v4l2_m2m_unregister_media_controller()
1034 media_device_unregister_entity(&m2m_dev->proc); in v4l2_m2m_unregister_media_controller()
1035 kfree(m2m_dev->source->name); in v4l2_m2m_unregister_media_controller()
1036 kfree(m2m_dev->sink.name); in v4l2_m2m_unregister_media_controller()
1037 kfree(m2m_dev->proc.name); in v4l2_m2m_unregister_media_controller()
1054 entity = m2m_dev->source; in v4l2_m2m_register_entity()
1055 pads = &m2m_dev->source_pad; in v4l2_m2m_register_entity()
1060 entity = &m2m_dev->sink; in v4l2_m2m_register_entity()
1061 pads = &m2m_dev->sink_pad; in v4l2_m2m_register_entity()
1066 entity = &m2m_dev->proc; in v4l2_m2m_register_entity()
1067 pads = m2m_dev->proc_pads; in v4l2_m2m_register_entity()
1073 return -EINVAL; in v4l2_m2m_register_entity()
1076 entity->obj_type = MEDIA_ENTITY_TYPE_BASE; in v4l2_m2m_register_entity()
1078 entity->info.dev.major = VIDEO_MAJOR; in v4l2_m2m_register_entity()
1079 entity->info.dev.minor = vdev->minor; in v4l2_m2m_register_entity()
1081 len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]); in v4l2_m2m_register_entity()
1084 return -ENOMEM; in v4l2_m2m_register_entity()
1085 snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]); in v4l2_m2m_register_entity()
1086 entity->name = name; in v4l2_m2m_register_entity()
1087 entity->function = function; in v4l2_m2m_register_entity()
1091 kfree(entity->name); in v4l2_m2m_register_entity()
1092 entity->name = NULL; in v4l2_m2m_register_entity()
1097 kfree(entity->name); in v4l2_m2m_register_entity()
1098 entity->name = NULL; in v4l2_m2m_register_entity()
1108 struct media_device *mdev = vdev->v4l2_dev->mdev; in v4l2_m2m_register_media_controller()
1115 /* A memory-to-memory device consists of two in v4l2_m2m_register_media_controller()
1121 m2m_dev->source = &vdev->entity; in v4l2_m2m_register_media_controller()
1136 ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0, in v4l2_m2m_register_media_controller()
1141 ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0, in v4l2_m2m_register_media_controller()
1147 m2m_dev->intf_devnode = media_devnode_create(mdev, in v4l2_m2m_register_media_controller()
1149 VIDEO_MAJOR, vdev->minor); in v4l2_m2m_register_media_controller()
1150 if (!m2m_dev->intf_devnode) { in v4l2_m2m_register_media_controller()
1151 ret = -ENOMEM; in v4l2_m2m_register_media_controller()
1156 link = media_create_intf_link(m2m_dev->source, in v4l2_m2m_register_media_controller()
1157 &m2m_dev->intf_devnode->intf, in v4l2_m2m_register_media_controller()
1160 ret = -ENOMEM; in v4l2_m2m_register_media_controller()
1164 link = media_create_intf_link(&m2m_dev->sink, in v4l2_m2m_register_media_controller()
1165 &m2m_dev->intf_devnode->intf, in v4l2_m2m_register_media_controller()
1168 ret = -ENOMEM; in v4l2_m2m_register_media_controller()
1174 media_remove_intf_links(&m2m_dev->intf_devnode->intf); in v4l2_m2m_register_media_controller()
1176 media_devnode_remove(m2m_dev->intf_devnode); in v4l2_m2m_register_media_controller()
1178 media_entity_remove_links(&m2m_dev->sink); in v4l2_m2m_register_media_controller()
1180 media_entity_remove_links(&m2m_dev->proc); in v4l2_m2m_register_media_controller()
1181 media_entity_remove_links(m2m_dev->source); in v4l2_m2m_register_media_controller()
1183 media_device_unregister_entity(&m2m_dev->proc); in v4l2_m2m_register_media_controller()
1184 kfree(m2m_dev->proc.name); in v4l2_m2m_register_media_controller()
1186 media_device_unregister_entity(&m2m_dev->sink); in v4l2_m2m_register_media_controller()
1187 kfree(m2m_dev->sink.name); in v4l2_m2m_register_media_controller()
1189 media_device_unregister_entity(m2m_dev->source); in v4l2_m2m_register_media_controller()
1190 kfree(m2m_dev->source->name); in v4l2_m2m_register_media_controller()
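A driver opts into this source -> proc -> sink graph with a single registration call; a hedged fragment, assuming a scaler-type device and a hypothetical unwind label:

	ret = v4l2_m2m_register_media_controller(dev->m2m_dev, dev->vfd,
						 MEDIA_ENT_F_PROC_VIDEO_SCALER);
	if (ret)
		goto err_m2m_release;	/* hypothetical */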
1201 if (!m2m_ops || WARN_ON(!m2m_ops->device_run)) in v4l2_m2m_init()
1202 return ERR_PTR(-EINVAL); in v4l2_m2m_init()
1206 return ERR_PTR(-ENOMEM); in v4l2_m2m_init()
1208 m2m_dev->curr_ctx = NULL; in v4l2_m2m_init()
1209 m2m_dev->m2m_ops = m2m_ops; in v4l2_m2m_init()
1210 INIT_LIST_HEAD(&m2m_dev->job_queue); in v4l2_m2m_init()
1211 spin_lock_init(&m2m_dev->job_spinlock); in v4l2_m2m_init()
1212 INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work); in v4l2_m2m_init()
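Probe-time wiring is symmetric to the ops above; a hedged sketch reusing the hypothetical my_* ops from the earlier sketches (only device_run is mandatory):

	static const struct v4l2_m2m_ops my_m2m_ops = {
		.device_run	= my_device_run,
		.job_ready	= my_job_ready,		/* optional */
		.job_abort	= my_job_abort,		/* optional */
	};

	/* In probe(): */
	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
	if (IS_ERR(dev->m2m_dev))
		return PTR_ERR(dev->m2m_dev);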
1234 return ERR_PTR(-ENOMEM); in v4l2_m2m_ctx_init()
1236 m2m_ctx->priv = drv_priv; in v4l2_m2m_ctx_init()
1237 m2m_ctx->m2m_dev = m2m_dev; in v4l2_m2m_ctx_init()
1238 init_waitqueue_head(&m2m_ctx->finished); in v4l2_m2m_ctx_init()
1240 out_q_ctx = &m2m_ctx->out_q_ctx; in v4l2_m2m_ctx_init()
1241 cap_q_ctx = &m2m_ctx->cap_q_ctx; in v4l2_m2m_ctx_init()
1243 INIT_LIST_HEAD(&out_q_ctx->rdy_queue); in v4l2_m2m_ctx_init()
1244 INIT_LIST_HEAD(&cap_q_ctx->rdy_queue); in v4l2_m2m_ctx_init()
1245 spin_lock_init(&out_q_ctx->rdy_spinlock); in v4l2_m2m_ctx_init()
1246 spin_lock_init(&cap_q_ctx->rdy_spinlock); in v4l2_m2m_ctx_init()
1248 INIT_LIST_HEAD(&m2m_ctx->queue); in v4l2_m2m_ctx_init()
1250 ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q); in v4l2_m2m_ctx_init()
1258 if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) { in v4l2_m2m_ctx_init()
1259 ret = -EINVAL; in v4l2_m2m_ctx_init()
1262 m2m_ctx->q_lock = out_q_ctx->q.lock; in v4l2_m2m_ctx_init()
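Each file handle gets its own context, and queue_init() must give both vb2 queues a common lock when the driver relies on m2m_ctx->q_lock, as the WARN_ON above enforces. A hedged open() sketch with hypothetical my_* names:

	static int my_open(struct file *file)
	{
		struct my_dev *dev = video_drvdata(file);
		struct my_ctx *ctx;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		v4l2_fh_init(&ctx->fh, video_devdata(file));
		file->private_data = &ctx->fh;

		ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
						    my_queue_init);
		if (IS_ERR(ctx->fh.m2m_ctx)) {
			int ret = PTR_ERR(ctx->fh.m2m_ctx);

			v4l2_fh_exit(&ctx->fh);
			kfree(ctx);
			return ret;
		}

		v4l2_fh_add(&ctx->fh);
		return 0;
	}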
1273 /* wait until the current context is dequeued from job_queue */ in v4l2_m2m_ctx_release()
1276 vb2_queue_release(&m2m_ctx->cap_q_ctx.q); in v4l2_m2m_ctx_release()
1277 vb2_queue_release(&m2m_ctx->out_q_ctx.q); in v4l2_m2m_ctx_release()
1291 q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type); in v4l2_m2m_buf_queue()
1295 spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); in v4l2_m2m_buf_queue()
1296 list_add_tail(&b->list, &q_ctx->rdy_queue); in v4l2_m2m_buf_queue()
1297 q_ctx->num_rdy++; in v4l2_m2m_buf_queue()
1298 spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); in v4l2_m2m_buf_queue()
1312 cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp; in v4l2_m2m_buf_copy_metadata()
1314 if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE) in v4l2_m2m_buf_copy_metadata()
1315 cap_vb->timecode = out_vb->timecode; in v4l2_m2m_buf_copy_metadata()
1316 cap_vb->field = out_vb->field; in v4l2_m2m_buf_copy_metadata()
1317 cap_vb->flags &= ~mask; in v4l2_m2m_buf_copy_metadata()
1318 cap_vb->flags |= out_vb->flags & mask; in v4l2_m2m_buf_copy_metadata()
1319 cap_vb->vb2_buf.copied_timestamp = 1; in v4l2_m2m_buf_copy_metadata()
1329 * Queue all objects. Note that buffer objects are at the end of the in v4l2_m2m_request_queue()
1334 * queue is deleted. in v4l2_m2m_request_queue()
1336 list_for_each_entry_safe(obj, obj_safe, &req->objects, list) { in v4l2_m2m_request_queue()
1340 if (!obj->ops->queue) in v4l2_m2m_request_queue()
1346 WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)); in v4l2_m2m_request_queue()
1347 m2m_ctx_obj = container_of(vb->vb2_queue, in v4l2_m2m_request_queue()
1355 * The buffer we queue here can in theory be immediately in v4l2_m2m_request_queue()
1357 * above and why we call the queue op last. in v4l2_m2m_request_queue()
1359 obj->ops->queue(obj); in v4l2_m2m_request_queue()
1374 struct v4l2_fh *fh = file->private_data; in v4l2_m2m_ioctl_reqbufs()
1376 return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb); in v4l2_m2m_ioctl_reqbufs()
1383 struct v4l2_fh *fh = file->private_data; in v4l2_m2m_ioctl_create_bufs()
1385 return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create); in v4l2_m2m_ioctl_create_bufs()
1392 struct v4l2_fh *fh = file->private_data; in v4l2_m2m_ioctl_remove_bufs()
1393 struct vb2_queue *q = v4l2_m2m_get_vq(fh->m2m_ctx, remove->type); in v4l2_m2m_ioctl_remove_bufs()
1396 return -EINVAL; in v4l2_m2m_ioctl_remove_bufs()
1397 if (q->type != remove->type) in v4l2_m2m_ioctl_remove_bufs()
1398 return -EINVAL; in v4l2_m2m_ioctl_remove_bufs()
1400 return vb2_core_remove_bufs(q, remove->index, remove->count); in v4l2_m2m_ioctl_remove_bufs()
1407 struct v4l2_fh *fh = file->private_data; in v4l2_m2m_ioctl_querybuf()
1409 return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf); in v4l2_m2m_ioctl_querybuf()
1416 struct v4l2_fh *fh = file->private_data; in v4l2_m2m_ioctl_qbuf()
1418 return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf); in v4l2_m2m_ioctl_qbuf()
1425 struct v4l2_fh *fh = file->private_data; in v4l2_m2m_ioctl_dqbuf()
1427 return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf); in v4l2_m2m_ioctl_dqbuf()
1434 struct v4l2_fh *fh = file->private_data; in v4l2_m2m_ioctl_prepare_buf()
1436 return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf); in v4l2_m2m_ioctl_prepare_buf()
1443 struct v4l2_fh *fh = file->private_data; in v4l2_m2m_ioctl_expbuf()
1445 return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb); in v4l2_m2m_ioctl_expbuf()
1452 struct v4l2_fh *fh = file->private_data; in v4l2_m2m_ioctl_streamon()
1454 return v4l2_m2m_streamon(file, fh->m2m_ctx, type); in v4l2_m2m_ioctl_streamon()
1461 struct v4l2_fh *fh = file->private_data; in v4l2_m2m_ioctl_streamoff()
1463 return v4l2_m2m_streamoff(file, fh->m2m_ctx, type); in v4l2_m2m_ioctl_streamoff()
1470 if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START) in v4l2_m2m_ioctl_try_encoder_cmd()
1471 return -EINVAL; in v4l2_m2m_ioctl_try_encoder_cmd()
1473 ec->flags = 0; in v4l2_m2m_ioctl_try_encoder_cmd()
1481 if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START) in v4l2_m2m_ioctl_try_decoder_cmd()
1482 return -EINVAL; in v4l2_m2m_ioctl_try_decoder_cmd()
1484 dc->flags = 0; in v4l2_m2m_ioctl_try_decoder_cmd()
1486 if (dc->cmd == V4L2_DEC_CMD_STOP) { in v4l2_m2m_ioctl_try_decoder_cmd()
1487 dc->stop.pts = 0; in v4l2_m2m_ioctl_try_decoder_cmd()
1488 } else if (dc->cmd == V4L2_DEC_CMD_START) { in v4l2_m2m_ioctl_try_decoder_cmd()
1489 dc->start.speed = 0; in v4l2_m2m_ioctl_try_decoder_cmd()
1490 dc->start.format = V4L2_DEC_START_FMT_NONE; in v4l2_m2m_ioctl_try_decoder_cmd()
1503 if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START) in v4l2_m2m_encoder_cmd()
1504 return -EINVAL; in v4l2_m2m_encoder_cmd()
1506 if (ec->cmd == V4L2_ENC_CMD_STOP) in v4l2_m2m_encoder_cmd()
1509 if (m2m_ctx->is_draining) in v4l2_m2m_encoder_cmd()
1510 return -EBUSY; in v4l2_m2m_encoder_cmd()
1512 if (m2m_ctx->has_stopped) in v4l2_m2m_encoder_cmd()
1513 m2m_ctx->has_stopped = false; in v4l2_m2m_encoder_cmd()
1526 if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START) in v4l2_m2m_decoder_cmd()
1527 return -EINVAL; in v4l2_m2m_decoder_cmd()
1529 if (dc->cmd == V4L2_DEC_CMD_STOP) in v4l2_m2m_decoder_cmd()
1532 if (m2m_ctx->is_draining) in v4l2_m2m_decoder_cmd()
1533 return -EBUSY; in v4l2_m2m_decoder_cmd()
1535 if (m2m_ctx->has_stopped) in v4l2_m2m_decoder_cmd()
1536 m2m_ctx->has_stopped = false; in v4l2_m2m_decoder_cmd()
1545 struct v4l2_fh *fh = file->private_data; in v4l2_m2m_ioctl_encoder_cmd()
1547 return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec); in v4l2_m2m_ioctl_encoder_cmd()
1554 struct v4l2_fh *fh = file->private_data; in v4l2_m2m_ioctl_decoder_cmd()
1556 return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc); in v4l2_m2m_ioctl_decoder_cmd()
1563 if (dc->cmd != V4L2_DEC_CMD_FLUSH) in v4l2_m2m_ioctl_stateless_try_decoder_cmd()
1564 return -EINVAL; in v4l2_m2m_ioctl_stateless_try_decoder_cmd()
1566 dc->flags = 0; in v4l2_m2m_ioctl_stateless_try_decoder_cmd()
1575 struct v4l2_fh *fh = file->private_data; in v4l2_m2m_ioctl_stateless_decoder_cmd()
1577 struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev; in v4l2_m2m_ioctl_stateless_decoder_cmd()
1585 spin_lock_irqsave(&m2m_dev->job_spinlock, flags); in v4l2_m2m_ioctl_stateless_decoder_cmd()
1586 out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx); in v4l2_m2m_ioctl_stateless_decoder_cmd()
1587 cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx); in v4l2_m2m_ioctl_stateless_decoder_cmd()
1596 out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF; in v4l2_m2m_ioctl_stateless_decoder_cmd()
1597 } else if (cap_vb && cap_vb->is_held) { in v4l2_m2m_ioctl_stateless_decoder_cmd()
1603 cap_vb->is_held = false; in v4l2_m2m_ioctl_stateless_decoder_cmd()
1604 v4l2_m2m_dst_buf_remove(fh->m2m_ctx); in v4l2_m2m_ioctl_stateless_decoder_cmd()
1607 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_ioctl_stateless_decoder_cmd()
1615 * for the output and the capture buffer queue.
1620 struct v4l2_fh *fh = file->private_data; in v4l2_m2m_fop_mmap()
1622 return v4l2_m2m_mmap(file, fh->m2m_ctx, vma); in v4l2_m2m_fop_mmap()
1626 __poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait) in v4l2_m2m_fop_poll() argument
1628 struct v4l2_fh *fh = file->private_data; in v4l2_m2m_fop_poll()
1629 struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx; in v4l2_m2m_fop_poll()
1632 if (m2m_ctx->q_lock) in v4l2_m2m_fop_poll()
1633 mutex_lock(m2m_ctx->q_lock); in v4l2_m2m_fop_poll()
1635 ret = v4l2_m2m_poll(file, m2m_ctx, wait); in v4l2_m2m_fop_poll()
1637 if (m2m_ctx->q_lock) in v4l2_m2m_fop_poll()
1638 mutex_unlock(m2m_ctx->q_lock); in v4l2_m2m_fop_poll()
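With these helpers a driver's file_operations reduces to boilerplate; a hedged sketch (my_open and my_release are hypothetical, with release expected to call v4l2_m2m_ctx_release()):

	static const struct v4l2_file_operations my_fops = {
		.owner		= THIS_MODULE,
		.open		= my_open,
		.release	= my_release,
		.poll		= v4l2_m2m_fop_poll,
		.unlocked_ioctl	= video_ioctl2,
		.mmap		= v4l2_m2m_fop_mmap,
	};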