/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Memory-to-memory device framework for Video for Linux 2.
 *
 * Helper functions for devices that use memory buffers for both source
 * and destination.
 *
 * Copyright (c) 2009 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */

#ifndef _MEDIA_V4L2_MEM2MEM_H
#define _MEDIA_V4L2_MEM2MEM_H

#include <media/videobuf2-v4l2.h>

/**
 * struct v4l2_m2m_ops - mem-to-mem device driver callbacks
 * @device_run:	required. Begin the actual job (transaction) inside this
 *		callback.
 *		The job does NOT have to end before this callback returns
 *		(and usually it will not). When the job finishes,
 *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish()
 *		has to be called.
 * @job_ready:	optional. Should return 0 if the driver does not have a job
 *		fully prepared to run yet (i.e. it will not be able to finish a
 *		transaction without sleeping). If not provided, it will be
 *		assumed that one source and one destination buffer are all
 *		that is required for the driver to perform one full transaction.
 *		This method may not sleep.
 * @job_abort:	optional. Informs the driver that it has to abort the currently
 *		running transaction as soon as possible (i.e. as soon as it can
 *		stop the device safely; e.g. in the next interrupt handler),
 *		even if the transaction would not have been finished by then.
 *		After the driver performs the necessary steps, it has to call
 *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish() as
 *		if the transaction ended normally.
 *		This function does not have to (and will usually not) wait
 *		until the device enters a state when it can be stopped.
 */
struct v4l2_m2m_ops {
	void (*device_run)(void *priv);
	int (*job_ready)(void *priv);
	void (*job_abort)(void *priv);
};
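
/*
 * A minimal sketch of how a driver might implement these callbacks; all
 * my_* names are hypothetical and not part of this header. device_run()
 * only starts the hardware and returns; the interrupt handler completes
 * the job later:
 *
 *	static void my_device_run(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		my_hw_start(ctx);
 *	}
 *
 *	static void my_job_abort(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		my_hw_request_stop(ctx);
 *	}
 *
 *	static const struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,
 *		.job_abort	= my_job_abort,
 *	};
 */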

struct video_device;
struct v4l2_m2m_dev;

/**
 * struct v4l2_m2m_queue_ctx - represents a queue for buffers ready to be
 *	processed
 *
 * @q:		embedded struct &vb2_queue
 * @rdy_queue:	list of buffers (as struct &v4l2_m2m_buffer entries) ready
 *		to be processed
 * @rdy_spinlock: spin lock to protect the struct usage
 * @num_rdy:	number of buffers ready to be processed
 * @buffered:	is the queue buffered?
 *
 * Queue for buffers ready to be processed as soon as this
 * instance receives access to the device.
 */
struct v4l2_m2m_queue_ctx {
	struct vb2_queue	q;

	struct list_head	rdy_queue;
	spinlock_t		rdy_spinlock;
	u8			num_rdy;
	bool			buffered;
};

/**
 * struct v4l2_m2m_ctx - Memory to memory context structure
 *
 * @q_lock: optional &mutex lock shared by the capture and output vb2 queues
 * @new_frame: valid in the device_run callback: if true, then this
 *		starts a new frame; if false, then this is a new slice
 *		for an existing frame. This is always true unless
 *		V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF is set, which
 *		indicates slicing support.
 * @is_draining: indicates the device is in the draining phase
 * @last_src_buf: indicates the last source buffer for draining
 * @next_buf_last: the next queued capture buffer will be tagged as last
 * @has_stopped: indicates the device has been stopped
 * @ignore_cap_streaming: If true, job_ready can be called even if the CAPTURE
 *			  queue is not streaming. This allows firmware to
 *			  analyze the bitstream header which arrives on the
 *			  OUTPUT queue. The driver must implement the job_ready
 *			  callback correctly to make sure that the requirements
 *			  for actual decoding are met.
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @cap_q_ctx: Capture (output to memory) queue context
 * @out_q_ctx: Output (input from memory) queue context
 * @queue: List of memory to memory contexts
 * @job_flags: Job queue flags, used internally by v4l2-mem2mem.c:
 *		%TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT.
 * @finished: Wait queue used to signal when a job finishes.
 * @priv: Instance private data
 *
 * The memory to memory context is specific to a file handle, NOT to e.g.
 * a device.
 */
struct v4l2_m2m_ctx {
	/* optional cap/out vb2 queues lock */
	struct mutex			*q_lock;

	bool				new_frame;

	bool				is_draining;
	struct vb2_v4l2_buffer		*last_src_buf;
	bool				next_buf_last;
	bool				has_stopped;
	bool				ignore_cap_streaming;

	/* internal use only */
	struct v4l2_m2m_dev		*m2m_dev;

	struct v4l2_m2m_queue_ctx	cap_q_ctx;

	struct v4l2_m2m_queue_ctx	out_q_ctx;

	/* For device job queue */
	struct list_head		queue;
	unsigned long			job_flags;
	wait_queue_head_t		finished;

	void				*priv;
};

/**
 * struct v4l2_m2m_buffer - Memory to memory buffer
 *
 * @vb: embedded struct &vb2_v4l2_buffer
 * @list: entry in a ready buffers list
 */
struct v4l2_m2m_buffer {
	struct vb2_v4l2_buffer	vb;
	struct list_head	list;
};

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 *
 * This function returns the capture queue when @type is a capture type, and the
 * output queue otherwise. It never returns a NULL pointer.
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				       enum v4l2_buf_type type);

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * If a queue is buffered (for example a decoder hardware ringbuffer that has
 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
 * on that queue.
 *
 * There may also be additional, custom requirements. In such a case the driver
 * should supply a custom callback (job_ready in &v4l2_m2m_ops) that returns
 * 1 if the instance is ready. An example would be an instance that requires
 * more than one src/dst buffer per transaction, as sketched below.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);
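
/*
 * A hedged sketch of a custom job_ready callback for a driver needing two
 * source buffers per transaction (my_ctx is hypothetical; the m2m_ctx
 * pointer is assumed to live in an embedded struct v4l2_fh):
 *
 *	static int my_job_ready(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >= 2 &&
 *		       v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) >= 1;
 *	}
 */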

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after the &v4l2_m2m_ops->device_run
 * callback has been called on the driver.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx);
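
/*
 * A typical completion path, sketched with hypothetical my_* names and
 * assuming the driver context embeds a struct v4l2_fh and was passed as
 * the m2m private data: the interrupt handler removes the processed
 * buffers, returns them to userspace and then yields the device:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *		v4l2_m2m_buf_copy_metadata(src, dst);
 *		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *
 *		return IRQ_HANDLED;
 *	}
 */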

/**
 * v4l2_m2m_buf_done_and_job_finish() - return source/destination buffers with
 * state and inform the framework that a job has been finished and have it
 * clean up
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @state: vb2 buffer state passed to v4l2_m2m_buf_done().
 *
 * Drivers that set V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF must use this
 * function instead of job_finish() to take held buffers into account. It is
 * optional for other drivers.
 *
 * This function removes the source buffer from the ready list and returns
 * it with the given state. The same is done for the destination buffer, unless
 * it is marked 'held'. In that case the buffer is kept on the ready list.
 *
 * After that the job is finished (see job_finish()).
 *
 * This allows for multiple output buffers to be used to fill in a single
 * capture buffer. This is typically used by stateless decoders where
 * multiple e.g. H.264 slices contribute to a single decoded frame.
 */
void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
				      struct v4l2_m2m_ctx *m2m_ctx,
				      enum vb2_buffer_state state);

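/**
 * v4l2_m2m_buf_done() - return a buffer to userspace with the given state
 *
 * @buf: pointer to struct &vb2_v4l2_buffer
 * @state: the state to return the buffer with, e.g. %VB2_BUF_STATE_DONE or
 *	%VB2_BUF_STATE_ERROR
 */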
static inline void
v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
{
	vb2_buffer_done(&buf->vb2_buf, state);
}

/**
 * v4l2_m2m_clear_state() - clear encoding/decoding state
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void
v4l2_m2m_clear_state(struct v4l2_m2m_ctx *m2m_ctx)
{
	m2m_ctx->next_buf_last = false;
	m2m_ctx->is_draining = false;
	m2m_ctx->has_stopped = false;
}

/**
 * v4l2_m2m_mark_stopped() - set current encoding/decoding state as stopped
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void
v4l2_m2m_mark_stopped(struct v4l2_m2m_ctx *m2m_ctx)
{
	m2m_ctx->next_buf_last = false;
	m2m_ctx->is_draining = false;
	m2m_ctx->has_stopped = true;
}

/**
 * v4l2_m2m_dst_buf_is_last() - return whether the next queued capture buffer
 * should be tagged as last in the current encoding/decoding session
 *
 * This last capture buffer should be tagged with V4L2_BUF_FLAG_LAST to notify
 * the end of the capture session.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline bool
v4l2_m2m_dst_buf_is_last(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->is_draining && m2m_ctx->next_buf_last;
}

/**
 * v4l2_m2m_has_stopped() - return the current encoding/decoding session
 * stopped state
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline bool
v4l2_m2m_has_stopped(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->has_stopped;
}

/**
 * v4l2_m2m_is_last_draining_src_buf() - return whether the given output buffer
 * is the last one to process in a draining encoding/decoding session
 *
 * This identifies the last output buffer queued before a session stop was
 * requested; once it has been processed, the session reaches its actual
 * stopped state.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 */
static inline bool
v4l2_m2m_is_last_draining_src_buf(struct v4l2_m2m_ctx *m2m_ctx,
				  struct vb2_v4l2_buffer *vbuf)
{
	return m2m_ctx->is_draining && vbuf == m2m_ctx->last_src_buf;
}

/**
 * v4l2_m2m_last_buffer_done() - mark the buffer with the LAST flag and return
 * it as DONE
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 */
void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
			       struct vb2_v4l2_buffer *vbuf);
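
/*
 * A hedged sketch of how the draining helpers combine in a driver's
 * completion path: when the just-processed source buffer was the last one
 * queued before a stop was requested, the capture buffer is returned with
 * V4L2_BUF_FLAG_LAST set (v4l2_m2m_last_buffer_done() also marks the
 * session as stopped):
 *
 *	if (v4l2_m2m_is_last_draining_src_buf(m2m_ctx, src))
 *		v4l2_m2m_last_buffer_done(m2m_ctx, dst);
 *	else
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 */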

/**
 * v4l2_m2m_suspend() - stop new jobs from being run and wait for current job
 * to finish
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Called by a driver in the suspend hook. Stops new jobs from being run, and
 * waits for the currently running job to finish.
 */
void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_resume() - resume job running and try to run a queued job
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Called by a driver in the resume hook. This reverts the operation of
 * v4l2_m2m_suspend() and allows jobs to be run again. It also tries to run a
 * queued job if there is one.
 */
void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev);
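
/*
 * A minimal sketch of wiring these into system sleep callbacks (my_*
 * names hypothetical):
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct my_dev *mdev = dev_get_drvdata(dev);
 *
 *		v4l2_m2m_suspend(mdev->m2m_dev);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		struct my_dev *mdev = dev_get_drvdata(dev);
 *
 *		v4l2_m2m_resume(mdev->m2m_dev);
 *		return 0;
 *	}
 *
 *	static DEFINE_SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);
 */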

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @reqbufs: pointer to struct &v4l2_requestbuffers
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf);

/**
 * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @create: pointer to struct &v4l2_create_buffers
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @eb: pointer to struct &v4l2_exportbuffer
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type);

/**
 * v4l2_m2m_update_start_streaming_state() - update the encoding/decoding
 * session state when a start of streaming of a video queue is requested
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @q: pointer to the struct &vb2_queue being started
 */
void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					   struct vb2_queue *q);

/**
 * v4l2_m2m_update_stop_streaming_state() - update the encoding/decoding
 * session state when a stop of streaming of a video queue is requested
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @q: pointer to the struct &vb2_queue being stopped
 */
void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					  struct vb2_queue *q);

/**
 * v4l2_m2m_encoder_cmd() - execute an encoder command
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @ec: pointer to the encoder command
 */
int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_encoder_cmd *ec);

/**
 * v4l2_m2m_decoder_cmd() - execute a decoder command
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @dc: pointer to the decoder command
 */
int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_decoder_cmd *dc);

/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @wait: pointer to struct &poll_table_struct
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed, while a buffer ready
 * to dequeue on the destination queue indicates that a non-blocking read can
 * be performed.
 */
__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vma: pointer to struct &vm_area_struct
 *
 * Call from the driver's mmap() function. Handles mmap() for both queues
 * seamlessly: each video buffer receives a normal per-queue offset and a
 * proper vb2 queue pointer. The differentiation is made outside vb2 by
 * adding a predefined offset to buffers from one of the queues and
 * subtracting it before passing it back to vb2. Only drivers (and thus
 * applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma);

#ifndef CONFIG_MMU
unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags);
#endif

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * @m2m_ops: pointer to struct &v4l2_m2m_ops
 *
 * Usually called from the driver's ``probe()`` function.
 *
 * Return: an opaque pointer to the internal data to handle M2M context,
 * or an ERR_PTR() value on failure.
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);
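
/*
 * A minimal probe-time sketch (my_* names hypothetical); the returned
 * pointer is checked with IS_ERR() like other ERR_PTR()-returning APIs:
 *
 *	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 */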

#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev);
int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
			struct video_device *vdev, int function);
#else
static inline void
v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
}

static inline int
v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
		struct video_device *vdev, int function)
{
	return 0;
}
#endif

/**
 * v4l2_m2m_release() - clean up and free an m2m_dev structure
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Usually called from the driver's ``remove()`` function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize an m2m context
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @drv_priv: driver's instance private data
 * @queue_init: a callback used to perform queue-type-specific initialization
 *	of the vb2_queues
 *
 * Usually called from the driver's ``open()`` function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq));
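
/*
 * A hedged sketch of open-time context setup (my_* names hypothetical),
 * assuming the driver context embeds a struct v4l2_fh. The queue_init
 * callback configures both vb2 queues and calls vb2_queue_init() on each;
 * the returned context is checked with IS_ERR():
 *
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
 *					    my_queue_init);
 *	if (IS_ERR(ctx->fh.m2m_ctx))
 *		return PTR_ERR(ctx->fh.m2m_ctx);
 */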

static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->out_q_ctx.buffered = buffered;
}

static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->cap_q_ctx.buffered = buffered;
}

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Usually called from the driver's ``release()`` function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 *
 * Call from the struct &vb2_ops->buf_queue callback.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
			struct vb2_v4l2_buffer *vbuf);
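
/*
 * A minimal sketch of a driver's buf_queue callback (my_ctx is
 * hypothetical; to_vb2_v4l2_buffer() and vb2_get_drv_priv() come from the
 * vb2 headers):
 *
 *	static void my_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 *	}
 */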

/**
 * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
 * use
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned int num_buf_rdy;
	unsigned long flags;

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
	num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy;
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);

	return num_buf_rdy;
}

/**
 * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
 * ready for use
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned int num_buf_rdy;
	unsigned long flags;

	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
	num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy;
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);

	return num_buf_rdy;
}

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_last_buf() - return last buffer from the list of ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_last_src_buf() - return last source buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_last_buf(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_last_dst_buf() - return last destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_last_buf(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 */
#define v4l2_m2m_for_each_dst_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->cap_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_src_buf() - iterate over a list of source ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 */
#define v4l2_m2m_for_each_src_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->out_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_dst_buf_safe() - iterate over a list of destination ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->cap_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_src_buf_safe() - iterate over a list of source ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_src_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->out_q_ctx.rdy_queue, list)
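
/*
 * These iterators walk the rdy_queue lists in place; the _safe variants
 * allow removing the current entry while iterating. As a hedged sketch,
 * a stop_streaming() callback typically drains a ready list and returns
 * each buffer with an error state (locking details elided; ctx is a
 * hypothetical driver context embedding a struct v4l2_fh):
 *
 *	struct vb2_v4l2_buffer *vbuf;
 *
 *	while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)) != NULL)
 *		v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
 */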

/**
 * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->out_q_ctx.q;
}

/**
 * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->cap_q_ctx.q;
}

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
 * buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
 * ready buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_buf_remove_by_buf() - take off an exact buffer from the list of
 * ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 * @vbuf: the buffer to be removed
 */
void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf);

/**
 * v4l2_m2m_src_buf_remove_by_buf() - take off an exact source buffer from the
 * list of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: the buffer to be removed
 */
static inline void v4l2_m2m_src_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
						  struct vb2_v4l2_buffer *vbuf)
{
	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->out_q_ctx, vbuf);
}

/**
 * v4l2_m2m_dst_buf_remove_by_buf() - take off an exact destination buffer from
 * the list of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: the buffer to be removed
 */
static inline void v4l2_m2m_dst_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
						  struct vb2_v4l2_buffer *vbuf)
{
	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->cap_q_ctx, vbuf);
}

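/**
 * v4l2_m2m_buf_remove_by_idx() - take off the buffer with the given index from
 * the list of ready buffers and return it, or return NULL if it is not on the
 * list
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 * @idx: index of the buffer to be removed
 */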
struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx);

static inline struct vb2_v4l2_buffer *
v4l2_m2m_src_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->out_q_ctx, idx);
}

static inline struct vb2_v4l2_buffer *
v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx);
}

/**
 * v4l2_m2m_buf_copy_metadata() - copy buffer metadata from
 * the output buffer to the capture buffer
 *
 * @out_vb: the output buffer that is the source of the metadata.
 * @cap_vb: the capture buffer that will receive the metadata.
 *
 * This helper function copies the timestamp, timecode (if the TIMECODE
 * buffer flag was set), field, and the TIMECODE and TSTAMP_SRC_MASK flags from
 * @out_vb to @cap_vb.
 */
void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
				struct vb2_v4l2_buffer *cap_vb);

/* v4l2 request helper */

void v4l2_m2m_request_queue(struct media_request *req);

/* v4l2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
			   struct v4l2_requestbuffers *rb);
int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
			       struct v4l2_create_buffers *create);
int v4l2_m2m_ioctl_remove_bufs(struct file *file, void *priv,
			       struct v4l2_remove_buffers *d);
int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
			    struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
			  struct v4l2_exportbuffer *eb);
int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
			struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
			 struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
			       struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
			    enum v4l2_buf_type type);
int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
			     enum v4l2_buf_type type);
int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv,
			       struct v4l2_encoder_cmd *ec);
int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv,
			       struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *priv,
				   struct v4l2_encoder_cmd *ec);
int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *priv,
				   struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *priv,
					     struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
					 struct v4l2_decoder_cmd *dc);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
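
/*
 * These ioctl and file-operation helpers assume that file->private_data
 * points to a struct v4l2_fh whose m2m_ctx is valid. A hedged sketch of
 * wiring a few of them into the driver's tables (my_* names
 * hypothetical):
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */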

#endif /* _MEDIA_V4L2_MEM2MEM_H */