/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Memory-to-memory device framework for Video for Linux 2.
 *
 * Helper functions for devices that use memory buffers for both source
 * and destination.
 *
 * Copyright (c) 2009 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */

#ifndef _MEDIA_V4L2_MEM2MEM_H
#define _MEDIA_V4L2_MEM2MEM_H

#include <media/videobuf2-v4l2.h>

/**
 * struct v4l2_m2m_ops - mem-to-mem device driver callbacks
 * @device_run: required. Begin the actual job (transaction) inside this
 *		callback.
 *		The job does NOT have to end before this callback returns
 *		(and it will be the usual case). When the job finishes,
 *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish()
 *		has to be called.
 * @job_ready: optional. Should return 0 if the driver does not have a job
 *		fully prepared to run yet (i.e. it will not be able to finish a
 *		transaction without sleeping). If not provided, it will be
 *		assumed that one source and one destination buffer are all
 *		that is required for the driver to perform one full transaction.
 *		This method may not sleep.
 * @job_abort: optional. Informs the driver that it has to abort the currently
 *		running transaction as soon as possible (i.e. as soon as it can
 *		stop the device safely; e.g. in the next interrupt handler),
 *		even if the transaction would not have been finished by then.
 *		After the driver performs the necessary steps, it has to call
 *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish() as
 *		if the transaction ended normally.
 *		This function does not have to (and will usually not) wait
 *		until the device enters a state when it can be stopped.
 */
struct v4l2_m2m_ops {
	void (*device_run)(void *priv);
	int (*job_ready)(void *priv);
	void (*job_abort)(void *priv);
};
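
/*
 * Illustrative sketch (not part of the API): a minimal implementation of
 * the callbacks above. Every "my_*" identifier is hypothetical; the only
 * assumption is the common pattern of embedding a struct v4l2_fh (with its
 * m2m_ctx pointer) in the driver's per-file context. The job started in
 * device_run() is finished later, typically from the completion interrupt.
 *
 *	static void my_device_run(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *		my_hw_start_transaction(ctx, src, dst);
 *	}
 *
 *	static const struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,
 *		.job_abort	= my_job_abort,
 *	};
 */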

struct video_device;
struct v4l2_m2m_dev;

/**
 * struct v4l2_m2m_queue_ctx - represents a queue for buffers ready to be
 *	processed
 *
 * @q:		embedded struct &vb2_queue
 * @rdy_queue:	list of buffers ready to be processed
 * @rdy_spinlock: spin lock to protect the struct usage
 * @num_rdy:	number of buffers ready to be processed
 * @buffered:	is the queue buffered?
 *
 * Queue for buffers ready to be processed as soon as this
 * instance receives access to the device.
 */
struct v4l2_m2m_queue_ctx {
	struct vb2_queue	q;

	struct list_head	rdy_queue;
	spinlock_t		rdy_spinlock;
	u8			num_rdy;
	bool			buffered;
};

/**
 * struct v4l2_m2m_ctx - Memory to memory context structure
 *
 * @q_lock: struct &mutex lock
 * @new_frame: valid in the device_run callback: if true, then this
 *		starts a new frame; if false, then this is a new slice
 *		for an existing frame. This is always true unless
 *		V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF is set, which
 *		indicates slicing support.
 * @is_draining: indicates device is in draining phase
 * @last_src_buf: indicate the last source buffer for draining
 * @next_buf_last: next queued capture buffer will be tagged as last
 * @has_stopped: indicate the device has been stopped
 * @ignore_cap_streaming: If true, job_ready can be called even if the CAPTURE
 *			  queue is not streaming. This allows firmware to
 *			  analyze the bitstream header which arrives on the
 *			  OUTPUT queue. The driver must implement the job_ready
 *			  callback correctly to make sure that the requirements
 *			  for actual decoding are met.
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @cap_q_ctx: Capture (output to memory) queue context
 * @out_q_ctx: Output (input from memory) queue context
 * @queue: List of memory to memory contexts
 * @job_flags: Job queue flags, used internally by v4l2-mem2mem.c:
 *		%TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT.
 * @finished: Wait queue used to signal when a job queue finished.
 * @priv: Instance private data
 *
 * The memory to memory context is specific to a file handle, NOT to e.g.
 * a device.
 */
struct v4l2_m2m_ctx {
	/* optional cap/out vb2 queues lock */
	struct mutex			*q_lock;

	bool				new_frame;

	bool				is_draining;
	struct vb2_v4l2_buffer		*last_src_buf;
	bool				next_buf_last;
	bool				has_stopped;
	bool				ignore_cap_streaming;

	/* internal use only */
	struct v4l2_m2m_dev		*m2m_dev;

	struct v4l2_m2m_queue_ctx	cap_q_ctx;

	struct v4l2_m2m_queue_ctx	out_q_ctx;

	/* For device job queue */
	struct list_head		queue;
	unsigned long			job_flags;
	wait_queue_head_t		finished;

	void				*priv;
};

/**
 * struct v4l2_m2m_buffer - Memory to memory buffer
 *
 * @vb: embedded struct &vb2_v4l2_buffer
 * @list: list of m2m buffers
 */
struct v4l2_m2m_buffer {
	struct vb2_v4l2_buffer	vb;
	struct list_head	list;
};

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 *
 * This function returns the capture queue when @type is a capture type, and
 * the output queue otherwise. It never returns a NULL pointer.
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type);

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * There are three basic requirements an instance has to meet to be able to
 * run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * If a queue is buffered (for example a decoder hardware ringbuffer that has
 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
 * on that queue.
 *
 * There may also be additional, custom requirements. In such case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);
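
/*
 * Illustrative sketch: a custom job_ready callback for a hypothetical
 * device that needs two source buffers per transaction; everything named
 * "my_*" is an assumption, not part of this framework.
 *
 *	static int my_job_ready(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >= 2 &&
 *		       v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) >= 1;
 *	}
 */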

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after &v4l2_m2m_ops->device_run
 * callback has been called on the driver.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx);

/**
 * v4l2_m2m_buf_done_and_job_finish() - return source/destination buffers with
 * state and inform the framework that a job has been finished and have it
 * clean up
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @state: vb2 buffer state passed to v4l2_m2m_buf_done().
 *
 * Drivers that set V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF must use this
 * function instead of job_finish() to take held buffers into account. It is
 * optional for other drivers.
 *
 * This function removes the source buffer from the ready list and returns
 * it with the given state. The same is done for the destination buffer, unless
 * it is marked 'held'. In that case the buffer is kept on the ready list.
 *
 * After that the job is finished (see job_finish()).
 *
 * This allows for multiple output buffers to be used to fill in a single
 * capture buffer. This is typically used by stateless decoders where
 * multiple e.g. H.264 slices contribute to a single decoded frame.
 */
void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
				      struct v4l2_m2m_ctx *m2m_ctx,
				      enum vb2_buffer_state state);

static inline void
v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
{
	vb2_buffer_done(&buf->vb2_buf, state);
}
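
/*
 * Illustrative sketch: finishing a job from a completion interrupt using
 * v4l2_m2m_buf_done_and_job_finish(), which dequeues the ready buffers
 * itself (keeping a held destination buffer when applicable). "my_dev",
 * "my_ctx" and my_hw_job_ok() are hypothetical.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *
 *		v4l2_m2m_buf_done_and_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx,
 *						 my_hw_job_ok(dev) ?
 *						 VB2_BUF_STATE_DONE :
 *						 VB2_BUF_STATE_ERROR);
 *		return IRQ_HANDLED;
 *	}
 */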

/**
 * v4l2_m2m_clear_state() - clear encoding/decoding state
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void
v4l2_m2m_clear_state(struct v4l2_m2m_ctx *m2m_ctx)
{
	m2m_ctx->next_buf_last = false;
	m2m_ctx->is_draining = false;
	m2m_ctx->has_stopped = false;
}

/**
 * v4l2_m2m_mark_stopped() - set current encoding/decoding state as stopped
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void
v4l2_m2m_mark_stopped(struct v4l2_m2m_ctx *m2m_ctx)
{
	m2m_ctx->next_buf_last = false;
	m2m_ctx->is_draining = false;
	m2m_ctx->has_stopped = true;
}

/**
 * v4l2_m2m_dst_buf_is_last() - return whether the next queued capture buffer
 * is the last one of a draining encoding/decoding session
 *
 * This last capture buffer should be tagged with V4L2_BUF_FLAG_LAST to notify
 * the end of the capture session.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline bool
v4l2_m2m_dst_buf_is_last(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->is_draining && m2m_ctx->next_buf_last;
}

/**
 * v4l2_m2m_has_stopped() - return the current encoding/decoding session
 * stopped state
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline bool
v4l2_m2m_has_stopped(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->has_stopped;
}

/**
 * v4l2_m2m_is_last_draining_src_buf() - return the output buffer draining
 * state in the current encoding/decoding session
 *
 * This will identify the last output buffer queued before a session stop
 * was required, leading to an actual encoding/decoding session stop state
 * in the encoding/decoding process after being processed.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 */
static inline bool
v4l2_m2m_is_last_draining_src_buf(struct v4l2_m2m_ctx *m2m_ctx,
				  struct vb2_v4l2_buffer *vbuf)
{
	return m2m_ctx->is_draining && vbuf == m2m_ctx->last_src_buf;
}

/**
 * v4l2_m2m_last_buffer_done() - marks the buffer with LAST flag and DONE
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 */
void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
			       struct vb2_v4l2_buffer *vbuf);
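
/*
 * Illustrative sketch: draining support in a driver's completion path.
 * When the source buffer that was just consumed is the last one queued
 * before a stop command, the capture buffer is returned with the LAST
 * flag via v4l2_m2m_last_buffer_done() instead of a plain
 * v4l2_m2m_buf_done(). The surrounding variables are hypothetical.
 *
 *	src = v4l2_m2m_src_buf_remove(m2m_ctx);
 *	dst = v4l2_m2m_dst_buf_remove(m2m_ctx);
 *	last = v4l2_m2m_is_last_draining_src_buf(m2m_ctx, src);
 *
 *	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *	if (last)
 *		v4l2_m2m_last_buffer_done(m2m_ctx, dst);
 *	else
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 */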

/**
 * v4l2_m2m_suspend() - stop new jobs from being run and wait for current job
 * to finish
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Called by a driver in the suspend hook. Stop new jobs from being run, and
 * wait for current running job to finish.
 */
void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_resume() - resume job running and try to run a queued job
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Called by a driver in the resume hook. This reverts the operation of
 * v4l2_m2m_suspend() and allows job to be run. Also try to run a queued job if
 * there is any.
 */
void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @reqbufs: pointer to struct &v4l2_requestbuffers
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf);

/**
 * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending
 * on the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @create: pointer to struct &v4l2_create_buffers
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @eb: pointer to struct &v4l2_exportbuffer
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type);

/**
 * v4l2_m2m_update_start_streaming_state() - update the encoding/decoding
 * session state when a start of streaming of a video queue is requested
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @q: queue
 */
void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					   struct vb2_queue *q);

/**
 * v4l2_m2m_update_stop_streaming_state() - update the encoding/decoding
 * session state when a stop of streaming of a video queue is requested
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @q: queue
 */
void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					  struct vb2_queue *q);
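
/*
 * Illustrative sketch: keeping the draining/stop state in sync from the
 * vb2 start_streaming/stop_streaming callbacks. "my_ctx",
 * my_return_all_buffers() and the use of the queue's drv_priv for the
 * per-file context are assumptions.
 *
 *	static int my_start_streaming(struct vb2_queue *q, unsigned int count)
 *	{
 *		struct my_ctx *ctx = vb2_get_drv_priv(q);
 *
 *		v4l2_m2m_update_start_streaming_state(ctx->fh.m2m_ctx, q);
 *		return 0;
 *	}
 *
 *	static void my_stop_streaming(struct vb2_queue *q)
 *	{
 *		struct my_ctx *ctx = vb2_get_drv_priv(q);
 *
 *		v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
 *		my_return_all_buffers(ctx, q, VB2_BUF_STATE_ERROR);
 *	}
 */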

/**
 * v4l2_m2m_encoder_cmd() - execute an encoder command
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @ec: pointer to the encoder command
 */
int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_encoder_cmd *ec);

/**
 * v4l2_m2m_decoder_cmd() - execute a decoder command
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @dc: pointer to the decoder command
 */
int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_decoder_cmd *dc);

/**
 * v4l2_m2m_poll() - poll multiplexer for both source and destination queues
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @wait: pointer to struct &poll_table_struct
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed; a buffer available to
 * dequeue from the destination queue indicates that a non-blocking read can
 * be performed.
 */
__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       struct poll_table_struct *wait);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vma: pointer to struct &vm_area_struct
 *
 * Call from driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for the video buffer, which will receive normal per-queue offsets
 * and proper vb2 queue pointers. The differentiation is made outside
 * vb2 by adding a predefined offset to buffers from one of the queues
 * and subtracting it before passing it back to vb2. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma);

#ifndef CONFIG_MMU
unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags);
#endif

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * @m2m_ops: pointer to struct v4l2_m2m_ops
 *
 * Usually called from driver's ``probe()`` function.
 *
 * Return: returns an opaque pointer to the internal data to handle M2M context
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);
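
/*
 * Illustrative sketch of the probe() side, assuming a hypothetical "my_dev"
 * driver structure; v4l2_m2m_init() returns an ERR_PTR() value on failure.
 *
 *	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 */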

#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev);
int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
				       struct video_device *vdev,
				       int function);
#else
static inline void
v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
}

static inline int
v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
				   struct video_device *vdev, int function)
{
	return 0;
}
#endif

/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Usually called from driver's ``remove()`` function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_get() - take a reference to the m2m_dev structure
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * This is used to share the M2M device across multiple devices. This
 * can be used to avoid scheduling two hardware nodes concurrently.
 */
void v4l2_m2m_get(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_put() - remove a reference to the m2m_dev structure
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Once the M2M device has no more references, v4l2_m2m_release() will be
 * called automatically. Users of this method should never call
 * v4l2_m2m_release() directly. See v4l2_m2m_get() for more details.
 */
void v4l2_m2m_put(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @drv_priv: driver's instance private data
 * @queue_init: a callback for queue type-specific initialization function
 *	to be used for initializing vb2_queues
 *
 * Usually called from driver's ``open()`` function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq,
				  struct vb2_queue *dst_vq));

static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->out_q_ctx.buffered = buffered;
}

static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->cap_q_ctx.buffered = buffered;
}

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 *
 * Call this from the &vb2_ops->buf_queue callback of the vb2 queue.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
			struct vb2_v4l2_buffer *vbuf);
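
/*
 * Illustrative sketch: per-file-handle setup in open() and the vb2
 * buf_queue callback feeding the ready lists. "my_ctx", "my_queue_init"
 * and the drv_priv layout are hypothetical; the m2m context is stored in
 * the standard struct v4l2_fh m2m_ctx field.
 *
 *	In my_open():
 *
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
 *	if (IS_ERR(ctx->fh.m2m_ctx))
 *		return PTR_ERR(ctx->fh.m2m_ctx);
 *
 *	static void my_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 *	}
 */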

/**
 * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready
 * for use
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned int num_buf_rdy;
	unsigned long flags;

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
	num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy;
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);

	return num_buf_rdy;
}

/**
 * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
 * ready for use
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned int num_buf_rdy;
	unsigned long flags;

	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
	num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy;
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);

	return num_buf_rdy;
}

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_last_buf() - return last buffer from the list of ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_last_src_buf() - return last source buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_last_buf(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_last_dst_buf() - return last destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_last_buf(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 */
#define v4l2_m2m_for_each_dst_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->cap_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_src_buf() - iterate over a list of source ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 */
#define v4l2_m2m_for_each_src_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->out_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_dst_buf_safe() - iterate over a list of destination ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->cap_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_src_buf_safe() - iterate over a list of source ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_src_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->out_q_ctx.rdy_queue, list)
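
/*
 * Illustrative sketch: walking the ready destination buffers. The ready
 * lists are protected by the queue context's rdy_spinlock, so iterations
 * like this one are typically done with that lock held. my_inspect_buffer()
 * is hypothetical.
 *
 *	struct v4l2_m2m_buffer *b;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
 *	v4l2_m2m_for_each_dst_buf(m2m_ctx, b)
 *		my_inspect_buffer(&b->vb);
 *	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
 */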

/**
 * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->out_q_ctx.q;
}

/**
 * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->cap_q_ctx.q;
}

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
 * buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
 * ready buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_buf_remove_by_buf() - take off exact buffer from the list of ready
 * buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 * @vbuf: the buffer to be removed
 */
void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf);

/**
 * v4l2_m2m_src_buf_remove_by_buf() - take off exact source buffer from the
 * list of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: the buffer to be removed
 */
static inline void v4l2_m2m_src_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
						  struct vb2_v4l2_buffer *vbuf)
{
	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->out_q_ctx, vbuf);
}
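
/*
 * Illustrative sketch: a (hypothetical) vidioc_s_fmt handler refusing a
 * format change while buffers are already allocated on the matching queue,
 * using v4l2_m2m_get_vq() together with vb2_is_busy().
 *
 *	struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
 *
 *	if (vb2_is_busy(vq))
 *		return -EBUSY;
 */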

/**
 * v4l2_m2m_dst_buf_remove_by_buf() - take off exact destination buffer from
 * the list of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: the buffer to be removed
 */
static inline void v4l2_m2m_dst_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
						  struct vb2_v4l2_buffer *vbuf)
{
	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->cap_q_ctx, vbuf);
}

struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx);

static inline struct vb2_v4l2_buffer *
v4l2_m2m_src_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->out_q_ctx, idx);
}

static inline struct vb2_v4l2_buffer *
v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx);
}

/**
 * v4l2_m2m_buf_copy_metadata() - copy buffer metadata from
 * the output buffer to the capture buffer
 *
 * @out_vb: the output buffer that is the source of the metadata.
 * @cap_vb: the capture buffer that will receive the metadata.
 *
 * This helper function copies the timestamp, timecode (if the TIMECODE
 * buffer flag was set), field, and the TIMECODE and TSTAMP_SRC_MASK flags from
 * @out_vb to @cap_vb.
 */
void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
				struct vb2_v4l2_buffer *cap_vb);

/* v4l2 request helper */

void v4l2_m2m_request_queue(struct media_request *req);

/* v4l2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
			   struct v4l2_requestbuffers *rb);
int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
			       struct v4l2_create_buffers *create);
int v4l2_m2m_ioctl_remove_bufs(struct file *file, void *priv,
			       struct v4l2_remove_buffers *d);
int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
			    struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
			  struct v4l2_exportbuffer *eb);
int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
			struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
			 struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
			       struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
			    enum v4l2_buf_type type);
int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
			     enum v4l2_buf_type type);
int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv,
			       struct v4l2_encoder_cmd *ec);
int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv,
			       struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *priv,
				   struct v4l2_encoder_cmd *ec);
int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *priv,
				   struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *priv,
					     struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
					 struct v4l2_decoder_cmd *dc);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
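
/*
 * Illustrative sketch: drivers that do not need to intercept the buffer
 * ioctls or file operations can plug the helpers above directly into their
 * ops tables, provided the file handle is a struct v4l2_fh with m2m_ctx set
 * up in open(). The "my_*" names are hypothetical.
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */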

#endif /* _MEDIA_V4L2_MEM2MEM_H */