// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>

#include "iris_buffer.h"
#include "iris_instance.h"
#include "iris_power.h"
#include "iris_vpu_buffer.h"

#define PIXELS_4K 4096
#define MAX_WIDTH 4096
#define MAX_HEIGHT 2304
#define Y_STRIDE_ALIGN 128
#define UV_STRIDE_ALIGN 128
#define Y_SCANLINE_ALIGN 32
#define UV_SCANLINE_ALIGN 16
#define UV_SCANLINE_ALIGN_QC08C 32
#define META_STRIDE_ALIGNED 64
#define META_SCANLINE_ALIGNED 16
#define NUM_MBS_4K (DIV_ROUND_UP(MAX_WIDTH, 16) * DIV_ROUND_UP(MAX_HEIGHT, 16))

/*
 * NV12:
 * YUV 4:2:0 image with a plane of 8 bit Y samples followed
 * by an interleaved U/V plane containing 8 bit 2x2 subsampled
 * colour difference samples.
 *
 * <-Y/UV_Stride (aligned to 128)->
 * <------- Width ------->
 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^
 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |
 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           y_scanlines (aligned to 32)
 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |
 * . . . . . . . . . . . . . . . .              |
 * . . . . . . . . . . . . . . . .              |
 * . . . . . . . . . . . . . . . .              |
 * . . . . . . . . . . . . . . . .              V
 * U V U V U V U V U V U V . . . .  ^
 * U V U V U V U V U V U V . . . .  |
 * U V U V U V U V U V U V . . . .  |
 * U V U V U V U V U V U V . . . .  uv_scanlines (aligned to 16)
 * . . . . . . . . . . . . . . . .  |
 * . . . . . . . . . . . . . . . .  V
 * . . . . . . . . . . . . . . . .  --> Buffer size aligned to 4K
 *
 * y_stride : Width aligned to 128
 * uv_stride : Width aligned to 128
 * y_scanlines: Height aligned to 32
 * uv_scanlines: Height/2 aligned to 16
 * Total size = align(y_stride * y_scanlines +
 *		      uv_stride * uv_scanlines, 4096)
 *
 * Note: All the alignments are hardware requirements.
 */
static u32 iris_yuv_buffer_size_nv12(struct iris_inst *inst)
{
	u32 y_plane, uv_plane, y_stride, uv_stride, y_scanlines, uv_scanlines;
	struct v4l2_format *f = inst->fmt_dst;

	y_stride = ALIGN(f->fmt.pix_mp.width, Y_STRIDE_ALIGN);
	uv_stride = ALIGN(f->fmt.pix_mp.width, UV_STRIDE_ALIGN);
	y_scanlines = ALIGN(f->fmt.pix_mp.height, Y_SCANLINE_ALIGN);
	uv_scanlines = ALIGN((f->fmt.pix_mp.height + 1) >> 1, UV_SCANLINE_ALIGN);
	y_plane = y_stride * y_scanlines;
	uv_plane = uv_stride * uv_scanlines;

	return ALIGN(y_plane + uv_plane, PIXELS_4K);
}
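
/*
 * Worked example for the NV12 size above (illustrative only, not an extra
 * hardware rule), assuming a hypothetical 1920x1080 frame:
 *   y_stride = uv_stride = ALIGN(1920, 128) = 1920
 *   y_scanlines  = ALIGN(1080, 32) = 1088
 *   uv_scanlines = ALIGN(540, 16)  = 544
 *   total = ALIGN(1920 * 1088 + 1920 * 544, 4096) = 3133440 bytes
 */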

/*
 * QC08C:
 * Compressed Macro-tile format for NV12.
 * Contains 4 planes in the following order -
 * (A) Y_Meta_Plane
 * (B) Y_UBWC_Plane
 * (C) UV_Meta_Plane
 * (D) UV_UBWC_Plane
 *
 * Y_Meta_Plane consists of meta information to decode compressed
 * tile data in Y_UBWC_Plane.
 * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
 * UBWC decoder block will use the Y_Meta_Plane data together with
 * Y_UBWC_Plane data to produce loss-less uncompressed 8 bit Y samples.
 *
 * UV_Meta_Plane consists of meta information to decode compressed
 * tile data in UV_UBWC_Plane.
 * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
 * UBWC decoder block will use UV_Meta_Plane data together with
 * UV_UBWC_Plane data to produce loss-less uncompressed 8 bit 2x2
 * subsampled color difference samples.
 *
 * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
 * and randomly accessible. There is no dependency between tiles.
 *
 * <----- y_meta_stride -----> (aligned to 64)
 * <-------- Width ------>
 * M M M M M M M M M M M M . .      ^           ^
 * M M M M M M M M M M M M . .      |           |
 * M M M M M M M M M M M M . .      Height      |
 * M M M M M M M M M M M M . .      |           y_meta_scanlines (aligned to 16)
 * M M M M M M M M M M M M . .      |           |
 * M M M M M M M M M M M M . .      |           |
 * M M M M M M M M M M M M . .      |           |
 * M M M M M M M M M M M M . .      V           |
 * . . . . . . . . . . . . . .                  |
 * . . . . . . . . . . . . . .                  |
 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4K
 * . . . . . . . . . . . . . .                  V
 * <--Compressed tile y_stride---> (aligned to 128)
 * <------- Width ------->
 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  Height      |
 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           Macro_tile y_scanlines (aligned to 32)
 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
 * . . . . . . . . . . . . . . . .              |
 * . . . . . . . . . . . . . . . .              |
 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4K
 * . . . . . . . . . . . . . . . .              V
 * <----- uv_meta_stride -----> (aligned to 64)
 * M M M M M M M M M M M M . .      ^
 * M M M M M M M M M M M M . .      |
 * M M M M M M M M M M M M . .      |
 * M M M M M M M M M M M M . .      uv_meta_scanlines (aligned to 16)
 * . . . . . . . . . . . . . .      |
 * . . . . . . . . . . . . . .      V
 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4K
 * <--Compressed tile uv_stride---> (aligned to 128)
 * U* V* U* V* U* V* U* V* . . . .  ^
 * U* V* U* V* U* V* U* V* . . . .  |
 * U* V* U* V* U* V* U* V* . . . .  |
 * U* V* U* V* U* V* U* V* . . . .  uv_scanlines (aligned to 32)
 * . . . . . . . . . . . . . . . .  |
 * . . . . . . . . . . . . . . . .  V
 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4K
 *
 * y_stride: width aligned to 128
 * uv_stride: width aligned to 128
 * y_scanlines: height aligned to 32
 * uv_scanlines: height aligned to 32
 * y_plane: buffer size aligned to 4096
 * uv_plane: buffer size aligned to 4096
 * y_meta_stride: width aligned to 64
 * y_meta_scanlines: height aligned to 16
 * y_meta_plane: buffer size aligned to 4096
 * uv_meta_stride: width aligned to 64
 * uv_meta_scanlines: height aligned to 16
 * uv_meta_plane: buffer size aligned to 4096
 *
 * Total size = align(y_plane + uv_plane +
 *		      y_meta_plane + uv_meta_plane, 4096)
 *
 * Note: All the alignments are hardware requirements.
 */
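
/*
 * Worked example for the QC08C sizes above (illustrative only, not an extra
 * hardware rule), assuming a hypothetical 1920x1080 frame:
 *   y_meta_stride = 64,  y_meta_scanlines = 144  -> y_meta_plane  = 12288
 *   y_stride = 1920,     y_scanlines = 1088      -> y_plane       = 2088960
 *   uv_meta_stride = 64, uv_meta_scanlines = 80  -> uv_meta_plane = 8192
 *   uv_stride = 1920,    uv_scanlines = 544      -> uv_plane      = 1044480
 *   total = ALIGN(12288 + 2088960 + 8192 + 1044480, 4096) = 3153920 bytes
 */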
static u32 iris_yuv_buffer_size_qc08c(struct iris_inst *inst)
{
	u32 y_plane, uv_plane, y_stride, uv_stride;
	struct v4l2_format *f = inst->fmt_dst;
	u32 uv_meta_stride, uv_meta_plane;
	u32 y_meta_stride, y_meta_plane;

	y_meta_stride = ALIGN(DIV_ROUND_UP(f->fmt.pix_mp.width, META_STRIDE_ALIGNED >> 1),
			      META_STRIDE_ALIGNED);
	y_meta_plane = y_meta_stride * ALIGN(DIV_ROUND_UP(f->fmt.pix_mp.height,
							  META_SCANLINE_ALIGNED >> 1),
					     META_SCANLINE_ALIGNED);
	y_meta_plane = ALIGN(y_meta_plane, PIXELS_4K);

	y_stride = ALIGN(f->fmt.pix_mp.width, Y_STRIDE_ALIGN);
	y_plane = ALIGN(y_stride * ALIGN(f->fmt.pix_mp.height, Y_SCANLINE_ALIGN), PIXELS_4K);

	uv_meta_stride = ALIGN(DIV_ROUND_UP(f->fmt.pix_mp.width / 2, META_STRIDE_ALIGNED >> 2),
			       META_STRIDE_ALIGNED);
	uv_meta_plane = uv_meta_stride * ALIGN(DIV_ROUND_UP(f->fmt.pix_mp.height / 2,
							    META_SCANLINE_ALIGNED >> 1),
					       META_SCANLINE_ALIGNED);
	uv_meta_plane = ALIGN(uv_meta_plane, PIXELS_4K);

	uv_stride = ALIGN(f->fmt.pix_mp.width, UV_STRIDE_ALIGN);
	uv_plane = ALIGN(uv_stride * ALIGN(f->fmt.pix_mp.height / 2, UV_SCANLINE_ALIGN_QC08C),
			 PIXELS_4K);

	return ALIGN(y_meta_plane + y_plane + uv_meta_plane + uv_plane, PIXELS_4K);
}

static u32 iris_bitstream_buffer_size(struct iris_inst *inst)
{
	struct platform_inst_caps *caps = inst->core->iris_platform_data->inst_caps;
	u32 base_res_mbs = NUM_MBS_4K;
	u32 frame_size, num_mbs;
	u32 div_factor = 2;

	num_mbs = iris_get_mbpf(inst);
	if (num_mbs > NUM_MBS_4K) {
		div_factor = 4;
		base_res_mbs = caps->max_mbpf;
	} else if (inst->codec == V4L2_PIX_FMT_VP9) {
		div_factor = 1;
	}

	/*
	 * frame_size = YUVsize / div_factor
	 * where YUVsize = resolution_in_MBs * pixels_per_MB * 3 / 2
	 */
	frame_size = base_res_mbs * (16 * 16) * 3 / 2 / div_factor;

	return ALIGN(frame_size, PIXELS_4K);
}

int iris_get_buffer_size(struct iris_inst *inst,
			 enum iris_buffer_type buffer_type)
{
	switch (buffer_type) {
	case BUF_INPUT:
		return iris_bitstream_buffer_size(inst);
	case BUF_OUTPUT:
		return iris_yuv_buffer_size_nv12(inst);
	case BUF_DPB:
		return iris_yuv_buffer_size_qc08c(inst);
	default:
		return 0;
	}
}

static void iris_fill_internal_buf_info(struct iris_inst *inst,
					enum iris_buffer_type buffer_type)
{
	struct iris_buffers *buffers = &inst->buffers[buffer_type];

	buffers->size = iris_vpu_buf_size(inst, buffer_type);
	buffers->min_count = iris_vpu_buf_count(inst, buffer_type);
}

void iris_get_internal_buffers(struct iris_inst *inst, u32 plane)
{
	const struct iris_platform_data *platform_data = inst->core->iris_platform_data;
	const u32 *internal_buf_type;
	u32 internal_buffer_count, i;

	if (V4L2_TYPE_IS_OUTPUT(plane)) {
		internal_buf_type = platform_data->dec_ip_int_buf_tbl;
		internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
		for (i = 0; i < internal_buffer_count; i++)
			iris_fill_internal_buf_info(inst, internal_buf_type[i]);
	} else {
		internal_buf_type = platform_data->dec_op_int_buf_tbl;
		internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
		for (i = 0; i < internal_buffer_count; i++)
			iris_fill_internal_buf_info(inst, internal_buf_type[i]);
	}
}
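
/*
 * Allocate one internal buffer of the size configured for @buffer_type and
 * add it to the per-type list. The allocation is write-combined and has no
 * kernel mapping, since the driver does not need CPU access to it.
 */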
static int iris_create_internal_buffer(struct iris_inst *inst,
				       enum iris_buffer_type buffer_type, u32 index)
{
	struct iris_buffers *buffers = &inst->buffers[buffer_type];
	struct iris_core *core = inst->core;
	struct iris_buffer *buffer;

	if (!buffers->size)
		return 0;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	INIT_LIST_HEAD(&buffer->list);
	buffer->type = buffer_type;
	buffer->index = index;
	buffer->buffer_size = buffers->size;
	buffer->dma_attrs = DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_KERNEL_MAPPING;
	list_add_tail(&buffer->list, &buffers->list);

	buffer->kvaddr = dma_alloc_attrs(core->dev, buffer->buffer_size,
					 &buffer->device_addr, GFP_KERNEL, buffer->dma_attrs);
	if (!buffer->kvaddr)
		return -ENOMEM;

	return 0;
}

int iris_create_internal_buffers(struct iris_inst *inst, u32 plane)
{
	const struct iris_platform_data *platform_data = inst->core->iris_platform_data;
	u32 internal_buffer_count, i, j;
	struct iris_buffers *buffers;
	const u32 *internal_buf_type;
	int ret;

	if (V4L2_TYPE_IS_OUTPUT(plane)) {
		internal_buf_type = platform_data->dec_ip_int_buf_tbl;
		internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
	} else {
		internal_buf_type = platform_data->dec_op_int_buf_tbl;
		internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
	}

	for (i = 0; i < internal_buffer_count; i++) {
		buffers = &inst->buffers[internal_buf_type[i]];
		for (j = 0; j < buffers->min_count; j++) {
			ret = iris_create_internal_buffer(inst, internal_buf_type[i], j);
			if (ret)
				return ret;
		}
	}

	return 0;
}

int iris_queue_buffer(struct iris_inst *inst, struct iris_buffer *buf)
{
	const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
	int ret;

	ret = hfi_ops->session_queue_buf(inst, buf);
	if (ret)
		return ret;

	buf->attr &= ~BUF_ATTR_DEFERRED;
	buf->attr |= BUF_ATTR_QUEUED;

	return 0;
}

int iris_queue_internal_buffers(struct iris_inst *inst, u32 plane)
{
	const struct iris_platform_data *platform_data = inst->core->iris_platform_data;
	struct iris_buffer *buffer, *next;
	struct iris_buffers *buffers;
	const u32 *internal_buf_type;
	u32 internal_buffer_count, i;
	int ret;

	if (V4L2_TYPE_IS_OUTPUT(plane)) {
		internal_buf_type = platform_data->dec_ip_int_buf_tbl;
		internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
	} else {
		internal_buf_type = platform_data->dec_op_int_buf_tbl;
		internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
	}

	for (i = 0; i < internal_buffer_count; i++) {
		buffers = &inst->buffers[internal_buf_type[i]];
		list_for_each_entry_safe(buffer, next, &buffers->list, list) {
			if (buffer->attr & BUF_ATTR_PENDING_RELEASE)
				continue;
			if (buffer->attr & BUF_ATTR_QUEUED)
				continue;
			ret = iris_queue_buffer(inst, buffer);
			if (ret)
				return ret;
		}
	}

	return 0;
}

int iris_destroy_internal_buffer(struct iris_inst *inst, struct iris_buffer *buffer)
{
	struct iris_core *core = inst->core;

	list_del(&buffer->list);
	dma_free_attrs(core->dev, buffer->buffer_size, buffer->kvaddr,
		       buffer->device_addr, buffer->dma_attrs);
	kfree(buffer);

	return 0;
}
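
/*
 * Free internal buffers for the given plane. @force selects between the
 * close path (iris_destroy_all_internal_buffers), which frees everything,
 * and the streaming path (iris_destroy_dequeued_internal_buffers), which
 * keeps buffers the firmware has not yet returned.
 */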
static int iris_destroy_internal_buffers(struct iris_inst *inst, u32 plane, bool force)
{
	const struct iris_platform_data *platform_data = inst->core->iris_platform_data;
	struct iris_buffer *buf, *next;
	struct iris_buffers *buffers;
	const u32 *internal_buf_type;
	u32 i, len;
	int ret;

	if (V4L2_TYPE_IS_OUTPUT(plane)) {
		internal_buf_type = platform_data->dec_ip_int_buf_tbl;
		len = platform_data->dec_ip_int_buf_tbl_size;
	} else {
		internal_buf_type = platform_data->dec_op_int_buf_tbl;
		len = platform_data->dec_op_int_buf_tbl_size;
	}

	for (i = 0; i < len; i++) {
		buffers = &inst->buffers[internal_buf_type[i]];
		list_for_each_entry_safe(buf, next, &buffers->list, list) {
			/*
			 * During stream on, skip destroying an internal (DPB)
			 * buffer that the firmware has not yet returned.
			 * During close, destroy all buffers regardless.
			 */
			if (!force && buf->attr & BUF_ATTR_QUEUED)
				continue;

			ret = iris_destroy_internal_buffer(inst, buf);
			if (ret)
				return ret;
		}
	}

	return 0;
}

int iris_destroy_all_internal_buffers(struct iris_inst *inst, u32 plane)
{
	return iris_destroy_internal_buffers(inst, plane, true);
}

int iris_destroy_dequeued_internal_buffers(struct iris_inst *inst, u32 plane)
{
	return iris_destroy_internal_buffers(inst, plane, false);
}

static int iris_release_internal_buffers(struct iris_inst *inst,
					 enum iris_buffer_type buffer_type)
{
	const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
	struct iris_buffers *buffers = &inst->buffers[buffer_type];
	struct iris_buffer *buffer, *next;
	int ret;

	list_for_each_entry_safe(buffer, next, &buffers->list, list) {
		if (buffer->attr & BUF_ATTR_PENDING_RELEASE)
			continue;
		if (!(buffer->attr & BUF_ATTR_QUEUED))
			continue;
		ret = hfi_ops->session_release_buf(inst, buffer);
		if (ret)
			return ret;
		buffer->attr |= BUF_ATTR_PENDING_RELEASE;
	}

	return 0;
}

static int iris_release_input_internal_buffers(struct iris_inst *inst)
{
	const struct iris_platform_data *platform_data = inst->core->iris_platform_data;
	const u32 *internal_buf_type;
	u32 internal_buffer_count, i;
	int ret;

	internal_buf_type = platform_data->dec_ip_int_buf_tbl;
	internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;

	for (i = 0; i < internal_buffer_count; i++) {
		ret = iris_release_internal_buffers(inst, internal_buf_type[i]);
		if (ret)
			return ret;
	}

	return 0;
}

int iris_alloc_and_queue_persist_bufs(struct iris_inst *inst)
{
	struct iris_buffers *buffers = &inst->buffers[BUF_PERSIST];
	struct iris_buffer *buffer, *next;
	int ret;
	u32 i;

	if (!list_empty(&buffers->list))
		return 0;

	iris_fill_internal_buf_info(inst, BUF_PERSIST);

	for (i = 0; i < buffers->min_count; i++) {
		ret = iris_create_internal_buffer(inst, BUF_PERSIST, i);
		if (ret)
			return ret;
	}

	list_for_each_entry_safe(buffer, next, &buffers->list, list) {
		if (buffer->attr & BUF_ATTR_PENDING_RELEASE)
			continue;
		if (buffer->attr & BUF_ATTR_QUEUED)
			continue;
		ret = iris_queue_buffer(inst, buffer);
		if (ret)
			return ret;
	}

	return 0;
}

int iris_alloc_and_queue_input_int_bufs(struct iris_inst *inst)
{
	int ret;

	iris_get_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);

	ret = iris_release_input_internal_buffers(inst);
	if (ret)
		return ret;

	ret = iris_create_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
	if (ret)
		return ret;

	return iris_queue_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
}
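
/*
 * Queue to the firmware all client buffers of @buf_type that are still
 * marked BUF_ATTR_DEFERRED. Power scaling is refreshed before the buffers
 * are queued.
 */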
int iris_queue_deferred_buffers(struct iris_inst *inst, enum iris_buffer_type buf_type)
{
	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
	struct v4l2_m2m_buffer *buffer, *n;
	struct iris_buffer *buf;
	int ret;

	iris_scale_power(inst);

	if (buf_type == BUF_INPUT) {
		v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buffer, n) {
			buf = to_iris_buffer(&buffer->vb);
			if (!(buf->attr & BUF_ATTR_DEFERRED))
				continue;
			ret = iris_queue_buffer(inst, buf);
			if (ret)
				return ret;
		}
	} else {
		v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buffer, n) {
			buf = to_iris_buffer(&buffer->vb);
			if (!(buf->attr & BUF_ATTR_DEFERRED))
				continue;
			ret = iris_queue_buffer(inst, buf);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void iris_vb2_queue_error(struct iris_inst *inst)
{
	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
	struct vb2_queue *q;

	q = v4l2_m2m_get_src_vq(m2m_ctx);
	vb2_queue_error(q);
	q = v4l2_m2m_get_dst_vq(m2m_ctx);
	vb2_queue_error(q);
}

static struct vb2_v4l2_buffer *
iris_helper_find_buf(struct iris_inst *inst, u32 type, u32 idx)
{
	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;

	if (V4L2_TYPE_IS_OUTPUT(type))
		return v4l2_m2m_src_buf_remove_by_idx(m2m_ctx, idx);
	else
		return v4l2_m2m_dst_buf_remove_by_idx(m2m_ctx, idx);
}

static void iris_get_ts_metadata(struct iris_inst *inst, u64 timestamp_ns,
				 struct vb2_v4l2_buffer *vbuf)
{
	u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	u32 i;

	for (i = 0; i < ARRAY_SIZE(inst->tss); ++i) {
		if (inst->tss[i].ts_ns != timestamp_ns)
			continue;

		vbuf->flags &= ~mask;
		vbuf->flags |= inst->tss[i].flags;
		vbuf->timecode = inst->tss[i].tc;
		return;
	}

	vbuf->flags &= ~mask;
	vbuf->flags |= inst->tss[inst->metadata_idx].flags;
	vbuf->timecode = inst->tss[inst->metadata_idx].tc;
}

int iris_vb2_buffer_done(struct iris_inst *inst, struct iris_buffer *buf)
{
	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
	struct vb2_v4l2_buffer *vbuf;
	struct vb2_buffer *vb2;
	u32 type, state;

	switch (buf->type) {
	case BUF_INPUT:
		type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
		break;
	case BUF_OUTPUT:
		type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
		break;
	default:
		return 0; /* Internal DPB Buffers */
	}

	vbuf = iris_helper_find_buf(inst, type, buf->index);
	if (!vbuf)
		return -EINVAL;

	vb2 = &vbuf->vb2_buf;

	if (buf->flags & V4L2_BUF_FLAG_ERROR) {
		state = VB2_BUF_STATE_ERROR;
		vb2_set_plane_payload(vb2, 0, 0);
		vb2->timestamp = 0;
		v4l2_m2m_buf_done(vbuf, state);
		return 0;
	}

	vbuf->flags |= buf->flags;

	if (V4L2_TYPE_IS_CAPTURE(type)) {
		vb2_set_plane_payload(vb2, 0, buf->data_size);
		vbuf->sequence = inst->sequence_cap++;
		iris_get_ts_metadata(inst, buf->timestamp, vbuf);
	} else {
		vbuf->sequence = inst->sequence_out++;
	}

	if (vbuf->flags & V4L2_BUF_FLAG_LAST) {
		if (!v4l2_m2m_has_stopped(m2m_ctx)) {
			const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };

			v4l2_event_queue_fh(&inst->fh, &ev);
			v4l2_m2m_mark_stopped(m2m_ctx);
		}
		inst->last_buffer_dequeued = true;
	}

	state = VB2_BUF_STATE_DONE;
	vb2->timestamp = buf->timestamp;
	v4l2_m2m_buf_done(vbuf, state);

	return 0;
}