// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-vmalloc.h>
#include "vpu.h"
#include "vpu_core.h"
#include "vpu_v4l2.h"
#include "vpu_msgs.h"
#include "vpu_helpers.h"

void vpu_inst_lock(struct vpu_inst *inst)
{
	mutex_lock(&inst->lock);
}

void vpu_inst_unlock(struct vpu_inst *inst)
{
	mutex_unlock(&inst->lock);
}

dma_addr_t vpu_get_vb_phy_addr(struct vb2_buffer *vb, u32 plane_no)
{
	if (plane_no >= vb->num_planes)
		return 0;
	return vb2_dma_contig_plane_dma_addr(vb, plane_no) +
			vb->planes[plane_no].data_offset;
}

unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no)
{
	if (plane_no >= vb->num_planes)
		return 0;
	return vb2_plane_size(vb, plane_no) - vb->planes[plane_no].data_offset;
}

void vpu_set_buffer_state(struct vb2_v4l2_buffer *vbuf, unsigned int state)
{
	struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);

	vpu_buf->state = state;
}

unsigned int vpu_get_buffer_state(struct vb2_v4l2_buffer *vbuf)
{
	struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);

	return vpu_buf->state;
}

void vpu_set_buffer_average_qp(struct vb2_v4l2_buffer *vbuf, u32 qp)
{
	struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);

	vpu_buf->average_qp = qp;
}

void vpu_v4l2_set_error(struct vpu_inst *inst)
{
	vpu_inst_lock(inst);
	dev_err(inst->dev, "an error occurred in the codec\n");
	if (inst->fh.m2m_ctx) {
		vb2_queue_error(v4l2_m2m_get_src_vq(inst->fh.m2m_ctx));
		vb2_queue_error(v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx));
	}
	vpu_inst_unlock(inst);
}

int vpu_notify_eos(struct vpu_inst *inst)
{
	static const struct v4l2_event ev = {
		.id = 0,
		.type = V4L2_EVENT_EOS
	};

	vpu_trace(inst->dev, "[%d]\n", inst->id);
	v4l2_event_queue_fh(&inst->fh, &ev);

	return 0;
}

int vpu_notify_source_change(struct vpu_inst *inst)
{
	static const struct v4l2_event ev = {
		.id = 0,
		.type = V4L2_EVENT_SOURCE_CHANGE,
		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION
	};

	vpu_trace(inst->dev, "[%d]\n", inst->id);
	v4l2_event_queue_fh(&inst->fh, &ev);
	return 0;
}

int vpu_set_last_buffer_dequeued(struct vpu_inst *inst, bool eos)
{
	struct vb2_queue *q;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
	if (!list_empty(&q->done_list))
		return -EINVAL;

	if (q->last_buffer_dequeued)
		return 0;
	vpu_trace(inst->dev, "last buffer dequeued\n");
	q->last_buffer_dequeued = true;
	wake_up(&q->done_wq);
	if (eos)
		vpu_notify_eos(inst);
	return 0;
}

bool vpu_is_source_empty(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;

	if (!inst->fh.m2m_ctx)
		return true;
	v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
		if (vpu_get_buffer_state(&buf->vb) == VPU_BUF_STATE_IDLE)
			return false;
	}
	return true;
}
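
/*
 * Pick the driver's format descriptor for the requested pixelformat,
 * falling back to the first supported format if it is not recognized.
 */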
static int vpu_init_format(struct vpu_inst *inst, struct vpu_format *fmt)
{
	const struct vpu_format *info;

	info = vpu_helper_find_format(inst, fmt->type, fmt->pixfmt);
	if (!info) {
		info = vpu_helper_enum_format(inst, fmt->type, 0);
		if (!info)
			return -EINVAL;
	}
	memcpy(fmt, info, sizeof(*fmt));

	return 0;
}

static int vpu_calc_fmt_bytesperline(struct v4l2_format *f, struct vpu_format *fmt)
{
	struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
	int i;

	if (fmt->flags & V4L2_FMT_FLAG_COMPRESSED) {
		for (i = 0; i < fmt->comp_planes; i++)
			fmt->bytesperline[i] = 0;
		return 0;
	}
	if (pixmp->num_planes == fmt->comp_planes) {
		for (i = 0; i < fmt->comp_planes; i++)
			fmt->bytesperline[i] = pixmp->plane_fmt[i].bytesperline;
		return 0;
	}
	if (pixmp->num_planes > 1)
		return -EINVAL;

	/*
	 * the amphion vpu only supports nv12 and nv12 tiled,
	 * so the bytesperline of luma and chroma should be the same
	 */
	for (i = 0; i < fmt->comp_planes; i++)
		fmt->bytesperline[i] = pixmp->plane_fmt[0].bytesperline;

	return 0;
}

static int vpu_calc_fmt_sizeimage(struct vpu_inst *inst, struct vpu_format *fmt)
{
	u32 stride = 1;
	int i;

	if (!(fmt->flags & V4L2_FMT_FLAG_COMPRESSED)) {
		const struct vpu_core_resources *res = vpu_get_resource(inst);

		if (res)
			stride = res->stride;
	}

	for (i = 0; i < fmt->comp_planes; i++) {
		fmt->sizeimage[i] = vpu_helper_get_plane_size(fmt->pixfmt,
							      fmt->width,
							      fmt->height,
							      i,
							      stride,
							      fmt->field != V4L2_FIELD_NONE ? 1 : 0,
							      &fmt->bytesperline[i]);
		fmt->sizeimage[i] = max_t(u32, fmt->sizeimage[i], PAGE_SIZE);
		if (fmt->flags & V4L2_FMT_FLAG_COMPRESSED) {
			fmt->sizeimage[i] = clamp_val(fmt->sizeimage[i], SZ_128K, SZ_8M);
			fmt->bytesperline[i] = 0;
		}
	}

	return 0;
}

u32 vpu_get_fmt_plane_size(struct vpu_format *fmt, u32 plane_no)
{
	u32 size;
	int i;

	if (plane_no >= fmt->mem_planes)
		return 0;

	if (fmt->comp_planes == fmt->mem_planes)
		return fmt->sizeimage[plane_no];
	if (plane_no < fmt->mem_planes - 1)
		return fmt->sizeimage[plane_no];

	size = fmt->sizeimage[plane_no];
	for (i = fmt->mem_planes; i < fmt->comp_planes; i++)
		size += fmt->sizeimage[i];

	return size;
}

int vpu_try_fmt_common(struct vpu_inst *inst, struct v4l2_format *f, struct vpu_format *fmt)
{
	struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
	int i;
	int ret;

	fmt->pixfmt = pixmp->pixelformat;
	fmt->type = f->type;
	ret = vpu_init_format(inst, fmt);
	if (ret < 0)
		return ret;

	fmt->width = pixmp->width;
	fmt->height = pixmp->height;
	if (fmt->width)
		fmt->width = vpu_helper_valid_frame_width(inst, fmt->width);
	if (fmt->height)
		fmt->height = vpu_helper_valid_frame_height(inst, fmt->height);
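	/* let the driver pick progressive when userspace requests V4L2_FIELD_ANY */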
	fmt->field = pixmp->field == V4L2_FIELD_ANY ? V4L2_FIELD_NONE : pixmp->field;
	vpu_calc_fmt_bytesperline(f, fmt);
	vpu_calc_fmt_sizeimage(inst, fmt);
	if ((fmt->flags & V4L2_FMT_FLAG_COMPRESSED) && pixmp->plane_fmt[0].sizeimage)
		fmt->sizeimage[0] = clamp_val(pixmp->plane_fmt[0].sizeimage, SZ_128K, SZ_8M);

	pixmp->pixelformat = fmt->pixfmt;
	pixmp->width = fmt->width;
	pixmp->height = fmt->height;
	pixmp->flags = fmt->flags;
	pixmp->num_planes = fmt->mem_planes;
	pixmp->field = fmt->field;
	memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
	for (i = 0; i < pixmp->num_planes; i++) {
		pixmp->plane_fmt[i].bytesperline = fmt->bytesperline[i];
		pixmp->plane_fmt[i].sizeimage = vpu_get_fmt_plane_size(fmt, i);
		memset(pixmp->plane_fmt[i].reserved, 0, sizeof(pixmp->plane_fmt[i].reserved));
	}

	return 0;
}

static bool vpu_check_ready(struct vpu_inst *inst, u32 type)
{
	if (!inst)
		return false;
	if (inst->state == VPU_CODEC_STATE_DEINIT || inst->id < 0)
		return false;
	if (!inst->ops->check_ready)
		return true;
	return call_vop(inst, check_ready, type);
}

int vpu_process_output_buffer(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (!vpu_check_ready(inst, inst->out_format.type))
		return -EINVAL;

	v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
		vbuf = &buf->vb;
		if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_IDLE)
			break;
		vbuf = NULL;
	}

	if (!vbuf)
		return -EINVAL;

	dev_dbg(inst->dev, "[%d]frame id = %d / %d\n",
		inst->id, vbuf->sequence, inst->sequence);
	return call_vop(inst, process_output, &vbuf->vb2_buf);
}

int vpu_process_capture_buffer(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (!vpu_check_ready(inst, inst->cap_format.type))
		return -EINVAL;

	v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
		vbuf = &buf->vb;
		if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_IDLE)
			break;
		vbuf = NULL;
	}
	if (!vbuf)
		return -EINVAL;

	return call_vop(inst, process_capture, &vbuf->vb2_buf);
}

struct vb2_v4l2_buffer *vpu_next_src_buf(struct vpu_inst *inst)
{
	struct vb2_v4l2_buffer *src_buf = NULL;

	if (!inst->fh.m2m_ctx)
		return NULL;

	src_buf = v4l2_m2m_next_src_buf(inst->fh.m2m_ctx);
	if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
		return NULL;

	while (vpu_vb_is_codecconfig(src_buf)) {
		v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx);
		vpu_set_buffer_state(src_buf, VPU_BUF_STATE_IDLE);
		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);

		src_buf = v4l2_m2m_next_src_buf(inst->fh.m2m_ctx);
		if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
			return NULL;
	}

	return src_buf;
}

void vpu_skip_frame(struct vpu_inst *inst, int count)
{
	struct vb2_v4l2_buffer *src_buf;
	enum vb2_buffer_state state;
	int i = 0;

	if (count <= 0 || !inst->fh.m2m_ctx)
		return;

	while (i < count) {
		src_buf = v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx);
		if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
			return;
		if (vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_DECODED)
			state = VB2_BUF_STATE_DONE;
		else
			state = VB2_BUF_STATE_ERROR;
		i++;
		vpu_set_buffer_state(src_buf, VPU_BUF_STATE_IDLE);
		v4l2_m2m_buf_done(src_buf, state);
	}
}

struct vb2_v4l2_buffer *vpu_find_buf_by_sequence(struct vpu_inst *inst, u32 type, u32 sequence)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return NULL;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->sequence == sequence)
				break;
			vbuf = NULL;
		}
	} else {
		v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->sequence == sequence)
				break;
			vbuf = NULL;
		}
	}

	return vbuf;
}

struct vb2_v4l2_buffer *vpu_find_buf_by_idx(struct vpu_inst *inst, u32 type, u32 idx)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return NULL;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->vb2_buf.index == idx)
				break;
			vbuf = NULL;
		}
	} else {
		v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->vb2_buf.index == idx)
				break;
			vbuf = NULL;
		}
	}

	return vbuf;
}

int vpu_get_num_buffers(struct vpu_inst *inst, u32 type)
{
	struct vb2_queue *q;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (V4L2_TYPE_IS_OUTPUT(type))
		q = v4l2_m2m_get_src_vq(inst->fh.m2m_ctx);
	else
		q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);

	return vb2_get_num_buffers(q);
}

static void vpu_m2m_device_run(void *priv)
{
}

static void vpu_m2m_job_abort(void *priv)
{
	struct vpu_inst *inst = priv;
	struct v4l2_m2m_ctx *m2m_ctx = inst->fh.m2m_ctx;

	v4l2_m2m_job_finish(m2m_ctx->m2m_dev, m2m_ctx);
}

static const struct v4l2_m2m_ops vpu_m2m_ops = {
	.device_run = vpu_m2m_device_run,
	.job_abort = vpu_m2m_job_abort
};

static int vpu_vb2_queue_setup(struct vb2_queue *vq,
			       unsigned int *buf_count,
			       unsigned int *plane_count,
			       unsigned int psize[],
			       struct device *allocators[])
{
	struct vpu_inst *inst = vb2_get_drv_priv(vq);
	struct vpu_format *cur_fmt;
	int i;

	cur_fmt = vpu_get_format(inst, vq->type);

	if (*plane_count) {
		if (*plane_count != cur_fmt->mem_planes)
			return -EINVAL;
		for (i = 0; i < cur_fmt->mem_planes; i++) {
			if (psize[i] < vpu_get_fmt_plane_size(cur_fmt, i))
				return -EINVAL;
		}
		return 0;
	}

	if (V4L2_TYPE_IS_OUTPUT(vq->type))
		*buf_count = max_t(unsigned int, *buf_count, inst->min_buffer_out);
	else
		*buf_count = max_t(unsigned int, *buf_count, inst->min_buffer_cap);
	*plane_count = cur_fmt->mem_planes;
	for (i = 0; i < cur_fmt->mem_planes; i++)
		psize[i] = vpu_get_fmt_plane_size(cur_fmt, i);

	if (V4L2_TYPE_IS_OUTPUT(vq->type) && inst->state == VPU_CODEC_STATE_SEEK) {
		vpu_trace(inst->dev, "reinit when VIDIOC_REQBUFS(OUTPUT, 0)\n");
		call_void_vop(inst, release);
	}

	return 0;
}

static int vpu_vb2_buf_init(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vpu_set_buffer_state(vbuf, VPU_BUF_STATE_IDLE);
	return 0;
}

static int vpu_vb2_buf_out_validate(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vbuf->field = V4L2_FIELD_NONE;

	return 0;
}

static int vpu_vb2_buf_prepare(struct vb2_buffer *vb)
{
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_format *cur_fmt;
	u32 i;

	cur_fmt = vpu_get_format(inst, vb->type);
	for (i = 0; i < cur_fmt->mem_planes; i++) {
		if (vpu_get_vb_length(vb, i) < vpu_get_fmt_plane_size(cur_fmt, i)) {
			dev_dbg(inst->dev, "[%d] %s buf[%d] is invalid\n",
				inst->id, vpu_type_name(vb->type), vb->index);
			vpu_set_buffer_state(vbuf, VPU_BUF_STATE_ERROR);
		}
	}

	return 0;
}

static void vpu_vb2_buf_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_queue *q = vb->vb2_queue;

	if (V4L2_TYPE_IS_CAPTURE(vb->type)) {
		struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);
		struct v4l2_ctrl *ctrl = v4l2_ctrl_find(&inst->ctrl_handler,
							V4L2_CID_MPEG_VIDEO_AVERAGE_QP);

		if (ctrl)
			v4l2_ctrl_s_ctrl(ctrl, vpu_buf->average_qp);
	}

	if (vbuf->flags & V4L2_BUF_FLAG_LAST)
		vpu_notify_eos(inst);

	if (list_empty(&q->done_list))
		call_void_vop(inst, on_queue_empty, q->type);
}

void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *buf;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		while ((buf = v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx))) {
			vpu_set_buffer_state(buf, VPU_BUF_STATE_IDLE);
			v4l2_m2m_buf_done(buf, state);
		}
	} else {
		while ((buf = v4l2_m2m_dst_buf_remove(inst->fh.m2m_ctx))) {
			vpu_set_buffer_state(buf, VPU_BUF_STATE_IDLE);
			v4l2_m2m_buf_done(buf, state);
		}
	}
}

static int vpu_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct vpu_inst *inst = vb2_get_drv_priv(q);
	struct vpu_format *fmt = vpu_get_format(inst, q->type);
	int ret;

	vpu_inst_unlock(inst);
	ret = vpu_inst_register(inst);
	vpu_inst_lock(inst);
	if (ret) {
		vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_QUEUED);
		return ret;
	}

	vpu_trace(inst->dev, "[%d] %s %c%c%c%c %dx%d %u(%u) %u(%u) %u(%u) %d\n",
		  inst->id, vpu_type_name(q->type),
		  fmt->pixfmt,
		  fmt->pixfmt >> 8,
		  fmt->pixfmt >> 16,
		  fmt->pixfmt >> 24,
		  fmt->width, fmt->height,
		  fmt->sizeimage[0], fmt->bytesperline[0],
		  fmt->sizeimage[1], fmt->bytesperline[1],
		  fmt->sizeimage[2], fmt->bytesperline[2],
		  vb2_get_num_buffers(q));
	vb2_clear_last_buffer_dequeued(q);
	ret = call_vop(inst, start, q->type);
	if (ret)
		vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_QUEUED);

	return ret;
}

static void vpu_vb2_stop_streaming(struct vb2_queue *q)
{
	struct vpu_inst *inst = vb2_get_drv_priv(q);

	vpu_trace(inst->dev, "[%d] %s\n", inst->id, vpu_type_name(q->type));

	call_void_vop(inst, stop, q->type);
	vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_ERROR);
	if (V4L2_TYPE_IS_OUTPUT(q->type))
		inst->sequence = 0;
}

static void vpu_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);

	if (V4L2_TYPE_IS_OUTPUT(vb->type))
		vbuf->sequence = inst->sequence++;
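
	/* queue to the m2m framework, then try to feed both directions */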
	v4l2_m2m_buf_queue(inst->fh.m2m_ctx, vbuf);
	vpu_process_output_buffer(inst);
	vpu_process_capture_buffer(inst);
}

static const struct vb2_ops vpu_vb2_ops = {
	.queue_setup = vpu_vb2_queue_setup,
	.buf_init = vpu_vb2_buf_init,
	.buf_out_validate = vpu_vb2_buf_out_validate,
	.buf_prepare = vpu_vb2_buf_prepare,
	.buf_finish = vpu_vb2_buf_finish,
	.start_streaming = vpu_vb2_start_streaming,
	.stop_streaming = vpu_vb2_stop_streaming,
	.buf_queue = vpu_vb2_buf_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};

static int vpu_m2m_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
{
	struct vpu_inst *inst = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	inst->out_format.type = src_vq->type;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->ops = &vpu_vb2_ops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	if (inst->type == VPU_CORE_TYPE_DEC && inst->use_stream_buffer)
		src_vq->mem_ops = &vb2_vmalloc_memops;
	src_vq->drv_priv = inst;
	src_vq->buf_struct_size = sizeof(struct vpu_vb2_buffer);
	src_vq->min_queued_buffers = 1;
	src_vq->dev = inst->vpu->dev;
	src_vq->lock = &inst->lock;
	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	inst->cap_format.type = dst_vq->type;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->ops = &vpu_vb2_ops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	if (inst->type == VPU_CORE_TYPE_ENC && inst->use_stream_buffer)
		dst_vq->mem_ops = &vb2_vmalloc_memops;
	dst_vq->drv_priv = inst;
	dst_vq->buf_struct_size = sizeof(struct vpu_vb2_buffer);
	dst_vq->min_queued_buffers = 1;
	dst_vq->dev = inst->vpu->dev;
	dst_vq->lock = &inst->lock;
	ret = vb2_queue_init(dst_vq);
	if (ret) {
		vb2_queue_release(src_vq);
		return ret;
	}

	return 0;
}

static int vpu_v4l2_release(struct vpu_inst *inst)
{
	vpu_trace(inst->vpu->dev, "%p\n", inst);

	vpu_release_core(inst->core);
	put_device(inst->dev);

	if (inst->workqueue) {
		cancel_work_sync(&inst->msg_work);
		destroy_workqueue(inst->workqueue);
		inst->workqueue = NULL;
	}

	v4l2_ctrl_handler_free(&inst->ctrl_handler);
	mutex_destroy(&inst->lock);
	v4l2_fh_del(&inst->fh);
	v4l2_fh_exit(&inst->fh);

	call_void_vop(inst, cleanup);

	return 0;
}

int vpu_v4l2_open(struct file *file, struct vpu_inst *inst)
{
	struct vpu_dev *vpu = video_drvdata(file);
	struct vpu_func *func;
	int ret = 0;

	if (!inst || !inst->ops)
		return -EINVAL;

	if (inst->type == VPU_CORE_TYPE_ENC)
		func = &vpu->encoder;
	else
		func = &vpu->decoder;

	atomic_set(&inst->ref_count, 0);
	atomic_long_set(&inst->last_response_cmd, 0);
	vpu_inst_get(inst);
	inst->vpu = vpu;
	inst->core = vpu_request_core(vpu, inst->type);
	if (inst->core)
		inst->dev = get_device(inst->core->dev);
	mutex_init(&inst->lock);
	INIT_LIST_HEAD(&inst->cmd_q);
	inst->id = VPU_INST_NULL_ID;
	inst->release = vpu_v4l2_release;
	inst->pid = current->pid;
	inst->tgid = current->tgid;
	inst->min_buffer_cap = 2;
	inst->min_buffer_out = 2;
	v4l2_fh_init(&inst->fh, func->vfd);
	v4l2_fh_add(&inst->fh);
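
	/* let the codec-specific ops set up its V4L2 control handler */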
	ret = call_vop(inst, ctrl_init);
	if (ret)
		goto error;

	inst->fh.m2m_ctx = v4l2_m2m_ctx_init(func->m2m_dev, inst, vpu_m2m_queue_init);
	if (IS_ERR(inst->fh.m2m_ctx)) {
		dev_err(vpu->dev, "v4l2_m2m_ctx_init fail\n");
		ret = PTR_ERR(inst->fh.m2m_ctx);
		goto error;
	}

	inst->fh.ctrl_handler = &inst->ctrl_handler;
	file->private_data = &inst->fh;
	inst->state = VPU_CODEC_STATE_DEINIT;
	inst->workqueue = alloc_ordered_workqueue("vpu_inst", WQ_MEM_RECLAIM);
	if (inst->workqueue) {
		INIT_WORK(&inst->msg_work, vpu_inst_run_work);
		ret = kfifo_init(&inst->msg_fifo,
				 inst->msg_buffer,
				 rounddown_pow_of_two(sizeof(inst->msg_buffer)));
		if (ret) {
			destroy_workqueue(inst->workqueue);
			inst->workqueue = NULL;
		}
	}
	vpu_trace(vpu->dev, "tgid = %d, pid = %d, type = %s, inst = %p\n",
		  inst->tgid, inst->pid, vpu_core_type_desc(inst->type), inst);

	return 0;
error:
	vpu_inst_put(inst);
	return ret;
}

int vpu_v4l2_close(struct file *file)
{
	struct vpu_dev *vpu = video_drvdata(file);
	struct vpu_inst *inst = to_inst(file);

	vpu_trace(vpu->dev, "tgid = %d, pid = %d, inst = %p\n", inst->tgid, inst->pid, inst);

	vpu_inst_lock(inst);
	if (inst->fh.m2m_ctx) {
		v4l2_m2m_ctx_release(inst->fh.m2m_ctx);
		inst->fh.m2m_ctx = NULL;
	}
	call_void_vop(inst, release);
	vpu_inst_unlock(inst);

	vpu_inst_unregister(inst);
	vpu_inst_put(inst);

	return 0;
}

int vpu_add_func(struct vpu_dev *vpu, struct vpu_func *func)
{
	struct video_device *vfd;
	int ret;

	if (!vpu || !func)
		return -EINVAL;

	if (func->vfd)
		return 0;

	func->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
	if (IS_ERR(func->m2m_dev)) {
		dev_err(vpu->dev, "v4l2_m2m_init fail\n");
		func->vfd = NULL;
		return PTR_ERR(func->m2m_dev);
	}

	vfd = video_device_alloc();
	if (!vfd) {
		v4l2_m2m_release(func->m2m_dev);
		dev_err(vpu->dev, "alloc vpu video device fail\n");
		return -ENOMEM;
	}
	vfd->release = video_device_release;
	vfd->vfl_dir = VFL_DIR_M2M;
	vfd->v4l2_dev = &vpu->v4l2_dev;
	vfd->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
	if (func->type == VPU_CORE_TYPE_ENC) {
		strscpy(vfd->name, "amphion-vpu-encoder", sizeof(vfd->name));
		vfd->fops = venc_get_fops();
		vfd->ioctl_ops = venc_get_ioctl_ops();
	} else {
		strscpy(vfd->name, "amphion-vpu-decoder", sizeof(vfd->name));
		vfd->fops = vdec_get_fops();
		vfd->ioctl_ops = vdec_get_ioctl_ops();
	}

	ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
	if (ret) {
		video_device_release(vfd);
		v4l2_m2m_release(func->m2m_dev);
		return ret;
	}
	video_set_drvdata(vfd, vpu);
	func->vfd = vfd;

	ret = v4l2_m2m_register_media_controller(func->m2m_dev, func->vfd, func->function);
	if (ret) {
		v4l2_m2m_release(func->m2m_dev);
		func->m2m_dev = NULL;
		video_unregister_device(func->vfd);
		func->vfd = NULL;
		return ret;
	}

	return 0;
}

void vpu_remove_func(struct vpu_func *func)
{
	if (!func)
		return;

	if (func->m2m_dev) {
		v4l2_m2m_unregister_media_controller(func->m2m_dev);
		v4l2_m2m_release(func->m2m_dev);
		func->m2m_dev = NULL;
	}
	if (func->vfd) {
		video_unregister_device(func->vfd);
		func->vfd = NULL;
	}
}