// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-vmalloc.h>
#include "vpu.h"
#include "vpu_core.h"
#include "vpu_v4l2.h"
#include "vpu_msgs.h"
#include "vpu_helpers.h"

void vpu_inst_lock(struct vpu_inst *inst)
{
	mutex_lock(&inst->lock);
}

void vpu_inst_unlock(struct vpu_inst *inst)
{
	mutex_unlock(&inst->lock);
}

dma_addr_t vpu_get_vb_phy_addr(struct vb2_buffer *vb, u32 plane_no)
{
	if (plane_no >= vb->num_planes)
		return 0;
	return vb2_dma_contig_plane_dma_addr(vb, plane_no) +
			vb->planes[plane_no].data_offset;
}

unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no)
{
	if (plane_no >= vb->num_planes)
		return 0;
	return vb2_plane_size(vb, plane_no) - vb->planes[plane_no].data_offset;
}

void vpu_set_buffer_state(struct vb2_v4l2_buffer *vbuf, unsigned int state)
{
	struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);

	vpu_buf->state = state;
}

unsigned int vpu_get_buffer_state(struct vb2_v4l2_buffer *vbuf)
{
	struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);

	return vpu_buf->state;
}

void vpu_v4l2_set_error(struct vpu_inst *inst)
{
	struct vb2_queue *src_q;
	struct vb2_queue *dst_q;

	vpu_inst_lock(inst);
	dev_err(inst->dev, "an error occurred in the codec\n");
	if (inst->fh.m2m_ctx) {
		src_q = v4l2_m2m_get_src_vq(inst->fh.m2m_ctx);
		dst_q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
		if (src_q)
			src_q->error = 1;
		if (dst_q)
			dst_q->error = 1;
	}
	vpu_inst_unlock(inst);
}

int vpu_notify_eos(struct vpu_inst *inst)
{
	static const struct v4l2_event ev = {
		.id = 0,
		.type = V4L2_EVENT_EOS
	};

	vpu_trace(inst->dev, "[%d]\n", inst->id);
	v4l2_event_queue_fh(&inst->fh, &ev);

	return 0;
}

int vpu_notify_source_change(struct vpu_inst *inst)
{
	static const struct v4l2_event ev = {
		.id = 0,
		.type = V4L2_EVENT_SOURCE_CHANGE,
		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION
	};

	vpu_trace(inst->dev, "[%d]\n", inst->id);
	v4l2_event_queue_fh(&inst->fh, &ev);
	return 0;
}

int vpu_set_last_buffer_dequeued(struct vpu_inst *inst)
{
	struct vb2_queue *q;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
	if (!list_empty(&q->done_list))
		return -EINVAL;

	if (q->last_buffer_dequeued)
		return 0;
	vpu_trace(inst->dev, "last buffer dequeued\n");
	q->last_buffer_dequeued = true;
	wake_up(&q->done_wq);
	vpu_notify_eos(inst);
	return 0;
}
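
/*
 * Negotiate a format with userspace: if the requested pixelformat is
 * not supported, fall back to the first format the instance can
 * enumerate, then clamp width/height to valid frame dimensions and
 * compute per-plane bytesperline/sizeimage (never shrinking values
 * the caller already supplied).
 */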
const struct vpu_format *vpu_try_fmt_common(struct vpu_inst *inst, struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
	u32 type = f->type;
	u32 stride = 1;
	u32 bytesperline;
	u32 sizeimage;
	const struct vpu_format *fmt;
	const struct vpu_core_resources *res;
	int i;

	fmt = vpu_helper_find_format(inst, type, pixmp->pixelformat);
	if (!fmt) {
		fmt = vpu_helper_enum_format(inst, type, 0);
		if (!fmt)
			return NULL;
		pixmp->pixelformat = fmt->pixfmt;
	}

	res = vpu_get_resource(inst);
	if (res)
		stride = res->stride;
	if (pixmp->width)
		pixmp->width = vpu_helper_valid_frame_width(inst, pixmp->width);
	if (pixmp->height)
		pixmp->height = vpu_helper_valid_frame_height(inst, pixmp->height);
	pixmp->flags = fmt->flags;
	pixmp->num_planes = fmt->num_planes;
	if (pixmp->field == V4L2_FIELD_ANY)
		pixmp->field = V4L2_FIELD_NONE;
	for (i = 0; i < pixmp->num_planes; i++) {
		bytesperline = max_t(s32, pixmp->plane_fmt[i].bytesperline, 0);
		sizeimage = vpu_helper_get_plane_size(pixmp->pixelformat,
						      pixmp->width,
						      pixmp->height,
						      i,
						      stride,
						      pixmp->field > V4L2_FIELD_NONE ? 1 : 0,
						      &bytesperline);
		sizeimage = max_t(s32, pixmp->plane_fmt[i].sizeimage, sizeimage);
		pixmp->plane_fmt[i].bytesperline = bytesperline;
		pixmp->plane_fmt[i].sizeimage = sizeimage;
	}

	return fmt;
}

static bool vpu_check_ready(struct vpu_inst *inst, u32 type)
{
	if (!inst)
		return false;
	if (inst->state == VPU_CODEC_STATE_DEINIT || inst->id < 0)
		return false;
	if (!inst->ops->check_ready)
		return true;
	return call_vop(inst, check_ready, type);
}

int vpu_process_output_buffer(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (!vpu_check_ready(inst, inst->out_format.type))
		return -EINVAL;

	v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
		vbuf = &buf->vb;
		if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_IDLE)
			break;
		vbuf = NULL;
	}

	if (!vbuf)
		return -EINVAL;

	dev_dbg(inst->dev, "[%d] frame id = %d / %d\n",
		inst->id, vbuf->sequence, inst->sequence);
	return call_vop(inst, process_output, &vbuf->vb2_buf);
}

int vpu_process_capture_buffer(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (!vpu_check_ready(inst, inst->cap_format.type))
		return -EINVAL;

	v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
		vbuf = &buf->vb;
		if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_IDLE)
			break;
		vbuf = NULL;
	}
	if (!vbuf)
		return -EINVAL;

	return call_vop(inst, process_capture, &vbuf->vb2_buf);
}

struct vb2_v4l2_buffer *vpu_find_buf_by_sequence(struct vpu_inst *inst, u32 type, u32 sequence)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return NULL;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->sequence == sequence)
				break;
			vbuf = NULL;
		}
	} else {
		v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->sequence == sequence)
				break;
			vbuf = NULL;
		}
	}

	return vbuf;
}
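
/*
 * Like vpu_find_buf_by_sequence(), but matches on the vb2 buffer
 * index instead of the driver-assigned sequence number. Returns NULL
 * if no queued buffer on the corresponding queue has that index.
 */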
struct vb2_v4l2_buffer *vpu_find_buf_by_idx(struct vpu_inst *inst, u32 type, u32 idx)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return NULL;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->vb2_buf.index == idx)
				break;
			vbuf = NULL;
		}
	} else {
		v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->vb2_buf.index == idx)
				break;
			vbuf = NULL;
		}
	}

	return vbuf;
}

int vpu_get_num_buffers(struct vpu_inst *inst, u32 type)
{
	struct vb2_queue *q;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (V4L2_TYPE_IS_OUTPUT(type))
		q = v4l2_m2m_get_src_vq(inst->fh.m2m_ctx);
	else
		q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);

	return q->num_buffers;
}

static void vpu_m2m_device_run(void *priv)
{
}

static void vpu_m2m_job_abort(void *priv)
{
	struct vpu_inst *inst = priv;
	struct v4l2_m2m_ctx *m2m_ctx = inst->fh.m2m_ctx;

	v4l2_m2m_job_finish(m2m_ctx->m2m_dev, m2m_ctx);
}

static const struct v4l2_m2m_ops vpu_m2m_ops = {
	.device_run = vpu_m2m_device_run,
	.job_abort = vpu_m2m_job_abort
};

static int vpu_vb2_queue_setup(struct vb2_queue *vq,
			       unsigned int *buf_count,
			       unsigned int *plane_count,
			       unsigned int psize[],
			       struct device *allocators[])
{
	struct vpu_inst *inst = vb2_get_drv_priv(vq);
	struct vpu_format *cur_fmt;
	int i;

	cur_fmt = vpu_get_format(inst, vq->type);

	if (*plane_count) {
		if (*plane_count != cur_fmt->num_planes)
			return -EINVAL;
		for (i = 0; i < cur_fmt->num_planes; i++) {
			if (psize[i] < cur_fmt->sizeimage[i])
				return -EINVAL;
		}
		return 0;
	}

	*plane_count = cur_fmt->num_planes;
	for (i = 0; i < cur_fmt->num_planes; i++)
		psize[i] = cur_fmt->sizeimage[i];

	return 0;
}

static int vpu_vb2_buf_init(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vpu_set_buffer_state(vbuf, VPU_BUF_STATE_IDLE);
	return 0;
}

static int vpu_vb2_buf_out_validate(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vbuf->field = V4L2_FIELD_NONE;

	return 0;
}

static int vpu_vb2_buf_prepare(struct vb2_buffer *vb)
{
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_format *cur_fmt;
	u32 i;

	cur_fmt = vpu_get_format(inst, vb->type);
	for (i = 0; i < cur_fmt->num_planes; i++) {
		if (vpu_get_vb_length(vb, i) < cur_fmt->sizeimage[i]) {
			dev_dbg(inst->dev, "[%d] %s buf[%d] is invalid\n",
				inst->id, vpu_type_name(vb->type), vb->index);
			vpu_set_buffer_state(vbuf, VPU_BUF_STATE_ERROR);
		}
	}

	return 0;
}

static void vpu_vb2_buf_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_queue *q = vb->vb2_queue;

	if (vbuf->flags & V4L2_BUF_FLAG_LAST)
		vpu_notify_eos(inst);

	if (list_empty(&q->done_list))
		call_void_vop(inst, on_queue_empty, q->type);
}
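
/*
 * Hand every buffer still held by the driver back to videobuf2 in the
 * given state (VB2_BUF_STATE_QUEUED when start_streaming fails,
 * VB2_BUF_STATE_ERROR on stop_streaming), marking each one idle so it
 * can be reused.
 */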
void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *buf;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		while ((buf = v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx))) {
			vpu_set_buffer_state(buf, VPU_BUF_STATE_IDLE);
			v4l2_m2m_buf_done(buf, state);
		}
	} else {
		while ((buf = v4l2_m2m_dst_buf_remove(inst->fh.m2m_ctx))) {
			vpu_set_buffer_state(buf, VPU_BUF_STATE_IDLE);
			v4l2_m2m_buf_done(buf, state);
		}
	}
}

static int vpu_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct vpu_inst *inst = vb2_get_drv_priv(q);
	struct vpu_format *fmt = vpu_get_format(inst, q->type);
	int ret;

	vpu_inst_unlock(inst);
	ret = vpu_inst_register(inst);
	vpu_inst_lock(inst);
	if (ret) {
		vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_QUEUED);
		return ret;
	}

	vpu_trace(inst->dev, "[%d] %s %c%c%c%c %dx%d %u(%u) %u(%u) %u(%u) %d\n",
		  inst->id, vpu_type_name(q->type),
		  fmt->pixfmt,
		  fmt->pixfmt >> 8,
		  fmt->pixfmt >> 16,
		  fmt->pixfmt >> 24,
		  fmt->width, fmt->height,
		  fmt->sizeimage[0], fmt->bytesperline[0],
		  fmt->sizeimage[1], fmt->bytesperline[1],
		  fmt->sizeimage[2], fmt->bytesperline[2],
		  q->num_buffers);
	call_void_vop(inst, start, q->type);
	vb2_clear_last_buffer_dequeued(q);

	return 0;
}

static void vpu_vb2_stop_streaming(struct vb2_queue *q)
{
	struct vpu_inst *inst = vb2_get_drv_priv(q);

	vpu_trace(inst->dev, "[%d] %s\n", inst->id, vpu_type_name(q->type));

	call_void_vop(inst, stop, q->type);
	vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_ERROR);
	if (V4L2_TYPE_IS_OUTPUT(q->type))
		inst->sequence = 0;
}

static void vpu_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);

	if (V4L2_TYPE_IS_OUTPUT(vb->type))
		vbuf->sequence = inst->sequence++;

	v4l2_m2m_buf_queue(inst->fh.m2m_ctx, vbuf);
	vpu_process_output_buffer(inst);
	vpu_process_capture_buffer(inst);
}

static const struct vb2_ops vpu_vb2_ops = {
	.queue_setup = vpu_vb2_queue_setup,
	.buf_init = vpu_vb2_buf_init,
	.buf_out_validate = vpu_vb2_buf_out_validate,
	.buf_prepare = vpu_vb2_buf_prepare,
	.buf_finish = vpu_vb2_buf_finish,
	.start_streaming = vpu_vb2_start_streaming,
	.stop_streaming = vpu_vb2_stop_streaming,
	.buf_queue = vpu_vb2_buf_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};
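
/*
 * Set up the m2m source (OUTPUT) and destination (CAPTURE) queues.
 * Both default to DMA-contiguous memory; the bitstream side (decoder
 * OUTPUT, encoder CAPTURE) switches to vmalloc buffers when the
 * instance uses a separate stream buffer.
 */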
static int vpu_m2m_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
{
	struct vpu_inst *inst = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	inst->out_format.type = src_vq->type;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->ops = &vpu_vb2_ops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	if (inst->type == VPU_CORE_TYPE_DEC && inst->use_stream_buffer)
		src_vq->mem_ops = &vb2_vmalloc_memops;
	src_vq->drv_priv = inst;
	src_vq->buf_struct_size = sizeof(struct vpu_vb2_buffer);
	src_vq->min_buffers_needed = 1;
	src_vq->dev = inst->vpu->dev;
	src_vq->lock = &inst->lock;
	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	inst->cap_format.type = dst_vq->type;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->ops = &vpu_vb2_ops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	if (inst->type == VPU_CORE_TYPE_ENC && inst->use_stream_buffer)
		dst_vq->mem_ops = &vb2_vmalloc_memops;
	dst_vq->drv_priv = inst;
	dst_vq->buf_struct_size = sizeof(struct vpu_vb2_buffer);
	dst_vq->min_buffers_needed = 1;
	dst_vq->dev = inst->vpu->dev;
	dst_vq->lock = &inst->lock;
	ret = vb2_queue_init(dst_vq);
	if (ret) {
		vb2_queue_release(src_vq);
		return ret;
	}

	return 0;
}

static int vpu_v4l2_release(struct vpu_inst *inst)
{
	vpu_trace(inst->vpu->dev, "%p\n", inst);

	vpu_release_core(inst->core);
	put_device(inst->dev);

	if (inst->workqueue) {
		cancel_work_sync(&inst->msg_work);
		destroy_workqueue(inst->workqueue);
		inst->workqueue = NULL;
	}

	v4l2_ctrl_handler_free(&inst->ctrl_handler);
	mutex_destroy(&inst->lock);
	v4l2_fh_del(&inst->fh);
	v4l2_fh_exit(&inst->fh);

	call_void_vop(inst, cleanup);

	return 0;
}

int vpu_v4l2_open(struct file *file, struct vpu_inst *inst)
{
	struct vpu_dev *vpu = video_drvdata(file);
	struct vpu_func *func;
	int ret = 0;

	if (!inst || !inst->ops)
		return -EINVAL;

	if (inst->type == VPU_CORE_TYPE_ENC)
		func = &vpu->encoder;
	else
		func = &vpu->decoder;

	atomic_set(&inst->ref_count, 0);
	vpu_inst_get(inst);
	inst->vpu = vpu;
	inst->core = vpu_request_core(vpu, inst->type);
	if (inst->core)
		inst->dev = get_device(inst->core->dev);
	mutex_init(&inst->lock);
	INIT_LIST_HEAD(&inst->cmd_q);
	inst->id = VPU_INST_NULL_ID;
	inst->release = vpu_v4l2_release;
	inst->pid = current->pid;
	inst->tgid = current->tgid;
	inst->min_buffer_cap = 2;
	inst->min_buffer_out = 2;
	v4l2_fh_init(&inst->fh, func->vfd);
	v4l2_fh_add(&inst->fh);

	ret = call_vop(inst, ctrl_init);
	if (ret)
		goto error;

	inst->fh.m2m_ctx = v4l2_m2m_ctx_init(func->m2m_dev, inst, vpu_m2m_queue_init);
	if (IS_ERR(inst->fh.m2m_ctx)) {
		dev_err(vpu->dev, "v4l2_m2m_ctx_init failed\n");
		ret = PTR_ERR(inst->fh.m2m_ctx);
		goto error;
	}

	inst->fh.ctrl_handler = &inst->ctrl_handler;
	file->private_data = &inst->fh;
	inst->state = VPU_CODEC_STATE_DEINIT;
	inst->workqueue = alloc_workqueue("vpu_inst", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (inst->workqueue) {
		INIT_WORK(&inst->msg_work, vpu_inst_run_work);
		ret = kfifo_init(&inst->msg_fifo,
				 inst->msg_buffer,
				 rounddown_pow_of_two(sizeof(inst->msg_buffer)));
		if (ret) {
			destroy_workqueue(inst->workqueue);
			inst->workqueue = NULL;
		}
	}
	vpu_trace(vpu->dev, "tgid = %d, pid = %d, type = %s, inst = %p\n",
		  inst->tgid, inst->pid, vpu_core_type_desc(inst->type), inst);

	return 0;
error:
	vpu_inst_put(inst);
	return ret;
}

int vpu_v4l2_close(struct file *file)
{
	struct vpu_dev *vpu = video_drvdata(file);
	struct vpu_inst *inst = to_inst(file);

	vpu_trace(vpu->dev, "tgid = %d, pid = %d, inst = %p\n", inst->tgid, inst->pid, inst);

	vpu_inst_lock(inst);
	if (inst->fh.m2m_ctx) {
		v4l2_m2m_ctx_release(inst->fh.m2m_ctx);
		inst->fh.m2m_ctx = NULL;
	}
	vpu_inst_unlock(inst);

	call_void_vop(inst, release);
	vpu_inst_unregister(inst);
	vpu_inst_put(inst);

	return 0;
}
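
/*
 * Register one VPU function (encoder or decoder): create its m2m
 * device, allocate and register a video device wired to the venc/vdec
 * fops and ioctl ops, and attach it to the media controller.
 */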
int vpu_add_func(struct vpu_dev *vpu, struct vpu_func *func)
{
	struct video_device *vfd;
	int ret;

	if (!vpu || !func)
		return -EINVAL;

	if (func->vfd)
		return 0;

	func->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
	if (IS_ERR(func->m2m_dev)) {
		dev_err(vpu->dev, "v4l2_m2m_init failed\n");
		func->vfd = NULL;
		return PTR_ERR(func->m2m_dev);
	}

	vfd = video_device_alloc();
	if (!vfd) {
		v4l2_m2m_release(func->m2m_dev);
		dev_err(vpu->dev, "failed to allocate vpu video device\n");
		return -ENOMEM;
	}
	vfd->release = video_device_release;
	vfd->vfl_dir = VFL_DIR_M2M;
	vfd->v4l2_dev = &vpu->v4l2_dev;
	vfd->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
	if (func->type == VPU_CORE_TYPE_ENC) {
		strscpy(vfd->name, "amphion-vpu-encoder", sizeof(vfd->name));
		vfd->fops = venc_get_fops();
		vfd->ioctl_ops = venc_get_ioctl_ops();
	} else {
		strscpy(vfd->name, "amphion-vpu-decoder", sizeof(vfd->name));
		vfd->fops = vdec_get_fops();
		vfd->ioctl_ops = vdec_get_ioctl_ops();
	}

	ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
	if (ret) {
		video_device_release(vfd);
		v4l2_m2m_release(func->m2m_dev);
		return ret;
	}
	video_set_drvdata(vfd, vpu);
	func->vfd = vfd;

	ret = v4l2_m2m_register_media_controller(func->m2m_dev, func->vfd, func->function);
	if (ret) {
		v4l2_m2m_release(func->m2m_dev);
		func->m2m_dev = NULL;
		video_unregister_device(func->vfd);
		func->vfd = NULL;
		return ret;
	}

	return 0;
}

void vpu_remove_func(struct vpu_func *func)
{
	if (!func)
		return;

	if (func->m2m_dev) {
		v4l2_m2m_unregister_media_controller(func->m2m_dev);
		v4l2_m2m_release(func->m2m_dev);
		func->m2m_dev = NULL;
	}
	if (func->vfd) {
		video_unregister_device(func->vfd);
		func->vfd = NULL;
	}
}