// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

/*
 * V4L2 glue for the amphion VPU driver: vb2 queue ops, v4l2-mem2mem
 * integration, format negotiation helpers and open/close lifecycle for
 * both the encoder and decoder function devices.
 */

#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-vmalloc.h>
#include "vpu.h"
#include "vpu_core.h"
#include "vpu_v4l2.h"
#include "vpu_msgs.h"
#include "vpu_helpers.h"

/* Human-readable queue direction for trace/debug messages. */
static char *vpu_type_name(u32 type)
{
	return V4L2_TYPE_IS_OUTPUT(type) ? "output" : "capture";
}

/* Serialize access to per-instance state (inst->lock). */
void vpu_inst_lock(struct vpu_inst *inst)
{
	mutex_lock(&inst->lock);
}

void vpu_inst_unlock(struct vpu_inst *inst)
{
	mutex_unlock(&inst->lock);
}

/*
 * DMA address of a plane's payload, i.e. the plane base address plus
 * data_offset. Returns 0 for an out-of-range plane index.
 */
dma_addr_t vpu_get_vb_phy_addr(struct vb2_buffer *vb, u32 plane_no)
{
	if (plane_no >= vb->num_planes)
		return 0;
	return vb2_dma_contig_plane_dma_addr(vb, plane_no) +
			vb->planes[plane_no].data_offset;
}

/*
 * Usable payload length of a plane (plane size minus data_offset).
 * Returns 0 for an out-of-range plane index.
 */
static unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no)
{
	if (plane_no >= vb->num_planes)
		return 0;
	return vb2_plane_size(vb, plane_no) - vb->planes[plane_no].data_offset;
}

/* Set the driver-private buffer state (VPU_BUF_STATE_*). */
void vpu_set_buffer_state(struct vb2_v4l2_buffer *vbuf, unsigned int state)
{
	struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);

	vpu_buf->state = state;
}

unsigned int vpu_get_buffer_state(struct vb2_v4l2_buffer *vbuf)
{
	struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);

	return vpu_buf->state;
}

/* Record the per-frame average QP reported by the firmware on this buffer. */
void vpu_set_buffer_average_qp(struct vb2_v4l2_buffer *vbuf, u32 qp)
{
	struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);

	vpu_buf->average_qp = qp;
}

/*
 * Put both vb2 queues into the error state so userspace gets -EIO on
 * further queue operations after an unrecoverable codec error.
 */
void vpu_v4l2_set_error(struct vpu_inst *inst)
{
	vpu_inst_lock(inst);
	dev_err(inst->dev, "some error occurs in codec\n");
	if (inst->fh.m2m_ctx) {
		vb2_queue_error(v4l2_m2m_get_src_vq(inst->fh.m2m_ctx));
		vb2_queue_error(v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx));
	}
	vpu_inst_unlock(inst);
}

/* Deliver a V4L2_EVENT_EOS event to the file handle. */
static int vpu_notify_eos(struct vpu_inst *inst)
{
	static const struct v4l2_event ev = {
		.id = 0,
		.type = V4L2_EVENT_EOS
	};

	vpu_trace(inst->dev, "[%d]\n", inst->id);
	v4l2_event_queue_fh(&inst->fh, &ev);

	return 0;
}

/* Deliver a resolution-change source-change event to the file handle. */
int vpu_notify_source_change(struct vpu_inst *inst)
{
	static const struct v4l2_event ev = {
		.type = V4L2_EVENT_SOURCE_CHANGE,
		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION
	};

	vpu_trace(inst->dev, "[%d]\n", inst->id);
	v4l2_event_queue_fh(&inst->fh, &ev);
	return 0;
}

/*
 * Mark the capture queue as "last buffer dequeued" and wake waiters, so a
 * blocked DQBUF returns -EPIPE per the V4L2 draining protocol. Fails with
 * -EINVAL while completed buffers are still pending on the done list
 * (draining is not finished yet). Optionally also raises the EOS event.
 */
int vpu_set_last_buffer_dequeued(struct vpu_inst *inst, bool eos)
{
	struct vb2_queue *q;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
	if (!list_empty(&q->done_list))
		return -EINVAL;

	if (q->last_buffer_dequeued)
		return 0;
	vpu_trace(inst->dev, "last buffer dequeued\n");
	q->last_buffer_dequeued = true;
	wake_up(&q->done_wq);
	if (eos)
		vpu_notify_eos(inst);
	return 0;
}

/*
 * True when no queued output (source) buffer is still waiting to be
 * consumed, i.e. none is in the IDLE state.
 */
bool vpu_is_source_empty(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;

	if (!inst->fh.m2m_ctx)
		return true;
	v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
		if (vpu_get_buffer_state(&buf->vb) == VPU_BUF_STATE_IDLE)
			return false;
	}
	return true;
}

/*
 * Resolve the requested pixelformat against the formats this instance
 * supports, falling back to the first enumerated format when the exact
 * one is unknown. On success *fmt is overwritten with the format info.
 */
static int vpu_init_format(struct vpu_inst *inst, struct vpu_format *fmt)
{
	const struct vpu_format *info;

	info = vpu_helper_find_format(inst, fmt->type, fmt->pixfmt);
	if (!info) {
		info = vpu_helper_enum_format(inst, fmt->type, 0);
		if (!info)
			return -EINVAL;
	}
	memcpy(fmt, info, sizeof(*fmt));

	return 0;
}

/*
 * Fill fmt->bytesperline[] from the userspace-supplied plane info.
 * Compressed formats have no line stride; for raw formats with a single
 * user plane the same stride is replicated to every component plane.
 */
static int vpu_calc_fmt_bytesperline(struct v4l2_format *f, struct vpu_format *fmt)
{
	struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
	int i;

	if (fmt->flags & V4L2_FMT_FLAG_COMPRESSED) {
		for (i = 0; i < fmt->comp_planes; i++)
			fmt->bytesperline[i] = 0;
		return 0;
	}
	if (pixmp->num_planes == fmt->comp_planes) {
		for (i = 0; i < fmt->comp_planes; i++)
			fmt->bytesperline[i] = pixmp->plane_fmt[i].bytesperline;
		return 0;
	}
	if (pixmp->num_planes > 1)
		return -EINVAL;

	/*
	 * The amphion VPU only supports NV12 and NV12 tiled, so the
	 * bytesperline of luma and chroma should be the same.
	 */
	for (i = 0; i < fmt->comp_planes; i++)
		fmt->bytesperline[i] = pixmp->plane_fmt[0].bytesperline;

	return 0;
}

/*
 * Compute per-plane sizeimage (and refine bytesperline) for the current
 * width/height. Raw formats use the core's stride alignment; each plane is
 * at least PAGE_SIZE, and compressed planes are clamped to [128K, 8M] with
 * a zero stride.
 */
static int vpu_calc_fmt_sizeimage(struct vpu_inst *inst, struct vpu_format *fmt)
{
	u32 stride = 1;
	int i;

	if (!(fmt->flags & V4L2_FMT_FLAG_COMPRESSED)) {
		const struct vpu_core_resources *res = vpu_get_resource(inst);

		if (res)
			stride = res->stride;
	}

	for (i = 0; i < fmt->comp_planes; i++) {
		fmt->sizeimage[i] = vpu_helper_get_plane_size(fmt->pixfmt,
							      fmt->width,
							      fmt->height,
							      i,
							      stride,
							      fmt->field != V4L2_FIELD_NONE ? 1 : 0,
							      &fmt->bytesperline[i]);
		fmt->sizeimage[i] = max_t(u32, fmt->sizeimage[i], PAGE_SIZE);
		if (fmt->flags & V4L2_FMT_FLAG_COMPRESSED) {
			fmt->sizeimage[i] = clamp_val(fmt->sizeimage[i], SZ_128K, SZ_8M);
			fmt->bytesperline[i] = 0;
		}
	}

	return 0;
}

/*
 * Size of a memory plane. When the format packs several component planes
 * into fewer memory planes, the last memory plane carries the remaining
 * component planes and their sizes are summed into it.
 */
u32 vpu_get_fmt_plane_size(struct vpu_format *fmt, u32 plane_no)
{
	u32 size;
	int i;

	if (plane_no >= fmt->mem_planes)
		return 0;

	if (fmt->comp_planes == fmt->mem_planes)
		return fmt->sizeimage[plane_no];
	if (plane_no < fmt->mem_planes - 1)
		return fmt->sizeimage[plane_no];

	size = fmt->sizeimage[plane_no];
	for (i = fmt->mem_planes; i < fmt->comp_planes; i++)
		size += fmt->sizeimage[i];

	return size;
}

/*
 * Common TRY_FMT/S_FMT negotiation: validate the requested format against
 * instance capabilities, compute strides/sizes, then write the adjusted
 * values back into the v4l2_format so userspace sees the actual result.
 * For compressed formats a nonzero user-supplied sizeimage overrides the
 * computed one (clamped to [128K, 8M]).
 */
int vpu_try_fmt_common(struct vpu_inst *inst, struct v4l2_format *f, struct vpu_format *fmt)
{
	struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
	int i;
	int ret;

	fmt->pixfmt = pixmp->pixelformat;
	fmt->type = f->type;
	ret = vpu_init_format(inst, fmt);
	if (ret < 0)
		return ret;

	fmt->width = pixmp->width;
	fmt->height = pixmp->height;
	if (fmt->width)
		fmt->width = vpu_helper_valid_frame_width(inst, fmt->width);
	if (fmt->height)
		fmt->height = vpu_helper_valid_frame_height(inst, fmt->height);
	fmt->field = pixmp->field == V4L2_FIELD_ANY ? V4L2_FIELD_NONE : pixmp->field;
	vpu_calc_fmt_bytesperline(f, fmt);
	vpu_calc_fmt_sizeimage(inst, fmt);
	if ((fmt->flags & V4L2_FMT_FLAG_COMPRESSED) && pixmp->plane_fmt[0].sizeimage)
		fmt->sizeimage[0] = clamp_val(pixmp->plane_fmt[0].sizeimage, SZ_128K, SZ_8M);

	pixmp->pixelformat = fmt->pixfmt;
	pixmp->width = fmt->width;
	pixmp->height = fmt->height;
	pixmp->flags = fmt->flags;
	pixmp->num_planes = fmt->mem_planes;
	pixmp->field = fmt->field;
	memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
	for (i = 0; i < pixmp->num_planes; i++) {
		pixmp->plane_fmt[i].bytesperline = fmt->bytesperline[i];
		pixmp->plane_fmt[i].sizeimage = vpu_get_fmt_plane_size(fmt, i);
		memset(pixmp->plane_fmt[i].reserved, 0, sizeof(pixmp->plane_fmt[i].reserved));
	}

	return 0;
}

/*
 * Whether the instance is able to accept work on the given queue type:
 * it must be initialized and registered; codecs without a check_ready
 * hook are always considered ready.
 */
static bool vpu_check_ready(struct vpu_inst *inst, u32 type)
{
	if (!inst)
		return false;
	if (inst->state == VPU_CODEC_STATE_DEINIT || inst->id < 0)
		return false;
	if (!inst->ops->check_ready)
		return true;
	return call_vop(inst, check_ready, type);
}

/*
 * Hand the first IDLE output (source) buffer to the codec via the
 * process_output hook. -EINVAL when nothing is pending or not ready.
 */
int vpu_process_output_buffer(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (!vpu_check_ready(inst, inst->out_format.type))
		return -EINVAL;

	v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
		vbuf = &buf->vb;
		if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_IDLE)
			break;
		vbuf = NULL;
	}

	if (!vbuf)
		return -EINVAL;

	dev_dbg(inst->dev, "[%d]frame id = %d / %d\n",
		inst->id, vbuf->sequence, inst->sequence);
	return call_vop(inst, process_output, &vbuf->vb2_buf);
}

/*
 * Hand the first IDLE capture (destination) buffer to the codec via the
 * process_capture hook. -EINVAL when nothing is pending or not ready.
 */
int vpu_process_capture_buffer(struct vpu_inst *inst)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (!vpu_check_ready(inst, inst->cap_format.type))
		return -EINVAL;

	v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
		vbuf = &buf->vb;
		if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_IDLE)
			break;
		vbuf = NULL;
	}
	if (!vbuf)
		return -EINVAL;

	return call_vop(inst, process_capture, &vbuf->vb2_buf);
}

/*
 * Peek at the next source buffer without removing it from the m2m queue.
 * Returns NULL when the queue is empty or the head buffer is still IDLE
 * (i.e. the codec has not consumed it yet).
 */
struct vb2_v4l2_buffer *vpu_next_src_buf(struct vpu_inst *inst)
{
	struct vb2_v4l2_buffer *src_buf = NULL;

	if (!inst->fh.m2m_ctx)
		return NULL;

	src_buf = v4l2_m2m_next_src_buf(inst->fh.m2m_ctx);
	if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
		return NULL;

	return src_buf;
}

/*
 * Complete up to @count consumed source buffers back to userspace.
 * DECODED buffers finish as DONE, others as ERROR; stops early at the
 * first IDLE (not yet consumed) buffer.
 */
void vpu_skip_frame(struct vpu_inst *inst, int count)
{
	struct vb2_v4l2_buffer *src_buf;
	enum vb2_buffer_state state;
	int i = 0;

	if (count <= 0 || !inst->fh.m2m_ctx)
		return;

	while (i < count) {
		src_buf = v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx);
		if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
			return;
		if (vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_DECODED)
			state = VB2_BUF_STATE_DONE;
		else
			state = VB2_BUF_STATE_ERROR;
		i++;
		vpu_set_buffer_state(src_buf, VPU_BUF_STATE_IDLE);
		v4l2_m2m_buf_done(src_buf, state);
	}
}

/*
 * Find a queued buffer by its sequence number on either queue direction;
 * NULL when no match.
 */
struct vb2_v4l2_buffer *vpu_find_buf_by_sequence(struct vpu_inst *inst, u32 type, u32 sequence)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return NULL;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->sequence == sequence)
				break;
			vbuf = NULL;
		}
	} else {
		v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->sequence == sequence)
				break;
			vbuf = NULL;
		}
	}

	return vbuf;
}

/*
 * Find a queued buffer by its vb2 index on either queue direction;
 * NULL when no match.
 */
struct vb2_v4l2_buffer *vpu_find_buf_by_idx(struct vpu_inst *inst, u32 type, u32 idx)
{
	struct v4l2_m2m_buffer *buf = NULL;
	struct vb2_v4l2_buffer *vbuf = NULL;

	if (!inst || !inst->fh.m2m_ctx)
		return NULL;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		v4l2_m2m_for_each_src_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->vb2_buf.index == idx)
				break;
			vbuf = NULL;
		}
	} else {
		v4l2_m2m_for_each_dst_buf(inst->fh.m2m_ctx, buf) {
			vbuf = &buf->vb;
			if (vbuf->vb2_buf.index == idx)
				break;
			vbuf = NULL;
		}
	}

	return vbuf;
}

/* Number of allocated buffers on the queue of the given type. */
int vpu_get_num_buffers(struct vpu_inst *inst, u32 type)
{
	struct vb2_queue *q;

	if (!inst || !inst->fh.m2m_ctx)
		return -EINVAL;

	if (V4L2_TYPE_IS_OUTPUT(type))
		q = v4l2_m2m_get_src_vq(inst->fh.m2m_ctx);
	else
		q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);

	return vb2_get_num_buffers(q);
}

/*
 * Intentionally empty: buffers are pushed to the firmware directly from
 * buf_queue, not driven by the m2m scheduler.
 */
static void vpu_m2m_device_run(void *priv)
{
}

static void vpu_m2m_job_abort(void *priv)
{
	struct vpu_inst *inst = priv;
	struct v4l2_m2m_ctx *m2m_ctx = inst->fh.m2m_ctx;

	v4l2_m2m_job_finish(m2m_ctx->m2m_dev, m2m_ctx);
}

static const struct v4l2_m2m_ops vpu_m2m_ops = {
	.device_run = vpu_m2m_device_run,
	.job_abort = vpu_m2m_job_abort
};

/*
 * vb2 queue_setup: validate a CREATE_BUFS-style request against the
 * current format, or fill in plane count/sizes and enforce the minimum
 * buffer counts. An OUTPUT REQBUFS while seeking triggers a codec
 * reinit; a CAPTURE request resets the frame store mapping.
 */
static int vpu_vb2_queue_setup(struct vb2_queue *vq,
			       unsigned int *buf_count,
			       unsigned int *plane_count,
			       unsigned int psize[],
			       struct device *allocators[])
{
	struct vpu_inst *inst = vb2_get_drv_priv(vq);
	struct vpu_format *cur_fmt;
	int i;

	cur_fmt = vpu_get_format(inst, vq->type);

	if (*plane_count) {
		if (*plane_count != cur_fmt->mem_planes)
			return -EINVAL;
		for (i = 0; i < cur_fmt->mem_planes; i++) {
			if (psize[i] < vpu_get_fmt_plane_size(cur_fmt, i))
				return -EINVAL;
		}
		return 0;
	}

	if (V4L2_TYPE_IS_OUTPUT(vq->type))
		*buf_count = max_t(unsigned int, *buf_count, inst->min_buffer_out);
	else
		*buf_count = max_t(unsigned int, *buf_count, inst->min_buffer_cap);
	*plane_count = cur_fmt->mem_planes;
	for (i = 0; i < cur_fmt->mem_planes; i++)
		psize[i] = vpu_get_fmt_plane_size(cur_fmt, i);

	if (V4L2_TYPE_IS_OUTPUT(vq->type) && inst->state == VPU_CODEC_STATE_SEEK) {
		vpu_trace(inst->dev, "reinit when VIDIOC_REQBUFS(OUTPUT, 0)\n");
		call_void_vop(inst, release);
	}

	if (V4L2_TYPE_IS_CAPTURE(vq->type))
		call_void_vop(inst, reset_frame_store);

	return 0;
}

/*
 * vb2 buf_init: reset the driver-private state, then let decoders attach
 * capture buffers to a firmware frame store slot if they implement it.
 */
static int vpu_vb2_buf_init(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);

	vpu_buf->fs_id = -1;
	vpu_set_buffer_state(vbuf, VPU_BUF_STATE_IDLE);

	if (!inst->ops->attach_frame_store || V4L2_TYPE_IS_OUTPUT(vb->type))
		return 0;

	call_void_vop(inst, attach_frame_store, vb);
	return 0;
}

/* Only progressive frames are accepted on the output queue. */
static int vpu_vb2_buf_out_validate(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vbuf->field = V4L2_FIELD_NONE;

	return 0;
}

/*
 * vb2 buf_prepare: mark too-small buffers as ERROR rather than rejecting
 * them, so the queue keeps flowing and the buffer is completed with an
 * error state later.
 */
static int vpu_vb2_buf_prepare(struct vb2_buffer *vb)
{
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_format *cur_fmt;
	u32 i;

	cur_fmt = vpu_get_format(inst, vb->type);
	for (i = 0; i < cur_fmt->mem_planes; i++) {
		if (vpu_get_vb_length(vb, i) < vpu_get_fmt_plane_size(cur_fmt, i)) {
			dev_dbg(inst->dev, "[%d] %s buf[%d] is invalid\n",
				inst->id, vpu_type_name(vb->type), vb->index);
			vpu_set_buffer_state(vbuf, VPU_BUF_STATE_ERROR);
		}
	}

	return 0;
}

/*
 * vb2 buf_finish: publish the frame's average QP through the control,
 * raise EOS when the LAST buffer is dequeued, and poke the codec when
 * the done list drains so it can refill it.
 */
static void vpu_vb2_buf_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_queue *q = vb->vb2_queue;

	if (V4L2_TYPE_IS_CAPTURE(vb->type)) {
		struct vpu_vb2_buffer *vpu_buf = to_vpu_vb2_buffer(vbuf);
		struct v4l2_ctrl *ctrl = v4l2_ctrl_find(&inst->ctrl_handler,
							V4L2_CID_MPEG_VIDEO_AVERAGE_QP);

		if (ctrl)
			v4l2_ctrl_s_ctrl(ctrl, vpu_buf->average_qp);
	}

	if (vbuf->flags & V4L2_BUF_FLAG_LAST)
		vpu_notify_eos(inst);

	if (list_empty(&q->done_list))
		call_void_vop(inst, on_queue_empty, q->type);
}

/* Return every still-queued buffer of one direction with the given state. */
static void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type,
				   enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *buf;

	if (V4L2_TYPE_IS_OUTPUT(type)) {
		while ((buf = v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx))) {
			vpu_set_buffer_state(buf, VPU_BUF_STATE_IDLE);
			v4l2_m2m_buf_done(buf, state);
		}
	} else {
		while ((buf = v4l2_m2m_dst_buf_remove(inst->fh.m2m_ctx))) {
			vpu_set_buffer_state(buf, VPU_BUF_STATE_IDLE);
			v4l2_m2m_buf_done(buf, state);
		}
	}
}

/*
 * vb2 start_streaming: register the instance with a VPU core (the lock is
 * dropped around registration because vpu_inst_register sleeps/locks on
 * its own — NOTE(review): presumed from the unlock/lock pair; confirm
 * against vpu_core.c), then start the codec. On any failure all queued
 * buffers go back to userspace as QUEUED so streaming can be retried.
 */
static int vpu_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct vpu_inst *inst = vb2_get_drv_priv(q);
	struct vpu_format *fmt = vpu_get_format(inst, q->type);
	int ret;

	vpu_inst_unlock(inst);
	ret = vpu_inst_register(inst);
	vpu_inst_lock(inst);
	if (ret) {
		vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_QUEUED);
		return ret;
	}

	vpu_trace(inst->dev, "[%d] %s %c%c%c%c %dx%d %u(%u) %u(%u) %u(%u) %d\n",
		  inst->id, vpu_type_name(q->type),
		  fmt->pixfmt,
		  fmt->pixfmt >> 8,
		  fmt->pixfmt >> 16,
		  fmt->pixfmt >> 24,
		  fmt->width, fmt->height,
		  fmt->sizeimage[0], fmt->bytesperline[0],
		  fmt->sizeimage[1], fmt->bytesperline[1],
		  fmt->sizeimage[2], fmt->bytesperline[2],
		  vb2_get_num_buffers(q));
	vb2_clear_last_buffer_dequeued(q);
	ret = call_vop(inst, start, q->type);
	if (ret)
		vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_QUEUED);

	return ret;
}

/*
 * vb2 stop_streaming: stop the codec for this direction and error out all
 * remaining buffers; restarting the output queue restarts the sequence
 * counter.
 */
static void vpu_vb2_stop_streaming(struct vb2_queue *q)
{
	struct vpu_inst *inst = vb2_get_drv_priv(q);

	vpu_trace(inst->dev, "[%d] %s\n", inst->id, vpu_type_name(q->type));

	call_void_vop(inst, stop, q->type);
	vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_ERROR);
	if (V4L2_TYPE_IS_OUTPUT(q->type))
		inst->sequence = 0;
}

/*
 * vb2 buf_queue: stamp output buffers with a sequence number, queue the
 * buffer to the m2m context and immediately try to feed both directions
 * of the codec.
 */
static void vpu_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpu_inst *inst = vb2_get_drv_priv(vb->vb2_queue);

	if (V4L2_TYPE_IS_OUTPUT(vb->type))
		vbuf->sequence = inst->sequence++;

	v4l2_m2m_buf_queue(inst->fh.m2m_ctx, vbuf);
	vpu_process_output_buffer(inst);
	vpu_process_capture_buffer(inst);
}

static const struct vb2_ops vpu_vb2_ops = {
	.queue_setup = vpu_vb2_queue_setup,
	.buf_init = vpu_vb2_buf_init,
	.buf_out_validate = vpu_vb2_buf_out_validate,
	.buf_prepare = vpu_vb2_buf_prepare,
	.buf_finish = vpu_vb2_buf_finish,
	.start_streaming = vpu_vb2_start_streaming,
	.stop_streaming = vpu_vb2_stop_streaming,
	.buf_queue = vpu_vb2_buf_queue,
};

/*
 * m2m queue init callback: configure both vb2 queues. The bitstream side
 * (decoder output / encoder capture) uses vmalloc memory when the stream
 * buffer is in use; frame data stays dma-contig. Both queues share
 * inst->lock as their serialization lock.
 */
static int vpu_m2m_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
{
	struct vpu_inst *inst = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	inst->out_format.type = src_vq->type;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->ops = &vpu_vb2_ops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	if (inst->type == VPU_CORE_TYPE_DEC && inst->use_stream_buffer)
		src_vq->mem_ops = &vb2_vmalloc_memops;
	src_vq->drv_priv = inst;
	src_vq->buf_struct_size = sizeof(struct vpu_vb2_buffer);
	src_vq->dev = inst->vpu->dev;
	src_vq->lock = &inst->lock;
	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	inst->cap_format.type = dst_vq->type;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->ops = &vpu_vb2_ops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	if (inst->type == VPU_CORE_TYPE_ENC && inst->use_stream_buffer)
		dst_vq->mem_ops = &vb2_vmalloc_memops;
	dst_vq->drv_priv = inst;
	dst_vq->buf_struct_size = sizeof(struct vpu_vb2_buffer);
	dst_vq->dev = inst->vpu->dev;
	dst_vq->lock = &inst->lock;
	ret = vb2_queue_init(dst_vq);
	if (ret) {
		vb2_queue_release(src_vq);
		return ret;
	}

	return 0;
}

/*
 * Final teardown, invoked when the instance refcount drops to zero
 * (installed as inst->release in vpu_v4l2_open): flush the message work,
 * release the core and device references, then let the codec clean up
 * (which presumably frees the instance — NOTE(review): cleanup is called
 * last, after mutex_destroy; confirm it frees inst).
 */
static int vpu_v4l2_release(struct vpu_inst *inst)
{
	vpu_trace(inst->vpu->dev, "%p\n", inst);

	if (inst->workqueue) {
		cancel_work_sync(&inst->msg_work);
		destroy_workqueue(inst->workqueue);
		inst->workqueue = NULL;
	}

	vpu_release_core(inst->core);
	put_device(inst->dev);

	v4l2_ctrl_handler_free(&inst->ctrl_handler);
	mutex_destroy(&inst->lock);

	call_void_vop(inst, cleanup);

	return 0;
}

/*
 * Open path shared by encoder and decoder: take an instance reference,
 * bind a VPU core, init the file handle, controls and m2m context, and
 * set up the per-instance message workqueue/fifo. On error the file
 * handle is unwound and the reference dropped (which triggers
 * vpu_v4l2_release). A workqueue allocation failure is tolerated —
 * the open still succeeds without it.
 */
int vpu_v4l2_open(struct file *file, struct vpu_inst *inst)
{
	struct vpu_dev *vpu = video_drvdata(file);
	struct vpu_func *func;
	int ret = 0;

	if (!inst || !inst->ops)
		return -EINVAL;

	if (inst->type == VPU_CORE_TYPE_ENC)
		func = &vpu->encoder;
	else
		func = &vpu->decoder;

	atomic_set(&inst->ref_count, 0);
	atomic_long_set(&inst->last_response_cmd, 0);
	vpu_inst_get(inst);
	inst->vpu = vpu;
	inst->core = vpu_request_core(vpu, inst->type);
	if (inst->core)
		inst->dev = get_device(inst->core->dev);
	mutex_init(&inst->lock);
	INIT_LIST_HEAD(&inst->cmd_q);
	inst->id = VPU_INST_NULL_ID;
	inst->release = vpu_v4l2_release;
	inst->pid = current->pid;
	inst->tgid = current->tgid;
	inst->min_buffer_cap = 2;
	inst->min_buffer_out = 2;
	v4l2_fh_init(&inst->fh, func->vfd);
	v4l2_fh_add(&inst->fh, file);

	ret = call_vop(inst, ctrl_init);
	if (ret)
		goto error;

	inst->fh.m2m_ctx = v4l2_m2m_ctx_init(func->m2m_dev, inst, vpu_m2m_queue_init);
	if (IS_ERR(inst->fh.m2m_ctx)) {
		dev_err(vpu->dev, "v4l2_m2m_ctx_init fail\n");
		ret = PTR_ERR(inst->fh.m2m_ctx);
		goto error;
	}

	inst->fh.ctrl_handler = &inst->ctrl_handler;
	inst->state = VPU_CODEC_STATE_DEINIT;
	inst->workqueue = alloc_ordered_workqueue("vpu_inst", WQ_MEM_RECLAIM);
	if (inst->workqueue) {
		INIT_WORK(&inst->msg_work, vpu_inst_run_work);
		ret = kfifo_init(&inst->msg_fifo,
				 inst->msg_buffer,
				 rounddown_pow_of_two(sizeof(inst->msg_buffer)));
		if (ret) {
			destroy_workqueue(inst->workqueue);
			inst->workqueue = NULL;
		}
	}
	vpu_trace(vpu->dev, "tgid = %d, pid = %d, type = %s, inst = %p\n",
		  inst->tgid, inst->pid, vpu_core_type_desc(inst->type), inst);

	return 0;
error:
	v4l2_fh_del(&inst->fh, file);
	v4l2_fh_exit(&inst->fh);
	vpu_inst_put(inst);
	return ret;
}

/*
 * Close path: release the m2m context and codec under the instance lock,
 * tear down the file handle, then unregister and drop the open reference
 * (the last put runs vpu_v4l2_release).
 */
int vpu_v4l2_close(struct file *file)
{
	struct vpu_dev *vpu = video_drvdata(file);
	struct vpu_inst *inst = to_inst(file);

	vpu_trace(vpu->dev, "tgid = %d, pid = %d, inst = %p\n", inst->tgid, inst->pid, inst);

	vpu_inst_lock(inst);
	if (inst->fh.m2m_ctx) {
		v4l2_m2m_ctx_release(inst->fh.m2m_ctx);
		inst->fh.m2m_ctx = NULL;
	}
	call_void_vop(inst, release);
	vpu_inst_unlock(inst);

	v4l2_fh_del(&inst->fh, file);
	v4l2_fh_exit(&inst->fh);

	vpu_inst_unregister(inst);
	vpu_inst_put(inst);

	return 0;
}

/*
 * Register one function device (encoder or decoder): create the m2m
 * device, allocate and register a video_device with the matching fops /
 * ioctl ops, and hook it into the media controller. Each failure step
 * unwinds what was set up before it. Idempotent: a second call with
 * func->vfd already set is a no-op.
 */
int vpu_add_func(struct vpu_dev *vpu, struct vpu_func *func)
{
	struct video_device *vfd;
	int ret;

	if (!vpu || !func)
		return -EINVAL;

	if (func->vfd)
		return 0;

	func->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
	if (IS_ERR(func->m2m_dev)) {
		dev_err(vpu->dev, "v4l2_m2m_init fail\n");
		func->vfd = NULL;
		return PTR_ERR(func->m2m_dev);
	}

	vfd = video_device_alloc();
	if (!vfd) {
		v4l2_m2m_release(func->m2m_dev);
		dev_err(vpu->dev, "alloc vpu decoder video device fail\n");
		return -ENOMEM;
	}
	vfd->release = video_device_release;
	vfd->vfl_dir = VFL_DIR_M2M;
	vfd->v4l2_dev = &vpu->v4l2_dev;
	vfd->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
	if (func->type == VPU_CORE_TYPE_ENC) {
		strscpy(vfd->name, "amphion-vpu-encoder", sizeof(vfd->name));
		vfd->fops = venc_get_fops();
		vfd->ioctl_ops = venc_get_ioctl_ops();
	} else {
		strscpy(vfd->name, "amphion-vpu-decoder", sizeof(vfd->name));
		vfd->fops = vdec_get_fops();
		vfd->ioctl_ops = vdec_get_ioctl_ops();
	}
	video_set_drvdata(vfd, vpu);

	ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
	if (ret) {
		video_device_release(vfd);
		v4l2_m2m_release(func->m2m_dev);
		return ret;
	}
	func->vfd = vfd;

	ret = v4l2_m2m_register_media_controller(func->m2m_dev, func->vfd, func->function);
	if (ret) {
		v4l2_m2m_release(func->m2m_dev);
		func->m2m_dev = NULL;
		video_unregister_device(func->vfd);
		func->vfd = NULL;
		return ret;
	}

	return 0;
}

/* Unregister a function device and release its m2m device, if present. */
void vpu_remove_func(struct vpu_func *func)
{
	if (!func)
		return;

	if (func->m2m_dev) {
		v4l2_m2m_unregister_media_controller(func->m2m_dev);
		v4l2_m2m_release(func->m2m_dev);
		func->m2m_dev = NULL;
	}
	if (func->vfd) {
		video_unregister_device(func->vfd);
		func->vfd = NULL;
	}
}