// SPDX-License-Identifier: GPL-2.0+
/*
 * vsp1_video.c -- R-Car VSP1 Video Node
 *
 * Copyright (C) 2013-2015 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/v4l2-mediabus.h>
#include <linux/videodev2.h>
#include <linux/wait.h>

#include <media/media-entity.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

#include "vsp1.h"
#include "vsp1_brx.h"
#include "vsp1_dl.h"
#include "vsp1_entity.h"
#include "vsp1_hgo.h"
#include "vsp1_hgt.h"
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_uds.h"
#include "vsp1_video.h"

#define VSP1_VIDEO_DEF_FORMAT		V4L2_PIX_FMT_YUYV
#define VSP1_VIDEO_DEF_WIDTH		1024
#define VSP1_VIDEO_DEF_HEIGHT		768

#define VSP1_VIDEO_MAX_WIDTH		8190U
#define VSP1_VIDEO_MAX_HEIGHT		8190U

/* -----------------------------------------------------------------------------
 * Helper functions
 */

/*
 * Return the subdev connected to the @local pad, or NULL if the remote entity
 * isn't a V4L2 subdev. If @pad is non-NULL, store the remote pad index in it.
 */
static struct v4l2_subdev *
vsp1_video_remote_subdev(struct media_pad *local, u32 *pad)
{
	struct media_pad *remote;

	remote = media_pad_remote_pad_first(local);
	if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}

/*
 * Verify that the active format on the video node matches the format at the
 * connected subdev pad. Return -EPIPE on mismatch, as required for stream
 * start validation.
 */
static int vsp1_video_verify_format(struct vsp1_video *video)
{
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	struct v4l2_subdev *subdev;
	int ret;

	subdev = vsp1_video_remote_subdev(&video->pad, &fmt.pad);
	if (subdev == NULL)
		return -EINVAL;

	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	if (video->rwpf->fmtinfo->mbus != fmt.format.code ||
	    video->rwpf->format.height != fmt.format.height ||
	    video->rwpf->format.width != fmt.format.width) {
		dev_dbg(video->vsp1->dev,
			"Format mismatch: 0x%04x/%ux%u != 0x%04x/%ux%u\n",
			video->rwpf->fmtinfo->mbus, video->rwpf->format.width,
			video->rwpf->format.height, fmt.format.code,
			fmt.format.width, fmt.format.height);
		return -EPIPE;
	}

	return 0;
}

/*
 * Adjust the requested pixel format to the closest hardware-supported
 * configuration: replace deprecated RGB formats, fall back to the default
 * format when unsupported, clamp width/height, and compute plane strides and
 * sizes. If @fmtinfo is non-NULL, store the selected format information in it.
 */
static int __vsp1_video_try_format(struct vsp1_video *video,
				   struct v4l2_pix_format_mplane *pix,
				   const struct vsp1_format_info **fmtinfo)
{
	static const u32 xrgb_formats[][2] = {
		{ V4L2_PIX_FMT_RGB444, V4L2_PIX_FMT_XRGB444 },
		{ V4L2_PIX_FMT_RGB555, V4L2_PIX_FMT_XRGB555 },
		{ V4L2_PIX_FMT_BGR32, V4L2_PIX_FMT_XBGR32 },
		{ V4L2_PIX_FMT_RGB32, V4L2_PIX_FMT_XRGB32 },
	};

	const struct vsp1_format_info *info;
	unsigned int width = pix->width;
	unsigned int height = pix->height;
	unsigned int i;

	/*
	 * Backward compatibility: replace deprecated RGB formats by their XRGB
	 * equivalent. This selects the format older userspace applications want
	 * while still exposing the new format.
	 */
	for (i = 0; i < ARRAY_SIZE(xrgb_formats); ++i) {
		if (xrgb_formats[i][0] == pix->pixelformat) {
			pix->pixelformat = xrgb_formats[i][1];
			break;
		}
	}

	/*
	 * Retrieve format information and select the default format if the
	 * requested format isn't supported.
	 */
	info = vsp1_get_format_info(video->vsp1, pix->pixelformat);
	if (info == NULL)
		info = vsp1_get_format_info(video->vsp1, VSP1_VIDEO_DEF_FORMAT);

	pix->pixelformat = info->fourcc;
	pix->field = V4L2_FIELD_NONE;

	/*
	 * Adjust the colour space fields. On capture devices, userspace needs
	 * to set the V4L2_PIX_FMT_FLAG_SET_CSC to override the defaults. Reset
	 * all fields to *_DEFAULT if the flag isn't set, to then handle
	 * capture and output devices in the same way.
	 */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
	    !(pix->flags & V4L2_PIX_FMT_FLAG_SET_CSC)) {
		pix->colorspace = V4L2_COLORSPACE_DEFAULT;
		pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
		pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
		pix->quantization = V4L2_QUANTIZATION_DEFAULT;
	}

	vsp1_adjust_color_space(info->mbus, &pix->colorspace, &pix->xfer_func,
				&pix->ycbcr_enc, &pix->quantization);

	memset(pix->reserved, 0, sizeof(pix->reserved));

	/* Align the width and height for YUV 4:2:2 and 4:2:0 formats. */
	width = round_down(width, info->hsub);
	height = round_down(height, info->vsub);

	/* Clamp the width and height. */
	pix->width = clamp(width, info->hsub, VSP1_VIDEO_MAX_WIDTH);
	pix->height = clamp(height, info->vsub, VSP1_VIDEO_MAX_HEIGHT);

	/*
	 * Compute and clamp the stride and image size. While not documented in
	 * the datasheet, strides not aligned to a multiple of 128 bytes result
	 * in image corruption.
	 */
	for (i = 0; i < min(info->planes, 2U); ++i) {
		unsigned int hsub = i > 0 ? info->hsub : 1;
		unsigned int vsub = i > 0 ? info->vsub : 1;
		unsigned int align = 128;
		unsigned int bpl;

		bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
			      pix->width / hsub * info->bpp[i] / 8,
			      round_down(65535U, align));

		pix->plane_fmt[i].bytesperline = round_up(bpl, align);
		pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
					    * pix->height / vsub;
	}

	if (info->planes == 3) {
		/* The second and third planes must have the same stride. */
		pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
		pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
	}

	pix->num_planes = info->planes;

	if (fmtinfo)
		*fmtinfo = info;

	return 0;
}

/* -----------------------------------------------------------------------------
 * Pipeline Management
 */

/*
 * vsp1_video_complete_buffer - Complete the current buffer
 * @video: the video node
 *
 * This function completes the current buffer by filling its sequence number,
 * time stamp and payload size, and hands it back to the vb2 core.
 *
 * Return the next queued buffer or NULL if the queue is empty.
 */
static struct vsp1_vb2_buffer *
vsp1_video_complete_buffer(struct vsp1_video *video)
{
	struct vsp1_pipeline *pipe = video->rwpf->entity.pipe;
	struct vsp1_vb2_buffer *next = NULL;
	struct vsp1_vb2_buffer *done;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&video->irqlock, flags);

	if (list_empty(&video->irqqueue)) {
		spin_unlock_irqrestore(&video->irqlock, flags);
		return NULL;
	}

	done = list_first_entry(&video->irqqueue,
				struct vsp1_vb2_buffer, queue);

	list_del(&done->queue);

	if (!list_empty(&video->irqqueue))
		next = list_first_entry(&video->irqqueue,
					struct vsp1_vb2_buffer, queue);

	spin_unlock_irqrestore(&video->irqlock, flags);

	done->buf.sequence = pipe->sequence;
	done->buf.vb2_buf.timestamp = ktime_get_ns();
	for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
		vb2_set_plane_payload(&done->buf.vb2_buf, i,
				      vb2_plane_size(&done->buf.vb2_buf, i));
	vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE);

	return next;
}

/*
 * Complete the current buffer on the @rwpf video node and, when another buffer
 * is queued, install its memory addresses and mark the node as ready in the
 * pipeline's buffers_ready mask.
 */
static void vsp1_video_frame_end(struct vsp1_pipeline *pipe,
				 struct vsp1_rwpf *rwpf)
{
	struct vsp1_video *video = rwpf->video;
	struct vsp1_vb2_buffer *buf;

	buf = vsp1_video_complete_buffer(video);
	if (buf == NULL)
		return;

	video->rwpf->mem = buf->mem;
	pipe->buffers_ready |= 1 << video->pipe_index;
}

/* Configure all pipeline entities for the given image partition. */
static void vsp1_video_pipeline_run_partition(struct vsp1_pipeline *pipe,
					      struct vsp1_dl_list *dl,
					      unsigned int partition)
{
	struct vsp1_partition *part = &pipe->part_table[partition];
	struct vsp1_dl_body *dlb = vsp1_dl_list_get_body0(dl);
	struct vsp1_entity *entity;

	list_for_each_entry(entity, &pipe->entities, list_pipe)
		vsp1_entity_configure_partition(entity, pipe, part, dl, dlb);
}

/*
 * Build and commit the display list(s) for the next frame and start the
 * pipeline. All call sites in this file hold pipe->irqlock when calling this
 * function.
 */
static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe)
{
	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
	struct vsp1_entity *entity;
	struct vsp1_dl_body *dlb;
	struct vsp1_dl_list *dl;
	unsigned int partition;

	dl = vsp1_dl_list_get(pipe->output->dlm);

	/*
	 * If the VSP hardware isn't configured yet (which occurs either when
	 * processing the first frame or after a system suspend/resume), add the
	 * cached stream configuration to the display list to perform a full
	 * initialisation.
	 */
	if (!pipe->configured)
		vsp1_dl_list_add_body(dl, pipe->stream_config);

	dlb = vsp1_dl_list_get_body0(dl);

	list_for_each_entry(entity, &pipe->entities, list_pipe)
		vsp1_entity_configure_frame(entity, pipe, dl, dlb);

	/* Run the first partition. */
	vsp1_video_pipeline_run_partition(pipe, dl, 0);

	/* Process consecutive partitions as necessary. */
	for (partition = 1; partition < pipe->partitions; ++partition) {
		struct vsp1_dl_list *dl_next;

		dl_next = vsp1_dl_list_get(pipe->output->dlm);

		/*
		 * An incomplete chain will still function, but output only
		 * the partitions that had a dl available. The frame end
		 * interrupt will be marked on the last dl in the chain.
		 */
		if (!dl_next) {
			dev_err(vsp1->dev, "Failed to obtain a dl list. Frame will be incomplete\n");
			break;
		}

		vsp1_video_pipeline_run_partition(pipe, dl_next, partition);
		vsp1_dl_list_add_chain(dl, dl_next);
	}

	/* Complete, and commit the head display list. */
	vsp1_dl_list_commit(dl, 0);
	pipe->configured = true;

	vsp1_pipeline_run(pipe);
}

/*
 * Frame end handler for video pipelines: complete buffers on all video nodes,
 * then either wake up a pending stop request or restart the pipeline when all
 * nodes have a buffer ready.
 */
static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe,
					  unsigned int completion)
{
	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
	enum vsp1_pipeline_state state;
	unsigned long flags;
	unsigned int i;

	/* M2M Pipelines should never call here with an incomplete frame. */
	WARN_ON_ONCE(!(completion & VSP1_DL_FRAME_END_COMPLETED));

	spin_lock_irqsave(&pipe->irqlock, flags);

	/* Complete buffers on all video nodes. */
	for (i = 0; i < vsp1->info->rpf_count; ++i) {
		if (!pipe->inputs[i])
			continue;

		vsp1_video_frame_end(pipe, pipe->inputs[i]);
	}

	vsp1_video_frame_end(pipe, pipe->output);

	state = pipe->state;
	pipe->state = VSP1_PIPELINE_STOPPED;

	/*
	 * If a stop has been requested, mark the pipeline as stopped and
	 * return. Otherwise restart the pipeline if ready.
	 */
	if (state == VSP1_PIPELINE_STOPPING)
		wake_up(&pipe->wq);
	else if (vsp1_pipeline_ready(pipe))
		vsp1_video_pipeline_run(pipe);

	spin_unlock_irqrestore(&pipe->irqlock, flags);
}

/*
 * Follow the source links from the @input RPF down to the @output WPF,
 * recording the BRU/BRS input pad and the UDS along the way. Return -EPIPE
 * when the branch contains a loop, chains entities that can't be chained, or
 * doesn't terminate at the output WPF.
 */
static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe,
					    struct vsp1_rwpf *input,
					    struct vsp1_rwpf *output)
{
	struct media_entity_enum ent_enum;
	struct vsp1_entity *entity;
	struct media_pad *pad;
	struct vsp1_brx *brx = NULL;
	int ret;

	ret = media_entity_enum_init(&ent_enum, &input->entity.vsp1->media_dev);
	if (ret < 0)
		return ret;

	/*
	 * The main data path doesn't include the HGO or HGT, use
	 * vsp1_entity_remote_pad() to traverse the graph.
	 */

	pad = vsp1_entity_remote_pad(&input->entity.pads[RWPF_PAD_SOURCE]);

	while (1) {
		if (pad == NULL) {
			ret = -EPIPE;
			goto out;
		}

		/* We've reached a video node, that shouldn't have happened. */
		if (!is_media_entity_v4l2_subdev(pad->entity)) {
			ret = -EPIPE;
			goto out;
		}

		entity = to_vsp1_entity(
			media_entity_to_v4l2_subdev(pad->entity));

		/*
		 * A BRU or BRS is present in the pipeline, store its input pad
		 * number in the input RPF for use when configuring the RPF.
		 */
		if (entity->type == VSP1_ENTITY_BRU ||
		    entity->type == VSP1_ENTITY_BRS) {
			/* BRU and BRS can't be chained. */
			if (brx) {
				ret = -EPIPE;
				goto out;
			}

			brx = to_brx(&entity->subdev);
			brx->inputs[pad->index].rpf = input;
			input->brx_input = pad->index;
		}

		/* We've reached the WPF, we're done. */
		if (entity->type == VSP1_ENTITY_WPF)
			break;

		/* Ensure the branch has no loop. */
		if (media_entity_enum_test_and_set(&ent_enum,
						   &entity->subdev.entity)) {
			ret = -EPIPE;
			goto out;
		}

		/* UDS can't be chained. */
		if (entity->type == VSP1_ENTITY_UDS) {
			if (pipe->uds) {
				ret = -EPIPE;
				goto out;
			}

			pipe->uds = entity;
			pipe->uds_input = brx ? &brx->entity : &input->entity;
		}

		/* Follow the source link, ignoring any HGO or HGT. */
		pad = &entity->pads[entity->source_pad];
		pad = vsp1_entity_remote_pad(pad);
	}

	/* The last entity must be the output WPF. */
	if (entity != &output->entity)
		ret = -EPIPE;

out:
	media_entity_enum_cleanup(&ent_enum);

	return ret;
}

/*
 * Walk the media graph connected to the @video node to populate the pipeline
 * (inputs, output, LIF, BRx, HGO, HGT and entities list), then validate every
 * input branch down to the output WPF.
 */
static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe,
				     struct vsp1_video *video)
{
	struct media_graph graph;
	struct media_entity *entity = &video->video.entity;
	struct media_device *mdev = entity->graph_obj.mdev;
	unsigned int i;
	int ret;

	/* Walk the graph to locate the entities and video nodes. */
	ret = media_graph_walk_init(&graph, mdev);
	if (ret)
		return ret;

	media_graph_walk_start(&graph, entity);

	while ((entity = media_graph_walk_next(&graph))) {
		struct v4l2_subdev *subdev;
		struct vsp1_rwpf *rwpf;
		struct vsp1_entity *e;

		if (!is_media_entity_v4l2_subdev(entity))
			continue;

		subdev = media_entity_to_v4l2_subdev(entity);
		e = to_vsp1_entity(subdev);
		list_add_tail(&e->list_pipe, &pipe->entities);
		e->pipe = pipe;

		switch (e->type) {
		case VSP1_ENTITY_RPF:
			rwpf = to_rwpf(subdev);
			pipe->inputs[rwpf->entity.index] = rwpf;
			rwpf->video->pipe_index = ++pipe->num_inputs;
			break;

		case VSP1_ENTITY_WPF:
			rwpf = to_rwpf(subdev);
			pipe->output = rwpf;
			rwpf->video->pipe_index = 0;
			break;

		case VSP1_ENTITY_LIF:
			pipe->lif = e;
			break;

		case VSP1_ENTITY_BRU:
		case VSP1_ENTITY_BRS:
			pipe->brx = e;
			break;

		case VSP1_ENTITY_HGO:
			pipe->hgo = e;
			break;

		case VSP1_ENTITY_HGT:
			pipe->hgt = e;
			break;

		default:
			break;
		}
	}

	media_graph_walk_cleanup(&graph);

	/* We need one output and at least one input. */
	if (pipe->num_inputs == 0 || !pipe->output)
		return -EPIPE;

	/*
	 * Follow links downstream for each input and make sure the graph
	 * contains no loop and that all branches end at the output WPF.
	 */
	for (i = 0; i < video->vsp1->info->rpf_count; ++i) {
		if (!pipe->inputs[i])
			continue;

		ret = vsp1_video_pipeline_build_branch(pipe, pipe->inputs[i],
						       pipe->output);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Initialise a newly allocated pipeline and build it from the media graph. */
static int vsp1_video_pipeline_init(struct vsp1_pipeline *pipe,
				    struct vsp1_video *video)
{
	int ret;

	vsp1_pipeline_init(pipe);

	pipe->frame_end = vsp1_video_pipeline_frame_end;

	ret = vsp1_video_pipeline_build(pipe, video);
	if (ret)
		return ret;

	vsp1_pipeline_dump(pipe, "video");

	return 0;
}

/*
 * Get a reference to the pipeline for the video node, allocating and
 * initialising it on first use. Callers release the reference with
 * vsp1_video_pipeline_put().
 */
static struct vsp1_pipeline *vsp1_video_pipeline_get(struct vsp1_video *video)
{
	struct vsp1_pipeline *pipe;
	int ret;

	/*
	 * Get a pipeline object for the video node. If a pipeline has already
	 * been allocated just increment its reference count and return it.
	 * Otherwise allocate a new pipeline and initialize it, it will be freed
	 * when the last reference is released.
	 */
	if (!video->rwpf->entity.pipe) {
		pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
		if (!pipe)
			return ERR_PTR(-ENOMEM);

		ret = vsp1_video_pipeline_init(pipe, video);
		if (ret < 0) {
			vsp1_pipeline_reset(pipe);
			kfree(pipe);
			return ERR_PTR(ret);
		}
	} else {
		pipe = video->rwpf->entity.pipe;
		kref_get(&pipe->kref);
	}

	return pipe;
}

/* kref release handler: reset and free the pipeline. */
static void vsp1_video_pipeline_release(struct kref *kref)
{
	struct vsp1_pipeline *pipe = container_of(kref, typeof(*pipe), kref);

	vsp1_pipeline_reset(pipe);
	kfree(pipe);
}

/*
 * Release a pipeline reference. The graph mutex serialises the release
 * against pipeline lookup in vsp1_video_pipeline_get().
 */
static void vsp1_video_pipeline_put(struct vsp1_pipeline *pipe)
{
	struct media_device *mdev = &pipe->output->entity.vsp1->media_dev;

	mutex_lock(&mdev->graph_mutex);
	kref_put(&pipe->kref, vsp1_video_pipeline_release);
	mutex_unlock(&mdev->graph_mutex);
}

/* -----------------------------------------------------------------------------
 * videobuf2 Queue Operations
 */

/*
 * vb2 .queue_setup: validate or derive the plane count and sizes from the
 * active format on the video node.
 */
static int
vsp1_video_queue_setup(struct vb2_queue *vq,
		       unsigned int *nbuffers, unsigned int *nplanes,
		       unsigned int sizes[], struct device *alloc_devs[])
{
	struct vsp1_video *video = vb2_get_drv_priv(vq);
	const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
	unsigned int i;

	if (*nplanes) {
		if (*nplanes != format->num_planes)
			return -EINVAL;

		for (i = 0; i < *nplanes; i++)
			if (sizes[i] < format->plane_fmt[i].sizeimage)
				return -EINVAL;
		return 0;
	}

	*nplanes = format->num_planes;

	for (i = 0; i < format->num_planes; ++i)
		sizes[i] = format->plane_fmt[i].sizeimage;

	return 0;
}

/*
 * vb2 .buf_prepare: check plane sizes and cache the DMA addresses of all
 * planes (unused plane slots are zeroed).
 */
static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
	struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
	const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
	unsigned int i;

	if (vb->num_planes < format->num_planes)
		return -EINVAL;

	for (i = 0; i < vb->num_planes; ++i) {
		buf->mem.addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);

		if (vb2_plane_size(vb, i) < format->plane_fmt[i].sizeimage)
			return -EINVAL;
	}

	for ( ; i < 3; ++i)
		buf->mem.addr[i] = 0;

	return 0;
}

/*
 * vb2 .buf_queue: add the buffer to the IRQ queue. When it is the first
 * buffer, install its memory addresses, mark the node ready and start the
 * pipeline if all nodes are ready and streaming has started.
 */
static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
	struct vsp1_pipeline *pipe = video->rwpf->entity.pipe;
	struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
	unsigned long flags;
	bool empty;

	spin_lock_irqsave(&video->irqlock, flags);
	empty = list_empty(&video->irqqueue);
	list_add_tail(&buf->queue, &video->irqqueue);
	spin_unlock_irqrestore(&video->irqlock, flags);

	if (!empty)
		return;

	spin_lock_irqsave(&pipe->irqlock, flags);

	video->rwpf->mem = buf->mem;
	pipe->buffers_ready |= 1 << video->pipe_index;

	if (vb2_start_streaming_called(&video->queue) &&
	    vsp1_pipeline_ready(pipe))
		vsp1_video_pipeline_run(pipe);

	spin_unlock_irqrestore(&pipe->irqlock, flags);
}

/*
 * Compute the number of image partitions needed for the pipeline and allocate
 * and fill the partition table. Gen2 hardware always uses a single partition.
 */
static int vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe)
{
	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
	const struct v4l2_mbus_framefmt *format;
	struct vsp1_entity *entity;
	unsigned int div_size;
	unsigned int i;

	/*
	 * Partitions are computed on the size before rotation, use the format
	 * at the WPF sink.
	 */
	format = v4l2_subdev_state_get_format(pipe->output->entity.state,
					      RWPF_PAD_SINK);
	div_size = format->width;

	/*
	 * Only Gen3+ hardware requires image partitioning, Gen2 will operate
	 * with a single partition that covers the whole output.
	 */
	if (vsp1->info->gen >= 3) {
		list_for_each_entry(entity, &pipe->entities, list_pipe) {
			unsigned int entity_max;

			if (!entity->ops->max_width)
				continue;

			entity_max = entity->ops->max_width(entity,
							    entity->state,
							    pipe);
			if (entity_max)
				div_size = min(div_size, entity_max);
		}
	}

	pipe->partitions = DIV_ROUND_UP(format->width, div_size);
	pipe->part_table = kcalloc(pipe->partitions, sizeof(*pipe->part_table),
				   GFP_KERNEL);
	if (!pipe->part_table)
		return -ENOMEM;

	for (i = 0; i < pipe->partitions; ++i)
		vsp1_pipeline_calculate_partition(pipe, &pipe->part_table[i],
						  div_size, i);

	return 0;
}

/*
 * Prepare the pipeline for streaming: compute partitions, configure UDS alpha
 * scaling, and cache the stream configuration in a display list body.
 */
static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
{
	struct vsp1_entity *entity;
	int ret;

	/* Determine this pipelines sizes for image partitioning support. */
	ret = vsp1_video_pipeline_setup_partitions(pipe);
	if (ret < 0)
		return ret;

	if (pipe->uds) {
		struct vsp1_uds *uds = to_uds(&pipe->uds->subdev);

		/*
		 * If a BRU or BRS is present in the pipeline before the UDS,
		 * the alpha component doesn't need to be scaled as the BRU and
		 * BRS output alpha value is fixed to 255. Otherwise we need to
		 * scale the alpha component only when available at the input
		 * RPF.
		 */
		if (pipe->uds_input->type == VSP1_ENTITY_BRU ||
		    pipe->uds_input->type == VSP1_ENTITY_BRS) {
			uds->scale_alpha = false;
		} else {
			struct vsp1_rwpf *rpf =
				to_rwpf(&pipe->uds_input->subdev);

			uds->scale_alpha = rpf->fmtinfo->alpha;
		}
	}

	/*
	 * Compute and cache the stream configuration into a body. The cached
	 * body will be added to the display list by vsp1_video_pipeline_run()
	 * whenever the pipeline needs to be fully reconfigured.
	 */
	pipe->stream_config = vsp1_dlm_dl_body_get(pipe->output->dlm);
	if (!pipe->stream_config)
		return -ENOMEM;

	list_for_each_entry(entity, &pipe->entities, list_pipe) {
		vsp1_entity_route_setup(entity, pipe, pipe->stream_config);
		vsp1_entity_configure_stream(entity, entity->state, pipe, NULL,
					     pipe->stream_config);
	}

	return 0;
}

/* Return all queued buffers to vb2 with an error status. */
static void vsp1_video_release_buffers(struct vsp1_video *video)
{
	struct vsp1_vb2_buffer *buffer;
	unsigned long flags;

	/* Remove all buffers from the IRQ queue. */
	spin_lock_irqsave(&video->irqlock, flags);
	list_for_each_entry(buffer, &video->irqqueue, queue)
		vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
	INIT_LIST_HEAD(&video->irqqueue);
	spin_unlock_irqrestore(&video->irqlock, flags);
}

/* Release stream configuration and partition table. Called with pipe->lock held. */
static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe)
{
	lockdep_assert_held(&pipe->lock);

	/* Release any cached configuration from our output video. */
	vsp1_dl_body_put(pipe->stream_config);
	pipe->stream_config = NULL;
	pipe->configured = false;

	/* Release our partition table allocation. */
	kfree(pipe->part_table);
	pipe->part_table = NULL;
}

/*
 * vb2 .start_streaming: set up the pipeline when the last stream calls
 * through, then start it if all video nodes have buffers ready.
 */
static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct vsp1_video *video = vb2_get_drv_priv(vq);
	struct vsp1_pipeline *pipe = video->rwpf->entity.pipe;
	bool start_pipeline = false;
	unsigned long flags;
	int ret;

	mutex_lock(&pipe->lock);
	if (pipe->stream_count == pipe->num_inputs) {
		ret = vsp1_video_setup_pipeline(pipe);
		if (ret < 0) {
			vsp1_video_release_buffers(video);
			vsp1_video_cleanup_pipeline(pipe);
			mutex_unlock(&pipe->lock);
			return ret;
		}

		start_pipeline = true;
	}

	pipe->stream_count++;
	mutex_unlock(&pipe->lock);

	/*
	 * vsp1_pipeline_ready() is not sufficient to establish that all streams
	 * are prepared and the pipeline is configured, as multiple streams
	 * can race through streamon with buffers already queued; Therefore we
	 * don't even attempt to start the pipeline until the last stream has
	 * called through here.
	 */
	if (!start_pipeline)
		return 0;

	spin_lock_irqsave(&pipe->irqlock, flags);
	if (vsp1_pipeline_ready(pipe))
		vsp1_video_pipeline_run(pipe);
	spin_unlock_irqrestore(&pipe->irqlock, flags);

	return 0;
}

/*
 * vb2 .stop_streaming: stop the pipeline when the last remaining input stops,
 * then release buffers and the pipeline reference.
 */
static void vsp1_video_stop_streaming(struct vb2_queue *vq)
{
	struct vsp1_video *video = vb2_get_drv_priv(vq);
	struct vsp1_pipeline *pipe = video->rwpf->entity.pipe;
	unsigned long flags;
	int ret;

	/*
	 * Clear the buffers ready flag to make sure the device won't be started
	 * by a QBUF on the video node on the other side of the pipeline.
	 */
	spin_lock_irqsave(&video->irqlock, flags);
	pipe->buffers_ready &= ~(1 << video->pipe_index);
	spin_unlock_irqrestore(&video->irqlock, flags);

	mutex_lock(&pipe->lock);
	if (--pipe->stream_count == pipe->num_inputs) {
		/* Stop the pipeline. */
		ret = vsp1_pipeline_stop(pipe);
		if (ret == -ETIMEDOUT)
			dev_err(video->vsp1->dev, "pipeline stop timeout\n");

		vsp1_video_cleanup_pipeline(pipe);
	}
	mutex_unlock(&pipe->lock);

	video_device_pipeline_stop(&video->video);
	vsp1_video_release_buffers(video);
	vsp1_video_pipeline_put(pipe);
}

static const struct vb2_ops vsp1_video_queue_qops = {
	.queue_setup = vsp1_video_queue_setup,
	.buf_prepare = vsp1_video_buffer_prepare,
	.buf_queue = vsp1_video_buffer_queue,
	.start_streaming = vsp1_video_start_streaming,
	.stop_streaming = vsp1_video_stop_streaming,
};

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

/* VIDIOC_QUERYCAP handler: report driver name, card name and capabilities. */
static int
vsp1_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);

	cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
			  | V4L2_CAP_IO_MC | V4L2_CAP_VIDEO_CAPTURE_MPLANE
			  | V4L2_CAP_VIDEO_OUTPUT_MPLANE;

	strscpy(cap->driver, "vsp1", sizeof(cap->driver));
	strscpy(cap->card, video->video.name, sizeof(cap->card));

	return 0;
}

/*
 * VIDIOC_ENUM_FMT handler: enumerate supported pixel formats, advertising CSC
 * override support for YUV formats on capture devices.
 */
static int vsp1_video_enum_format(struct file *file, void *fh,
				  struct v4l2_fmtdesc *f)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);
	const struct vsp1_format_info *info;

	info = vsp1_get_format_info_by_index(video->vsp1, f->index, f->mbus_code);
	if (!info)
		return -EINVAL;

	f->pixelformat = info->fourcc;

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
	    info->mbus == MEDIA_BUS_FMT_AYUV8_1X32)
		f->flags = V4L2_FMT_FLAG_CSC_YCBCR_ENC
			 | V4L2_FMT_FLAG_CSC_QUANTIZATION;

	return 0;
}

/* VIDIOC_G_FMT handler: return the active format on the video node. */
static int
vsp1_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);

	if (format->type != video->queue.type)
		return -EINVAL;

	mutex_lock(&video->lock);
	format->fmt.pix_mp = video->rwpf->format;
	mutex_unlock(&video->lock);

	return 0;
}

/* VIDIOC_TRY_FMT handler: adjust the format without applying it. */
static int
vsp1_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);

	if (format->type != video->queue.type)
		return -EINVAL;

	return __vsp1_video_try_format(video, &format->fmt.pix_mp, NULL);
}

/* VIDIOC_S_FMT handler: adjust and apply the format unless the queue is busy. */
static int
vsp1_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);
	const struct vsp1_format_info *info;
	int ret;

	if (format->type != video->queue.type)
		return -EINVAL;

	ret = __vsp1_video_try_format(video, &format->fmt.pix_mp, &info);
	if (ret < 0)
		return ret;

	mutex_lock(&video->lock);

	if (vb2_is_busy(&video->queue)) {
		ret = -EBUSY;
		goto done;
	}

	video->rwpf->format = format->fmt.pix_mp;
	video->rwpf->fmtinfo = info;

done:
	mutex_unlock(&video->lock);
	return ret;
}

/*
 * VIDIOC_STREAMON handler: acquire a pipeline reference, start the media
 * pipeline, verify the format against the connected subdev and start the vb2
 * queue.
 */
static int
vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);
	struct media_device *mdev = &video->vsp1->media_dev;
	struct vsp1_pipeline *pipe;
	int ret;

	if (vb2_queue_is_busy(&video->queue, file))
		return -EBUSY;

	/*
	 * Get a pipeline for the video node and start streaming on it. No link
	 * touching an entity in the pipeline can be activated or deactivated
	 * once streaming is started.
	 */
	mutex_lock(&mdev->graph_mutex);

	pipe = vsp1_video_pipeline_get(video);
	if (IS_ERR(pipe)) {
		mutex_unlock(&mdev->graph_mutex);
		return PTR_ERR(pipe);
	}

	ret = __video_device_pipeline_start(&video->video, &pipe->pipe);
	if (ret < 0) {
		mutex_unlock(&mdev->graph_mutex);
		goto err_pipe;
	}

	mutex_unlock(&mdev->graph_mutex);

	/*
	 * Verify that the configured format matches the output of the connected
	 * subdev.
	 */
	ret = vsp1_video_verify_format(video);
	if (ret < 0)
		goto err_stop;

	/* Start the queue. */
	ret = vb2_streamon(&video->queue, type);
	if (ret < 0)
		goto err_stop;

	return 0;

err_stop:
	video_device_pipeline_stop(&video->video);
err_pipe:
	vsp1_video_pipeline_put(pipe);
	return ret;
}

static const struct v4l2_ioctl_ops vsp1_video_ioctl_ops = {
	.vidioc_querycap		= vsp1_video_querycap,
	.vidioc_enum_fmt_vid_cap	= vsp1_video_enum_format,
	.vidioc_enum_fmt_vid_out	= vsp1_video_enum_format,
	.vidioc_g_fmt_vid_cap_mplane	= vsp1_video_get_format,
	.vidioc_s_fmt_vid_cap_mplane	= vsp1_video_set_format,
	.vidioc_try_fmt_vid_cap_mplane	= vsp1_video_try_format,
	.vidioc_g_fmt_vid_out_mplane	= vsp1_video_get_format,
	.vidioc_s_fmt_vid_out_mplane	= vsp1_video_set_format,
	.vidioc_try_fmt_vid_out_mplane	= vsp1_video_try_format,
	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_prepare_buf		= vb2_ioctl_prepare_buf,
	.vidioc_streamon		= vsp1_video_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,
};

/* -----------------------------------------------------------------------------
 * V4L2 File Operations
 */

/*
 * File open handler: create a V4L2 file handle and power the device up,
 * undoing the file handle setup on failure.
 */
static int vsp1_video_open(struct file *file)
{
	struct vsp1_video *video = video_drvdata(file);
	struct v4l2_fh *vfh;
	int ret = 0;

	vfh = kzalloc(sizeof(*vfh), GFP_KERNEL);
	if (vfh == NULL)
		return -ENOMEM;

	v4l2_fh_init(vfh, &video->video);
	v4l2_fh_add(vfh);

	file->private_data = vfh;

	ret = vsp1_device_get(video->vsp1);
	if (ret < 0) {
		v4l2_fh_del(vfh);
		v4l2_fh_exit(vfh);
		kfree(vfh);
	}

	return ret;
}

/* File release handler: release vb2 resources and drop the device reference. */
static int vsp1_video_release(struct file *file)
{
	struct vsp1_video *video = video_drvdata(file);

	vb2_fop_release(file);

	vsp1_device_put(video->vsp1);

	return 0;
}

static const struct v4l2_file_operations vsp1_video_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = vsp1_video_open,
	.release = vsp1_video_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
};

/* -----------------------------------------------------------------------------
 * Media entity operations
 */

static int vsp1_video_link_validate(struct media_link *link)
{
	/*
	 * Ideally, link validation should be implemented here instead of
	 * calling vsp1_video_verify_format() in vsp1_video_streamon()
	 * manually. That would however break userspace that start one video
	 * device before configures formats on other video devices in the
	 * pipeline. This operation is just a no-op to silence the warnings
	 * from v4l2_subdev_link_validate().
	 */
	return 0;
}

static const struct media_entity_operations vsp1_video_media_ops = {
	.link_validate = vsp1_video_link_validate,
};

/* -----------------------------------------------------------------------------
 * Suspend and Resume
 */

void vsp1_video_suspend(struct vsp1_device *vsp1)
{
	unsigned long flags;
	unsigned int i;
	int ret;

	/*
	 * To avoid increasing the system suspend time needlessly, loop over the
	 * pipelines twice, first to set them all to the stopping state, and
	 * then to wait for the stop to complete.
	 */
	for (i = 0; i < vsp1->info->wpf_count; ++i) {
		struct vsp1_rwpf *wpf = vsp1->wpf[i];
		struct vsp1_pipeline *pipe;

		if (wpf == NULL)
			continue;

		pipe = wpf->entity.pipe;
		if (pipe == NULL)
			continue;

		spin_lock_irqsave(&pipe->irqlock, flags);
		if (pipe->state == VSP1_PIPELINE_RUNNING)
			pipe->state = VSP1_PIPELINE_STOPPING;
		spin_unlock_irqrestore(&pipe->irqlock, flags);
	}

	for (i = 0; i < vsp1->info->wpf_count; ++i) {
		struct vsp1_rwpf *wpf = vsp1->wpf[i];
		struct vsp1_pipeline *pipe;

		if (wpf == NULL)
			continue;

		pipe = wpf->entity.pipe;
		if (pipe == NULL)
			continue;

		ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe),
					 msecs_to_jiffies(500));
		if (ret == 0)
			dev_warn(vsp1->dev, "pipeline %u stop timeout\n",
				 wpf->entity.index);
	}
}

void vsp1_video_resume(struct vsp1_device *vsp1)
{
	unsigned long flags;
	unsigned int i;

	/* Resume all running pipelines.
	 */
	for (i = 0; i < vsp1->info->wpf_count; ++i) {
		struct vsp1_rwpf *wpf = vsp1->wpf[i];
		struct vsp1_pipeline *pipe;

		if (wpf == NULL)
			continue;

		pipe = wpf->entity.pipe;
		if (pipe == NULL)
			continue;

		/*
		 * The hardware may have been reset during a suspend and will
		 * need a full reconfiguration.
		 */
		pipe->configured = false;

		spin_lock_irqsave(&pipe->irqlock, flags);
		if (vsp1_pipeline_ready(pipe))
			vsp1_video_pipeline_run(pipe);
		spin_unlock_irqrestore(&pipe->irqlock, flags);
	}
}

/* -----------------------------------------------------------------------------
 * Initialization and Cleanup
 */

/*
 * vsp1_video_create - Create and register the video node for an [R]WPF entity
 * @vsp1: the VSP1 device
 * @rwpf: the RPF or WPF the video node is attached to
 *
 * Allocate a vsp1_video (devm-managed), initialize its media pad, default
 * format, video device and vb2 queue, and register the video device.
 *
 * Return: the new video node, or an ERR_PTR() value on failure. Failures
 * after media entity initialization go through vsp1_video_cleanup().
 */
struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
				     struct vsp1_rwpf *rwpf)
{
	struct vsp1_video *video;
	const char *direction;
	int ret;

	video = devm_kzalloc(vsp1->dev, sizeof(*video), GFP_KERNEL);
	if (!video)
		return ERR_PTR(-ENOMEM);

	rwpf->video = video;

	video->vsp1 = vsp1;
	video->rwpf = rwpf;

	/*
	 * An RPF is fed by userspace, so its video node is a V4L2 output
	 * device whose pad is a source for the pipeline. A WPF is the
	 * opposite: a capture device with a sink pad.
	 */
	if (rwpf->entity.type == VSP1_ENTITY_RPF) {
		direction = "input";
		video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
		video->pad.flags = MEDIA_PAD_FL_SOURCE;
		video->video.vfl_dir = VFL_DIR_TX;
		video->video.device_caps = V4L2_CAP_VIDEO_OUTPUT_MPLANE |
					   V4L2_CAP_STREAMING | V4L2_CAP_IO_MC;
	} else {
		direction = "output";
		video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
		video->pad.flags = MEDIA_PAD_FL_SINK;
		video->video.vfl_dir = VFL_DIR_RX;
		video->video.device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
					   V4L2_CAP_STREAMING | V4L2_CAP_IO_MC;
	}

	mutex_init(&video->lock);
	spin_lock_init(&video->irqlock);
	INIT_LIST_HEAD(&video->irqqueue);

	/* Initialize the media entity... */
	ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
	if (ret < 0)
		return ERR_PTR(ret);

	/* ... and the format ... */
	rwpf->format.pixelformat = VSP1_VIDEO_DEF_FORMAT;
	rwpf->format.width = VSP1_VIDEO_DEF_WIDTH;
	rwpf->format.height = VSP1_VIDEO_DEF_HEIGHT;
	__vsp1_video_try_format(video, &rwpf->format, &rwpf->fmtinfo);

	/* ... and the video node... */
	video->video.v4l2_dev = &video->vsp1->v4l2_dev;
	video->video.entity.ops = &vsp1_video_media_ops;
	video->video.fops = &vsp1_video_fops;
	snprintf(video->video.name, sizeof(video->video.name), "%s %s",
		 rwpf->entity.subdev.name, direction);
	video->video.vfl_type = VFL_TYPE_VIDEO;
	video->video.release = video_device_release_empty;
	video->video.ioctl_ops = &vsp1_video_ioctl_ops;

	video_set_drvdata(&video->video, video);

	/* ... and the vb2 buffer queue (DMA contiguous memory) ... */
	video->queue.type = video->type;
	video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	video->queue.lock = &video->lock;
	video->queue.drv_priv = video;
	video->queue.buf_struct_size = sizeof(struct vsp1_vb2_buffer);
	video->queue.ops = &vsp1_video_queue_qops;
	video->queue.mem_ops = &vb2_dma_contig_memops;
	video->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	video->queue.dev = video->vsp1->bus_master;
	ret = vb2_queue_init(&video->queue);
	if (ret < 0) {
		dev_err(video->vsp1->dev, "failed to initialize vb2 queue\n");
		goto error;
	}

	/* ... and register the video device. */
	video->video.queue = &video->queue;
	ret = video_register_device(&video->video, VFL_TYPE_VIDEO, -1);
	if (ret < 0) {
		dev_err(video->vsp1->dev, "failed to register video device\n");
		goto error;
	}

	return video;

error:
	vsp1_video_cleanup(video);
	return ERR_PTR(ret);
}

/*
 * vsp1_video_cleanup - Unregister the video device and clean up its entity
 * @video: the video node created by vsp1_video_create()
 *
 * Safe to call whether or not video_register_device() succeeded.
 */
void vsp1_video_cleanup(struct vsp1_video *video)
{
	if (video_is_registered(&video->video))
		video_unregister_device(&video->video);

	media_entity_cleanup(&video->video.entity);
}