/*
 * Xilinx Video DMA
 *
 * Copyright (C) 2013-2015 Ideas on Board
 * Copyright (C) 2013-2015 Xilinx, Inc.
 *
 * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
 *           Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma/xilinx_dma.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

#include "xilinx-dma.h"
#include "xilinx-vip.h"
#include "xilinx-vipp.h"

#define XVIP_DMA_DEF_FORMAT		V4L2_PIX_FMT_YUYV
#define XVIP_DMA_DEF_WIDTH		1920
#define XVIP_DMA_DEF_HEIGHT		1080

/* Minimum and maximum widths are expressed in bytes */
#define XVIP_DMA_MIN_WIDTH		1U
#define XVIP_DMA_MAX_WIDTH		65535U
#define XVIP_DMA_MIN_HEIGHT		1U
#define XVIP_DMA_MAX_HEIGHT		8191U

/* -----------------------------------------------------------------------------
 * Helper functions
 */

static struct v4l2_subdev *
xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
{
	struct media_pad *remote;

	remote = media_entity_remote_pad(local);
	if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}

static int xvip_dma_verify_format(struct xvip_dma *dma)
{
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	int ret;

	subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
	if (subdev == NULL)
		return -EPIPE;

	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	if (dma->fmtinfo->code != fmt.format.code ||
	    dma->format.height != fmt.format.height ||
	    dma->format.width != fmt.format.width ||
	    dma->format.colorspace != fmt.format.colorspace)
		return -EINVAL;

	return 0;
}

/* -----------------------------------------------------------------------------
 * Pipeline Stream Management
 */

/**
 * xvip_pipeline_start_stop - Start or stop streaming on a pipeline
 * @pipe: The pipeline
 * @start: Start (when true) or stop (when false) the pipeline
 *
 * Walk the entities chain starting at the pipeline output video node and start
 * or stop all of them.
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise.
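 *
 * When stopping (@start is false), errors returned by the entities'
 * video::s_stream operations are ignored so that the remaining entities in
 * the pipeline still get stopped.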
 */
static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
{
	struct xvip_dma *dma = pipe->output;
	struct media_entity *entity;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;
	int ret;

	entity = &dma->video.entity;
	while (1) {
		pad = &entity->pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;

		pad = media_entity_remote_pad(pad);
		if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
			break;

		entity = pad->entity;
		subdev = media_entity_to_v4l2_subdev(entity);

		ret = v4l2_subdev_call(subdev, video, s_stream, start);
		if (start && ret < 0 && ret != -ENOIOCTLCMD)
			return ret;
	}

	return 0;
}

/**
 * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
 * @pipe: The pipeline
 * @on: Turn the stream on when true or off when false
 *
 * The pipeline is shared between all DMA engines connected at its input and
 * output. While the stream state of DMA engines can be controlled
 * independently, pipelines have a shared stream state that enables or disables
 * all entities in the pipeline. For this reason the pipeline uses a streaming
 * counter that tracks the number of DMA engines that have requested the stream
 * to be enabled.
 *
 * When called with the @on argument set to true, this function will increment
 * the pipeline streaming count. If the streaming count reaches the number of
 * DMA engines in the pipeline it will enable all entities that belong to the
 * pipeline.
 *
 * Similarly, when called with the @on argument set to false, this function will
 * decrement the pipeline streaming count and disable all entities in the
 * pipeline when the streaming count reaches zero.
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise. Stopping the pipeline never fails. The pipeline state is
 * not updated when the operation fails.
 */
static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
{
	int ret = 0;

	mutex_lock(&pipe->lock);

	if (on) {
		if (pipe->stream_count == pipe->num_dmas - 1) {
			ret = xvip_pipeline_start_stop(pipe, true);
			if (ret < 0)
				goto done;
		}
		pipe->stream_count++;
	} else {
		if (--pipe->stream_count == 0)
			xvip_pipeline_start_stop(pipe, false);
	}

done:
	mutex_unlock(&pipe->lock);
	return ret;
}

static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
				  struct xvip_dma *start)
{
	struct media_entity_graph graph;
	struct media_entity *entity = &start->video.entity;
	struct media_device *mdev = entity->graph_obj.mdev;
	unsigned int num_inputs = 0;
	unsigned int num_outputs = 0;
	int ret;

	mutex_lock(&mdev->graph_mutex);

	/* Walk the graph to locate the video nodes.
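	 * Record the DMA engine connected at the pipeline output and count
	 * the video nodes to validate the pipeline topology.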
	 */
	ret = media_entity_graph_walk_init(&graph, entity->graph_obj.mdev);
	if (ret) {
		mutex_unlock(&mdev->graph_mutex);
		return ret;
	}

	media_entity_graph_walk_start(&graph, entity);

	while ((entity = media_entity_graph_walk_next(&graph))) {
		struct xvip_dma *dma;

		if (entity->function != MEDIA_ENT_F_IO_V4L)
			continue;

		dma = to_xvip_dma(media_entity_to_video_device(entity));

		if (dma->pad.flags & MEDIA_PAD_FL_SINK) {
			pipe->output = dma;
			num_outputs++;
		} else {
			num_inputs++;
		}
	}

	mutex_unlock(&mdev->graph_mutex);

	media_entity_graph_walk_cleanup(&graph);

	/* We need exactly one output and zero or one input. */
	if (num_outputs != 1 || num_inputs > 1)
		return -EPIPE;

	pipe->num_dmas = num_inputs + num_outputs;

	return 0;
}

static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
	pipe->num_dmas = 0;
	pipe->output = NULL;
}

/**
 * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
 * @pipe: the pipeline
 *
 * Decrease the pipeline use count and clean it up if we were the last user.
 */
static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
	mutex_lock(&pipe->lock);

	/* If we're the last user clean up the pipeline. */
	if (--pipe->use_count == 0)
		__xvip_pipeline_cleanup(pipe);

	mutex_unlock(&pipe->lock);
}

/**
 * xvip_pipeline_prepare - Prepare the pipeline for streaming
 * @pipe: the pipeline
 * @dma: DMA engine at one end of the pipeline
 *
 * Validate the pipeline if no user exists yet, otherwise just increase the use
 * count.
 *
 * Return: 0 if successful or -EPIPE if the pipeline is not valid.
 */
static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
				 struct xvip_dma *dma)
{
	int ret;

	mutex_lock(&pipe->lock);

	/* If we're the first user validate and initialize the pipeline.
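	 * A valid pipeline requires exactly one output video node and at
	 * most one input video node.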
	 */
	if (pipe->use_count == 0) {
		ret = xvip_pipeline_validate(pipe, dma);
		if (ret < 0) {
			__xvip_pipeline_cleanup(pipe);
			goto done;
		}
	}

	pipe->use_count++;
	ret = 0;

done:
	mutex_unlock(&pipe->lock);
	return ret;
}

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

/**
 * struct xvip_dma_buffer - Video DMA buffer
 * @buf: vb2 buffer base object
 * @queue: buffer list entry in the DMA engine queued buffers list
 * @dma: DMA channel that uses the buffer
 */
struct xvip_dma_buffer {
	struct vb2_v4l2_buffer buf;
	struct list_head queue;
	struct xvip_dma *dma;
};

#define to_xvip_dma_buffer(vb)	container_of(vb, struct xvip_dma_buffer, buf)

static void xvip_dma_complete(void *param)
{
	struct xvip_dma_buffer *buf = param;
	struct xvip_dma *dma = buf->dma;

	spin_lock(&dma->queued_lock);
	list_del(&buf->queue);
	spin_unlock(&dma->queued_lock);

	buf->buf.field = V4L2_FIELD_NONE;
	buf->buf.sequence = dma->sequence++;
	buf->buf.vb2_buf.timestamp = ktime_get_ns();
	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, dma->format.sizeimage);
	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}

static int
xvip_dma_queue_setup(struct vb2_queue *vq,
		     unsigned int *nbuffers, unsigned int *nplanes,
		     unsigned int sizes[], void *alloc_ctxs[])
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);

	alloc_ctxs[0] = dma->alloc_ctx;
	/* Make sure the image size is large enough. */
	if (*nplanes)
		return sizes[0] < dma->format.sizeimage ? -EINVAL : 0;

	*nplanes = 1;
	sizes[0] = dma->format.sizeimage;

	return 0;
}

static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);

	buf->dma = dma;

	return 0;
}

static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
	struct dma_async_tx_descriptor *desc;
	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
	u32 flags;

	if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
		dma->xt.dir = DMA_DEV_TO_MEM;
		dma->xt.src_sgl = false;
		dma->xt.dst_sgl = true;
		dma->xt.dst_start = addr;
	} else {
		flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
		dma->xt.dir = DMA_MEM_TO_DEV;
		dma->xt.src_sgl = true;
		dma->xt.dst_sgl = false;
		dma->xt.src_start = addr;
	}

	dma->xt.frame_size = 1;
	dma->sgl[0].size = dma->format.width * dma->fmtinfo->bpp;
	dma->sgl[0].icg = dma->format.bytesperline - dma->sgl[0].size;
	dma->xt.numf = dma->format.height;

	desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
	if (!desc) {
		dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
		return;
	}
	desc->callback = xvip_dma_complete;
	desc->callback_param = buf;

	spin_lock_irq(&dma->queued_lock);
	list_add_tail(&buf->queue, &dma->queued_bufs);
	spin_unlock_irq(&dma->queued_lock);

	dmaengine_submit(desc);
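
	/*
	 * Only kick the DMA engine if the queue is already streaming.
	 * Otherwise xvip_dma_start_streaming() will issue the pending
	 * transfers once the pipeline has been prepared.
	 */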
	if (vb2_is_streaming(&dma->queue))
		dma_async_issue_pending(dma->dma);
}

static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);
	struct xvip_dma_buffer *buf, *nbuf;
	struct xvip_pipeline *pipe;
	int ret;

	dma->sequence = 0;

	/*
	 * Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 *
	 * Use the pipeline object embedded in the first DMA object that starts
	 * streaming.
	 */
	pipe = dma->video.entity.pipe
	     ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;

	ret = media_entity_pipeline_start(&dma->video.entity, &pipe->pipe);
	if (ret < 0)
		goto error;

	/* Verify that the configured format matches the output of the
	 * connected subdev.
	 */
	ret = xvip_dma_verify_format(dma);
	if (ret < 0)
		goto error_stop;

	ret = xvip_pipeline_prepare(pipe, dma);
	if (ret < 0)
		goto error_stop;

	/* Start the DMA engine. This must be done before starting the blocks
	 * in the pipeline to avoid DMA synchronization issues.
	 */
	dma_async_issue_pending(dma->dma);

	/* Start the pipeline. */
	xvip_pipeline_set_stream(pipe, true);

	return 0;

error_stop:
	media_entity_pipeline_stop(&dma->video.entity);

error:
	/* Give back all queued buffers to videobuf2. */
	spin_lock_irq(&dma->queued_lock);
	list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
		list_del(&buf->queue);
	}
	spin_unlock_irq(&dma->queued_lock);

	return ret;
}

static void xvip_dma_stop_streaming(struct vb2_queue *vq)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);
	struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
	struct xvip_dma_buffer *buf, *nbuf;

	/* Stop the pipeline. */
	xvip_pipeline_set_stream(pipe, false);

	/* Stop and reset the DMA engine. */
	dmaengine_terminate_all(dma->dma);

	/* Cleanup the pipeline and mark it as being stopped. */
	xvip_pipeline_cleanup(pipe);
	media_entity_pipeline_stop(&dma->video.entity);

	/* Give back all queued buffers to videobuf2.
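	 * The transfers have been cancelled by dmaengine_terminate_all()
	 * above, so return the buffers in the error state.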
	 */
	spin_lock_irq(&dma->queued_lock);
	list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
		list_del(&buf->queue);
	}
	spin_unlock_irq(&dma->queued_lock);
}

static struct vb2_ops xvip_dma_queue_qops = {
	.queue_setup = xvip_dma_queue_setup,
	.buf_prepare = xvip_dma_buffer_prepare,
	.buf_queue = xvip_dma_buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = xvip_dma_start_streaming,
	.stop_streaming = xvip_dma_stop_streaming,
};

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
			  | dma->xdev->v4l2_caps;

	if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
	else
		cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;

	strlcpy(cap->driver, "xilinx-vipp", sizeof(cap->driver));
	strlcpy(cap->card, dma->video.name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s:%u",
		 dma->xdev->dev->of_node->name, dma->port);

	return 0;
}

/* FIXME: without this callback function, some applications are not configured
 * with the correct format, which results in frames in the wrong format. Whether
 * this callback needs to be required is not clearly defined, so it should be
 * clarified through the mailing list.
 */
static int
xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	if (f->index > 0)
		return -EINVAL;

	f->pixelformat = dma->format.pixelformat;
	strlcpy(f->description, dma->fmtinfo->description,
		sizeof(f->description));

	return 0;
}

static int
xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	format->fmt.pix = dma->format;

	return 0;
}

static void
__xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
		      const struct xvip_video_format **fmtinfo)
{
	const struct xvip_video_format *info;
	unsigned int min_width;
	unsigned int max_width;
	unsigned int min_bpl;
	unsigned int max_bpl;
	unsigned int width;
	unsigned int align;
	unsigned int bpl;

	/* Retrieve format information and select the default format if the
	 * requested format isn't supported.
	 */
	info = xvip_get_format_by_fourcc(pix->pixelformat);
	if (IS_ERR(info))
		info = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);

	pix->pixelformat = info->fourcc;
	pix->field = V4L2_FIELD_NONE;

	/* The transfer alignment requirements are expressed in bytes. Compute
	 * the minimum and maximum values, clamp the requested width and convert
	 * it back to pixels.
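	 *
	 * For example, assuming an 8 byte DMA alignment requirement and a
	 * 2 bytes-per-pixel format, align = lcm(8, 2) = 8 bytes and the
	 * requested width ends up rounded down to a multiple of 4 pixels.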
572 */ 573 align = lcm(dma->align, info->bpp); 574 min_width = roundup(XVIP_DMA_MIN_WIDTH, align); 575 max_width = rounddown(XVIP_DMA_MAX_WIDTH, align); 576 width = rounddown(pix->width * info->bpp, align); 577 578 pix->width = clamp(width, min_width, max_width) / info->bpp; 579 pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT, 580 XVIP_DMA_MAX_HEIGHT); 581 582 /* Clamp the requested bytes per line value. If the maximum bytes per 583 * line value is zero, the module doesn't support user configurable line 584 * sizes. Override the requested value with the minimum in that case. 585 */ 586 min_bpl = pix->width * info->bpp; 587 max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align); 588 bpl = rounddown(pix->bytesperline, dma->align); 589 590 pix->bytesperline = clamp(bpl, min_bpl, max_bpl); 591 pix->sizeimage = pix->bytesperline * pix->height; 592 593 if (fmtinfo) 594 *fmtinfo = info; 595 } 596 597 static int 598 xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format) 599 { 600 struct v4l2_fh *vfh = file->private_data; 601 struct xvip_dma *dma = to_xvip_dma(vfh->vdev); 602 603 __xvip_dma_try_format(dma, &format->fmt.pix, NULL); 604 return 0; 605 } 606 607 static int 608 xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format) 609 { 610 struct v4l2_fh *vfh = file->private_data; 611 struct xvip_dma *dma = to_xvip_dma(vfh->vdev); 612 const struct xvip_video_format *info; 613 614 __xvip_dma_try_format(dma, &format->fmt.pix, &info); 615 616 if (vb2_is_busy(&dma->queue)) 617 return -EBUSY; 618 619 dma->format = format->fmt.pix; 620 dma->fmtinfo = info; 621 622 return 0; 623 } 624 625 static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = { 626 .vidioc_querycap = xvip_dma_querycap, 627 .vidioc_enum_fmt_vid_cap = xvip_dma_enum_format, 628 .vidioc_g_fmt_vid_cap = xvip_dma_get_format, 629 .vidioc_g_fmt_vid_out = xvip_dma_get_format, 630 .vidioc_s_fmt_vid_cap = xvip_dma_set_format, 631 .vidioc_s_fmt_vid_out = xvip_dma_set_format, 632 .vidioc_try_fmt_vid_cap = xvip_dma_try_format, 633 .vidioc_try_fmt_vid_out = xvip_dma_try_format, 634 .vidioc_reqbufs = vb2_ioctl_reqbufs, 635 .vidioc_querybuf = vb2_ioctl_querybuf, 636 .vidioc_qbuf = vb2_ioctl_qbuf, 637 .vidioc_dqbuf = vb2_ioctl_dqbuf, 638 .vidioc_create_bufs = vb2_ioctl_create_bufs, 639 .vidioc_expbuf = vb2_ioctl_expbuf, 640 .vidioc_streamon = vb2_ioctl_streamon, 641 .vidioc_streamoff = vb2_ioctl_streamoff, 642 }; 643 644 /* ----------------------------------------------------------------------------- 645 * V4L2 file operations 646 */ 647 648 static const struct v4l2_file_operations xvip_dma_fops = { 649 .owner = THIS_MODULE, 650 .unlocked_ioctl = video_ioctl2, 651 .open = v4l2_fh_open, 652 .release = vb2_fop_release, 653 .poll = vb2_fop_poll, 654 .mmap = vb2_fop_mmap, 655 }; 656 657 /* ----------------------------------------------------------------------------- 658 * Xilinx Video DMA Core 659 */ 660 661 int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma, 662 enum v4l2_buf_type type, unsigned int port) 663 { 664 char name[16]; 665 int ret; 666 667 dma->xdev = xdev; 668 dma->port = port; 669 mutex_init(&dma->lock); 670 mutex_init(&dma->pipe.lock); 671 INIT_LIST_HEAD(&dma->queued_bufs); 672 spin_lock_init(&dma->queued_lock); 673 674 dma->fmtinfo = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT); 675 dma->format.pixelformat = dma->fmtinfo->fourcc; 676 dma->format.colorspace = V4L2_COLORSPACE_SRGB; 677 dma->format.field = V4L2_FIELD_NONE; 678 dma->format.width = XVIP_DMA_DEF_WIDTH; 679 
	dma->format.height = XVIP_DMA_DEF_HEIGHT;
	dma->format.bytesperline = dma->format.width * dma->fmtinfo->bpp;
	dma->format.sizeimage = dma->format.bytesperline * dma->format.height;

	/* Initialize the media entity... */
	dma->pad.flags = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
		       ? MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;

	ret = media_entity_pads_init(&dma->video.entity, 1, &dma->pad);
	if (ret < 0)
		goto error;

	/* ... and the video node... */
	dma->video.fops = &xvip_dma_fops;
	dma->video.v4l2_dev = &xdev->v4l2_dev;
	dma->video.queue = &dma->queue;
	snprintf(dma->video.name, sizeof(dma->video.name), "%s %s %u",
		 xdev->dev->of_node->name,
		 type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? "output" : "input",
		 port);
	dma->video.vfl_type = VFL_TYPE_GRABBER;
	dma->video.vfl_dir = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			   ? VFL_DIR_RX : VFL_DIR_TX;
	dma->video.release = video_device_release_empty;
	dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
	dma->video.lock = &dma->lock;

	video_set_drvdata(&dma->video, dma);

	/* ... and the buffers queue... */
	dma->alloc_ctx = vb2_dma_contig_init_ctx(dma->xdev->dev);
	if (IS_ERR(dma->alloc_ctx)) {
		ret = PTR_ERR(dma->alloc_ctx);
		goto error;
	}

	/* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
	 * V4L2 APIs would be inefficient. Testing on the command line with a
	 * 'cat /dev/video?' thus won't be possible, but given that the driver
	 * anyway requires a test tool to set up the pipeline before any video
	 * stream can be started, requiring a specific V4L2 test tool as well
	 * instead of 'cat' isn't really a drawback.
	 */
	dma->queue.type = type;
	dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	dma->queue.lock = &dma->lock;
	dma->queue.drv_priv = dma;
	dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
	dma->queue.ops = &xvip_dma_queue_qops;
	dma->queue.mem_ops = &vb2_dma_contig_memops;
	dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
				   | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
	ret = vb2_queue_init(&dma->queue);
	if (ret < 0) {
		dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");
		goto error;
	}

	/* ... and the DMA channel. */
	snprintf(name, sizeof(name), "port%u", port);
	dma->dma = dma_request_slave_channel(dma->xdev->dev, name);
	if (dma->dma == NULL) {
		dev_err(dma->xdev->dev, "no VDMA channel found\n");
		ret = -ENODEV;
		goto error;
	}

	dma->align = 1 << dma->dma->device->copy_align;

	ret = video_register_device(&dma->video, VFL_TYPE_GRABBER, -1);
	if (ret < 0) {
		dev_err(dma->xdev->dev, "failed to register video device\n");
		goto error;
	}

	return 0;

error:
	xvip_dma_cleanup(dma);
	return ret;
}

void xvip_dma_cleanup(struct xvip_dma *dma)
{
	if (video_is_registered(&dma->video))
		video_unregister_device(&dma->video);

	if (dma->dma)
		dma_release_channel(dma->dma);

	if (!IS_ERR_OR_NULL(dma->alloc_ctx))
		vb2_dma_contig_cleanup_ctx(dma->alloc_ctx);

	media_entity_cleanup(&dma->video.entity);

	mutex_destroy(&dma->lock);
	mutex_destroy(&dma->pipe.lock);
}