// SPDX-License-Identifier: GPL-2.0-only
/*
 * ispvideo.c
 *
 * TI OMAP3 ISP - Generic video node
 *
 * Copyright (C) 2009-2010 Nokia Corporation
 *
 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	     Sakari Ailus <sakari.ailus@iki.fi>
 */

#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
#include <media/videobuf2-dma-contig.h>

#include "ispvideo.h"
#include "isp.h"


/* -----------------------------------------------------------------------------
 * Helper functions
 */

/*
 * NOTE: When adding new media bus codes, always remember to add
 * corresponding in-memory formats to the table below!!!
 */
static struct isp_format_info formats[] = {
	{ MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
	  MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_GREY, 8, 1, },
	{ MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y10_1X10,
	  MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_Y10, 10, 2, },
	{ MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y10_1X10,
	  MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_Y12, 12, 2, },
	{ MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
	  MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR8, 8, 1, },
	{ MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
	  MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG8, 8, 1, },
	{ MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
	  MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG8, 8, 1, },
	{ MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
	  MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB8, 8, 1, },
	{ MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SBGGR10_1X10, 0,
	  V4L2_PIX_FMT_SBGGR10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SGBRG10_1X10, 0,
	  V4L2_PIX_FMT_SGBRG10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SGRBG10_1X10, 0,
	  V4L2_PIX_FMT_SGRBG10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SRGGB10_1X10, 0,
	  V4L2_PIX_FMT_SRGGB10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10,
	  MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR10, 10, 2, },
	{ MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10,
	  MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG10, 10, 2, },
	{ MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10,
	  MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG10, 10, 2, },
	{ MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10,
	  MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB10, 10, 2, },
	{ MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR10_1X10,
	  MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR12, 12, 2, },
	{ MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG10_1X10,
	  MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG12, 12, 2, },
	{ MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG10_1X10,
	  MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG12, 12, 2, },
	{ MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB10_1X10,
	  MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB12, 12, 2, },
	{ MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_UYVY8_1X16,
	  MEDIA_BUS_FMT_UYVY8_1X16, 0,
	  V4L2_PIX_FMT_UYVY, 16, 2, },
	{ MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV8_1X16,
	  MEDIA_BUS_FMT_YUYV8_1X16, 0,
	  V4L2_PIX_FMT_YUYV, 16, 2, },
	{ MEDIA_BUS_FMT_UYVY8_2X8, MEDIA_BUS_FMT_UYVY8_2X8,
	  MEDIA_BUS_FMT_UYVY8_2X8, 0,
	  V4L2_PIX_FMT_UYVY, 8, 2, },
	{ MEDIA_BUS_FMT_YUYV8_2X8, MEDIA_BUS_FMT_YUYV8_2X8,
	  MEDIA_BUS_FMT_YUYV8_2X8, 0,
	  V4L2_PIX_FMT_YUYV, 8, 2, },
	/* Empty entry to catch the unsupported pixel code (0) used by the CCDC
	 * module and avoid NULL pointer dereferences.
	 */
	{ 0, }
};

const struct isp_format_info *omap3isp_video_format_info(u32 code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == code)
			return &formats[i];
	}

	return NULL;
}
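
/*
 * Illustrative usage sketch (not taken from the driver itself): callers must
 * handle a NULL return for media bus codes that have no in-memory format, e.g.
 *
 *	const struct isp_format_info *info;
 *
 *	info = omap3isp_video_format_info(MEDIA_BUS_FMT_SGRBG10_1X10);
 *	if (info)
 *		pr_debug("pixelformat %08x, %u byte(s) per pixel\n",
 *			 info->pixelformat, info->bpp);
 */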

/*
 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
 * @video: ISP video instance
 * @mbus: v4l2_mbus_framefmt format (input)
 * @pix: v4l2_pix_format format (output)
 *
 * Fill the output pix structure with information from the input mbus format.
 * The bytesperline and sizeimage fields are computed from the requested bytes
 * per line value in the pix format and information from the video instance.
 *
 * Return the number of padding bytes at the end of the line.
 */
static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
					  const struct v4l2_mbus_framefmt *mbus,
					  struct v4l2_pix_format *pix)
{
	unsigned int bpl = pix->bytesperline;
	unsigned int min_bpl;
	unsigned int i;

	memset(pix, 0, sizeof(*pix));
	pix->width = mbus->width;
	pix->height = mbus->height;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == mbus->code)
			break;
	}

	if (WARN_ON(i == ARRAY_SIZE(formats)))
		return 0;

	min_bpl = pix->width * formats[i].bpp;

	/* Clamp the requested bytes per line value. If the maximum bytes per
	 * line value is zero, the module doesn't support user configurable
	 * line sizes. Override the requested value with the minimum in that
	 * case.
	 */
	if (video->bpl_max)
		bpl = clamp(bpl, min_bpl, video->bpl_max);
	else
		bpl = min_bpl;

	if (!video->bpl_zero_padding || bpl != min_bpl)
		bpl = ALIGN(bpl, video->bpl_alignment);

	pix->pixelformat = formats[i].pixelformat;
	pix->bytesperline = bpl;
	pix->sizeimage = pix->bytesperline * pix->height;
	pix->colorspace = mbus->colorspace;
	pix->field = mbus->field;

	return bpl - min_bpl;
}
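
/*
 * Worked example for the function above, assuming hypothetical module limits
 * of bpl_max = 0x10000, bpl_alignment = 32 and bpl_zero_padding = 0: for a
 * 640-pixel wide V4L2_PIX_FMT_SGRBG10 line (2 bytes per pixel) min_bpl is
 * 1280; a requested bytesperline of 1300 is clamped to the [1280, 0x10000]
 * range, aligned up to 1312, and the function returns 32 bytes of end-of-line
 * padding.
 */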

static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
				  struct v4l2_mbus_framefmt *mbus)
{
	unsigned int i;

	memset(mbus, 0, sizeof(*mbus));
	mbus->width = pix->width;
	mbus->height = pix->height;

	/* Skip the last format in the loop so that it will be selected if no
	 * match is found.
	 */
	for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
		if (formats[i].pixelformat == pix->pixelformat)
			break;
	}

	mbus->code = formats[i].code;
	mbus->colorspace = pix->colorspace;
	mbus->field = pix->field;
}

static struct v4l2_subdev *
isp_video_remote_subdev(struct isp_video *video, u32 *pad)
{
	struct media_pad *remote;

	remote = media_pad_remote_pad_first(&video->pad);

	if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}

/* Walk the pipeline, mark all its entities in the entity enum and locate the
 * ISP video node at the far end to fill the pipeline input and output fields.
 */
static int isp_video_get_graph_data(struct isp_video *video,
				    struct isp_pipeline *pipe)
{
	struct media_pipeline_entity_iter iter;
	struct media_entity *entity;
	struct isp_video *far_end = NULL;
	int ret;

	ret = media_pipeline_entity_iter_init(&pipe->pipe, &iter);
	if (ret)
		return ret;

	media_pipeline_for_each_entity(&pipe->pipe, &iter, entity) {
		struct isp_video *__video;

		media_entity_enum_set(&pipe->ent_enum, entity);

		if (far_end != NULL)
			continue;

		if (entity == &video->video.entity)
			continue;

		if (!is_media_entity_v4l2_video_device(entity))
			continue;

		__video = to_isp_video(media_entity_to_video_device(entity));
		if (__video->type != video->type)
			far_end = __video;
	}

	media_pipeline_entity_iter_cleanup(&iter);

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		pipe->input = far_end;
		pipe->output = video;
	} else {
		if (far_end == NULL)
			return -EPIPE;

		pipe->input = video;
		pipe->output = far_end;
	}

	return 0;
}
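
/*
 * Illustrative summary of how the pipeline ends are filled by the function
 * above (not a separate API):
 *
 *	V4L2_BUF_TYPE_VIDEO_CAPTURE node:
 *		pipe->input  = far-end video node, or NULL for a sensor input
 *		pipe->output = this video node
 *	V4L2_BUF_TYPE_VIDEO_OUTPUT node:
 *		pipe->input  = this video node
 *		pipe->output = far-end video node (mandatory, -EPIPE otherwise)
 */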

static int
__isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
{
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	fmt.pad = pad;

	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	mutex_unlock(&video->mutex);

	if (ret)
		return ret;

	format->type = video->type;
	return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
}

static int
isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
{
	struct v4l2_format format;
	int ret;

	memcpy(&format, &vfh->format, sizeof(format));
	ret = __isp_video_get_format(video, &format);
	if (ret < 0)
		return ret;

	if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
	    vfh->format.fmt.pix.height != format.fmt.pix.height ||
	    vfh->format.fmt.pix.width != format.fmt.pix.width ||
	    vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
	    vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage ||
	    vfh->format.fmt.pix.field != format.fmt.pix.field)
		return -EINVAL;

	return 0;
}

/* -----------------------------------------------------------------------------
 * Video queue operations
 */

static int isp_video_queue_setup(struct vb2_queue *queue,
				 unsigned int *count, unsigned int *num_planes,
				 unsigned int sizes[], struct device *alloc_devs[])
{
	struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
	struct isp_video *video = vfh->video;

	*num_planes = 1;

	sizes[0] = vfh->format.fmt.pix.sizeimage;
	if (sizes[0] == 0)
		return -EINVAL;

	*count = min(*count, video->capture_mem / PAGE_ALIGN(sizes[0]));

	return 0;
}

static int isp_video_buffer_prepare(struct vb2_buffer *buf)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf);
	struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
	struct isp_buffer *buffer = to_isp_buffer(vbuf);
	struct isp_video *video = vfh->video;
	dma_addr_t addr;

	/* Refuse to prepare the buffer if the video node has registered an
	 * error. We don't need to take any lock here as the operation is
	 * inherently racy. The authoritative check will be performed in the
	 * queue handler, which can't return an error; this check is just a
	 * best effort to notify userspace as early as possible.
	 */
	if (unlikely(video->error))
		return -EIO;

	addr = vb2_dma_contig_plane_dma_addr(buf, 0);
	if (!IS_ALIGNED(addr, 32)) {
		dev_dbg(video->isp->dev,
			"Buffer address must be aligned to 32 bytes boundary.\n");
		return -EINVAL;
	}

	vb2_set_plane_payload(&buffer->vb.vb2_buf, 0,
			      vfh->format.fmt.pix.sizeimage);
	buffer->dma = addr;

	return 0;
}
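
/*
 * The ISP DMA engines require 32-byte aligned buffer addresses, hence the
 * check above. MMAP buffers allocated by videobuf2-dma-contig are page
 * aligned and always pass; the check mainly rejects misaligned USERPTR
 * buffers.
 */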

/*
 * isp_video_buffer_queue - Add buffer to streaming queue
 * @buf: Video buffer
 *
 * In memory-to-memory mode, start streaming on the pipeline if buffers are
 * queued on both the input and the output, if the pipeline isn't already busy.
 * If the pipeline is busy, it will be restarted in the output module interrupt
 * handler.
 */
static void isp_video_buffer_queue(struct vb2_buffer *buf)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf);
	struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
	struct isp_buffer *buffer = to_isp_buffer(vbuf);
	struct isp_video *video = vfh->video;
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned long flags;
	unsigned int empty;
	unsigned int start;

	spin_lock_irqsave(&video->irqlock, flags);

	if (unlikely(video->error)) {
		vb2_buffer_done(&buffer->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		spin_unlock_irqrestore(&video->irqlock, flags);
		return;
	}

	empty = list_empty(&video->dmaqueue);
	list_add_tail(&buffer->irqlist, &video->dmaqueue);

	spin_unlock_irqrestore(&video->irqlock, flags);

	if (empty) {
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT;
		else
			state = ISP_PIPELINE_QUEUE_INPUT;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state |= state;
		video->ops->queue(video, buffer);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;

		start = isp_pipeline_ready(pipe);
		if (start)
			pipe->state |= ISP_PIPELINE_STREAM;
		spin_unlock_irqrestore(&pipe->lock, flags);

		if (start)
			omap3isp_pipeline_set_stream(pipe,
					ISP_PIPELINE_STREAM_SINGLESHOT);
	}
}

/*
 * omap3isp_video_return_buffers - Return all queued buffers to videobuf2
 * @video: ISP video object
 * @state: new state for the returned buffers
 *
 * Return all buffers queued on the video node to videobuf2 in the given state.
 * The buffer state should be VB2_BUF_STATE_QUEUED if called due to an error
 * when starting the stream, or VB2_BUF_STATE_ERROR otherwise.
 *
 * The function must be called with the video irqlock held.
 */
static void omap3isp_video_return_buffers(struct isp_video *video,
					  enum vb2_buffer_state state)
{
	while (!list_empty(&video->dmaqueue)) {
		struct isp_buffer *buf;

		buf = list_first_entry(&video->dmaqueue,
				       struct isp_buffer, irqlist);
		list_del(&buf->irqlist);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
}

static int isp_video_start_streaming(struct vb2_queue *queue,
				     unsigned int count)
{
	struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
	struct isp_video *video = vfh->video;
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	unsigned long flags;
	int ret;

	/* In sensor-to-memory mode, the stream can be started synchronously
	 * with the stream on command. In memory-to-memory mode, it will be
	 * started when buffers are queued on both the input and output.
	 */
	if (pipe->input)
		return 0;

	ret = omap3isp_pipeline_set_stream(pipe,
					   ISP_PIPELINE_STREAM_CONTINUOUS);
	if (ret < 0) {
		spin_lock_irqsave(&video->irqlock, flags);
		omap3isp_video_return_buffers(video, VB2_BUF_STATE_QUEUED);
		spin_unlock_irqrestore(&video->irqlock, flags);
		return ret;
	}

	spin_lock_irqsave(&video->irqlock, flags);
	if (list_empty(&video->dmaqueue))
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
	spin_unlock_irqrestore(&video->irqlock, flags);

	return 0;
}

static void omap3isp_wait_prepare(struct vb2_queue *vq)
{
	struct isp_video_fh *vfh = vb2_get_drv_priv(vq);
	struct isp_video *video = vfh->video;

	mutex_unlock(&video->queue_lock);
}

static void omap3isp_wait_finish(struct vb2_queue *vq)
{
	struct isp_video_fh *vfh = vb2_get_drv_priv(vq);
	struct isp_video *video = vfh->video;

	mutex_lock(&video->queue_lock);
}

static const struct vb2_ops isp_video_queue_ops = {
	.queue_setup = isp_video_queue_setup,
	.buf_prepare = isp_video_buffer_prepare,
	.buf_queue = isp_video_buffer_queue,
	.start_streaming = isp_video_start_streaming,
	.wait_prepare = omap3isp_wait_prepare,
	.wait_finish = omap3isp_wait_finish,
};
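
/*
 * For reference, a typical (simplified) userspace sequence and the callbacks
 * it ends up exercising:
 *
 *	VIDIOC_S_FMT      -> isp_video_set_format()
 *	VIDIOC_REQBUFS    -> isp_video_queue_setup()
 *	VIDIOC_QBUF       -> isp_video_buffer_prepare() + isp_video_buffer_queue()
 *	VIDIOC_STREAMON   -> isp_video_start_streaming()
 *	VIDIOC_DQBUF      -> waits for buffers completed by omap3isp_video_buffer_next()
 *	VIDIOC_STREAMOFF  -> isp_video_streamoff() -> omap3isp_video_cancel_stream()
 */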

/*
 * omap3isp_video_buffer_next - Complete the current buffer and return the next
 * @video: ISP video object
 *
 * Remove the current video buffer from the DMA queue and fill its timestamp and
 * field count before handing it back to videobuf2.
 *
 * For capture video nodes the buffer state is set to VB2_BUF_STATE_DONE if no
 * error has been flagged in the pipeline, or to VB2_BUF_STATE_ERROR otherwise.
 * For video output nodes the buffer state is always set to VB2_BUF_STATE_DONE.
 *
 * The DMA queue is expected to contain at least one buffer.
 *
 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
 * empty.
 */
struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum vb2_buffer_state vb_state;
	struct isp_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&video->irqlock, flags);
	if (WARN_ON(list_empty(&video->dmaqueue))) {
		spin_unlock_irqrestore(&video->irqlock, flags);
		return NULL;
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
			       irqlist);
	list_del(&buf->irqlist);
	spin_unlock_irqrestore(&video->irqlock, flags);

	buf->vb.vb2_buf.timestamp = ktime_get_ns();

	/* Do frame number propagation only if this is the output video node.
	 * Frame number either comes from the CSI receivers or it gets
	 * incremented here if H3A is not active.
	 * Note: There is no guarantee that the output buffer will finish
	 * first, so the input number might lag behind by 1 in some cases.
	 */
	if (video == pipe->output && !pipe->do_propagation)
		buf->vb.sequence =
			atomic_inc_return(&pipe->frame_number);
	else
		buf->vb.sequence = atomic_read(&pipe->frame_number);

	if (pipe->field != V4L2_FIELD_NONE)
		buf->vb.sequence /= 2;

	buf->vb.field = pipe->field;

	/* Report pipeline errors to userspace on the capture device side. */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
		vb_state = VB2_BUF_STATE_ERROR;
		pipe->error = false;
	} else {
		vb_state = VB2_BUF_STATE_DONE;
	}

	vb2_buffer_done(&buf->vb.vb2_buf, vb_state);

	spin_lock_irqsave(&video->irqlock, flags);

	if (list_empty(&video->dmaqueue)) {
		enum isp_pipeline_state state;

		spin_unlock_irqrestore(&video->irqlock, flags);

		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT
			      | ISP_PIPELINE_STREAM;
		else
			state = ISP_PIPELINE_QUEUE_INPUT
			      | ISP_PIPELINE_STREAM;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~state;
		if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&pipe->lock, flags);
		return NULL;
	}

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
		spin_lock(&pipe->lock);
		pipe->state &= ~ISP_PIPELINE_STREAM;
		spin_unlock(&pipe->lock);
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
			       irqlist);

	spin_unlock_irqrestore(&video->irqlock, flags);

	return buf;
}
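
/*
 * Minimal usage sketch, assuming the usual pattern in the per-module
 * interrupt handlers (the module and function names are hypothetical):
 *
 *	buf = omap3isp_video_buffer_next(&module->video_out);
 *	if (buf)
 *		module_set_outaddr(module, buf->dma);
 *
 * A NULL return means the DMA queue underran and no new address can be
 * programmed for the next frame.
 */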

/*
 * omap3isp_video_cancel_stream - Cancel stream on a video node
 * @video: ISP video object
 *
 * Cancelling a stream returns all buffers queued on the video node to videobuf2
 * in the erroneous state and makes sure no new buffer can be queued.
 */
void omap3isp_video_cancel_stream(struct isp_video *video)
{
	unsigned long flags;

	spin_lock_irqsave(&video->irqlock, flags);
	omap3isp_video_return_buffers(video, VB2_BUF_STATE_ERROR);
	video->error = true;
	spin_unlock_irqrestore(&video->irqlock, flags);
}

/*
 * omap3isp_video_resume - Perform resume operation on the buffers
 * @video: ISP video object
 * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
 *
 * This function is intended to be used in suspend/resume scenarios. It requests
 * the video queue layer to discard buffers marked as DONE if the pipeline is in
 * continuous mode, and requests the ISP modules to queue the ACTIVE buffer
 * again if there is one.
 */
void omap3isp_video_resume(struct isp_video *video, int continuous)
{
	struct isp_buffer *buf = NULL;

	if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		mutex_lock(&video->queue_lock);
		vb2_discard_done(video->queue);
		mutex_unlock(&video->queue_lock);
	}

	if (!list_empty(&video->dmaqueue)) {
		buf = list_first_entry(&video->dmaqueue,
				       struct isp_buffer, irqlist);
		video->ops->queue(video, buf);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
	} else {
		if (continuous)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
	}
}

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct isp_video *video = video_drvdata(file);

	strscpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
	strscpy(cap->card, video->video.name, sizeof(cap->card));
	strscpy(cap->bus_info, "media", sizeof(cap->bus_info));

	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
		| V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;

	return 0;
}

static int
isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (format->type != video->type)
		return -EINVAL;

	mutex_lock(&video->mutex);
	*format = vfh->format;
	mutex_unlock(&video->mutex);

	return 0;
}

static int
isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	struct v4l2_mbus_framefmt fmt;

	if (format->type != video->type)
		return -EINVAL;

	/* Replace unsupported field orders with sane defaults. */
	switch (format->fmt.pix.field) {
	case V4L2_FIELD_NONE:
		/* Progressive is supported everywhere. */
		break;
	case V4L2_FIELD_ALTERNATE:
		/* ALTERNATE is not supported on output nodes. */
		if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			format->fmt.pix.field = V4L2_FIELD_NONE;
		break;
	case V4L2_FIELD_INTERLACED:
		/* The ISP has no concept of video standard; select the
		 * top-bottom order when the unqualified interlaced order is
		 * requested.
		 */
		format->fmt.pix.field = V4L2_FIELD_INTERLACED_TB;
		fallthrough;
	case V4L2_FIELD_INTERLACED_TB:
	case V4L2_FIELD_INTERLACED_BT:
		/* Interlaced orders are only supported at the CCDC output. */
		if (video != &video->isp->isp_ccdc.video_out)
			format->fmt.pix.field = V4L2_FIELD_NONE;
		break;
	case V4L2_FIELD_TOP:
	case V4L2_FIELD_BOTTOM:
	case V4L2_FIELD_SEQ_TB:
	case V4L2_FIELD_SEQ_BT:
	default:
		/* All other field orders are currently unsupported, default to
		 * progressive.
		 */
		format->fmt.pix.field = V4L2_FIELD_NONE;
		break;
	}

	/* Fill the bytesperline and sizeimage fields by converting to media bus
	 * format and back to pixel format.
	 */
	isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
	isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);

	mutex_lock(&video->mutex);
	vfh->format = *format;
	mutex_unlock(&video->mutex);

	return 0;
}

static int
isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	if (format->type != video->type)
		return -EINVAL;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);

	fmt.pad = pad;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret)
		return ret == -ENOIOCTLCMD ? -ENOTTY : ret;

	isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
	return 0;
}

static int
isp_video_get_selection(struct file *file, void *fh, struct v4l2_selection *sel)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	struct v4l2_subdev *subdev;
	struct v4l2_subdev_selection sdsel = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.target = sel->target,
	};
	u32 pad;
	int ret;

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			return -EINVAL;
		break;
	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	/* Try the get selection operation first and fall back to get format if
	 * not implemented.
	 */
	sdsel.pad = pad;
	ret = v4l2_subdev_call(subdev, pad, get_selection, NULL, &sdsel);
	if (!ret)
		sel->r = sdsel.r;
	if (ret != -ENOIOCTLCMD)
		return ret;

	format.pad = pad;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -ENOTTY : ret;

	sel->r.left = 0;
	sel->r.top = 0;
	sel->r.width = format.format.width;
	sel->r.height = format.format.height;

	return 0;
}

static int
isp_video_set_selection(struct file *file, void *fh, struct v4l2_selection *sel)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev *subdev;
	struct v4l2_subdev_selection sdsel = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.target = sel->target,
		.flags = sel->flags,
		.r = sel->r,
	};
	u32 pad;
	int ret;

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
		if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			return -EINVAL;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	sdsel.pad = pad;
	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, pad, set_selection, NULL, &sdsel);
	mutex_unlock(&video->mutex);
	if (!ret)
		sel->r = sdsel.r;

	return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
}

static int
isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
	    video->type != a->type)
		return -EINVAL;

	memset(a, 0, sizeof(*a));
	a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
	a->parm.output.timeperframe = vfh->timeperframe;

	return 0;
}

static int
isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
	    video->type != a->type)
		return -EINVAL;

	if (a->parm.output.timeperframe.denominator == 0)
		a->parm.output.timeperframe.denominator = 1;

	vfh->timeperframe = a->parm.output.timeperframe;

	return 0;
}

static int
isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_reqbufs(&vfh->queue, rb);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int
isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_querybuf(&vfh->queue, b);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int
isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_qbuf(&vfh->queue, video->video.v4l2_dev->mdev, b);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int
isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int isp_video_check_external_subdevs(struct isp_video *video,
					    struct isp_pipeline *pipe)
{
	struct isp_device *isp = video->isp;
	struct media_entity *ents[] = {
		&isp->isp_csi2a.subdev.entity,
		&isp->isp_csi2c.subdev.entity,
		&isp->isp_ccp2.subdev.entity,
		&isp->isp_ccdc.subdev.entity
	};
	struct media_pad *source_pad;
	struct media_entity *source = NULL;
	struct media_entity *sink;
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	struct v4l2_ext_controls ctrls;
	struct v4l2_ext_control ctrl;
	unsigned int i;
	int ret;

	/* Memory-to-memory pipelines have no external subdev. */
	if (pipe->input != NULL)
		return 0;

	for (i = 0; i < ARRAY_SIZE(ents); i++) {
		/* Is the entity part of the pipeline? */
		if (!media_entity_enum_test(&pipe->ent_enum, ents[i]))
			continue;

		/* ISP entities always have sink pad == 0; find the source. */
		source_pad = media_pad_remote_pad_first(&ents[i]->pads[0]);
		if (source_pad == NULL)
			continue;

		source = source_pad->entity;
		sink = ents[i];
		break;
	}

	if (!source) {
		dev_warn(isp->dev, "can't find source, failing now\n");
		return -EINVAL;
	}

	if (!is_media_entity_v4l2_subdev(source))
		return 0;

	pipe->external = media_entity_to_v4l2_subdev(source);

	fmt.pad = source_pad->index;
	ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(sink),
			       pad, get_fmt, NULL, &fmt);
	if (unlikely(ret < 0)) {
		dev_warn(isp->dev, "get_fmt returned null!\n");
		return ret;
	}

	pipe->external_width =
		omap3isp_video_format_info(fmt.format.code)->width;

	memset(&ctrls, 0, sizeof(ctrls));
	memset(&ctrl, 0, sizeof(ctrl));

	ctrl.id = V4L2_CID_PIXEL_RATE;

	ctrls.count = 1;
	ctrls.controls = &ctrl;
	ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &video->video,
			       NULL, &ctrls);
	if (ret < 0) {
		dev_warn(isp->dev, "no pixel rate control in subdev %s\n",
			 pipe->external->name);
		return ret;
	}

	pipe->external_rate = ctrl.value64;

	if (media_entity_enum_test(&pipe->ent_enum,
				   &isp->isp_ccdc.subdev.entity)) {
		unsigned int rate = UINT_MAX;
		/*
		 * Check that the maximum allowed CCDC pixel rate isn't
		 * exceeded by the external pixel rate.
		 */
		omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
		if (pipe->external_rate > rate)
			return -ENOSPC;
	}

	return 0;
}

/*
 * Stream management
 *
 * Every ISP pipeline has a single input and a single output. The input can be
 * either a sensor or a video node. The output is always a video node.
 *
 * As every pipeline has an output video node, the ISP video object at the
 * pipeline output stores the pipeline state. It tracks the streaming state of
 * both the input and output, as well as the availability of buffers.
 *
 * In sensor-to-memory mode, frames are always available at the pipeline input.
 * Starting the sensor usually requires I2C transfers and must be done in
 * interruptible context. The pipeline is started and stopped synchronously
 * to the stream on/off commands. All modules in the pipeline will get their
 * subdev set stream handler called. The module at the end of the pipeline must
 * delay starting the hardware until buffers are available at its output.
 *
 * In memory-to-memory mode, starting/stopping the stream requires
 * synchronization between the input and output. ISP modules can't be stopped
 * in the middle of a frame, and at least some of the modules seem to become
 * busy as soon as they're started, even if they don't receive a frame start
 * event. For that reason frames need to be processed in single-shot mode. The
 * driver needs to wait until a frame is completely processed and written to
 * memory before restarting the pipeline for the next frame. Pipelined
 * processing might be possible but requires more testing.
 *
 * Stream start must be delayed until buffers are available at both the input
 * and output. The pipeline must be started in the vb2 queue callback with
 * the buffers queue spinlock held. The modules subdev set stream operation must
 * not sleep.
 */
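
/*
 * A rough sketch of the memory-to-memory start condition, as implemented by
 * isp_video_buffer_queue() and isp_video_streamon(): VIDIOC_STREAMON on each
 * end sets ISP_PIPELINE_STREAM_INPUT/OUTPUT, queueing a buffer on an empty
 * DMA queue sets ISP_PIPELINE_QUEUE_INPUT/OUTPUT, and the pipeline is started
 * in single-shot mode only once isp_pipeline_ready() reports that both ends
 * are streaming and have buffers available.
 */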

static int
isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	enum isp_pipeline_state state;
	struct isp_pipeline *pipe;
	unsigned long flags;
	int ret;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 */
	pipe = to_isp_pipeline(&video->video.entity) ? : &video->pipe;

	ret = media_entity_enum_init(&pipe->ent_enum, &video->isp->media_dev);
	if (ret)
		goto err_enum_init;

	/* TODO: Implement PM QoS */
	pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
	pipe->max_rate = pipe->l3_ick;

	ret = video_device_pipeline_start(&video->video, &pipe->pipe);
	if (ret < 0)
		goto err_pipeline_start;

	/* Verify that the currently configured format matches the output of
	 * the connected subdev.
	 */
	ret = isp_video_check_format(video, vfh);
	if (ret < 0)
		goto err_check_format;

	video->bpl_padding = ret;
	video->bpl_value = vfh->format.fmt.pix.bytesperline;

	ret = isp_video_get_graph_data(video, pipe);
	if (ret < 0)
		goto err_check_format;

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
	else
		state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;

	ret = isp_video_check_external_subdevs(video, pipe);
	if (ret < 0)
		goto err_check_format;

	pipe->error = false;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~ISP_PIPELINE_STREAM;
	pipe->state |= state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Set the maximum time per frame as the value requested by userspace.
	 * This is a soft limit that can be overridden if the hardware doesn't
	 * support the requested limit.
	 */
	if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		pipe->max_timeperframe = vfh->timeperframe;

	video->queue = &vfh->queue;
	INIT_LIST_HEAD(&video->dmaqueue);
	atomic_set(&pipe->frame_number, -1);
	pipe->field = vfh->format.fmt.pix.field;

	mutex_lock(&video->queue_lock);
	ret = vb2_streamon(&vfh->queue, type);
	mutex_unlock(&video->queue_lock);
	if (ret < 0)
		goto err_check_format;

	mutex_unlock(&video->stream_lock);

	return 0;

err_check_format:
	video_device_pipeline_stop(&video->video);
err_pipeline_start:
	/* TODO: Implement PM QoS */
	/* The DMA queue must be emptied here, otherwise CCDC interrupts that
	 * will get triggered the next time the CCDC is powered up will try to
	 * access buffers that might have been freed but still present in the
	 * DMA queue. This can easily get triggered if the above
	 * omap3isp_pipeline_set_stream() call fails on a system with a
	 * free-running sensor.
	 */
	INIT_LIST_HEAD(&video->dmaqueue);
	video->queue = NULL;

	media_entity_enum_cleanup(&pipe->ent_enum);

err_enum_init:
	mutex_unlock(&video->stream_lock);

	return ret;
}

static int
isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned int streaming;
	unsigned long flags;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* Nothing to do if the queue isn't streaming. */
	mutex_lock(&video->queue_lock);
	streaming = vb2_is_streaming(&vfh->queue);
	mutex_unlock(&video->queue_lock);

	if (!streaming)
		goto done;

	/* Update the pipeline state. */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISP_PIPELINE_STREAM_OUTPUT
		      | ISP_PIPELINE_QUEUE_OUTPUT;
	else
		state = ISP_PIPELINE_STREAM_INPUT
		      | ISP_PIPELINE_QUEUE_INPUT;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Stop the stream. */
	omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
	omap3isp_video_cancel_stream(video);

	mutex_lock(&video->queue_lock);
	vb2_streamoff(&vfh->queue, type);
	mutex_unlock(&video->queue_lock);
	video->queue = NULL;
	video->error = false;

	/* TODO: Implement PM QoS */
	video_device_pipeline_stop(&video->video);

	media_entity_enum_cleanup(&pipe->ent_enum);

done:
	mutex_unlock(&video->stream_lock);
	return 0;
}

static int
isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{
	if (input->index > 0)
		return -EINVAL;

	strscpy(input->name, "camera", sizeof(input->name));
	input->type = V4L2_INPUT_TYPE_CAMERA;

	return 0;
}

static int
isp_video_g_input(struct file *file, void *fh, unsigned int *input)
{
	*input = 0;

	return 0;
}

static int
isp_video_s_input(struct file *file, void *fh, unsigned int input)
{
	return input == 0 ? 0 : -EINVAL;
}

static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
	.vidioc_querycap = isp_video_querycap,
	.vidioc_g_fmt_vid_cap = isp_video_get_format,
	.vidioc_s_fmt_vid_cap = isp_video_set_format,
	.vidioc_try_fmt_vid_cap = isp_video_try_format,
	.vidioc_g_fmt_vid_out = isp_video_get_format,
	.vidioc_s_fmt_vid_out = isp_video_set_format,
	.vidioc_try_fmt_vid_out = isp_video_try_format,
	.vidioc_g_selection = isp_video_get_selection,
	.vidioc_s_selection = isp_video_set_selection,
	.vidioc_g_parm = isp_video_get_param,
	.vidioc_s_parm = isp_video_set_param,
	.vidioc_reqbufs = isp_video_reqbufs,
	.vidioc_querybuf = isp_video_querybuf,
	.vidioc_qbuf = isp_video_qbuf,
	.vidioc_dqbuf = isp_video_dqbuf,
	.vidioc_streamon = isp_video_streamon,
	.vidioc_streamoff = isp_video_streamoff,
	.vidioc_enum_input = isp_video_enum_input,
	.vidioc_g_input = isp_video_g_input,
	.vidioc_s_input = isp_video_s_input,
};

/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */

static int isp_video_open(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct isp_video_fh *handle;
	struct vb2_queue *queue;
	int ret = 0;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	v4l2_fh_init(&handle->vfh, &video->video);
	v4l2_fh_add(&handle->vfh);

	/* If this is the first user, initialise the pipeline. */
	if (omap3isp_get(video->isp) == NULL) {
		ret = -EBUSY;
		goto done;
	}

	ret = v4l2_pipeline_pm_get(&video->video.entity);
	if (ret < 0) {
		omap3isp_put(video->isp);
		goto done;
	}

	queue = &handle->queue;
	queue->type = video->type;
	queue->io_modes = VB2_MMAP | VB2_USERPTR;
	queue->drv_priv = handle;
	queue->ops = &isp_video_queue_ops;
	queue->mem_ops = &vb2_dma_contig_memops;
	queue->buf_struct_size = sizeof(struct isp_buffer);
	queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	queue->dev = video->isp->dev;

	ret = vb2_queue_init(&handle->queue);
	if (ret < 0) {
		omap3isp_put(video->isp);
		goto done;
	}

	memset(&handle->format, 0, sizeof(handle->format));
	handle->format.type = video->type;
	handle->timeperframe.denominator = 1;

	handle->video = video;
	file->private_data = &handle->vfh;

done:
	if (ret < 0) {
		v4l2_fh_del(&handle->vfh);
		v4l2_fh_exit(&handle->vfh);
		kfree(handle);
	}

	return ret;
}

static int isp_video_release(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_fh *vfh = file->private_data;
	struct isp_video_fh *handle = to_isp_video_fh(vfh);

	/* Disable streaming and free the buffers queue resources. */
	isp_video_streamoff(file, vfh, video->type);

	mutex_lock(&video->queue_lock);
	vb2_queue_release(&handle->queue);
	mutex_unlock(&video->queue_lock);

	v4l2_pipeline_pm_put(&video->video.entity);

	/* Release the file handle. */
	v4l2_fh_del(vfh);
	v4l2_fh_exit(vfh);
	kfree(handle);
	file->private_data = NULL;

	omap3isp_put(video->isp);

	return 0;
}

static __poll_t isp_video_poll(struct file *file, poll_table *wait)
{
	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
	struct isp_video *video = video_drvdata(file);
	__poll_t ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_poll(&vfh->queue, file, wait);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);

	return vb2_mmap(&vfh->queue, vma);
}

static const struct v4l2_file_operations isp_video_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = isp_video_open,
	.release = isp_video_release,
	.poll = isp_video_poll,
	.mmap = isp_video_mmap,
};

/* -----------------------------------------------------------------------------
 * ISP video core
 */

static const struct isp_video_operations isp_video_dummy_ops = {
};

int omap3isp_video_init(struct isp_video *video, const char *name)
{
	const char *direction;
	int ret;

	switch (video->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		direction = "output";
		video->pad.flags = MEDIA_PAD_FL_SINK
				   | MEDIA_PAD_FL_MUST_CONNECT;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		direction = "input";
		video->pad.flags = MEDIA_PAD_FL_SOURCE
				   | MEDIA_PAD_FL_MUST_CONNECT;
		video->video.vfl_dir = VFL_DIR_TX;
		break;

	default:
		return -EINVAL;
	}

	ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
	if (ret < 0)
		return ret;

	mutex_init(&video->mutex);
	atomic_set(&video->active, 0);

	spin_lock_init(&video->pipe.lock);
	mutex_init(&video->stream_lock);
	mutex_init(&video->queue_lock);
	spin_lock_init(&video->irqlock);

	/* Initialize the video device. */
	if (video->ops == NULL)
		video->ops = &isp_video_dummy_ops;

	video->video.fops = &isp_video_fops;
	snprintf(video->video.name, sizeof(video->video.name),
		 "OMAP3 ISP %s %s", name, direction);
	video->video.vfl_type = VFL_TYPE_VIDEO;
	video->video.release = video_device_release_empty;
	video->video.ioctl_ops = &isp_video_ioctl_ops;
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		video->video.device_caps = V4L2_CAP_VIDEO_CAPTURE
					 | V4L2_CAP_STREAMING;
	else
		video->video.device_caps = V4L2_CAP_VIDEO_OUTPUT
					 | V4L2_CAP_STREAMING;

	video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;

	video_set_drvdata(&video->video, video);

	return 0;
}

void omap3isp_video_cleanup(struct isp_video *video)
{
	media_entity_cleanup(&video->video.entity);
	mutex_destroy(&video->queue_lock);
	mutex_destroy(&video->stream_lock);
	mutex_destroy(&video->mutex);
}

int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
{
	int ret;

	video->video.v4l2_dev = vdev;

	ret = video_register_device(&video->video, VFL_TYPE_VIDEO, -1);
	if (ret < 0)
		dev_err(video->isp->dev,
			"%s: could not register video device (%d)\n",
			__func__, ret);

	return ret;
}

void omap3isp_video_unregister(struct isp_video *video)
{
	video_unregister_device(&video->video);
}
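
/*
 * Minimal sketch of how an ISP submodule is expected to use this API when it
 * creates its video node (field values and error handling are illustrative
 * only):
 *
 *	video->isp = isp;
 *	video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	video->ops = &module_video_ops;
 *	video->capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
 *	video->bpl_alignment = 32;
 *
 *	ret = omap3isp_video_init(video, "MODULE");
 *	if (!ret)
 *		ret = omap3isp_video_register(video, &isp->v4l2_dev);
 *
 * Teardown is the reverse: omap3isp_video_unregister() followed by
 * omap3isp_video_cleanup().
 */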