1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * ispvideo.c 4 * 5 * TI OMAP3 ISP - Generic video node 6 * 7 * Copyright (C) 2009-2010 Nokia Corporation 8 * 9 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com> 10 * Sakari Ailus <sakari.ailus@iki.fi> 11 */ 12 13 #include <linux/clk.h> 14 #include <linux/mm.h> 15 #include <linux/module.h> 16 #include <linux/pagemap.h> 17 #include <linux/scatterlist.h> 18 #include <linux/sched.h> 19 #include <linux/slab.h> 20 #include <linux/vmalloc.h> 21 22 #include <media/v4l2-dev.h> 23 #include <media/v4l2-ioctl.h> 24 #include <media/v4l2-mc.h> 25 #include <media/videobuf2-dma-contig.h> 26 27 #include "ispvideo.h" 28 #include "isp.h" 29 30 31 /* ----------------------------------------------------------------------------- 32 * Helper functions 33 */ 34 35 /* 36 * NOTE: When adding new media bus codes, always remember to add 37 * corresponding in-memory formats to the table below!!! 38 * 39 * If there are multiple entries with the same pixelformat but 40 * different media bus codes, then keep those together. Otherwise 41 * isp_video_enum_format() cannot detect duplicate pixelformats. 
42 */ 43 static struct isp_format_info formats[] = { 44 { MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8, 45 MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8, 46 V4L2_PIX_FMT_GREY, 8, 1, }, 47 { MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y10_1X10, 48 MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y8_1X8, 49 V4L2_PIX_FMT_Y10, 10, 2, }, 50 { MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y10_1X10, 51 MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y8_1X8, 52 V4L2_PIX_FMT_Y12, 12, 2, }, 53 { MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8, 54 MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8, 55 V4L2_PIX_FMT_SBGGR8, 8, 1, }, 56 { MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8, 57 MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8, 58 V4L2_PIX_FMT_SGBRG8, 8, 1, }, 59 { MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8, 60 MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8, 61 V4L2_PIX_FMT_SGRBG8, 8, 1, }, 62 { MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8, 63 MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8, 64 V4L2_PIX_FMT_SRGGB8, 8, 1, }, 65 { MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, 66 MEDIA_BUS_FMT_SBGGR10_1X10, 0, 67 V4L2_PIX_FMT_SBGGR10DPCM8, 8, 1, }, 68 { MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, 69 MEDIA_BUS_FMT_SGBRG10_1X10, 0, 70 V4L2_PIX_FMT_SGBRG10DPCM8, 8, 1, }, 71 { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, 72 MEDIA_BUS_FMT_SGRBG10_1X10, 0, 73 V4L2_PIX_FMT_SGRBG10DPCM8, 8, 1, }, 74 { MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, 75 MEDIA_BUS_FMT_SRGGB10_1X10, 0, 76 V4L2_PIX_FMT_SRGGB10DPCM8, 8, 1, }, 77 { MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10, 78 MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR8_1X8, 79 V4L2_PIX_FMT_SBGGR10, 10, 2, }, 80 { MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10, 81 MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG8_1X8, 82 V4L2_PIX_FMT_SGBRG10, 10, 2, }, 83 { MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10, 84 MEDIA_BUS_FMT_SGRBG10_1X10, 
MEDIA_BUS_FMT_SGRBG8_1X8, 85 V4L2_PIX_FMT_SGRBG10, 10, 2, }, 86 { MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10, 87 MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB8_1X8, 88 V4L2_PIX_FMT_SRGGB10, 10, 2, }, 89 { MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR10_1X10, 90 MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR8_1X8, 91 V4L2_PIX_FMT_SBGGR12, 12, 2, }, 92 { MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG10_1X10, 93 MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG8_1X8, 94 V4L2_PIX_FMT_SGBRG12, 12, 2, }, 95 { MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG10_1X10, 96 MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG8_1X8, 97 V4L2_PIX_FMT_SGRBG12, 12, 2, }, 98 { MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB10_1X10, 99 MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB8_1X8, 100 V4L2_PIX_FMT_SRGGB12, 12, 2, }, 101 { MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_UYVY8_1X16, 102 MEDIA_BUS_FMT_UYVY8_1X16, 0, 103 V4L2_PIX_FMT_UYVY, 16, 2, }, 104 { MEDIA_BUS_FMT_UYVY8_2X8, MEDIA_BUS_FMT_UYVY8_2X8, 105 MEDIA_BUS_FMT_UYVY8_2X8, 0, 106 V4L2_PIX_FMT_UYVY, 8, 2, }, 107 { MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV8_1X16, 108 MEDIA_BUS_FMT_YUYV8_1X16, 0, 109 V4L2_PIX_FMT_YUYV, 16, 2, }, 110 { MEDIA_BUS_FMT_YUYV8_2X8, MEDIA_BUS_FMT_YUYV8_2X8, 111 MEDIA_BUS_FMT_YUYV8_2X8, 0, 112 V4L2_PIX_FMT_YUYV, 8, 2, }, 113 /* Empty entry to catch the unsupported pixel code (0) used by the CCDC 114 * module and avoid NULL pointer dereferences. 
115 */ 116 { 0, } 117 }; 118 119 const struct isp_format_info *omap3isp_video_format_info(u32 code) 120 { 121 unsigned int i; 122 123 for (i = 0; i < ARRAY_SIZE(formats); ++i) { 124 if (formats[i].code == code) 125 return &formats[i]; 126 } 127 128 return NULL; 129 } 130 131 /* 132 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format 133 * @video: ISP video instance 134 * @mbus: v4l2_mbus_framefmt format (input) 135 * @pix: v4l2_pix_format format (output) 136 * 137 * Fill the output pix structure with information from the input mbus format. 138 * The bytesperline and sizeimage fields are computed from the requested bytes 139 * per line value in the pix format and information from the video instance. 140 * 141 * Return the number of padding bytes at end of line. 142 */ 143 static unsigned int isp_video_mbus_to_pix(const struct isp_video *video, 144 const struct v4l2_mbus_framefmt *mbus, 145 struct v4l2_pix_format *pix) 146 { 147 unsigned int bpl = pix->bytesperline; 148 unsigned int min_bpl; 149 unsigned int i; 150 151 memset(pix, 0, sizeof(*pix)); 152 pix->width = mbus->width; 153 pix->height = mbus->height; 154 155 for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) { 156 if (formats[i].code == mbus->code) 157 break; 158 } 159 160 if (WARN_ON(i == ARRAY_SIZE(formats) - 1)) 161 return 0; 162 163 min_bpl = pix->width * formats[i].bpp; 164 165 /* Clamp the requested bytes per line value. If the maximum bytes per 166 * line value is zero, the module doesn't support user configurable line 167 * sizes. Override the requested value with the minimum in that case. 
168 */ 169 if (video->bpl_max) 170 bpl = clamp(bpl, min_bpl, video->bpl_max); 171 else 172 bpl = min_bpl; 173 174 if (!video->bpl_zero_padding || bpl != min_bpl) 175 bpl = ALIGN(bpl, video->bpl_alignment); 176 177 pix->pixelformat = formats[i].pixelformat; 178 pix->bytesperline = bpl; 179 pix->sizeimage = pix->bytesperline * pix->height; 180 pix->colorspace = mbus->colorspace; 181 pix->field = mbus->field; 182 183 return bpl - min_bpl; 184 } 185 186 static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix, 187 struct v4l2_mbus_framefmt *mbus) 188 { 189 unsigned int i; 190 191 memset(mbus, 0, sizeof(*mbus)); 192 mbus->width = pix->width; 193 mbus->height = pix->height; 194 195 /* Skip the last format in the loop so that it will be selected if no 196 * match is found. 197 */ 198 for (i = 0; i < ARRAY_SIZE(formats) - 2; ++i) { 199 if (formats[i].pixelformat == pix->pixelformat) 200 break; 201 } 202 203 mbus->code = formats[i].code; 204 mbus->colorspace = pix->colorspace; 205 mbus->field = pix->field; 206 } 207 208 static struct v4l2_subdev * 209 isp_video_remote_subdev(struct isp_video *video, u32 *pad) 210 { 211 struct media_pad *remote; 212 213 remote = media_pad_remote_pad_first(&video->pad); 214 215 if (!remote || !is_media_entity_v4l2_subdev(remote->entity)) 216 return NULL; 217 218 if (pad) 219 *pad = remote->index; 220 221 return media_entity_to_v4l2_subdev(remote->entity); 222 } 223 224 /* Return a pointer to the ISP video instance at the far end of the pipeline. 
*/ 225 static int isp_video_get_graph_data(struct isp_video *video, 226 struct isp_pipeline *pipe) 227 { 228 struct media_pipeline_entity_iter iter; 229 struct media_entity *entity; 230 struct isp_video *far_end = NULL; 231 int ret; 232 233 ret = media_pipeline_entity_iter_init(&pipe->pipe, &iter); 234 if (ret) 235 return ret; 236 237 media_pipeline_for_each_entity(&pipe->pipe, &iter, entity) { 238 struct isp_video *__video; 239 240 media_entity_enum_set(&pipe->ent_enum, entity); 241 242 if (far_end != NULL) 243 continue; 244 245 if (entity == &video->video.entity) 246 continue; 247 248 if (!is_media_entity_v4l2_video_device(entity)) 249 continue; 250 251 __video = to_isp_video(media_entity_to_video_device(entity)); 252 if (__video->type != video->type) 253 far_end = __video; 254 } 255 256 media_pipeline_entity_iter_cleanup(&iter); 257 258 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { 259 pipe->input = far_end; 260 pipe->output = video; 261 } else { 262 if (far_end == NULL) 263 return -EPIPE; 264 265 pipe->input = video; 266 pipe->output = far_end; 267 } 268 269 return 0; 270 } 271 272 static int 273 __isp_video_get_format(struct isp_video *video, struct v4l2_format *format) 274 { 275 struct v4l2_subdev_format fmt = { 276 .which = V4L2_SUBDEV_FORMAT_ACTIVE, 277 }; 278 struct v4l2_subdev *subdev; 279 u32 pad; 280 int ret; 281 282 subdev = isp_video_remote_subdev(video, &pad); 283 if (subdev == NULL) 284 return -EINVAL; 285 286 fmt.pad = pad; 287 288 mutex_lock(&video->mutex); 289 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt); 290 mutex_unlock(&video->mutex); 291 292 if (ret) 293 return ret; 294 295 format->type = video->type; 296 return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix); 297 } 298 299 static int 300 isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh) 301 { 302 struct v4l2_format format; 303 int ret; 304 305 memcpy(&format, &vfh->format, sizeof(format)); 306 ret = __isp_video_get_format(video, &format); 
307 if (ret < 0) 308 return ret; 309 310 if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat || 311 vfh->format.fmt.pix.height != format.fmt.pix.height || 312 vfh->format.fmt.pix.width != format.fmt.pix.width || 313 vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline || 314 vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage || 315 vfh->format.fmt.pix.field != format.fmt.pix.field) 316 return -EINVAL; 317 318 return 0; 319 } 320 321 /* ----------------------------------------------------------------------------- 322 * Video queue operations 323 */ 324 325 static int isp_video_queue_setup(struct vb2_queue *queue, 326 unsigned int *count, unsigned int *num_planes, 327 unsigned int sizes[], struct device *alloc_devs[]) 328 { 329 struct isp_video_fh *vfh = vb2_get_drv_priv(queue); 330 struct isp_video *video = vfh->video; 331 332 if (*num_planes) { 333 if (*num_planes != 1) 334 return -EINVAL; 335 if (sizes[0] < vfh->format.fmt.pix.sizeimage) 336 return -EINVAL; 337 return 0; 338 } 339 *num_planes = 1; 340 341 sizes[0] = vfh->format.fmt.pix.sizeimage; 342 if (sizes[0] == 0) 343 return -EINVAL; 344 345 *count = min(*count, video->capture_mem / PAGE_ALIGN(sizes[0])); 346 347 return 0; 348 } 349 350 static int isp_video_buffer_prepare(struct vb2_buffer *buf) 351 { 352 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf); 353 struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue); 354 unsigned int size = vfh->format.fmt.pix.sizeimage; 355 struct isp_buffer *buffer = to_isp_buffer(vbuf); 356 struct isp_video *video = vfh->video; 357 dma_addr_t addr; 358 359 /* Refuse to prepare the buffer is the video node has registered an 360 * error. We don't need to take any lock here as the operation is 361 * inherently racy. The authoritative check will be performed in the 362 * queue handler, which can't return an error, this check is just a best 363 * effort to notify userspace as early as possible. 
364 */ 365 if (unlikely(video->error)) 366 return -EIO; 367 368 addr = vb2_dma_contig_plane_dma_addr(buf, 0); 369 if (!IS_ALIGNED(addr, 32)) { 370 dev_dbg(video->isp->dev, 371 "Buffer address must be aligned to 32 bytes boundary.\n"); 372 return -EINVAL; 373 } 374 375 if (vb2_plane_size(&buffer->vb.vb2_buf, 0) < size) { 376 dev_dbg(video->isp->dev, 377 "data will not fit into plane (%lu < %u)\n", 378 vb2_plane_size(&buffer->vb.vb2_buf, 0), size); 379 return -EINVAL; 380 } 381 vb2_set_plane_payload(&buffer->vb.vb2_buf, 0, size); 382 buffer->dma = addr; 383 384 return 0; 385 } 386 387 /* 388 * isp_video_buffer_queue - Add buffer to streaming queue 389 * @buf: Video buffer 390 * 391 * In memory-to-memory mode, start streaming on the pipeline if buffers are 392 * queued on both the input and the output, if the pipeline isn't already busy. 393 * If the pipeline is busy, it will be restarted in the output module interrupt 394 * handler. 395 */ 396 static void isp_video_buffer_queue(struct vb2_buffer *buf) 397 { 398 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf); 399 struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue); 400 struct isp_buffer *buffer = to_isp_buffer(vbuf); 401 struct isp_video *video = vfh->video; 402 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity); 403 enum isp_pipeline_state state; 404 unsigned long flags; 405 unsigned int empty; 406 unsigned int start; 407 408 spin_lock_irqsave(&video->irqlock, flags); 409 410 if (unlikely(video->error)) { 411 vb2_buffer_done(&buffer->vb.vb2_buf, VB2_BUF_STATE_ERROR); 412 spin_unlock_irqrestore(&video->irqlock, flags); 413 return; 414 } 415 416 empty = list_empty(&video->dmaqueue); 417 list_add_tail(&buffer->irqlist, &video->dmaqueue); 418 419 spin_unlock_irqrestore(&video->irqlock, flags); 420 421 if (empty) { 422 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 423 state = ISP_PIPELINE_QUEUE_OUTPUT; 424 else 425 state = ISP_PIPELINE_QUEUE_INPUT; 426 427 spin_lock_irqsave(&pipe->lock, 
flags); 428 pipe->state |= state; 429 video->ops->queue(video, buffer); 430 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED; 431 432 start = isp_pipeline_ready(pipe); 433 if (start) 434 pipe->state |= ISP_PIPELINE_STREAM; 435 spin_unlock_irqrestore(&pipe->lock, flags); 436 437 if (start) 438 omap3isp_pipeline_set_stream(pipe, 439 ISP_PIPELINE_STREAM_SINGLESHOT); 440 } 441 } 442 443 /* 444 * omap3isp_video_return_buffers - Return all queued buffers to videobuf2 445 * @video: ISP video object 446 * @state: new state for the returned buffers 447 * 448 * Return all buffers queued on the video node to videobuf2 in the given state. 449 * The buffer state should be VB2_BUF_STATE_QUEUED if called due to an error 450 * when starting the stream, or VB2_BUF_STATE_ERROR otherwise. 451 * 452 * The function must be called with the video irqlock held. 453 */ 454 static void omap3isp_video_return_buffers(struct isp_video *video, 455 enum vb2_buffer_state state) 456 { 457 while (!list_empty(&video->dmaqueue)) { 458 struct isp_buffer *buf; 459 460 buf = list_first_entry(&video->dmaqueue, 461 struct isp_buffer, irqlist); 462 list_del(&buf->irqlist); 463 vb2_buffer_done(&buf->vb.vb2_buf, state); 464 } 465 } 466 467 static int isp_video_start_streaming(struct vb2_queue *queue, 468 unsigned int count) 469 { 470 struct isp_video_fh *vfh = vb2_get_drv_priv(queue); 471 struct isp_video *video = vfh->video; 472 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity); 473 unsigned long flags; 474 int ret; 475 476 /* In sensor-to-memory mode, the stream can be started synchronously 477 * to the stream on command. In memory-to-memory mode, it will be 478 * started when buffers are queued on both the input and output. 
479 */ 480 if (pipe->input) 481 return 0; 482 483 ret = omap3isp_pipeline_set_stream(pipe, 484 ISP_PIPELINE_STREAM_CONTINUOUS); 485 if (ret < 0) { 486 spin_lock_irqsave(&video->irqlock, flags); 487 omap3isp_video_return_buffers(video, VB2_BUF_STATE_QUEUED); 488 spin_unlock_irqrestore(&video->irqlock, flags); 489 return ret; 490 } 491 492 spin_lock_irqsave(&video->irqlock, flags); 493 if (list_empty(&video->dmaqueue)) 494 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN; 495 spin_unlock_irqrestore(&video->irqlock, flags); 496 497 return 0; 498 } 499 500 static const struct vb2_ops isp_video_queue_ops = { 501 .queue_setup = isp_video_queue_setup, 502 .buf_prepare = isp_video_buffer_prepare, 503 .buf_queue = isp_video_buffer_queue, 504 .start_streaming = isp_video_start_streaming, 505 }; 506 507 /* 508 * omap3isp_video_buffer_next - Complete the current buffer and return the next 509 * @video: ISP video object 510 * 511 * Remove the current video buffer from the DMA queue and fill its timestamp and 512 * field count before handing it back to videobuf2. 513 * 514 * For capture video nodes the buffer state is set to VB2_BUF_STATE_DONE if no 515 * error has been flagged in the pipeline, or to VB2_BUF_STATE_ERROR otherwise. 516 * For video output nodes the buffer state is always set to VB2_BUF_STATE_DONE. 517 * 518 * The DMA queue is expected to contain at least one buffer. 519 * 520 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is 521 * empty. 
522 */ 523 struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video) 524 { 525 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity); 526 enum vb2_buffer_state vb_state; 527 struct isp_buffer *buf; 528 unsigned long flags; 529 530 spin_lock_irqsave(&video->irqlock, flags); 531 if (WARN_ON(list_empty(&video->dmaqueue))) { 532 spin_unlock_irqrestore(&video->irqlock, flags); 533 return NULL; 534 } 535 536 buf = list_first_entry(&video->dmaqueue, struct isp_buffer, 537 irqlist); 538 list_del(&buf->irqlist); 539 spin_unlock_irqrestore(&video->irqlock, flags); 540 541 buf->vb.vb2_buf.timestamp = ktime_get_ns(); 542 543 /* Do frame number propagation only if this is the output video node. 544 * Frame number either comes from the CSI receivers or it gets 545 * incremented here if H3A is not active. 546 * Note: There is no guarantee that the output buffer will finish 547 * first, so the input number might lag behind by 1 in some cases. 548 */ 549 if (video == pipe->output && !pipe->do_propagation) 550 buf->vb.sequence = 551 atomic_inc_return(&pipe->frame_number); 552 else 553 buf->vb.sequence = atomic_read(&pipe->frame_number); 554 555 if (pipe->field != V4L2_FIELD_NONE) 556 buf->vb.sequence /= 2; 557 558 buf->vb.field = pipe->field; 559 560 /* Report pipeline errors to userspace on the capture device side. 
*/ 561 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) { 562 vb_state = VB2_BUF_STATE_ERROR; 563 pipe->error = false; 564 } else { 565 vb_state = VB2_BUF_STATE_DONE; 566 } 567 568 vb2_buffer_done(&buf->vb.vb2_buf, vb_state); 569 570 spin_lock_irqsave(&video->irqlock, flags); 571 572 if (list_empty(&video->dmaqueue)) { 573 enum isp_pipeline_state state; 574 575 spin_unlock_irqrestore(&video->irqlock, flags); 576 577 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 578 state = ISP_PIPELINE_QUEUE_OUTPUT 579 | ISP_PIPELINE_STREAM; 580 else 581 state = ISP_PIPELINE_QUEUE_INPUT 582 | ISP_PIPELINE_STREAM; 583 584 spin_lock_irqsave(&pipe->lock, flags); 585 pipe->state &= ~state; 586 if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS) 587 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN; 588 spin_unlock_irqrestore(&pipe->lock, flags); 589 return NULL; 590 } 591 592 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) { 593 spin_lock(&pipe->lock); 594 pipe->state &= ~ISP_PIPELINE_STREAM; 595 spin_unlock(&pipe->lock); 596 } 597 598 buf = list_first_entry(&video->dmaqueue, struct isp_buffer, 599 irqlist); 600 601 spin_unlock_irqrestore(&video->irqlock, flags); 602 603 return buf; 604 } 605 606 /* 607 * omap3isp_video_cancel_stream - Cancel stream on a video node 608 * @video: ISP video object 609 * 610 * Cancelling a stream returns all buffers queued on the video node to videobuf2 611 * in the erroneous state and makes sure no new buffer can be queued. 
612 */ 613 void omap3isp_video_cancel_stream(struct isp_video *video) 614 { 615 unsigned long flags; 616 617 spin_lock_irqsave(&video->irqlock, flags); 618 omap3isp_video_return_buffers(video, VB2_BUF_STATE_ERROR); 619 video->error = true; 620 spin_unlock_irqrestore(&video->irqlock, flags); 621 } 622 623 /* 624 * omap3isp_video_resume - Perform resume operation on the buffers 625 * @video: ISP video object 626 * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise 627 * 628 * This function is intended to be used on suspend/resume scenario. It 629 * requests video queue layer to discard buffers marked as DONE if it's in 630 * continuous mode and requests ISP modules to queue again the ACTIVE buffer 631 * if there's any. 632 */ 633 void omap3isp_video_resume(struct isp_video *video, int continuous) 634 { 635 struct isp_buffer *buf = NULL; 636 637 if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { 638 mutex_lock(&video->queue_lock); 639 vb2_discard_done(video->queue); 640 mutex_unlock(&video->queue_lock); 641 } 642 643 if (!list_empty(&video->dmaqueue)) { 644 buf = list_first_entry(&video->dmaqueue, 645 struct isp_buffer, irqlist); 646 video->ops->queue(video, buf); 647 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED; 648 } else { 649 if (continuous) 650 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN; 651 } 652 } 653 654 /* ----------------------------------------------------------------------------- 655 * V4L2 ioctls 656 */ 657 658 static int 659 isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap) 660 { 661 struct isp_video *video = video_drvdata(file); 662 663 strscpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver)); 664 strscpy(cap->card, video->video.name, sizeof(cap->card)); 665 666 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT 667 | V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS | V4L2_CAP_IO_MC; 668 669 return 0; 670 } 671 672 static int 673 
isp_video_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f) 674 { 675 struct isp_video *video = video_drvdata(file); 676 unsigned int i, j; 677 678 if (f->type != video->type) 679 return -EINVAL; 680 681 for (i = 0, j = 0; i < ARRAY_SIZE(formats); i++) { 682 /* Weed out duplicate pixelformats with different mbus codes */ 683 if (!f->mbus_code && i && 684 formats[i - 1].pixelformat == formats[i].pixelformat) 685 continue; 686 if (f->mbus_code && formats[i].code != f->mbus_code) 687 continue; 688 689 if (j == f->index) { 690 f->pixelformat = formats[i].pixelformat; 691 return 0; 692 } 693 j++; 694 } 695 696 return -EINVAL; 697 } 698 699 static int 700 isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format) 701 { 702 struct isp_video_fh *vfh = file_to_isp_video_fh(file); 703 struct isp_video *video = video_drvdata(file); 704 705 if (format->type != video->type) 706 return -EINVAL; 707 708 mutex_lock(&video->mutex); 709 *format = vfh->format; 710 mutex_unlock(&video->mutex); 711 712 return 0; 713 } 714 715 static int 716 isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format) 717 { 718 struct isp_video *video = video_drvdata(file); 719 struct v4l2_subdev_format fmt = { 720 .which = V4L2_SUBDEV_FORMAT_ACTIVE, 721 }; 722 struct v4l2_subdev *subdev; 723 u32 pad; 724 int ret; 725 726 if (format->type != video->type) 727 return -EINVAL; 728 729 /* Replace unsupported field orders with sane defaults. */ 730 switch (format->fmt.pix.field) { 731 case V4L2_FIELD_NONE: 732 /* Progressive is supported everywhere. */ 733 break; 734 case V4L2_FIELD_ALTERNATE: 735 /* ALTERNATE is not supported on output nodes. */ 736 if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) 737 format->fmt.pix.field = V4L2_FIELD_NONE; 738 break; 739 case V4L2_FIELD_INTERLACED: 740 /* The ISP has no concept of video standard, select the 741 * top-bottom order when the unqualified interlaced order is 742 * requested. 
743 */ 744 format->fmt.pix.field = V4L2_FIELD_INTERLACED_TB; 745 fallthrough; 746 case V4L2_FIELD_INTERLACED_TB: 747 case V4L2_FIELD_INTERLACED_BT: 748 /* Interlaced orders are only supported at the CCDC output. */ 749 if (video != &video->isp->isp_ccdc.video_out) 750 format->fmt.pix.field = V4L2_FIELD_NONE; 751 break; 752 case V4L2_FIELD_TOP: 753 case V4L2_FIELD_BOTTOM: 754 case V4L2_FIELD_SEQ_TB: 755 case V4L2_FIELD_SEQ_BT: 756 default: 757 /* All other field orders are currently unsupported, default to 758 * progressive. 759 */ 760 format->fmt.pix.field = V4L2_FIELD_NONE; 761 break; 762 } 763 764 if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { 765 isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format); 766 isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix); 767 return 0; 768 } 769 770 subdev = isp_video_remote_subdev(video, &pad); 771 if (subdev == NULL) 772 return -EINVAL; 773 774 isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format); 775 776 fmt.pad = pad; 777 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt); 778 if (ret) 779 return ret == -ENOIOCTLCMD ? 
-ENOTTY : ret; 780 781 isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix); 782 return 0; 783 } 784 785 static int 786 isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format) 787 { 788 struct isp_video_fh *vfh = file_to_isp_video_fh(file); 789 struct isp_video *video = video_drvdata(file); 790 int ret; 791 792 ret = isp_video_try_format(file, fh, format); 793 if (ret) 794 return ret; 795 796 mutex_lock(&video->mutex); 797 vfh->format = *format; 798 mutex_unlock(&video->mutex); 799 800 return 0; 801 } 802 803 static int 804 isp_video_get_selection(struct file *file, void *fh, struct v4l2_selection *sel) 805 { 806 struct isp_video *video = video_drvdata(file); 807 struct v4l2_subdev_format format = { 808 .which = V4L2_SUBDEV_FORMAT_ACTIVE, 809 }; 810 struct v4l2_subdev *subdev; 811 struct v4l2_subdev_selection sdsel = { 812 .which = V4L2_SUBDEV_FORMAT_ACTIVE, 813 .target = sel->target, 814 }; 815 u32 pad; 816 int ret; 817 818 switch (sel->target) { 819 case V4L2_SEL_TGT_CROP: 820 case V4L2_SEL_TGT_CROP_BOUNDS: 821 case V4L2_SEL_TGT_CROP_DEFAULT: 822 if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) 823 return -EINVAL; 824 break; 825 case V4L2_SEL_TGT_COMPOSE: 826 case V4L2_SEL_TGT_COMPOSE_BOUNDS: 827 case V4L2_SEL_TGT_COMPOSE_DEFAULT: 828 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 829 return -EINVAL; 830 break; 831 default: 832 return -EINVAL; 833 } 834 subdev = isp_video_remote_subdev(video, &pad); 835 if (subdev == NULL) 836 return -EINVAL; 837 838 /* Try the get selection operation first and fallback to get format if not 839 * implemented. 840 */ 841 sdsel.pad = pad; 842 ret = v4l2_subdev_call(subdev, pad, get_selection, NULL, &sdsel); 843 if (!ret) 844 sel->r = sdsel.r; 845 if (ret != -ENOIOCTLCMD) 846 return ret; 847 848 format.pad = pad; 849 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format); 850 if (ret < 0) 851 return ret == -ENOIOCTLCMD ? 
-ENOTTY : ret; 852 853 sel->r.left = 0; 854 sel->r.top = 0; 855 sel->r.width = format.format.width; 856 sel->r.height = format.format.height; 857 858 return 0; 859 } 860 861 static int 862 isp_video_set_selection(struct file *file, void *fh, struct v4l2_selection *sel) 863 { 864 struct isp_video *video = video_drvdata(file); 865 struct v4l2_subdev *subdev; 866 struct v4l2_subdev_selection sdsel = { 867 .which = V4L2_SUBDEV_FORMAT_ACTIVE, 868 .target = sel->target, 869 .flags = sel->flags, 870 .r = sel->r, 871 }; 872 u32 pad; 873 int ret; 874 875 switch (sel->target) { 876 case V4L2_SEL_TGT_CROP: 877 if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) 878 return -EINVAL; 879 break; 880 case V4L2_SEL_TGT_COMPOSE: 881 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) 882 return -EINVAL; 883 break; 884 default: 885 return -EINVAL; 886 } 887 subdev = isp_video_remote_subdev(video, &pad); 888 if (subdev == NULL) 889 return -EINVAL; 890 891 sdsel.pad = pad; 892 mutex_lock(&video->mutex); 893 ret = v4l2_subdev_call(subdev, pad, set_selection, NULL, &sdsel); 894 mutex_unlock(&video->mutex); 895 if (!ret) 896 sel->r = sdsel.r; 897 898 return ret == -ENOIOCTLCMD ? 
-ENOTTY : ret; 899 } 900 901 static int 902 isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a) 903 { 904 struct isp_video_fh *vfh = file_to_isp_video_fh(file); 905 struct isp_video *video = video_drvdata(file); 906 907 if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT || 908 video->type != a->type) 909 return -EINVAL; 910 911 memset(a, 0, sizeof(*a)); 912 a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; 913 a->parm.output.capability = V4L2_CAP_TIMEPERFRAME; 914 a->parm.output.timeperframe = vfh->timeperframe; 915 916 return 0; 917 } 918 919 static int 920 isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a) 921 { 922 struct isp_video_fh *vfh = file_to_isp_video_fh(file); 923 struct isp_video *video = video_drvdata(file); 924 925 if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT || 926 video->type != a->type) 927 return -EINVAL; 928 929 if (a->parm.output.timeperframe.denominator == 0) 930 a->parm.output.timeperframe.denominator = 1; 931 if (a->parm.output.timeperframe.numerator == 0) 932 a->parm.output.timeperframe.numerator = 1; 933 934 a->parm.output.capability = V4L2_CAP_TIMEPERFRAME; 935 vfh->timeperframe = a->parm.output.timeperframe; 936 937 return 0; 938 } 939 940 static int 941 isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb) 942 { 943 struct isp_video_fh *vfh = file_to_isp_video_fh(file); 944 struct isp_video *video = video_drvdata(file); 945 int ret; 946 947 mutex_lock(&video->queue_lock); 948 ret = vb2_reqbufs(&vfh->queue, rb); 949 mutex_unlock(&video->queue_lock); 950 951 return ret; 952 } 953 954 static int 955 isp_video_create_bufs(struct file *file, void *fh, struct v4l2_create_buffers *p) 956 { 957 struct isp_video_fh *vfh = file_to_isp_video_fh(file); 958 struct isp_video *video = video_drvdata(file); 959 int ret; 960 961 mutex_lock(&video->queue_lock); 962 ret = vb2_create_bufs(&vfh->queue, p); 963 mutex_unlock(&video->queue_lock); 964 965 return ret; 966 } 967 968 static int 969 
isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = file_to_isp_video_fh(file);
	struct isp_video *video = video_drvdata(file);
	int ret;

	/* All vb2 queue accesses are serialised with queue_lock. */
	mutex_lock(&video->queue_lock);
	ret = vb2_querybuf(&vfh->queue, b);
	mutex_unlock(&video->queue_lock);

	return ret;
}

/*
 * VIDIOC_PREPARE_BUF handler - hand the buffer to videobuf2 for validation
 * and preparation, under the queue lock.
 */
static int
isp_video_prepare_buf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = file_to_isp_video_fh(file);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_prepare_buf(&vfh->queue, video->video.v4l2_dev->mdev, b);
	mutex_unlock(&video->queue_lock);

	return ret;
}

/*
 * VIDIOC_QBUF handler - queue a buffer through videobuf2, under the queue
 * lock.
 */
static int
isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = file_to_isp_video_fh(file);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_qbuf(&vfh->queue, video->video.v4l2_dev->mdev, b);
	mutex_unlock(&video->queue_lock);

	return ret;
}

/*
 * VIDIOC_DQBUF handler - dequeue a buffer through videobuf2, under the queue
 * lock. Blocking behaviour follows the file's O_NONBLOCK flag.
 */
static int
isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = file_to_isp_video_fh(file);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK);
	mutex_unlock(&video->queue_lock);

	return ret;
}

/*
 * isp_video_check_external_subdevs - Locate and validate the external subdev
 * feeding the pipeline
 * @video: ISP video node at the pipeline output
 * @pipe: the ISP pipeline being started
 *
 * Walk the candidate ISP input entities (CSI2a, CSI2c, CCP2, CCDC), find the
 * first one that is part of the pipeline and has a remote source connected to
 * its sink pad 0, and record that remote subdev as the pipeline's external
 * subdev. Cache the external format width and the subdev's pixel rate
 * (V4L2_CID_PIXEL_RATE), and when the CCDC is in the pipeline verify that the
 * pixel rate does not exceed the maximum CCDC rate.
 *
 * Return 0 on success (including the memory-to-memory and non-subdev-source
 * cases, which need no external checks), -EINVAL if no source is found,
 * -ENOSPC if the external rate exceeds the CCDC limit, or a negative error
 * code propagated from the format/control queries.
 */
static int isp_video_check_external_subdevs(struct isp_video *video,
					    struct isp_pipeline *pipe)
{
	struct isp_device *isp = video->isp;
	struct media_entity *ents[] = {
		&isp->isp_csi2a.subdev.entity,
		&isp->isp_csi2c.subdev.entity,
		&isp->isp_ccp2.subdev.entity,
		&isp->isp_ccdc.subdev.entity
	};
	struct media_pad *source_pad;
	struct media_entity *source = NULL;
	struct media_entity *sink;
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	struct v4l2_ext_controls ctrls;
	struct v4l2_ext_control ctrl;
	unsigned int i;
	int ret;

	/* Memory-to-memory pipelines have no external subdev. */
	if (pipe->input != NULL)
		return 0;

	for (i = 0; i < ARRAY_SIZE(ents); i++) {
		/* Is the entity part of the pipeline? */
		if (!media_entity_enum_test(&pipe->ent_enum, ents[i]))
			continue;

		/* ISP entities have always sink pad == 0. Find source. */
		source_pad = media_pad_remote_pad_first(&ents[i]->pads[0]);
		if (source_pad == NULL)
			continue;

		source = source_pad->entity;
		sink = ents[i];
		break;
	}

	if (!source) {
		dev_warn(isp->dev, "can't find source, failing now\n");
		return -EINVAL;
	}

	/* A non-subdev source (e.g. a video node) needs no further checks. */
	if (!is_media_entity_v4l2_subdev(source))
		return 0;

	pipe->external = media_entity_to_v4l2_subdev(source);

	/*
	 * NOTE(review): the format is queried on the sink subdev using the
	 * remote source pad's index - presumably mirroring the source format
	 * at the link; confirm against the subdev pad numbering.
	 */
	fmt.pad = source_pad->index;
	ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(sink),
			       pad, get_fmt, NULL, &fmt);
	if (unlikely(ret < 0)) {
		dev_warn(isp->dev, "get_fmt returned null!\n");
		return ret;
	}

	/* Cache the bus width of the external format for rate computations. */
	pipe->external_width =
		omap3isp_video_format_info(fmt.format.code)->width;

	memset(&ctrls, 0, sizeof(ctrls));
	memset(&ctrl, 0, sizeof(ctrl));

	/* Query the external subdev's pixel rate control. */
	ctrl.id = V4L2_CID_PIXEL_RATE;

	ctrls.count = 1;
	ctrls.controls = &ctrl;
	ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &video->video,
			       NULL, &ctrls);
	if (ret < 0) {
		dev_warn(isp->dev, "no pixel rate control in subdev %s\n",
			 pipe->external->name);
		return ret;
	}

	pipe->external_rate = ctrl.value64;

	if (media_entity_enum_test(&pipe->ent_enum,
				   &isp->isp_ccdc.subdev.entity)) {
		unsigned int rate = UINT_MAX;
		/*
		 * Check that maximum allowed CCDC pixel rate isn't
		 * exceeded by the pixel rate.
		 */
		omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
		if (pipe->external_rate > rate)
			return -ENOSPC;
	}

	return 0;
}

/*
 * Stream management
 *
 * Every ISP pipeline has a single input and a single output. The input can be
 * either a sensor or a video node. The output is always a video node.
 *
 * As every pipeline has an output video node, the ISP video objects at the
 * pipeline output stores the pipeline state. It tracks the streaming state of
 * both the input and output, as well as the availability of buffers.
 *
 * In sensor-to-memory mode, frames are always available at the pipeline input.
 * Starting the sensor usually requires I2C transfers and must be done in
 * interruptible context. The pipeline is started and stopped synchronously
 * to the stream on/off commands. All modules in the pipeline will get their
 * subdev set stream handler called. The module at the end of the pipeline must
 * delay starting the hardware until buffers are available at its output.
 *
 * In memory-to-memory mode, starting/stopping the stream requires
 * synchronization between the input and output. ISP modules can't be stopped
 * in the middle of a frame, and at least some of the modules seem to become
 * busy as soon as they're started, even if they don't receive a frame start
 * event. For that reason frames need to be processed in single-shot mode. The
 * driver needs to wait until a frame is completely processed and written to
 * memory before restarting the pipeline for the next frame. Pipelined
 * processing might be possible but requires more testing.
 *
 * Stream start must be delayed until buffers are available at both the input
 * and output. The pipeline must be started in the vb2 queue callback with
 * the buffers queue spinlock held. The modules subdev set stream operation must
 * not sleep.
 */
/*
 * VIDIOC_STREAMON handler - start streaming on the pipeline.
 *
 * Validates the buffer type, initialises the pipeline entity enumeration,
 * starts the media pipeline, verifies the format against the connected
 * subdev, collects graph data and external subdev parameters, updates the
 * pipeline state under pipe->lock, then starts the vb2 queue. On failure the
 * pipeline is unwound in reverse order via the goto labels. Serialised by
 * stream_lock; vb2 calls are additionally protected by queue_lock.
 */
static int
isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = file_to_isp_video_fh(file);
	struct isp_video *video = video_drvdata(file);
	enum isp_pipeline_state state;
	struct isp_pipeline *pipe;
	unsigned long flags;
	int ret;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 */
	pipe = to_isp_pipeline(&video->video.entity) ? : &video->pipe;

	ret = media_entity_enum_init(&pipe->ent_enum, &video->isp->media_dev);
	if (ret)
		goto err_enum_init;

	/* TODO: Implement PM QoS */
	pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
	pipe->max_rate = pipe->l3_ick;

	ret = video_device_pipeline_start(&video->video, &pipe->pipe);
	if (ret < 0)
		goto err_pipeline_start;

	/* Verify that the currently configured format matches the output of
	 * the connected subdev.
	 */
	ret = isp_video_check_format(video, vfh);
	if (ret < 0)
		goto err_check_format;

	/* A positive return value is the bytes-per-line padding. */
	video->bpl_padding = ret;
	video->bpl_value = vfh->format.fmt.pix.bytesperline;

	ret = isp_video_get_graph_data(video, pipe);
	if (ret < 0)
		goto err_check_format;

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
	else
		state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;

	ret = isp_video_check_external_subdevs(video, pipe);
	if (ret < 0)
		goto err_check_format;

	pipe->error = false;

	/* pipe->state is shared with interrupt context - update it atomically. */
	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~ISP_PIPELINE_STREAM;
	pipe->state |= state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Set the maximum time per frame as the value requested by userspace.
	 * This is a soft limit that can be overridden if the hardware doesn't
	 * support the request limit.
	 */
	if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		pipe->max_timeperframe = vfh->timeperframe;

	video->queue = &vfh->queue;
	INIT_LIST_HEAD(&video->dmaqueue);
	atomic_set(&pipe->frame_number, -1);
	pipe->field = vfh->format.fmt.pix.field;

	mutex_lock(&video->queue_lock);
	ret = vb2_streamon(&vfh->queue, type);
	mutex_unlock(&video->queue_lock);
	if (ret < 0)
		goto err_check_format;

	mutex_unlock(&video->stream_lock);

	return 0;

err_check_format:
	video_device_pipeline_stop(&video->video);
err_pipeline_start:
	/* TODO: Implement PM QoS */
	/* The DMA queue must be emptied here, otherwise CCDC interrupts that
	 * will get triggered the next time the CCDC is powered up will try to
	 * access buffers that might have been freed but still present in the
	 * DMA queue. This can easily get triggered if the above
	 * omap3isp_pipeline_set_stream() call fails on a system with a
	 * free-running sensor.
	 */
	INIT_LIST_HEAD(&video->dmaqueue);
	video->queue = NULL;

	media_entity_enum_cleanup(&pipe->ent_enum);

err_enum_init:
	mutex_unlock(&video->stream_lock);

	return ret;
}

/*
 * VIDIOC_STREAMOFF handler - stop streaming on the pipeline.
 *
 * A no-op (returning 0) when the queue isn't streaming. Otherwise clears the
 * relevant pipeline state bits, stops the hardware stream, cancels pending
 * buffers, stops the vb2 queue and tears down the media pipeline. Serialised
 * by stream_lock.
 */
static int
isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = file_to_isp_video_fh(file);
	struct isp_video *video = video_drvdata(file);
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned int streaming;
	unsigned long flags;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* Make sure we're not streaming yet. */
	mutex_lock(&video->queue_lock);
	streaming = vb2_is_streaming(&vfh->queue);
	mutex_unlock(&video->queue_lock);

	if (!streaming)
		goto done;

	/* Update the pipeline state. */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISP_PIPELINE_STREAM_OUTPUT
			| ISP_PIPELINE_QUEUE_OUTPUT;
	else
		state = ISP_PIPELINE_STREAM_INPUT
			| ISP_PIPELINE_QUEUE_INPUT;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Stop the stream. */
	omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
	omap3isp_video_cancel_stream(video);

	mutex_lock(&video->queue_lock);
	vb2_streamoff(&vfh->queue, type);
	mutex_unlock(&video->queue_lock);
	video->queue = NULL;
	video->error = false;

	/* TODO: Implement PM QoS */
	video_device_pipeline_stop(&video->video);

	media_entity_enum_cleanup(&pipe->ent_enum);

done:
	mutex_unlock(&video->stream_lock);
	return 0;
}

/*
 * VIDIOC_ENUM_INPUT handler - the node exposes a single "camera" input.
 */
static int
isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{
	if (input->index > 0)
		return -EINVAL;

	strscpy(input->name, "camera", sizeof(input->name));
	input->type = V4L2_INPUT_TYPE_CAMERA;

	return 0;
}

/* VIDIOC_G_INPUT handler - input 0 is the only (and current) input. */
static int
isp_video_g_input(struct file *file, void *fh, unsigned int *input)
{
	*input = 0;

	return 0;
}

/* VIDIOC_S_INPUT handler - only input 0 may be selected. */
static int
isp_video_s_input(struct file *file, void *fh, unsigned int input)
{
	return input == 0 ? 0 : -EINVAL;
}

static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
	.vidioc_querycap = isp_video_querycap,
	.vidioc_enum_fmt_vid_cap = isp_video_enum_format,
	.vidioc_g_fmt_vid_cap = isp_video_get_format,
	.vidioc_s_fmt_vid_cap = isp_video_set_format,
	.vidioc_try_fmt_vid_cap = isp_video_try_format,
	.vidioc_enum_fmt_vid_out = isp_video_enum_format,
	.vidioc_g_fmt_vid_out = isp_video_get_format,
	.vidioc_s_fmt_vid_out = isp_video_set_format,
	.vidioc_try_fmt_vid_out = isp_video_try_format,
	.vidioc_g_selection = isp_video_get_selection,
	.vidioc_s_selection = isp_video_set_selection,
	.vidioc_g_parm = isp_video_get_param,
	.vidioc_s_parm = isp_video_set_param,
	.vidioc_reqbufs = isp_video_reqbufs,
	.vidioc_create_bufs = isp_video_create_bufs,
	.vidioc_querybuf = isp_video_querybuf,
	.vidioc_prepare_buf = isp_video_prepare_buf,
	.vidioc_qbuf = isp_video_qbuf,
	.vidioc_dqbuf = isp_video_dqbuf,
	.vidioc_streamon = isp_video_streamon,
	.vidioc_streamoff = isp_video_streamoff,
	.vidioc_enum_input = isp_video_enum_input,
	.vidioc_g_input = isp_video_g_input,
	.vidioc_s_input = isp_video_s_input,
};

/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */

/*
 * Open handler - allocate a per-file handle, power up the ISP, initialise the
 * vb2 queue and set a default 720x480 UYVY format. On any failure the file
 * handle is deleted and freed before returning the error.
 */
static int isp_video_open(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_mbus_framefmt fmt;
	struct isp_video_fh *handle;
	struct vb2_queue *queue;
	int ret = 0;

	handle = kzalloc_obj(*handle);
	if (handle == NULL)
		return -ENOMEM;

	v4l2_fh_init(&handle->vfh, &video->video);
	v4l2_fh_add(&handle->vfh, file);

	/* If this is the first user, initialise the pipeline. */
	if (omap3isp_get(video->isp) == NULL) {
		ret = -EBUSY;
		goto done;
	}

	ret = v4l2_pipeline_pm_get(&video->video.entity);
	if (ret < 0) {
		omap3isp_put(video->isp);
		goto done;
	}

	/* Configure the videobuf2 queue for DMA-contiguous ISP buffers. */
	queue = &handle->queue;
	queue->type = video->type;
	queue->io_modes = VB2_MMAP | VB2_USERPTR;
	queue->drv_priv = handle;
	queue->ops = &isp_video_queue_ops;
	queue->mem_ops = &vb2_dma_contig_memops;
	queue->buf_struct_size = sizeof(struct isp_buffer);
	queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	queue->dev = video->isp->dev;
	queue->lock = &video->queue_lock;

	ret = vb2_queue_init(&handle->queue);
	if (ret < 0) {
		omap3isp_put(video->isp);
		goto done;
	}

	/*
	 * Set a sensible default format, round-tripped through the media bus
	 * conversion helpers so the pix format matches what the hardware can
	 * actually produce.
	 */
	memset(&handle->format, 0, sizeof(handle->format));
	handle->format.type = video->type;
	handle->format.fmt.pix.width = 720;
	handle->format.fmt.pix.height = 480;
	handle->format.fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
	handle->format.fmt.pix.field = V4L2_FIELD_NONE;
	handle->format.fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
	isp_video_pix_to_mbus(&handle->format.fmt.pix, &fmt);
	isp_video_mbus_to_pix(video, &fmt, &handle->format.fmt.pix);
	handle->timeperframe.numerator = 1;
	handle->timeperframe.denominator = 1;

	handle->video = video;

done:
	if (ret < 0) {
		v4l2_fh_del(&handle->vfh, file);
		v4l2_fh_exit(&handle->vfh);
		kfree(handle);
	}

	return ret;
}

/*
 * Release handler - stop streaming, release the vb2 queue, drop the pipeline
 * power reference, free the file handle and release the ISP. Mirrors
 * isp_video_open() in reverse order.
 */
static int isp_video_release(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_fh *vfh = file_to_v4l2_fh(file);
	struct isp_video_fh *handle = file_to_isp_video_fh(file);

	/* Disable streaming and free the buffers queue resources. */
	isp_video_streamoff(file, vfh, video->type);

	mutex_lock(&video->queue_lock);
	vb2_queue_release(&handle->queue);
	mutex_unlock(&video->queue_lock);

	v4l2_pipeline_pm_put(&video->video.entity);

	/* Release the file handle. */
	v4l2_fh_del(vfh, file);
	v4l2_fh_exit(vfh);
	kfree(handle);

	omap3isp_put(video->isp);

	return 0;
}

/* Poll handler - delegate to vb2, under the queue lock. */
static __poll_t isp_video_poll(struct file *file, poll_table *wait)
{
	struct isp_video_fh *vfh = file_to_isp_video_fh(file);
	struct isp_video *video = video_drvdata(file);
	__poll_t ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_poll(&vfh->queue, file, wait);
	mutex_unlock(&video->queue_lock);

	return ret;
}

/* mmap handler - delegate buffer mapping to vb2. */
static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct isp_video_fh *vfh = file_to_isp_video_fh(file);

	return vb2_mmap(&vfh->queue, vma);
}

static const struct v4l2_file_operations isp_video_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = isp_video_open,
	.release = isp_video_release,
	.poll = isp_video_poll,
	.mmap = isp_video_mmap,
};

/* -----------------------------------------------------------------------------
 * ISP video core
 */

/* Fallback operations used when a video node provides no ops of its own. */
static const struct isp_video_operations isp_video_dummy_ops = {
};

/*
 * omap3isp_video_init - Initialise an ISP video node
 * @video: the ISP video node to initialise
 * @name: human-readable node name, used in the video device name
 *
 * Set up the media pad (sink for capture nodes, source for output nodes),
 * the locks, and the video_device fields. Capture nodes have the [GS]_PARM
 * ioctls disabled; output nodes report V4L2_CAP_VIDEO_OUTPUT instead of
 * V4L2_CAP_VIDEO_CAPTURE.
 *
 * Return 0 on success, -EINVAL for an unsupported buffer type, or a negative
 * error code from media_entity_pads_init().
 */
int omap3isp_video_init(struct isp_video *video, const char *name)
{
	const char *direction;
	int ret;

	switch (video->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		direction = "output";
		video->pad.flags = MEDIA_PAD_FL_SINK
				   | MEDIA_PAD_FL_MUST_CONNECT;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		direction = "input";
		video->pad.flags = MEDIA_PAD_FL_SOURCE
				   | MEDIA_PAD_FL_MUST_CONNECT;
		video->video.vfl_dir = VFL_DIR_TX;
		break;

	default:
		return -EINVAL;
	}

	ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
	if (ret < 0)
		return ret;

	mutex_init(&video->mutex);
	atomic_set(&video->active, 0);

	spin_lock_init(&video->pipe.lock);
	mutex_init(&video->stream_lock);
	mutex_init(&video->queue_lock);
	spin_lock_init(&video->irqlock);

	/* Initialize the video device. */
	if (video->ops == NULL)
		video->ops = &isp_video_dummy_ops;

	video->video.fops = &isp_video_fops;
	snprintf(video->video.name, sizeof(video->video.name),
		 "OMAP3 ISP %s %s", name, direction);
	video->video.vfl_type = VFL_TYPE_VIDEO;
	video->video.release = video_device_release_empty;
	video->video.ioctl_ops = &isp_video_ioctl_ops;
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		video->video.device_caps = V4L2_CAP_VIDEO_CAPTURE
					 | V4L2_CAP_STREAMING | V4L2_CAP_IO_MC;
		v4l2_disable_ioctl(&video->video, VIDIOC_S_PARM);
		v4l2_disable_ioctl(&video->video, VIDIOC_G_PARM);
	} else {
		video->video.device_caps = V4L2_CAP_VIDEO_OUTPUT
					 | V4L2_CAP_STREAMING | V4L2_CAP_IO_MC;
	}

	video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;

	video_set_drvdata(&video->video, video);

	return 0;
}

/*
 * omap3isp_video_cleanup - Release the resources taken by
 * omap3isp_video_init()
 * @video: the ISP video node to clean up
 */
void omap3isp_video_cleanup(struct isp_video *video)
{
	media_entity_cleanup(&video->video.entity);
	mutex_destroy(&video->queue_lock);
	mutex_destroy(&video->stream_lock);
	mutex_destroy(&video->mutex);
}

/*
 * omap3isp_video_register - Register the video device with the V4L2 core
 * @video: the ISP video node
 * @vdev: the parent v4l2_device
 *
 * Return 0 on success or the negative error code from
 * video_register_device(), which is also logged.
 */
int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
{
	int ret;

	video->video.v4l2_dev = vdev;

	ret = video_register_device(&video->video, VFL_TYPE_VIDEO, -1);
	if (ret < 0)
		dev_err(video->isp->dev,
			"%s: could not register video device (%d)\n",
			__func__, ret);

	return ret;
}

/*
 * omap3isp_video_unregister - Unregister the video device from the V4L2 core
 * @video: the ISP video node
 */
void omap3isp_video_unregister(struct isp_video *video)
{
	video_unregister_device(&video->video);
}