xref: /linux/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c (revision 8e1bb4a41aa78d6105e59186af3dcd545fc66e70)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Driver for STM32 Digital Camera Memory Interface Pixel Processor
4  *
5  * Copyright (C) STMicroelectronics SA 2023
6  * Authors: Hugues Fruchet <hugues.fruchet@foss.st.com>
7  *          Alain Volmat <alain.volmat@foss.st.com>
8  *          for STMicroelectronics.
9  */
10 
11 #include <linux/iopoll.h>
12 #include <linux/pm_runtime.h>
13 #include <media/v4l2-ioctl.h>
14 #include <media/v4l2-mc.h>
15 #include <media/videobuf2-core.h>
16 #include <media/videobuf2-dma-contig.h>
17 
18 #include "dcmipp-common.h"
19 
20 #define DCMIPP_PRSR		0x1f8
21 #define DCMIPP_CMIER		0x3f0
22 #define DCMIPP_CMIER_P0FRAMEIE	BIT(9)
23 #define DCMIPP_CMIER_P0VSYNCIE	BIT(10)
24 #define DCMIPP_CMIER_P0OVRIE	BIT(15)
25 #define DCMIPP_CMIER_P0ALL	(DCMIPP_CMIER_P0VSYNCIE |\
26 				 DCMIPP_CMIER_P0FRAMEIE |\
27 				 DCMIPP_CMIER_P0OVRIE)
28 #define DCMIPP_CMSR1		0x3f4
29 #define DCMIPP_CMSR2		0x3f8
30 #define DCMIPP_CMSR2_P0FRAMEF	BIT(9)
31 #define DCMIPP_CMSR2_P0VSYNCF	BIT(10)
32 #define DCMIPP_CMSR2_P0OVRF	BIT(15)
33 #define DCMIPP_CMFCR		0x3fc
34 #define DCMIPP_P0FSCR		0x404
35 #define DCMIPP_P0FSCR_PIPEN	BIT(31)
36 #define DCMIPP_P0FCTCR		0x500
37 #define DCMIPP_P0FCTCR_CPTREQ	BIT(3)
38 #define DCMIPP_P0DCCNTR		0x5b0
39 #define DCMIPP_P0DCLMTR		0x5b4
40 #define DCMIPP_P0DCLMTR_ENABLE	BIT(31)
41 #define DCMIPP_P0DCLMTR_LIMIT_MASK	GENMASK(23, 0)
42 #define DCMIPP_P0PPM0AR1	0x5c4
43 #define DCMIPP_P0SR		0x5f8
44 #define DCMIPP_P0SR_CPTACT	BIT(23)
45 
/* Mapping between a media bus code and its corresponding V4L2 pixelformat */
struct dcmipp_bytecap_pix_map {
	unsigned int code;	/* MEDIA_BUS_FMT_* code on the sink pad */
	u32 pixelformat;	/* matching V4L2_PIX_FMT_* fourcc */
};
50 
/* Shorthand to declare one mbus code <-> pixelformat pair */
#define PIXMAP_MBUS_PFMT(mbus, fmt)			\
	{						\
		.code = MEDIA_BUS_FMT_##mbus,		\
		.pixelformat = V4L2_PIX_FMT_##fmt	\
	}

/* Formats supported by the byte capture pipe: RGB565, YUV 4:2:2, grey, raw Bayer and JPEG */
static const struct dcmipp_bytecap_pix_map dcmipp_bytecap_pix_map_list[] = {
	PIXMAP_MBUS_PFMT(RGB565_2X8_LE, RGB565),
	PIXMAP_MBUS_PFMT(YUYV8_2X8, YUYV),
	PIXMAP_MBUS_PFMT(YVYU8_2X8, YVYU),
	PIXMAP_MBUS_PFMT(UYVY8_2X8, UYVY),
	PIXMAP_MBUS_PFMT(VYUY8_2X8, VYUY),
	PIXMAP_MBUS_PFMT(Y8_1X8, GREY),
	PIXMAP_MBUS_PFMT(SBGGR8_1X8, SBGGR8),
	PIXMAP_MBUS_PFMT(SGBRG8_1X8, SGBRG8),
	PIXMAP_MBUS_PFMT(SGRBG8_1X8, SGRBG8),
	PIXMAP_MBUS_PFMT(SRGGB8_1X8, SRGGB8),
	PIXMAP_MBUS_PFMT(JPEG_1X8, JPEG),
};
70 
71 static const struct dcmipp_bytecap_pix_map *
72 dcmipp_bytecap_pix_map_by_pixelformat(u32 pixelformat)
73 {
74 	unsigned int i;
75 
76 	for (i = 0; i < ARRAY_SIZE(dcmipp_bytecap_pix_map_list); i++) {
77 		if (dcmipp_bytecap_pix_map_list[i].pixelformat == pixelformat)
78 			return &dcmipp_bytecap_pix_map_list[i];
79 	}
80 
81 	return NULL;
82 }
83 
/* Per-buffer driver state wrapped around the vb2 buffer */
struct dcmipp_buf {
	struct vb2_v4l2_buffer	vb;		/* embedded vb2 v4l2 buffer */
	bool			prepared;	/* addr/size already cached by buf_prepare */
	dma_addr_t		addr;		/* DMA address of plane 0 */
	size_t			size;		/* size in bytes of plane 0 */
	struct list_head	list;		/* entry in vcap->buffers */
};
91 
/* Capture state machine of the byte capture device */
enum dcmipp_state {
	DCMIPP_STOPPED = 0,	/* stream is off */
	DCMIPP_WAIT_FOR_BUFFER,	/* stream on but capture paused until a buffer is queued */
	DCMIPP_RUNNING,		/* capture in progress */
};
97 
/* State of one byte capture video device */
struct dcmipp_bytecap_device {
	struct dcmipp_ent_device ved;	/* common dcmipp entity data */
	struct video_device vdev;
	struct device *dev;
	struct v4l2_pix_format format;	/* current capture format */
	struct vb2_queue queue;
	struct list_head buffers;	/* buffers queued by userspace, pending capture */
	/*
	 * Protects concurrent calls of buf queue / irq handler
	 * and buffer handling related variables / lists
	 */
	spinlock_t irqlock;
	/* mutex used as vdev and queue lock */
	struct mutex lock;
	u32 sequence;			/* frame sequence counter, reset at stream start */
	struct media_pipeline pipe;
	struct v4l2_subdev *s_subdev;	/* cached source subdev (link is IMMUTABLE) */

	enum dcmipp_state state;

	/*
	 * DCMIPP driver is handling 2 buffers
	 * active: buffer into which DCMIPP is currently writing into
	 * next: buffer given to the DCMIPP and which will become
	 *       automatically active on next VSYNC
	 */
	struct dcmipp_buf *active, *next;

	void __iomem *regs;

	u32 cmier;			/* cached enabled interrupt mask (DCMIPP_CMIER) */
	u32 cmsr2;			/* status latched by the hard irq handler */

	/* Statistics, reported at stream stop if errors occurred */
	struct {
		u32 errors;		/* sum of overrun + limit + nactive events */
		u32 limit;		/* frames larger than the destination buffer */
		u32 overrun;		/* hardware overrun interrupts */
		u32 buffers;		/* buffers returned to vb2 */
		u32 vsync;		/* VSYNC interrupts */
		u32 frame;		/* frame-end interrupts */
		u32 it;			/* total interrupts */
		u32 underrun;		/* VSYNC received while waiting for a buffer */
		u32 nactive;		/* frame-end without an active buffer */
	} count;
};
143 
/* Default capture format: RGB565 (2 bytes/pixel) at the default DCMIPP frame size */
static const struct v4l2_pix_format fmt_default = {
	.width = DCMIPP_FMT_WIDTH_DEFAULT,
	.height = DCMIPP_FMT_HEIGHT_DEFAULT,
	.pixelformat = V4L2_PIX_FMT_RGB565,
	.field = V4L2_FIELD_NONE,
	.bytesperline = DCMIPP_FMT_WIDTH_DEFAULT * 2,
	.sizeimage = DCMIPP_FMT_WIDTH_DEFAULT * DCMIPP_FMT_HEIGHT_DEFAULT * 2,
	.colorspace = DCMIPP_COLORSPACE_DEFAULT,
	.ycbcr_enc = DCMIPP_YCBCR_ENC_DEFAULT,
	.quantization = DCMIPP_QUANTIZATION_DEFAULT,
	.xfer_func = DCMIPP_XFER_FUNC_DEFAULT,
};
156 
/* VIDIOC_QUERYCAP: report driver and card identification strings */
static int dcmipp_bytecap_querycap(struct file *file, void *priv,
				   struct v4l2_capability *cap)
{
	strscpy(cap->driver, DCMIPP_PDEV_NAME, sizeof(cap->driver));
	strscpy(cap->card, KBUILD_MODNAME, sizeof(cap->card));

	return 0;
}
165 
/* VIDIOC_G_FMT: return the currently configured capture format */
static int dcmipp_bytecap_g_fmt_vid_cap(struct file *file, void *priv,
					struct v4l2_format *f)
{
	struct dcmipp_bytecap_device *vcap = video_drvdata(file);

	f->fmt.pix = vcap->format;

	return 0;
}
175 
/*
 * VIDIOC_TRY_FMT: validate and adjust the requested format.
 * Unsupported pixelformats fall back to the default one, width/height are
 * clamped to the DCMIPP frame limits and the plane layout (bytesperline,
 * sizeimage) is recomputed accordingly.
 */
static int dcmipp_bytecap_try_fmt_vid_cap(struct file *file, void *priv,
					  struct v4l2_format *f)
{
	struct dcmipp_bytecap_device *vcap = video_drvdata(file);
	struct v4l2_pix_format *format = &f->fmt.pix;
	const struct dcmipp_bytecap_pix_map *vpix;
	u32 in_w, in_h;

	/* Don't accept a pixelformat that is not on the table */
	vpix = dcmipp_bytecap_pix_map_by_pixelformat(format->pixelformat);
	if (!vpix)
		format->pixelformat = fmt_default.pixelformat;

	/* Adjust width & height */
	in_w = format->width;
	in_h = format->height;
	v4l_bound_align_image(&format->width, DCMIPP_FRAME_MIN_WIDTH,
			      DCMIPP_FRAME_MAX_WIDTH, 0, &format->height,
			      DCMIPP_FRAME_MIN_HEIGHT, DCMIPP_FRAME_MAX_HEIGHT,
			      0, 0);
	if (format->width != in_w || format->height != in_h)
		dev_dbg(vcap->dev, "resolution updated: %dx%d -> %dx%d\n",
			in_w, in_h, format->width, format->height);

	if (format->pixelformat == V4L2_PIX_FMT_JPEG) {
		/* JPEG is a compressed byte stream: budget one byte per pixel */
		format->bytesperline = format->width;
		format->sizeimage = format->bytesperline * format->height;
	} else {
		/* Let the core compute the plane layout for raw formats */
		v4l2_fill_pixfmt(format, format->pixelformat,
				 format->width, format->height);
	}

	if (format->field == V4L2_FIELD_ANY)
		format->field = fmt_default.field;

	dcmipp_colorimetry_clamp(format);

	return 0;
}
215 
216 static int dcmipp_bytecap_s_fmt_vid_cap(struct file *file, void *priv,
217 					struct v4l2_format *f)
218 {
219 	struct dcmipp_bytecap_device *vcap = video_drvdata(file);
220 	int ret;
221 
222 	/* Do not change the format while stream is on */
223 	if (vb2_is_busy(&vcap->queue))
224 		return -EBUSY;
225 
226 	ret = dcmipp_bytecap_try_fmt_vid_cap(file, priv, f);
227 	if (ret)
228 		return ret;
229 
230 	dev_dbg(vcap->dev, "%s: format update: old:%ux%u (0x%p4cc, %u, %u, %u, %u) new:%ux%d (0x%p4cc, %u, %u, %u, %u)\n",
231 		vcap->vdev.name,
232 		/* old */
233 		vcap->format.width, vcap->format.height,
234 		&vcap->format.pixelformat, vcap->format.colorspace,
235 		vcap->format.quantization, vcap->format.xfer_func,
236 		vcap->format.ycbcr_enc,
237 		/* new */
238 		f->fmt.pix.width, f->fmt.pix.height,
239 		&f->fmt.pix.pixelformat, f->fmt.pix.colorspace,
240 		f->fmt.pix.quantization, f->fmt.pix.xfer_func,
241 		f->fmt.pix.ycbcr_enc);
242 
243 	vcap->format = f->fmt.pix;
244 
245 	return 0;
246 }
247 
/*
 * VIDIOC_ENUM_FMT: enumerate the supported pixelformats, optionally
 * restricted to those compatible with a given media bus code.
 */
static int dcmipp_bytecap_enum_fmt_vid_cap(struct file *file, void *priv,
					   struct v4l2_fmtdesc *f)
{
	const struct dcmipp_bytecap_pix_map *vpix;
	unsigned int index = f->index;
	unsigned int i;

	if (f->mbus_code) {
		/*
		 * If a media bus code is specified, only enumerate formats
		 * compatible with it.
		 */
		for (i = 0; i < ARRAY_SIZE(dcmipp_bytecap_pix_map_list); i++) {
			vpix = &dcmipp_bytecap_pix_map_list[i];
			if (vpix->code != f->mbus_code)
				continue;

			/* Stop on the index-th matching entry */
			if (index == 0)
				break;

			index--;
		}

		/* Loop ran to completion: fewer than index+1 matches exist */
		if (i == ARRAY_SIZE(dcmipp_bytecap_pix_map_list))
			return -EINVAL;
	} else {
		/* Otherwise, enumerate all formats. */
		if (f->index >= ARRAY_SIZE(dcmipp_bytecap_pix_map_list))
			return -EINVAL;

		vpix = &dcmipp_bytecap_pix_map_list[f->index];
	}

	f->pixelformat = vpix->pixelformat;

	return 0;
}
285 
286 static int dcmipp_bytecap_enum_framesizes(struct file *file, void *fh,
287 					  struct v4l2_frmsizeenum *fsize)
288 {
289 	const struct dcmipp_bytecap_pix_map *vpix;
290 
291 	if (fsize->index)
292 		return -EINVAL;
293 
294 	/* Only accept code in the pix map table */
295 	vpix = dcmipp_bytecap_pix_map_by_pixelformat(fsize->pixel_format);
296 	if (!vpix)
297 		return -EINVAL;
298 
299 	fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
300 	fsize->stepwise.min_width = DCMIPP_FRAME_MIN_WIDTH;
301 	fsize->stepwise.max_width = DCMIPP_FRAME_MAX_WIDTH;
302 	fsize->stepwise.min_height = DCMIPP_FRAME_MIN_HEIGHT;
303 	fsize->stepwise.max_height = DCMIPP_FRAME_MAX_HEIGHT;
304 	fsize->stepwise.step_width = 1;
305 	fsize->stepwise.step_height = 1;
306 
307 	return 0;
308 }
309 
/* File operations, all backed by the standard v4l2/vb2 helpers */
static const struct v4l2_file_operations dcmipp_bytecap_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.read           = vb2_fop_read,
	.poll		= vb2_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap           = vb2_fop_mmap,
};
319 
/* Ioctl operations: format handling is local, buffer handling uses vb2 helpers */
static const struct v4l2_ioctl_ops dcmipp_bytecap_ioctl_ops = {
	.vidioc_querycap = dcmipp_bytecap_querycap,

	.vidioc_g_fmt_vid_cap = dcmipp_bytecap_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap = dcmipp_bytecap_s_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap = dcmipp_bytecap_try_fmt_vid_cap,
	.vidioc_enum_fmt_vid_cap = dcmipp_bytecap_enum_fmt_vid_cap,
	.vidioc_enum_framesizes = dcmipp_bytecap_enum_framesizes,

	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
};
339 
/*
 * Start (state = 1) or stop (state = 0) streaming on the subdev connected
 * to the video device sink pad. Returns 0 on success or a negative errno.
 */
static int dcmipp_pipeline_s_stream(struct dcmipp_bytecap_device *vcap,
				    int state)
{
	struct media_pad *pad;
	int ret;

	/*
	 * Get source subdev - since link is IMMUTABLE, pointer is cached
	 * within the dcmipp_bytecap_device structure
	 */
	if (!vcap->s_subdev) {
		pad = media_pad_remote_pad_first(&vcap->vdev.entity.pads[0]);
		if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
			return -EINVAL;
		vcap->s_subdev = media_entity_to_v4l2_subdev(pad->entity);
	}

	ret = v4l2_subdev_call(vcap->s_subdev, video, s_stream, state);
	if (ret < 0) {
		dev_err(vcap->dev, "failed to %s streaming (%d)\n",
			state ? "start" : "stop", ret);
		return ret;
	}

	return 0;
}
366 
/*
 * Program the destination buffer (address and size limit) and trigger a
 * capture request. Called with irqlock held.
 */
static void dcmipp_start_capture(struct dcmipp_bytecap_device *vcap,
				 struct dcmipp_buf *buf)
{
	/* Set buffer address */
	reg_write(vcap, DCMIPP_P0PPM0AR1, buf->addr);

	/* Set buffer size (limit register is expressed in 32-bit words) */
	reg_write(vcap, DCMIPP_P0DCLMTR, DCMIPP_P0DCLMTR_ENABLE |
		  ((buf->size / 4) & DCMIPP_P0DCLMTR_LIMIT_MASK));

	/* Capture request */
	reg_set(vcap, DCMIPP_P0FCTCR, DCMIPP_P0FCTCR_CPTREQ);
}
380 
/*
 * Remove all buffers from the pending list and return them to vb2 in the
 * given state. Called with irqlock held.
 */
static void dcmipp_bytecap_all_buffers_done(struct dcmipp_bytecap_device *vcap,
					    enum vb2_buffer_state state)
{
	struct dcmipp_buf *buf, *node;

	list_for_each_entry_safe(buf, node, &vcap->buffers, list) {
		list_del_init(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
}
391 
/*
 * vb2 start_streaming: power up the device, start the media pipeline and
 * the source subdev, then enable the pipe, program the first buffer and
 * unmask the pipe0 interrupts. On failure all queued buffers are returned
 * to vb2 in QUEUED state.
 */
static int dcmipp_bytecap_start_streaming(struct vb2_queue *vq,
					  unsigned int count)
{
	struct dcmipp_bytecap_device *vcap = vb2_get_drv_priv(vq);
	struct media_entity *entity = &vcap->vdev.entity;
	struct dcmipp_buf *buf;
	int ret;

	/* Reset the frame counter and the statistics */
	vcap->sequence = 0;
	memset(&vcap->count, 0, sizeof(vcap->count));

	ret = pm_runtime_resume_and_get(vcap->dev);
	if (ret < 0) {
		dev_err(vcap->dev, "%s: Failed to start streaming, cannot get sync (%d)\n",
			__func__, ret);
		goto err_buffer_done;
	}

	ret = media_pipeline_start(entity->pads, &vcap->pipe);
	if (ret) {
		dev_dbg(vcap->dev, "%s: Failed to start streaming, media pipeline start error (%d)\n",
			__func__, ret);
		goto err_pm_put;
	}

	ret = dcmipp_pipeline_s_stream(vcap, 1);
	if (ret)
		goto err_media_pipeline_stop;

	spin_lock_irq(&vcap->irqlock);

	/* Enable pipe at the end of programming */
	reg_set(vcap, DCMIPP_P0FSCR, DCMIPP_P0FSCR_PIPEN);

	/*
	 * vb2 framework guarantee that we have at least 'min_queued_buffers'
	 * buffers in the list at this moment
	 */
	vcap->next = list_first_entry(&vcap->buffers, typeof(*buf), list);
	dev_dbg(vcap->dev, "Start with next [%d] %p phy=%pad\n",
		vcap->next->vb.vb2_buf.index, vcap->next, &vcap->next->addr);

	dcmipp_start_capture(vcap, vcap->next);

	/* Enable interruptions */
	vcap->cmier |= DCMIPP_CMIER_P0ALL;
	reg_set(vcap, DCMIPP_CMIER, vcap->cmier);

	vcap->state = DCMIPP_RUNNING;

	spin_unlock_irq(&vcap->irqlock);

	return 0;

err_media_pipeline_stop:
	media_pipeline_stop(entity->pads);
err_pm_put:
	pm_runtime_put(vcap->dev);
err_buffer_done:
	spin_lock_irq(&vcap->irqlock);
	/*
	 * Return all buffers to vb2 in QUEUED state.
	 * This will give ownership back to userspace
	 */
	dcmipp_bytecap_all_buffers_done(vcap, VB2_BUF_STATE_QUEUED);
	vcap->active = NULL;
	spin_unlock_irq(&vcap->irqlock);

	return ret;
}
462 
/* Dump the main status registers, for debug purposes only */
static void dcmipp_dump_status(struct dcmipp_bytecap_device *vcap)
{
	struct device *dev = vcap->dev;

	dev_dbg(dev, "[DCMIPP_PRSR]  =%#10.8x\n", reg_read(vcap, DCMIPP_PRSR));
	dev_dbg(dev, "[DCMIPP_P0SR] =%#10.8x\n", reg_read(vcap, DCMIPP_P0SR));
	dev_dbg(dev, "[DCMIPP_P0DCCNTR]=%#10.8x\n",
		reg_read(vcap, DCMIPP_P0DCCNTR));
	dev_dbg(dev, "[DCMIPP_CMSR1] =%#10.8x\n", reg_read(vcap, DCMIPP_CMSR1));
	dev_dbg(dev, "[DCMIPP_CMSR2] =%#10.8x\n", reg_read(vcap, DCMIPP_CMSR2));
}
474 
/*
 * Stop the stream engine. Any remaining buffers in the stream queue are
 * dequeued and passed on to the vb2 framework marked as STATE_ERROR.
 */
static void dcmipp_bytecap_stop_streaming(struct vb2_queue *vq)
{
	struct dcmipp_bytecap_device *vcap = vb2_get_drv_priv(vq);
	int ret;
	u32 status;

	/* Stop the source subdev first */
	dcmipp_pipeline_s_stream(vcap, 0);

	/* Stop the media pipeline */
	media_pipeline_stop(vcap->vdev.entity.pads);

	/* Disable interruptions */
	reg_clear(vcap, DCMIPP_CMIER, vcap->cmier);

	/* Stop capture */
	reg_clear(vcap, DCMIPP_P0FCTCR, DCMIPP_P0FCTCR_CPTREQ);

	/* Wait until CPTACT become 0 */
	ret = readl_relaxed_poll_timeout(vcap->regs + DCMIPP_P0SR, status,
					 !(status & DCMIPP_P0SR_CPTACT),
					 20 * USEC_PER_MSEC,
					 1000 * USEC_PER_MSEC);
	if (ret)
		dev_warn(vcap->dev, "Timeout when stopping\n");

	/* Disable pipe */
	reg_clear(vcap, DCMIPP_P0FSCR, DCMIPP_P0FSCR_PIPEN);

	spin_lock_irq(&vcap->irqlock);

	/* Return all queued buffers to vb2 in ERROR state */
	dcmipp_bytecap_all_buffers_done(vcap, VB2_BUF_STATE_ERROR);
	INIT_LIST_HEAD(&vcap->buffers);

	vcap->active = NULL;
	vcap->state = DCMIPP_STOPPED;

	spin_unlock_irq(&vcap->irqlock);

	dcmipp_dump_status(vcap);

	pm_runtime_put(vcap->dev);

	/* Report the streaming statistics if any error was counted */
	if (vcap->count.errors)
		dev_warn(vcap->dev, "Some errors found while streaming: errors=%d (overrun=%d, limit=%d, nactive=%d), underrun=%d, buffers=%d\n",
			 vcap->count.errors, vcap->count.overrun,
			 vcap->count.limit, vcap->count.nactive,
			 vcap->count.underrun, vcap->count.buffers);
}
528 
/*
 * vb2 buf_prepare: verify that the buffer plane can hold a full image and
 * cache its DMA address and size the first time the buffer is prepared.
 */
static int dcmipp_bytecap_buf_prepare(struct vb2_buffer *vb)
{
	struct dcmipp_bytecap_device *vcap =  vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct dcmipp_buf *buf = container_of(vbuf, struct dcmipp_buf, vb);
	unsigned long size;

	size = vcap->format.sizeimage;

	if (vb2_plane_size(vb, 0) < size) {
		dev_err(vcap->dev, "%s data will not fit into plane (%lu < %lu)\n",
			__func__, vb2_plane_size(vb, 0), size);
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, size);

	if (!buf->prepared) {
		/* Get memory addresses */
		buf->addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
		buf->size = vb2_plane_size(&buf->vb.vb2_buf, 0);
		buf->prepared = true;

		/*
		 * NOTE(review): this overwrites the payload set above
		 * (format.sizeimage) with the full plane size on the first
		 * prepare — looks redundant/inconsistent, confirm which
		 * value is intended.
		 */
		vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);

		dev_dbg(vcap->dev, "Setup [%d] phy=%pad size=%zu\n",
			vb->index, &buf->addr, buf->size);
	}

	return 0;
}
560 
/*
 * vb2 buf_queue: add the buffer to the pending list and, if the capture
 * was paused waiting for a buffer, restart it with this one.
 */
static void dcmipp_bytecap_buf_queue(struct vb2_buffer *vb2_buf)
{
	struct dcmipp_bytecap_device *vcap =
		vb2_get_drv_priv(vb2_buf->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2_buf);
	struct dcmipp_buf *buf = container_of(vbuf, struct dcmipp_buf, vb);

	dev_dbg(vcap->dev, "Queue [%d] %p phy=%pad\n", buf->vb.vb2_buf.index,
		buf, &buf->addr);

	spin_lock_irq(&vcap->irqlock);
	list_add_tail(&buf->list, &vcap->buffers);

	if (vcap->state == DCMIPP_WAIT_FOR_BUFFER) {
		/* Capture was stopped for lack of buffers: restart it now */
		vcap->next = buf;
		dev_dbg(vcap->dev, "Restart with next [%d] %p phy=%pad\n",
			buf->vb.vb2_buf.index, buf, &buf->addr);

		dcmipp_start_capture(vcap, buf);

		vcap->state = DCMIPP_RUNNING;
	}

	spin_unlock_irq(&vcap->irqlock);
}
586 
587 static int dcmipp_bytecap_queue_setup(struct vb2_queue *vq,
588 				      unsigned int *nbuffers,
589 				      unsigned int *nplanes,
590 				      unsigned int sizes[],
591 				      struct device *alloc_devs[])
592 {
593 	struct dcmipp_bytecap_device *vcap = vb2_get_drv_priv(vq);
594 	unsigned int size;
595 
596 	size = vcap->format.sizeimage;
597 
598 	/* Make sure the image size is large enough */
599 	if (*nplanes)
600 		return sizes[0] < vcap->format.sizeimage ? -EINVAL : 0;
601 
602 	*nplanes = 1;
603 	sizes[0] = vcap->format.sizeimage;
604 
605 	dev_dbg(vcap->dev, "Setup queue, count=%d, size=%d\n",
606 		*nbuffers, size);
607 
608 	return 0;
609 }
610 
611 static int dcmipp_bytecap_buf_init(struct vb2_buffer *vb)
612 {
613 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
614 	struct dcmipp_buf *buf = container_of(vbuf, struct dcmipp_buf, vb);
615 
616 	INIT_LIST_HEAD(&buf->list);
617 
618 	return 0;
619 }
620 
/* vb2 queue operations of the byte capture device */
static const struct vb2_ops dcmipp_bytecap_qops = {
	.start_streaming	= dcmipp_bytecap_start_streaming,
	.stop_streaming		= dcmipp_bytecap_stop_streaming,
	.buf_init		= dcmipp_bytecap_buf_init,
	.buf_prepare		= dcmipp_bytecap_buf_prepare,
	.buf_queue		= dcmipp_bytecap_buf_queue,
	.queue_setup		= dcmipp_bytecap_queue_setup,
	/*
	 * Since q->lock is set we can use the standard
	 * vb2_ops_wait_prepare/finish helper functions.
	 */
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
};
635 
/* video_device release callback: free all resources owned by the device */
static void dcmipp_bytecap_release(struct video_device *vdev)
{
	struct dcmipp_bytecap_device *vcap =
		container_of(vdev, struct dcmipp_bytecap_device, vdev);

	dcmipp_pads_cleanup(vcap->ved.pads);
	mutex_destroy(&vcap->lock);

	kfree(vcap);
}
646 
/*
 * Unregister the video device and clean up its media entity; the final
 * memory release happens through the video_device release callback.
 */
void dcmipp_bytecap_ent_release(struct dcmipp_ent_device *ved)
{
	struct dcmipp_bytecap_device *vcap =
		container_of(ved, struct dcmipp_bytecap_device, ved);

	media_entity_cleanup(ved->ent);
	vb2_video_unregister_device(&vcap->vdev);
}
655 
/*
 * Fill buffer metadata (sequence, field, timestamp, payload) and hand the
 * buffer back to vb2 in DONE or ERROR state. Called with irqlock held.
 */
static void dcmipp_buffer_done(struct dcmipp_bytecap_device *vcap,
			       struct dcmipp_buf *buf,
			       size_t bytesused,
			       int err)
{
	struct vb2_v4l2_buffer *vbuf;

	list_del_init(&buf->list);

	vbuf = &buf->vb;

	vbuf->sequence = vcap->sequence++;
	vbuf->field = V4L2_FIELD_NONE;
	vbuf->vb2_buf.timestamp = ktime_get_ns();
	vb2_set_plane_payload(&vbuf->vb2_buf, 0, bytesused);
	vb2_buffer_done(&vbuf->vb2_buf,
			err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
	dev_dbg(vcap->dev, "Done  [%d] %p phy=%pad\n", buf->vb.vb2_buf.index,
		buf, &buf->addr);
	vcap->count.buffers++;
}
677 
/*
 * Program the buffer to be used for the next frame, or pause the capture
 * when no spare buffer is available. irqlock must be held.
 */
static void
dcmipp_bytecap_set_next_frame_or_stop(struct dcmipp_bytecap_device *vcap)
{
	if (!vcap->next && list_is_singular(&vcap->buffers)) {
		/*
		 * If there is no available buffer (none or a single one in the
		 * list while two are expected), stop the capture (effective
		 * for next frame). On-going frame capture will continue until
		 * FRAME END but no further capture will be done.
		 */
		reg_clear(vcap, DCMIPP_P0FCTCR, DCMIPP_P0FCTCR_CPTREQ);

		dev_dbg(vcap->dev, "Capture restart is deferred to next buffer queueing\n");
		vcap->next = NULL;
		vcap->state = DCMIPP_WAIT_FOR_BUFFER;
		return;
	}

	/* If we don't have buffer yet, pick the one after active */
	if (!vcap->next)
		vcap->next = list_next_entry(vcap->active, list);

	/*
	 * Set buffer address
	 * This register is shadowed and will be taken into
	 * account on next VSYNC (start of next frame)
	 */
	reg_write(vcap, DCMIPP_P0PPM0AR1, vcap->next->addr);
	dev_dbg(vcap->dev, "Write [%d] %p phy=%pad\n",
		vcap->next->vb.vb2_buf.index, vcap->next, &vcap->next->addr);
}
710 
/*
 * Complete the active buffer at FRAME END: clip the payload to the buffer
 * size if the hardware wrote more than expected and return the buffer to
 * vb2. irqlock must be held.
 */
static void dcmipp_bytecap_process_frame(struct dcmipp_bytecap_device *vcap,
					 size_t bytesused)
{
	int err = 0;
	struct dcmipp_buf *buf = vcap->active;

	/* Frame end without an active buffer: only account the error */
	if (!buf) {
		vcap->count.nactive++;
		vcap->count.errors++;
		return;
	}

	if (bytesused > buf->size) {
		dev_dbg(vcap->dev, "frame larger than expected (%zu > %zu)\n",
			bytesused, buf->size);
		/* Clip to buffer size and return buffer to V4L2 in error */
		bytesused = buf->size;
		vcap->count.limit++;
		vcap->count.errors++;
		err = -EOVERFLOW;
	}

	dcmipp_buffer_done(vcap, buf, bytesused, err);
	vcap->active = NULL;
}
737 
/*
 * Threaded irq handler: processes the status bits latched by
 * dcmipp_bytecap_irq_callback — overrun accounting, frame completion and
 * next-buffer programming on VSYNC.
 */
static irqreturn_t dcmipp_bytecap_irq_thread(int irq, void *arg)
{
	struct dcmipp_bytecap_device *vcap =
			container_of(arg, struct dcmipp_bytecap_device, ved);
	size_t bytesused = 0;
	u32 cmsr2;

	spin_lock_irq(&vcap->irqlock);

	/* Only consider status bits whose interrupt is enabled */
	cmsr2 = vcap->cmsr2 & vcap->cmier;

	/*
	 * If we have an overrun, a frame-end will probably not be generated,
	 * in that case the active buffer will be recycled as next buffer by
	 * the VSYNC handler
	 */
	if (cmsr2 & DCMIPP_CMSR2_P0OVRF) {
		vcap->count.errors++;
		vcap->count.overrun++;
	}

	if (cmsr2 & DCMIPP_CMSR2_P0FRAMEF) {
		vcap->count.frame++;

		/* Read captured buffer size */
		bytesused = reg_read(vcap, DCMIPP_P0DCCNTR);
		dcmipp_bytecap_process_frame(vcap, bytesused);
	}

	if (cmsr2 & DCMIPP_CMSR2_P0VSYNCF) {
		vcap->count.vsync++;
		if (vcap->state == DCMIPP_WAIT_FOR_BUFFER) {
			vcap->count.underrun++;
			goto out;
		}

		/*
		 * On VSYNC, the previously set next buffer is going to become
		 * active thanks to the shadowing mechanism of the DCMIPP. In
		 * most of the cases, since a FRAMEEND has already come,
		 * pointer next is NULL since active is reset during the
		 * FRAMEEND handling. However, in case of framerate adjustment,
		 * there are more VSYNC than FRAMEEND. Thus we recycle the
		 * active (but not used) buffer and put it back into next.
		 */
		swap(vcap->active, vcap->next);
		dcmipp_bytecap_set_next_frame_or_stop(vcap);
	}

out:
	spin_unlock_irq(&vcap->irqlock);
	return IRQ_HANDLED;
}
791 
/*
 * Hard irq handler: latch the interrupt status, clear it in hardware and
 * defer the actual processing to the irq thread.
 */
static irqreturn_t dcmipp_bytecap_irq_callback(int irq, void *arg)
{
	struct dcmipp_bytecap_device *vcap =
			container_of(arg, struct dcmipp_bytecap_device, ved);

	/* Store interrupt status register */
	vcap->cmsr2 = reg_read(vcap, DCMIPP_CMSR2) & vcap->cmier;
	vcap->count.it++;

	/* Clear interrupt */
	reg_write(vcap, DCMIPP_CMFCR, vcap->cmsr2);

	return IRQ_WAKE_THREAD;
}
806 
/*
 * Media link validation: check that the active format of the source
 * subdev matches the capture video device format (size and mbus code).
 */
static int dcmipp_bytecap_link_validate(struct media_link *link)
{
	struct media_entity *entity = link->sink->entity;
	struct video_device *vd = media_entity_to_video_device(entity);
	struct dcmipp_bytecap_device *vcap = container_of(vd,
					struct dcmipp_bytecap_device, vdev);
	struct v4l2_subdev *source_sd =
		media_entity_to_v4l2_subdev(link->source->entity);
	struct v4l2_subdev_format source_fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = link->source->index,
	};
	const struct dcmipp_bytecap_pix_map *vpix;
	int ret;

	/* If the source cannot report its format, accept the link as-is */
	ret = v4l2_subdev_call(source_sd, pad, get_fmt, NULL, &source_fmt);
	if (ret < 0)
		return 0;

	if (source_fmt.format.width != vcap->format.width ||
	    source_fmt.format.height != vcap->format.height) {
		dev_err(vcap->dev, "Wrong width or height %ux%u (%ux%u expected)\n",
			vcap->format.width, vcap->format.height,
			source_fmt.format.width, source_fmt.format.height);
		return -EINVAL;
	}

	/*
	 * NOTE(review): vpix is not NULL-checked; this relies on
	 * vcap->format.pixelformat always being a supported format
	 * (enforced by try_fmt / the default format) — confirm.
	 */
	vpix = dcmipp_bytecap_pix_map_by_pixelformat(vcap->format.pixelformat);
	if (source_fmt.format.code != vpix->code) {
		dev_err(vcap->dev, "Wrong mbus_code 0x%x, (0x%x expected)\n",
			vpix->code, source_fmt.format.code);
		return -EINVAL;
	}

	return 0;
}
843 
/* Media entity operations of the capture video device */
static const struct media_entity_operations dcmipp_bytecap_entity_ops = {
	.link_validate = dcmipp_bytecap_link_validate,
};
847 
848 struct dcmipp_ent_device *dcmipp_bytecap_ent_init(struct device *dev,
849 						  const char *entity_name,
850 						  struct v4l2_device *v4l2_dev,
851 						  void __iomem *regs)
852 {
853 	struct dcmipp_bytecap_device *vcap;
854 	struct video_device *vdev;
855 	struct vb2_queue *q;
856 	const unsigned long pad_flag = MEDIA_PAD_FL_SINK;
857 	int ret = 0;
858 
859 	/* Allocate the dcmipp_bytecap_device struct */
860 	vcap = kzalloc(sizeof(*vcap), GFP_KERNEL);
861 	if (!vcap)
862 		return ERR_PTR(-ENOMEM);
863 
864 	/* Allocate the pads */
865 	vcap->ved.pads = dcmipp_pads_init(1, &pad_flag);
866 	if (IS_ERR(vcap->ved.pads)) {
867 		ret = PTR_ERR(vcap->ved.pads);
868 		goto err_free_vcap;
869 	}
870 
871 	/* Initialize the media entity */
872 	vcap->vdev.entity.name = entity_name;
873 	vcap->vdev.entity.function = MEDIA_ENT_F_IO_V4L;
874 	vcap->vdev.entity.ops = &dcmipp_bytecap_entity_ops;
875 	ret = media_entity_pads_init(&vcap->vdev.entity, 1, vcap->ved.pads);
876 	if (ret)
877 		goto err_clean_pads;
878 
879 	/* Initialize the lock */
880 	mutex_init(&vcap->lock);
881 
882 	/* Initialize the vb2 queue */
883 	q = &vcap->queue;
884 	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
885 	q->io_modes = VB2_MMAP | VB2_DMABUF;
886 	q->lock = &vcap->lock;
887 	q->drv_priv = vcap;
888 	q->buf_struct_size = sizeof(struct dcmipp_buf);
889 	q->ops = &dcmipp_bytecap_qops;
890 	q->mem_ops = &vb2_dma_contig_memops;
891 	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
892 	q->min_queued_buffers = 1;
893 	q->dev = dev;
894 
895 	/* DCMIPP requires 16 bytes aligned buffers */
896 	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32) & ~0x0f);
897 	if (ret) {
898 		dev_err(dev, "Failed to set DMA mask\n");
899 		goto err_mutex_destroy;
900 	}
901 
902 	ret = vb2_queue_init(q);
903 	if (ret) {
904 		dev_err(dev, "%s: vb2 queue init failed (err=%d)\n",
905 			entity_name, ret);
906 		goto err_clean_m_ent;
907 	}
908 
909 	/* Initialize buffer list and its lock */
910 	INIT_LIST_HEAD(&vcap->buffers);
911 	spin_lock_init(&vcap->irqlock);
912 
913 	/* Set default frame format */
914 	vcap->format = fmt_default;
915 
916 	/* Fill the dcmipp_ent_device struct */
917 	vcap->ved.ent = &vcap->vdev.entity;
918 	vcap->ved.handler = dcmipp_bytecap_irq_callback;
919 	vcap->ved.thread_fn = dcmipp_bytecap_irq_thread;
920 	vcap->dev = dev;
921 	vcap->regs = regs;
922 
923 	/* Initialize the video_device struct */
924 	vdev = &vcap->vdev;
925 	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
926 			    V4L2_CAP_IO_MC;
927 	vdev->release = dcmipp_bytecap_release;
928 	vdev->fops = &dcmipp_bytecap_fops;
929 	vdev->ioctl_ops = &dcmipp_bytecap_ioctl_ops;
930 	vdev->lock = &vcap->lock;
931 	vdev->queue = q;
932 	vdev->v4l2_dev = v4l2_dev;
933 	strscpy(vdev->name, entity_name, sizeof(vdev->name));
934 	video_set_drvdata(vdev, &vcap->ved);
935 
936 	/* Register the video_device with the v4l2 and the media framework */
937 	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
938 	if (ret) {
939 		dev_err(dev, "%s: video register failed (err=%d)\n",
940 			vcap->vdev.name, ret);
941 		goto err_clean_m_ent;
942 	}
943 
944 	return &vcap->ved;
945 
946 err_clean_m_ent:
947 	media_entity_cleanup(&vcap->vdev.entity);
948 err_mutex_destroy:
949 	mutex_destroy(&vcap->lock);
950 err_clean_pads:
951 	dcmipp_pads_cleanup(vcap->ved.pads);
952 err_free_vcap:
953 	kfree(vcap);
954 
955 	return ERR_PTR(ret);
956 }
957