xref: /linux/drivers/media/platform/ti/vpe/vpe.c (revision b615879dbfea6cf1236acbc3f2fb25ae84e07071)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * TI VPE mem2mem driver, based on the virtual v4l2-mem2mem example driver
4  *
5  * Copyright (c) 2013 Texas Instruments Inc.
6  * David Griego, <dagriego@biglakesoftware.com>
7  * Dale Farnsworth, <dale@farnsworth.org>
8  * Archit Taneja, <archit@ti.com>
9  *
10  * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
11  * Pawel Osciak, <pawel@osciak.com>
12  * Marek Szyprowski, <m.szyprowski@samsung.com>
13  *
14  * Based on the virtual v4l2-mem2mem example device
15  */
16 
17 #include <linux/delay.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/fs.h>
21 #include <linux/interrupt.h>
22 #include <linux/io.h>
23 #include <linux/ioctl.h>
24 #include <linux/module.h>
25 #include <linux/of.h>
26 #include <linux/platform_device.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/sched.h>
29 #include <linux/slab.h>
30 #include <linux/videodev2.h>
31 #include <linux/log2.h>
32 #include <linux/sizes.h>
33 
34 #include <media/v4l2-common.h>
35 #include <media/v4l2-ctrls.h>
36 #include <media/v4l2-device.h>
37 #include <media/v4l2-event.h>
38 #include <media/v4l2-ioctl.h>
39 #include <media/v4l2-mem2mem.h>
40 #include <media/videobuf2-v4l2.h>
41 #include <media/videobuf2-dma-contig.h>
42 
43 #include "vpdma.h"
44 #include "vpdma_priv.h"
45 #include "vpe_regs.h"
46 #include "sc.h"
47 #include "csc.h"
48 
49 #define VPE_MODULE_NAME "vpe"
50 
51 /* minimum and maximum frame sizes */
52 #define MIN_W		32
53 #define MIN_H		32
54 #define MAX_W		2048
55 #define MAX_H		2048
56 
57 /* required alignments */
58 #define S_ALIGN		0	/* multiple of 1 */
59 #define H_ALIGN		1	/* multiple of 2 */
60 
61 /* flags that indicate a format can be used for capture/output */
62 #define VPE_FMT_TYPE_CAPTURE	(1 << 0)
63 #define VPE_FMT_TYPE_OUTPUT	(1 << 1)
64 
65 /* used as plane indices */
66 #define VPE_MAX_PLANES	2
67 #define VPE_LUMA	0
68 #define VPE_CHROMA	1
69 
70 /* per m2m context info */
71 #define VPE_MAX_SRC_BUFS	3	/* need 3 src fields to de-interlace */
72 
73 #define VPE_DEF_BUFS_PER_JOB	1	/* default one buffer per batch job */
74 
75 /*
76  * each VPE context can require up to 3 config descriptors, 7 input
77  * descriptors, 3 output descriptors, and 10 control descriptors
78  */
79 #define VPE_DESC_LIST_SIZE	(10 * VPDMA_DTD_DESC_SIZE +	\
80 					13 * VPDMA_CFD_CTD_DESC_SIZE)
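/*
 * For reference, the two terms above follow from the comment: the 7 input
 * and 3 output descriptors account for 10 * VPDMA_DTD_DESC_SIZE, and the
 * 3 config plus 10 control descriptors account for
 * 13 * VPDMA_CFD_CTD_DESC_SIZE.
 */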
81 
82 #define vpe_dbg(vpedev, fmt, arg...)	\
83 		dev_dbg((vpedev)->v4l2_dev.dev, fmt, ##arg)
84 #define vpe_err(vpedev, fmt, arg...)	\
85 		dev_err((vpedev)->v4l2_dev.dev, fmt, ##arg)
86 
87 struct vpe_us_coeffs {
88 	unsigned short	anchor_fid0_c0;
89 	unsigned short	anchor_fid0_c1;
90 	unsigned short	anchor_fid0_c2;
91 	unsigned short	anchor_fid0_c3;
92 	unsigned short	interp_fid0_c0;
93 	unsigned short	interp_fid0_c1;
94 	unsigned short	interp_fid0_c2;
95 	unsigned short	interp_fid0_c3;
96 	unsigned short	anchor_fid1_c0;
97 	unsigned short	anchor_fid1_c1;
98 	unsigned short	anchor_fid1_c2;
99 	unsigned short	anchor_fid1_c3;
100 	unsigned short	interp_fid1_c0;
101 	unsigned short	interp_fid1_c1;
102 	unsigned short	interp_fid1_c2;
103 	unsigned short	interp_fid1_c3;
104 };
105 
106 /*
107  * Default upsampler coefficients
108  */
109 static const struct vpe_us_coeffs us_coeffs[] = {
110 	{
111 		/* Coefficients for progressive input */
112 		0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
113 		0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
114 	},
115 	{
116 		/* Coefficients for Top Field Interlaced input */
117 		0x0051, 0x03D5, 0x3FE3, 0x3FF7, 0x3FB5, 0x02E9, 0x018F, 0x3FD3,
118 		/* Coefficients for Bottom Field Interlaced input */
119 		0x016B, 0x0247, 0x00B1, 0x3F9D, 0x3FCF, 0x03DB, 0x005D, 0x3FF9,
120 	},
121 };
122 
123 /*
124  * The following registers configure some of the parameters of the motion
125  * and edge detection blocks inside the DEI. These generally remain the
126  * same, but could later be exposed via userspace if someone needs to tweak them.
127  */
128 struct vpe_dei_regs {
129 	unsigned long mdt_spacial_freq_thr_reg;		/* VPE_DEI_REG2 */
130 	unsigned long edi_config_reg;			/* VPE_DEI_REG3 */
131 	unsigned long edi_lut_reg0;			/* VPE_DEI_REG4 */
132 	unsigned long edi_lut_reg1;			/* VPE_DEI_REG5 */
133 	unsigned long edi_lut_reg2;			/* VPE_DEI_REG6 */
134 	unsigned long edi_lut_reg3;			/* VPE_DEI_REG7 */
135 };
136 
137 /*
138  * default expert DEI register values, unlikely to be modified.
139  */
140 static const struct vpe_dei_regs dei_regs = {
141 	.mdt_spacial_freq_thr_reg = 0x020C0804u,
142 	.edi_config_reg = 0x0118100Cu,
143 	.edi_lut_reg0 = 0x08040200u,
144 	.edi_lut_reg1 = 0x1010100Cu,
145 	.edi_lut_reg2 = 0x10101010u,
146 	.edi_lut_reg3 = 0x10101010u,
147 };
148 
149 /*
150  * The port_data structure contains per-port data.
151  */
152 struct vpe_port_data {
153 	enum vpdma_channel channel;	/* VPDMA channel */
154 	u8	vb_index;		/* input frame f, f-1, f-2 index */
155 	u8	vb_part;		/* plane index for co-planar formats */
156 };
157 
158 /*
159  * Define indices into the port_data tables
160  */
161 #define VPE_PORT_LUMA1_IN	0
162 #define VPE_PORT_CHROMA1_IN	1
163 #define VPE_PORT_LUMA2_IN	2
164 #define VPE_PORT_CHROMA2_IN	3
165 #define VPE_PORT_LUMA3_IN	4
166 #define VPE_PORT_CHROMA3_IN	5
167 #define VPE_PORT_MV_IN		6
168 #define VPE_PORT_MV_OUT		7
169 #define VPE_PORT_LUMA_OUT	8
170 #define VPE_PORT_CHROMA_OUT	9
171 #define VPE_PORT_RGB_OUT	10
172 
173 static const struct vpe_port_data port_data[11] = {
174 	[VPE_PORT_LUMA1_IN] = {
175 		.channel	= VPE_CHAN_LUMA1_IN,
176 		.vb_index	= 0,
177 		.vb_part	= VPE_LUMA,
178 	},
179 	[VPE_PORT_CHROMA1_IN] = {
180 		.channel	= VPE_CHAN_CHROMA1_IN,
181 		.vb_index	= 0,
182 		.vb_part	= VPE_CHROMA,
183 	},
184 	[VPE_PORT_LUMA2_IN] = {
185 		.channel	= VPE_CHAN_LUMA2_IN,
186 		.vb_index	= 1,
187 		.vb_part	= VPE_LUMA,
188 	},
189 	[VPE_PORT_CHROMA2_IN] = {
190 		.channel	= VPE_CHAN_CHROMA2_IN,
191 		.vb_index	= 1,
192 		.vb_part	= VPE_CHROMA,
193 	},
194 	[VPE_PORT_LUMA3_IN] = {
195 		.channel	= VPE_CHAN_LUMA3_IN,
196 		.vb_index	= 2,
197 		.vb_part	= VPE_LUMA,
198 	},
199 	[VPE_PORT_CHROMA3_IN] = {
200 		.channel	= VPE_CHAN_CHROMA3_IN,
201 		.vb_index	= 2,
202 		.vb_part	= VPE_CHROMA,
203 	},
204 	[VPE_PORT_MV_IN] = {
205 		.channel	= VPE_CHAN_MV_IN,
206 	},
207 	[VPE_PORT_MV_OUT] = {
208 		.channel	= VPE_CHAN_MV_OUT,
209 	},
210 	[VPE_PORT_LUMA_OUT] = {
211 		.channel	= VPE_CHAN_LUMA_OUT,
212 		.vb_part	= VPE_LUMA,
213 	},
214 	[VPE_PORT_CHROMA_OUT] = {
215 		.channel	= VPE_CHAN_CHROMA_OUT,
216 		.vb_part	= VPE_CHROMA,
217 	},
218 	[VPE_PORT_RGB_OUT] = {
219 		.channel	= VPE_CHAN_RGB_OUT,
220 		.vb_part	= VPE_LUMA,
221 	},
222 };
223 
224 
225 /* driver info for each of the supported video formats */
226 struct vpe_fmt {
227 	u32	fourcc;			/* standard format identifier */
228 	u8	types;			/* CAPTURE and/or OUTPUT */
229 	u8	coplanar;		/* set for unpacked Luma and Chroma */
230 	/* vpdma format info for each plane */
231 	struct vpdma_data_format const *vpdma_fmt[VPE_MAX_PLANES];
232 };
233 
234 static struct vpe_fmt vpe_formats[] = {
235 	{
236 		.fourcc		= V4L2_PIX_FMT_NV16,
237 		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
238 		.coplanar	= 1,
239 		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y444],
240 				    &vpdma_yuv_fmts[VPDMA_DATA_FMT_C444],
241 				  },
242 	},
243 	{
244 		.fourcc		= V4L2_PIX_FMT_NV12,
245 		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
246 		.coplanar	= 1,
247 		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420],
248 				    &vpdma_yuv_fmts[VPDMA_DATA_FMT_C420],
249 				  },
250 	},
251 	{
252 		.fourcc		= V4L2_PIX_FMT_NV21,
253 		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
254 		.coplanar	= 1,
255 		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420],
256 				    &vpdma_yuv_fmts[VPDMA_DATA_FMT_CB420],
257 				  },
258 	},
259 	{
260 		.fourcc		= V4L2_PIX_FMT_YUYV,
261 		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
262 		.coplanar	= 0,
263 		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YCB422],
264 				  },
265 	},
266 	{
267 		.fourcc		= V4L2_PIX_FMT_UYVY,
268 		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
269 		.coplanar	= 0,
270 		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CBY422],
271 				  },
272 	},
273 	{
274 		.fourcc		= V4L2_PIX_FMT_RGB24,
275 		.types		= VPE_FMT_TYPE_CAPTURE,
276 		.coplanar	= 0,
277 		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB24],
278 				  },
279 	},
280 	{
281 		.fourcc		= V4L2_PIX_FMT_RGB32,
282 		.types		= VPE_FMT_TYPE_CAPTURE,
283 		.coplanar	= 0,
284 		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ARGB32],
285 				  },
286 	},
287 	{
288 		.fourcc		= V4L2_PIX_FMT_BGR24,
289 		.types		= VPE_FMT_TYPE_CAPTURE,
290 		.coplanar	= 0,
291 		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_BGR24],
292 				  },
293 	},
294 	{
295 		.fourcc		= V4L2_PIX_FMT_BGR32,
296 		.types		= VPE_FMT_TYPE_CAPTURE,
297 		.coplanar	= 0,
298 		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ABGR32],
299 				  },
300 	},
301 	{
302 		.fourcc		= V4L2_PIX_FMT_RGB565,
303 		.types		= VPE_FMT_TYPE_CAPTURE,
304 		.coplanar	= 0,
305 		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB565],
306 				  },
307 	},
308 	{
309 		.fourcc		= V4L2_PIX_FMT_RGB555,
310 		.types		= VPE_FMT_TYPE_CAPTURE,
311 		.coplanar	= 0,
312 		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGBA16_5551],
313 				  },
314 	},
315 };
316 
317 /*
318  * per-queue, driver-specific private data.
319  * there is one source queue and one destination queue for each m2m context.
320  */
321 struct vpe_q_data {
322 	/* current v4l2 format info */
323 	struct v4l2_format	format;
324 	unsigned int		flags;
325 	struct v4l2_rect	c_rect;				/* crop/compose rectangle */
326 	struct vpe_fmt		*fmt;				/* format info */
327 };
328 
329 /* vpe_q_data flag bits */
330 #define	Q_DATA_FRAME_1D			BIT(0)
331 #define	Q_DATA_MODE_TILED		BIT(1)
332 #define	Q_DATA_INTERLACED_ALTERNATE	BIT(2)
333 #define	Q_DATA_INTERLACED_SEQ_TB	BIT(3)
334 #define	Q_DATA_INTERLACED_SEQ_BT	BIT(4)
335 
336 #define Q_IS_SEQ_XX		(Q_DATA_INTERLACED_SEQ_TB | \
337 				Q_DATA_INTERLACED_SEQ_BT)
338 
339 #define Q_IS_INTERLACED		(Q_DATA_INTERLACED_ALTERNATE | \
340 				Q_DATA_INTERLACED_SEQ_TB | \
341 				Q_DATA_INTERLACED_SEQ_BT)
342 
343 enum {
344 	Q_DATA_SRC = 0,
345 	Q_DATA_DST = 1,
346 };
347 
348 /* find our format description corresponding to the passed v4l2_format */
349 static struct vpe_fmt *__find_format(u32 fourcc)
350 {
351 	struct vpe_fmt *fmt;
352 	unsigned int k;
353 
354 	for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) {
355 		fmt = &vpe_formats[k];
356 		if (fmt->fourcc == fourcc)
357 			return fmt;
358 	}
359 
360 	return NULL;
361 }
362 
363 static struct vpe_fmt *find_format(struct v4l2_format *f)
364 {
365 	return __find_format(f->fmt.pix.pixelformat);
366 }
367 
368 /*
369  * There is one vpe_dev structure in the driver; it is shared by
370  * all instances.
371  */
372 struct vpe_dev {
373 	struct v4l2_device	v4l2_dev;
374 	struct video_device	vfd;
375 	struct v4l2_m2m_dev	*m2m_dev;
376 
377 	atomic_t		num_instances;	/* count of driver instances */
378 	dma_addr_t		loaded_mmrs;	/* shadow mmrs in device */
379 	struct mutex		dev_mutex;
380 	spinlock_t		lock;
381 
382 	int			irq;
383 	void __iomem		*base;
384 	struct resource		*res;
385 
386 	struct vpdma_data	vpdma_data;
387 	struct vpdma_data	*vpdma;		/* vpdma data handle */
388 	struct sc_data		*sc;		/* scaler data handle */
389 	struct csc_data		*csc;		/* csc data handle */
390 };
391 
392 /*
393  * There is one vpe_ctx structure for each m2m context.
394  */
395 struct vpe_ctx {
396 	struct v4l2_fh		fh;
397 	struct vpe_dev		*dev;
398 	struct v4l2_ctrl_handler hdl;
399 
400 	unsigned int		field;			/* current field */
401 	unsigned int		sequence;		/* current frame/field seq */
402 	unsigned int		aborting;		/* abort after next irq */
403 
404 	unsigned int		bufs_per_job;		/* input buffers per batch */
405 	unsigned int		bufs_completed;		/* bufs done in this batch */
406 
407 	struct vpe_q_data	q_data[2];		/* src & dst queue data */
408 	struct vb2_v4l2_buffer	*src_vbs[VPE_MAX_SRC_BUFS];
409 	struct vb2_v4l2_buffer	*dst_vb;
410 
411 	dma_addr_t		mv_buf_dma[2];		/* dma addrs of motion vector in/out bufs */
412 	void			*mv_buf[2];		/* virtual addrs of motion vector bufs */
413 	size_t			mv_buf_size;		/* current motion vector buffer size */
414 	struct vpdma_buf	mmr_adb;		/* shadow reg addr/data block */
415 	struct vpdma_buf	sc_coeff_h;		/* h coeff buffer */
416 	struct vpdma_buf	sc_coeff_v;		/* v coeff buffer */
417 	struct vpdma_desc_list	desc_list;		/* DMA descriptor list */
418 
419 	bool			deinterlacing;		/* using de-interlacer */
420 	bool			load_mmrs;		/* have new shadow reg values */
421 
422 	unsigned int		src_mv_buf_selector;
423 };
424 
425 static inline struct vpe_ctx *to_vpe_ctx(struct file *filp)
426 {
427 	return container_of(file_to_v4l2_fh(filp), struct vpe_ctx, fh);
428 }
429 
430 /*
431  * M2M devices get 2 queues.
432  * Return the queue given the type.
433  */
434 static struct vpe_q_data *get_q_data(struct vpe_ctx *ctx,
435 				     enum v4l2_buf_type type)
436 {
437 	switch (type) {
438 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
439 	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
440 		return &ctx->q_data[Q_DATA_SRC];
441 	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
442 	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
443 		return &ctx->q_data[Q_DATA_DST];
444 	default:
445 		return NULL;
446 	}
448 }
449 
450 static u32 read_reg(struct vpe_dev *dev, int offset)
451 {
452 	return ioread32(dev->base + offset);
453 }
454 
455 static void write_reg(struct vpe_dev *dev, int offset, u32 value)
456 {
457 	iowrite32(value, dev->base + offset);
458 }
459 
460 /* register field read/write helpers */
461 static int get_field(u32 value, u32 mask, int shift)
462 {
463 	return (value & (mask << shift)) >> shift;
464 }
465 
466 static int read_field_reg(struct vpe_dev *dev, int offset, u32 mask, int shift)
467 {
468 	return get_field(read_reg(dev, offset), mask, shift);
469 }
470 
471 static void write_field(u32 *valp, u32 field, u32 mask, int shift)
472 {
473 	u32 val = *valp;
474 
475 	val &= ~(mask << shift);
476 	val |= (field & mask) << shift;
477 	*valp = val;
478 }
479 
480 static void write_field_reg(struct vpe_dev *dev, int offset, u32 field,
481 		u32 mask, int shift)
482 {
483 	u32 val = read_reg(dev, offset);
484 
485 	write_field(&val, field, mask, shift);
486 
487 	write_reg(dev, offset, val);
488 }
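/*
 * Illustrative example (not part of the driver): to program a 2-bit field
 * at bits 5:4 of a register to the value 0x2, one would call
 * write_field_reg(dev, offset, 0x2, 0x3, 4); the helper reads the
 * register, clears bits 5:4 via ~(0x3 << 4), ORs in (0x2 & 0x3) << 4,
 * and writes the result back.
 */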
489 
490 /*
491  * DMA address/data block for the shadow registers
492  */
493 struct vpe_mmr_adb {
494 	struct vpdma_adb_hdr	out_fmt_hdr;
495 	u32			out_fmt_reg[1];
496 	u32			out_fmt_pad[3];
497 	struct vpdma_adb_hdr	us1_hdr;
498 	u32			us1_regs[8];
499 	struct vpdma_adb_hdr	us2_hdr;
500 	u32			us2_regs[8];
501 	struct vpdma_adb_hdr	us3_hdr;
502 	u32			us3_regs[8];
503 	struct vpdma_adb_hdr	dei_hdr;
504 	u32			dei_regs[8];
505 	struct vpdma_adb_hdr	sc_hdr0;
506 	u32			sc_regs0[7];
507 	u32			sc_pad0[1];
508 	struct vpdma_adb_hdr	sc_hdr8;
509 	u32			sc_regs8[6];
510 	u32			sc_pad8[2];
511 	struct vpdma_adb_hdr	sc_hdr17;
512 	u32			sc_regs17[9];
513 	u32			sc_pad17[3];
514 	struct vpdma_adb_hdr	csc_hdr;
515 	u32			csc_regs[6];
516 	u32			csc_pad[2];
517 };
518 
519 #define GET_OFFSET_TOP(ctx, obj, reg)	\
520 	((obj)->res->start - ctx->dev->res->start + reg)
521 
522 #define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a)	\
523 	VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, offset_a)
524 /*
525  * Set the headers for all of the address/data block structures.
526  */
527 static void init_adb_hdrs(struct vpe_ctx *ctx)
528 {
529 	VPE_SET_MMR_ADB_HDR(ctx, out_fmt_hdr, out_fmt_reg, VPE_CLK_FORMAT_SELECT);
530 	VPE_SET_MMR_ADB_HDR(ctx, us1_hdr, us1_regs, VPE_US1_R0);
531 	VPE_SET_MMR_ADB_HDR(ctx, us2_hdr, us2_regs, VPE_US2_R0);
532 	VPE_SET_MMR_ADB_HDR(ctx, us3_hdr, us3_regs, VPE_US3_R0);
533 	VPE_SET_MMR_ADB_HDR(ctx, dei_hdr, dei_regs, VPE_DEI_FRAME_SIZE);
534 	VPE_SET_MMR_ADB_HDR(ctx, sc_hdr0, sc_regs0,
535 		GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC0));
536 	VPE_SET_MMR_ADB_HDR(ctx, sc_hdr8, sc_regs8,
537 		GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC8));
538 	VPE_SET_MMR_ADB_HDR(ctx, sc_hdr17, sc_regs17,
539 		GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC17));
540 	VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs,
541 		GET_OFFSET_TOP(ctx, ctx->dev->csc, CSC_CSC00));
542 }
543 
544 /*
545  * Allocate or re-allocate the motion vector DMA buffers.
546  * There are two buffers, one for input and one for output.
547  * However, the roles are reversed after each field is processed.
548  * In other words, after each field is processed, the previous
549  * output (dst) MV buffer becomes the new input (src) MV buffer.
550  */
551 static int realloc_mv_buffers(struct vpe_ctx *ctx, size_t size)
552 {
553 	struct device *dev = ctx->dev->v4l2_dev.dev;
554 
555 	if (ctx->mv_buf_size == size)
556 		return 0;
557 
558 	if (ctx->mv_buf[0])
559 		dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[0],
560 			ctx->mv_buf_dma[0]);
561 
562 	if (ctx->mv_buf[1])
563 		dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[1],
564 			ctx->mv_buf_dma[1]);
565 
566 	if (size == 0)
567 		return 0;
568 
569 	ctx->mv_buf[0] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[0],
570 				GFP_KERNEL);
571 	if (!ctx->mv_buf[0]) {
572 		vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
573 		return -ENOMEM;
574 	}
575 
576 	ctx->mv_buf[1] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[1],
577 				GFP_KERNEL);
578 	if (!ctx->mv_buf[1]) {
579 		vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
580 		dma_free_coherent(dev, size, ctx->mv_buf[0],
581 			ctx->mv_buf_dma[0]);
582 
583 		return -ENOMEM;
584 	}
585 
586 	ctx->mv_buf_size = size;
587 	ctx->src_mv_buf_selector = 0;
588 
589 	return 0;
590 }
591 
592 static void free_mv_buffers(struct vpe_ctx *ctx)
593 {
594 	realloc_mv_buffers(ctx, 0);
595 }
596 
597 /*
598  * While de-interlacing, we keep the two most recent input buffers
599  * around.  This function frees those two buffers when we have
600  * finished processing the current stream.
601  */
602 static void free_vbs(struct vpe_ctx *ctx)
603 {
604 	struct vpe_dev *dev = ctx->dev;
605 	unsigned long flags;
606 
607 	if (ctx->src_vbs[2] == NULL)
608 		return;
609 
610 	spin_lock_irqsave(&dev->lock, flags);
611 	if (ctx->src_vbs[2]) {
612 		v4l2_m2m_buf_done(ctx->src_vbs[2], VB2_BUF_STATE_DONE);
613 		if (ctx->src_vbs[1] && (ctx->src_vbs[1] != ctx->src_vbs[2]))
614 			v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE);
615 		ctx->src_vbs[2] = NULL;
616 		ctx->src_vbs[1] = NULL;
617 	}
618 	spin_unlock_irqrestore(&dev->lock, flags);
619 }
620 
621 /*
622  * Enable or disable the VPE clocks
623  */
624 static void vpe_set_clock_enable(struct vpe_dev *dev, bool on)
625 {
626 	u32 val = 0;
627 
628 	if (on)
629 		val = VPE_DATA_PATH_CLK_ENABLE | VPE_VPEDMA_CLK_ENABLE;
630 	write_reg(dev, VPE_CLK_ENABLE, val);
631 }
632 
633 static void vpe_top_reset(struct vpe_dev *dev)
634 {
635 
636 	write_field_reg(dev, VPE_CLK_RESET, 1, VPE_DATA_PATH_CLK_RESET_MASK,
637 		VPE_DATA_PATH_CLK_RESET_SHIFT);
638 
639 	usleep_range(100, 150);
640 
641 	write_field_reg(dev, VPE_CLK_RESET, 0, VPE_DATA_PATH_CLK_RESET_MASK,
642 		VPE_DATA_PATH_CLK_RESET_SHIFT);
643 }
644 
645 static void vpe_top_vpdma_reset(struct vpe_dev *dev)
646 {
647 	write_field_reg(dev, VPE_CLK_RESET, 1, VPE_VPDMA_CLK_RESET_MASK,
648 		VPE_VPDMA_CLK_RESET_SHIFT);
649 
650 	usleep_range(100, 150);
651 
652 	write_field_reg(dev, VPE_CLK_RESET, 0, VPE_VPDMA_CLK_RESET_MASK,
653 		VPE_VPDMA_CLK_RESET_SHIFT);
654 }
655 
656 /*
657  * Load the correct set of upsampler coefficients into the shadow MMRs
658  */
659 static void set_us_coefficients(struct vpe_ctx *ctx)
660 {
661 	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
662 	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
663 	u32 *us1_reg = &mmr_adb->us1_regs[0];
664 	u32 *us2_reg = &mmr_adb->us2_regs[0];
665 	u32 *us3_reg = &mmr_adb->us3_regs[0];
666 	const unsigned short *cp, *end_cp;
667 
668 	cp = &us_coeffs[0].anchor_fid0_c0;
669 
670 	if (s_q_data->flags & Q_IS_INTERLACED)		/* interlaced */
671 		cp += sizeof(us_coeffs[0]) / sizeof(*cp);
672 
673 	end_cp = cp + sizeof(us_coeffs[0]) / sizeof(*cp);
674 
675 	while (cp < end_cp) {
676 		write_field(us1_reg, *cp++, VPE_US_C0_MASK, VPE_US_C0_SHIFT);
677 		write_field(us1_reg, *cp++, VPE_US_C1_MASK, VPE_US_C1_SHIFT);
678 		*us2_reg++ = *us1_reg;
679 		*us3_reg++ = *us1_reg++;
680 	}
681 	ctx->load_mmrs = true;
682 }
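/*
 * A note on the loop above: stepping cp by sizeof(us_coeffs[0]) /
 * sizeof(*cp) (16 shorts) moves it from us_coeffs[0] (progressive) to
 * us_coeffs[1] (interlaced). Each of the 8 iterations then packs a C0/C1
 * coefficient pair into one US register and mirrors it into all three
 * upsamplers, filling us1_regs[8], us2_regs[8] and us3_regs[8].
 */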
683 
684 /*
685  * Set the upsampler config mode and the VPDMA line mode in the shadow MMRs.
686  */
687 static void set_cfg_modes(struct vpe_ctx *ctx)
688 {
689 	struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
690 	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
691 	u32 *us1_reg0 = &mmr_adb->us1_regs[0];
692 	u32 *us2_reg0 = &mmr_adb->us2_regs[0];
693 	u32 *us3_reg0 = &mmr_adb->us3_regs[0];
694 	int cfg_mode = 1;
695 
696 	/*
697 	 * Cfg Mode 0: YUV420 source, enable upsampler, DEI is de-interlacing.
698 	 * Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing.
699 	 */
700 
701 	if (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
702 	    fmt->fourcc == V4L2_PIX_FMT_NV21)
703 		cfg_mode = 0;
704 
705 	write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
706 	write_field(us2_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
707 	write_field(us3_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
708 
709 	ctx->load_mmrs = true;
710 }
711 
712 static void set_line_modes(struct vpe_ctx *ctx)
713 {
714 	struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
715 	int line_mode = 1;
716 
717 	if (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
718 	    fmt->fourcc == V4L2_PIX_FMT_NV21)
719 		line_mode = 0;		/* double lines to line buffer */
720 
721 	/* set the line mode for the chroma input channels */
722 	vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN);
723 	vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA2_IN);
724 	vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA3_IN);
725 
726 	/* frame start for input luma */
727 	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
728 		VPE_CHAN_LUMA1_IN);
729 	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
730 		VPE_CHAN_LUMA2_IN);
731 	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
732 		VPE_CHAN_LUMA3_IN);
733 
734 	/* frame start for input chroma */
735 	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
736 		VPE_CHAN_CHROMA1_IN);
737 	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
738 		VPE_CHAN_CHROMA2_IN);
739 	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
740 		VPE_CHAN_CHROMA3_IN);
741 
742 	/* frame start for MV in client */
743 	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
744 		VPE_CHAN_MV_IN);
745 }
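/*
 * A note on line_mode 0 above: for the 4:2:0 formats (NV12/NV21) the
 * chroma plane has half the number of lines of the luma plane, so each
 * chroma line is doubled into the line buffer (the "double lines to line
 * buffer" mode) so the upsampler sees a chroma line per luma line.
 */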
746 
747 /*
748  * Set the shadow registers that are modified when the source
749  * format changes.
750  */
751 static void set_src_registers(struct vpe_ctx *ctx)
752 {
753 	set_us_coefficients(ctx);
754 }
755 
756 /*
757  * Set the shadow registers that are modified when the destination
758  * format changes.
759  */
760 static void set_dst_registers(struct vpe_ctx *ctx)
761 {
762 	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
763 	struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
764 	const struct v4l2_format_info *finfo;
765 	u32 val = 0;
766 
767 	finfo = v4l2_format_info(fmt->fourcc);
768 	if (v4l2_is_format_rgb(finfo)) {
769 		val |= VPE_RGB_OUT_SELECT;
770 		vpdma_set_bg_color(ctx->dev->vpdma,
771 			(struct vpdma_data_format *)fmt->vpdma_fmt[0], 0xff);
772 	} else if (fmt->fourcc == V4L2_PIX_FMT_NV16)
773 		val |= VPE_COLOR_SEPARATE_422;
774 
775 	/*
776 	 * the source of CHR_DS and CSC is always the scaler, irrespective of
777 	 * whether it's used or not
778 	 */
779 	val |= VPE_DS_SRC_DEI_SCALER | VPE_CSC_SRC_DEI_SCALER;
780 
781 	if (fmt->fourcc != V4L2_PIX_FMT_NV12 &&
782 	    fmt->fourcc != V4L2_PIX_FMT_NV21)
783 		val |= VPE_DS_BYPASS;
784 
785 	mmr_adb->out_fmt_reg[0] = val;
786 
787 	ctx->load_mmrs = true;
788 }
789 
790 /*
791  * Set the de-interlacer shadow register values
792  */
793 static void set_dei_regs(struct vpe_ctx *ctx)
794 {
795 	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
796 	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
797 	unsigned int src_h = s_q_data->c_rect.height;
798 	unsigned int src_w = s_q_data->c_rect.width;
799 	u32 *dei_mmr0 = &mmr_adb->dei_regs[0];
800 	bool deinterlace = true;
801 	u32 val = 0;
802 
803 	/*
804 	 * According to the TRM, we should set the DEI in progressive bypass mode
805 	 * when the input content is progressive. However, the DEI is bypassed
806 	 * correctly for both progressive and interlaced content in interlace
807 	 * bypass mode, and it has been recommended not to use progressive bypass mode.
808 	 */
809 	if (!(s_q_data->flags & Q_IS_INTERLACED) || !ctx->deinterlacing) {
810 		deinterlace = false;
811 		val = VPE_DEI_INTERLACE_BYPASS;
812 	}
813 
814 	src_h = deinterlace ? src_h * 2 : src_h;
815 
816 	val |= (src_h << VPE_DEI_HEIGHT_SHIFT) |
817 		(src_w << VPE_DEI_WIDTH_SHIFT) |
818 		VPE_DEI_FIELD_FLUSH;
819 
820 	*dei_mmr0 = val;
821 
822 	ctx->load_mmrs = true;
823 }
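/*
 * Worked example for the frame size above: a 1920x1080 interlaced source
 * is queued as 1920x540 fields (c_rect holds the field height), so with
 * de-interlacing active src_h is doubled back to 1080 before being
 * written into VPE_DEI_FRAME_SIZE.
 */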
824 
825 static void set_dei_shadow_registers(struct vpe_ctx *ctx)
826 {
827 	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
828 	u32 *dei_mmr = &mmr_adb->dei_regs[0];
829 	const struct vpe_dei_regs *cur = &dei_regs;
830 
831 	dei_mmr[2]  = cur->mdt_spacial_freq_thr_reg;
832 	dei_mmr[3]  = cur->edi_config_reg;
833 	dei_mmr[4]  = cur->edi_lut_reg0;
834 	dei_mmr[5]  = cur->edi_lut_reg1;
835 	dei_mmr[6]  = cur->edi_lut_reg2;
836 	dei_mmr[7]  = cur->edi_lut_reg3;
837 
838 	ctx->load_mmrs = true;
839 }
840 
841 static void config_edi_input_mode(struct vpe_ctx *ctx, int mode)
842 {
843 	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
844 	u32 *edi_config_reg = &mmr_adb->dei_regs[3];
845 
846 	if (mode & 0x2)
847 		write_field(edi_config_reg, 1, 1, 2);	/* EDI_ENABLE_3D */
848 
849 	if (mode & 0x3)
850 		write_field(edi_config_reg, 1, 1, 3);	/* EDI_CHROMA_3D  */
851 
852 	write_field(edi_config_reg, mode, VPE_EDI_INP_MODE_MASK,
853 		VPE_EDI_INP_MODE_SHIFT);
854 
855 	ctx->load_mmrs = true;
856 }
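/*
 * In this driver, mode 0 (line average) is used for the first two frames
 * of a stream, and mode 0x3 (EDI on both luma and chroma) is selected by
 * device_run() once two frames have been output; see the
 * ctx->sequence == 2 switch there.
 */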
857 
858 /*
859  * Set the shadow registers whose values are modified when either the
860  * source or destination format is changed.
861  */
862 static int set_srcdst_params(struct vpe_ctx *ctx)
863 {
864 	struct vpe_q_data *s_q_data =  &ctx->q_data[Q_DATA_SRC];
865 	struct vpe_q_data *d_q_data =  &ctx->q_data[Q_DATA_DST];
866 	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
867 	unsigned int src_w = s_q_data->c_rect.width;
868 	unsigned int src_h = s_q_data->c_rect.height;
869 	unsigned int dst_w = d_q_data->c_rect.width;
870 	unsigned int dst_h = d_q_data->c_rect.height;
871 	struct v4l2_pix_format_mplane *spix;
872 	size_t mv_buf_size;
873 	int ret;
874 
875 	ctx->sequence = 0;
876 	ctx->field = V4L2_FIELD_TOP;
877 	spix = &s_q_data->format.fmt.pix_mp;
878 
879 	if ((s_q_data->flags & Q_IS_INTERLACED) &&
880 			!(d_q_data->flags & Q_IS_INTERLACED)) {
881 		int bytes_per_line;
882 		const struct vpdma_data_format *mv =
883 			&vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
884 
885 		/*
886 		 * We make sure that the source image has a 16 byte aligned
887 		 * stride; we need to do the same for the motion vector buffer
888 		 * by aligning its stride to the next 16 byte boundary. This
889 		 * extra space will not be used by the de-interlacer, but it
890 		 * ensures that vpdma operates correctly.
891 		 */
892 		bytes_per_line = ALIGN((spix->width * mv->depth) >> 3,
893 				       VPDMA_STRIDE_ALIGN);
894 		mv_buf_size = bytes_per_line * spix->height;
895 
896 		ctx->deinterlacing = true;
897 		src_h <<= 1;
898 	} else {
899 		ctx->deinterlacing = false;
900 		mv_buf_size = 0;
901 	}
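	/*
	 * Rough worked example (assuming the VPDMA motion vector format's
	 * depth of 4 bits per pixel): for a 1920x1080 source,
	 * bytes_per_line = ALIGN((1920 * 4) >> 3, 16) = 960, so
	 * mv_buf_size = 960 * 1080, i.e. roughly 1 MiB per MV buffer.
	 */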
902 
903 	free_vbs(ctx);
904 	ctx->src_vbs[2] = ctx->src_vbs[1] = ctx->src_vbs[0] = NULL;
905 
906 	ret = realloc_mv_buffers(ctx, mv_buf_size);
907 	if (ret)
908 		return ret;
909 
910 	set_cfg_modes(ctx);
911 	set_dei_regs(ctx);
912 
913 	csc_set_coeff(ctx->dev->csc, &mmr_adb->csc_regs[0],
914 		      &s_q_data->format, &d_q_data->format);
915 
916 	sc_set_hs_coeffs(ctx->dev->sc, ctx->sc_coeff_h.addr, src_w, dst_w);
917 	sc_set_vs_coeffs(ctx->dev->sc, ctx->sc_coeff_v.addr, src_h, dst_h);
918 
919 	sc_config_scaler(ctx->dev->sc, &mmr_adb->sc_regs0[0],
920 		&mmr_adb->sc_regs8[0], &mmr_adb->sc_regs17[0],
921 		src_w, src_h, dst_w, dst_h);
922 
923 	return 0;
924 }
925 
926 /*
927  * mem2mem callbacks
928  */
929 
930 /*
931  * job_ready() - check whether an instance is ready to be scheduled to run
932  */
933 static int job_ready(void *priv)
934 {
935 	struct vpe_ctx *ctx = priv;
936 
937 	/*
938 	 * This check is needed as this might be called directly from the driver.
939 	 * When called by the m2m framework it will always be satisfied, but when
940 	 * called from vpe_irq it might fail (e.g. a src stream with zero buffers).
941 	 */
942 	if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) <= 0 ||
943 		v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) <= 0)
944 		return 0;
945 
946 	return 1;
947 }
948 
949 static void job_abort(void *priv)
950 {
951 	struct vpe_ctx *ctx = priv;
952 
953 	/* Will cancel the transaction in the next interrupt handler */
954 	ctx->aborting = 1;
955 }
956 
957 static void vpe_dump_regs(struct vpe_dev *dev)
958 {
959 #define DUMPREG(r) vpe_dbg(dev, "%-35s %08x\n", #r, read_reg(dev, VPE_##r))
960 
961 	vpe_dbg(dev, "VPE Registers:\n");
962 
963 	DUMPREG(PID);
964 	DUMPREG(SYSCONFIG);
965 	DUMPREG(INT0_STATUS0_RAW);
966 	DUMPREG(INT0_STATUS0);
967 	DUMPREG(INT0_ENABLE0);
968 	DUMPREG(INT0_STATUS1_RAW);
969 	DUMPREG(INT0_STATUS1);
970 	DUMPREG(INT0_ENABLE1);
971 	DUMPREG(CLK_ENABLE);
972 	DUMPREG(CLK_RESET);
973 	DUMPREG(CLK_FORMAT_SELECT);
974 	DUMPREG(CLK_RANGE_MAP);
975 	DUMPREG(US1_R0);
976 	DUMPREG(US1_R1);
977 	DUMPREG(US1_R2);
978 	DUMPREG(US1_R3);
979 	DUMPREG(US1_R4);
980 	DUMPREG(US1_R5);
981 	DUMPREG(US1_R6);
982 	DUMPREG(US1_R7);
983 	DUMPREG(US2_R0);
984 	DUMPREG(US2_R1);
985 	DUMPREG(US2_R2);
986 	DUMPREG(US2_R3);
987 	DUMPREG(US2_R4);
988 	DUMPREG(US2_R5);
989 	DUMPREG(US2_R6);
990 	DUMPREG(US2_R7);
991 	DUMPREG(US3_R0);
992 	DUMPREG(US3_R1);
993 	DUMPREG(US3_R2);
994 	DUMPREG(US3_R3);
995 	DUMPREG(US3_R4);
996 	DUMPREG(US3_R5);
997 	DUMPREG(US3_R6);
998 	DUMPREG(US3_R7);
999 	DUMPREG(DEI_FRAME_SIZE);
1000 	DUMPREG(MDT_BYPASS);
1001 	DUMPREG(MDT_SF_THRESHOLD);
1002 	DUMPREG(EDI_CONFIG);
1003 	DUMPREG(DEI_EDI_LUT_R0);
1004 	DUMPREG(DEI_EDI_LUT_R1);
1005 	DUMPREG(DEI_EDI_LUT_R2);
1006 	DUMPREG(DEI_EDI_LUT_R3);
1007 	DUMPREG(DEI_FMD_WINDOW_R0);
1008 	DUMPREG(DEI_FMD_WINDOW_R1);
1009 	DUMPREG(DEI_FMD_CONTROL_R0);
1010 	DUMPREG(DEI_FMD_CONTROL_R1);
1011 	DUMPREG(DEI_FMD_STATUS_R0);
1012 	DUMPREG(DEI_FMD_STATUS_R1);
1013 	DUMPREG(DEI_FMD_STATUS_R2);
1014 #undef DUMPREG
1015 
1016 	sc_dump_regs(dev->sc);
1017 	csc_dump_regs(dev->csc);
1018 }
1019 
1020 static void add_out_dtd(struct vpe_ctx *ctx, int port)
1021 {
1022 	struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST];
1023 	const struct vpe_port_data *p_data = &port_data[port];
1024 	struct vb2_buffer *vb = &ctx->dst_vb->vb2_buf;
1025 	struct vpe_fmt *fmt = q_data->fmt;
1026 	const struct vpdma_data_format *vpdma_fmt;
1027 	int mv_buf_selector = !ctx->src_mv_buf_selector;
1028 	struct v4l2_pix_format_mplane *pix;
1029 	dma_addr_t dma_addr;
1030 	u32 flags = 0;
1031 	u32 offset = 0;
1032 	u32 stride;
1033 
1034 	if (port == VPE_PORT_MV_OUT) {
1035 		vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
1036 		dma_addr = ctx->mv_buf_dma[mv_buf_selector];
1037 		q_data = &ctx->q_data[Q_DATA_SRC];
1038 		pix = &q_data->format.fmt.pix_mp;
1039 		stride = ALIGN((pix->width * vpdma_fmt->depth) >> 3,
1040 			       VPDMA_STRIDE_ALIGN);
1041 	} else {
1042 		/* to incorporate interleaved formats */
1043 		int plane = fmt->coplanar ? p_data->vb_part : 0;
1044 
1045 		pix = &q_data->format.fmt.pix_mp;
1046 		vpdma_fmt = fmt->vpdma_fmt[plane];
1047 		/*
1048 		 * If we are using a single plane buffer, we need to feed the
1049 		 * separate vpdma chroma channel at an offset within the plane.
1050 		 */
1051 		if (pix->num_planes == 1 && plane) {
1052 			dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
1053 			/* Compute required offset */
1054 			offset = pix->plane_fmt[0].bytesperline * pix->height;
1055 		} else {
1056 			dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
1057 			/* Use address as is, no offset */
1058 			offset = 0;
1059 		}
1060 		if (!dma_addr) {
1061 			vpe_err(ctx->dev,
1062 				"acquiring output buffer(%d) dma_addr failed\n",
1063 				port);
1064 			return;
1065 		}
1066 		/* Apply the offset */
1067 		dma_addr += offset;
1068 		stride = pix->plane_fmt[VPE_LUMA].bytesperline;
1069 	}
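	/*
	 * Offset example for the single-plane co-planar case above: with
	 * NV12 at 1280x720 and bytesperline = 1280, the chroma plane starts
	 * at 1280 * 720 = 921600 bytes into the buffer.
	 */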
1070 
1071 	if (q_data->flags & Q_DATA_FRAME_1D)
1072 		flags |= VPDMA_DATA_FRAME_1D;
1073 	if (q_data->flags & Q_DATA_MODE_TILED)
1074 		flags |= VPDMA_DATA_MODE_TILED;
1075 
1076 	vpdma_set_max_size(ctx->dev->vpdma, VPDMA_MAX_SIZE1,
1077 			   MAX_W, MAX_H);
1078 
1079 	vpdma_add_out_dtd(&ctx->desc_list, pix->width,
1080 			  stride, &q_data->c_rect,
1081 			  vpdma_fmt, dma_addr, MAX_OUT_WIDTH_REG1,
1082 			  MAX_OUT_HEIGHT_REG1, p_data->channel, flags);
1083 }
1084 
1085 static void add_in_dtd(struct vpe_ctx *ctx, int port)
1086 {
1087 	struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC];
1088 	const struct vpe_port_data *p_data = &port_data[port];
1089 	struct vb2_buffer *vb = &ctx->src_vbs[p_data->vb_index]->vb2_buf;
1090 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1091 	struct vpe_fmt *fmt = q_data->fmt;
1092 	struct v4l2_pix_format_mplane *pix;
1093 	const struct vpdma_data_format *vpdma_fmt;
1094 	int mv_buf_selector = ctx->src_mv_buf_selector;
1095 	int field = vbuf->field == V4L2_FIELD_BOTTOM;
1096 	int frame_width, frame_height;
1097 	dma_addr_t dma_addr;
1098 	u32 flags = 0;
1099 	u32 offset = 0;
1100 	u32 stride;
1101 
1102 	pix = &q_data->format.fmt.pix_mp;
1103 	if (port == VPE_PORT_MV_IN) {
1104 		vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
1105 		dma_addr = ctx->mv_buf_dma[mv_buf_selector];
1106 		stride = ALIGN((pix->width * vpdma_fmt->depth) >> 3,
1107 			       VPDMA_STRIDE_ALIGN);
1108 	} else {
1109 		/* to incorporate interleaved formats */
1110 		int plane = fmt->coplanar ? p_data->vb_part : 0;
1111 
1112 		vpdma_fmt = fmt->vpdma_fmt[plane];
1113 		/*
1114 		 * If we are using a single plane buffer, we need to feed the
1115 		 * separate vpdma chroma channel at an offset within the plane.
1116 		 */
1117 		if (pix->num_planes == 1 && plane) {
1118 			dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
1119 			/* Compute required offset */
1120 			offset = pix->plane_fmt[0].bytesperline * pix->height;
1121 		} else {
1122 			dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
1123 			/* Use address as is, no offset */
1124 			offset = 0;
1125 		}
1126 		if (!dma_addr) {
1127 			vpe_err(ctx->dev,
1128 				"acquiring output buffer(%d) dma_addr failed\n",
1129 				port);
1130 			return;
1131 		}
1132 		/* Apply the offset */
1133 		dma_addr += offset;
1134 		stride = pix->plane_fmt[VPE_LUMA].bytesperline;
1135 
1136 		/*
1137 		 * The field used in the VPDMA desc = 0 (top) / 1 (bottom).
1138 		 * Use the top or bottom field from the same vb alternately:
1139 		 * for each de-interlacing operation, f, f-1, f-2 should form
1140 		 * either TBT or BTB.
1141 		 */
1142 		if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB ||
1143 		    q_data->flags & Q_DATA_INTERLACED_SEQ_BT) {
1144 			/* Select initial value based on format */
1145 			if (q_data->flags & Q_DATA_INTERLACED_SEQ_BT)
1146 				field = 1;
1147 			else
1148 				field = 0;
1149 
1150 			/* Toggle for each vb_index and each operation */
1151 			field = (field + p_data->vb_index + ctx->sequence) % 2;
1152 
1153 			if (field) {
1154 				int height = pix->height / 2;
1155 				int bpp;
1156 
1157 				if (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
1158 				    fmt->fourcc == V4L2_PIX_FMT_NV21)
1159 					bpp = 1;
1160 				else
1161 					bpp = vpdma_fmt->depth >> 3;
1162 
1163 				if (plane)
1164 					height /= 2;
1165 
1166 				dma_addr += pix->width * height * bpp;
1167 			}
1168 		}
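		/*
		 * Field offset example: for a 1920x1080 NV12 SEQ_TB buffer,
		 * the bottom-field luma starts at 1920 * 540 * 1 bytes and
		 * the bottom-field chroma at 1920 * 270 * 1 bytes into their
		 * respective planes (bpp is 1 for NV12/NV21 here).
		 */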
1169 	}
1170 
1171 	if (q_data->flags & Q_DATA_FRAME_1D)
1172 		flags |= VPDMA_DATA_FRAME_1D;
1173 	if (q_data->flags & Q_DATA_MODE_TILED)
1174 		flags |= VPDMA_DATA_MODE_TILED;
1175 
1176 	frame_width = q_data->c_rect.width;
1177 	frame_height = q_data->c_rect.height;
1178 
1179 	if (p_data->vb_part && (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
1180 				fmt->fourcc == V4L2_PIX_FMT_NV21))
1181 		frame_height /= 2;
1182 
1183 	vpdma_add_in_dtd(&ctx->desc_list, pix->width, stride,
1184 			 &q_data->c_rect, vpdma_fmt, dma_addr,
1185 			 p_data->channel, field, flags, frame_width,
1186 			 frame_height, 0, 0);
1187 }
1188 
1189 /*
1190  * Enable the expected IRQ sources
1191  */
1192 static void enable_irqs(struct vpe_ctx *ctx)
1193 {
1194 	write_reg(ctx->dev, VPE_INT0_ENABLE0_SET, VPE_INT0_LIST0_COMPLETE);
1195 	write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DEI_ERROR_INT |
1196 				VPE_DS1_UV_ERROR_INT);
1197 
1198 	vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, 0, true);
1199 }
1200 
1201 static void disable_irqs(struct vpe_ctx *ctx)
1202 {
1203 	write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff);
1204 	write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff);
1205 
1206 	vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, 0, false);
1207 }
1208 
1209 /* device_run() - prepares and starts the device
1210  *
1211  * This function is only called when both the source and destination
1212  * buffers are in place.
1213  */
1214 static void device_run(void *priv)
1215 {
1216 	struct vpe_ctx *ctx = priv;
1217 	struct sc_data *sc = ctx->dev->sc;
1218 	struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
1219 	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
1220 	const struct v4l2_format_info *d_finfo;
1221 
1222 	d_finfo = v4l2_format_info(d_q_data->fmt->fourcc);
1223 
1224 	if (ctx->deinterlacing && s_q_data->flags & Q_IS_SEQ_XX &&
1225 	    ctx->sequence % 2 == 0) {
1226 		/*
1227 		 * When using SEQ_XX type buffers, each buffer holds two fields
1228 		 * (top & bottom), so removing one buffer actually consumes two
1229 		 * fields. Alternate between two operations:
1230 		 * Even: consume one field but DO NOT REMOVE it from the queue
1231 		 * Odd : consume the other field and REMOVE it from the queue
1232 		 */
1233 		ctx->src_vbs[0] = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
1234 		WARN_ON(ctx->src_vbs[0] == NULL);
1235 	} else {
1236 		ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
1237 		WARN_ON(ctx->src_vbs[0] == NULL);
1238 	}
1239 
1240 	ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
1241 	WARN_ON(ctx->dst_vb == NULL);
1242 
1243 	if (ctx->deinterlacing) {
1244 
1245 		if (ctx->src_vbs[2] == NULL) {
1246 			ctx->src_vbs[2] = ctx->src_vbs[0];
1247 			WARN_ON(ctx->src_vbs[2] == NULL);
1248 			ctx->src_vbs[1] = ctx->src_vbs[0];
1249 			WARN_ON(ctx->src_vbs[1] == NULL);
1250 		}
1251 
1252 		/*
1253 		 * We have output the first 2 frames through line averaging;
1254 		 * now switch to the EDI de-interlacer.
1255 		 */
1256 		if (ctx->sequence == 2)
1257 			config_edi_input_mode(ctx, 0x3); /* EDI (Y + UV) */
1258 	}
1259 
1260 	/* config descriptors */
1261 	if (ctx->dev->loaded_mmrs != ctx->mmr_adb.dma_addr || ctx->load_mmrs) {
1262 		vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->mmr_adb);
1263 		vpdma_add_cfd_adb(&ctx->desc_list, CFD_MMR_CLIENT, &ctx->mmr_adb);
1264 
1265 		set_line_modes(ctx);
1266 
1267 		ctx->dev->loaded_mmrs = ctx->mmr_adb.dma_addr;
1268 		ctx->load_mmrs = false;
1269 	}
1270 
1271 	if (sc->loaded_coeff_h != ctx->sc_coeff_h.dma_addr ||
1272 			sc->load_coeff_h) {
1273 		vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_h);
1274 		vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
1275 			&ctx->sc_coeff_h, 0);
1276 
1277 		sc->loaded_coeff_h = ctx->sc_coeff_h.dma_addr;
1278 		sc->load_coeff_h = false;
1279 	}
1280 
1281 	if (sc->loaded_coeff_v != ctx->sc_coeff_v.dma_addr ||
1282 			sc->load_coeff_v) {
1283 		vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_v);
1284 		vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
1285 			&ctx->sc_coeff_v, SC_COEF_SRAM_SIZE >> 4);
1286 
1287 		sc->loaded_coeff_v = ctx->sc_coeff_v.dma_addr;
1288 		sc->load_coeff_v = false;
1289 	}
1290 
1291 	/* output data descriptors */
1292 	if (ctx->deinterlacing)
1293 		add_out_dtd(ctx, VPE_PORT_MV_OUT);
1294 
1295 	if (v4l2_is_format_rgb(d_finfo)) {
1296 		add_out_dtd(ctx, VPE_PORT_RGB_OUT);
1297 	} else {
1298 		add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
1299 		if (d_q_data->fmt->coplanar)
1300 			add_out_dtd(ctx, VPE_PORT_CHROMA_OUT);
1301 	}
1302 
1303 	/* input data descriptors */
1304 	if (ctx->deinterlacing) {
1305 		add_in_dtd(ctx, VPE_PORT_LUMA3_IN);
1306 		add_in_dtd(ctx, VPE_PORT_CHROMA3_IN);
1307 
1308 		add_in_dtd(ctx, VPE_PORT_LUMA2_IN);
1309 		add_in_dtd(ctx, VPE_PORT_CHROMA2_IN);
1310 	}
1311 
1312 	add_in_dtd(ctx, VPE_PORT_LUMA1_IN);
1313 	add_in_dtd(ctx, VPE_PORT_CHROMA1_IN);
1314 
1315 	if (ctx->deinterlacing)
1316 		add_in_dtd(ctx, VPE_PORT_MV_IN);
1317 
1318 	/* sync on channel control descriptors for input ports */
1319 	vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA1_IN);
1320 	vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA1_IN);
1321 
1322 	if (ctx->deinterlacing) {
1323 		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1324 			VPE_CHAN_LUMA2_IN);
1325 		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1326 			VPE_CHAN_CHROMA2_IN);
1327 
1328 		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1329 			VPE_CHAN_LUMA3_IN);
1330 		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1331 			VPE_CHAN_CHROMA3_IN);
1332 
1333 		vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_IN);
1334 	}
1335 
1336 	/* sync on channel control descriptors for output ports */
1337 	if (v4l2_is_format_rgb(d_finfo)) {
1338 		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1339 			VPE_CHAN_RGB_OUT);
1340 	} else {
1341 		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1342 			VPE_CHAN_LUMA_OUT);
1343 		if (d_q_data->fmt->coplanar)
1344 			vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
1345 				VPE_CHAN_CHROMA_OUT);
1346 	}
1347 
1348 	if (ctx->deinterlacing)
1349 		vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_OUT);
1350 
1351 	enable_irqs(ctx);
1352 
1353 	vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf);
1354 	vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list, 0);
1355 }
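/*
 * To summarize the descriptor list built above: optional config
 * descriptors (MMR shadow block, scaler coefficients) come first,
 * followed by the output and then the input data transfer descriptors,
 * then sync-on-channel control descriptors for every active port, before
 * the list is mapped and submitted to VPDMA list 0.
 */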
1356 
1357 static void dei_error(struct vpe_ctx *ctx)
1358 {
1359 	dev_warn(ctx->dev->v4l2_dev.dev,
1360 		"received DEI error interrupt\n");
1361 }
1362 
1363 static void ds1_uv_error(struct vpe_ctx *ctx)
1364 {
1365 	dev_warn(ctx->dev->v4l2_dev.dev,
1366 		"received downsampler error interrupt\n");
1367 }
1368 
1369 static irqreturn_t vpe_irq(int irq_vpe, void *data)
1370 {
1371 	struct vpe_dev *dev = (struct vpe_dev *)data;
1372 	struct vpe_ctx *ctx;
1373 	struct vpe_q_data *d_q_data;
1374 	struct vb2_v4l2_buffer *s_vb, *d_vb;
1375 	unsigned long flags;
1376 	u32 irqst0, irqst1;
1377 	bool list_complete = false;
1378 
1379 	irqst0 = read_reg(dev, VPE_INT0_STATUS0);
1380 	if (irqst0) {
1381 		write_reg(dev, VPE_INT0_STATUS0_CLR, irqst0);
1382 		vpe_dbg(dev, "INT0_STATUS0 = 0x%08x\n", irqst0);
1383 	}
1384 
1385 	irqst1 = read_reg(dev, VPE_INT0_STATUS1);
1386 	if (irqst1) {
1387 		write_reg(dev, VPE_INT0_STATUS1_CLR, irqst1);
1388 		vpe_dbg(dev, "INT0_STATUS1 = 0x%08x\n", irqst1);
1389 	}
1390 
1391 	ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
1392 	if (!ctx) {
1393 		vpe_err(dev, "instance released before end of transaction\n");
1394 		goto handled;
1395 	}
1396 
1397 	if (irqst1) {
1398 		if (irqst1 & VPE_DEI_ERROR_INT) {
1399 			irqst1 &= ~VPE_DEI_ERROR_INT;
1400 			dei_error(ctx);
1401 		}
1402 		if (irqst1 & VPE_DS1_UV_ERROR_INT) {
1403 			irqst1 &= ~VPE_DS1_UV_ERROR_INT;
1404 			ds1_uv_error(ctx);
1405 		}
1406 	}
1407 
1408 	if (irqst0) {
1409 		if (irqst0 & VPE_INT0_LIST0_COMPLETE)
1410 			vpdma_clear_list_stat(ctx->dev->vpdma, 0, 0);
1411 
1412 		irqst0 &= ~(VPE_INT0_LIST0_COMPLETE);
1413 		list_complete = true;
1414 	}
1415 
1416 	if (irqst0 | irqst1) {
1417 		dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n",
1418 			irqst0, irqst1);
1419 	}
1420 
1421 	/*
1422 	 * Set up the next operation only when the list complete IRQ occurs;
1423 	 * otherwise, skip the following code.
1424 	 */
1425 	if (!list_complete)
1426 		goto handled;
1427 
1428 	disable_irqs(ctx);
1429 
1430 	vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
1431 	vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
1432 	vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
1433 	vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);
1434 
1435 	vpdma_reset_desc_list(&ctx->desc_list);
1436 
1437 	/* the previous dst mv buffer becomes the next src mv buffer */
1438 	ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;
1439 
1440 	s_vb = ctx->src_vbs[0];
1441 	d_vb = ctx->dst_vb;
1442 
1443 	d_vb->flags = s_vb->flags;
1444 	d_vb->vb2_buf.timestamp = s_vb->vb2_buf.timestamp;
1445 
1446 	if (s_vb->flags & V4L2_BUF_FLAG_TIMECODE)
1447 		d_vb->timecode = s_vb->timecode;
1448 
1449 	d_vb->sequence = ctx->sequence;
1450 	s_vb->sequence = ctx->sequence;
1451 
1452 	d_q_data = &ctx->q_data[Q_DATA_DST];
1453 	if (d_q_data->flags & Q_IS_INTERLACED) {
1454 		d_vb->field = ctx->field;
1455 		if (ctx->field == V4L2_FIELD_BOTTOM) {
1456 			ctx->sequence++;
1457 			ctx->field = V4L2_FIELD_TOP;
1458 		} else {
1459 			WARN_ON(ctx->field != V4L2_FIELD_TOP);
1460 			ctx->field = V4L2_FIELD_BOTTOM;
1461 		}
1462 	} else {
1463 		d_vb->field = V4L2_FIELD_NONE;
1464 		ctx->sequence++;
1465 	}
1466 
1467 	if (ctx->deinterlacing) {
1468 		/*
1469 		 * Allow the source buffer to be dequeued only if it won't be
1470 		 * used in the next iteration. All vbs are initialized to the
1471 		 * first buffer and we shift buffers every iteration, so for
1472 		 * the first two iterations no buffer will be dequeued. This
1473 		 * ensures that the driver keeps the (n-2)th, (n-1)th and (n)th
1474 		 * fields while deinterlacing is enabled.
1475 		 */
1476 		if (ctx->src_vbs[2] != ctx->src_vbs[1])
1477 			s_vb = ctx->src_vbs[2];
1478 		else
1479 			s_vb = NULL;
1480 	}
1481 
1482 	spin_lock_irqsave(&dev->lock, flags);
1483 
1484 	if (s_vb)
1485 		v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);
1486 
1487 	v4l2_m2m_buf_done(d_vb, VB2_BUF_STATE_DONE);
1488 
1489 	spin_unlock_irqrestore(&dev->lock, flags);
1490 
1491 	if (ctx->deinterlacing) {
1492 		ctx->src_vbs[2] = ctx->src_vbs[1];
1493 		ctx->src_vbs[1] = ctx->src_vbs[0];
1494 	}
1495 
1496 	/*
1497 	 * Since vb2_buf_done() has already been called for these buffers,
1498 	 * we can now NULL them out so that we won't try to clean out
1499 	 * stray pointers later on.
1500 	 */
1501 	ctx->src_vbs[0] = NULL;
1502 	ctx->dst_vb = NULL;
1503 
1504 	if (ctx->aborting)
1505 		goto finished;
1506 
1507 	ctx->bufs_completed++;
1508 	if (ctx->bufs_completed < ctx->bufs_per_job && job_ready(ctx)) {
1509 		device_run(ctx);
1510 		goto handled;
1511 	}
1512 
1513 finished:
1514 	vpe_dbg(ctx->dev, "finishing transaction\n");
1515 	ctx->bufs_completed = 0;
1516 	v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
1517 handled:
1518 	return IRQ_HANDLED;
1519 }
1520 
1521 /*
1522  * video ioctls
1523  */
1524 static int vpe_querycap(struct file *file, void *priv,
1525 			struct v4l2_capability *cap)
1526 {
1527 	strscpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver));
1528 	strscpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card));
1529 	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
1530 		VPE_MODULE_NAME);
1531 	return 0;
1532 }
1533 
1534 static int __enum_fmt(struct v4l2_fmtdesc *f, u32 type)
1535 {
1536 	int i, index;
1537 	struct vpe_fmt *fmt = NULL;
1538 
1539 	index = 0;
1540 	for (i = 0; i < ARRAY_SIZE(vpe_formats); ++i) {
1541 		if (vpe_formats[i].types & type) {
1542 			if (index == f->index) {
1543 				fmt = &vpe_formats[i];
1544 				break;
1545 			}
1546 			index++;
1547 		}
1548 	}
1549 
1550 	if (!fmt)
1551 		return -EINVAL;
1552 
1553 	f->pixelformat = fmt->fourcc;
1554 	return 0;
1555 }
1556 
1557 static int vpe_enum_fmt(struct file *file, void *priv,
1558 				struct v4l2_fmtdesc *f)
1559 {
1560 	if (V4L2_TYPE_IS_OUTPUT(f->type))
1561 		return __enum_fmt(f, VPE_FMT_TYPE_OUTPUT);
1562 
1563 	return __enum_fmt(f, VPE_FMT_TYPE_CAPTURE);
1564 }
1565 
1566 static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
1567 {
1568 	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
1569 	struct vpe_ctx *ctx = to_vpe_ctx(file);
1570 	struct vb2_queue *vq;
1571 	struct vpe_q_data *q_data;
1572 
1573 	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
1574 	if (!vq)
1575 		return -EINVAL;
1576 
1577 	q_data = get_q_data(ctx, f->type);
1578 	if (!q_data)
1579 		return -EINVAL;
1580 
1581 	*f = q_data->format;
1582 
1583 	if (V4L2_TYPE_IS_CAPTURE(f->type)) {
1584 		struct vpe_q_data *s_q_data;
1585 		struct v4l2_pix_format_mplane *spix;
1586 
1587 		/* get colorimetry from the source queue */
1588 		s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
1589 		spix = &s_q_data->format.fmt.pix_mp;
1590 
1591 		pix->colorspace = spix->colorspace;
1592 		pix->xfer_func = spix->xfer_func;
1593 		pix->ycbcr_enc = spix->ycbcr_enc;
1594 		pix->quantization = spix->quantization;
1595 	}
1596 
1597 	return 0;
1598 }
1599 
1600 static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
1601 		       struct vpe_fmt *fmt, int type)
1602 {
1603 	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
1604 	struct v4l2_plane_pix_format *plane_fmt;
1605 	unsigned int w_align;
1606 	int i, depth, depth_bytes, height;
1607 	unsigned int stride = 0;
1608 	const struct v4l2_format_info *finfo;
1609 
1610 	if (!fmt || !(fmt->types & type)) {
1611 		vpe_dbg(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
1612 			pix->pixelformat);
1613 		fmt = __find_format(V4L2_PIX_FMT_YUYV);
1614 	}
1615 
1616 	if (pix->field != V4L2_FIELD_NONE &&
1617 	    pix->field != V4L2_FIELD_ALTERNATE &&
1618 	    pix->field != V4L2_FIELD_SEQ_TB &&
1619 	    pix->field != V4L2_FIELD_SEQ_BT)
1620 		pix->field = V4L2_FIELD_NONE;
1621 
1622 	depth = fmt->vpdma_fmt[VPE_LUMA]->depth;
1623 
1624 	/*
1625 	 * the line stride should be 16 byte aligned for VPDMA to work. Based
1626 	 * on the bytes per pixel, figure out how much the width should be
1627 	 * aligned to make sure the line stride is 16 byte aligned
1628 	 */
1629 	depth_bytes = depth >> 3;
1630 
1631 	if (depth_bytes == 3) {
1632 		/*
1633 		 * if bpp is 3 (as in some RGB formats), the pixel width doesn't
1634 		 * really help in ensuring line stride is 16 byte aligned
1635 		 */
1636 		w_align = 4;
1637 	} else {
1638 		/*
1639 		 * for the remaining bpp values (4, 2 and 1), pixel width alignment
1640 		 * can ensure a line stride alignment of 16 bytes. For example,
1641 		 * if bpp is 2, then the line stride is 16 byte aligned when
1642 		 * the width is aligned to 8 pixels
1643 		 */
1644 
1645 		/*
1646 		 * HACK: using order_base_2() here causes lots of asm output
1647 		 * errors with smatch, on i386:
1648 		 * ./arch/x86/include/asm/bitops.h:457:22:
1649 		 *		 warning: asm output is not an lvalue
1650 		 * Perhaps some gcc optimization is doing the wrong thing
1651 		 * there.
1652 		 * Let's get rid of them by doing the calculation in two steps
1653 		 */
1654 		w_align = roundup_pow_of_two(VPDMA_DESC_ALIGN / depth_bytes);
1655 		w_align = ilog2(w_align);
1656 	}
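	/*
	 * Worked example, assuming VPDMA_DESC_ALIGN is 16 bytes: for YUYV
	 * (depth_bytes = 2), roundup_pow_of_two(16 / 2) = 8, so w_align =
	 * ilog2(8) = 3 and the width below gets aligned to 8 pixels, giving
	 * a 16 byte aligned line stride.
	 */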
1657 
1658 	v4l_bound_align_image(&pix->width, MIN_W, MAX_W, w_align,
1659 			      &pix->height, MIN_H, MAX_H, H_ALIGN,
1660 			      S_ALIGN);
1661 
1662 	if (!pix->num_planes || pix->num_planes > 2)
1663 		pix->num_planes = fmt->coplanar ? 2 : 1;
1664 	else if (pix->num_planes > 1 && !fmt->coplanar)
1665 		pix->num_planes = 1;
1666 
1667 	pix->pixelformat = fmt->fourcc;
1668 	finfo = v4l2_format_info(fmt->fourcc);
1669 
1670 	/*
1671 	 * For the actual image parameters, we need to consider the field
1672 	 * height of the image for SEQ_XX buffers.
1673 	 */
1674 	if (pix->field == V4L2_FIELD_SEQ_TB || pix->field == V4L2_FIELD_SEQ_BT)
1675 		height = pix->height / 2;
1676 	else
1677 		height = pix->height;
1678 
1679 	if (!pix->colorspace) {
1680 		if (v4l2_is_format_rgb(finfo)) {
1681 			pix->colorspace = V4L2_COLORSPACE_SRGB;
1682 		} else {
1683 			if (height > 1280)	/* HD */
1684 				pix->colorspace = V4L2_COLORSPACE_REC709;
1685 			else			/* SD */
1686 				pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
1687 		}
1688 	}
1689 
1690 	for (i = 0; i < pix->num_planes; i++) {
1691 		plane_fmt = &pix->plane_fmt[i];
1692 		depth = fmt->vpdma_fmt[i]->depth;
1693 
1694 		stride = (pix->width * fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
1695 		if (stride > plane_fmt->bytesperline)
1696 			plane_fmt->bytesperline = stride;
1697 
1698 		plane_fmt->bytesperline = clamp_t(u32, plane_fmt->bytesperline,
1699 						  stride,
1700 						  VPDMA_MAX_STRIDE);
1701 
1702 		plane_fmt->bytesperline = ALIGN(plane_fmt->bytesperline,
1703 						VPDMA_STRIDE_ALIGN);
1704 
1705 		if (i == VPE_LUMA) {
1706 			plane_fmt->sizeimage = pix->height *
1707 					       plane_fmt->bytesperline;
1708 
1709 			if (pix->num_planes == 1 && fmt->coplanar)
1710 				plane_fmt->sizeimage += pix->height *
1711 					plane_fmt->bytesperline *
1712 					fmt->vpdma_fmt[VPE_CHROMA]->depth >> 3;
1713 
1714 		} else { /* i == VPE_CHROMA */
1715 			plane_fmt->sizeimage = (pix->height *
1716 					       plane_fmt->bytesperline *
1717 					       depth) >> 3;
1718 		}
1719 	}
1720 
1721 	return 0;
1722 }
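/*
 * Sizeimage example for the loop above, assuming the C420 vpdma depth of
 * 4 bits: single-plane NV12 at 1280x720 with bytesperline = 1280 gives a
 * luma part of 921600 bytes plus a chroma part of
 * 1280 * 720 * 4 / 8 = 460800 bytes, i.e. the usual 1.5 bytes per pixel.
 */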
1723 
1724 static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
1725 {
1726 	struct vpe_ctx *ctx = to_vpe_ctx(file);
1727 	struct vpe_fmt *fmt = find_format(f);
1728 
1729 	if (V4L2_TYPE_IS_OUTPUT(f->type))
1730 		return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_OUTPUT);
1731 	else
1732 		return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_CAPTURE);
1733 }
1734 
1735 static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
1736 {
1737 	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
1738 	struct v4l2_pix_format_mplane *qpix;
1739 	struct vpe_q_data *q_data;
1740 	struct vb2_queue *vq;
1741 
1742 	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
1743 	if (!vq)
1744 		return -EINVAL;
1745 
1746 	if (vb2_is_busy(vq)) {
1747 		vpe_err(ctx->dev, "queue busy\n");
1748 		return -EBUSY;
1749 	}
1750 
1751 	q_data = get_q_data(ctx, f->type);
1752 	if (!q_data)
1753 		return -EINVAL;
1754 
1755 	qpix = &q_data->format.fmt.pix_mp;
1756 	q_data->fmt		= find_format(f);
1757 	q_data->format = *f;
1758 
1759 	q_data->c_rect.left	= 0;
1760 	q_data->c_rect.top	= 0;
1761 	q_data->c_rect.width	= pix->width;
1762 	q_data->c_rect.height	= pix->height;
1763 
1764 	if (qpix->field == V4L2_FIELD_ALTERNATE)
1765 		q_data->flags |= Q_DATA_INTERLACED_ALTERNATE;
1766 	else if (qpix->field == V4L2_FIELD_SEQ_TB)
1767 		q_data->flags |= Q_DATA_INTERLACED_SEQ_TB;
1768 	else if (qpix->field == V4L2_FIELD_SEQ_BT)
1769 		q_data->flags |= Q_DATA_INTERLACED_SEQ_BT;
1770 	else
1771 		q_data->flags &= ~Q_IS_INTERLACED;
1772 
1773 	/* the crop height is halved for the case of SEQ_XX buffers */
1774 	if (q_data->flags & Q_IS_SEQ_XX)
1775 		q_data->c_rect.height /= 2;
1776 
1777 	vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d",
1778 		f->type, pix->width, pix->height, pix->pixelformat,
1779 		pix->plane_fmt[0].bytesperline);
1780 	if (pix->num_planes == 2)
1781 		vpe_dbg(ctx->dev, " bpl_uv %d\n",
1782 			pix->plane_fmt[1].bytesperline);
1783 
1784 	return 0;
1785 }
1786 
1787 static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
1788 {
1789 	int ret;
1790 	struct vpe_ctx *ctx = to_vpe_ctx(file);
1791 
1792 	ret = vpe_try_fmt(file, priv, f);
1793 	if (ret)
1794 		return ret;
1795 
1796 	ret = __vpe_s_fmt(ctx, f);
1797 	if (ret)
1798 		return ret;
1799 
1800 	if (V4L2_TYPE_IS_OUTPUT(f->type))
1801 		set_src_registers(ctx);
1802 	else
1803 		set_dst_registers(ctx);
1804 
1805 	return set_srcdst_params(ctx);
1806 }
1807 
1808 static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
1809 {
1810 	struct vpe_q_data *q_data;
1811 	struct v4l2_pix_format_mplane *pix;
1812 	int height;
1813 
1814 	if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
1815 	    (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
1816 		return -EINVAL;
1817 
1818 	q_data = get_q_data(ctx, s->type);
1819 	if (!q_data)
1820 		return -EINVAL;
1821 
1822 	pix = &q_data->format.fmt.pix_mp;
1823 
1824 	switch (s->target) {
1825 	case V4L2_SEL_TGT_COMPOSE:
1826 		/*
1827 		 * the COMPOSE target is only valid for the capture buffer type;
1828 		 * return an error for the output buffer type
1829 		 */
1830 		if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1831 			return -EINVAL;
1832 		break;
1833 	case V4L2_SEL_TGT_CROP:
1834 		/*
1835 		 * the CROP target is only valid for the output buffer type;
1836 		 * return an error for the capture buffer type
1837 		 */
1838 		if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1839 			return -EINVAL;
1840 		break;
1841 	/*
1842 	 * the BOUNDS and DEFAULT crop/compose targets are read-only and
1843 	 * cannot be tried or set
1844 	 */
1845 	default:
1846 		return -EINVAL;
1847 	}
1848 
1849 	/*
1850 	 * For SEQ_XX buffers, the crop height must be bounded by the
1851 	 * field height, not the buffer height
1852 	 */
1853 	if (q_data->flags & Q_IS_SEQ_XX)
1854 		height = pix->height / 2;
1855 	else
1856 		height = pix->height;
1857 
1858 	if (s->r.top < 0 || s->r.left < 0) {
1859 		vpe_err(ctx->dev, "negative values for top and left\n");
1860 		s->r.top = s->r.left = 0;
1861 	}
1862 
1863 	v4l_bound_align_image(&s->r.width, MIN_W, pix->width, 1,
1864 		&s->r.height, MIN_H, height, H_ALIGN, S_ALIGN);
1865 
1866 	/* adjust left/top if cropping rectangle is out of bounds */
1867 	if (s->r.left + s->r.width > pix->width)
1868 		s->r.left = pix->width - s->r.width;
1869 	if (s->r.top + s->r.height > pix->height)
1870 		s->r.top = pix->height - s->r.height;
1871 
1872 	return 0;
1873 }
1874 
1875 static int vpe_g_selection(struct file *file, void *fh,
1876 		struct v4l2_selection *s)
1877 {
1878 	struct vpe_ctx *ctx = to_vpe_ctx(file);
1879 	struct vpe_q_data *q_data;
1880 	struct v4l2_pix_format_mplane *pix;
1881 	bool use_c_rect = false;
1882 
1883 	if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
1884 	    (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
1885 		return -EINVAL;
1886 
1887 	q_data = get_q_data(ctx, s->type);
1888 	if (!q_data)
1889 		return -EINVAL;
1890 
1891 	pix = &q_data->format.fmt.pix_mp;
1892 
1893 	switch (s->target) {
1894 	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
1895 	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
1896 		if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1897 			return -EINVAL;
1898 		break;
1899 	case V4L2_SEL_TGT_CROP_BOUNDS:
1900 	case V4L2_SEL_TGT_CROP_DEFAULT:
1901 		if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1902 			return -EINVAL;
1903 		break;
1904 	case V4L2_SEL_TGT_COMPOSE:
1905 		if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1906 			return -EINVAL;
1907 		use_c_rect = true;
1908 		break;
1909 	case V4L2_SEL_TGT_CROP:
1910 		if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1911 			return -EINVAL;
1912 		use_c_rect = true;
1913 		break;
1914 	default:
1915 		return -EINVAL;
1916 	}
1917 
1918 	if (use_c_rect) {
1919 		/*
1920 		 * for CROP/COMPOSE target type, return c_rect params from the
1921 		 * respective buffer type
1922 		 */
1923 		s->r = q_data->c_rect;
1924 	} else {
1925 		/*
1926 		 * for DEFAULT/BOUNDS target type, return width and height from
1927 		 * S_FMT of the respective buffer type
1928 		 */
1929 		s->r.left = 0;
1930 		s->r.top = 0;
1931 		s->r.width = pix->width;
1932 		s->r.height = pix->height;
1933 	}
1934 
1935 	return 0;
1936 }
1937 
1938 
1939 static int vpe_s_selection(struct file *file, void *fh,
1940 		struct v4l2_selection *s)
1941 {
1942 	struct vpe_ctx *ctx = to_vpe_ctx(file);
1943 	struct vpe_q_data *q_data;
1944 	struct v4l2_selection sel = *s;
1945 	int ret;
1946 
1947 	ret = __vpe_try_selection(ctx, &sel);
1948 	if (ret)
1949 		return ret;
1950 
1951 	q_data = get_q_data(ctx, sel.type);
1952 	if (!q_data)
1953 		return -EINVAL;
1954 
1955 	if ((q_data->c_rect.left == sel.r.left) &&
1956 			(q_data->c_rect.top == sel.r.top) &&
1957 			(q_data->c_rect.width == sel.r.width) &&
1958 			(q_data->c_rect.height == sel.r.height)) {
1959 		vpe_dbg(ctx->dev,
1960 			"requested crop/compose values are already set\n");
1961 		return 0;
1962 	}
1963 
1964 	q_data->c_rect = sel.r;
1965 
1966 	return set_srcdst_params(ctx);
1967 }
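
/*
 * Illustrative userspace sketch of the selection path above: crop a
 * 640x480 window out of the source frame. The fd and geometry are
 * assumptions; __vpe_try_selection() aligns and clamps the rectangle
 * before it is applied:
 *
 *	struct v4l2_selection sel = {0};
 *
 *	sel.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *	sel.target = V4L2_SEL_TGT_CROP;	// CROP is valid on OUTPUT only
 *	sel.r.left = 0;
 *	sel.r.top = 0;
 *	sel.r.width = 640;
 *	sel.r.height = 480;
 *	ioctl(fd, VIDIOC_S_SELECTION, &sel);
 *	// on return, sel.r holds the rectangle actually applied
 */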
1968 
1969 /*
1970  * defines number of buffers/frames a context can process with VPE before
1971  * switching to a different context. default value is 1 buffer per context
1972  */
1973 #define V4L2_CID_VPE_BUFS_PER_JOB		(V4L2_CID_USER_TI_VPE_BASE + 0)
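
/*
 * Sketch of how userspace might tune this control; the fd is assumed
 * to be an open handle on the VPE video node, and the value of 4 is
 * only an example:
 *
 *	struct v4l2_control ctrl = {
 *		.id = V4L2_CID_VPE_BUFS_PER_JOB,
 *		.value = 4,	// process 4 buffers before a context switch
 *	};
 *	ioctl(fd, VIDIOC_S_CTRL, &ctrl);
 */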
1974 
1975 static int vpe_s_ctrl(struct v4l2_ctrl *ctrl)
1976 {
1977 	struct vpe_ctx *ctx =
1978 		container_of(ctrl->handler, struct vpe_ctx, hdl);
1979 
1980 	switch (ctrl->id) {
1981 	case V4L2_CID_VPE_BUFS_PER_JOB:
1982 		ctx->bufs_per_job = ctrl->val;
1983 		break;
1984 
1985 	default:
1986 		vpe_err(ctx->dev, "Invalid control\n");
1987 		return -EINVAL;
1988 	}
1989 
1990 	return 0;
1991 }
1992 
1993 static const struct v4l2_ctrl_ops vpe_ctrl_ops = {
1994 	.s_ctrl = vpe_s_ctrl,
1995 };
1996 
1997 static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
1998 	.vidioc_querycap		= vpe_querycap,
1999 
2000 	.vidioc_enum_fmt_vid_cap	= vpe_enum_fmt,
2001 	.vidioc_g_fmt_vid_cap_mplane	= vpe_g_fmt,
2002 	.vidioc_try_fmt_vid_cap_mplane	= vpe_try_fmt,
2003 	.vidioc_s_fmt_vid_cap_mplane	= vpe_s_fmt,
2004 
2005 	.vidioc_enum_fmt_vid_out	= vpe_enum_fmt,
2006 	.vidioc_g_fmt_vid_out_mplane	= vpe_g_fmt,
2007 	.vidioc_try_fmt_vid_out_mplane	= vpe_try_fmt,
2008 	.vidioc_s_fmt_vid_out_mplane	= vpe_s_fmt,
2009 
2010 	.vidioc_g_selection		= vpe_g_selection,
2011 	.vidioc_s_selection		= vpe_s_selection,
2012 
2013 	.vidioc_reqbufs			= v4l2_m2m_ioctl_reqbufs,
2014 	.vidioc_querybuf		= v4l2_m2m_ioctl_querybuf,
2015 	.vidioc_qbuf			= v4l2_m2m_ioctl_qbuf,
2016 	.vidioc_dqbuf			= v4l2_m2m_ioctl_dqbuf,
2017 	.vidioc_expbuf			= v4l2_m2m_ioctl_expbuf,
2018 	.vidioc_streamon		= v4l2_m2m_ioctl_streamon,
2019 	.vidioc_streamoff		= v4l2_m2m_ioctl_streamoff,
2020 
2021 	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
2022 	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
2023 };
2024 
2025 /*
2026  * Queue operations
2027  */
2028 static int vpe_queue_setup(struct vb2_queue *vq,
2029 			   unsigned int *nbuffers, unsigned int *nplanes,
2030 			   unsigned int sizes[], struct device *alloc_devs[])
2031 {
2032 	int i;
2033 	struct vpe_ctx *ctx = vb2_get_drv_priv(vq);
2034 	struct vpe_q_data *q_data;
2035 	struct v4l2_pix_format_mplane *pix;
2036 
2037 	q_data = get_q_data(ctx, vq->type);
2038 	if (!q_data)
2039 		return -EINVAL;
2040 
2041 	pix = &q_data->format.fmt.pix_mp;
2042 	*nplanes = pix->num_planes;
2043 
2044 	for (i = 0; i < *nplanes; i++)
2045 		sizes[i] = pix->plane_fmt[i].sizeimage;
2046 
2047 	vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers,
2048 		sizes[VPE_LUMA]);
2049 	if (*nplanes == 2)
2050 		vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]);
2051 
2052 	return 0;
2053 }
2054 
2055 static int vpe_buf_prepare(struct vb2_buffer *vb)
2056 {
2057 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
2058 	struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
2059 	struct vpe_q_data *q_data;
2060 	struct v4l2_pix_format_mplane *pix;
2061 	int i;
2062 
2063 	vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);
2064 
2065 	q_data = get_q_data(ctx, vb->vb2_queue->type);
2066 	if (!q_data)
2067 		return -EINVAL;
2068 
2069 	pix = &q_data->format.fmt.pix_mp;
2070 
2071 	if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
2072 		if (!(q_data->flags & Q_IS_INTERLACED)) {
2073 			vbuf->field = V4L2_FIELD_NONE;
2074 		} else {
2075 			if (vbuf->field != V4L2_FIELD_TOP &&
2076 			    vbuf->field != V4L2_FIELD_BOTTOM &&
2077 			    vbuf->field != V4L2_FIELD_SEQ_TB &&
2078 			    vbuf->field != V4L2_FIELD_SEQ_BT)
2079 				return -EINVAL;
2080 		}
2081 	}
2082 
2083 	for (i = 0; i < pix->num_planes; i++) {
2084 		if (vb2_plane_size(vb, i) < pix->plane_fmt[i].sizeimage) {
2085 			vpe_err(ctx->dev,
2086 				"data will not fit into plane (%lu < %lu)\n",
2087 				vb2_plane_size(vb, i),
2088 				(unsigned long)pix->plane_fmt[i].sizeimage);
2089 			return -EINVAL;
2090 		}
2091 	}
2092 
2093 	for (i = 0; i < pix->num_planes; i++)
2094 		vb2_set_plane_payload(vb, i, pix->plane_fmt[i].sizeimage);
2095 
2096 	return 0;
2097 }
2098 
2099 static void vpe_buf_queue(struct vb2_buffer *vb)
2100 {
2101 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
2102 	struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
2103 
2104 	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
2105 }
2106 
2107 static int check_srcdst_sizes(struct vpe_ctx *ctx)
2108 {
2109 	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
2110 	struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
2111 	unsigned int src_w = s_q_data->c_rect.width;
2112 	unsigned int src_h = s_q_data->c_rect.height;
2113 	unsigned int dst_w = d_q_data->c_rect.width;
2114 	unsigned int dst_h = d_q_data->c_rect.height;
2115 
2116 	if (src_w == dst_w && src_h == dst_h)
2117 		return 0;
2118 
2119 	if (src_h <= SC_MAX_PIXEL_HEIGHT &&
2120 	    src_w <= SC_MAX_PIXEL_WIDTH &&
2121 	    dst_h <= SC_MAX_PIXEL_HEIGHT &&
2122 	    dst_w <= SC_MAX_PIXEL_WIDTH)
2123 		return 0;
2124 
2125 	return -1;
2126 }
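
/*
 * In other words (the limits come from sc.h and are quoted here only
 * to illustrate the rule): a same-size pass-through such as
 * 2048x2048 -> 2048x2048 is always accepted, while any conversion
 * that engages the scaler must keep both rectangles within
 * SC_MAX_PIXEL_WIDTH x SC_MAX_PIXEL_HEIGHT.
 */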
2127 
2128 static void vpe_return_all_buffers(struct vpe_ctx *ctx, struct vb2_queue *q,
2129 				   enum vb2_buffer_state state)
2130 {
2131 	struct vb2_v4l2_buffer *vb;
2132 	unsigned long flags;
2133 
2134 	for (;;) {
2135 		if (V4L2_TYPE_IS_OUTPUT(q->type))
2136 			vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
2137 		else
2138 			vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
2139 		if (!vb)
2140 			break;
2141 		spin_lock_irqsave(&ctx->dev->lock, flags);
2142 		v4l2_m2m_buf_done(vb, state);
2143 		spin_unlock_irqrestore(&ctx->dev->lock, flags);
2144 	}
2145 
2146 	/*
2147 	 * Clean up the in-transit vb2 buffers that have already been
2148 	 * removed from their respective queues but for which
2149 	 * processing has not yet completed.
2150 	 */
2151 	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
2152 		spin_lock_irqsave(&ctx->dev->lock, flags);
2153 
2154 		if (ctx->src_vbs[2])
2155 			v4l2_m2m_buf_done(ctx->src_vbs[2], state);
2156 
2157 		if (ctx->src_vbs[1] && (ctx->src_vbs[1] != ctx->src_vbs[2]))
2158 			v4l2_m2m_buf_done(ctx->src_vbs[1], state);
2159 
2160 		if (ctx->src_vbs[0] &&
2161 		    (ctx->src_vbs[0] != ctx->src_vbs[1]) &&
2162 		    (ctx->src_vbs[0] != ctx->src_vbs[2]))
2163 			v4l2_m2m_buf_done(ctx->src_vbs[0], state);
2164 
2165 		ctx->src_vbs[2] = NULL;
2166 		ctx->src_vbs[1] = NULL;
2167 		ctx->src_vbs[0] = NULL;
2168 
2169 		spin_unlock_irqrestore(&ctx->dev->lock, flags);
2170 	} else {
2171 		if (ctx->dst_vb) {
2172 			spin_lock_irqsave(&ctx->dev->lock, flags);
2173 
2174 			v4l2_m2m_buf_done(ctx->dst_vb, state);
2175 			ctx->dst_vb = NULL;
2176 			spin_unlock_irqrestore(&ctx->dev->lock, flags);
2177 		}
2178 	}
2179 }
2180 
2181 static int vpe_start_streaming(struct vb2_queue *q, unsigned int count)
2182 {
2183 	struct vpe_ctx *ctx = vb2_get_drv_priv(q);
2184 
2185 	/* Check whether any of the sizes exceed the maximum scaler limits */
2186 	if (check_srcdst_sizes(ctx)) {
2187 		vpe_err(ctx->dev,
2188 			"Conversion setup failed, check source and destination parameters\n"
2189 			);
2190 		vpe_return_all_buffers(ctx, q, VB2_BUF_STATE_QUEUED);
2191 		return -EINVAL;
2192 	}
2193 
2194 	if (ctx->deinterlacing)
2195 		config_edi_input_mode(ctx, 0x0);
2196 
2197 	if (ctx->sequence != 0)
2198 		set_srcdst_params(ctx);
2199 
2200 	return 0;
2201 }
2202 
2203 static void vpe_stop_streaming(struct vb2_queue *q)
2204 {
2205 	struct vpe_ctx *ctx = vb2_get_drv_priv(q);
2206 
2207 	vpe_dump_regs(ctx->dev);
2208 	vpdma_dump_regs(ctx->dev->vpdma);
2209 
2210 	vpe_return_all_buffers(ctx, q, VB2_BUF_STATE_ERROR);
2211 }
2212 
2213 static const struct vb2_ops vpe_qops = {
2214 	.queue_setup	 = vpe_queue_setup,
2215 	.buf_prepare	 = vpe_buf_prepare,
2216 	.buf_queue	 = vpe_buf_queue,
2217 	.start_streaming = vpe_start_streaming,
2218 	.stop_streaming  = vpe_stop_streaming,
2219 };
2220 
2221 static int queue_init(void *priv, struct vb2_queue *src_vq,
2222 		      struct vb2_queue *dst_vq)
2223 {
2224 	struct vpe_ctx *ctx = priv;
2225 	struct vpe_dev *dev = ctx->dev;
2226 	int ret;
2227 
2228 	memset(src_vq, 0, sizeof(*src_vq));
2229 	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
2230 	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
2231 	src_vq->drv_priv = ctx;
2232 	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
2233 	src_vq->ops = &vpe_qops;
2234 	src_vq->mem_ops = &vb2_dma_contig_memops;
2235 	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
2236 	src_vq->lock = &dev->dev_mutex;
2237 	src_vq->dev = dev->v4l2_dev.dev;
2238 
2239 	ret = vb2_queue_init(src_vq);
2240 	if (ret)
2241 		return ret;
2242 
2243 	memset(dst_vq, 0, sizeof(*dst_vq));
2244 	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
2245 	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
2246 	dst_vq->drv_priv = ctx;
2247 	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
2248 	dst_vq->ops = &vpe_qops;
2249 	dst_vq->mem_ops = &vb2_dma_contig_memops;
2250 	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
2251 	dst_vq->lock = &dev->dev_mutex;
2252 	dst_vq->dev = dev->v4l2_dev.dev;
2253 
2254 	return vb2_queue_init(dst_vq);
2255 }
2256 
2257 static const struct v4l2_ctrl_config vpe_bufs_per_job = {
2258 	.ops = &vpe_ctrl_ops,
2259 	.id = V4L2_CID_VPE_BUFS_PER_JOB,
2260 	.name = "Buffers Per Transaction",
2261 	.type = V4L2_CTRL_TYPE_INTEGER,
2262 	.def = VPE_DEF_BUFS_PER_JOB,
2263 	.min = 1,
2264 	.max = VIDEO_MAX_FRAME,
2265 	.step = 1,
2266 };
2267 
2268 /*
2269  * File operations
2270  */
2271 static int vpe_open(struct file *file)
2272 {
2273 	struct vpe_dev *dev = video_drvdata(file);
2274 	struct vpe_q_data *s_q_data;
2275 	struct v4l2_ctrl_handler *hdl;
2276 	struct vpe_ctx *ctx;
2277 	struct v4l2_pix_format_mplane *pix;
2278 	int ret;
2279 
2280 	vpe_dbg(dev, "vpe_open\n");
2281 
2282 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2283 	if (!ctx)
2284 		return -ENOMEM;
2285 
2286 	ctx->dev = dev;
2287 
2288 	if (mutex_lock_interruptible(&dev->dev_mutex)) {
2289 		ret = -ERESTARTSYS;
2290 		goto free_ctx;
2291 	}
2292 
2293 	ret = vpdma_create_desc_list(&ctx->desc_list, VPE_DESC_LIST_SIZE,
2294 			VPDMA_LIST_TYPE_NORMAL);
2295 	if (ret != 0)
2296 		goto unlock;
2297 
2298 	ret = vpdma_alloc_desc_buf(&ctx->mmr_adb, sizeof(struct vpe_mmr_adb));
2299 	if (ret != 0)
2300 		goto free_desc_list;
2301 
2302 	ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_h, SC_COEF_SRAM_SIZE);
2303 	if (ret != 0)
2304 		goto free_mmr_adb;
2305 
2306 	ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_v, SC_COEF_SRAM_SIZE);
2307 	if (ret != 0)
2308 		goto free_sc_h;
2309 
2310 	init_adb_hdrs(ctx);
2311 
2312 	v4l2_fh_init(&ctx->fh, video_devdata(file));
2313 
2314 	hdl = &ctx->hdl;
2315 	v4l2_ctrl_handler_init(hdl, 1);
2316 	v4l2_ctrl_new_custom(hdl, &vpe_bufs_per_job, NULL);
2317 	if (hdl->error) {
2318 		ret = hdl->error;
2319 		goto exit_fh;
2320 	}
2321 	ctx->fh.ctrl_handler = hdl;
2322 	v4l2_ctrl_handler_setup(hdl);
2323 
2324 	s_q_data = &ctx->q_data[Q_DATA_SRC];
2325 	pix = &s_q_data->format.fmt.pix_mp;
2326 	s_q_data->fmt = __find_format(V4L2_PIX_FMT_YUYV);
2327 	pix->pixelformat = s_q_data->fmt->fourcc;
2328 	s_q_data->format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
2329 	pix->width = 1920;
2330 	pix->height = 1080;
2331 	pix->num_planes = 1;
2332 	pix->plane_fmt[VPE_LUMA].bytesperline = (pix->width *
2333 			s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
2334 	pix->plane_fmt[VPE_LUMA].sizeimage =
2335 			pix->plane_fmt[VPE_LUMA].bytesperline *
2336 			pix->height;
2337 	pix->colorspace = V4L2_COLORSPACE_REC709;
2338 	pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
2339 	pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
2340 	pix->quantization = V4L2_QUANTIZATION_DEFAULT;
2341 	pix->field = V4L2_FIELD_NONE;
2342 	s_q_data->c_rect.left = 0;
2343 	s_q_data->c_rect.top = 0;
2344 	s_q_data->c_rect.width = pix->width;
2345 	s_q_data->c_rect.height = pix->height;
2346 	s_q_data->flags = 0;
2347 
2348 	ctx->q_data[Q_DATA_DST] = *s_q_data;
2349 	ctx->q_data[Q_DATA_DST].format.type =
2350 			V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
2351 
2352 	set_dei_shadow_registers(ctx);
2353 	set_src_registers(ctx);
2354 	set_dst_registers(ctx);
2355 	ret = set_srcdst_params(ctx);
2356 	if (ret)
2357 		goto exit_fh;
2358 
2359 	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
2360 
2361 	if (IS_ERR(ctx->fh.m2m_ctx)) {
2362 		ret = PTR_ERR(ctx->fh.m2m_ctx);
2363 		goto exit_fh;
2364 	}
2365 
2366 	v4l2_fh_add(&ctx->fh, file);
2367 
2368 	/*
2369 	 * for now, just report the creation of the first instance; we can later
2370 	 * optimize the driver to enable or disable clocks when the first
2371 	 * instance is created or the last instance released
2372 	 */
2373 	if (atomic_inc_return(&dev->num_instances) == 1)
2374 		vpe_dbg(dev, "first instance created\n");
2375 
2376 	ctx->bufs_per_job = VPE_DEF_BUFS_PER_JOB;
2377 
2378 	ctx->load_mmrs = true;
2379 
2380 	vpe_dbg(dev, "created instance %p, m2m_ctx: %p\n",
2381 		ctx, ctx->fh.m2m_ctx);
2382 
2383 	mutex_unlock(&dev->dev_mutex);
2384 
2385 	return 0;
2386 exit_fh:
2387 	v4l2_ctrl_handler_free(hdl);
2388 	v4l2_fh_exit(&ctx->fh);
2389 	vpdma_free_desc_buf(&ctx->sc_coeff_v);
2390 free_sc_h:
2391 	vpdma_free_desc_buf(&ctx->sc_coeff_h);
2392 free_mmr_adb:
2393 	vpdma_free_desc_buf(&ctx->mmr_adb);
2394 free_desc_list:
2395 	vpdma_free_desc_list(&ctx->desc_list);
2396 unlock:
2397 	mutex_unlock(&dev->dev_mutex);
2398 free_ctx:
2399 	kfree(ctx);
2400 	return ret;
2401 }
2402 
2403 static int vpe_release(struct file *file)
2404 {
2405 	struct vpe_dev *dev = video_drvdata(file);
2406 	struct vpe_ctx *ctx = to_vpe_ctx(file);
2407 
2408 	vpe_dbg(dev, "releasing instance %p\n", ctx);
2409 
2410 	mutex_lock(&dev->dev_mutex);
2411 	free_mv_buffers(ctx);
2412 
2413 	vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
2414 	vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
2415 	vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
2416 	vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);
2417 
2418 	vpdma_free_desc_list(&ctx->desc_list);
2419 	vpdma_free_desc_buf(&ctx->mmr_adb);
2420 
2421 	vpdma_free_desc_buf(&ctx->sc_coeff_v);
2422 	vpdma_free_desc_buf(&ctx->sc_coeff_h);
2423 
2424 	v4l2_fh_del(&ctx->fh, file);
2425 	v4l2_fh_exit(&ctx->fh);
2426 	v4l2_ctrl_handler_free(&ctx->hdl);
2427 	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
2428 
2429 	kfree(ctx);
2430 
2431 	/*
2432 	 * for now, just report the release of the last instance; we can later
2433 	 * optimize the driver to enable or disable clocks when the first
2434 	 * instance is created or the last instance released
2435 	 */
2436 	if (atomic_dec_return(&dev->num_instances) == 0)
2437 		vpe_dbg(dev, "last instance released\n");
2438 
2439 	mutex_unlock(&dev->dev_mutex);
2440 
2441 	return 0;
2442 }
2443 
2444 static const struct v4l2_file_operations vpe_fops = {
2445 	.owner		= THIS_MODULE,
2446 	.open		= vpe_open,
2447 	.release	= vpe_release,
2448 	.poll		= v4l2_m2m_fop_poll,
2449 	.unlocked_ioctl	= video_ioctl2,
2450 	.mmap		= v4l2_m2m_fop_mmap,
2451 };
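
/*
 * Typical mem2mem session against these fops, sketched from userspace
 * (the device path is an assumption):
 *
 *	fd = open("/dev/video0", O_RDWR);	// -> vpe_open()
 *	// VIDIOC_S_FMT on OUTPUT and CAPTURE	   -> vpe_s_fmt()
 *	// VIDIOC_REQBUFS/QBUF/STREAMON on both	   -> vb2/m2m helpers
 *	// poll(fd), then VIDIOC_DQBUF results	   -> v4l2_m2m_fop_poll()
 *	close(fd);				// -> vpe_release()
 */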
2452 
2453 static const struct video_device vpe_videodev = {
2454 	.name		= VPE_MODULE_NAME,
2455 	.fops		= &vpe_fops,
2456 	.ioctl_ops	= &vpe_ioctl_ops,
2457 	.minor		= -1,
2458 	.release	= video_device_release_empty,
2459 	.vfl_dir	= VFL_DIR_M2M,
2460 	.device_caps	= V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING,
2461 };
2462 
2463 static const struct v4l2_m2m_ops m2m_ops = {
2464 	.device_run	= device_run,
2465 	.job_ready	= job_ready,
2466 	.job_abort	= job_abort,
2467 };
2468 
2469 static int vpe_runtime_get(struct platform_device *pdev)
2470 {
2471 	int r;
2472 
2473 	dev_dbg(&pdev->dev, "vpe_runtime_get\n");
2474 
2475 	r = pm_runtime_resume_and_get(&pdev->dev);
2476 	WARN_ON(r < 0);
2477 	return r;
2478 }
2479 
2480 static void vpe_runtime_put(struct platform_device *pdev)
2481 {
2483 	int r;
2484 
2485 	dev_dbg(&pdev->dev, "vpe_runtime_put\n");
2486 
2487 	r = pm_runtime_put_sync(&pdev->dev);
2488 	WARN_ON(r < 0 && r != -ENOSYS);
2489 }
2490 
2491 static void vpe_fw_cb(struct platform_device *pdev)
2492 {
2493 	struct vpe_dev *dev = platform_get_drvdata(pdev);
2494 	struct video_device *vfd;
2495 	int ret;
2496 
2497 	vfd = &dev->vfd;
2498 	*vfd = vpe_videodev;
2499 	vfd->lock = &dev->dev_mutex;
2500 	vfd->v4l2_dev = &dev->v4l2_dev;
2501 
2502 	ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
2503 	if (ret) {
2504 		vpe_err(dev, "Failed to register video device\n");
2505 
2506 		vpe_set_clock_enable(dev, 0);
2507 		vpe_runtime_put(pdev);
2508 		pm_runtime_disable(&pdev->dev);
2509 		v4l2_m2m_release(dev->m2m_dev);
2510 		v4l2_device_unregister(&dev->v4l2_dev);
2511 
2512 		return;
2513 	}
2514 
2515 	video_set_drvdata(vfd, dev);
2516 	dev_info(dev->v4l2_dev.dev, "Device registered as /dev/video%d\n",
2517 		vfd->num);
2518 }
2519 
2520 static int vpe_probe(struct platform_device *pdev)
2521 {
2522 	struct vpe_dev *dev;
2523 	int ret, irq, func;
2524 
2525 	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2526 	if (ret) {
2527 		dev_err(&pdev->dev,
2528 			"32-bit consistent DMA enable failed\n");
2529 		return ret;
2530 	}
2531 
2532 	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
2533 	if (!dev)
2534 		return -ENOMEM;
2535 
2536 	spin_lock_init(&dev->lock);
2537 
2538 	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
2539 	if (ret)
2540 		return ret;
2541 
2542 	atomic_set(&dev->num_instances, 0);
2543 	mutex_init(&dev->dev_mutex);
2544 
2545 	dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2546 						"vpe_top");
2547 	if (!dev->res) {
2548 		dev_err(&pdev->dev, "missing 'vpe_top' resources data\n");
2549 		return -ENODEV;
2550 	}
2551 
2552 	/*
2553 	 * HACK: we get resource info from the device tree as a list of VPE
2554 	 * sub blocks; the driver currently uses only the base of vpe_top
2555 	 * for register access. It should later be changed to access
2556 	 * registers based on the sub block base addresses
2557 	 */
2558 	dev->base = devm_ioremap(&pdev->dev, dev->res->start, SZ_32K);
2559 	if (!dev->base) {
2560 		ret = -ENOMEM;
2561 		goto v4l2_dev_unreg;
2562 	}
2563 
2564 	irq = platform_get_irq(pdev, 0);
2565 	ret = devm_request_irq(&pdev->dev, irq, vpe_irq, 0, VPE_MODULE_NAME,
2566 			dev);
2567 	if (ret)
2568 		goto v4l2_dev_unreg;
2569 
2570 	platform_set_drvdata(pdev, dev);
2571 
2572 	dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
2573 	if (IS_ERR(dev->m2m_dev)) {
2574 		vpe_err(dev, "Failed to init mem2mem device\n");
2575 		ret = PTR_ERR(dev->m2m_dev);
2576 		goto v4l2_dev_unreg;
2577 	}
2578 
2579 	pm_runtime_enable(&pdev->dev);
2580 
2581 	ret = vpe_runtime_get(pdev);
2582 	if (ret < 0)
2583 		goto rel_m2m;
2584 
2585 	/* Perform clk enable followed by reset */
2586 	vpe_set_clock_enable(dev, 1);
2587 
2588 	vpe_top_reset(dev);
2589 
2590 	func = read_field_reg(dev, VPE_PID, VPE_PID_FUNC_MASK,
2591 		VPE_PID_FUNC_SHIFT);
2592 	vpe_dbg(dev, "VPE PID function %x\n", func);
2593 
2594 	vpe_top_vpdma_reset(dev);
2595 
2596 	dev->sc = sc_create(pdev, "sc");
2597 	if (IS_ERR(dev->sc)) {
2598 		ret = PTR_ERR(dev->sc);
2599 		goto runtime_put;
2600 	}
2601 
2602 	dev->csc = csc_create(pdev, "csc");
2603 	if (IS_ERR(dev->csc)) {
2604 		ret = PTR_ERR(dev->csc);
2605 		goto runtime_put;
2606 	}
2607 
2608 	dev->vpdma = &dev->vpdma_data;
2609 	ret = vpdma_create(pdev, dev->vpdma, vpe_fw_cb);
2610 	if (ret)
2611 		goto runtime_put;
2612 
2613 	return 0;
2614 
2615 runtime_put:
2616 	vpe_runtime_put(pdev);
2617 rel_m2m:
2618 	pm_runtime_disable(&pdev->dev);
2619 	v4l2_m2m_release(dev->m2m_dev);
2620 v4l2_dev_unreg:
2621 	v4l2_device_unregister(&dev->v4l2_dev);
2622 
2623 	return ret;
2624 }
2625 
2626 static void vpe_remove(struct platform_device *pdev)
2627 {
2628 	struct vpe_dev *dev = platform_get_drvdata(pdev);
2629 
2630 	v4l2_info(&dev->v4l2_dev, "Removing " VPE_MODULE_NAME "\n");
2631 
2632 	v4l2_m2m_release(dev->m2m_dev);
2633 	video_unregister_device(&dev->vfd);
2634 	v4l2_device_unregister(&dev->v4l2_dev);
2635 
2636 	vpe_set_clock_enable(dev, 0);
2637 	vpe_runtime_put(pdev);
2638 	pm_runtime_disable(&pdev->dev);
2639 }
2640 
2641 #if defined(CONFIG_OF)
2642 static const struct of_device_id vpe_of_match[] = {
2643 	{
2644 		.compatible = "ti,dra7-vpe",
2645 	},
2646 	{},
2647 };
2648 MODULE_DEVICE_TABLE(of, vpe_of_match);
2649 #endif
2650 
2651 static struct platform_driver vpe_pdrv = {
2652 	.probe		= vpe_probe,
2653 	.remove		= vpe_remove,
2654 	.driver		= {
2655 		.name	= VPE_MODULE_NAME,
2656 		.of_match_table = of_match_ptr(vpe_of_match),
2657 	},
2658 };
2659 
2660 module_platform_driver(vpe_pdrv);
2661 
2662 MODULE_DESCRIPTION("TI VPE driver");
2663 MODULE_AUTHOR("Dale Farnsworth, <dale@farnsworth.org>");
2664 MODULE_LICENSE("GPL");
2665