xref: /linux/drivers/media/platform/renesas/rcar_fdp1.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Renesas R-Car Fine Display Processor
4  *
5  * Video format converter and frame deinterlacer device.
6  *
7  * Author: Kieran Bingham, <kieran@bingham.xyz>
8  * Copyright (c) 2016 Renesas Electronics Corporation.
9  *
10  * This code is developed and inspired from the vim2m, rcar_jpu,
11  * m2m-deinterlace, and vsp1 drivers.
12  */
13 
14 #include <linux/clk.h>
15 #include <linux/delay.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/fs.h>
18 #include <linux/interrupt.h>
19 #include <linux/module.h>
20 #include <linux/of.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/sched.h>
24 #include <linux/slab.h>
25 #include <linux/timer.h>
26 #include <media/rcar-fcp.h>
27 #include <media/v4l2-ctrls.h>
28 #include <media/v4l2-device.h>
29 #include <media/v4l2-event.h>
30 #include <media/v4l2-ioctl.h>
31 #include <media/v4l2-mem2mem.h>
32 #include <media/videobuf2-dma-contig.h>
33 
34 static unsigned int debug;
35 module_param(debug, uint, 0644);
36 MODULE_PARM_DESC(debug, "activate debug info");
37 
38 /* Minimum and maximum frame width/height */
39 #define FDP1_MIN_W		80U
40 #define FDP1_MIN_H		80U
41 
42 #define FDP1_MAX_W		3840U
43 #define FDP1_MAX_H		2160U
44 
45 #define FDP1_MAX_PLANES		3U
46 #define FDP1_MAX_STRIDE		8190U
47 
48 /* Flags that indicate a format can be used for capture/output */
49 #define FDP1_CAPTURE		BIT(0)
50 #define FDP1_OUTPUT		BIT(1)
51 
52 #define DRIVER_NAME		"rcar_fdp1"
53 
/* Number of jobs to have available on the processing queue */
55 #define FDP1_NUMBER_JOBS 8
56 
57 #define dprintk(fdp1, fmt, arg...) \
58 	v4l2_dbg(1, debug, &fdp1->v4l2_dev, "%s: " fmt, __func__, ## arg)
59 
60 /*
61  * FDP1 registers and bits
62  */
63 
64 /* FDP1 start register - Imm */
65 #define FD1_CTL_CMD			0x0000
66 #define FD1_CTL_CMD_STRCMD		BIT(0)
67 
68 /* Sync generator register - Imm */
69 #define FD1_CTL_SGCMD			0x0004
70 #define FD1_CTL_SGCMD_SGEN		BIT(0)
71 
72 /* Register set end register - Imm */
73 #define FD1_CTL_REGEND			0x0008
74 #define FD1_CTL_REGEND_REGEND		BIT(0)
75 
76 /* Channel activation register - Vupdt */
77 #define FD1_CTL_CHACT			0x000c
78 #define FD1_CTL_CHACT_SMW		BIT(9)
79 #define FD1_CTL_CHACT_WR		BIT(8)
80 #define FD1_CTL_CHACT_SMR		BIT(3)
81 #define FD1_CTL_CHACT_RD2		BIT(2)
82 #define FD1_CTL_CHACT_RD1		BIT(1)
83 #define FD1_CTL_CHACT_RD0		BIT(0)
84 
85 /* Operation Mode Register - Vupdt */
86 #define FD1_CTL_OPMODE			0x0010
87 #define FD1_CTL_OPMODE_PRG		BIT(4)
88 #define FD1_CTL_OPMODE_VIMD_INTERRUPT	(0 << 0)
89 #define FD1_CTL_OPMODE_VIMD_BESTEFFORT	(1 << 0)
90 #define FD1_CTL_OPMODE_VIMD_NOINTERRUPT	(2 << 0)
91 
92 #define FD1_CTL_VPERIOD			0x0014
93 #define FD1_CTL_CLKCTRL			0x0018
94 #define FD1_CTL_CLKCTRL_CSTP_N		BIT(0)
95 
96 /* Software reset register */
97 #define FD1_CTL_SRESET			0x001c
98 #define FD1_CTL_SRESET_SRST		BIT(0)
99 
100 /* Control status register (V-update-status) */
101 #define FD1_CTL_STATUS			0x0024
102 #define FD1_CTL_STATUS_VINT_CNT_MASK	GENMASK(31, 16)
103 #define FD1_CTL_STATUS_VINT_CNT_SHIFT	16
104 #define FD1_CTL_STATUS_SGREGSET		BIT(10)
105 #define FD1_CTL_STATUS_SGVERR		BIT(9)
106 #define FD1_CTL_STATUS_SGFREND		BIT(8)
107 #define FD1_CTL_STATUS_BSY		BIT(0)
108 
109 #define FD1_CTL_VCYCLE_STAT		0x0028
110 
111 /* Interrupt enable register */
112 #define FD1_CTL_IRQENB			0x0038
113 /* Interrupt status register */
114 #define FD1_CTL_IRQSTA			0x003c
115 /* Interrupt control register */
116 #define FD1_CTL_IRQFSET			0x0040
117 
118 /* Common IRQ Bit settings */
119 #define FD1_CTL_IRQ_VERE		BIT(16)
120 #define FD1_CTL_IRQ_VINTE		BIT(4)
121 #define FD1_CTL_IRQ_FREE		BIT(0)
122 #define FD1_CTL_IRQ_MASK		(FD1_CTL_IRQ_VERE | \
123 					 FD1_CTL_IRQ_VINTE | \
124 					 FD1_CTL_IRQ_FREE)
125 
126 /* RPF */
127 #define FD1_RPF_SIZE			0x0060
128 #define FD1_RPF_SIZE_MASK		GENMASK(12, 0)
129 #define FD1_RPF_SIZE_H_SHIFT		16
130 #define FD1_RPF_SIZE_V_SHIFT		0
131 
132 #define FD1_RPF_FORMAT			0x0064
133 #define FD1_RPF_FORMAT_CIPM		BIT(16)
134 #define FD1_RPF_FORMAT_RSPYCS		BIT(13)
135 #define FD1_RPF_FORMAT_RSPUVS		BIT(12)
136 #define FD1_RPF_FORMAT_CF		BIT(8)
137 
138 #define FD1_RPF_PSTRIDE			0x0068
139 #define FD1_RPF_PSTRIDE_Y_SHIFT		16
140 #define FD1_RPF_PSTRIDE_C_SHIFT		0
141 
142 /* RPF0 Source Component Y Address register */
143 #define FD1_RPF0_ADDR_Y			0x006c
144 
145 /* RPF1 Current Picture Registers */
146 #define FD1_RPF1_ADDR_Y			0x0078
147 #define FD1_RPF1_ADDR_C0		0x007c
148 #define FD1_RPF1_ADDR_C1		0x0080
149 
150 /* RPF2 next picture register */
151 #define FD1_RPF2_ADDR_Y			0x0084
152 
153 #define FD1_RPF_SMSK_ADDR		0x0090
154 #define FD1_RPF_SWAP			0x0094
155 
156 /* WPF */
157 #define FD1_WPF_FORMAT			0x00c0
158 #define FD1_WPF_FORMAT_PDV_SHIFT	24
159 #define FD1_WPF_FORMAT_FCNL		BIT(20)
160 #define FD1_WPF_FORMAT_WSPYCS		BIT(15)
161 #define FD1_WPF_FORMAT_WSPUVS		BIT(14)
162 #define FD1_WPF_FORMAT_WRTM_601_16	(0 << 9)
163 #define FD1_WPF_FORMAT_WRTM_601_0	(1 << 9)
164 #define FD1_WPF_FORMAT_WRTM_709_16	(2 << 9)
165 #define FD1_WPF_FORMAT_CSC		BIT(8)
166 
167 #define FD1_WPF_RNDCTL			0x00c4
168 #define FD1_WPF_RNDCTL_CBRM		BIT(28)
169 #define FD1_WPF_RNDCTL_CLMD_NOCLIP	(0 << 12)
170 #define FD1_WPF_RNDCTL_CLMD_CLIP_16_235	(1 << 12)
171 #define FD1_WPF_RNDCTL_CLMD_CLIP_1_254	(2 << 12)
172 
173 #define FD1_WPF_PSTRIDE			0x00c8
174 #define FD1_WPF_PSTRIDE_Y_SHIFT		16
175 #define FD1_WPF_PSTRIDE_C_SHIFT		0
176 
177 /* WPF Destination picture */
178 #define FD1_WPF_ADDR_Y			0x00cc
179 #define FD1_WPF_ADDR_C0			0x00d0
180 #define FD1_WPF_ADDR_C1			0x00d4
181 #define FD1_WPF_SWAP			0x00d8
182 #define FD1_WPF_SWAP_OSWAP_SHIFT	0
183 #define FD1_WPF_SWAP_SSWAP_SHIFT	4
184 
185 /* WPF/RPF Common */
186 #define FD1_RWPF_SWAP_BYTE		BIT(0)
187 #define FD1_RWPF_SWAP_WORD		BIT(1)
188 #define FD1_RWPF_SWAP_LWRD		BIT(2)
189 #define FD1_RWPF_SWAP_LLWD		BIT(3)
190 
191 /* IPC */
192 #define FD1_IPC_MODE			0x0100
193 #define FD1_IPC_MODE_DLI		BIT(8)
194 #define FD1_IPC_MODE_DIM_ADAPT2D3D	(0 << 0)
195 #define FD1_IPC_MODE_DIM_FIXED2D	(1 << 0)
196 #define FD1_IPC_MODE_DIM_FIXED3D	(2 << 0)
197 #define FD1_IPC_MODE_DIM_PREVFIELD	(3 << 0)
198 #define FD1_IPC_MODE_DIM_NEXTFIELD	(4 << 0)
199 
200 #define FD1_IPC_SMSK_THRESH		0x0104
201 #define FD1_IPC_SMSK_THRESH_CONST	0x00010002
202 
203 #define FD1_IPC_COMB_DET		0x0108
204 #define FD1_IPC_COMB_DET_CONST		0x00200040
205 
206 #define FD1_IPC_MOTDEC			0x010c
207 #define FD1_IPC_MOTDEC_CONST		0x00008020
208 
209 /* DLI registers */
210 #define FD1_IPC_DLI_BLEND		0x0120
211 #define FD1_IPC_DLI_BLEND_CONST		0x0080ff02
212 
213 #define FD1_IPC_DLI_HGAIN		0x0124
214 #define FD1_IPC_DLI_HGAIN_CONST		0x001000ff
215 
216 #define FD1_IPC_DLI_SPRS		0x0128
217 #define FD1_IPC_DLI_SPRS_CONST		0x009004ff
218 
219 #define FD1_IPC_DLI_ANGLE		0x012c
220 #define FD1_IPC_DLI_ANGLE_CONST		0x0004080c
221 
222 #define FD1_IPC_DLI_ISOPIX0		0x0130
223 #define FD1_IPC_DLI_ISOPIX0_CONST	0xff10ff10
224 
225 #define FD1_IPC_DLI_ISOPIX1		0x0134
226 #define FD1_IPC_DLI_ISOPIX1_CONST	0x0000ff10
227 
228 /* Sensor registers */
229 #define FD1_IPC_SENSOR_TH0		0x0140
230 #define FD1_IPC_SENSOR_TH0_CONST	0x20208080
231 
232 #define FD1_IPC_SENSOR_TH1		0x0144
233 #define FD1_IPC_SENSOR_TH1_CONST	0
234 
235 #define FD1_IPC_SENSOR_CTL0		0x0170
236 #define FD1_IPC_SENSOR_CTL0_CONST	0x00002201
237 
238 #define FD1_IPC_SENSOR_CTL1		0x0174
239 #define FD1_IPC_SENSOR_CTL1_CONST	0
240 
241 #define FD1_IPC_SENSOR_CTL2		0x0178
242 #define FD1_IPC_SENSOR_CTL2_X_SHIFT	16
243 #define FD1_IPC_SENSOR_CTL2_Y_SHIFT	0
244 
245 #define FD1_IPC_SENSOR_CTL3		0x017c
246 #define FD1_IPC_SENSOR_CTL3_0_SHIFT	16
247 #define FD1_IPC_SENSOR_CTL3_1_SHIFT	0
248 
249 /* Line memory pixel number register */
250 #define FD1_IPC_LMEM			0x01e0
251 #define FD1_IPC_LMEM_LINEAR		1024
252 #define FD1_IPC_LMEM_TILE		960
253 
254 /* Internal Data (HW Version) */
255 #define FD1_IP_INTDATA			0x0800
256 /* R-Car Gen2 HW manual says zero, but actual value matches R-Car H3 ES1.x */
257 #define FD1_IP_GEN2			0x02010101
258 #define FD1_IP_M3W			0x02010202
259 #define FD1_IP_H3			0x02010203
260 #define FD1_IP_M3N			0x02010204
261 #define FD1_IP_E3			0x02010205
262 
263 /* LUTs */
264 #define FD1_LUT_DIF_ADJ			0x1000
265 #define FD1_LUT_SAD_ADJ			0x1400
266 #define FD1_LUT_BLD_GAIN		0x1800
267 #define FD1_LUT_DIF_GAIN		0x1c00
268 #define FD1_LUT_MDET			0x2000
269 
/**
 * struct fdp1_fmt - The FDP1 internal format data
 * @fourcc: the fourcc code, to match the V4L2 API
 * @bpp: bits per pixel per plane
 * @num_planes: number of planes
 * @hsub: horizontal subsampling factor
 * @vsub: vertical subsampling factor
 * @fmt: 7-bit format code for the fdp1 hardware
 * @swap_yc: the Y and C components are swapped (Y comes before C)
 * @swap_uv: the U and V components are swapped (V comes before U)
 * @swap: swap register control (FD1_RWPF_SWAP_* flags)
 * @types: bitmask of FDP1_CAPTURE/FDP1_OUTPUT queues this format applies to
 */
struct fdp1_fmt {
	u32	fourcc;
	u8	bpp[3];
	u8	num_planes;
	u8	hsub;
	u8	vsub;
	u8	fmt;
	bool	swap_yc;
	bool	swap_uv;
	u8	swap;
	u8	types;
};
295 
static const struct fdp1_fmt fdp1_formats[] = {
	/*
	 * Entry layout follows struct fdp1_fmt:
	 * fourcc, bpp[3], num_planes, hsub, vsub, fmt, swap_yc, swap_uv,
	 * swap flags, queue types.
	 */

	/* RGB formats are only supported by the Write Pixel Formatter */

	{ V4L2_PIX_FMT_RGB332, { 8, 0, 0 }, 1, 1, 1, 0x00, false, false,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
	  FDP1_CAPTURE },
	{ V4L2_PIX_FMT_XRGB444, { 16, 0, 0 }, 1, 1, 1, 0x01, false, false,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD,
	  FDP1_CAPTURE },
	{ V4L2_PIX_FMT_XRGB555, { 16, 0, 0 }, 1, 1, 1, 0x04, false, false,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD,
	  FDP1_CAPTURE },
	{ V4L2_PIX_FMT_RGB565, { 16, 0, 0 }, 1, 1, 1, 0x06, false, false,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD,
	  FDP1_CAPTURE },
	{ V4L2_PIX_FMT_ABGR32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD,
	  FDP1_CAPTURE },
	{ V4L2_PIX_FMT_XBGR32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD,
	  FDP1_CAPTURE },
	{ V4L2_PIX_FMT_ARGB32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
	  FDP1_CAPTURE },
	{ V4L2_PIX_FMT_XRGB32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
	  FDP1_CAPTURE },
	{ V4L2_PIX_FMT_RGB24, { 24, 0, 0 }, 1, 1, 1, 0x15, false, false,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
	  FDP1_CAPTURE },
	{ V4L2_PIX_FMT_BGR24, { 24, 0, 0 }, 1, 1, 1, 0x18, false, false,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
	  FDP1_CAPTURE },
	{ V4L2_PIX_FMT_ARGB444, { 16, 0, 0 }, 1, 1, 1, 0x19, false, false,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD,
	  FDP1_CAPTURE },
	{ V4L2_PIX_FMT_ARGB555, { 16, 0, 0 }, 1, 1, 1, 0x1b, false, false,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD,
	  FDP1_CAPTURE },

	/* YUV Formats are supported by Read and Write Pixel Formatters */

	{ V4L2_PIX_FMT_NV16M, { 8, 16, 0 }, 2, 2, 1, 0x41, false, false,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
	  FDP1_CAPTURE | FDP1_OUTPUT },
	{ V4L2_PIX_FMT_NV61M, { 8, 16, 0 }, 2, 2, 1, 0x41, false, true,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
	  FDP1_CAPTURE | FDP1_OUTPUT },
	{ V4L2_PIX_FMT_NV12M, { 8, 16, 0 }, 2, 2, 2, 0x42, false, false,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
	  FDP1_CAPTURE | FDP1_OUTPUT },
	{ V4L2_PIX_FMT_NV21M, { 8, 16, 0 }, 2, 2, 2, 0x42, false, true,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
	  FDP1_CAPTURE | FDP1_OUTPUT },
	{ V4L2_PIX_FMT_UYVY, { 16, 0, 0 }, 1, 2, 1, 0x47, false, false,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
	  FDP1_CAPTURE | FDP1_OUTPUT },
	{ V4L2_PIX_FMT_VYUY, { 16, 0, 0 }, 1, 2, 1, 0x47, false, true,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
	  FDP1_CAPTURE | FDP1_OUTPUT },
	{ V4L2_PIX_FMT_YUYV, { 16, 0, 0 }, 1, 2, 1, 0x47, true, false,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
	  FDP1_CAPTURE | FDP1_OUTPUT },
	{ V4L2_PIX_FMT_YVYU, { 16, 0, 0 }, 1, 2, 1, 0x47, true, true,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
	  FDP1_CAPTURE | FDP1_OUTPUT },
	{ V4L2_PIX_FMT_YUV444M, { 8, 8, 8 }, 3, 1, 1, 0x4a, false, false,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
	  FDP1_CAPTURE | FDP1_OUTPUT },
	{ V4L2_PIX_FMT_YVU444M, { 8, 8, 8 }, 3, 1, 1, 0x4a, false, true,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
	  FDP1_CAPTURE | FDP1_OUTPUT },
	{ V4L2_PIX_FMT_YUV422M, { 8, 8, 8 }, 3, 2, 1, 0x4b, false, false,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
	  FDP1_CAPTURE | FDP1_OUTPUT },
	{ V4L2_PIX_FMT_YVU422M, { 8, 8, 8 }, 3, 2, 1, 0x4b, false, true,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
	  FDP1_CAPTURE | FDP1_OUTPUT },
	{ V4L2_PIX_FMT_YUV420M, { 8, 8, 8 }, 3, 2, 2, 0x4c, false, false,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
	  FDP1_CAPTURE | FDP1_OUTPUT },
	{ V4L2_PIX_FMT_YVU420M, { 8, 8, 8 }, 3, 2, 2, 0x4c, false, true,
	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
	  FDP1_CAPTURE | FDP1_OUTPUT },
};
405 
fdp1_fmt_is_rgb(const struct fdp1_fmt * fmt)406 static int fdp1_fmt_is_rgb(const struct fdp1_fmt *fmt)
407 {
408 	return fmt->fmt <= 0x1b; /* Last RGB code */
409 }
410 
/*
 * FDP1 Lookup tables range from 0...255 only
 *
 * Each table must be less than 256 entries, and all tables
 * are padded out to 256 entries by duplicating the last value.
 */

/* Difference adjustment curve (padded to 256 entries by fdp1_write_lut) */
static const u8 fdp1_diff_adj[] = {
	0x00, 0x24, 0x43, 0x5e, 0x76, 0x8c, 0x9e, 0xaf,
	0xbd, 0xc9, 0xd4, 0xdd, 0xe4, 0xea, 0xef, 0xf3,
	0xf6, 0xf9, 0xfb, 0xfc, 0xfd, 0xfe, 0xfe, 0xff,
};

/* SAD adjustment curve - same shape as the difference adjustment */
static const u8 fdp1_sad_adj[] = {
	0x00, 0x24, 0x43, 0x5e, 0x76, 0x8c, 0x9e, 0xaf,
	0xbd, 0xc9, 0xd4, 0xdd, 0xe4, 0xea, 0xef, 0xf3,
	0xf6, 0xf9, 0xfb, 0xfc, 0xfd, 0xfe, 0xfe, 0xff,
};

/* Blend gain: constant 0x80 across the whole table after padding */
static const u8 fdp1_bld_gain[] = {
	0x80,
};

/* Difference gain: constant 0x80 across the whole table after padding */
static const u8 fdp1_dif_gain[] = {
	0x80,
};

/* Motion detection LUT: identity ramp 0x00..0xff */
static const u8 fdp1_mdet[] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
};
471 
/* Per-queue, driver-specific private data */
struct fdp1_q_data {
	const struct fdp1_fmt		*fmt;	/* matched fdp1_formats entry */
	struct v4l2_pix_format_mplane	format;	/* negotiated V4L2 format */

	/* Vertical size and strides as written to FD1_RPF_SIZE/PSTRIDE */
	unsigned int			vsize;
	unsigned int			stride_y;
	unsigned int			stride_c;
};
481 
/*
 * fdp1_find_format - look up the format description for a fourcc
 *
 * Returns the matching entry in fdp1_formats, or NULL when the
 * pixelformat is not supported.
 */
static const struct fdp1_fmt *fdp1_find_format(u32 pixelformat)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(fdp1_formats); i++) {
		if (fdp1_formats[i].fourcc == pixelformat)
			return &fdp1_formats[i];
	}

	return NULL;
}
495 
/*
 * De-interlace algorithm selection. FDP1_PROGRESSIVE must stay zero:
 * the RPF configuration treats any non-zero mode as "deinterlacing".
 */
enum fdp1_deint_mode {
	FDP1_PROGRESSIVE = 0, /* Must be zero when !deinterlacing */
	FDP1_ADAPT2D3D,
	FDP1_FIXED2D,
	FDP1_FIXED3D,
	FDP1_PREVFIELD,
	FDP1_NEXTFIELD,
};

/* Modes that read the next field (RPF2 channel) */
#define FDP1_DEINT_MODE_USES_NEXT(mode) \
	(mode == FDP1_ADAPT2D3D || \
	 mode == FDP1_FIXED3D   || \
	 mode == FDP1_NEXTFIELD)

/* Modes that read the previous field (RPF0 channel) */
#define FDP1_DEINT_MODE_USES_PREV(mode) \
	(mode == FDP1_ADAPT2D3D || \
	 mode == FDP1_FIXED3D   || \
	 mode == FDP1_PREVFIELD)
514 
/*
 * FDP1 operates on potentially 3 fields, which are tracked
 * from the VB buffers using this context structure.
 * Will always be a field or a full frame, never two fields.
 */
struct fdp1_field_buffer {
	struct vb2_v4l2_buffer		*vb;
	/* DMA addresses of up to three planes (Y, C0, C1) */
	dma_addr_t			addrs[3];

	/* Should be NONE:TOP:BOTTOM only */
	enum v4l2_field			field;

	/* Flag to indicate this is the last field in the vb */
	bool				last_field;

	/* Buffer queue lists */
	struct list_head		list;
};
533 
/* An m2m buffer together with the (up to two) fields it contains */
struct fdp1_buffer {
	struct v4l2_m2m_buffer		m2m_buf;
	struct fdp1_field_buffer	fields[2];
	unsigned int			num_fields;
};
539 
/* Recover the enclosing fdp1_buffer from its embedded vb2 buffer */
static inline struct fdp1_buffer *to_fdp1_buffer(struct vb2_v4l2_buffer *vb)
{
	return container_of(vb, struct fdp1_buffer, m2m_buf.vb);
}
544 
/*
 * One hardware operation: up to three source fields and one
 * destination buffer (see fdp1_configure_rpf/fdp1_configure_wpf).
 */
struct fdp1_job {
	struct fdp1_field_buffer	*previous;	/* RPF CH0, may be NULL */
	struct fdp1_field_buffer	*active;	/* RPF CH1, field in work */
	struct fdp1_field_buffer	*next;		/* RPF CH2, may be NULL */
	struct fdp1_field_buffer	*dst;		/* WPF destination */

	/* A job can only be on one list at a time */
	struct list_head		list;
};
554 
/* Per-device driver state */
struct fdp1_dev {
	struct v4l2_device		v4l2_dev;
	struct video_device		vfd;

	/*
	 * irqlock protects the job and field lists (see list_add_job /
	 * list_remove_job). NOTE(review): dev_mutex/device_process_lock
	 * scope is not visible in this chunk - confirm against the rest
	 * of the file before relying on it.
	 */
	struct mutex			dev_mutex;
	spinlock_t			irqlock;
	spinlock_t			device_process_lock;

	void __iomem			*regs;
	unsigned int			irq;
	struct device			*dev;

	/* Job Queues */
	struct fdp1_job			jobs[FDP1_NUMBER_JOBS];
	struct list_head		free_job_list;
	struct list_head		queued_job_list;
	struct list_head		hw_job_list;

	unsigned int			clk_rate;

	struct rcar_fcp_device		*fcp;
	struct v4l2_m2m_dev		*m2m_dev;
};
578 
/* Per-open-file (per-stream) context */
struct fdp1_ctx {
	struct v4l2_fh			fh;
	struct fdp1_dev			*fdp1;

	struct v4l2_ctrl_handler	hdl;
	unsigned int			sequence;

	/* Processed buffers in this transaction */
	u8				num_processed;

	/* Transaction length (i.e. how many buffers per transaction) */
	u32				translen;

	/* Abort requested by m2m */
	int				aborting;

	/* Deinterlace processing mode */
	enum fdp1_deint_mode		deint_mode;

	/*
	 * Adaptive 2D/3D mode uses a shared mask
	 * This is allocated at streamon, if the ADAPT2D3D mode
	 * is requested
	 */
	unsigned int			smsk_size;
	dma_addr_t			smsk_addr[2];
	void				*smsk_cpu;

	/*
	 * Capture pipeline, can specify an alpha value
	 * for supported formats. 0-255 only
	 */
	unsigned char			alpha;

	/* Source and destination queue data */
	struct fdp1_q_data		out_q; /* HW Source */
	struct fdp1_q_data		cap_q; /* HW Destination */

	/*
	 * Field Queues
	 * Interlaced fields are used on 3 occasions, and tracked in this list.
	 *
	 * V4L2 Buffers are tracked inside the fdp1_buffer
	 * and released when the last 'field' completes
	 */
	struct list_head		fields_queue;
	unsigned int			buffers_queued;

	/*
	 * For de-interlacing we need to track our previous buffer
	 * while preparing our job lists.
	 */
	struct fdp1_field_buffer	*previous;
};
632 
/* Recover the driver context from an open file handle */
static inline struct fdp1_ctx *file_to_ctx(struct file *filp)
{
	return container_of(file_to_v4l2_fh(filp), struct fdp1_ctx, fh);
}
637 
get_q_data(struct fdp1_ctx * ctx,enum v4l2_buf_type type)638 static struct fdp1_q_data *get_q_data(struct fdp1_ctx *ctx,
639 					 enum v4l2_buf_type type)
640 {
641 	if (V4L2_TYPE_IS_OUTPUT(type))
642 		return &ctx->out_q;
643 	else
644 		return &ctx->cap_q;
645 }
646 
647 /*
648  * list_remove_job: Take the first item off the specified job list
649  *
650  * Returns: pointer to a job, or NULL if the list is empty.
651  */
list_remove_job(struct fdp1_dev * fdp1,struct list_head * list)652 static struct fdp1_job *list_remove_job(struct fdp1_dev *fdp1,
653 					 struct list_head *list)
654 {
655 	struct fdp1_job *job;
656 	unsigned long flags;
657 
658 	spin_lock_irqsave(&fdp1->irqlock, flags);
659 	job = list_first_entry_or_null(list, struct fdp1_job, list);
660 	if (job)
661 		list_del(&job->list);
662 	spin_unlock_irqrestore(&fdp1->irqlock, flags);
663 
664 	return job;
665 }
666 
/*
 * list_add_job: Add a job to the tail of the specified job list
 *
 * Returns: void - always succeeds
 */
static void list_add_job(struct fdp1_dev *fdp1,
			 struct list_head *list,
			 struct fdp1_job *job)
{
	unsigned long flags;

	/* The irqlock protects all job lists (free, queued, hw) */
	spin_lock_irqsave(&fdp1->irqlock, flags);
	list_add_tail(&job->list, list);
	spin_unlock_irqrestore(&fdp1->irqlock, flags);
}
682 
/* Take a job from the free list, or NULL when none are available */
static struct fdp1_job *fdp1_job_alloc(struct fdp1_dev *fdp1)
{
	return list_remove_job(fdp1, &fdp1->free_job_list);
}
687 
fdp1_job_free(struct fdp1_dev * fdp1,struct fdp1_job * job)688 static void fdp1_job_free(struct fdp1_dev *fdp1, struct fdp1_job *job)
689 {
690 	/* Ensure that all residue from previous jobs is gone */
691 	memset(job, 0, sizeof(struct fdp1_job));
692 
693 	list_add_job(fdp1, &fdp1->free_job_list, job);
694 }
695 
/* Place a prepared job on the queued (waiting for hardware) list */
static void queue_job(struct fdp1_dev *fdp1, struct fdp1_job *job)
{
	list_add_job(fdp1, &fdp1->queued_job_list, job);
}
700 
/* Take the next queued job, or NULL when none are pending */
static struct fdp1_job *get_queued_job(struct fdp1_dev *fdp1)
{
	return list_remove_job(fdp1, &fdp1->queued_job_list);
}
705 
/* Track a job that has been handed to the hardware */
static void queue_hw_job(struct fdp1_dev *fdp1, struct fdp1_job *job)
{
	list_add_job(fdp1, &fdp1->hw_job_list, job);
}
710 
/* Take the oldest job handed to the hardware, or NULL if none */
static struct fdp1_job *get_hw_queued_job(struct fdp1_dev *fdp1)
{
	return list_remove_job(fdp1, &fdp1->hw_job_list);
}
715 
716 /*
717  * Buffer lists handling
718  */
fdp1_field_complete(struct fdp1_ctx * ctx,struct fdp1_field_buffer * fbuf)719 static void fdp1_field_complete(struct fdp1_ctx *ctx,
720 				struct fdp1_field_buffer *fbuf)
721 {
722 	/* job->previous may be on the first field */
723 	if (!fbuf)
724 		return;
725 
726 	if (fbuf->last_field)
727 		v4l2_m2m_buf_done(fbuf->vb, VB2_BUF_STATE_DONE);
728 }
729 
/* Add a field to the tail of the context's field queue */
static void fdp1_queue_field(struct fdp1_ctx *ctx,
			     struct fdp1_field_buffer *fbuf)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
	list_add_tail(&fbuf->list, &ctx->fields_queue);
	spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);

	/*
	 * NOTE(review): buffers_queued is updated outside the irqlock;
	 * presumably all updaters are otherwise serialised - confirm,
	 * or move the increment inside the locked region.
	 */
	ctx->buffers_queued++;
}
741 
fdp1_dequeue_field(struct fdp1_ctx * ctx)742 static struct fdp1_field_buffer *fdp1_dequeue_field(struct fdp1_ctx *ctx)
743 {
744 	struct fdp1_field_buffer *fbuf;
745 	unsigned long flags;
746 
747 	ctx->buffers_queued--;
748 
749 	spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
750 	fbuf = list_first_entry_or_null(&ctx->fields_queue,
751 					struct fdp1_field_buffer, list);
752 	if (fbuf)
753 		list_del(&fbuf->list);
754 	spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
755 
756 	return fbuf;
757 }
758 
759 /*
760  * Return the next field in the queue - or NULL,
761  * without removing the item from the list
762  */
fdp1_peek_queued_field(struct fdp1_ctx * ctx)763 static struct fdp1_field_buffer *fdp1_peek_queued_field(struct fdp1_ctx *ctx)
764 {
765 	struct fdp1_field_buffer *fbuf;
766 	unsigned long flags;
767 
768 	spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
769 	fbuf = list_first_entry_or_null(&ctx->fields_queue,
770 					struct fdp1_field_buffer, list);
771 	spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
772 
773 	return fbuf;
774 }
775 
fdp1_read(struct fdp1_dev * fdp1,unsigned int reg)776 static u32 fdp1_read(struct fdp1_dev *fdp1, unsigned int reg)
777 {
778 	u32 value = ioread32(fdp1->regs + reg);
779 
780 	if (debug >= 2)
781 		dprintk(fdp1, "Read 0x%08x from 0x%04x\n", value, reg);
782 
783 	return value;
784 }
785 
/* Write a 32-bit FDP1 register, tracing the access at debug level 2 */
static void fdp1_write(struct fdp1_dev *fdp1, u32 val, unsigned int reg)
{
	if (debug >= 2)
		dprintk(fdp1, "Write 0x%08x to 0x%04x\n", val, reg);

	iowrite32(val, fdp1->regs + reg);
}
793 
794 /* IPC registers are to be programmed with constant values */
fdp1_set_ipc_dli(struct fdp1_ctx * ctx)795 static void fdp1_set_ipc_dli(struct fdp1_ctx *ctx)
796 {
797 	struct fdp1_dev *fdp1 = ctx->fdp1;
798 
799 	fdp1_write(fdp1, FD1_IPC_SMSK_THRESH_CONST,	FD1_IPC_SMSK_THRESH);
800 	fdp1_write(fdp1, FD1_IPC_COMB_DET_CONST,	FD1_IPC_COMB_DET);
801 	fdp1_write(fdp1, FD1_IPC_MOTDEC_CONST,	FD1_IPC_MOTDEC);
802 
803 	fdp1_write(fdp1, FD1_IPC_DLI_BLEND_CONST,	FD1_IPC_DLI_BLEND);
804 	fdp1_write(fdp1, FD1_IPC_DLI_HGAIN_CONST,	FD1_IPC_DLI_HGAIN);
805 	fdp1_write(fdp1, FD1_IPC_DLI_SPRS_CONST,	FD1_IPC_DLI_SPRS);
806 	fdp1_write(fdp1, FD1_IPC_DLI_ANGLE_CONST,	FD1_IPC_DLI_ANGLE);
807 	fdp1_write(fdp1, FD1_IPC_DLI_ISOPIX0_CONST,	FD1_IPC_DLI_ISOPIX0);
808 	fdp1_write(fdp1, FD1_IPC_DLI_ISOPIX1_CONST,	FD1_IPC_DLI_ISOPIX1);
809 }
810 
811 
/*
 * fdp1_set_ipc_sensor - program the IPC sensor block
 *
 * Writes the constant sensor thresholds and controls, then derives the
 * variable ones from the source frame: CTL2 holds the last pixel
 * coordinates, CTL3 splits the width into thirds at x0 and x1.
 */
static void fdp1_set_ipc_sensor(struct fdp1_ctx *ctx)
{
	struct fdp1_dev *fdp1 = ctx->fdp1;
	struct fdp1_q_data *src_q_data = &ctx->out_q;
	unsigned int x0, x1;
	unsigned int hsize = src_q_data->format.width;
	unsigned int vsize = src_q_data->format.height;

	/* Split the horizontal extent into thirds */
	x0 = hsize / 3;
	x1 = 2 * hsize / 3;

	fdp1_write(fdp1, FD1_IPC_SENSOR_TH0_CONST, FD1_IPC_SENSOR_TH0);
	fdp1_write(fdp1, FD1_IPC_SENSOR_TH1_CONST, FD1_IPC_SENSOR_TH1);
	fdp1_write(fdp1, FD1_IPC_SENSOR_CTL0_CONST, FD1_IPC_SENSOR_CTL0);
	fdp1_write(fdp1, FD1_IPC_SENSOR_CTL1_CONST, FD1_IPC_SENSOR_CTL1);

	fdp1_write(fdp1, ((hsize - 1) << FD1_IPC_SENSOR_CTL2_X_SHIFT) |
			 ((vsize - 1) << FD1_IPC_SENSOR_CTL2_Y_SHIFT),
			 FD1_IPC_SENSOR_CTL2);

	fdp1_write(fdp1, (x0 << FD1_IPC_SENSOR_CTL3_0_SHIFT) |
			 (x1 << FD1_IPC_SENSOR_CTL3_1_SHIFT),
			 FD1_IPC_SENSOR_CTL3);
}
836 
837 /*
838  * fdp1_write_lut: Write a padded LUT to the hw
839  *
840  * FDP1 uses constant data for de-interlacing processing,
841  * with large tables. These hardware tables are all 256 bytes
842  * long, however they often contain repeated data at the end.
843  *
844  * The last byte of the table is written to all remaining entries.
845  */
fdp1_write_lut(struct fdp1_dev * fdp1,const u8 * lut,unsigned int len,unsigned int base)846 static void fdp1_write_lut(struct fdp1_dev *fdp1, const u8 *lut,
847 			   unsigned int len, unsigned int base)
848 {
849 	unsigned int i;
850 	u8 pad;
851 
852 	/* Tables larger than the hw are clipped */
853 	len = min(len, 256u);
854 
855 	for (i = 0; i < len; i++)
856 		fdp1_write(fdp1, lut[i], base + (i*4));
857 
858 	/* Tables are padded with the last entry */
859 	pad = lut[i-1];
860 
861 	for (; i < 256; i++)
862 		fdp1_write(fdp1, pad, base + (i*4));
863 }
864 
fdp1_set_lut(struct fdp1_dev * fdp1)865 static void fdp1_set_lut(struct fdp1_dev *fdp1)
866 {
867 	fdp1_write_lut(fdp1, fdp1_diff_adj, ARRAY_SIZE(fdp1_diff_adj),
868 			FD1_LUT_DIF_ADJ);
869 	fdp1_write_lut(fdp1, fdp1_sad_adj,  ARRAY_SIZE(fdp1_sad_adj),
870 			FD1_LUT_SAD_ADJ);
871 	fdp1_write_lut(fdp1, fdp1_bld_gain, ARRAY_SIZE(fdp1_bld_gain),
872 			FD1_LUT_BLD_GAIN);
873 	fdp1_write_lut(fdp1, fdp1_dif_gain, ARRAY_SIZE(fdp1_dif_gain),
874 			FD1_LUT_DIF_GAIN);
875 	fdp1_write_lut(fdp1, fdp1_mdet, ARRAY_SIZE(fdp1_mdet),
876 			FD1_LUT_MDET);
877 }
878 
/*
 * fdp1_configure_rpf - program the Read Pixel Formatter for a job
 *
 * Sets up the source picture size, strides and format, and the DMA
 * addresses for the previous (CH0), current (CH1) and next (CH2)
 * field channels used by the de-interlacer.
 */
static void fdp1_configure_rpf(struct fdp1_ctx *ctx,
			       struct fdp1_job *job)
{
	struct fdp1_dev *fdp1 = ctx->fdp1;
	u32 picture_size;
	u32 pstride;
	u32 format;
	u32 smsk_addr;

	struct fdp1_q_data *q_data = &ctx->out_q;

	/* Picture size is common to Source and Destination frames */
	picture_size = (q_data->format.width << FD1_RPF_SIZE_H_SHIFT)
		     | (q_data->vsize << FD1_RPF_SIZE_V_SHIFT);

	/* Strides */
	pstride = q_data->stride_y << FD1_RPF_PSTRIDE_Y_SHIFT;
	if (q_data->format.num_planes > 1)
		pstride |= q_data->stride_c << FD1_RPF_PSTRIDE_C_SHIFT;

	/* Format control */
	format = q_data->fmt->fmt;
	if (q_data->fmt->swap_yc)
		format |= FD1_RPF_FORMAT_RSPYCS;

	if (q_data->fmt->swap_uv)
		format |= FD1_RPF_FORMAT_RSPUVS;

	/* Alternate the spatial mask buffer between bottom and top fields */
	if (job->active->field == V4L2_FIELD_BOTTOM) {
		format |= FD1_RPF_FORMAT_CF; /* Set for Bottom field */
		smsk_addr = ctx->smsk_addr[0];
	} else {
		smsk_addr = ctx->smsk_addr[1];
	}

	/* Deint mode is non-zero when deinterlacing */
	if (ctx->deint_mode)
		format |= FD1_RPF_FORMAT_CIPM;

	fdp1_write(fdp1, format, FD1_RPF_FORMAT);
	fdp1_write(fdp1, q_data->fmt->swap, FD1_RPF_SWAP);
	fdp1_write(fdp1, picture_size, FD1_RPF_SIZE);
	fdp1_write(fdp1, pstride, FD1_RPF_PSTRIDE);
	fdp1_write(fdp1, smsk_addr, FD1_RPF_SMSK_ADDR);

	/* Previous Field Channel (CH0) */
	if (job->previous)
		fdp1_write(fdp1, job->previous->addrs[0], FD1_RPF0_ADDR_Y);

	/* Current Field Channel (CH1) */
	fdp1_write(fdp1, job->active->addrs[0], FD1_RPF1_ADDR_Y);
	fdp1_write(fdp1, job->active->addrs[1], FD1_RPF1_ADDR_C0);
	fdp1_write(fdp1, job->active->addrs[2], FD1_RPF1_ADDR_C1);

	/* Next Field Channel (CH2) */
	if (job->next)
		fdp1_write(fdp1, job->next->addrs[0], FD1_RPF2_ADDR_Y);
}
937 
/*
 * fdp1_configure_wpf() - Configure the Write Pixel Formatter (output side)
 *
 * Program the WPF destination registers for @job from the capture queue
 * format: strides, the output format code (with optional YCbCr-to-RGB
 * colour space conversion), the padding alpha value, rounding/clipping
 * control, byte swapping, and the destination DMA addresses.
 */
static void fdp1_configure_wpf(struct fdp1_ctx *ctx,
			       struct fdp1_job *job)
{
	struct fdp1_dev *fdp1 = ctx->fdp1;
	struct fdp1_q_data *src_q_data = &ctx->out_q;
	struct fdp1_q_data *q_data = &ctx->cap_q;
	u32 pstride;
	u32 format;
	u32 swap;
	u32 rndctl;

	pstride = q_data->format.plane_fmt[0].bytesperline
		<< FD1_WPF_PSTRIDE_Y_SHIFT;

	if (q_data->format.num_planes > 1)
		pstride |= q_data->format.plane_fmt[1].bytesperline
			<< FD1_WPF_PSTRIDE_C_SHIFT;

	format = q_data->fmt->fmt; /* Output Format Code */

	if (q_data->fmt->swap_yc)
		format |= FD1_WPF_FORMAT_WSPYCS;

	if (q_data->fmt->swap_uv)
		format |= FD1_WPF_FORMAT_WSPUVS;

	if (fdp1_fmt_is_rgb(q_data->fmt)) {
		/* Enable Colour Space conversion */
		format |= FD1_WPF_FORMAT_CSC;

		/*
		 * Set WRTM: choose the conversion matrix from the source
		 * encoding/quantization (709 limited, 601 full range, or
		 * 601 limited range).
		 */
		if (src_q_data->format.ycbcr_enc == V4L2_YCBCR_ENC_709)
			format |= FD1_WPF_FORMAT_WRTM_709_16;
		else if (src_q_data->format.quantization ==
				V4L2_QUANTIZATION_FULL_RANGE)
			format |= FD1_WPF_FORMAT_WRTM_601_0;
		else
			format |= FD1_WPF_FORMAT_WRTM_601_16;
	}

	/* Set an alpha value into the Pad Value */
	format |= ctx->alpha << FD1_WPF_FORMAT_PDV_SHIFT;

	/* Determine picture rounding and clipping */
	rndctl = FD1_WPF_RNDCTL_CBRM; /* Rounding Off */
	rndctl |= FD1_WPF_RNDCTL_CLMD_NOCLIP;

	/* WPF Swap needs both ISWAP and OSWAP setting */
	swap = q_data->fmt->swap << FD1_WPF_SWAP_OSWAP_SHIFT;
	swap |= src_q_data->fmt->swap << FD1_WPF_SWAP_SSWAP_SHIFT;

	fdp1_write(fdp1, format, FD1_WPF_FORMAT);
	fdp1_write(fdp1, rndctl, FD1_WPF_RNDCTL);
	fdp1_write(fdp1, swap, FD1_WPF_SWAP);
	fdp1_write(fdp1, pstride, FD1_WPF_PSTRIDE);

	fdp1_write(fdp1, job->dst->addrs[0], FD1_WPF_ADDR_Y);
	fdp1_write(fdp1, job->dst->addrs[1], FD1_WPF_ADDR_C0);
	fdp1_write(fdp1, job->dst->addrs[2], FD1_WPF_ADDR_C1);
}
998 
/*
 * fdp1_configure_deint_mode() - Program the de-interlacing pipeline mode
 *
 * Select the IPC de-interlacing algorithm and the set of active DMA
 * channels for @job according to the context's deinterlacing mode.
 * Channels that need field history (previous/next field reads, still
 * mask read/write) are only enabled once enough fields have been
 * processed, tracked via ctx->sequence.
 */
static void fdp1_configure_deint_mode(struct fdp1_ctx *ctx,
				      struct fdp1_job *job)
{
	struct fdp1_dev *fdp1 = ctx->fdp1;
	u32 opmode = FD1_CTL_OPMODE_VIMD_NOINTERRUPT;
	u32 ipcmode = FD1_IPC_MODE_DLI; /* Always set */
	u32 channels = FD1_CTL_CHACT_WR | FD1_CTL_CHACT_RD1; /* Always on */

	/* De-interlacing Mode */
	switch (ctx->deint_mode) {
	default:
	case FDP1_PROGRESSIVE:
		dprintk(fdp1, "Progressive Mode\n");
		opmode |= FD1_CTL_OPMODE_PRG;
		ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
		break;
	case FDP1_ADAPT2D3D:
		dprintk(fdp1, "Adapt2D3D Mode\n");
		/* Fall back to 2D until field history exists, or when draining */
		if (ctx->sequence == 0 || ctx->aborting)
			ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
		else
			ipcmode |= FD1_IPC_MODE_DIM_ADAPT2D3D;

		/* Prev/next field reads and still-mask write from field 2 on */
		if (ctx->sequence > 1) {
			channels |= FD1_CTL_CHACT_SMW;
			channels |= FD1_CTL_CHACT_RD0 | FD1_CTL_CHACT_RD2;
		}

		/* Still-mask read only once a mask has been written */
		if (ctx->sequence > 2)
			channels |= FD1_CTL_CHACT_SMR;

		break;
	case FDP1_FIXED3D:
		dprintk(fdp1, "Fixed 3D Mode\n");
		ipcmode |= FD1_IPC_MODE_DIM_FIXED3D;
		/* Except for first and last frame, enable all channels */
		if (!(ctx->sequence == 0 || ctx->aborting))
			channels |= FD1_CTL_CHACT_RD0 | FD1_CTL_CHACT_RD2;
		break;
	case FDP1_FIXED2D:
		dprintk(fdp1, "Fixed 2D Mode\n");
		ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
		/* No extra channels enabled */
		break;
	case FDP1_PREVFIELD:
		dprintk(fdp1, "Previous Field Mode\n");
		ipcmode |= FD1_IPC_MODE_DIM_PREVFIELD;
		channels |= FD1_CTL_CHACT_RD0; /* Previous */
		break;
	case FDP1_NEXTFIELD:
		dprintk(fdp1, "Next Field Mode\n");
		ipcmode |= FD1_IPC_MODE_DIM_NEXTFIELD;
		channels |= FD1_CTL_CHACT_RD2; /* Next */
		break;
	}

	fdp1_write(fdp1, channels,	FD1_CTL_CHACT);
	fdp1_write(fdp1, opmode,	FD1_CTL_OPMODE);
	fdp1_write(fdp1, ipcmode,	FD1_IPC_MODE);
}
1059 
1060 /*
1061  * fdp1_device_process() - Run the hardware
1062  *
1063  * Configure and start the hardware to generate a single frame
1064  * of output given our input parameters.
1065  */
static int fdp1_device_process(struct fdp1_ctx *ctx)

{
	struct fdp1_dev *fdp1 = ctx->fdp1;
	struct fdp1_job *job;
	unsigned long flags;

	/* Serialise hardware programming against concurrent callers */
	spin_lock_irqsave(&fdp1->device_process_lock, flags);

	/* Get a job to process */
	job = get_queued_job(fdp1);
	if (!job) {
		/*
		 * VINT can call us to see if we can queue another job.
		 * If we have no work to do, we simply return.
		 */
		spin_unlock_irqrestore(&fdp1->device_process_lock, flags);
		return 0;
	}

	/* First Frame only? ... */
	fdp1_write(fdp1, FD1_CTL_CLKCTRL_CSTP_N, FD1_CTL_CLKCTRL);

	/* Set the mode, and configuration */
	fdp1_configure_deint_mode(ctx, job);

	/* DLI Static Configuration */
	fdp1_set_ipc_dli(ctx);

	/* Sensor Configuration */
	fdp1_set_ipc_sensor(ctx);

	/* Setup the source picture */
	fdp1_configure_rpf(ctx, job);

	/* Setup the destination picture */
	fdp1_configure_wpf(ctx, job);

	/* Line Memory Pixel Number Register for linear access */
	fdp1_write(fdp1, FD1_IPC_LMEM_LINEAR, FD1_IPC_LMEM);

	/* Enable Interrupts */
	fdp1_write(fdp1, FD1_CTL_IRQ_MASK, FD1_CTL_IRQENB);

	/* Finally, the Immediate Registers */

	/* This job is now in the HW queue */
	queue_hw_job(fdp1, job);

	/* Start the command */
	fdp1_write(fdp1, FD1_CTL_CMD_STRCMD, FD1_CTL_CMD);

	/* Registers will update to HW at next VINT */
	fdp1_write(fdp1, FD1_CTL_REGEND_REGEND, FD1_CTL_REGEND);

	/* Enable VINT Generator */
	fdp1_write(fdp1, FD1_CTL_SGCMD_SGEN, FD1_CTL_SGCMD);

	spin_unlock_irqrestore(&fdp1->device_process_lock, flags);

	return 0;
}
1128 
1129 /*
1130  * mem2mem callbacks
1131  */
1132 
1133 /*
1134  * job_ready() - check whether an instance is ready to be scheduled to run
1135  */
fdp1_m2m_job_ready(void * priv)1136 static int fdp1_m2m_job_ready(void *priv)
1137 {
1138 	struct fdp1_ctx *ctx = priv;
1139 	struct fdp1_q_data *src_q_data = &ctx->out_q;
1140 	int srcbufs = 1;
1141 	int dstbufs = 1;
1142 
1143 	dprintk(ctx->fdp1, "+ Src: %d : Dst: %d\n",
1144 		v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx),
1145 		v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx));
1146 
1147 	/* One output buffer is required for each field */
1148 	if (V4L2_FIELD_HAS_BOTH(src_q_data->format.field))
1149 		dstbufs = 2;
1150 
1151 	if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < srcbufs
1152 	    || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < dstbufs) {
1153 		dprintk(ctx->fdp1, "Not enough buffers available\n");
1154 		return 0;
1155 	}
1156 
1157 	return 1;
1158 }
1159 
fdp1_m2m_job_abort(void * priv)1160 static void fdp1_m2m_job_abort(void *priv)
1161 {
1162 	struct fdp1_ctx *ctx = priv;
1163 
1164 	dprintk(ctx->fdp1, "+\n");
1165 
1166 	/* Will cancel the transaction in the next interrupt handler */
1167 	ctx->aborting = 1;
1168 
1169 	/* Immediate abort sequence */
1170 	fdp1_write(ctx->fdp1, 0, FD1_CTL_SGCMD);
1171 	fdp1_write(ctx->fdp1, FD1_CTL_SRESET_SRST, FD1_CTL_SRESET);
1172 }
1173 
1174 /*
1175  * fdp1_prepare_job: Prepare and queue a new job for a single action of work
1176  *
1177  * Prepare the next field, (or frame in progressive) and an output
1178  * buffer for the hardware to perform a single operation.
1179  */
static struct fdp1_job *fdp1_prepare_job(struct fdp1_ctx *ctx)
{
	struct vb2_v4l2_buffer *vbuf;
	struct fdp1_buffer *fbuf;
	struct fdp1_dev *fdp1 = ctx->fdp1;
	struct fdp1_job *job;
	unsigned int buffers_required = 1;

	dprintk(fdp1, "+\n");

	/* Modes reading the next field need one extra queued field */
	if (FDP1_DEINT_MODE_USES_NEXT(ctx->deint_mode))
		buffers_required = 2;

	if (ctx->buffers_queued < buffers_required)
		return NULL;

	job = fdp1_job_alloc(fdp1);
	if (!job) {
		dprintk(fdp1, "No free jobs currently available\n");
		return NULL;
	}

	job->active = fdp1_dequeue_field(ctx);
	if (!job->active) {
		/* Buffer check should prevent this ever happening */
		dprintk(fdp1, "No input buffers currently available\n");

		fdp1_job_free(fdp1, job);
		return NULL;
	}

	dprintk(fdp1, "+ Buffer en-route...\n");

	/* Source buffers have been prepared on our buffer_queue
	 * Prepare our Output buffer
	 */
	vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
	fbuf = to_fdp1_buffer(vbuf);
	job->dst = &fbuf->fields[0];

	/* Source and destination carry the same sequence number */
	job->active->vb->sequence = ctx->sequence;
	job->dst->vb->sequence = ctx->sequence;
	ctx->sequence++;

	if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode)) {
		job->previous = ctx->previous;

		/* Active buffer becomes the next job's previous buffer */
		ctx->previous = job->active;
	}

	if (FDP1_DEINT_MODE_USES_NEXT(ctx->deint_mode)) {
		/* Must be called after 'active' is dequeued */
		job->next = fdp1_peek_queued_field(ctx);
	}

	/* Transfer timestamps and flags from src->dst */

	job->dst->vb->vb2_buf.timestamp = job->active->vb->vb2_buf.timestamp;

	job->dst->vb->flags = job->active->vb->flags &
				V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

	/* Ideally, the frame-end function will just 'check' to see
	 * if there are more jobs instead
	 */
	ctx->translen++;

	/* Finally, Put this job on the processing queue */
	queue_job(fdp1, job);

	dprintk(fdp1, "Job Queued translen = %d\n", ctx->translen);

	return job;
}
1255 
1256 /* fdp1_m2m_device_run() - prepares and starts the device for an M2M task
1257  *
1258  * A single input buffer is taken and serialised into our fdp1_buffer
1259  * queue. The queue is then processed to create as many jobs as possible
1260  * from our available input.
1261  */
static void fdp1_m2m_device_run(void *priv)
{
	struct fdp1_ctx *ctx = priv;
	struct fdp1_dev *fdp1 = ctx->fdp1;
	struct vb2_v4l2_buffer *src_vb;
	struct fdp1_buffer *buf;
	unsigned int i;

	dprintk(fdp1, "+\n");

	/* Reset the per-transaction job count */
	ctx->translen = 0;

	/* Get our incoming buffer of either one or two fields, or one frame */
	src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	buf = to_fdp1_buffer(src_vb);

	/* Serialise each field of the buffer onto our field queue */
	for (i = 0; i < buf->num_fields; i++) {
		struct fdp1_field_buffer *fbuf = &buf->fields[i];

		fdp1_queue_field(ctx, fbuf);
		dprintk(fdp1, "Queued Buffer [%d] last_field:%d\n",
			i, fbuf->last_field);
	}

	/* Queue as many jobs as our data provides for */
	while (fdp1_prepare_job(ctx))
		;

	/* No job could be built (e.g. waiting for a next field) */
	if (ctx->translen == 0) {
		dprintk(fdp1, "No jobs were processed. M2M action complete\n");
		v4l2_m2m_job_finish(fdp1->m2m_dev, ctx->fh.m2m_ctx);
		return;
	}

	/* Kick the job processing action */
	fdp1_device_process(ctx);
}
1299 
1300 /*
1301  * device_frame_end:
1302  *
1303  * Handles the M2M level after a buffer completion event.
1304  */
static void device_frame_end(struct fdp1_dev *fdp1,
			     enum vb2_buffer_state state)
{
	struct fdp1_ctx *ctx;
	unsigned long flags;
	struct fdp1_job *job = get_hw_queued_job(fdp1);

	dprintk(fdp1, "+\n");

	ctx = v4l2_m2m_get_curr_priv(fdp1->m2m_dev);

	if (ctx == NULL) {
		v4l2_err(&fdp1->v4l2_dev,
			"Instance released before the end of transaction\n");
		return;
	}

	ctx->num_processed++;

	/*
	 * fdp1_field_complete will call buf_done only when the last vb2_buffer
	 * reference is complete
	 */
	if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode))
		fdp1_field_complete(ctx, job->previous);
	else
		fdp1_field_complete(ctx, job->active);

	/* Return the destination buffer with @state under the irq lock */
	spin_lock_irqsave(&fdp1->irqlock, flags);
	v4l2_m2m_buf_done(job->dst->vb, state);
	job->dst = NULL;
	spin_unlock_irqrestore(&fdp1->irqlock, flags);

	/* Move this job back to the free job list */
	fdp1_job_free(fdp1, job);

	dprintk(fdp1, "curr_ctx->num_processed %d curr_ctx->translen %d\n",
		ctx->num_processed, ctx->translen);

	/* Finish the transaction when all its jobs completed (or on abort) */
	if (ctx->num_processed == ctx->translen ||
			ctx->aborting) {
		dprintk(ctx->fdp1, "Finishing transaction\n");
		ctx->num_processed = 0;
		v4l2_m2m_job_finish(fdp1->m2m_dev, ctx->fh.m2m_ctx);
	} else {
		/*
		 * For pipelined performance support, this would
		 * be called from a VINT handler
		 */
		fdp1_device_process(ctx);
	}
}
1357 
1358 /*
1359  * video ioctls
1360  */
fdp1_vidioc_querycap(struct file * file,void * priv,struct v4l2_capability * cap)1361 static int fdp1_vidioc_querycap(struct file *file, void *priv,
1362 			   struct v4l2_capability *cap)
1363 {
1364 	strscpy(cap->driver, DRIVER_NAME, sizeof(cap->driver));
1365 	strscpy(cap->card, DRIVER_NAME, sizeof(cap->card));
1366 	snprintf(cap->bus_info, sizeof(cap->bus_info),
1367 		 "platform:%s", DRIVER_NAME);
1368 	return 0;
1369 }
1370 
/*
 * fdp1_enum_fmt() - Enumerate the formats matching @type
 *
 * Walk the format table counting only entries whose type flags match
 * (FDP1_CAPTURE or FDP1_OUTPUT), and return the fourcc of the entry at
 * f->index, or -EINVAL when the index is out of range.
 */
static int fdp1_enum_fmt(struct v4l2_fmtdesc *f, u32 type)
{
	unsigned int num = 0;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(fdp1_formats); ++i) {
		if (!(fdp1_formats[i].types & type))
			continue;

		if (num == f->index) {
			/* Format found */
			f->pixelformat = fdp1_formats[i].fourcc;
			return 0;
		}

		++num;
	}

	/* Format not found */
	return -EINVAL;
}
1394 
/* VIDIOC_ENUM_FMT on the capture queue. */
static int fdp1_enum_fmt_vid_cap(struct file *file, void *priv,
				 struct v4l2_fmtdesc *f)
{
	return fdp1_enum_fmt(f, FDP1_CAPTURE);
}
1400 
/* VIDIOC_ENUM_FMT on the output queue. */
static int fdp1_enum_fmt_vid_out(struct file *file, void *priv,
				   struct v4l2_fmtdesc *f)
{
	return fdp1_enum_fmt(f, FDP1_OUTPUT);
}
1406 
fdp1_g_fmt(struct file * file,void * priv,struct v4l2_format * f)1407 static int fdp1_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
1408 {
1409 	struct fdp1_ctx *ctx = file_to_ctx(file);
1410 	struct fdp1_q_data *q_data;
1411 
1412 	q_data = get_q_data(ctx, f->type);
1413 	f->fmt.pix_mp = q_data->format;
1414 
1415 	return 0;
1416 }
1417 
fdp1_compute_stride(struct v4l2_pix_format_mplane * pix,const struct fdp1_fmt * fmt)1418 static void fdp1_compute_stride(struct v4l2_pix_format_mplane *pix,
1419 				const struct fdp1_fmt *fmt)
1420 {
1421 	unsigned int i;
1422 
1423 	/* Compute and clamp the stride and image size. */
1424 	for (i = 0; i < min_t(unsigned int, fmt->num_planes, 2U); ++i) {
1425 		unsigned int hsub = i > 0 ? fmt->hsub : 1;
1426 		unsigned int vsub = i > 0 ? fmt->vsub : 1;
1427 		 /* From VSP : TODO: Confirm alignment limits for FDP1 */
1428 		unsigned int align = 128;
1429 		unsigned int bpl;
1430 
1431 		bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
1432 			      pix->width / hsub * fmt->bpp[i] / 8,
1433 			      round_down(FDP1_MAX_STRIDE, align));
1434 
1435 		pix->plane_fmt[i].bytesperline = round_up(bpl, align);
1436 		pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
1437 					    * pix->height / vsub;
1438 
1439 	}
1440 
1441 	if (fmt->num_planes == 3) {
1442 		/* The two chroma planes must have the same stride. */
1443 		pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
1444 		pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
1445 
1446 	}
1447 }
1448 
/*
 * fdp1_try_fmt_output() - Adjust a format for the output (source) queue
 *
 * Validate and adjust @pix in place: fall back to YUYV for unsupported
 * pixel formats, normalise the field order and colorspace, and clamp
 * the frame size to the hardware limits. The adjusted format info is
 * optionally returned through @fmtinfo.
 */
static void fdp1_try_fmt_output(struct fdp1_ctx *ctx,
				const struct fdp1_fmt **fmtinfo,
				struct v4l2_pix_format_mplane *pix)
{
	const struct fdp1_fmt *fmt;
	unsigned int width;
	unsigned int height;

	/* Validate the pixel format to ensure the output queue supports it. */
	fmt = fdp1_find_format(pix->pixelformat);
	if (!fmt || !(fmt->types & FDP1_OUTPUT))
		fmt = fdp1_find_format(V4L2_PIX_FMT_YUYV);

	if (fmtinfo)
		*fmtinfo = fmt;

	pix->pixelformat = fmt->fourcc;
	pix->num_planes = fmt->num_planes;

	/*
	 * Progressive video and all interlaced field orders are acceptable.
	 * Default to V4L2_FIELD_INTERLACED.
	 */
	if (pix->field != V4L2_FIELD_NONE &&
	    pix->field != V4L2_FIELD_ALTERNATE &&
	    !V4L2_FIELD_HAS_BOTH(pix->field))
		pix->field = V4L2_FIELD_INTERLACED;

	/*
	 * The deinterlacer doesn't care about the colorspace, accept all values
	 * and default to V4L2_COLORSPACE_SMPTE170M. The YUV to RGB conversion
	 * at the output of the deinterlacer supports a subset of encodings and
	 * quantization methods and will only be available when the colorspace
	 * allows it.
	 */
	if (pix->colorspace == V4L2_COLORSPACE_DEFAULT)
		pix->colorspace = V4L2_COLORSPACE_SMPTE170M;

	/*
	 * Align the width and height for YUV 4:2:2 and 4:2:0 formats and clamp
	 * them to the supported frame size range. The height boundary are
	 * related to the full frame, divide them by two when the format passes
	 * fields in separate buffers.
	 */
	width = round_down(pix->width, fmt->hsub);
	pix->width = clamp(width, FDP1_MIN_W, FDP1_MAX_W);

	height = round_down(pix->height, fmt->vsub);
	if (pix->field == V4L2_FIELD_ALTERNATE)
		pix->height = clamp(height, FDP1_MIN_H / 2, FDP1_MAX_H / 2);
	else
		pix->height = clamp(height, FDP1_MIN_H, FDP1_MAX_H);

	fdp1_compute_stride(pix, fmt);
}
1504 
/*
 * fdp1_try_fmt_capture() - Adjust a format for the capture queue
 *
 * Validate and adjust @pix against the currently configured output
 * queue format: the capture side inherits colorspace and frame size
 * (height doubled for V4L2_FIELD_ALTERNATE sources), is always
 * progressive, and may only select RGB when the source encoding and
 * quantization are supported by the hardware conversion.
 */
static void fdp1_try_fmt_capture(struct fdp1_ctx *ctx,
				 const struct fdp1_fmt **fmtinfo,
				 struct v4l2_pix_format_mplane *pix)
{
	struct fdp1_q_data *src_data = &ctx->out_q;
	enum v4l2_colorspace colorspace;
	enum v4l2_ycbcr_encoding ycbcr_enc;
	enum v4l2_quantization quantization;
	const struct fdp1_fmt *fmt;
	bool allow_rgb;

	/*
	 * Validate the pixel format. We can only accept RGB output formats if
	 * the input encoding and quantization are compatible with the format
	 * conversions supported by the hardware. The supported combinations are
	 *
	 * V4L2_YCBCR_ENC_601 + V4L2_QUANTIZATION_LIM_RANGE
	 * V4L2_YCBCR_ENC_601 + V4L2_QUANTIZATION_FULL_RANGE
	 * V4L2_YCBCR_ENC_709 + V4L2_QUANTIZATION_LIM_RANGE
	 */
	colorspace = src_data->format.colorspace;

	ycbcr_enc = src_data->format.ycbcr_enc;
	if (ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
		ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(colorspace);

	quantization = src_data->format.quantization;
	if (quantization == V4L2_QUANTIZATION_DEFAULT)
		quantization = V4L2_MAP_QUANTIZATION_DEFAULT(false, colorspace,
							     ycbcr_enc);

	allow_rgb = ycbcr_enc == V4L2_YCBCR_ENC_601 ||
		    (ycbcr_enc == V4L2_YCBCR_ENC_709 &&
		     quantization == V4L2_QUANTIZATION_LIM_RANGE);

	fmt = fdp1_find_format(pix->pixelformat);
	if (!fmt || (!allow_rgb && fdp1_fmt_is_rgb(fmt)))
		fmt = fdp1_find_format(V4L2_PIX_FMT_YUYV);

	if (fmtinfo)
		*fmtinfo = fmt;

	pix->pixelformat = fmt->fourcc;
	pix->num_planes = fmt->num_planes;
	pix->field = V4L2_FIELD_NONE;

	/*
	 * The colorspace on the capture queue is copied from the output queue
	 * as the hardware can't change the colorspace. It can convert YCbCr to
	 * RGB though, in which case the encoding and quantization are set to
	 * default values as anything else wouldn't make sense.
	 */
	pix->colorspace = src_data->format.colorspace;
	pix->xfer_func = src_data->format.xfer_func;

	if (fdp1_fmt_is_rgb(fmt)) {
		pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
		pix->quantization = V4L2_QUANTIZATION_DEFAULT;
	} else {
		pix->ycbcr_enc = src_data->format.ycbcr_enc;
		pix->quantization = src_data->format.quantization;
	}

	/*
	 * The frame width is identical to the output queue, and the height is
	 * either doubled or identical depending on whether the output queue
	 * field order contains one or two fields per frame.
	 */
	pix->width = src_data->format.width;
	if (src_data->format.field == V4L2_FIELD_ALTERNATE)
		pix->height = 2 * src_data->format.height;
	else
		pix->height = src_data->format.height;

	fdp1_compute_stride(pix, fmt);
}
1581 
fdp1_try_fmt(struct file * file,void * priv,struct v4l2_format * f)1582 static int fdp1_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
1583 {
1584 	struct fdp1_ctx *ctx = file_to_ctx(file);
1585 
1586 	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
1587 		fdp1_try_fmt_output(ctx, NULL, &f->fmt.pix_mp);
1588 	else
1589 		fdp1_try_fmt_capture(ctx, NULL, &f->fmt.pix_mp);
1590 
1591 	dprintk(ctx->fdp1, "Try %s format: %4.4s (0x%08x) %ux%u field %u\n",
1592 		V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture",
1593 		(char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat,
1594 		f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field);
1595 
1596 	return 0;
1597 }
1598 
/*
 * fdp1_set_format() - Apply a validated format to a queue
 *
 * Run the try handler for @type, store the result in the queue state
 * (including the derived per-field vertical size and luma/chroma
 * strides, doubled for field-interleaved memory layouts), and, when the
 * output queue is set, propagate a matching format to the capture
 * queue.
 */
static void fdp1_set_format(struct fdp1_ctx *ctx,
			    struct v4l2_pix_format_mplane *pix,
			    enum v4l2_buf_type type)
{
	struct fdp1_q_data *q_data = get_q_data(ctx, type);
	const struct fdp1_fmt *fmtinfo;

	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		fdp1_try_fmt_output(ctx, &fmtinfo, pix);
	else
		fdp1_try_fmt_capture(ctx, &fmtinfo, pix);

	q_data->fmt = fmtinfo;
	q_data->format = *pix;

	/* vsize is the height of a single field */
	q_data->vsize = pix->height;
	if (pix->field != V4L2_FIELD_NONE)
		q_data->vsize /= 2;

	q_data->stride_y = pix->plane_fmt[0].bytesperline;
	q_data->stride_c = pix->plane_fmt[1].bytesperline;

	/* Adjust strides for interleaved buffers */
	if (pix->field == V4L2_FIELD_INTERLACED ||
	    pix->field == V4L2_FIELD_INTERLACED_TB ||
	    pix->field == V4L2_FIELD_INTERLACED_BT) {
		q_data->stride_y *= 2;
		q_data->stride_c *= 2;
	}

	/* Propagate the format from the output node to the capture node. */
	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		struct fdp1_q_data *dst_data = &ctx->cap_q;

		/*
		 * Copy the format, clear the per-plane bytes per line and image
		 * size, override the field and double the height if needed.
		 */
		dst_data->format = q_data->format;
		memset(dst_data->format.plane_fmt, 0,
		       sizeof(dst_data->format.plane_fmt));

		dst_data->format.field = V4L2_FIELD_NONE;
		if (pix->field == V4L2_FIELD_ALTERNATE)
			dst_data->format.height *= 2;

		fdp1_try_fmt_capture(ctx, &dst_data->fmt, &dst_data->format);

		dst_data->vsize = dst_data->format.height;
		dst_data->stride_y = dst_data->format.plane_fmt[0].bytesperline;
		dst_data->stride_c = dst_data->format.plane_fmt[1].bytesperline;
	}
}
1652 
fdp1_s_fmt(struct file * file,void * priv,struct v4l2_format * f)1653 static int fdp1_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
1654 {
1655 	struct fdp1_ctx *ctx = file_to_ctx(file);
1656 	struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
1657 	struct vb2_queue *vq = v4l2_m2m_get_vq(m2m_ctx, f->type);
1658 
1659 	if (vb2_is_busy(vq)) {
1660 		v4l2_err(&ctx->fdp1->v4l2_dev, "%s queue busy\n", __func__);
1661 		return -EBUSY;
1662 	}
1663 
1664 	fdp1_set_format(ctx, &f->fmt.pix_mp, f->type);
1665 
1666 	dprintk(ctx->fdp1, "Set %s format: %4.4s (0x%08x) %ux%u field %u\n",
1667 		V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture",
1668 		(char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat,
1669 		f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field);
1670 
1671 	return 0;
1672 }
1673 
/*
 * fdp1_g_ctrl() - volatile control read handler
 *
 * V4L2_CID_MIN_BUFFERS_FOR_CAPTURE reports how many capture buffers are
 * produced per source buffer: two when the source format carries both
 * fields in one buffer, otherwise one.
 */
static int fdp1_g_ctrl(struct v4l2_ctrl *ctrl)
{
	struct fdp1_ctx *ctx =
		container_of(ctrl->handler, struct fdp1_ctx, hdl);
	struct fdp1_q_data *src_q_data = &ctx->out_q;

	switch (ctrl->id) {
	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
		if (V4L2_FIELD_HAS_BOTH(src_q_data->format.field))
			ctrl->val = 2;
		else
			ctrl->val = 1;
		return 0;
	}

	/* NOTE(review): unhandled IDs return 1 rather than -EINVAL — confirm */
	return 1;
}
1691 
fdp1_s_ctrl(struct v4l2_ctrl * ctrl)1692 static int fdp1_s_ctrl(struct v4l2_ctrl *ctrl)
1693 {
1694 	struct fdp1_ctx *ctx =
1695 		container_of(ctrl->handler, struct fdp1_ctx, hdl);
1696 
1697 	switch (ctrl->id) {
1698 	case V4L2_CID_ALPHA_COMPONENT:
1699 		ctx->alpha = ctrl->val;
1700 		break;
1701 
1702 	case V4L2_CID_DEINTERLACING_MODE:
1703 		ctx->deint_mode = ctrl->val;
1704 		break;
1705 	}
1706 
1707 	return 0;
1708 }
1709 
/* Control handler operations: s_ctrl caches values, g_volatile_ctrl reports. */
static const struct v4l2_ctrl_ops fdp1_ctrl_ops = {
	.s_ctrl = fdp1_s_ctrl,
	.g_volatile_ctrl = fdp1_g_ctrl,
};
1714 
/*
 * Menu entries for V4L2_CID_DEINTERLACING_MODE. The index order must
 * match the FDP1_* deinterlacing mode values used by ctx->deint_mode
 * (NOTE(review): verify against the enum definition).
 */
static const char * const fdp1_ctrl_deint_menu[] = {
	"Progressive",
	"Adaptive 2D/3D",
	"Fixed 2D",
	"Fixed 3D",
	"Previous field",
	"Next field",
	NULL
};
1724 
/* ioctl dispatch table; buffer and streaming ops use the m2m helpers. */
static const struct v4l2_ioctl_ops fdp1_ioctl_ops = {
	.vidioc_querycap	= fdp1_vidioc_querycap,

	.vidioc_enum_fmt_vid_cap	= fdp1_enum_fmt_vid_cap,
	.vidioc_enum_fmt_vid_out	= fdp1_enum_fmt_vid_out,
	.vidioc_g_fmt_vid_cap_mplane	= fdp1_g_fmt,
	.vidioc_g_fmt_vid_out_mplane	= fdp1_g_fmt,
	.vidioc_try_fmt_vid_cap_mplane	= fdp1_try_fmt,
	.vidioc_try_fmt_vid_out_mplane	= fdp1_try_fmt,
	.vidioc_s_fmt_vid_cap_mplane	= fdp1_s_fmt,
	.vidioc_s_fmt_vid_out_mplane	= fdp1_s_fmt,

	.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
	.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
	.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
	.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
	.vidioc_prepare_buf	= v4l2_m2m_ioctl_prepare_buf,
	.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
	.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,

	.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
	.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,

	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
1751 
1752 /*
1753  * Queue operations
1754  */
1755 
fdp1_queue_setup(struct vb2_queue * vq,unsigned int * nbuffers,unsigned int * nplanes,unsigned int sizes[],struct device * alloc_ctxs[])1756 static int fdp1_queue_setup(struct vb2_queue *vq,
1757 				unsigned int *nbuffers, unsigned int *nplanes,
1758 				unsigned int sizes[],
1759 				struct device *alloc_ctxs[])
1760 {
1761 	struct fdp1_ctx *ctx = vb2_get_drv_priv(vq);
1762 	struct fdp1_q_data *q_data;
1763 	unsigned int i;
1764 
1765 	q_data = get_q_data(ctx, vq->type);
1766 
1767 	if (*nplanes) {
1768 		if (*nplanes > FDP1_MAX_PLANES)
1769 			return -EINVAL;
1770 
1771 		return 0;
1772 	}
1773 
1774 	*nplanes = q_data->format.num_planes;
1775 
1776 	for (i = 0; i < *nplanes; i++)
1777 		sizes[i] = q_data->format.plane_fmt[i].sizeimage;
1778 
1779 	return 0;
1780 }
1781 
/*
 * fdp1_buf_prepare_field() - Prepare one field descriptor of a buffer
 *
 * Fill buf->fields[@field_num]: record the owning vb2 buffer, whether
 * this is the last field of the buffer, the per-plane DMA addresses,
 * and the field polarity derived from the buffer's field order. For the
 * second field of a two-field buffer, offset the plane addresses by one
 * line (interleaved layouts) or one field (sequential layouts).
 */
static void fdp1_buf_prepare_field(struct fdp1_q_data *q_data,
				   struct vb2_v4l2_buffer *vbuf,
				   unsigned int field_num)
{
	struct fdp1_buffer *buf = to_fdp1_buffer(vbuf);
	struct fdp1_field_buffer *fbuf = &buf->fields[field_num];
	unsigned int num_fields;
	unsigned int i;

	num_fields = V4L2_FIELD_HAS_BOTH(vbuf->field) ? 2 : 1;

	fbuf->vb = vbuf;
	fbuf->last_field = (field_num + 1) == num_fields;

	for (i = 0; i < vbuf->vb2_buf.num_planes; ++i)
		fbuf->addrs[i] = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, i);

	switch (vbuf->field) {
	case V4L2_FIELD_INTERLACED:
		/*
		 * Interlaced means bottom-top for 60Hz TV standards (NTSC) and
		 * top-bottom for 50Hz. As TV standards are not applicable to
		 * the mem-to-mem API, use the height as a heuristic.
		 */
		fbuf->field = (q_data->format.height < 576) == field_num
			    ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
		break;
	case V4L2_FIELD_INTERLACED_TB:
	case V4L2_FIELD_SEQ_TB:
		fbuf->field = field_num ? V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;
		break;
	case V4L2_FIELD_INTERLACED_BT:
	case V4L2_FIELD_SEQ_BT:
		fbuf->field = field_num ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
		break;
	default:
		fbuf->field = vbuf->field;
		break;
	}

	/* Buffer is completed */
	if (!field_num)
		return;

	/* Adjust buffer addresses for second field */
	switch (vbuf->field) {
	case V4L2_FIELD_INTERLACED:
	case V4L2_FIELD_INTERLACED_TB:
	case V4L2_FIELD_INTERLACED_BT:
		/* Interleaved: second field starts one line in */
		for (i = 0; i < vbuf->vb2_buf.num_planes; i++)
			fbuf->addrs[i] +=
				(i == 0 ? q_data->stride_y : q_data->stride_c);
		break;
	case V4L2_FIELD_SEQ_TB:
	case V4L2_FIELD_SEQ_BT:
		/* Sequential: second field starts one whole field in */
		for (i = 0; i < vbuf->vb2_buf.num_planes; i++)
			fbuf->addrs[i] += q_data->vsize *
				(i == 0 ? q_data->stride_y : q_data->stride_c);
		break;
	}
}
1843 
/*
 * fdp1_buf_prepare() - vb2 buf_prepare callback
 *
 * Validate the buffer's field against the queue format (output queue
 * only; capture buffers are forced progressive), check the plane sizes,
 * set the payloads, and build the per-field descriptors.
 */
static int fdp1_buf_prepare(struct vb2_buffer *vb)
{
	struct fdp1_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct fdp1_q_data *q_data = get_q_data(ctx, vb->vb2_queue->type);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct fdp1_buffer *buf = to_fdp1_buffer(vbuf);
	unsigned int i;

	if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
		bool field_valid = true;

		/* Validate the buffer field. */
		switch (q_data->format.field) {
		case V4L2_FIELD_NONE:
			if (vbuf->field != V4L2_FIELD_NONE)
				field_valid = false;
			break;

		case V4L2_FIELD_ALTERNATE:
			if (vbuf->field != V4L2_FIELD_TOP &&
			    vbuf->field != V4L2_FIELD_BOTTOM)
				field_valid = false;
			break;

		case V4L2_FIELD_INTERLACED:
		case V4L2_FIELD_SEQ_TB:
		case V4L2_FIELD_SEQ_BT:
		case V4L2_FIELD_INTERLACED_TB:
		case V4L2_FIELD_INTERLACED_BT:
			if (vbuf->field != q_data->format.field)
				field_valid = false;
			break;
		}

		if (!field_valid) {
			dprintk(ctx->fdp1,
				"buffer field %u invalid for format field %u\n",
				vbuf->field, q_data->format.field);
			return -EINVAL;
		}
	} else {
		vbuf->field = V4L2_FIELD_NONE;
	}

	/* Validate the planes sizes. */
	for (i = 0; i < q_data->format.num_planes; i++) {
		unsigned long size = q_data->format.plane_fmt[i].sizeimage;

		if (vb2_plane_size(vb, i) < size) {
			dprintk(ctx->fdp1,
				"data will not fit into plane [%u/%u] (%lu < %lu)\n",
				i, q_data->format.num_planes,
				vb2_plane_size(vb, i), size);
			return -EINVAL;
		}

		/* We have known size formats all around */
		vb2_set_plane_payload(vb, i, size);
	}

	/* One descriptor per field carried in the buffer */
	buf->num_fields = V4L2_FIELD_HAS_BOTH(vbuf->field) ? 2 : 1;
	for (i = 0; i < buf->num_fields; ++i)
		fdp1_buf_prepare_field(q_data, vbuf, i);

	return 0;
}
1910 
fdp1_buf_queue(struct vb2_buffer * vb)1911 static void fdp1_buf_queue(struct vb2_buffer *vb)
1912 {
1913 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1914 	struct fdp1_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1915 
1916 	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
1917 }
1918 
fdp1_start_streaming(struct vb2_queue * q,unsigned int count)1919 static int fdp1_start_streaming(struct vb2_queue *q, unsigned int count)
1920 {
1921 	struct fdp1_ctx *ctx = vb2_get_drv_priv(q);
1922 	struct fdp1_q_data *q_data = get_q_data(ctx, q->type);
1923 
1924 	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
1925 		/*
1926 		 * Force our deint_mode when we are progressive,
1927 		 * ignoring any setting on the device from the user,
1928 		 * Otherwise, lock in the requested de-interlace mode.
1929 		 */
1930 		if (q_data->format.field == V4L2_FIELD_NONE)
1931 			ctx->deint_mode = FDP1_PROGRESSIVE;
1932 
1933 		if (ctx->deint_mode == FDP1_ADAPT2D3D) {
1934 			u32 stride;
1935 			dma_addr_t smsk_base;
1936 			const u32 bpp = 2; /* bytes per pixel */
1937 
1938 			stride = round_up(q_data->format.width, 8);
1939 
1940 			ctx->smsk_size = bpp * stride * q_data->vsize;
1941 
1942 			ctx->smsk_cpu = dma_alloc_coherent(ctx->fdp1->dev,
1943 				ctx->smsk_size, &smsk_base, GFP_KERNEL);
1944 
1945 			if (ctx->smsk_cpu == NULL) {
1946 				dprintk(ctx->fdp1, "Failed to alloc smsk\n");
1947 				return -ENOMEM;
1948 			}
1949 
1950 			ctx->smsk_addr[0] = smsk_base;
1951 			ctx->smsk_addr[1] = smsk_base + (ctx->smsk_size/2);
1952 		}
1953 	}
1954 
1955 	return 0;
1956 }
1957 
/*
 * fdp1_stop_streaming - vb2 stop_streaming handler
 *
 * Returns every buffer still owned by the driver on this queue to vb2 in
 * the ERROR state, then drains the internal per-direction bookkeeping:
 * queued field buffers and the still-mask buffer for the output side, or
 * queued jobs and the held previous field for the capture side.
 */
static void fdp1_stop_streaming(struct vb2_queue *q)
{
	struct fdp1_ctx *ctx = vb2_get_drv_priv(q);
	struct vb2_v4l2_buffer *vbuf;
	unsigned long flags;

	/* Give back all buffers the m2m framework still holds for us. */
	while (1) {
		if (V4L2_TYPE_IS_OUTPUT(q->type))
			vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
		else
			vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
		if (vbuf == NULL)
			break;
		/* buf_done is serialised against the IRQ completion path. */
		spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
		v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
		spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
	}

	/* Empty Output queues */
	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		/* Empty our internal queues */
		struct fdp1_field_buffer *fbuf;

		/* Free any queued buffers */
		fbuf = fdp1_dequeue_field(ctx);
		while (fbuf != NULL) {
			fdp1_field_complete(ctx, fbuf);
			fbuf = fdp1_dequeue_field(ctx);
		}

		/* Free smsk_data */
		if (ctx->smsk_cpu) {
			dma_free_coherent(ctx->fdp1->dev, ctx->smsk_size,
					  ctx->smsk_cpu, ctx->smsk_addr[0]);
			ctx->smsk_addr[0] = ctx->smsk_addr[1] = 0;
			ctx->smsk_cpu = NULL;
		}

		WARN(!list_empty(&ctx->fields_queue),
		     "Buffer queue not empty");
	} else {
		/* Empty Capture queues (Jobs) */
		struct fdp1_job *job;

		/*
		 * Complete the source field attached to each queued job
		 * (the 'previous' one for modes that reference it), then
		 * return the destination buffer with an error status.
		 */
		job = get_queued_job(ctx->fdp1);
		while (job) {
			if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode))
				fdp1_field_complete(ctx, job->previous);
			else
				fdp1_field_complete(ctx, job->active);

			v4l2_m2m_buf_done(job->dst->vb, VB2_BUF_STATE_ERROR);
			job->dst = NULL;

			job = get_queued_job(ctx->fdp1);
		}

		/* Free any held buffer in the ctx */
		fdp1_field_complete(ctx, ctx->previous);

		WARN(!list_empty(&ctx->fdp1->queued_job_list),
		     "Queued Job List not empty");

		WARN(!list_empty(&ctx->fdp1->hw_job_list),
		     "HW Job list not empty");
	}
}
2025 
/* vb2 queue operations, shared by the source and destination queues. */
static const struct vb2_ops fdp1_qops = {
	.queue_setup	 = fdp1_queue_setup,
	.buf_prepare	 = fdp1_buf_prepare,
	.buf_queue	 = fdp1_buf_queue,
	.start_streaming = fdp1_start_streaming,
	.stop_streaming  = fdp1_stop_streaming,
};
2033 
queue_init(void * priv,struct vb2_queue * src_vq,struct vb2_queue * dst_vq)2034 static int queue_init(void *priv, struct vb2_queue *src_vq,
2035 		      struct vb2_queue *dst_vq)
2036 {
2037 	struct fdp1_ctx *ctx = priv;
2038 	int ret;
2039 
2040 	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
2041 	src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
2042 	src_vq->drv_priv = ctx;
2043 	src_vq->buf_struct_size = sizeof(struct fdp1_buffer);
2044 	src_vq->ops = &fdp1_qops;
2045 	src_vq->mem_ops = &vb2_dma_contig_memops;
2046 	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
2047 	src_vq->lock = &ctx->fdp1->dev_mutex;
2048 	src_vq->dev = ctx->fdp1->dev;
2049 
2050 	ret = vb2_queue_init(src_vq);
2051 	if (ret)
2052 		return ret;
2053 
2054 	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
2055 	dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
2056 	dst_vq->drv_priv = ctx;
2057 	dst_vq->buf_struct_size = sizeof(struct fdp1_buffer);
2058 	dst_vq->ops = &fdp1_qops;
2059 	dst_vq->mem_ops = &vb2_dma_contig_memops;
2060 	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
2061 	dst_vq->lock = &ctx->fdp1->dev_mutex;
2062 	dst_vq->dev = ctx->fdp1->dev;
2063 
2064 	return vb2_queue_init(dst_vq);
2065 }
2066 
2067 /*
2068  * File operations
2069  */
fdp1_open(struct file * file)2070 static int fdp1_open(struct file *file)
2071 {
2072 	struct fdp1_dev *fdp1 = video_drvdata(file);
2073 	struct v4l2_pix_format_mplane format;
2074 	struct fdp1_ctx *ctx = NULL;
2075 	struct v4l2_ctrl *ctrl;
2076 	int ret = 0;
2077 
2078 	if (mutex_lock_interruptible(&fdp1->dev_mutex))
2079 		return -ERESTARTSYS;
2080 
2081 	ctx = kzalloc_obj(*ctx);
2082 	if (!ctx) {
2083 		ret = -ENOMEM;
2084 		goto done;
2085 	}
2086 
2087 	v4l2_fh_init(&ctx->fh, video_devdata(file));
2088 	ctx->fdp1 = fdp1;
2089 
2090 	/* Initialise Queues */
2091 	INIT_LIST_HEAD(&ctx->fields_queue);
2092 
2093 	ctx->translen = 1;
2094 	ctx->sequence = 0;
2095 
2096 	/* Initialise controls */
2097 
2098 	v4l2_ctrl_handler_init(&ctx->hdl, 3);
2099 	v4l2_ctrl_new_std_menu_items(&ctx->hdl, &fdp1_ctrl_ops,
2100 				     V4L2_CID_DEINTERLACING_MODE,
2101 				     FDP1_NEXTFIELD, BIT(0), FDP1_FIXED3D,
2102 				     fdp1_ctrl_deint_menu);
2103 
2104 	ctrl = v4l2_ctrl_new_std(&ctx->hdl, &fdp1_ctrl_ops,
2105 				 V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 2, 1, 1);
2106 	if (ctrl)
2107 		ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
2108 
2109 	v4l2_ctrl_new_std(&ctx->hdl, &fdp1_ctrl_ops,
2110 			  V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
2111 
2112 	if (ctx->hdl.error) {
2113 		ret = ctx->hdl.error;
2114 		goto error_ctx;
2115 	}
2116 
2117 	ctx->fh.ctrl_handler = &ctx->hdl;
2118 	v4l2_ctrl_handler_setup(&ctx->hdl);
2119 
2120 	/* Configure default parameters. */
2121 	memset(&format, 0, sizeof(format));
2122 	fdp1_set_format(ctx, &format, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
2123 
2124 	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(fdp1->m2m_dev, ctx, &queue_init);
2125 
2126 	if (IS_ERR(ctx->fh.m2m_ctx)) {
2127 		ret = PTR_ERR(ctx->fh.m2m_ctx);
2128 		goto error_ctx;
2129 	}
2130 
2131 	/* Perform any power management required */
2132 	ret = pm_runtime_resume_and_get(fdp1->dev);
2133 	if (ret < 0)
2134 		goto error_pm;
2135 
2136 	v4l2_fh_add(&ctx->fh, file);
2137 
2138 	dprintk(fdp1, "Created instance: %p, m2m_ctx: %p\n",
2139 		ctx, ctx->fh.m2m_ctx);
2140 
2141 	mutex_unlock(&fdp1->dev_mutex);
2142 	return 0;
2143 
2144 error_pm:
2145        v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
2146 error_ctx:
2147 	v4l2_ctrl_handler_free(&ctx->hdl);
2148 	kfree(ctx);
2149 done:
2150 	mutex_unlock(&fdp1->dev_mutex);
2151 	return ret;
2152 }
2153 
/*
 * fdp1_release - release() handler, destroy a per-file context
 *
 * Undoes fdp1_open(): removes and exits the file handle, frees the
 * controls, releases the m2m context (under the device mutex), frees the
 * context and drops the runtime PM reference taken at open time.
 */
static int fdp1_release(struct file *file)
{
	struct fdp1_dev *fdp1 = video_drvdata(file);
	struct fdp1_ctx *ctx = file_to_ctx(file);

	dprintk(fdp1, "Releasing instance %p\n", ctx);

	v4l2_fh_del(&ctx->fh, file);
	v4l2_fh_exit(&ctx->fh);
	v4l2_ctrl_handler_free(&ctx->hdl);
	mutex_lock(&fdp1->dev_mutex);
	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
	mutex_unlock(&fdp1->dev_mutex);
	kfree(ctx);

	pm_runtime_put(fdp1->dev);

	return 0;
}
2173 
/* File operations: poll and mmap are delegated to the m2m helpers. */
static const struct v4l2_file_operations fdp1_fops = {
	.owner		= THIS_MODULE,
	.open		= fdp1_open,
	.release	= fdp1_release,
	.poll		= v4l2_m2m_fop_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= v4l2_m2m_fop_mmap,
};
2182 
/* Template video_device; fdp1_probe() copies it into fdp1->vfd. */
static const struct video_device fdp1_videodev = {
	.name		= DRIVER_NAME,
	.vfl_dir	= VFL_DIR_M2M,
	.fops		= &fdp1_fops,
	.device_caps	= V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING,
	.ioctl_ops	= &fdp1_ioctl_ops,
	.minor		= -1,
	.release	= video_device_release_empty,
};
2192 
/* mem2mem framework callbacks for job scheduling and processing. */
static const struct v4l2_m2m_ops m2m_ops = {
	.device_run	= fdp1_m2m_device_run,
	.job_ready	= fdp1_m2m_job_ready,
	.job_abort	= fdp1_m2m_job_abort,
};
2198 
/*
 * fdp1_irq_handler - FDP1 interrupt service routine
 *
 * Reads and acknowledges the interrupt status, optionally dumps debug
 * state (debug >= 2), and completes the current frame on a frame-end or
 * error interrupt. Returns IRQ_NONE for spurious interrupts.
 */
static irqreturn_t fdp1_irq_handler(int irq, void *dev_id)
{
	struct fdp1_dev *fdp1 = dev_id;
	u32 int_status;
	u32 ctl_status;
	u32 vint_cnt;
	u32 cycles;

	int_status = fdp1_read(fdp1, FD1_CTL_IRQSTA);
	cycles = fdp1_read(fdp1, FD1_CTL_VCYCLE_STAT);
	ctl_status = fdp1_read(fdp1, FD1_CTL_STATUS);
	vint_cnt = (ctl_status & FD1_CTL_STATUS_VINT_CNT_MASK) >>
			FD1_CTL_STATUS_VINT_CNT_SHIFT;

	/* Clear interrupts */
	fdp1_write(fdp1, ~(int_status) & FD1_CTL_IRQ_MASK, FD1_CTL_IRQSTA);

	if (debug >= 2) {
		dprintk(fdp1, "IRQ: 0x%x %s%s%s\n", int_status,
			int_status & FD1_CTL_IRQ_VERE ? "[Error]" : "[!E]",
			int_status & FD1_CTL_IRQ_VINTE ? "[VSync]" : "[!V]",
			int_status & FD1_CTL_IRQ_FREE ? "[FrameEnd]" : "[!F]");

		dprintk(fdp1, "CycleStatus = %d (%dms)\n",
			cycles, cycles/(fdp1->clk_rate/1000));

		dprintk(fdp1,
			"Control Status = 0x%08x : VINT_CNT = %d %s:%s:%s:%s\n",
			ctl_status, vint_cnt,
			ctl_status & FD1_CTL_STATUS_SGREGSET ? "RegSet" : "",
			ctl_status & FD1_CTL_STATUS_SGVERR ? "Vsync Error" : "",
			ctl_status & FD1_CTL_STATUS_SGFREND ? "FrameEnd" : "",
			ctl_status & FD1_CTL_STATUS_BSY ? "Busy" : "");
		dprintk(fdp1, "***********************************\n");
	}

	/* Spurious interrupt */
	if (!(FD1_CTL_IRQ_MASK & int_status))
		return IRQ_NONE;

	/* Work completed, release the frame */
	if (FD1_CTL_IRQ_VERE & int_status)
		device_frame_end(fdp1, VB2_BUF_STATE_ERROR);
	else if (FD1_CTL_IRQ_FREE & int_status)
		device_frame_end(fdp1, VB2_BUF_STATE_DONE);

	return IRQ_HANDLED;
}
2247 
fdp1_probe(struct platform_device * pdev)2248 static int fdp1_probe(struct platform_device *pdev)
2249 {
2250 	struct fdp1_dev *fdp1;
2251 	struct video_device *vfd;
2252 	struct device_node *fcp_node;
2253 	struct clk *clk;
2254 	unsigned int i;
2255 
2256 	int ret;
2257 	int hw_version;
2258 
2259 	fdp1 = devm_kzalloc(&pdev->dev, sizeof(*fdp1), GFP_KERNEL);
2260 	if (!fdp1)
2261 		return -ENOMEM;
2262 
2263 	INIT_LIST_HEAD(&fdp1->free_job_list);
2264 	INIT_LIST_HEAD(&fdp1->queued_job_list);
2265 	INIT_LIST_HEAD(&fdp1->hw_job_list);
2266 
2267 	/* Initialise the jobs on the free list */
2268 	for (i = 0; i < ARRAY_SIZE(fdp1->jobs); i++)
2269 		list_add(&fdp1->jobs[i].list, &fdp1->free_job_list);
2270 
2271 	mutex_init(&fdp1->dev_mutex);
2272 
2273 	spin_lock_init(&fdp1->irqlock);
2274 	spin_lock_init(&fdp1->device_process_lock);
2275 	fdp1->dev = &pdev->dev;
2276 	platform_set_drvdata(pdev, fdp1);
2277 
2278 	/* Memory-mapped registers */
2279 	fdp1->regs = devm_platform_ioremap_resource(pdev, 0);
2280 	if (IS_ERR(fdp1->regs))
2281 		return PTR_ERR(fdp1->regs);
2282 
2283 	/* Interrupt service routine registration */
2284 	ret = platform_get_irq(pdev, 0);
2285 	if (ret < 0)
2286 		return ret;
2287 	fdp1->irq = ret;
2288 
2289 	ret = devm_request_irq(&pdev->dev, fdp1->irq, fdp1_irq_handler, 0,
2290 			       dev_name(&pdev->dev), fdp1);
2291 	if (ret) {
2292 		dev_err(&pdev->dev, "cannot claim IRQ %d\n", fdp1->irq);
2293 		return ret;
2294 	}
2295 
2296 	/* FCP */
2297 	fcp_node = of_parse_phandle(pdev->dev.of_node, "renesas,fcp", 0);
2298 	if (fcp_node) {
2299 		fdp1->fcp = rcar_fcp_get(fcp_node);
2300 		of_node_put(fcp_node);
2301 		if (IS_ERR(fdp1->fcp)) {
2302 			dev_dbg(&pdev->dev, "FCP not found (%pe)\n", fdp1->fcp);
2303 			return PTR_ERR(fdp1->fcp);
2304 		}
2305 	}
2306 
2307 	/* Determine our clock rate */
2308 	clk = clk_get(&pdev->dev, NULL);
2309 	if (IS_ERR(clk)) {
2310 		ret = PTR_ERR(clk);
2311 		goto put_dev;
2312 	}
2313 
2314 	fdp1->clk_rate = clk_get_rate(clk);
2315 	clk_put(clk);
2316 
2317 	/* V4L2 device registration */
2318 	ret = v4l2_device_register(&pdev->dev, &fdp1->v4l2_dev);
2319 	if (ret) {
2320 		v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
2321 		goto put_dev;
2322 	}
2323 
2324 	/* M2M registration */
2325 	fdp1->m2m_dev = v4l2_m2m_init(&m2m_ops);
2326 	if (IS_ERR(fdp1->m2m_dev)) {
2327 		v4l2_err(&fdp1->v4l2_dev, "Failed to init mem2mem device\n");
2328 		ret = PTR_ERR(fdp1->m2m_dev);
2329 		goto unreg_dev;
2330 	}
2331 
2332 	/* Video registration */
2333 	fdp1->vfd = fdp1_videodev;
2334 	vfd = &fdp1->vfd;
2335 	vfd->lock = &fdp1->dev_mutex;
2336 	vfd->v4l2_dev = &fdp1->v4l2_dev;
2337 	video_set_drvdata(vfd, fdp1);
2338 	strscpy(vfd->name, fdp1_videodev.name, sizeof(vfd->name));
2339 
2340 	ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
2341 	if (ret) {
2342 		v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
2343 		goto release_m2m;
2344 	}
2345 
2346 	v4l2_info(&fdp1->v4l2_dev, "Device registered as /dev/video%d\n",
2347 		  vfd->num);
2348 
2349 	/* Power up the cells to read HW */
2350 	pm_runtime_enable(&pdev->dev);
2351 	ret = pm_runtime_resume_and_get(fdp1->dev);
2352 	if (ret < 0)
2353 		goto disable_pm;
2354 
2355 	hw_version = fdp1_read(fdp1, FD1_IP_INTDATA);
2356 	switch (hw_version) {
2357 	case FD1_IP_GEN2:
2358 		dprintk(fdp1, "FDP1 Version R-Car Gen2\n");
2359 		break;
2360 	case FD1_IP_M3W:
2361 		dprintk(fdp1, "FDP1 Version R-Car M3-W\n");
2362 		break;
2363 	case FD1_IP_H3:
2364 		dprintk(fdp1, "FDP1 Version R-Car H3\n");
2365 		break;
2366 	case FD1_IP_M3N:
2367 		dprintk(fdp1, "FDP1 Version R-Car M3-N\n");
2368 		break;
2369 	case FD1_IP_E3:
2370 		dprintk(fdp1, "FDP1 Version R-Car E3\n");
2371 		break;
2372 	default:
2373 		dev_err(fdp1->dev, "FDP1 Unidentifiable (0x%08x)\n",
2374 			hw_version);
2375 	}
2376 
2377 	/* Allow the hw to sleep until an open call puts it to use */
2378 	pm_runtime_put(fdp1->dev);
2379 
2380 	return 0;
2381 
2382 disable_pm:
2383 	pm_runtime_disable(fdp1->dev);
2384 
2385 release_m2m:
2386 	v4l2_m2m_release(fdp1->m2m_dev);
2387 
2388 unreg_dev:
2389 	v4l2_device_unregister(&fdp1->v4l2_dev);
2390 
2391 put_dev:
2392 	rcar_fcp_put(fdp1->fcp);
2393 	return ret;
2394 }
2395 
/*
 * fdp1_remove - platform driver remove: tear down in reverse probe order
 * (m2m device, video device, v4l2 device, runtime PM, FCP reference).
 */
static void fdp1_remove(struct platform_device *pdev)
{
	struct fdp1_dev *fdp1 = platform_get_drvdata(pdev);

	v4l2_m2m_release(fdp1->m2m_dev);
	video_unregister_device(&fdp1->vfd);
	v4l2_device_unregister(&fdp1->v4l2_dev);
	pm_runtime_disable(&pdev->dev);
	rcar_fcp_put(fdp1->fcp);
}
2406 
fdp1_pm_runtime_suspend(struct device * dev)2407 static int fdp1_pm_runtime_suspend(struct device *dev)
2408 {
2409 	struct fdp1_dev *fdp1 = dev_get_drvdata(dev);
2410 
2411 	rcar_fcp_disable(fdp1->fcp);
2412 
2413 	return 0;
2414 }
2415 
fdp1_pm_runtime_resume(struct device * dev)2416 static int fdp1_pm_runtime_resume(struct device *dev)
2417 {
2418 	struct fdp1_dev *fdp1 = dev_get_drvdata(dev);
2419 
2420 	/* Program in the static LUTs */
2421 	fdp1_set_lut(fdp1);
2422 
2423 	return rcar_fcp_enable(fdp1->fcp);
2424 }
2425 
/* Runtime PM only; no system sleep handlers are provided. */
static const struct dev_pm_ops fdp1_pm_ops = {
	RUNTIME_PM_OPS(fdp1_pm_runtime_suspend, fdp1_pm_runtime_resume, NULL)
};
2429 
/* Device tree match table. */
static const struct of_device_id fdp1_dt_ids[] = {
	{ .compatible = "renesas,fdp1" },
	{ },
};
MODULE_DEVICE_TABLE(of, fdp1_dt_ids);
2435 
/* Platform driver glue. */
static struct platform_driver fdp1_pdrv = {
	.probe		= fdp1_probe,
	.remove		= fdp1_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.of_match_table = fdp1_dt_ids,
		.pm	= pm_ptr(&fdp1_pm_ops),
	},
};
2445 
/* Module registration and metadata. */
module_platform_driver(fdp1_pdrv);

MODULE_DESCRIPTION("Renesas R-Car Fine Display Processor Driver");
MODULE_AUTHOR("Kieran Bingham <kieran@bingham.xyz>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
2452