1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * TI VIP capture driver
4 *
5 * Copyright (C) 2025 Texas Instruments Incorporated - http://www.ti.com/
6 * David Griego, <dagriego@biglakesoftware.com>
7 * Dale Farnsworth, <dale@farnsworth.org>
8 * Yemike Abhilash Chandra, <y-abhilashchandra@ti.com>
9 */
10
11 #include <linux/clk.h>
12 #include <linux/delay.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/err.h>
15 #include <linux/interrupt.h>
16 #include <linux/module.h>
17 #include <linux/workqueue.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/sched.h>
20 #include <linux/mfd/syscon.h>
21 #include <linux/regmap.h>
22
23 #include <linux/pinctrl/consumer.h>
24 #include <linux/of_device.h>
25 #include <linux/of_graph.h>
26
27 #include "vip.h"
28
29 #define VIP_MODULE_NAME "vip"
30
31 static int debug;
32 module_param(debug, int, 0644);
33 MODULE_PARM_DESC(debug, "debug level (0-8)");
34
/*
 * Minimum and maximum frame sizes
 */
#define MIN_W		128
#define MIN_H		128
#define MAX_W		2048
#define MAX_H		1536

/*
 * Required alignments
 */
#define S_ALIGN		0 /* multiple of 1 */
#define H_ALIGN		1 /* multiple of 2 */
#define W_ALIGN		1 /* multiple of 2 */

/*
 * Need a descriptor entry for each of up to 15 outputs,
 * and up to 2 control transfers.
 */
#define VIP_DESC_LIST_SIZE	(17 * sizeof(struct vpdma_dtd))

/*
 * port flag bits
 */
#define FLAG_INTERLACED		BIT(4)
#define FLAG_MULT_PORT		BIT(6)
#define FLAG_MULT_ANC		BIT(7)

/* Depth of the VPDMA list FIFO and of the internal drop-buffer queue */
#define VIP_VPDMA_FIFO_SIZE	2
#define VIP_DROPQ_SIZE		3

/*
 * Define indices into the srce_info tables
 */

#define VIP_SRCE_MULT_PORT	0
#define VIP_SRCE_MULT_ANC	1
#define VIP_SRCE_LUMA		2
#define VIP_SRCE_CHROMA		3
#define VIP_SRCE_RGB		4

/* Register accessors; work on any struct carrying a ->base iomem pointer */
#define reg_read(dev, offset) ioread32((dev)->base + (offset))
#define reg_write(dev, offset, val) iowrite32((val), (dev)->base + (offset))

/* Offset of a sub-module register relative to the top-level device base */
#define GET_OFFSET_TOP(port, obj, reg) \
	((obj)->base - (port)->dev->base + (reg))

/* Fill one ADB header in this port's shadow-register block */
#define VIP_SET_MMR_ADB_HDR(port, hdr, regs, offset_a) \
	VPDMA_SET_MMR_ADB_HDR((port)->mmr_adb, vip_mmr_adb, hdr, regs, offset_a)

/*
 * These represent the module resets bit for slice 1
 * Upon detecting slice2 we simply left shift by 1
 */
#define VIP_DP_RST	BIT(16)
#define VIP_CSC_RST	BIT(20)
#define VIP_SC_RST	BIT(22)

/* Per-port parser register addresses; "p" is the port index (A/B) */
#define VIP_PARSER_PORT(p)		(VIP_PARSER_PORTA_0 + ((p) * 0x8U))
#define VIP_PARSER_CROP_H_PORT(p) \
		(VIP_PARSER_PORTA_EXTRA4 + ((p) * 0x10U))
#define VIP_PARSER_CROP_V_PORT(p) \
		(VIP_PARSER_PORTA_EXTRA5 + ((p) * 0x10U))
#define VIP_PARSER_STOP_IMM_PORT(p)	(VIP_PARSER_PORTA_EXTRA6 + ((p) * 0x4U))

/* YUV output-FIFO interrupt bits for both ports */
#define PARSER_IRQ_MASK (VIP_PORTA_OUTPUT_FIFO_YUV | \
			 VIP_PORTB_OUTPUT_FIFO_YUV)
102
/*
 * The srce_info structure contains per-srce data.
 */
struct vip_srce_info {
	u8	base_channel;	/* the VPDMA channel number */
	u8	vb_index;	/* input frame f, f-1, f-2 index */
	u8	vb_part;	/* identifies section of co-planar formats */
};
111
/*
 * Per-source-type channel setup table, indexed by the VIP_SRCE_* values.
 * base_channel is later adjusted for port B / slice 2 in add_out_dtd().
 */
static struct vip_srce_info srce_info[5] = {
	[VIP_SRCE_MULT_PORT] = {
		.base_channel	= VIP1_CHAN_NUM_MULT_PORT_A_SRC0,
		.vb_index	= 0,
		.vb_part	= VIP_CHROMA,
	},
	[VIP_SRCE_MULT_ANC] = {
		.base_channel	= VIP1_CHAN_NUM_MULT_ANC_A_SRC0,
		.vb_index	= 0,
		.vb_part	= VIP_LUMA,
	},
	[VIP_SRCE_LUMA] = {
		.base_channel	= VIP1_CHAN_NUM_PORT_A_LUMA,
		.vb_index	= 1,
		.vb_part	= VIP_LUMA,
	},
	[VIP_SRCE_CHROMA] = {
		.base_channel	= VIP1_CHAN_NUM_PORT_A_CHROMA,
		.vb_index	= 1,
		.vb_part	= VIP_CHROMA,
	},
	[VIP_SRCE_RGB] = {
		/* .vb_index implicitly 0 for the RGB source */
		.base_channel	= VIP1_CHAN_NUM_PORT_A_RGB,
		.vb_part	= VIP_LUMA,
	},
};
138
/*
 * Table of all pixel formats the driver can produce, matched against the
 * sensor's media-bus code at runtime.  Note that some fourccs appear twice
 * with different media-bus codes (e.g. RGB24 from a YUV bus via CSC, or
 * native from an RGB bus).  The finfo member is filled in later by
 * vip_init_format_info().
 */
static struct vip_fmt vip_formats[VIP_MAX_ACTIVE_FMT] = {
	{
		.fourcc		= V4L2_PIX_FMT_NV12,
		.code		= MEDIA_BUS_FMT_UYVY8_2X8,
		.coplanar	= 1,	/* separate luma + chroma planes */
		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420],
				    &vpdma_yuv_fmts[VPDMA_DATA_FMT_C420],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_UYVY,
		.code		= MEDIA_BUS_FMT_UYVY8_2X8,
		.coplanar	= 0,
		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CBY422],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_YUYV,
		.code		= MEDIA_BUS_FMT_UYVY8_2X8,
		.coplanar	= 0,
		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YCB422],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_VYUY,
		.code		= MEDIA_BUS_FMT_UYVY8_2X8,
		.coplanar	= 0,
		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CRY422],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_YVYU,
		.code		= MEDIA_BUS_FMT_UYVY8_2X8,
		.coplanar	= 0,
		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YCR422],
				  },
	},
	{
		/* RGB output from a YUV bus, presumably via the CSC */
		.fourcc		= V4L2_PIX_FMT_RGB24,
		.code		= MEDIA_BUS_FMT_UYVY8_2X8,
		.coplanar	= 0,
		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB24],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_RGB32,
		.code		= MEDIA_BUS_FMT_UYVY8_2X8,
		.coplanar	= 0,
		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ARGB32],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_BGR24,
		.code		= MEDIA_BUS_FMT_UYVY8_2X8,
		.coplanar	= 0,
		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_BGR24],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_BGR32,
		.code		= MEDIA_BUS_FMT_UYVY8_2X8,
		.coplanar	= 0,
		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ABGR32],
				  },
	},
	{
		/* Native RGB bus, no conversion */
		.fourcc		= V4L2_PIX_FMT_RGB24,
		.code		= MEDIA_BUS_FMT_RGB888_1X24,
		.coplanar	= 0,
		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB24],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_RGB32,
		.code		= MEDIA_BUS_FMT_ARGB8888_1X32,
		.coplanar	= 0,
		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ARGB32],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_SBGGR8,
		.code		= MEDIA_BUS_FMT_SBGGR8_1X8,
		.coplanar	= 0,
		.vpdma_fmt	= { &vpdma_raw_fmts[VPDMA_DATA_FMT_RAW8],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_SGBRG8,
		.code		= MEDIA_BUS_FMT_SGBRG8_1X8,
		.coplanar	= 0,
		.vpdma_fmt	= { &vpdma_raw_fmts[VPDMA_DATA_FMT_RAW8],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_SGRBG8,
		.code		= MEDIA_BUS_FMT_SGRBG8_1X8,
		.coplanar	= 0,
		.vpdma_fmt	= { &vpdma_raw_fmts[VPDMA_DATA_FMT_RAW8],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_SRGGB8,
		.code		= MEDIA_BUS_FMT_SRGGB8_1X8,
		.coplanar	= 0,
		.vpdma_fmt	= { &vpdma_raw_fmts[VPDMA_DATA_FMT_RAW8],
				  },
	},
	{
		/* V4L2 currently only defines one 16 bit variant */
		.fourcc		= V4L2_PIX_FMT_SBGGR16,
		.code		= MEDIA_BUS_FMT_SBGGR16_1X16,
		.coplanar	= 0,
		.vpdma_fmt	= { &vpdma_raw_fmts[VPDMA_DATA_FMT_RAW16],
				  },
	},
};
255
/*
 * DMA address/data block for the shadow registers
 *
 * Each hdr/regs pair describes one burst of register writes that VPDMA
 * performs on our behalf; the pad members keep each group's size aligned
 * as the ADB format requires.  Offsets are bound in init_adb_hdrs().
 */
struct vip_mmr_adb {
	struct vpdma_adb_hdr	sc_hdr0;
	u32			sc_regs0[7];
	u32			sc_pad0[1];
	struct vpdma_adb_hdr	sc_hdr8;
	u32			sc_regs8[6];
	u32			sc_pad8[2];
	struct vpdma_adb_hdr	sc_hdr17;
	u32			sc_regs17[9];
	u32			sc_pad17[3];
	struct vpdma_adb_hdr	csc_hdr;
	u32			csc_regs[6];
	u32			csc_pad[2];
};
273
/*
 * Function prototype declarations
 */
static int alloc_port(struct vip_dev *, int);
static void free_port(struct vip_port *);
static int vip_setup_parser(struct vip_port *port);
static int vip_setup_scaler(struct vip_stream *stream);
static void vip_enable_parser(struct vip_port *port, bool on);
static void vip_reset_parser(struct vip_port *port, bool on);
static void vip_parser_stop_imm(struct vip_port *port, bool on);
static void stop_dma(struct vip_stream *stream, bool clear_list);
static int vip_load_vpdma_list_fifo(struct vip_stream *stream);
/* Scaler and CSC are shared resources allocated/released per port */
static inline bool is_scaler_available(struct vip_port *port);
static inline bool allocate_scaler(struct vip_port *port);
static inline void free_scaler(struct vip_port *port);
static bool is_csc_available(struct vip_port *port);
static bool allocate_csc(struct vip_port *port,
			 enum vip_csc_state csc_direction);
static void free_csc(struct vip_port *port);
293
294 /* initialize v4l2_format_info member in vip_formats array */
vip_init_format_info(struct device * dev)295 static void vip_init_format_info(struct device *dev)
296 {
297 struct vip_fmt *fmt;
298 int i;
299
300 for (i = 0; i < ARRAY_SIZE(vip_formats); i++) {
301 fmt = &vip_formats[i];
302 fmt->finfo = v4l2_format_info(fmt->fourcc);
303 }
304 }
305
306 /* Print Four-character-code (FOURCC) */
fourcc_to_str(u32 fmt)307 static char *fourcc_to_str(u32 fmt)
308 {
309 static char code[5];
310
311 code[0] = (unsigned char)(fmt & 0xff);
312 code[1] = (unsigned char)((fmt >> 8) & 0xff);
313 code[2] = (unsigned char)((fmt >> 16) & 0xff);
314 code[3] = (unsigned char)((fmt >> 24) & 0xff);
315 code[4] = '\0';
316
317 return code;
318 }
319
320 /*
321 * Find our format description corresponding to the passed v4l2_format
322 */
find_port_format_by_pix(struct vip_port * port,u32 pixelformat)323 static struct vip_fmt *find_port_format_by_pix(struct vip_port *port,
324 u32 pixelformat)
325 {
326 struct vip_fmt *fmt;
327 unsigned int index;
328
329 for (index = 0; index < port->num_active_fmt; index++) {
330 fmt = port->active_fmt[index];
331 if (fmt->fourcc == pixelformat)
332 return fmt;
333 }
334
335 return NULL;
336 }
337
/*
 * Find our format description matching the given media-bus code;
 * returns NULL if the port has no active format with that code.
 */
static struct vip_fmt *find_port_format_by_code(struct vip_port *port,
						u32 code)
{
	unsigned int i;

	for (i = 0; i < port->num_active_fmt; i++) {
		struct vip_fmt *entry = port->active_fmt[i];

		if (entry->code == code)
			return entry;
	}

	return NULL;
}
352
/*
 * Map a v4l2_async_notifier back to the vip_port that embeds it.
 *
 * NOTE(review): plain (non-static) "inline" on a definition in a .c file
 * has subtle C linkage rules; presumably this is declared in vip.h for use
 * by other translation units -- confirm before changing the qualifier.
 */
inline struct vip_port *notifier_to_vip_port(struct v4l2_async_notifier *n)
{
	return container_of(n, struct vip_port, notifier);
}
357
vip_is_mbuscode_yuv(u32 code)358 static bool vip_is_mbuscode_yuv(u32 code)
359 {
360 return ((code & 0xff00) == 0x2000);
361 }
362
vip_is_mbuscode_rgb(u32 code)363 static bool vip_is_mbuscode_rgb(u32 code)
364 {
365 return ((code & 0xff00) == 0x1000);
366 }
367
vip_is_mbuscode_raw(u32 code)368 static bool vip_is_mbuscode_raw(u32 code)
369 {
370 return ((code & 0xff00) == 0x3000);
371 }
372
373 /*
374 * This is not an accurate conversion but it is only used to
375 * assess if color conversion is needed.
376 */
vip_mbus_code_to_fourcc(u32 code)377 static u32 vip_mbus_code_to_fourcc(u32 code)
378 {
379 if (vip_is_mbuscode_rgb(code))
380 return V4L2_PIX_FMT_RGB24;
381
382 if (vip_is_mbuscode_yuv(code))
383 return V4L2_PIX_FMT_UYVY;
384
385 return V4L2_PIX_FMT_SBGGR8;
386 }
387
388 static enum vip_csc_state
vip_csc_direction(u32 src_code,const struct v4l2_format_info * dfinfo)389 vip_csc_direction(u32 src_code, const struct v4l2_format_info *dfinfo)
390 {
391 if (vip_is_mbuscode_yuv(src_code) && v4l2_is_format_rgb(dfinfo))
392 return VIP_CSC_Y2R;
393 else if (vip_is_mbuscode_rgb(src_code) && v4l2_is_format_yuv(dfinfo))
394 return VIP_CSC_R2Y;
395 else
396 return VIP_CSC_NA;
397 }
398
399 /*
400 * Insert a masked field into a 32-bit field
401 */
insert_field(u32 * valp,u32 field,u32 mask,int shift)402 static void insert_field(u32 *valp, u32 field, u32 mask, int shift)
403 {
404 u32 val = *valp;
405
406 val &= ~(mask << shift);
407 val |= (field & mask) << shift;
408 *valp = val;
409 }
410
411 /*
412 * Set the headers for all of the address/data block structures.
413 */
init_adb_hdrs(struct vip_port * port)414 static void init_adb_hdrs(struct vip_port *port)
415 {
416 VIP_SET_MMR_ADB_HDR(port, sc_hdr0, sc_regs0,
417 GET_OFFSET_TOP(port, port->dev->sc, CFG_SC0));
418 VIP_SET_MMR_ADB_HDR(port, sc_hdr8, sc_regs8,
419 GET_OFFSET_TOP(port, port->dev->sc, CFG_SC8));
420 VIP_SET_MMR_ADB_HDR(port, sc_hdr17, sc_regs17,
421 GET_OFFSET_TOP(port, port->dev->sc, CFG_SC17));
422 VIP_SET_MMR_ADB_HDR(port, csc_hdr, csc_regs,
423 GET_OFFSET_TOP(port, port->dev->csc, CSC_CSC00));
424
425 };
426
/*
 * Assert (on) or de-assert (!on) a module reset bit in VIP_CLK_RESET.
 * The module argument is a slice-1 bit; slice 2 uses the bit one
 * position higher, so shift before applying.
 */
static void vip_module_toggle(struct vip_dev *dev, uint32_t module, bool on)
{
	u32 clk_reset = reg_read(dev, VIP_CLK_RESET);

	if (dev->slice_id == VIP_SLICE2)
		module <<= 1;

	if (on)
		clk_reset |= module;
	else
		clk_reset &= ~module;

	reg_write(dev, VIP_CLK_RESET, clk_reset);
}
443
444 /*
445 * Enable or disable the VIP clocks
446 */
vip_set_clock_enable(struct vip_dev * dev,bool on)447 static void vip_set_clock_enable(struct vip_dev *dev, bool on)
448 {
449 u32 val = 0;
450
451 val = reg_read(dev, VIP_CLK_ENABLE);
452 if (on) {
453 val |= VIP_VPDMA_CLK_ENABLE;
454 if (dev->slice_id == VIP_SLICE1)
455 val |= VIP_VIP1_DATA_PATH_CLK_ENABLE;
456 else
457 val |= VIP_VIP2_DATA_PATH_CLK_ENABLE;
458 } else {
459 if (dev->slice_id == VIP_SLICE1)
460 val &= ~VIP_VIP1_DATA_PATH_CLK_ENABLE;
461 else
462 val &= ~VIP_VIP2_DATA_PATH_CLK_ENABLE;
463
464 /* Both VIP are disabled then shutdown VPDMA also */
465 if (!(val & (VIP_VIP1_DATA_PATH_CLK_ENABLE |
466 VIP_VIP2_DATA_PATH_CLK_ENABLE)))
467 val = 0;
468 }
469
470 reg_write(dev, VIP_CLK_ENABLE, val);
471 }
472
/* This helper function is used to enable the clock early on to
 * enable vpdma firmware loading before the slice device are created
 *
 * NOTE(review): the "on" parameter is currently ignored -- the function
 * unconditionally enables VIP1 data path + VPDMA clocks.  Presumably it
 * is only ever called with on == true; confirm against callers before
 * relying on the disable path.
 */
static void vip_shared_set_clock_enable(struct vip_shared *shared, bool on)
{
	u32 val = 0;

	val = VIP_VIP1_DATA_PATH_CLK_ENABLE | VIP_VPDMA_CLK_ENABLE;

	reg_write(shared, VIP_CLK_ENABLE, val);
}
484
/*
 * Pulse the data-path reset for this slice: assert the reset bit, hold
 * it for ~200us, then de-assert it.  Only the bit belonging to
 * dev->slice_id is touched; the other slice is left alone.
 */
static void vip_top_reset(struct vip_dev *dev)
{
	u32 val = 0;

	val = reg_read(dev, VIP_CLK_RESET);

	/* Assert reset for our slice only */
	if (dev->slice_id == VIP_SLICE1)
		insert_field(&val, 1, VIP_DATA_PATH_CLK_RESET_MASK,
			     VIP_VIP1_DATA_PATH_RESET_SHIFT);
	else
		insert_field(&val, 1, VIP_DATA_PATH_CLK_RESET_MASK,
			     VIP_VIP2_DATA_PATH_RESET_SHIFT);

	reg_write(dev, VIP_CLK_RESET, val);

	/* Hold the module in reset briefly before releasing it */
	usleep_range(200, 250);

	val = reg_read(dev, VIP_CLK_RESET);

	/* De-assert the same reset bit */
	if (dev->slice_id == VIP_SLICE1)
		insert_field(&val, 0, VIP_DATA_PATH_CLK_RESET_MASK,
			     VIP_VIP1_DATA_PATH_RESET_SHIFT);
	else
		insert_field(&val, 0, VIP_DATA_PATH_CLK_RESET_MASK,
			     VIP_VIP2_DATA_PATH_RESET_SHIFT);
	reg_write(dev, VIP_CLK_RESET, val);
}
512
/*
 * Pulse the VPDMA reset in the shared VIP_CLK_RESET register:
 * assert, hold ~200us, de-assert.  Affects both slices since the
 * VPDMA engine is shared between them.
 */
static void vip_top_vpdma_reset(struct vip_shared *shared)
{
	u32 val;

	val = reg_read(shared, VIP_CLK_RESET);
	insert_field(&val, 1, VIP_VPDMA_CLK_RESET_MASK,
		     VIP_VPDMA_CLK_RESET_SHIFT);
	reg_write(shared, VIP_CLK_RESET, val);

	usleep_range(200, 250);

	val = reg_read(shared, VIP_CLK_RESET);
	insert_field(&val, 0, VIP_VPDMA_CLK_RESET_MASK,
		     VIP_VPDMA_CLK_RESET_SHIFT);
	reg_write(shared, VIP_CLK_RESET, val);
}
529
static void vip_set_pclk_invert(struct vip_port *port)
{
	struct vip_ctrl_module *ctrl = port->dev->syscon;
	struct vip_dev *dev = port->dev;
	u32 index;
	/*
	 * When the VIP parser is configured to so that the pixel clock
	 * is to be sampled at falling edge, the pixel clock needs to be
	 * inverted before it is given to the VIP module. This is done
	 * by setting a bit in the CTRL_CORE_SMA_SW1 register.
	 */

	/* Two ports per slice: flat index = slice * 2 + port */
	index = 2 * port->dev->slice_id + port->port_id;

	v4l2_dbg(3, debug, &dev->v4l2_dev, "%s: slice%d:port%d -> index: %d\n", __func__,
		 port->dev->slice_id, port->port_id, index);

	/* Silently a no-op when no polarity regmap was set up at probe */
	if (ctrl->syscon_pol)
		regmap_update_bits(ctrl->syscon_pol,
				   ctrl->syscon_offset,
				   ctrl->syscon_bit_field[index],
				   ctrl->syscon_bit_field[index]);
}
553
vip_set_data_interface(struct vip_port * port,enum data_interface_modes mode)554 static void vip_set_data_interface(struct vip_port *port,
555 enum data_interface_modes mode)
556 {
557 u32 val = 0;
558
559 insert_field(&val, mode, VIP_DATA_INTERFACE_MODE_MASK,
560 VIP_DATA_INTERFACE_MODE_SHFT);
561
562 reg_write(port->dev->parser, VIP_PARSER_MAIN_CFG, val);
563 }
564
/*
 * Program one field of this slice's DATA_PATH_SELECT register.
 * data_path selects which mux/bypass field is being written and path_val
 * is the raw value for that field; the data_path selector itself is also
 * written into the register's DATAPATH_SELECT field.  Note the register
 * is written with only the chosen field set -- other fields become 0.
 */
static void vip_set_slice_path(struct vip_dev *dev,
			       enum data_path_select data_path, u32 path_val)
{
	u32 val = 0;
	int data_path_reg;

	/* Per-slice register: VIP1 select + 4 bytes per slice */
	data_path_reg = VIP_VIP1_DATA_PATH_SELECT + 4 * dev->slice_id;

	switch (data_path) {
	case ALL_FIELDS_DATA_SELECT:
		/* Caller supplies a fully-formed register value */
		val |= path_val;
		break;
	case VIP_CSC_SRC_DATA_SELECT:
		insert_field(&val, path_val, VIP_CSC_SRC_SELECT_MASK,
			     VIP_CSC_SRC_SELECT_SHFT);
		break;
	case VIP_SC_SRC_DATA_SELECT:
		insert_field(&val, path_val, VIP_SC_SRC_SELECT_MASK,
			     VIP_SC_SRC_SELECT_SHFT);
		break;
	case VIP_RGB_SRC_DATA_SELECT:
		val |= (path_val) ? VIP_RGB_SRC_SELECT : 0;
		break;
	case VIP_RGB_OUT_LO_DATA_SELECT:
		val |= (path_val) ? VIP_RGB_OUT_LO_SRC_SELECT : 0;
		break;
	case VIP_RGB_OUT_HI_DATA_SELECT:
		val |= (path_val) ? VIP_RGB_OUT_HI_SRC_SELECT : 0;
		break;
	case VIP_CHR_DS_1_SRC_DATA_SELECT:
		insert_field(&val, path_val, VIP_DS1_SRC_SELECT_MASK,
			     VIP_DS1_SRC_SELECT_SHFT);
		break;
	case VIP_CHR_DS_2_SRC_DATA_SELECT:
		insert_field(&val, path_val, VIP_DS2_SRC_SELECT_MASK,
			     VIP_DS2_SRC_SELECT_SHFT);
		break;
	case VIP_MULTI_CHANNEL_DATA_SELECT:
		val |= (path_val) ? VIP_MULTI_CHANNEL_SELECT : 0;
		break;
	case VIP_CHR_DS_1_DATA_BYPASS:
		val |= (path_val) ? VIP_DS1_BYPASS : 0;
		break;
	case VIP_CHR_DS_2_DATA_BYPASS:
		val |= (path_val) ? VIP_DS2_BYPASS : 0;
		break;
	default:
		v4l2_err(&dev->v4l2_dev, "%s: data_path 0x%x is not valid\n",
			 __func__, data_path);
		return;
	}
	insert_field(&val, data_path, VIP_DATAPATH_SELECT_MASK,
		     VIP_DATAPATH_SELECT_SHFT);
	reg_write(dev, data_path_reg, val);
	v4l2_dbg(3, debug, &dev->v4l2_dev, "%s: DATA_PATH_SELECT(%08X): %08X\n", __func__,
		 data_path_reg, reg_read(dev, data_path_reg));
}
622
/*
 * Return the vip_stream structure for a given struct file
 * (stored as the video device's driver data at registration time).
 */
static inline struct vip_stream *file2stream(struct file *file)
{
	struct vip_stream *stream = video_drvdata(file);

	return stream;
}
630
/*
 * Append a destination descriptor to the current descriptor list,
 * setting up dma to the given srce.
 *
 * Returns 0 on success, -1 for an unknown srce_type.  The descriptor is
 * written with a zero DMA address; the real buffer address is patched in
 * later by start_dma() via vpdma_update_dma_addr().
 */
static int add_out_dtd(struct vip_stream *stream, int srce_type)
{
	struct vip_port *port = stream->port;
	struct vip_dev *dev = port->dev;
	struct vip_srce_info *sinfo = &srce_info[srce_type];
	struct v4l2_rect *c_rect = &port->c_rect;
	struct vip_fmt *fmt = port->fmt;
	int channel, plane = 0;
	int max_width, max_height;
	dma_addr_t dma_addr = 0;	/* placeholder, patched at start_dma */
	u32 flags;
	u32 width = stream->width;

	channel = sinfo->base_channel;

	/* Adjust the base channel for the port/stream and pick dtd flags */
	switch (srce_type) {
	case VIP_SRCE_MULT_PORT:
	case VIP_SRCE_MULT_ANC:
		if (port->port_id == VIP_PORTB)
			channel += VIP_CHAN_MULT_PORTB_OFFSET;
		channel += stream->stream_id;
		flags = 0;
		break;
	case VIP_SRCE_CHROMA:
		plane = 1;	/* second plane of a co-planar format */
		fallthrough;
	case VIP_SRCE_LUMA:
		if (port->port_id == VIP_PORTB) {
			if (port->scaler && !port->fmt->coplanar)
				/*
				 * In this case Port A Chroma channel
				 * is used to carry Port B scaled YUV422
				 */
				channel += 1;
			else
				channel += VIP_CHAN_YUV_PORTB_OFFSET;
		}
		flags = port->flags;
		break;
	case VIP_SRCE_RGB:
		if (port->port_id == VIP_PORTB ||
		    (port->port_id == VIP_PORTA &&
		     port->csc == VIP_CSC_NA &&
		     v4l2_is_format_rgb(port->fmt->finfo)))
			/*
			 * RGB sensor only connect to Y_LO
			 * channel i.e. port B channel.
			 */
			channel += VIP_CHAN_RGB_PORTB_OFFSET;
		flags = port->flags;
		break;
	default:
		v4l2_err(&dev->v4l2_dev, "%s: srce_type 0x%x is not valid\n",
			 __func__, srce_type);
		return -1;
	}

	/* Slice 2 channels sit at a fixed offset above slice 1's */
	if (dev->slice_id == VIP_SLICE2)
		channel += VIP_CHAN_VIP2_OFFSET;

	if (port->fmt->vpdma_fmt[0] == &vpdma_raw_fmts[VPDMA_DATA_FMT_RAW8]) {
		/*
		 * Special case since we are faking a YUV422 16bit format
		 * to have the vpdma perform the needed byte swap
		 * we need to adjust the pixel width accordingly
		 * otherwise the parser will attempt to collect more pixels
		 * then available and the vpdma transfer will exceed the
		 * allocated frame buffer.
		 */
		width >>= 1;
		v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: 8 bit raw detected, adjusting width to %d\n",
			 __func__, width);
	}

	/*
	 * Use VPDMA_MAX_SIZE1 or VPDMA_MAX_SIZE2 register for slice0/1
	 */

	if (dev->slice_id == VIP_SLICE1) {
		vpdma_set_max_size(dev->shared->vpdma, VPDMA_MAX_SIZE1,
				   width, stream->height);

		max_width = MAX_OUT_WIDTH_REG1;
		max_height = MAX_OUT_HEIGHT_REG1;
	} else {
		vpdma_set_max_size(dev->shared->vpdma, VPDMA_MAX_SIZE2,
				   width, stream->height);

		max_width = MAX_OUT_WIDTH_REG2;
		max_height = MAX_OUT_HEIGHT_REG2;
	}

	/*
	 * Mark this channel to be cleared while cleaning up resources
	 * This will make sure that an abort descriptor for this channel
	 * would be submitted to VPDMA causing any ongoing transaction to be
	 * aborted and cleanup the VPDMA FSM for this channel
	 */
	stream->vpdma_channels[channel] = 1;

	vpdma_rawchan_add_out_dtd(&stream->desc_list, c_rect->width,
				  stream->bytesperline, c_rect,
				  fmt->vpdma_fmt[plane], dma_addr,
				  max_width, max_height, channel, flags);
	return 0;
}
741
742 /*
743 * add_stream_dtds - prepares and starts DMA for pending transfers
744 */
add_stream_dtds(struct vip_stream * stream)745 static void add_stream_dtds(struct vip_stream *stream)
746 {
747 struct vip_port *port = stream->port;
748 int srce_type;
749
750 if (port->flags & FLAG_MULT_PORT)
751 srce_type = VIP_SRCE_MULT_PORT;
752 else if (port->flags & FLAG_MULT_ANC)
753 srce_type = VIP_SRCE_MULT_ANC;
754 else if (v4l2_is_format_rgb(port->fmt->finfo))
755 srce_type = VIP_SRCE_RGB;
756 else
757 srce_type = VIP_SRCE_LUMA;
758
759 add_out_dtd(stream, srce_type);
760
761 if (srce_type == VIP_SRCE_LUMA && port->fmt->coplanar)
762 add_out_dtd(stream, VIP_SRCE_CHROMA);
763 }
764
/*
 * Enable the list-complete and parser interrupts for one VPDMA list on
 * the given interrupt line.
 */
static void enable_irqs(struct vip_dev *dev, int irq_num, int list_num)
{
	struct vip_parser_data *parser = dev->parser;
	u32 reg_addr = VIP_INT0_ENABLE0_SET +
		       VIP_INTC_INTX_OFFSET * irq_num;
	/*
	 * Bit (list_num * 2) is this list's completion interrupt; the
	 * parser interrupt bit shifts by one per irq line.
	 */
	u32 irq_val = (1 << (list_num * 2)) |
		      (VIP_VIP1_PARSER_INT << (irq_num * 1));

	/* Enable Parser Interrupt */
	reg_write(parser, VIP_PARSER_FIQ_MASK, (u32)~PARSER_IRQ_MASK);

	reg_write(dev->shared, reg_addr, irq_val);

	vpdma_enable_list_complete_irq(dev->shared->vpdma,
				       irq_num, list_num, true);
}
781
/*
 * Disable the list-complete and parser interrupts enabled by
 * enable_irqs(); mirror image using the ENABLE0_CLR register.
 */
static void disable_irqs(struct vip_dev *dev, int irq_num, int list_num)
{
	struct vip_parser_data *parser = dev->parser;
	u32 reg_addr = VIP_INT0_ENABLE0_CLR +
		       VIP_INTC_INTX_OFFSET * irq_num;
	u32 irq_val = (1 << (list_num * 2)) |
		      (VIP_VIP1_PARSER_INT << (irq_num * 1));

	/* Disable all Parser Interrupt */
	reg_write(parser, VIP_PARSER_FIQ_MASK, 0xffffffff);

	reg_write(dev->shared, reg_addr, irq_val);

	vpdma_enable_list_complete_irq(dev->shared->vpdma,
				       irq_num, list_num, false);
}
798
/*
 * Acknowledge any pending list-complete and parser interrupts for one
 * VPDMA list on the given interrupt line.
 */
static void clear_irqs(struct vip_dev *dev, int irq_num, int list_num)
{
	struct vip_parser_data *parser = dev->parser;
	u32 reg_addr = VIP_INT0_STATUS0_CLR +
		       VIP_INTC_INTX_OFFSET * irq_num;
	u32 irq_val = (1 << (list_num * 2)) |
		      (VIP_VIP1_PARSER_INT << (irq_num * 1));

	/* Clear all Parser Interrupt */
	reg_write(parser, VIP_PARSER_FIQ_CLR, 0xffffffff);
	reg_write(parser, VIP_PARSER_FIQ_CLR, 0x0);

	reg_write(dev->shared, reg_addr, irq_val);

	/*
	 * NOTE(review): this passes dev->slice_id where the enable/disable
	 * helpers pass list_num -- looks inconsistent; confirm against the
	 * vpdma_clear_list_stat() signature.
	 */
	vpdma_clear_list_stat(dev->shared->vpdma, irq_num, dev->slice_id);
}
815
populate_desc_list(struct vip_stream * stream)816 static void populate_desc_list(struct vip_stream *stream)
817 {
818 struct vip_port *port = stream->port;
819 struct vip_dev *dev = port->dev;
820
821 stream->desc_next = stream->desc_list.buf.addr;
822 add_stream_dtds(stream);
823
824 vpdma_map_desc_buf(dev->shared->vpdma, &stream->desc_list.buf);
825 }
826
/*
 * start_dma - adds descriptors to the dma list and submits them.
 * Should be called after a new vb is queued and on a vpdma list
 * completion interrupt.
 *
 * If buf is NULL the frame is dropped: VPDMA is told to discard the
 * data rather than write it anywhere.
 */
static void start_dma(struct vip_stream *stream, struct vip_buffer *buf)
{
	struct vip_dev *dev = stream->port->dev;
	struct vpdma_data *vpdma = dev->shared->vpdma;
	int list_num = stream->list_num;
	dma_addr_t dma_addr;
	int drop_data;

	if (vpdma_list_busy(vpdma, list_num)) {
		v4l2_err(&dev->v4l2_dev, "vpdma list busy, cannot post\n");
		return;				/* nothing to do */
	}

	if (buf) {
		/* Real capture buffer: use its plane-0 DMA address */
		dma_addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
		drop_data = 0;
		v4l2_dbg(4, debug, &dev->v4l2_dev, "%s: vb2 buf idx:%d, dma_addr:%pad\n",
			 __func__, buf->vb.vb2_buf.index, &dma_addr);
	} else {
		/* No buffer queued: ask VPDMA to drop the incoming frame */
		dma_addr = 0;
		drop_data = 1;
		v4l2_dbg(4, debug, &dev->v4l2_dev, "%s: dropped\n", __func__);
	}

	/* Patch the new address into the in-place write descriptor */
	vpdma_update_dma_addr(dev->shared->vpdma, &stream->desc_list,
			      dma_addr, stream->write_desc, drop_data, 0);

	if (stream->port->fmt->coplanar) {
		/* Chroma plane directly follows the luma plane in memory */
		dma_addr += stream->bytesperline * stream->height;
		vpdma_update_dma_addr(dev->shared->vpdma, &stream->desc_list,
				      dma_addr, stream->write_desc + 1,
				      drop_data, 1);
	}

	vpdma_submit_descs(dev->shared->vpdma,
			   &stream->desc_list, stream->list_num);
}
869
/*
 * Pick the next buffer to post to VPDMA.  If the application has queued
 * a buffer, use it; otherwise recycle one from the drop queue and mark
 * it so the captured frame is discarded.  The chosen buffer moves to
 * post_bufs under dev->slock; start_dma() is called outside the lock
 * (with NULL for the drop case).
 */
static void vip_schedule_next_buffer(struct vip_stream *stream)
{
	struct vip_dev *dev = stream->port->dev;
	struct vip_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&dev->slock, flags);
	if (list_empty(&stream->vidq)) {
		v4l2_dbg(4, debug, &dev->v4l2_dev, "Dropping frame\n");
		if (list_empty(&stream->dropq)) {
			/* All drop buffers are already in flight: give up */
			v4l2_err(&dev->v4l2_dev, "No dropq buffer left!");
			spin_unlock_irqrestore(&dev->slock, flags);
			return;
		}
		buf = list_entry(stream->dropq.next,
				 struct vip_buffer, list);

		buf->drop = true;
		list_move_tail(&buf->list, &stream->post_bufs);
		/* NULL tells start_dma() to program a drop descriptor */
		buf = NULL;
	} else {
		buf = list_entry(stream->vidq.next,
				 struct vip_buffer, list);
		buf->drop = false;
		list_move_tail(&buf->list, &stream->post_bufs);
		v4l2_dbg(4, debug, &dev->v4l2_dev, "added next buffer\n");
	}

	spin_unlock_irqrestore(&dev->slock, flags);
	start_dma(stream, buf);
}
901
/*
 * Complete the oldest posted buffer: fill in the vb2 metadata and hand
 * it to userspace, or return it to the drop queue if it carried a
 * dropped frame.  For interlaced capture the field polarity is read
 * back from the write descriptor (with an unmap/map cycle so the CPU
 * sees VPDMA's update).
 */
static void vip_process_buffer_complete(struct vip_stream *stream)
{
	struct vip_dev *dev = stream->port->dev;
	struct vip_buffer *buf;
	struct vb2_v4l2_buffer *vb = NULL;
	unsigned long flags, fld;

	buf = list_first_entry_or_null(&stream->post_bufs, struct vip_buffer, list);

	if (stream->port->flags & FLAG_INTERLACED) {
		/* Sync the descriptor back to the CPU to read the field bit */
		vpdma_unmap_desc_buf(dev->shared->vpdma,
				     &stream->desc_list.buf);

		fld = dtd_get_field(stream->write_desc);
		stream->field = fld ? V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;

		vpdma_map_desc_buf(dev->shared->vpdma, &stream->desc_list.buf);
	}

	if (buf) {
		vb = &buf->vb;
		vb->sequence = stream->sequence;
		vb->vb2_buf.timestamp = ktime_get_ns();
		vb->field = V4L2_FIELD_NONE;

		if (buf->drop) {
			/* Dropped frame: recycle the buffer, don't complete */
			spin_lock_irqsave(&dev->slock, flags);
			list_move_tail(&buf->list, &stream->dropq);
			spin_unlock_irqrestore(&dev->slock, flags);
		} else {
			spin_lock_irqsave(&dev->slock, flags);
			list_del(&buf->list);
			spin_unlock_irqrestore(&dev->slock, flags);
			vb2_buffer_done(&vb->vb2_buf, VB2_BUF_STATE_DONE);
		}
	} else {
		v4l2_err(&dev->v4l2_dev, "%s: buf is null!!!\n", __func__);
		return;
	}

	stream->sequence++;
}
944
/*
 * Stop the stream's DMA and requeue every in-flight buffer so that a
 * subsequent restart replays them in the original order.  Part of the
 * overflow-recovery path.  Always returns 0.
 */
static int vip_reset_vpdma(struct vip_stream *stream)
{
	struct vip_port *port = stream->port;
	struct vip_dev *dev = port->dev;
	struct vip_buffer *buf;
	unsigned long flags;

	/* Abort the list but keep the descriptors (clear_list = false) */
	stop_dma(stream, false);

	spin_lock_irqsave(&dev->slock, flags);
	/* requeue all active buffers in the opposite order */
	while (!list_empty(&stream->post_bufs)) {
		buf = list_last_entry(&stream->post_bufs,
				      struct vip_buffer, list);
		list_del(&buf->list);
		if (buf->drop == 1) {
			list_add_tail(&buf->list, &stream->dropq);
			v4l2_dbg(4, debug, &dev->v4l2_dev, "requeueing drop buffer on dropq\n");
		} else {
			/* Head insertion restores original queue order */
			list_add(&buf->list, &stream->vidq);
			v4l2_dbg(4, debug, &dev->v4l2_dev, "requeueing vb2 buf idx:%d on vidq\n",
				 buf->vb.vb2_buf.index);
		}
	}
	spin_unlock_irqrestore(&dev->slock, flags);

	/* Make sure the desc_list is unmapped */
	vpdma_unmap_desc_buf(dev->shared->vpdma, &stream->desc_list.buf);

	return 0;
}
976
/*
 * Workqueue handler that recovers from a parser output-FIFO overflow by
 * replaying the hardware-documented reset sequence (the numbered steps
 * below).  Gives up permanently after 5 recovery attempts, leaving the
 * port disabled.
 */
static void vip_overflow_recovery_work(struct work_struct *work)
{
	struct vip_stream *stream = container_of(work, struct vip_stream,
						 recovery_work);
	struct vip_port *port = stream->port;
	struct vip_dev *dev = port->dev;

	v4l2_err(&dev->v4l2_dev, "%s: Port %c\n", __func__,
		 port->port_id == VIP_PORTA ? 'A' : 'B');

	/* Quiesce our interrupt sources before touching the hardware */
	disable_irqs(dev, dev->slice_id, stream->list_num);
	clear_irqs(dev, dev->slice_id, stream->list_num);

	/* 1. Set VIP_XTRA6_PORT_A[31:16] YUV_SRCNUM_STOP_IMMEDIATELY */
	/* 2. Set VIP_XTRA6_PORT_A[15:0] ANC_SRCNUM_STOP_IMMEDIATELY */
	vip_parser_stop_imm(port, 1);

	/* 3. Clear VIP_PORT_A[8] ENABLE */
	/*
	 * 4. Set VIP_PORT_A[7] CLR_ASYNC_FIFO_RD
	 *    Set VIP_PORT_A[6] CLR_ASYNC_FIFO_WR
	 */
	vip_enable_parser(port, false);

	/* 5. Set VIP_PORT_A[23] SW_RESET */
	vip_reset_parser(port, 1);

	/*
	 * 6. Reset other VIP modules
	 * For each module used downstream of VIP_PARSER, write 1 to the
	 * bit location of the VIP_CLKC_RST register which is connected
	 * to VIP_PARSER
	 */
	vip_module_toggle(dev, VIP_DP_RST, true);

	usleep_range(200, 250);

	/*
	 * 7. Abort VPDMA channels
	 * Write to list attribute to stop list 0
	 * Write to list address register location of abort list
	 * Write to list attribute register list 0 and size of abort list
	 */
	vip_reset_vpdma(stream);

	/* 8. Clear VIP_PORT_A[23] SW_RESET */
	vip_reset_parser(port, 0);

	/*
	 * 9. Un-reset other VIP modules
	 * For each module used downstream of VIP_PARSER, write 0 to
	 * the bit location of the VIP_CLKC_RST register which is
	 * connected to VIP_PARSER
	 */
	vip_module_toggle(dev, VIP_DP_RST, false);

	/* 10. (Delay) */
	/* 11. SC coeff downloaded (if VIP_SCALER is being used) */
	vip_setup_scaler(stream);

	/* 12. (Delay) */
	/* the above are not needed here yet */

	/* Rebuild and remap the descriptor list before restarting */
	populate_desc_list(stream);
	stream->num_recovery++;
	if (stream->num_recovery < 5) {
		/* Reload the vpdma */
		vip_load_vpdma_list_fifo(stream);

		enable_irqs(dev, dev->slice_id, stream->list_num);
		vip_schedule_next_buffer(stream);

		/* 13. Clear VIP_XTRA6_PORT_A[31:16] YUV_SRCNUM_STOP_IMM */
		/* 14. Clear VIP_XTRA6_PORT_A[15:0] ANC_SRCNUM_STOP_IMM */

		vip_parser_stop_imm(port, 0);

		/* 15. Set VIP_PORT_A[8] ENABLE */
		/*
		 * 16. Clear VIP_PORT_A[7] CLR_ASYNC_FIFO_RD
		 *     Clear VIP_PORT_A[6] CLR_ASYNC_FIFO_WR
		 */
		vip_enable_parser(port, true);
	} else {
		/* Too many overflows: leave the port off to avoid looping */
		v4l2_err(&dev->v4l2_dev, "%s: num_recovery limit exceeded leaving disabled\n",
			 __func__);
	}
}
1065
handle_parser_irqs(struct vip_dev * dev)1066 static void handle_parser_irqs(struct vip_dev *dev)
1067 {
1068 struct vip_parser_data *parser = dev->parser;
1069 struct vip_port *porta = dev->ports[VIP_PORTA];
1070 struct vip_port *portb = dev->ports[VIP_PORTB];
1071 struct vip_stream *stream = NULL;
1072 u32 vip_irq_stat = reg_read(parser, VIP_PARSER_FIQ_STATUS);
1073 int i;
1074
1075 v4l2_dbg(3, debug, &dev->v4l2_dev, "%s: FIQ_STATUS: 0x%08x\n", __func__, vip_irq_stat);
1076
1077 /* Clear all Parser Interrupt */
1078 reg_write(parser, VIP_PARSER_FIQ_CLR, vip_irq_stat);
1079 reg_write(parser, VIP_PARSER_FIQ_CLR, 0x0);
1080
1081 #ifdef DEBUG
1082 if (vip_irq_stat & VIP_PORTA_VDET)
1083 v4l2_dbg(3, debug, &dev->v4l2_dev, "VIP_PORTA_VDET\n");
1084 if (vip_irq_stat & VIP_PORTB_VDET)
1085 v4l2_dbg(3, debug, &dev->v4l2_dev, "VIP_PORTB_VDET\n");
1086 if (vip_irq_stat & VIP_PORTA_ASYNC_FIFO_OF)
1087 v4l2_err(&dev->v4l2_dev, "VIP_PORTA_ASYNC_FIFO_OF\n");
1088 if (vip_irq_stat & VIP_PORTB_ASYNC_FIFO_OF)
1089 v4l2_err(&dev->v4l2_dev, "VIP_PORTB_ASYNC_FIFO_OF\n");
1090 if (vip_irq_stat & VIP_PORTA_OUTPUT_FIFO_YUV)
1091 v4l2_err(&dev->v4l2_dev, "VIP_PORTA_OUTPUT_FIFO_YUV\n");
1092 if (vip_irq_stat & VIP_PORTA_OUTPUT_FIFO_ANC)
1093 v4l2_err(&dev->v4l2_dev, "VIP_PORTA_OUTPUT_FIFO_ANC\n");
1094 if (vip_irq_stat & VIP_PORTB_OUTPUT_FIFO_YUV)
1095 v4l2_err(&dev->v4l2_dev, "VIP_PORTB_OUTPUT_FIFO_YUV\n");
1096 if (vip_irq_stat & VIP_PORTB_OUTPUT_FIFO_ANC)
1097 v4l2_err(&dev->v4l2_dev, "VIP_PORTB_OUTPUT_FIFO_ANC\n");
1098 if (vip_irq_stat & VIP_PORTA_CONN)
1099 v4l2_dbg(3, debug, &dev->v4l2_dev, "VIP_PORTA_CONN\n");
1100 if (vip_irq_stat & VIP_PORTA_DISCONN)
1101 v4l2_dbg(3, debug, &dev->v4l2_dev, "VIP_PORTA_DISCONN\n");
1102 if (vip_irq_stat & VIP_PORTB_CONN)
1103 v4l2_dbg(3, debug, &dev->v4l2_dev, "VIP_PORTB_CONN\n");
1104 if (vip_irq_stat & VIP_PORTB_DISCONN)
1105 v4l2_dbg(3, debug, &dev->v4l2_dev, "VIP_PORTB_DISCONN\n");
1106 if (vip_irq_stat & VIP_PORTA_SRC0_SIZE)
1107 v4l2_dbg(3, debug, &dev->v4l2_dev, "VIP_PORTA_SRC0_SIZE\n");
1108 if (vip_irq_stat & VIP_PORTB_SRC0_SIZE)
1109 v4l2_dbg(3, debug, &dev->v4l2_dev, "VIP_PORTB_SRC0_SIZE\n");
1110 if (vip_irq_stat & VIP_PORTA_YUV_PROTO_VIOLATION)
1111 v4l2_dbg(3, debug, &dev->v4l2_dev, "VIP_PORTA_YUV_PROTO_VIOLATION\n");
1112 if (vip_irq_stat & VIP_PORTA_ANC_PROTO_VIOLATION)
1113 v4l2_dbg(3, debug, &dev->v4l2_dev, "VIP_PORTA_ANC_PROTO_VIOLATION\n");
1114 if (vip_irq_stat & VIP_PORTB_YUV_PROTO_VIOLATION)
1115 v4l2_dbg(3, debug, &dev->v4l2_dev, "VIP_PORTB_YUV_PROTO_VIOLATION\n");
1116 if (vip_irq_stat & VIP_PORTB_ANC_PROTO_VIOLATION)
1117 v4l2_dbg(3, debug, &dev->v4l2_dev, "VIP_PORTB_ANC_PROTO_VIOLATION\n");
1118 if (vip_irq_stat & VIP_PORTA_CFG_DISABLE_COMPLETE)
1119 v4l2_dbg(3, debug, &dev->v4l2_dev, "VIP_PORTA_CFG_DISABLE_COMPLETE\n");
1120 if (vip_irq_stat & VIP_PORTB_CFG_DISABLE_COMPLETE)
1121 v4l2_dbg(3, debug, &dev->v4l2_dev, "VIP_PORTB_CFG_DISABLE_COMPLETE\n");
1122 #endif
1123
1124 if (vip_irq_stat & (VIP_PORTA_ASYNC_FIFO_OF |
1125 VIP_PORTA_OUTPUT_FIFO_YUV |
1126 VIP_PORTA_OUTPUT_FIFO_ANC)) {
1127 for (i = 0; i < VIP_CAP_STREAMS_PER_PORT; i++) {
1128 if (porta->cap_streams[i] &&
1129 porta->cap_streams[i]->port->port_id ==
1130 porta->port_id) {
1131 stream = porta->cap_streams[i];
1132 break;
1133 }
1134 }
1135 if (stream) {
1136 disable_irqs(dev, dev->slice_id,
1137 stream->list_num);
1138 schedule_work(&stream->recovery_work);
1139 return;
1140 }
1141 }
1142 if (vip_irq_stat & (VIP_PORTB_ASYNC_FIFO_OF |
1143 VIP_PORTB_OUTPUT_FIFO_YUV |
1144 VIP_PORTB_OUTPUT_FIFO_ANC)) {
1145 for (i = 0; i < VIP_CAP_STREAMS_PER_PORT; i++) {
1146 if (portb->cap_streams[i] &&
1147 portb->cap_streams[i]->port->port_id ==
1148 portb->port_id) {
1149 stream = portb->cap_streams[i];
1150 break;
1151 }
1152 }
1153 if (stream) {
1154 disable_irqs(dev, dev->slice_id,
1155 stream->list_num);
1156 schedule_work(&stream->recovery_work);
1157 return;
1158 }
1159 }
1160 }
1161
/*
 * vip_irq() - interrupt handler for one VIP slice.
 *
 * Dispatches parser events and VPDMA LIST_COMPLETE events for up to
 * eight descriptor lists, then acknowledges the interrupt controller
 * and clears exactly the events that were pending when the handler
 * entered (so events raised while handling are not lost).
 */
static irqreturn_t vip_irq(int irq_vip, void *data)
{
	struct vip_dev *dev = (struct vip_dev *)data;
	struct vpdma_data *vpdma;
	struct vip_stream *stream;
	int list_num;
	int irq_num = dev->slice_id;
	u32 irqst, irqst_saved, reg_addr;

	/* Nothing to do before the shared resources are wired up */
	if (!dev->shared)
		return IRQ_HANDLED;

	vpdma = dev->shared->vpdma;
	/* Each slice has its own bank of INTx registers */
	reg_addr = VIP_INT0_STATUS0 +
			VIP_INTC_INTX_OFFSET * irq_num;
	irqst_saved = reg_read(dev->shared, reg_addr);
	irqst = irqst_saved;

	v4l2_dbg(8, debug, &dev->v4l2_dev, "IRQ %d VIP_INT%d_STATUS0 0x%x\n",
		 irq_vip, irq_num, irqst);
	if (irqst) {
		/* The parser interrupt bit position scales with the slice */
		if (irqst & (VIP_VIP1_PARSER_INT << (irq_num * 1))) {
			irqst &= ~(VIP_VIP1_PARSER_INT << (irq_num * 1));
			handle_parser_irqs(dev);
		}

		/* LIST_COMPLETE bits sit at even positions (2 * list_num) */
		for (list_num = 0; irqst && (list_num < 8); list_num++) {
			/* Check for LIST_COMPLETE IRQ */
			if (!(irqst & (1 << list_num * 2)))
				continue;

			v4l2_dbg(8, debug, &dev->v4l2_dev, "IRQ %d: handling LIST%d_COMPLETE\n",
				 irq_num, list_num);

			/* The hwlist's private data is the owning stream */
			stream = vpdma_hwlist_get_priv(vpdma, list_num);
			if (!stream || stream->list_num != list_num) {
				v4l2_err(&dev->v4l2_dev, "IRQ occurred for unused list");
				continue;
			}

			vpdma_clear_list_stat(vpdma, irq_num, list_num);

			vip_process_buffer_complete(stream);

			vip_schedule_next_buffer(stream);

			irqst &= ~((1 << list_num * 2));
		}
	}

	/* Acknowledge that we are done with all interrupts */
	reg_write(dev->shared, VIP_INTC_E0I, 1 << irq_num);

	/* Clear handled events from status register */
	reg_addr = VIP_INT0_STATUS0_CLR +
		   VIP_INTC_INTX_OFFSET * irq_num;
	reg_write(dev->shared, reg_addr, irqst_saved);

	return IRQ_HANDLED;
}
1222
1223 /*
1224 * video ioctls
1225 */
vip_querycap(struct file * file,void * priv,struct v4l2_capability * cap)1226 static int vip_querycap(struct file *file, void *priv,
1227 struct v4l2_capability *cap)
1228 {
1229 strscpy(cap->driver, VIP_MODULE_NAME, sizeof(cap->driver));
1230 strscpy(cap->card, VIP_MODULE_NAME, sizeof(cap->card));
1231 return 0;
1232 }
1233
vip_enuminput(struct file * file,void * priv,struct v4l2_input * inp)1234 static int vip_enuminput(struct file *file, void *priv,
1235 struct v4l2_input *inp)
1236 {
1237 struct vip_stream *stream = file2stream(file);
1238
1239 if (inp->index)
1240 return -EINVAL;
1241
1242 inp->type = V4L2_INPUT_TYPE_CAMERA;
1243 inp->std = stream->vfd->tvnorms;
1244 sprintf(inp->name, "Camera %u", stream->vfd->num);
1245
1246 return 0;
1247 }
1248
/* Report the currently selected input; there is only input 0. */
static int vip_g_input(struct file *file, void *priv, unsigned int *i)
{
	*i = 0;
	return 0;
}
1254
/* Select an input; only input 0 exists, so reject everything else. */
static int vip_s_input(struct file *file, void *priv, unsigned int i)
{
	return i ? -EINVAL : 0;
}
1261
/* Detect the incoming video standard via the connected subdevice. */
static int vip_querystd(struct file *file, void *fh, v4l2_std_id *std)
{
	struct vip_stream *stream = file2stream(file);
	struct vip_port *port = stream->port;

	/* Seed with the norms the video device advertises; the subdev
	 * may narrow *std down. Its return code is ignored so the
	 * advertised norms remain the fallback answer.
	 */
	*std = stream->vfd->tvnorms;
	v4l2_subdev_call(port->subdev, video, querystd, std);
	return 0;
}
1271
/* Report the current video standard, as known by the subdevice. */
static int vip_g_std(struct file *file, void *fh, v4l2_std_id *std)
{
	struct vip_stream *stream = file2stream(file);
	struct vip_port *port = stream->port;

	/* Default to the advertised norms, then let the subdev update it;
	 * errors from the subdev call are ignored on purpose.
	 */
	*std = stream->vfd->tvnorms;
	v4l2_subdev_call(port->subdev, video, g_std, std);

	return 0;
}
1282
/* Set the video standard on the subdevice after validity checks. */
static int vip_s_std(struct file *file, void *fh, v4l2_std_id std)
{
	struct vip_stream *stream = file2stream(file);
	struct vip_port *port = stream->port;

	/* No change requested */
	if (stream->vfd->tvnorms == std)
		return 0;

	/* Requested standard must be among the advertised norms */
	if (!(std & stream->vfd->tvnorms))
		return -EINVAL;

	/* The standard cannot change while buffers are queued */
	if (vb2_is_busy(&stream->vb_vidq))
		return -EBUSY;

	v4l2_subdev_call(port->subdev, video, s_std, std);
	return 0;
}
1300
vip_enum_fmt_vid_cap(struct file * file,void * priv,struct v4l2_fmtdesc * f)1301 static int vip_enum_fmt_vid_cap(struct file *file, void *priv,
1302 struct v4l2_fmtdesc *f)
1303 {
1304 struct vip_stream *stream = file2stream(file);
1305 struct vip_port *port = stream->port;
1306 struct vip_fmt *fmt;
1307
1308 if (f->index >= port->num_active_fmt)
1309 return -EINVAL;
1310
1311 fmt = port->active_fmt[f->index];
1312 f->pixelformat = fmt->fourcc;
1313
1314 return 0;
1315 }
1316
vip_enum_framesizes(struct file * file,void * priv,struct v4l2_frmsizeenum * f)1317 static int vip_enum_framesizes(struct file *file, void *priv,
1318 struct v4l2_frmsizeenum *f)
1319 {
1320 struct vip_stream *stream = file2stream(file);
1321 struct vip_port *port = stream->port;
1322 struct vip_dev *dev = port->dev;
1323 struct vip_fmt *fmt;
1324 int ret;
1325 struct v4l2_subdev_frame_size_enum fse = {
1326 .which = V4L2_SUBDEV_FORMAT_ACTIVE,
1327 .pad = 0,
1328 };
1329
1330 fmt = find_port_format_by_pix(port, f->pixel_format);
1331 if (!fmt)
1332 return -EINVAL;
1333
1334 fse.index = f->index;
1335 fse.code = fmt->code;
1336 ret = v4l2_subdev_call(port->subdev, pad, enum_frame_size, NULL, &fse);
1337 if (ret)
1338 return -EINVAL;
1339
1340 v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
1341 __func__, fse.index, fse.code, fse.min_width, fse.max_width,
1342 fse.min_height, fse.max_height);
1343
1344 f->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1345 f->discrete.width = fse.max_width;
1346 f->discrete.height = fse.max_height;
1347
1348 return 0;
1349 }
1350
vip_enum_frameintervals(struct file * file,void * priv,struct v4l2_frmivalenum * f)1351 static int vip_enum_frameintervals(struct file *file, void *priv,
1352 struct v4l2_frmivalenum *f)
1353 {
1354 struct vip_stream *stream = file2stream(file);
1355 struct vip_port *port = stream->port;
1356 struct vip_fmt *fmt;
1357 struct v4l2_subdev_frame_interval_enum fie = {
1358 .index = f->index,
1359 .width = f->width,
1360 .height = f->height,
1361 .which = V4L2_SUBDEV_FORMAT_ACTIVE,
1362 };
1363 int ret;
1364
1365 fmt = find_port_format_by_pix(port, f->pixel_format);
1366 if (!fmt)
1367 return -EINVAL;
1368
1369 fie.code = fmt->code;
1370 ret = v4l2_subdev_call(port->subdev, pad, enum_frame_interval,
1371 NULL, &fie);
1372 if (ret)
1373 return ret;
1374 f->type = V4L2_FRMIVAL_TYPE_DISCRETE;
1375 f->discrete = fie.interval;
1376
1377 return 0;
1378 }
1379
vip_g_parm(struct file * file,void * fh,struct v4l2_streamparm * a)1380 static int vip_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
1381 {
1382 struct vip_stream *stream = video_drvdata(file);
1383 struct vip_port *port = stream->port;
1384
1385 return v4l2_g_parm_cap(video_devdata(file), port->subdev, a);
1386 }
1387
vip_s_parm(struct file * file,void * fh,struct v4l2_streamparm * a)1388 static int vip_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
1389 {
1390 struct vip_stream *stream = video_drvdata(file);
1391 struct vip_port *port = stream->port;
1392
1393 return v4l2_s_parm_cap(video_devdata(file), port->subdev, a);
1394 }
1395
vip_calc_format_size(struct vip_port * port,struct vip_fmt * fmt,struct v4l2_format * f)1396 static int vip_calc_format_size(struct vip_port *port,
1397 struct vip_fmt *fmt,
1398 struct v4l2_format *f)
1399 {
1400 enum v4l2_field *field;
1401 unsigned int stride;
1402 struct vip_dev *dev = port->dev;
1403
1404 if (!fmt) {
1405 v4l2_dbg(2, debug, &dev->v4l2_dev,
1406 "no vip_fmt format provided!\n");
1407 return -EINVAL;
1408 }
1409
1410 field = &f->fmt.pix.field;
1411 if (*field == V4L2_FIELD_ANY)
1412 *field = V4L2_FIELD_NONE;
1413 else if (V4L2_FIELD_NONE != *field && V4L2_FIELD_ALTERNATE != *field)
1414 return -EINVAL;
1415
1416 v4l_bound_align_image(&f->fmt.pix.width, MIN_W, MAX_W, W_ALIGN,
1417 &f->fmt.pix.height, MIN_H, MAX_H, H_ALIGN,
1418 S_ALIGN);
1419
1420 stride = f->fmt.pix.width * (fmt->vpdma_fmt[0]->depth >> 3);
1421 f->fmt.pix.bytesperline = ALIGN(stride, VPDMA_STRIDE_ALIGN);
1422
1423 f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
1424 if (fmt->coplanar) {
1425 f->fmt.pix.sizeimage += f->fmt.pix.height *
1426 f->fmt.pix.bytesperline *
1427 fmt->vpdma_fmt[VIP_CHROMA]->depth >> 3;
1428 }
1429
1430 f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
1431
1432 v4l2_dbg(3, debug, &dev->v4l2_dev, "calc_format_size: fourcc:%s size: %dx%d bpl:%d img_size:%d\n",
1433 fourcc_to_str(f->fmt.pix.pixelformat),
1434 f->fmt.pix.width, f->fmt.pix.height,
1435 f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
1436
1437 return 0;
1438 }
1439
vip_is_size_dma_aligned(u32 bpp,u32 width)1440 static inline bool vip_is_size_dma_aligned(u32 bpp, u32 width)
1441 {
1442 return ((width * bpp) == ALIGN(width * bpp, VPDMA_STRIDE_ALIGN));
1443 }
1444
/*
 * vip_try_fmt_vid_cap() - validate and adjust a requested capture format.
 *
 * Picks a supported pixel format (falling back to the port's first
 * enumerated one), checks CSC availability for formats that need color
 * conversion, then probes the subdev's frame sizes to choose the closest
 * usable mbus size. If the scaler is available and applicable, the output
 * size may additionally be scaled down. Finally bytesperline/sizeimage
 * are computed via vip_calc_format_size().
 */
static int vip_try_fmt_vid_cap(struct file *file, void *priv,
			       struct v4l2_format *f)
{
	struct vip_stream *stream = file2stream(file);
	struct vip_port *port = stream->port;
	struct vip_dev *dev = port->dev;
	struct vip_fmt *fmt;
	u32 best_width, best_height, largest_width, largest_height;
	int ret, found;
	enum vip_csc_state csc_direction;
	struct v4l2_subdev_frame_size_enum fse = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = 0,
	};

	fmt = find_port_format_by_pix(port, f->fmt.pix.pixelformat);
	if (!fmt) {
		/* Just get the first one enumerated */
		fmt = port->active_fmt[0];
		f->fmt.pix.pixelformat = fmt->fourcc;
	}

	csc_direction = vip_csc_direction(fmt->code, fmt->finfo);
	if (csc_direction != VIP_CSC_NA) {
		if (!is_csc_available(port)) {
			v4l2_dbg(2, debug, &dev->v4l2_dev,
				 "CSC not available for Fourcc format (0x%08x).\n",
				 f->fmt.pix.pixelformat);

			/* Just get the first one enumerated */
			fmt = port->active_fmt[0];
			f->fmt.pix.pixelformat = fmt->fourcc;
			/* re-evaluate the csc_direction here */
			csc_direction = vip_csc_direction(fmt->code,
							  fmt->finfo);
		} else {
			v4l2_dbg(3, debug, &dev->v4l2_dev, "CSC active on Port %c: going %s\n",
				 port->port_id == VIP_PORTA ? 'A' : 'B',
				 (csc_direction == VIP_CSC_Y2R) ? "Y2R" : "R2Y");
		}
	}

	/*
	 * Given that sensors might support multiple mbus code we need
	 * to use the one that matches the requested pixel format
	 */
	port->try_mbus_framefmt = port->mbus_framefmt;
	port->try_mbus_framefmt.code = fmt->code;

	/* check for/find a valid width/height */
	ret = 0;
	found = false;
	best_width = 0;
	best_height = 0;
	largest_width = 0;
	largest_height = 0;

	/* Walk every frame size the subdev can produce for this code */
	fse.code = fmt->code;
	for (fse.index = 0; ; fse.index++) {
		u32 bpp = fmt->vpdma_fmt[0]->depth >> 3;

		ret = v4l2_subdev_call(port->subdev, pad,
				       enum_frame_size, NULL, &fse);
		if (ret)
			break;

		/* Sizes whose stride cannot be DMA'd are unusable */
		if (!vip_is_size_dma_aligned(bpp, fse.max_width))
			continue;

		if (fse.max_width >= largest_width &&
		    fse.max_height >= largest_height) {
			v4l2_dbg(3, debug, &dev->v4l2_dev, "try_fmt loop:%d found new larger: %dx%d\n",
				 fse.index, fse.max_width, fse.max_height);
			largest_width = fse.max_width;
			largest_height = fse.max_height;
		}

		if (fse.max_width >= f->fmt.pix.width &&
		    fse.max_height >= f->fmt.pix.height) {
			v4l2_dbg(3, debug, &dev->v4l2_dev, "try_fmt loop:%d found at least larger: %dx%d\n",
				 fse.index, fse.max_width, fse.max_height);

			/* Track the size closest to the requested one */
			if (!best_width ||
			    ((abs(best_width - f->fmt.pix.width) >=
			      abs(fse.max_width - f->fmt.pix.width)) &&
			     (abs(best_height - f->fmt.pix.height) >=
			      abs(fse.max_height - f->fmt.pix.height)))) {
				best_width = fse.max_width;
				best_height = fse.max_height;
				v4l2_dbg(3, debug, &dev->v4l2_dev, "try_fmt loop:%d found new best: %dx%d\n",
					 fse.index, fse.max_width,
					 fse.max_height);
			}
		}

		if (f->fmt.pix.width == fse.max_width &&
		    f->fmt.pix.height == fse.max_height) {
			found = true;
			v4l2_dbg(3, debug, &dev->v4l2_dev, "try_fmt loop:%d found direct match: %dx%d\n",
				 fse.index, fse.max_width,
				 fse.max_height);
			break;
		}

		if (f->fmt.pix.width >= fse.min_width &&
		    f->fmt.pix.width <= fse.max_width &&
		    f->fmt.pix.height >= fse.min_height &&
		    f->fmt.pix.height <= fse.max_height) {
			found = true;
			v4l2_dbg(3, debug, &dev->v4l2_dev, "try_fmt loop:%d found direct range match: %dx%d\n",
				 fse.index, fse.max_width,
				 fse.max_height);
			break;
		}
	}

	if (found) {
		port->try_mbus_framefmt.width = f->fmt.pix.width;
		port->try_mbus_framefmt.height = f->fmt.pix.height;
		/* No need to check for scaling */
		goto calc_size;
	} else if (f->fmt.pix.width > largest_width) {
		port->try_mbus_framefmt.width = largest_width;
		port->try_mbus_framefmt.height = largest_height;
	} else if (best_width) {
		port->try_mbus_framefmt.width = best_width;
		port->try_mbus_framefmt.height = best_height;
	} else {
		/* use existing values as default */
	}

	v4l2_dbg(3, debug, &dev->v4l2_dev, "try_fmt best subdev size: %dx%d\n",
		 port->try_mbus_framefmt.width,
		 port->try_mbus_framefmt.height);

	if (is_scaler_available(port) &&
	    csc_direction != VIP_CSC_Y2R &&
	    !vip_is_mbuscode_raw(fmt->code) &&
	    f->fmt.pix.height <= port->try_mbus_framefmt.height &&
	    port->try_mbus_framefmt.height <= SC_MAX_PIXEL_HEIGHT &&
	    port->try_mbus_framefmt.width <= SC_MAX_PIXEL_WIDTH) {
		/*
		 * Scaler is only accessible if the dst colorspace is YUV.
		 * As the input to the scaler must be in YUV mode only.
		 *
		 * Scaling up is allowed only horizontally.
		 */
		unsigned int hratio, vratio, width_align, height_align;
		u32 bpp = fmt->vpdma_fmt[0]->depth >> 3;

		v4l2_dbg(3, debug, &dev->v4l2_dev, "Scaler active on Port %c: requesting %dx%d\n",
			 port->port_id == VIP_PORTA ? 'A' : 'B',
			 f->fmt.pix.width, f->fmt.pix.height);

		/* Just make sure everything is properly aligned */
		width_align = ALIGN(f->fmt.pix.width * bpp, VPDMA_STRIDE_ALIGN);
		width_align /= bpp;
		height_align = ALIGN(f->fmt.pix.height, 2);

		f->fmt.pix.width = width_align;
		f->fmt.pix.height = height_align;

		/* Ratios are scaled by 1000 (output/input) */
		hratio = f->fmt.pix.width * 1000 /
			 port->try_mbus_framefmt.width;
		vratio = f->fmt.pix.height * 1000 /
			 port->try_mbus_framefmt.height;
		if (hratio < 125) {
			/* Clamp horizontal down-scale at 1/8 of the input */
			f->fmt.pix.width = port->try_mbus_framefmt.width / 8;
			v4l2_dbg(3, debug, &dev->v4l2_dev, "Horizontal scaling ratio out of range adjusting -> %d\n",
				 f->fmt.pix.width);
		}

		if (vratio < 188) {
			/* Clamp vertical down-scale at 1/4 of the input */
			f->fmt.pix.height = port->try_mbus_framefmt.height / 4;
			v4l2_dbg(3, debug, &dev->v4l2_dev, "Vertical scaling ratio out of range adjusting -> %d\n",
				 f->fmt.pix.height);
		}
		v4l2_dbg(3, debug, &dev->v4l2_dev, "Scaler: got %dx%d\n",
			 f->fmt.pix.width, f->fmt.pix.height);
	} else {
		/* use existing values as default */
		f->fmt.pix.width = port->try_mbus_framefmt.width;
		f->fmt.pix.height = port->try_mbus_framefmt.height;
	}

calc_size:
	/* That we have a fmt calculate imagesize and bytesperline */
	return vip_calc_format_size(port, fmt, f);
}
1634
vip_g_fmt_vid_cap(struct file * file,void * priv,struct v4l2_format * f)1635 static int vip_g_fmt_vid_cap(struct file *file, void *priv,
1636 struct v4l2_format *f)
1637 {
1638 struct vip_stream *stream = file2stream(file);
1639 struct vip_port *port = stream->port;
1640
1641 /* Use last known values or defaults */
1642 f->fmt.pix.width = stream->width;
1643 f->fmt.pix.height = stream->height;
1644 f->fmt.pix.pixelformat = port->fmt->fourcc;
1645 f->fmt.pix.field = stream->sup_field;
1646 f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
1647 f->fmt.pix.bytesperline = stream->bytesperline;
1648 f->fmt.pix.sizeimage = stream->sizeimage;
1649
1650 return 0;
1651 }
1652
/*
 * vip_s_fmt_vid_cap() - apply a capture format.
 *
 * Runs try_fmt first so the format is always valid, refuses to change the
 * format while buffers are queued, (de)allocates the shared scaler/CSC
 * resources as needed, records the stream/port state and finally pushes
 * the matching mbus format to the subdevice.
 */
static int vip_s_fmt_vid_cap(struct file *file, void *priv,
			     struct v4l2_format *f)
{
	struct vip_stream *stream = file2stream(file);
	struct vip_port *port = stream->port;
	struct vip_dev *dev = port->dev;
	struct v4l2_subdev_format sfmt;
	struct v4l2_mbus_framefmt *mf;
	enum vip_csc_state csc_direction;
	int ret;

	/* Normalize the request; also fills port->try_mbus_framefmt */
	ret = vip_try_fmt_vid_cap(file, priv, f);
	if (ret)
		return ret;

	if (vb2_is_busy(&stream->vb_vidq)) {
		v4l2_err(&dev->v4l2_dev, "%s queue busy\n", __func__);
		return -EBUSY;
	}

	/*
	 * Check if we need the scaler or not
	 *
	 * Since on previous S_FMT call the scaler might have been
	 * allocated if it is not needed in this instance we will
	 * attempt to free it just in case.
	 *
	 * free_scaler() is harmless unless the current port
	 * allocated it.
	 */
	if (f->fmt.pix.width == port->try_mbus_framefmt.width &&
	    f->fmt.pix.height == port->try_mbus_framefmt.height)
		free_scaler(port);
	else
		allocate_scaler(port);

	port->fmt = find_port_format_by_pix(port,
					    f->fmt.pix.pixelformat);
	stream->width = f->fmt.pix.width;
	stream->height = f->fmt.pix.height;
	stream->bytesperline = f->fmt.pix.bytesperline;
	stream->sizeimage = f->fmt.pix.sizeimage;
	stream->sup_field = f->fmt.pix.field;
	stream->field = f->fmt.pix.field;

	/* Default crop rectangle covers the whole frame */
	port->c_rect.left = 0;
	port->c_rect.top = 0;
	port->c_rect.width = stream->width;
	port->c_rect.height = stream->height;

	/*
	 * Check if we need the csc unit or not
	 *
	 * Since on previous S_FMT call, the csc might have been
	 * allocated if it is not needed in this instance we will
	 * attempt to free it just in case.
	 *
	 * free_csc() is harmless unless the current port
	 * allocated it.
	 */
	csc_direction = vip_csc_direction(port->fmt->code, port->fmt->finfo);
	if (csc_direction == VIP_CSC_NA)
		free_csc(port);
	else
		allocate_csc(port, csc_direction);

	if (stream->sup_field == V4L2_FIELD_ALTERNATE)
		port->flags |= FLAG_INTERLACED;
	else
		port->flags &= ~FLAG_INTERLACED;

	memset(&sfmt, 0, sizeof(sfmt));
	mf = &sfmt.format;
	v4l2_fill_mbus_format(mf, &f->fmt.pix, port->fmt->code);
	/* Make sure to use the subdev size found in the try_fmt */
	mf->width = port->try_mbus_framefmt.width;
	mf->height = port->try_mbus_framefmt.height;

	sfmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	sfmt.pad = 0;
	ret = v4l2_subdev_call(port->subdev, pad, set_fmt, NULL, &sfmt);
	if (ret) {
		v4l2_dbg(1, debug, &dev->v4l2_dev, "set_fmt failed in subdev\n");
		return ret;
	}

	/* Save it */
	port->mbus_framefmt = *mf;

	v4l2_dbg(3, debug, &dev->v4l2_dev, "s_fmt subdev fmt mbus_code: %04X size: %dx%d\n",
		 port->mbus_framefmt.code,
		 port->mbus_framefmt.width, port->mbus_framefmt.height);

	return 0;
}
1748
vip_unset_csc_y2r(struct vip_dev * dev,struct vip_port * port)1749 static void vip_unset_csc_y2r(struct vip_dev *dev, struct vip_port *port)
1750 {
1751 if (port->port_id == VIP_PORTA) {
1752 vip_set_slice_path(dev, VIP_CSC_SRC_DATA_SELECT, 0);
1753 vip_set_slice_path(dev, VIP_MULTI_CHANNEL_DATA_SELECT, 0);
1754 vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
1755 vip_set_slice_path(dev, VIP_RGB_SRC_DATA_SELECT, 0);
1756 } else {
1757 vip_set_slice_path(dev, VIP_CSC_SRC_DATA_SELECT, 0);
1758 vip_set_slice_path(dev, VIP_MULTI_CHANNEL_DATA_SELECT, 0);
1759 vip_set_slice_path(dev, VIP_RGB_OUT_LO_DATA_SELECT, 0);
1760 }
1761 }
1762
vip_unset_csc_r2y(struct vip_dev * dev,struct vip_port * port)1763 static void vip_unset_csc_r2y(struct vip_dev *dev, struct vip_port *port)
1764 {
1765 if (port->port_id != VIP_PORTA) {
1766 v4l2_err(&dev->v4l2_dev, "RGB sensor can only be on Port A\n");
1767 return;
1768 }
1769
1770 if (port->scaler && port->fmt->coplanar) {
1771 vip_set_slice_path(dev, VIP_CSC_SRC_DATA_SELECT, 0);
1772 vip_set_slice_path(dev, VIP_SC_SRC_DATA_SELECT, 0);
1773 vip_set_slice_path(dev, VIP_CHR_DS_1_SRC_DATA_SELECT, 0);
1774 vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 0);
1775 vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
1776 } else if (port->scaler) {
1777 vip_set_slice_path(dev, VIP_CSC_SRC_DATA_SELECT, 0);
1778 vip_set_slice_path(dev, VIP_SC_SRC_DATA_SELECT, 0);
1779 vip_set_slice_path(dev, VIP_CHR_DS_1_SRC_DATA_SELECT, 0);
1780 vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 0);
1781 vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
1782 } else if (port->fmt->coplanar) {
1783 vip_set_slice_path(dev, VIP_CSC_SRC_DATA_SELECT, 0);
1784 vip_set_slice_path(dev, VIP_CHR_DS_1_SRC_DATA_SELECT, 0);
1785 vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 0);
1786 vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
1787 }
1788 }
1789
/* Tear down the direct RGB (no CSC) routing; only valid on port A. */
static void vip_unset_rgb(struct vip_dev *dev, struct vip_port *port)
{
	if (port->port_id != VIP_PORTA) {
		v4l2_err(&dev->v4l2_dev, "RGB sensor can only be on Port A\n");
		return;
	}

	vip_set_slice_path(dev, VIP_MULTI_CHANNEL_DATA_SELECT, 0);
	vip_set_slice_path(dev, VIP_RGB_OUT_LO_DATA_SELECT, 0);
}
1800
/*
 * Tear down the YUV (no CSC) capture routing.
 *
 * The path programmed at s_fmt time depends on whether this port holds
 * the scaler and whether the output format is coplanar, so the teardown
 * mirrors the same four cases, per port.
 */
static void vip_unset_yuv(struct vip_dev *dev, struct vip_port *port)
{
	if (port->scaler && port->fmt->coplanar) {
		/* scaler + coplanar output */
		if (port->port_id == VIP_PORTA) {
			vip_set_slice_path(dev, VIP_SC_SRC_DATA_SELECT, 0);
			vip_set_slice_path(dev, VIP_CHR_DS_1_SRC_DATA_SELECT, 0);
			vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 0);
			vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
		} else {
			vip_set_slice_path(dev, VIP_SC_SRC_DATA_SELECT, 0);
			vip_set_slice_path(dev, VIP_CHR_DS_2_SRC_DATA_SELECT, 0);
			vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 0);
			vip_set_slice_path(dev, VIP_RGB_OUT_LO_DATA_SELECT, 0);
			vip_set_slice_path(dev, VIP_MULTI_CHANNEL_DATA_SELECT, 0);
		}
	} else if (port->scaler) {
		/* scaler only, single plane output */
		if (port->port_id == VIP_PORTA) {
			vip_set_slice_path(dev, VIP_SC_SRC_DATA_SELECT, 0);
			vip_set_slice_path(dev, VIP_CHR_DS_1_SRC_DATA_SELECT, 0);
			vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 0);
			vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
		} else {
			vip_set_slice_path(dev, VIP_SC_SRC_DATA_SELECT, 0);
			vip_set_slice_path(dev, VIP_CHR_DS_2_SRC_DATA_SELECT, 0);
			vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 0);
			vip_set_slice_path(dev, VIP_CHR_DS_2_DATA_BYPASS, 0);
			vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
		}
	} else if (port->fmt->coplanar) {
		/* coplanar output without the scaler */
		if (port->port_id == VIP_PORTA) {
			vip_set_slice_path(dev, VIP_CHR_DS_1_SRC_DATA_SELECT, 0);
			vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 0);
			vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
		} else {
			vip_set_slice_path(dev, VIP_CHR_DS_2_SRC_DATA_SELECT, 0);
			vip_set_slice_path(dev, VIP_CHR_DS_2_DATA_BYPASS, 0);
			vip_set_slice_path(dev, VIP_MULTI_CHANNEL_DATA_SELECT, 0);
			vip_set_slice_path(dev, VIP_RGB_OUT_LO_DATA_SELECT, 0);
		}
	} else {
		/*
		 * We undo all data path setting except for the multi
		 * stream case.
		 * Because we cannot disrupt other on-going capture if only
		 * one stream is terminated the other might still be going
		 */
		vip_set_slice_path(dev, VIP_MULTI_CHANNEL_DATA_SELECT, 1);
		vip_set_slice_path(dev, VIP_RGB_OUT_LO_DATA_SELECT, 0);
	}
}
1851
1852 /*
1853 * Does the exact opposite of set_fmt_params
1854 * It makes sure the DataPath register is sane after tear down
1855 */
unset_fmt_params(struct vip_stream * stream)1856 static void unset_fmt_params(struct vip_stream *stream)
1857 {
1858 struct vip_port *port = stream->port;
1859 struct vip_dev *dev = port->dev;
1860
1861 stream->sequence = 0;
1862 stream->field = V4L2_FIELD_TOP;
1863
1864 /* Undo CSC Y2R routing */
1865 if (port->csc == VIP_CSC_Y2R) {
1866 vip_unset_csc_y2r(dev, port);
1867 /* Undo CSC R2Y routing */
1868 } else if (port->csc == VIP_CSC_R2Y) {
1869 vip_unset_csc_r2y(dev, port);
1870 /* Undo RGB output routing (no CSC) */
1871 } else if (v4l2_is_format_rgb(port->fmt->finfo)) {
1872 vip_unset_rgb(dev, port);
1873 /* Undo YUV routing with no CSC */
1874 } else {
1875 vip_unset_yuv(dev, port);
1876 }
1877 }
1878
/*
 * Program the datapath for YUV -> RGB conversion through the CSC.
 * The CSC source select value differs per port; multi-channel mode is
 * disabled and the CSC output is routed to the port's RGB output.
 */
static void vip_config_csc_y2r(struct vip_dev *dev, struct vip_port *port)
{
	port->flags &= ~FLAG_MULT_PORT;

	/* Set alpha component in background color */
	vpdma_set_bg_color(dev->shared->vpdma,
			   (struct vpdma_data_format *)
			   port->fmt->vpdma_fmt[0],
			   0xff);

	if (port->port_id == VIP_PORTA) {
		vip_set_slice_path(dev, VIP_CSC_SRC_DATA_SELECT, 1);
		vip_set_slice_path(dev, VIP_MULTI_CHANNEL_DATA_SELECT, 0);
		vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 1);
		vip_set_slice_path(dev, VIP_RGB_SRC_DATA_SELECT, 1);
	} else {
		vip_set_slice_path(dev, VIP_CSC_SRC_DATA_SELECT, 2);
		vip_set_slice_path(dev, VIP_MULTI_CHANNEL_DATA_SELECT, 0);
		vip_set_slice_path(dev, VIP_RGB_OUT_LO_DATA_SELECT, 1);
	}
}
1900
vip_config_csc_r2y(struct vip_dev * dev,struct vip_port * port)1901 static void vip_config_csc_r2y(struct vip_dev *dev, struct vip_port *port)
1902 {
1903 if (port->port_id != VIP_PORTA) {
1904 v4l2_err(&dev->v4l2_dev, "RGB sensor can only be on Port A\n");
1905 return;
1906 }
1907
1908 port->flags &= ~FLAG_MULT_PORT;
1909
1910 if (port->scaler && port->fmt->coplanar) {
1911 vip_set_slice_path(dev, VIP_CSC_SRC_DATA_SELECT, 4);
1912 vip_set_slice_path(dev, VIP_SC_SRC_DATA_SELECT, 1);
1913 vip_set_slice_path(dev, VIP_CHR_DS_1_SRC_DATA_SELECT, 1);
1914 vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 0);
1915 vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
1916 } else if (port->scaler) {
1917 vip_set_slice_path(dev, VIP_CSC_SRC_DATA_SELECT, 4);
1918 vip_set_slice_path(dev, VIP_SC_SRC_DATA_SELECT, 1);
1919 vip_set_slice_path(dev, VIP_CHR_DS_1_SRC_DATA_SELECT, 1);
1920 vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 1);
1921 vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
1922 } else if (port->fmt->coplanar) {
1923 vip_set_slice_path(dev, VIP_CSC_SRC_DATA_SELECT, 4);
1924 vip_set_slice_path(dev, VIP_CHR_DS_1_SRC_DATA_SELECT, 2);
1925 vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 0);
1926 vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
1927 } else {
1928 vip_set_slice_path(dev, VIP_CSC_SRC_DATA_SELECT, 4);
1929 vip_set_slice_path(dev, VIP_CHR_DS_1_SRC_DATA_SELECT, 2);
1930 vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 1);
1931 vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
1932 }
1933 }
1934
vip_config_rgb(struct vip_dev * dev,struct vip_port * port)1935 static void vip_config_rgb(struct vip_dev *dev, struct vip_port *port)
1936 {
1937 if (port->port_id != VIP_PORTA) {
1938 v4l2_err(&dev->v4l2_dev, "RGB sensor can only be on Port A\n");
1939 return;
1940 }
1941
1942 port->flags &= ~FLAG_MULT_PORT;
1943
1944 /* Set alpha component in background color */
1945 vpdma_set_bg_color(dev->shared->vpdma,
1946 (struct vpdma_data_format *)
1947 port->fmt->vpdma_fmt[0],
1948 0xff);
1949
1950 vip_set_slice_path(dev, VIP_MULTI_CHANNEL_DATA_SELECT, 1);
1951 vip_set_slice_path(dev, VIP_RGB_OUT_LO_DATA_SELECT, 1);
1952 }
1953
/*
 * Configure the slice datapath muxes for YUV capture without CSC.
 * The routing depends on whether the scaler is in the path, whether
 * the output format is coplanar (separate chroma plane needing the
 * chroma downsampler), and which port is capturing: Port A is wired
 * to chroma downsampler 1, Port B to chroma downsampler 2.
 */
static void vip_config_yuv(struct vip_dev *dev, struct vip_port *port)
{
	if (port->scaler && port->fmt->coplanar) {
		/* Scaled coplanar output: scaler feeds the chroma downsampler */
		port->flags &= ~FLAG_MULT_PORT;
		if (port->port_id == VIP_PORTA) {
			vip_set_slice_path(dev, VIP_SC_SRC_DATA_SELECT, 2);
			vip_set_slice_path(dev, VIP_CHR_DS_1_SRC_DATA_SELECT, 1);
			vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 0);
			vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
		} else {
			vip_set_slice_path(dev, VIP_SC_SRC_DATA_SELECT, 3);
			vip_set_slice_path(dev, VIP_CHR_DS_2_SRC_DATA_SELECT, 1);
			vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 0);
			vip_set_slice_path(dev, VIP_RGB_OUT_LO_DATA_SELECT, 0);
			vip_set_slice_path(dev, VIP_MULTI_CHANNEL_DATA_SELECT, 0);
		}
	} else if (port->scaler) {
		/* Scaled packed output: downsampler present but bypassed */
		port->flags &= ~FLAG_MULT_PORT;
		if (port->port_id == VIP_PORTA) {
			vip_set_slice_path(dev, VIP_SC_SRC_DATA_SELECT, 2);
			vip_set_slice_path(dev, VIP_CHR_DS_1_SRC_DATA_SELECT, 1);
			vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 1);
			vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
		} else {
			vip_set_slice_path(dev, VIP_SC_SRC_DATA_SELECT, 3);
			vip_set_slice_path(dev, VIP_CHR_DS_2_SRC_DATA_SELECT, 1);
			vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 1);
			vip_set_slice_path(dev, VIP_CHR_DS_2_DATA_BYPASS, 1);
			vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
		}
	} else if (port->fmt->coplanar) {
		/* Unscaled coplanar output: chroma downsampler only */
		port->flags &= ~FLAG_MULT_PORT;
		if (port->port_id == VIP_PORTA) {
			vip_set_slice_path(dev, VIP_CHR_DS_1_SRC_DATA_SELECT, 3);
			vip_set_slice_path(dev, VIP_CHR_DS_1_DATA_BYPASS, 0);
			vip_set_slice_path(dev, VIP_RGB_OUT_HI_DATA_SELECT, 0);
		} else {
			vip_set_slice_path(dev, VIP_CHR_DS_2_SRC_DATA_SELECT, 4);
			vip_set_slice_path(dev, VIP_CHR_DS_2_DATA_BYPASS, 0);
			vip_set_slice_path(dev, VIP_MULTI_CHANNEL_DATA_SELECT, 0);
			vip_set_slice_path(dev, VIP_RGB_OUT_LO_DATA_SELECT, 0);
		}
	} else {
		/*
		 * Unscaled packed output: straight multi-channel path;
		 * this is the only mode where the port can be multiplexed.
		 */
		port->flags |= FLAG_MULT_PORT;
		vip_set_slice_path(dev, VIP_MULTI_CHANNEL_DATA_SELECT, 1);
		vip_set_slice_path(dev, VIP_RGB_OUT_LO_DATA_SELECT, 0);
	}
}
2002
2003 /*
2004 * Set the registers that are modified when the video format changes.
2005 */
set_fmt_params(struct vip_stream * stream)2006 static void set_fmt_params(struct vip_stream *stream)
2007 {
2008 struct vip_port *port = stream->port;
2009 struct vip_dev *dev = port->dev;
2010
2011 stream->sequence = 0;
2012 stream->field = V4L2_FIELD_TOP;
2013
2014 /* YUV input, RGB output using CSC (Y2R) */
2015 if (port->csc == VIP_CSC_Y2R) {
2016 vip_config_csc_y2r(dev, port);
2017 /* RGB input, YUV output using CSC (R2Y) */
2018 } else if (port->csc == VIP_CSC_R2Y) {
2019 vip_config_csc_r2y(dev, port);
2020 /* RGB output without CSC */
2021 } else if (v4l2_is_format_rgb(port->fmt->finfo)) {
2022 vip_config_rgb(dev, port);
2023 /* YUV output without CSC */
2024 } else {
2025 vip_config_yuv(dev, port);
2026 }
2027 }
2028
vip_g_selection(struct file * file,void * fh,struct v4l2_selection * s)2029 static int vip_g_selection(struct file *file, void *fh,
2030 struct v4l2_selection *s)
2031 {
2032 if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
2033 return -EINVAL;
2034
2035 struct vip_stream *stream = file2stream(file);
2036
2037 switch (s->target) {
2038 case V4L2_SEL_TGT_CROP_BOUNDS:
2039 case V4L2_SEL_TGT_CROP_DEFAULT:
2040 s->r.left = 0;
2041 s->r.top = 0;
2042 s->r.width = stream->width;
2043 s->r.height = stream->height;
2044 return 0;
2045
2046 case V4L2_SEL_TGT_CROP:
2047 s->r = stream->port->c_rect;
2048 return 0;
2049 }
2050
2051 return -EINVAL;
2052 }
2053
enclosed_rectangle(struct v4l2_rect * a,struct v4l2_rect * b)2054 static int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b)
2055 {
2056 if (a->left < b->left || a->top < b->top)
2057 return 0;
2058 if (a->left + a->width > b->left + b->width)
2059 return 0;
2060 if (a->top + a->height > b->top + b->height)
2061 return 0;
2062
2063 return 1;
2064 }
2065
/*
 * VIDIOC_S_SELECTION: set the capture crop rectangle.
 * The requested rectangle is bounded to the active frame and clamped
 * into place; the caller's LE/GE constraint flags are then verified
 * against the adjusted rectangle.
 */
static int vip_s_selection(struct file *file, void *fh,
			   struct v4l2_selection *s)
{
	struct vip_stream *stream = file2stream(file);
	struct vip_port *port = stream->port;
	struct vip_dev *dev = port->dev;
	struct v4l2_rect r = s->r;

	/* Only the CROP target on the capture queue is supported */
	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	if (s->target != V4L2_SEL_TGT_CROP)
		return -EINVAL;

	/* Crop cannot change while buffers are queued/in flight */
	if (vb2_is_busy(&stream->vb_vidq))
		return -EBUSY;

	/* Bound to the frame; zero alignment args = no alignment constraint */
	v4l_bound_align_image(&r.width, 0, stream->width, 0,
			      &r.height, 0, stream->height, 0, 0);

	r.left = clamp_t(unsigned int, r.left, 0, stream->width - r.width);
	r.top = clamp_t(unsigned int, r.top, 0, stream->height - r.height);

	/* Adjusted rectangle must still satisfy the requested LE/GE flags */
	if (s->flags & V4L2_SEL_FLAG_LE && !enclosed_rectangle(&r, &s->r))
		return -ERANGE;

	if (s->flags & V4L2_SEL_FLAG_GE && !enclosed_rectangle(&s->r, &r))
		return -ERANGE;

	/* Report the adjusted rectangle back and latch it on the port */
	s->r = r;
	stream->port->c_rect = r;

	v4l2_dbg(1, debug, &dev->v4l2_dev, "cropped (%d,%d)/%dx%d of %dx%d\n",
		 r.left, r.top, r.width, r.height,
		 stream->width, stream->height);

	return 0;
}
2104
/* V4L2 ioctl dispatch table; buffer ioctls are the stock vb2 helpers */
static const struct v4l2_ioctl_ops vip_ioctl_ops = {
	.vidioc_querycap	= vip_querycap,
	.vidioc_enum_input	= vip_enuminput,
	.vidioc_g_input		= vip_g_input,
	.vidioc_s_input		= vip_s_input,

	/* Analog video standard handling */
	.vidioc_querystd	= vip_querystd,
	.vidioc_g_std		= vip_g_std,
	.vidioc_s_std		= vip_s_std,

	/* Pixel format negotiation */
	.vidioc_enum_fmt_vid_cap = vip_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap	= vip_g_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap	= vip_try_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap	= vip_s_fmt_vid_cap,

	.vidioc_enum_frameintervals	= vip_enum_frameintervals,
	.vidioc_enum_framesizes		= vip_enum_framesizes,
	.vidioc_s_parm			= vip_s_parm,
	.vidioc_g_parm			= vip_g_parm,
	.vidioc_g_selection	= vip_g_selection,
	.vidioc_s_selection	= vip_s_selection,
	/* Buffer management delegated to videobuf2 */
	.vidioc_reqbufs		= vb2_ioctl_reqbufs,
	.vidioc_create_bufs	= vb2_ioctl_create_bufs,
	.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
	.vidioc_querybuf	= vb2_ioctl_querybuf,
	.vidioc_qbuf		= vb2_ioctl_qbuf,
	.vidioc_dqbuf		= vb2_ioctl_dqbuf,
	.vidioc_expbuf		= vb2_ioctl_expbuf,

	.vidioc_streamon	= vb2_ioctl_streamon,
	.vidioc_streamoff	= vb2_ioctl_streamoff,
	.vidioc_log_status	= v4l2_ctrl_log_status,
	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
2140
2141 /*
2142 * Videobuf operations
2143 */
vip_queue_setup(struct vb2_queue * vq,unsigned int * nbuffers,unsigned int * nplanes,unsigned int sizes[],struct device * alloc_devs[])2144 static int vip_queue_setup(struct vb2_queue *vq,
2145 unsigned int *nbuffers, unsigned int *nplanes,
2146 unsigned int sizes[], struct device *alloc_devs[])
2147 {
2148 struct vip_stream *stream = vb2_get_drv_priv(vq);
2149 struct vip_port *port = stream->port;
2150 struct vip_dev *dev = port->dev;
2151 unsigned int size = stream->sizeimage;
2152
2153 if (*nplanes)
2154 return sizes[0] < size ? -EINVAL : 0;
2155
2156 *nplanes = 1;
2157 sizes[0] = size;
2158
2159 v4l2_dbg(1, debug, &dev->v4l2_dev, "get %d buffer(s) of size %d each.\n",
2160 *nbuffers, sizes[0]);
2161
2162 return 0;
2163 }
2164
vip_buf_prepare(struct vb2_buffer * vb)2165 static int vip_buf_prepare(struct vb2_buffer *vb)
2166 {
2167 struct vip_stream *stream = vb2_get_drv_priv(vb->vb2_queue);
2168 struct vip_port *port = stream->port;
2169 struct vip_dev *dev = port->dev;
2170
2171 if (vb2_plane_size(vb, 0) < stream->sizeimage) {
2172 v4l2_dbg(1, debug, &dev->v4l2_dev,
2173 "%s data will not fit into plane (%lu < %lu)\n",
2174 __func__, vb2_plane_size(vb, 0),
2175 (long)stream->sizeimage);
2176 return -EINVAL;
2177 }
2178
2179 vb2_set_plane_payload(vb, 0, stream->sizeimage);
2180
2181 return 0;
2182 }
2183
vip_buf_queue(struct vb2_buffer * vb)2184 static void vip_buf_queue(struct vb2_buffer *vb)
2185 {
2186 struct vip_stream *stream = vb2_get_drv_priv(vb->vb2_queue);
2187 struct vip_dev *dev = stream->port->dev;
2188 struct vip_buffer *buf = container_of(vb, struct vip_buffer,
2189 vb.vb2_buf);
2190 unsigned long flags;
2191
2192 spin_lock_irqsave(&dev->slock, flags);
2193 list_add_tail(&buf->list, &stream->vidq);
2194 spin_unlock_irqrestore(&dev->slock, flags);
2195 }
2196
/*
 * Return every buffer held by the driver back to vb2 with the given
 * state (e.g. VB2_BUF_STATE_ERROR/QUEUED on stop/start failure).
 * Buffers marked "drop" belong to the internal drop queue and are
 * recycled there instead of being handed back to userspace.
 */
static void return_buffers(struct vb2_queue *vq, int state)
{
	struct vip_stream *stream = vb2_get_drv_priv(vq);
	struct vip_dev *dev = stream->port->dev;
	struct vip_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&dev->slock, flags);

	/* release all active buffers */
	while (!list_empty(&stream->post_bufs)) {
		buf = list_entry(stream->post_bufs.next,
				 struct vip_buffer, list);
		list_del(&buf->list);
		if (buf->drop == 1)
			/* Internal drop buffer: keep it for reuse */
			list_add_tail(&buf->list, &stream->dropq);
		else
			vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
	/* Then everything still waiting to be posted to the hardware */
	while (!list_empty(&stream->vidq)) {
		buf = list_entry(stream->vidq.next, struct vip_buffer, list);
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}

	INIT_LIST_HEAD(&stream->post_bufs);
	INIT_LIST_HEAD(&stream->vidq);

	spin_unlock_irqrestore(&dev->slock, flags);
}
2227
vip_setup_scaler(struct vip_stream * stream)2228 static int vip_setup_scaler(struct vip_stream *stream)
2229 {
2230 struct vip_port *port = stream->port;
2231 struct vip_dev *dev = port->dev;
2232 struct sc_data *sc = dev->sc;
2233 struct csc_data *csc = dev->csc;
2234 struct vpdma_data *vpdma = dev->shared->vpdma;
2235 struct vip_mmr_adb *mmr_adb = port->mmr_adb.addr;
2236 int list_num = stream->list_num;
2237 int timeout = 500;
2238 struct v4l2_format dst_f;
2239 struct v4l2_format src_f;
2240
2241 memset(&src_f, 0, sizeof(src_f));
2242 src_f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
2243 v4l2_fill_pix_format(&src_f.fmt.pix, &port->mbus_framefmt);
2244 src_f.fmt.pix.pixelformat = vip_mbus_code_to_fourcc(port->fmt->code);
2245
2246 dst_f = src_f;
2247 dst_f.fmt.pix.pixelformat = port->fmt->fourcc;
2248 dst_f.fmt.pix.width = stream->width;
2249 dst_f.fmt.pix.height = stream->height;
2250
2251 /* if scaler not associated with this port then skip */
2252 if (port->scaler) {
2253 sc_set_hs_coeffs(sc, port->sc_coeff_h.addr,
2254 port->mbus_framefmt.width,
2255 port->c_rect.width);
2256 sc_set_vs_coeffs(sc, port->sc_coeff_v.addr,
2257 port->mbus_framefmt.height,
2258 port->c_rect.height);
2259 sc_config_scaler(sc, &mmr_adb->sc_regs0[0],
2260 &mmr_adb->sc_regs8[0], &mmr_adb->sc_regs17[0],
2261 port->mbus_framefmt.width,
2262 port->mbus_framefmt.height,
2263 port->c_rect.width,
2264 port->c_rect.height);
2265 port->load_mmrs = true;
2266 }
2267
2268 /* if csc not associated with this port then skip */
2269 if (port->csc) {
2270 csc_set_coeff(csc, &mmr_adb->csc_regs[0],
2271 &src_f, &dst_f);
2272
2273 port->load_mmrs = true;
2274 }
2275
2276 /* If coeff are already loaded then skip */
2277 if (!sc->load_coeff_v && !sc->load_coeff_h && !port->load_mmrs)
2278 return 0;
2279
2280 if (vpdma_list_busy(vpdma, list_num)) {
2281 v4l2_dbg(3, debug, &dev->v4l2_dev, "%s: List %d is busy\n",
2282 __func__, list_num);
2283 }
2284
2285 /* Make sure we start with a clean list */
2286 vpdma_reset_desc_list(&stream->desc_list);
2287
2288 /* config descriptors */
2289 if (port->load_mmrs) {
2290 vpdma_map_desc_buf(vpdma, &port->mmr_adb);
2291 vpdma_add_cfd_adb(&stream->desc_list, CFD_MMR_CLIENT,
2292 &port->mmr_adb);
2293
2294 port->load_mmrs = false;
2295 v4l2_dbg(3, debug, &dev->v4l2_dev, "Added mmr_adb config desc\n");
2296 }
2297
2298 if (sc->loaded_coeff_h != port->sc_coeff_h.dma_addr ||
2299 sc->load_coeff_h) {
2300 vpdma_map_desc_buf(vpdma, &port->sc_coeff_h);
2301 vpdma_add_cfd_block(&stream->desc_list,
2302 VIP_SLICE1_CFD_SC_CLIENT + dev->slice_id,
2303 &port->sc_coeff_h, 0);
2304
2305 sc->loaded_coeff_h = port->sc_coeff_h.dma_addr;
2306 sc->load_coeff_h = false;
2307 v4l2_dbg(3, debug, &dev->v4l2_dev, "Added sc_coeff_h config desc\n");
2308 }
2309
2310 if (sc->loaded_coeff_v != port->sc_coeff_v.dma_addr ||
2311 sc->load_coeff_v) {
2312 vpdma_map_desc_buf(vpdma, &port->sc_coeff_v);
2313 vpdma_add_cfd_block(&stream->desc_list,
2314 VIP_SLICE1_CFD_SC_CLIENT + dev->slice_id,
2315 &port->sc_coeff_v, SC_COEF_SRAM_SIZE >> 4);
2316
2317 sc->loaded_coeff_v = port->sc_coeff_v.dma_addr;
2318 sc->load_coeff_v = false;
2319 v4l2_dbg(3, debug, &dev->v4l2_dev, "Added sc_coeff_v config desc\n");
2320 }
2321 v4l2_dbg(3, debug, stream, "CFD_SC_CLIENT %d slice_id: %d\n",
2322 VIP_SLICE1_CFD_SC_CLIENT + dev->slice_id, dev->slice_id);
2323
2324 vpdma_map_desc_buf(vpdma, &stream->desc_list.buf);
2325 v4l2_dbg(3, debug, &dev->v4l2_dev, "Submitting desc on list# %d\n", list_num);
2326 vpdma_submit_descs(vpdma, &stream->desc_list, list_num);
2327
2328 while (vpdma_list_busy(vpdma, list_num) && timeout--)
2329 usleep_range(1000, 1100);
2330
2331 vpdma_unmap_desc_buf(dev->shared->vpdma, &port->mmr_adb);
2332 vpdma_unmap_desc_buf(dev->shared->vpdma, &port->sc_coeff_h);
2333 vpdma_unmap_desc_buf(dev->shared->vpdma, &port->sc_coeff_v);
2334 vpdma_unmap_desc_buf(dev->shared->vpdma, &stream->desc_list.buf);
2335
2336 vpdma_reset_desc_list(&stream->desc_list);
2337
2338 if (timeout <= 0) {
2339 v4l2_err(&dev->v4l2_dev, "Timed out setting up scaler through VPDMA list\n");
2340 return -EBUSY;
2341 }
2342
2343 return 0;
2344 }
2345
/*
 * Prime the VPDMA list FIFO with the first VIP_VPDMA_FIFO_SIZE
 * transfers so that capture can start with descriptors already
 * queued. Buffers are moved from the pending queue to post_bufs
 * under the lock, then DMA is kicked for each one and we wait for
 * the list to go idle before posting the next.
 */
static int vip_load_vpdma_list_fifo(struct vip_stream *stream)
{
	struct vip_port *port = stream->port;
	struct vip_dev *dev = port->dev;
	struct vpdma_data *vpdma = dev->shared->vpdma;
	int list_num = stream->list_num;
	struct vip_buffer *buf;
	unsigned long flags;
	int timeout, i;

	/* A busy list here means a previous run never drained - bail */
	if (vpdma_list_busy(dev->shared->vpdma, stream->list_num))
		return -EBUSY;

	for (i = 0; i < VIP_VPDMA_FIFO_SIZE; i++) {
		spin_lock_irqsave(&dev->slock, flags);
		if (list_empty(&stream->vidq)) {
			/* Userspace queued fewer buffers than the FIFO depth */
			v4l2_err(&dev->v4l2_dev, "No buffer left!");
			spin_unlock_irqrestore(&dev->slock, flags);
			return -EINVAL;
		}

		buf = list_entry(stream->vidq.next,
				 struct vip_buffer, list);
		buf->drop = false;

		/* Hand the buffer to the hardware-owned list */
		list_move_tail(&buf->list, &stream->post_bufs);
		spin_unlock_irqrestore(&dev->slock, flags);

		v4l2_dbg(2, debug, &dev->v4l2_dev, "%s: start_dma vb2 buf idx:%d\n",
			 __func__, buf->vb.vb2_buf.index);
		start_dma(stream, buf);

		/* Wait up to ~500 ms for the descriptor submission to land */
		timeout = 500;
		while (vpdma_list_busy(vpdma, list_num) && timeout--)
			usleep_range(1000, 1100);

		if (timeout <= 0) {
			v4l2_err(&dev->v4l2_dev, "Timed out loading VPDMA list fifo\n");
			return -EBUSY;
		}
	}
	return 0;
}
2389
/*
 * vb2 .start_streaming: program the scaler/CSC, configure the
 * datapath and parser, start the subdev, prime the VPDMA FIFO and
 * enable interrupts. On any failure all queued buffers are returned
 * to vb2 in the QUEUED state.
 */
static int vip_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct vip_stream *stream = vb2_get_drv_priv(vq);
	struct vip_port *port = stream->port;
	struct vip_dev *dev = port->dev;
	int ret;

	ret = vip_setup_scaler(stream);
	if (ret)
		goto err;

	/*
	 * Make sure the scaler is configured before the datapath is
	 * enabled. The scaler can only load the coefficient
	 * parameters when it is idle. If the scaler path is enabled
	 * and video data is being received then the VPDMA transfer will
	 * stall indefinitely.
	 */
	set_fmt_params(stream);
	ret = vip_setup_parser(port);
	if (ret)
		goto err;

	/* Start the upstream sensor/decoder, if one is bound */
	if (port->subdev) {
		ret = v4l2_subdev_call(port->subdev, video, s_stream, 1);
		if (ret) {
			v4l2_err(&dev->v4l2_dev, "stream on failed in subdev\n");
			goto err;
		}
	}

	stream->sequence = 0;
	stream->field = V4L2_FIELD_TOP;
	populate_desc_list(stream);

	/* Pre-load the first transfers so capture starts immediately */
	ret = vip_load_vpdma_list_fifo(stream);
	if (ret)
		goto err;

	stream->num_recovery = 0;

	clear_irqs(dev, dev->slice_id, stream->list_num);
	enable_irqs(dev, dev->slice_id, stream->list_num);
	vip_schedule_next_buffer(stream);
	/* Release the parser from its stopped state and enable the port */
	vip_parser_stop_imm(port, false);
	vip_enable_parser(port, true);

	return 0;

err:
	return_buffers(vq, VB2_BUF_STATE_QUEUED);
	return ret;
}
2443
2444 /*
2445 * Abort streaming and wait for last buffer
2446 */
/*
 * vb2 .stop_streaming: stop the parser first so no new data enters
 * the pipe, quiesce interrupts, stop the subdev, abort outstanding
 * DMA and hand every buffer back to vb2 in the ERROR state.
 */
static void vip_stop_streaming(struct vb2_queue *vq)
{
	struct vip_stream *stream = vb2_get_drv_priv(vq);
	struct vip_port *port = stream->port;
	struct vip_dev *dev = port->dev;
	int ret;

	/* Halt input immediately before tearing anything else down */
	vip_parser_stop_imm(port, true);
	vip_enable_parser(port, false);
	unset_fmt_params(stream);

	disable_irqs(dev, dev->slice_id, stream->list_num);
	clear_irqs(dev, dev->slice_id, stream->list_num);

	if (port->subdev) {
		ret = v4l2_subdev_call(port->subdev, video, s_stream, 0);
		if (ret)
			/* Not fatal for teardown; log and carry on */
			v4l2_err(&dev->v4l2_dev, "Failed to stop subdev stream");
	}

	/* Abort all VPDMA channels this stream used (true = clear list) */
	stop_dma(stream, true);

	return_buffers(vq, VB2_BUF_STATE_ERROR);

	vpdma_unmap_desc_buf(dev->shared->vpdma, &stream->desc_list.buf);
	vpdma_reset_desc_list(&stream->desc_list);
}
2474
2475 static const struct vb2_ops vip_video_qops = {
2476 .queue_setup = vip_queue_setup,
2477 .buf_prepare = vip_buf_prepare,
2478 .buf_queue = vip_buf_queue,
2479 .start_streaming = vip_start_streaming,
2480 .stop_streaming = vip_stop_streaming,
2481 };
2482
vip_init_dev(struct vip_dev * dev)2483 static int vip_init_dev(struct vip_dev *dev)
2484 {
2485 if (dev->num_ports != 0)
2486 goto done;
2487
2488 vip_set_clock_enable(dev, 1);
2489 vip_module_toggle(dev, VIP_SC_RST, false);
2490 vip_module_toggle(dev, VIP_CSC_RST, false);
2491 done:
2492 dev->num_ports++;
2493
2494 return 0;
2495 }
2496
is_scaler_available(struct vip_port * port)2497 static inline bool is_scaler_available(struct vip_port *port)
2498 {
2499 if (port->endpoint.bus_type == V4L2_MBUS_PARALLEL)
2500 if (port->dev->sc_assigned == VIP_NOT_ASSIGNED ||
2501 port->dev->sc_assigned == port->port_id)
2502 return true;
2503 return false;
2504 }
2505
allocate_scaler(struct vip_port * port)2506 static inline bool allocate_scaler(struct vip_port *port)
2507 {
2508 if (is_scaler_available(port)) {
2509 if (port->dev->sc_assigned == VIP_NOT_ASSIGNED ||
2510 port->dev->sc_assigned == port->port_id) {
2511 port->dev->sc_assigned = port->port_id;
2512 port->scaler = true;
2513 return true;
2514 }
2515 }
2516 return false;
2517 }
2518
free_scaler(struct vip_port * port)2519 static inline void free_scaler(struct vip_port *port)
2520 {
2521 if (port->dev->sc_assigned == port->port_id) {
2522 port->dev->sc_assigned = VIP_NOT_ASSIGNED;
2523 port->scaler = false;
2524 }
2525 }
2526
is_csc_available(struct vip_port * port)2527 static bool is_csc_available(struct vip_port *port)
2528 {
2529 if (port->endpoint.bus_type == V4L2_MBUS_PARALLEL)
2530 if (port->dev->csc_assigned == VIP_NOT_ASSIGNED ||
2531 port->dev->csc_assigned == port->port_id)
2532 return true;
2533 return false;
2534 }
2535
allocate_csc(struct vip_port * port,enum vip_csc_state csc_direction)2536 static bool allocate_csc(struct vip_port *port,
2537 enum vip_csc_state csc_direction)
2538 {
2539 struct vip_dev *dev = port->dev;
2540 /* Is CSC needed? */
2541 if (csc_direction != VIP_CSC_NA) {
2542 if (is_csc_available(port)) {
2543 port->dev->csc_assigned = port->port_id;
2544 port->csc = csc_direction;
2545 v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: csc allocated: dir: %d\n",
2546 __func__, csc_direction);
2547 return true;
2548 }
2549 }
2550 return false;
2551 }
2552
free_csc(struct vip_port * port)2553 static void free_csc(struct vip_port *port)
2554 {
2555 struct vip_dev *dev = port->dev;
2556
2557 if (port->dev->csc_assigned == port->port_id) {
2558 port->dev->csc_assigned = VIP_NOT_ASSIGNED;
2559 port->csc = VIP_CSC_NA;
2560 v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: csc freed\n",
2561 __func__);
2562 }
2563 }
2564
vip_init_port(struct vip_port * port)2565 static int vip_init_port(struct vip_port *port)
2566 {
2567 struct vip_dev *dev = port->dev;
2568 int ret;
2569 struct vip_fmt *fmt;
2570 struct v4l2_subdev_format sd_fmt = {
2571 .which = V4L2_SUBDEV_FORMAT_ACTIVE,
2572 .pad = 0,
2573 };
2574 struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
2575
2576 if (port->num_streams != 0)
2577 goto done;
2578
2579 ret = vip_init_dev(port->dev);
2580 if (ret)
2581 goto done;
2582
2583 /* Get subdevice current frame format */
2584 ret = v4l2_subdev_call(port->subdev, pad, get_fmt, NULL, &sd_fmt);
2585 if (ret)
2586 v4l2_dbg(1, debug, &dev->v4l2_dev, "init_port get_fmt failed in subdev: (%d)\n",
2587 ret);
2588
2589 /* try to find one that matches */
2590 fmt = find_port_format_by_code(port, mbus_fmt->code);
2591 if (!fmt) {
2592 v4l2_dbg(1, debug, &dev->v4l2_dev, "subdev default mbus_fmt %04x is not matched.\n",
2593 mbus_fmt->code);
2594 /* if all else fails just pick the first one */
2595 fmt = port->active_fmt[0];
2596
2597 mbus_fmt->code = fmt->code;
2598 sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
2599 sd_fmt.pad = 0;
2600 ret = v4l2_subdev_call(port->subdev, pad, set_fmt,
2601 NULL, &sd_fmt);
2602 if (ret)
2603 v4l2_dbg(1, debug, &dev->v4l2_dev, "init_port set_fmt failed in subdev: (%d)\n",
2604 ret);
2605 }
2606
2607 /* Assign current format */
2608 port->fmt = fmt;
2609 port->mbus_framefmt = *mbus_fmt;
2610
2611 v4l2_dbg(3, debug, &dev->v4l2_dev, "%s: g_mbus_fmt subdev mbus_code: %04X fourcc:%s size: %dx%d\n",
2612 __func__, fmt->code,
2613 fourcc_to_str(fmt->fourcc),
2614 mbus_fmt->width, mbus_fmt->height);
2615
2616 if (mbus_fmt->field == V4L2_FIELD_ALTERNATE)
2617 port->flags |= FLAG_INTERLACED;
2618 else
2619 port->flags &= ~FLAG_INTERLACED;
2620
2621 port->c_rect.left = 0;
2622 port->c_rect.top = 0;
2623 port->c_rect.width = mbus_fmt->width;
2624 port->c_rect.height = mbus_fmt->height;
2625
2626 ret = vpdma_alloc_desc_buf(&port->sc_coeff_h, SC_COEF_SRAM_SIZE);
2627 if (ret != 0)
2628 return ret;
2629
2630 ret = vpdma_alloc_desc_buf(&port->sc_coeff_v, SC_COEF_SRAM_SIZE);
2631 if (ret != 0)
2632 goto free_sc_h;
2633
2634 ret = vpdma_alloc_desc_buf(&port->mmr_adb, sizeof(struct vip_mmr_adb));
2635 if (ret != 0)
2636 goto free_sc_v;
2637
2638 init_adb_hdrs(port);
2639
2640 vip_enable_parser(port, false);
2641 done:
2642 port->num_streams++;
2643 return 0;
2644
2645 free_sc_v:
2646 vpdma_free_desc_buf(&port->sc_coeff_v);
2647 free_sc_h:
2648 vpdma_free_desc_buf(&port->sc_coeff_h);
2649 return ret;
2650 }
2651
vip_init_stream(struct vip_stream * stream)2652 static int vip_init_stream(struct vip_stream *stream)
2653 {
2654 struct vip_port *port = stream->port;
2655 struct vip_dev *dev = port->dev;
2656 struct vip_fmt *fmt;
2657 struct v4l2_mbus_framefmt *mbus_fmt;
2658 struct v4l2_format f;
2659 int ret;
2660
2661 ret = vip_init_port(port);
2662 if (ret != 0)
2663 return ret;
2664
2665 fmt = port->fmt;
2666 mbus_fmt = &port->mbus_framefmt;
2667
2668 memset(&f, 0, sizeof(f));
2669
2670 /* Properly calculate the sizeimage and bytesperline values. */
2671 v4l2_fill_pix_format(&f.fmt.pix, mbus_fmt);
2672 f.fmt.pix.pixelformat = fmt->fourcc;
2673 ret = vip_calc_format_size(port, fmt, &f);
2674 if (ret)
2675 return ret;
2676
2677 stream->width = f.fmt.pix.width;
2678 stream->height = f.fmt.pix.height;
2679 stream->sup_field = f.fmt.pix.field;
2680 stream->bytesperline = f.fmt.pix.bytesperline;
2681 stream->sizeimage = f.fmt.pix.sizeimage;
2682
2683 v4l2_dbg(3, debug, &dev->v4l2_dev, "init_stream fourcc:%s size: %dx%d bpl:%d img_size:%d\n",
2684 fourcc_to_str(f.fmt.pix.pixelformat),
2685 f.fmt.pix.width, f.fmt.pix.height,
2686 f.fmt.pix.bytesperline, f.fmt.pix.sizeimage);
2687 v4l2_dbg(3, debug, &dev->v4l2_dev, "init_stream vpdma data type: 0x%02X\n",
2688 port->fmt->vpdma_fmt[0]->data_type);
2689
2690 ret = vpdma_create_desc_list(&stream->desc_list, VIP_DESC_LIST_SIZE,
2691 VPDMA_LIST_TYPE_NORMAL);
2692
2693 if (ret != 0)
2694 return ret;
2695
2696 stream->write_desc = (struct vpdma_dtd *)stream->desc_list.buf.addr
2697 + 15;
2698
2699 v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: stream instance %pa\n",
2700 __func__, &stream);
2701
2702 return 0;
2703 }
2704
vip_release_dev(struct vip_dev * dev)2705 static void vip_release_dev(struct vip_dev *dev)
2706 {
2707 /*
2708 * On last close, disable clocks to conserve power
2709 */
2710
2711 if (--dev->num_ports == 0) {
2712 /* reset the scaler module */
2713 vip_module_toggle(dev, VIP_SC_RST, true);
2714 vip_module_toggle(dev, VIP_CSC_RST, true);
2715 vip_set_clock_enable(dev, 0);
2716 }
2717 }
2718
/*
 * Program the parser's horizontal/vertical crop registers to pass
 * the full source frame through (skip 0, use full width/height).
 * Always returns 0.
 */
static int vip_set_crop_parser(struct vip_port *port)
{
	struct vip_dev *dev = port->dev;
	struct vip_parser_data *parser = dev->parser;
	u32 hcrop = 0, vcrop = 0;
	u32 width = port->mbus_framefmt.width;

	if (port->fmt->vpdma_fmt[0] == &vpdma_raw_fmts[VPDMA_DATA_FMT_RAW8]) {
		/*
		 * Special case since we are faking a YUV422 16bit format
		 * to have the vpdma perform the needed byte swap
		 * we need to adjust the pixel width accordingly
		 * otherwise the parser will attempt to collect more pixels
		 * then available and the vpdma transfer will exceed the
		 * allocated frame buffer.
		 */
		width >>= 1;
		v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: 8 bit raw detected, adjusting width to %d\n",
			 __func__, width);
	}

	/*
	 * Set Parser Crop parameters to source size otherwise
	 * scaler and colorspace converter will yield garbage.
	 */
	hcrop = VIP_ACT_BYPASS;
	insert_field(&hcrop, 0, VIP_ACT_SKIP_NUMPIX_MASK,
		     VIP_ACT_SKIP_NUMPIX_SHFT);
	insert_field(&hcrop, width,
		     VIP_ACT_USE_NUMPIX_MASK, VIP_ACT_USE_NUMPIX_SHFT);
	reg_write(parser, VIP_PARSER_CROP_H_PORT(port->port_id), hcrop);

	/* Vertical crop: skip no lines, use the full frame height */
	insert_field(&vcrop, 0, VIP_ACT_SKIP_NUMLINES_MASK,
		     VIP_ACT_SKIP_NUMLINES_SHFT);
	insert_field(&vcrop, port->mbus_framefmt.height,
		     VIP_ACT_USE_NUMLINES_MASK, VIP_ACT_USE_NUMLINES_SHFT);
	reg_write(parser, VIP_PARSER_CROP_V_PORT(port->port_id), vcrop);

	return 0;
}
2759
/*
 * Configure the VIP parser for the port's bus type: interface width,
 * sync style (embedded BT.656 vs discrete hsync/vsync), signal
 * polarities and pixel-clock sampling edge. Returns -EINVAL for
 * unsupported (non-parallel, non-BT.656) buses such as CSI-2.
 */
static int vip_setup_parser(struct vip_port *port)
{
	struct vip_dev *dev = port->dev;
	struct vip_parser_data *parser = dev->parser;
	struct v4l2_fwnode_endpoint *endpoint = &port->endpoint;
	int iface, sync_type;
	u32 flags = 0, config0;

	/* Reset the port */
	vip_reset_parser(port, true);
	usleep_range(200, 250);
	vip_reset_parser(port, false);

	/* Read-modify-write: preserve bits we do not own */
	config0 = reg_read(parser, VIP_PARSER_PORT(port->port_id));

	if (endpoint->bus_type == V4L2_MBUS_BT656) {
		/* BT.656: syncs are embedded in the data stream */
		flags = endpoint->bus.parallel.flags;
		iface = DUAL_8B_INTERFACE;

		/*
		 * Ideally, this should come from subdev
		 * port->fmt can be anything once CSC is enabled
		 */
		if (vip_is_mbuscode_rgb(port->fmt->code))
			sync_type = EMBEDDED_SYNC_SINGLE_RGB_OR_YUV444;
		else
			sync_type = EMBEDDED_SYNC_LINE_MULTIPLEXED_YUV422;

	} else if (endpoint->bus_type == V4L2_MBUS_PARALLEL) {
		/* Parallel bus: discrete hsync/vsync lines */
		flags = endpoint->bus.parallel.flags;

		switch (endpoint->bus.parallel.bus_width) {
		case 24:
			iface = SINGLE_24B_INTERFACE;
			break;
		case 16:
			iface = SINGLE_16B_INTERFACE;
			break;
		case 8:
		default:
			iface = DUAL_8B_INTERFACE;
		}

		if (vip_is_mbuscode_rgb(port->fmt->code))
			sync_type = DISCRETE_SYNC_SINGLE_RGB_24B;
		else
			sync_type = DISCRETE_SYNC_SINGLE_YUV422;

		/* Leave polarity untouched if neither flag is specified */
		if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
			config0 |= VIP_HSYNC_POLARITY;
		else if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
			config0 &= ~VIP_HSYNC_POLARITY;

		if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
			config0 |= VIP_VSYNC_POLARITY;
		else if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
			config0 &= ~VIP_VSYNC_POLARITY;

		config0 &= ~VIP_USE_ACTVID_HSYNC_ONLY;
		config0 |= VIP_ACTVID_POLARITY;
		config0 |= VIP_DISCRETE_BASIC_MODE;

	} else {
		v4l2_err(&dev->v4l2_dev, "Device doesn't support CSI2");
		return -EINVAL;
	}

	/* Sample on the falling edge if the endpoint asks for it */
	if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING) {
		vip_set_pclk_invert(port);
		config0 |= VIP_PIXCLK_EDGE_POLARITY;
	} else {
		config0 &= ~VIP_PIXCLK_EDGE_POLARITY;
	}

	config0 |= ((sync_type & VIP_SYNC_TYPE_MASK) << VIP_SYNC_TYPE_SHFT);

	reg_write(parser, VIP_PARSER_PORT(port->port_id), config0);

	vip_set_data_interface(port, iface);
	vip_set_crop_parser(port);

	return 0;
}
2843
/*
 * Enable or disable the parser port. Disabling also holds the async
 * FIFO read/write sides; enabling releases them.
 */
static void vip_enable_parser(struct vip_port *port, bool on)
{
	struct vip_dev *dev = port->dev;
	struct vip_parser_data *parser = dev->parser;
	u32 val;

	/* Read-modify-write to preserve the rest of the port config */
	val = reg_read(parser, VIP_PARSER_PORT(port->port_id));

	if (on) {
		val |= VIP_PORT_ENABLE;
		val &= ~(VIP_ASYNC_FIFO_RD | VIP_ASYNC_FIFO_WR);
	} else {
		val &= ~VIP_PORT_ENABLE;
		val |= (VIP_ASYNC_FIFO_RD | VIP_ASYNC_FIFO_WR);
	}

	reg_write(parser, VIP_PARSER_PORT(port->port_id), val);
}
2861
/* Assert (on = true) or deassert the parser port's software reset bit. */
static void vip_reset_parser(struct vip_port *port, bool on)
{
	struct vip_dev *dev = port->dev;
	struct vip_parser_data *parser = dev->parser;
	u32 val = reg_read(parser, VIP_PARSER_PORT(port->port_id));

	if (on)
		val |= VIP_SW_RESET;
	else
		val &= ~VIP_SW_RESET;

	reg_write(parser, VIP_PARSER_PORT(port->port_id), val);
}
2877
/*
 * Set (all ones) or clear the port's "stop immediately" register.
 *
 * Fix: the previous read-back of the register was dead code - the
 * value was unconditionally overwritten before being written back.
 * NOTE(review): this assumes reading STOP_IMM has no side effects;
 * confirm against the VIP TRM.
 */
static void vip_parser_stop_imm(struct vip_port *port, bool on)
{
	struct vip_dev *dev = port->dev;
	struct vip_parser_data *parser = dev->parser;

	reg_write(parser, VIP_PARSER_STOP_IMM_PORT(port->port_id),
		  on ? 0xffffffff : 0);
}
2893
vip_release_stream(struct vip_stream * stream)2894 static void vip_release_stream(struct vip_stream *stream)
2895 {
2896 struct vip_dev *dev = stream->port->dev;
2897
2898 v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: stream instance %pa\n",
2899 __func__, &stream);
2900
2901 vpdma_unmap_desc_buf(dev->shared->vpdma, &stream->desc_list.buf);
2902 vpdma_free_desc_buf(&stream->desc_list.buf);
2903 vpdma_free_desc_list(&stream->desc_list);
2904 }
2905
vip_release_port(struct vip_port * port)2906 static void vip_release_port(struct vip_port *port)
2907 {
2908 struct vip_dev *dev = port->dev;
2909
2910 v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: port instance %pa\n",
2911 __func__, &port);
2912
2913 vpdma_free_desc_buf(&port->mmr_adb);
2914 vpdma_free_desc_buf(&port->sc_coeff_h);
2915 vpdma_free_desc_buf(&port->sc_coeff_v);
2916 }
2917
/*
 * Abort every VPDMA channel this stream has used. When @clear_list
 * is set, the channel-usage bookkeeping is wiped as well.
 */
static void stop_dma(struct vip_stream *stream, bool clear_list)
{
	struct vip_dev *dev = stream->port->dev;
	int count = 0;
	int ch;

	/* Collect the in-use channels into the abort array */
	for (ch = 0; ch < VPDMA_MAX_CHANNELS; ch++) {
		if (stream->vpdma_channels[ch] != 1)
			continue;
		stream->vpdma_channels_to_abort[count++] = ch;
		v4l2_dbg(2, debug, &dev->v4l2_dev, "Clear channel no: %d\n", ch);
	}

	/* Clear all the used channels for the list */
	vpdma_list_cleanup(dev->shared->vpdma, stream->list_num,
			   stream->vpdma_channels_to_abort, count);

	if (clear_list) {
		for (ch = 0; ch < VPDMA_MAX_CHANNELS; ch++)
			stream->vpdma_channels[ch] = 0;
	}
}
2939
vip_open(struct file * file)2940 static int vip_open(struct file *file)
2941 {
2942 struct vip_stream *stream = video_drvdata(file);
2943 struct vip_port *port = stream->port;
2944 struct vip_dev *dev = port->dev;
2945 int ret = 0;
2946
2947 mutex_lock(&dev->mutex);
2948
2949 ret = v4l2_fh_open(file);
2950 if (ret) {
2951 v4l2_err(&dev->v4l2_dev, "v4l2_fh_open failed\n");
2952 goto unlock;
2953 }
2954
2955 /*
2956 * If this is the first open file.
2957 * Then initialize hw module.
2958 */
2959 if (!v4l2_fh_is_singular_file(file))
2960 goto unlock;
2961
2962 if (vip_init_stream(stream))
2963 ret = -ENODEV;
2964 unlock:
2965 mutex_unlock(&dev->mutex);
2966 return ret;
2967 }
2968
/*
 * vip_release() - file release hook.
 *
 * Runs the vb2 release helper (which stops any in-flight streaming),
 * frees the CSC/scaler units assigned to the port and, when the last
 * file handle goes away, tears down stream, port and device state.
 */
static int vip_release(struct file *file)
{
	struct vip_stream *stream = video_drvdata(file);
	struct vip_port *port = stream->port;
	struct vip_dev *dev = port->dev;
	bool fh_singular;
	int ret;

	mutex_lock(&dev->mutex);

	/* Save the singular status before we call the clean-up helper */
	fh_singular = v4l2_fh_is_singular_file(file);

	/* the release helper will cleanup any on-going streaming */
	ret = _vb2_fop_release(file, NULL);

	free_csc(port);
	free_scaler(port);

	/*
	 * If this is the last open file.
	 * Then de-initialize hw module.
	 */
	if (fh_singular) {
		vip_release_stream(stream);

		/* Last stream on the port: release port + device state too */
		if (--port->num_streams == 0) {
			vip_release_port(port);
			vip_release_dev(port->dev);
		}
	}

	mutex_unlock(&dev->mutex);

	return ret;
}
3005
3006 /*
3007 * File operations
3008 */
/* V4L2 file operations; poll/mmap are the stock vb2 helpers */
static const struct v4l2_file_operations vip_fops = {
	.owner = THIS_MODULE,
	.open = vip_open,
	.release = vip_release,
	.poll = vb2_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = vb2_fop_mmap,
};
3017
/* Template video_device; copied per stream in alloc_stream() */
static struct video_device vip_videodev = {
	.name = VIP_MODULE_NAME,
	.fops = &vip_fops,
	.ioctl_ops = &vip_ioctl_ops,
	.minor = -1,
	.release = video_device_release,
	.tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
	.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE,
};
3027
alloc_stream(struct vip_port * port,int stream_id,int vfl_type)3028 static int alloc_stream(struct vip_port *port, int stream_id, int vfl_type)
3029 {
3030 struct vip_stream *stream;
3031 struct vip_dev *dev = port->dev;
3032 struct vb2_queue *q;
3033 struct video_device *vfd;
3034 struct vip_buffer *buf;
3035 struct list_head *pos, *tmp;
3036 int ret, i;
3037
3038 stream = kzalloc_obj(*stream);
3039 if (!stream)
3040 return -ENOMEM;
3041
3042 stream->port = port;
3043 stream->stream_id = stream_id;
3044 stream->vfl_type = vfl_type;
3045 port->cap_streams[stream_id] = stream;
3046
3047 stream->list_num = vpdma_hwlist_alloc(dev->shared->vpdma, stream);
3048 if (stream->list_num < 0) {
3049 v4l2_err(&dev->v4l2_dev, "Could not get VPDMA hwlist");
3050 ret = -ENODEV;
3051 goto do_free_stream;
3052 }
3053
3054 INIT_LIST_HEAD(&stream->post_bufs);
3055
3056 /*
3057 * Initialize queue
3058 */
3059 q = &stream->vb_vidq;
3060 q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
3061 q->io_modes = VB2_MMAP | VB2_DMABUF;
3062 q->drv_priv = stream;
3063 q->buf_struct_size = sizeof(struct vip_buffer);
3064 q->ops = &vip_video_qops;
3065 q->mem_ops = &vb2_dma_contig_memops;
3066 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
3067 q->lock = &dev->mutex;
3068 q->min_queued_buffers = 2;
3069 q->dev = dev->v4l2_dev.dev;
3070
3071 ret = vb2_queue_init(q);
3072 if (ret)
3073 goto do_free_hwlist;
3074
3075 INIT_WORK(&stream->recovery_work, vip_overflow_recovery_work);
3076
3077 INIT_LIST_HEAD(&stream->vidq);
3078
3079 /* Allocate/populate Drop queue entries */
3080 INIT_LIST_HEAD(&stream->dropq);
3081 for (i = 0; i < VIP_DROPQ_SIZE; i++) {
3082 buf = kzalloc_obj(*buf, GFP_ATOMIC);
3083 if (!buf) {
3084 ret = -ENOMEM;
3085 goto do_free_dropq;
3086 }
3087 buf->drop = true;
3088 list_add(&buf->list, &stream->dropq);
3089 }
3090
3091 vfd = video_device_alloc();
3092 if (!vfd) {
3093 ret = -ENOMEM;
3094 goto do_free_dropq;
3095 }
3096 *vfd = vip_videodev;
3097 vfd->v4l2_dev = &dev->v4l2_dev;
3098 vfd->queue = q;
3099
3100 vfd->lock = &dev->mutex;
3101 video_set_drvdata(vfd, stream);
3102 stream->vfd = vfd;
3103
3104 ret = video_register_device(vfd, vfl_type, -1);
3105 if (ret) {
3106 v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
3107 goto do_free_vfd;
3108 }
3109
3110 v4l2_info(&dev->v4l2_dev, "device registered as %s\n",
3111 video_device_node_name(vfd));
3112 return 0;
3113
3114 do_free_vfd:
3115 video_device_release(vfd);
3116 do_free_dropq:
3117 list_for_each_safe(pos, tmp, &stream->dropq) {
3118 buf = list_entry(pos,
3119 struct vip_buffer, list);
3120 v4l2_dbg(1, debug, &dev->v4l2_dev, "dropq buffer\n");
3121 list_del(pos);
3122 kfree(buf);
3123 }
3124 do_free_hwlist:
3125 vpdma_hwlist_release(dev->shared->vpdma, stream->list_num);
3126 do_free_stream:
3127 kfree(stream);
3128 return ret;
3129 }
3130
/*
 * free_stream() - undo alloc_stream(): drain the drop queue,
 * unregister the video device, release the VPDMA hwlist and free the
 * stream. Safe to call with a NULL stream.
 */
static void free_stream(struct vip_stream *stream)
{
	struct vip_dev *dev;
	struct vip_buffer *buf;
	struct list_head *pos, *q;

	if (!stream)
		return;

	dev = stream->port->dev;
	/* Free up the Drop queue */
	list_for_each_safe(pos, q, &stream->dropq) {
		buf = list_entry(pos,
				 struct vip_buffer, list);
		v4l2_dbg(1, debug, &dev->v4l2_dev, "dropq buffer\n");
		list_del(pos);
		kfree(buf);
	}

	video_unregister_device(stream->vfd);
	vpdma_hwlist_release(dev->shared->vpdma, stream->list_num);
	/* Clear the back-pointer before freeing the stream */
	stream->port->cap_streams[stream->stream_id] = NULL;
	kfree(stream);
}
3155
/*
 * get_subdev_active_format() - build the port's active format list by
 * intersecting the subdevice's media-bus codes with the formats the
 * VIP hardware supports.
 *
 * Returns 0 when at least one common format was found, -EINVAL if the
 * subdev reports no usable format.
 */
static int get_subdev_active_format(struct vip_port *port,
				    struct v4l2_subdev *subdev)
{
	struct vip_fmt *fmt;
	struct vip_dev *dev = port->dev;
	struct v4l2_subdev_mbus_code_enum mbus_code;
	int ret = 0;
	unsigned int k, i, j;
	enum vip_csc_state csc;

	/* Enumerate sub device formats and enable all matching local formats */
	port->num_active_fmt = 0;
	/*
	 * NOTE(review): the loop terminates only when enum_mbus_code
	 * returns -EINVAL; a subdev repeatedly returning a different
	 * error would spin forever — confirm against supported subdevs.
	 */
	for (k = 0, i = 0; (ret != -EINVAL); k++) {
		memset(&mbus_code, 0, sizeof(mbus_code));
		mbus_code.index = k;
		mbus_code.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
				       NULL, &mbus_code);
		if (ret)
			continue;

		v4l2_dbg(2, debug, &dev->v4l2_dev,
			 "subdev %s: code: %04x idx: %d\n",
			 subdev->name, mbus_code.code, k);

		for (j = 0; j < ARRAY_SIZE(vip_formats); j++) {
			fmt = &vip_formats[j];
			if (mbus_code.code != fmt->code)
				continue;

			/*
			 * When the port is configured for BT656
			 * then none of the downstream unit can be used.
			 * So here we need to skip all format requiring
			 * either CSC or CHR_DS
			 */
			csc = vip_csc_direction(fmt->code, fmt->finfo);
			if (port->endpoint.bus_type == V4L2_MBUS_BT656 &&
			    (csc != VIP_CSC_NA || fmt->coplanar))
				continue;

			port->active_fmt[i] = fmt;
			v4l2_dbg(2, debug, &dev->v4l2_dev,
				 "matched fourcc: %s: code: %04x idx: %d\n",
				 fourcc_to_str(fmt->fourcc), fmt->code, i);
			port->num_active_fmt = ++i;
		}
	}

	if (i == 0) {
		v4l2_err(&dev->v4l2_dev, "No suitable format reported by subdev %s\n",
			 subdev->name);
		return -EINVAL;
	}
	return 0;
}
3212
alloc_port(struct vip_dev * dev,int id)3213 static int alloc_port(struct vip_dev *dev, int id)
3214 {
3215 struct vip_port *port;
3216
3217 if (dev->ports[id])
3218 return -EINVAL;
3219
3220 port = devm_kzalloc(&dev->pdev->dev, sizeof(*port), GFP_KERNEL);
3221 if (!port)
3222 return -ENOMEM;
3223
3224 dev->ports[id] = port;
3225 port->dev = dev;
3226 port->port_id = id;
3227 port->num_streams = 0;
3228 return 0;
3229 }
3230
free_port(struct vip_port * port)3231 static void free_port(struct vip_port *port)
3232 {
3233 if (!port)
3234 return;
3235
3236 v4l2_async_nf_unregister(&port->notifier);
3237 v4l2_async_nf_cleanup(&port->notifier);
3238 free_stream(port->cap_streams[0]);
3239 }
3240
/* Extract a register bit field: shift down first, then apply the mask. */
static int get_field(u32 value, u32 mask, int shift)
{
	return (value >> shift) & mask;
}
3245
3246 static int vip_probe_complete(struct platform_device *pdev);
vip_vpdma_fw_cb(struct platform_device * pdev)3247 static void vip_vpdma_fw_cb(struct platform_device *pdev)
3248 {
3249 dev_info(&pdev->dev, "VPDMA firmware loaded\n");
3250
3251 if (pdev->dev.of_node)
3252 vip_probe_complete(pdev);
3253 }
3254
vip_create_streams(struct vip_port * port,struct v4l2_subdev * subdev)3255 static int vip_create_streams(struct vip_port *port,
3256 struct v4l2_subdev *subdev)
3257 {
3258 int i;
3259
3260 for (i = 0; i < VIP_CAP_STREAMS_PER_PORT; i++)
3261 free_stream(port->cap_streams[i]);
3262
3263 if (get_subdev_active_format(port, subdev))
3264 return -ENODEV;
3265
3266 port->subdev = subdev;
3267
3268 if (port->endpoint.bus_type == V4L2_MBUS_PARALLEL) {
3269 port->flags |= FLAG_MULT_PORT;
3270 port->num_streams_configured = 1;
3271 alloc_stream(port, 0, VFL_TYPE_VIDEO);
3272 } else if (port->endpoint.bus_type == V4L2_MBUS_BT656) {
3273 port->flags |= FLAG_MULT_PORT;
3274 port->num_streams_configured = 1;
3275 alloc_stream(port, 0, VFL_TYPE_VIDEO);
3276 }
3277 return 0;
3278 }
3279
vip_async_bound(struct v4l2_async_notifier * notifier,struct v4l2_subdev * subdev,struct v4l2_async_connection * asd)3280 static int vip_async_bound(struct v4l2_async_notifier *notifier,
3281 struct v4l2_subdev *subdev,
3282 struct v4l2_async_connection *asd)
3283 {
3284 struct vip_port *port = notifier_to_vip_port(notifier);
3285 int ret;
3286
3287 if (port->subdev) {
3288 v4l2_info(&port->dev->v4l2_dev, "Rejecting subdev %s (Already set!!)",
3289 subdev->name);
3290 return 0;
3291 }
3292
3293 v4l2_info(&port->dev->v4l2_dev, "Port %c: Using subdev %s for capture\n",
3294 port->port_id == VIP_PORTA ? 'A' : 'B', subdev->name);
3295
3296 ret = vip_create_streams(port, subdev);
3297 if (ret)
3298 return ret;
3299
3300 return 0;
3301 }
3302
/* Nothing left to do once all subdevices are bound */
static int vip_async_complete(struct v4l2_async_notifier *notifier)
{
	return 0;
}

static const struct v4l2_async_notifier_operations vip_async_ops = {
	.bound = vip_async_bound,
	.complete = vip_async_complete,
};
3312
/*
 * OF-only helper: look up the endpoint with the given port/endpoint
 * reg values and return it as a fwnode handle. The caller owns the
 * returned reference and must fwnode_handle_put() it.
 */
static struct fwnode_handle *
fwnode_graph_get_next_endpoint_by_regs(const struct fwnode_handle *fwnode,
				       int port_reg, int reg)
{
	return of_fwnode_handle(of_graph_get_endpoint_by_regs(to_of_node(fwnode),
							      port_reg, reg));
}
3320
vip_register_subdev_notify(struct vip_port * port,struct fwnode_handle * ep)3321 static int vip_register_subdev_notify(struct vip_port *port,
3322 struct fwnode_handle *ep)
3323 {
3324 struct v4l2_async_notifier *notifier = &port->notifier;
3325 struct fwnode_handle *subdev;
3326 struct v4l2_fwnode_endpoint *vep;
3327 struct v4l2_async_connection *asd;
3328 int ret;
3329 struct vip_dev *dev = port->dev;
3330
3331 vep = &port->endpoint;
3332
3333 subdev = fwnode_graph_get_remote_port_parent(ep);
3334 if (!subdev) {
3335 v4l2_dbg(3, debug, &dev->v4l2_dev, "can't get remote parent\n");
3336 return -EINVAL;
3337 }
3338
3339 ret = v4l2_fwnode_endpoint_parse(ep, vep);
3340 if (ret) {
3341 v4l2_dbg(3, debug, &dev->v4l2_dev, "Failed to parse endpoint:\n");
3342 fwnode_handle_put(subdev);
3343 return -EINVAL;
3344 }
3345
3346 v4l2_async_nf_init(notifier, &port->dev->shared->v4l2_dev);
3347
3348 asd = v4l2_async_nf_add_fwnode(notifier, subdev, struct v4l2_async_connection);
3349 if (IS_ERR(asd)) {
3350 v4l2_dbg(1, debug, &dev->v4l2_dev, "Error adding asd\n");
3351 fwnode_handle_put(subdev);
3352 v4l2_async_nf_cleanup(notifier);
3353 return -EINVAL;
3354 }
3355
3356 notifier->ops = &vip_async_ops;
3357 ret = v4l2_async_nf_register(notifier);
3358 if (ret) {
3359 v4l2_dbg(1, debug, &dev->v4l2_dev, "Error registering async notifier\n");
3360 v4l2_async_nf_cleanup(notifier);
3361 ret = -EINVAL;
3362 }
3363
3364 return ret;
3365 }
3366
vip_endpoint_scan(struct platform_device * pdev)3367 static int vip_endpoint_scan(struct platform_device *pdev)
3368 {
3369 struct device_node *parent = pdev->dev.of_node;
3370 struct device_node *ep = NULL;
3371 int count = 0, p;
3372
3373 for (p = 0; p < (VIP_NUM_PORTS * VIP_NUM_SLICES); p++) {
3374 ep = of_graph_get_endpoint_by_regs(parent, p, 0);
3375 if (!ep)
3376 continue;
3377
3378 count++;
3379 of_node_put(ep);
3380 }
3381
3382 return count;
3383 }
3384
vip_probe_complete(struct platform_device * pdev)3385 static int vip_probe_complete(struct platform_device *pdev)
3386 {
3387 struct vip_shared *shared = platform_get_drvdata(pdev);
3388 struct vip_ctrl_module *ctrl = NULL;
3389 struct vip_port *port;
3390 struct vip_dev *dev;
3391 struct device_node *parent = pdev->dev.of_node;
3392 struct fwnode_handle *ep = NULL;
3393 unsigned int syscon_args[5];
3394 int ret, i, slice_id, port_id, p;
3395
3396 /* Allocate ctrl before using it */
3397 ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
3398 if (!ctrl)
3399 return -ENOMEM;
3400
3401 ctrl->syscon_pol = syscon_regmap_lookup_by_phandle_args(parent, "ti,ctrl-module",
3402 5, syscon_args);
3403
3404 if (IS_ERR(ctrl->syscon_pol))
3405 return dev_err_probe(&pdev->dev, PTR_ERR(ctrl->syscon_pol),
3406 "Failed to get ti,ctrl-module\n");
3407
3408 ctrl->syscon_offset = syscon_args[0];
3409
3410 for (i = 0; i < ARRAY_SIZE(ctrl->syscon_bit_field); i++)
3411 ctrl->syscon_bit_field[i] = syscon_args[i + 1];
3412
3413 for (p = 0; p < (VIP_NUM_PORTS * VIP_NUM_SLICES); p++) {
3414 ep = fwnode_graph_get_next_endpoint_by_regs(of_fwnode_handle(parent),
3415 p, 0);
3416 if (!ep)
3417 continue;
3418
3419 switch (p) {
3420 case 0:
3421 slice_id = VIP_SLICE1;
3422 port_id = VIP_PORTA;
3423 break;
3424 case 1:
3425 slice_id = VIP_SLICE2;
3426 port_id = VIP_PORTA;
3427 break;
3428 case 2:
3429 slice_id = VIP_SLICE1;
3430 port_id = VIP_PORTB;
3431 break;
3432 case 3:
3433 slice_id = VIP_SLICE2;
3434 port_id = VIP_PORTB;
3435 break;
3436 default:
3437 dev_err(&pdev->dev, "Unknown port reg=<%d>\n", p);
3438 continue;
3439 }
3440
3441 ret = alloc_port(shared->devs[slice_id], port_id);
3442 if (ret < 0)
3443 continue;
3444
3445 dev = shared->devs[slice_id];
3446 dev->syscon = ctrl;
3447 port = dev->ports[port_id];
3448
3449 vip_register_subdev_notify(port, ep);
3450 fwnode_handle_put(ep);
3451 }
3452 return 0;
3453 }
3454
vip_probe_slice(struct platform_device * pdev,int slice)3455 static int vip_probe_slice(struct platform_device *pdev, int slice)
3456 {
3457 struct vip_shared *shared = platform_get_drvdata(pdev);
3458 struct vip_dev *dev;
3459 struct vip_parser_data *parser;
3460 struct sc_data *sc;
3461 struct csc_data *csc;
3462 int ret;
3463
3464 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
3465 if (!dev)
3466 return -ENOMEM;
3467
3468 dev->irq = platform_get_irq(pdev, slice);
3469 if (dev->irq < 0)
3470 return dev->irq;
3471
3472 ret = devm_request_irq(&pdev->dev, dev->irq, vip_irq,
3473 0, VIP_MODULE_NAME, dev);
3474 if (ret < 0)
3475 return -ENOMEM;
3476
3477 spin_lock_init(&dev->slock);
3478 mutex_init(&dev->mutex);
3479
3480 dev->slice_id = slice;
3481 dev->pdev = pdev;
3482 dev->base = shared->base;
3483 dev->v4l2_dev = shared->v4l2_dev;
3484
3485 dev->shared = shared;
3486 shared->devs[slice] = dev;
3487
3488 vip_top_reset(dev);
3489 vip_set_slice_path(dev, VIP_MULTI_CHANNEL_DATA_SELECT, 1);
3490
3491 parser = devm_kzalloc(&pdev->dev, sizeof(*dev->parser), GFP_KERNEL);
3492 if (!parser)
3493 return PTR_ERR_OR_ZERO(parser);
3494
3495 parser->base = dev->base + (slice ? VIP_SLICE1_PARSER : VIP_SLICE0_PARSER);
3496 if (IS_ERR(parser->base))
3497 return PTR_ERR(parser->base);
3498
3499 parser->pdev = pdev;
3500 dev->parser = parser;
3501
3502 dev->sc_assigned = VIP_NOT_ASSIGNED;
3503 sc = devm_kzalloc(&pdev->dev, sizeof(*dev->sc), GFP_KERNEL);
3504 if (!sc)
3505 return PTR_ERR_OR_ZERO(sc);
3506
3507 sc->base = dev->base + (slice ? VIP_SLICE1_SC : VIP_SLICE0_SC);
3508 if (IS_ERR(sc->base))
3509 return PTR_ERR(sc->base);
3510
3511 sc->pdev = pdev;
3512 dev->sc = sc;
3513
3514 dev->csc_assigned = VIP_NOT_ASSIGNED;
3515 csc = devm_kzalloc(&pdev->dev, sizeof(*dev->csc), GFP_KERNEL);
3516 if (!csc)
3517 return PTR_ERR_OR_ZERO(csc);
3518
3519 csc->base = dev->base + (slice ? VIP_SLICE1_CSC : VIP_SLICE0_CSC);
3520 if (IS_ERR(csc->base))
3521 return PTR_ERR(csc->base);
3522
3523 csc->pdev = pdev;
3524 dev->csc = csc;
3525
3526 return 0;
3527 }
3528
/*
 * vip_probe() - first-stage probe.
 *
 * Maps the shared register space, powers the block up, sanity-checks
 * the PID register, registers the v4l2 device and per-slice contexts,
 * then starts the VPDMA firmware load; probing completes in
 * vip_probe_complete() from the firmware callback.
 */
static int vip_probe(struct platform_device *pdev)
{
	struct vip_shared *shared;
	int ret, slice = VIP_SLICE1;
	u32 tmp, pid;

	/* If there are no endpoint defined there is nothing to do */
	if (!vip_endpoint_scan(pdev)) {
		dev_err(&pdev->dev, "%s: No sensor", __func__);
		return -ENODEV;
	}

	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev,
			"32-bit consistent DMA enable failed\n");
		return ret;
	}

	shared = devm_kzalloc(&pdev->dev, sizeof(*shared), GFP_KERNEL);
	if (!shared)
		return -ENOMEM;

	shared->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(shared->base))
		return PTR_ERR(shared->base);

	vip_init_format_info(&pdev->dev);

	pm_runtime_enable(&pdev->dev);

	/*
	 * NOTE(review): if pm_runtime_get_sync() fails, the usage count
	 * stays elevated on the err_runtime_disable path; confirm whether
	 * a pm_runtime_put_noidle() is needed before disabling.
	 */
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		goto err_runtime_disable;

	/* Make sure H/W module has the right functionality */
	pid = reg_read(shared, VIP_PID);
	tmp = get_field(pid, VIP_PID_FUNC_MASK, VIP_PID_FUNC_SHIFT);

	if (tmp != VIP_PID_FUNC) {
		dev_info(&pdev->dev, "vip: unexpected PID function: 0x%x\n",
			 tmp);
		ret = -ENODEV;
		goto err_runtime_put;
	}

	ret = v4l2_device_register(&pdev->dev, &shared->v4l2_dev);
	if (ret)
		goto err_runtime_put;

	/* enable clocks, so the firmware will load properly */
	vip_shared_set_clock_enable(shared, 1);
	vip_top_vpdma_reset(shared);

	platform_set_drvdata(pdev, shared);

	v4l2_ctrl_handler_init(&shared->ctrl_handler, 11);
	shared->v4l2_dev.ctrl_handler = &shared->ctrl_handler;

	for (slice = VIP_SLICE1; slice < VIP_NUM_SLICES; slice++) {
		ret = vip_probe_slice(pdev, slice);
		if (ret) {
			dev_err(&pdev->dev, "Creating slice failed");
			goto err_dev_unreg;
		}
	}

	shared->vpdma = &shared->vpdma_data;

	shared->vpdma->pdev = pdev;
	shared->vpdma->cb = vip_vpdma_fw_cb;
	spin_lock_init(&shared->vpdma->lock);

	/*
	 * NOTE(review): this is plain pointer arithmetic on a validated
	 * mapping, so the NULL check below can never trigger in practice.
	 */
	shared->vpdma->base = shared->base + VIP_VPDMA_BASE;
	if (!shared->vpdma->base) {
		dev_err(&pdev->dev, "failed to ioremap\n");
		ret = -ENOMEM;
		goto err_dev_unreg;
	}

	ret = vpdma_load_firmware(shared->vpdma);
	if (ret) {
		dev_err(&pdev->dev, "Creating VPDMA failed");
		goto err_dev_unreg;
	}

	return 0;

err_dev_unreg:
	v4l2_ctrl_handler_free(&shared->ctrl_handler);
	v4l2_device_unregister(&shared->v4l2_dev);
err_runtime_put:
	pm_runtime_put_sync(&pdev->dev);
err_runtime_disable:
	pm_runtime_disable(&pdev->dev);

	return ret;
}
3627
/*
 * vip_remove() - platform driver removal hook.
 *
 * Frees the ports of every slice, drops the control handler and the
 * runtime-PM reference taken at probe time.
 *
 * NOTE(review): the v4l2_device registered in vip_probe() is not
 * unregistered here — confirm whether v4l2_device_unregister() is
 * intentionally omitted.
 */
static void vip_remove(struct platform_device *pdev)
{
	struct vip_shared *shared = platform_get_drvdata(pdev);
	struct vip_dev *dev;
	int slice;

	for (slice = 0; slice < VIP_NUM_SLICES; slice++) {
		dev = shared->devs[slice];
		if (!dev)
			continue;

		free_port(dev->ports[VIP_PORTA]);
		free_port(dev->ports[VIP_PORTB]);
	}

	v4l2_ctrl_handler_free(&shared->ctrl_handler);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}
3648
#if defined(CONFIG_OF)
/* Device-tree match table: binds this driver to "ti,dra7-vip" nodes */
static const struct of_device_id vip_of_match[] = {
	{
		.compatible = "ti,dra7-vip",
	},
	{},
};

MODULE_DEVICE_TABLE(of, vip_of_match);
#endif
3659
/* Platform driver glue and module metadata */
static struct platform_driver vip_pdrv = {
	.probe = vip_probe,
	.remove = vip_remove,
	.driver = {
		.name = VIP_MODULE_NAME,
		.of_match_table = of_match_ptr(vip_of_match),
	},
};

module_platform_driver(vip_pdrv);

MODULE_DESCRIPTION("TI VIP driver");
MODULE_AUTHOR("Texas Instruments");
MODULE_LICENSE("GPL");
3674