1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * TI VPFE capture Driver
4 *
5 * Copyright (C) 2013 - 2014 Texas Instruments, Inc.
6 *
7 * Benoit Parrot <bparrot@ti.com>
8 * Lad, Prabhakar <prabhakar.csengg@gmail.com>
9 */
10
11 #include <linux/delay.h>
12 #include <linux/err.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/module.h>
17 #include <linux/of_graph.h>
18 #include <linux/pinctrl/consumer.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/slab.h>
22 #include <linux/uaccess.h>
23 #include <linux/videodev2.h>
24
25 #include <media/v4l2-common.h>
26 #include <media/v4l2-ctrls.h>
27 #include <media/v4l2-event.h>
28 #include <media/v4l2-fwnode.h>
29 #include <media/v4l2-rect.h>
30
31 #include "am437x-vpfe.h"
32
33 #define VPFE_MODULE_NAME "vpfe"
34 #define VPFE_VERSION "0.1.0"
35
36 static int debug;
37 module_param(debug, int, 0644);
38 MODULE_PARM_DESC(debug, "Debug level 0-8");
39
40 #define vpfe_dbg(level, dev, fmt, arg...) \
41 v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ##arg)
42 #define vpfe_info(dev, fmt, arg...) \
43 v4l2_info(&dev->v4l2_dev, fmt, ##arg)
44 #define vpfe_err(dev, fmt, arg...) \
45 v4l2_err(&dev->v4l2_dev, fmt, ##arg)
46
47 /* standard information */
48 struct vpfe_standard {
49 v4l2_std_id std_id;
50 unsigned int width;
51 unsigned int height;
52 struct v4l2_fract pixelaspect;
53 int frame_format;
54 };
55
56 static const struct vpfe_standard vpfe_standards[] = {
57 {V4L2_STD_525_60, 720, 480, {11, 10}, 1},
58 {V4L2_STD_625_50, 720, 576, {54, 59}, 1},
59 };
60
61 static struct vpfe_fmt formats[VPFE_NUM_FORMATS] = {
62 {
63 .fourcc = V4L2_PIX_FMT_YUYV,
64 .code = MEDIA_BUS_FMT_YUYV8_2X8,
65 .bitsperpixel = 16,
66 }, {
67 .fourcc = V4L2_PIX_FMT_UYVY,
68 .code = MEDIA_BUS_FMT_UYVY8_2X8,
69 .bitsperpixel = 16,
70 }, {
71 .fourcc = V4L2_PIX_FMT_YVYU,
72 .code = MEDIA_BUS_FMT_YVYU8_2X8,
73 .bitsperpixel = 16,
74 }, {
75 .fourcc = V4L2_PIX_FMT_VYUY,
76 .code = MEDIA_BUS_FMT_VYUY8_2X8,
77 .bitsperpixel = 16,
78 }, {
79 .fourcc = V4L2_PIX_FMT_SBGGR8,
80 .code = MEDIA_BUS_FMT_SBGGR8_1X8,
81 .bitsperpixel = 8,
82 }, {
83 .fourcc = V4L2_PIX_FMT_SGBRG8,
84 .code = MEDIA_BUS_FMT_SGBRG8_1X8,
85 .bitsperpixel = 8,
86 }, {
87 .fourcc = V4L2_PIX_FMT_SGRBG8,
88 .code = MEDIA_BUS_FMT_SGRBG8_1X8,
89 .bitsperpixel = 8,
90 }, {
91 .fourcc = V4L2_PIX_FMT_SRGGB8,
92 .code = MEDIA_BUS_FMT_SRGGB8_1X8,
93 .bitsperpixel = 8,
94 }, {
95 .fourcc = V4L2_PIX_FMT_RGB565,
96 .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
97 .bitsperpixel = 16,
98 }, {
99 .fourcc = V4L2_PIX_FMT_RGB565X,
100 .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
101 .bitsperpixel = 16,
102 },
103 };
104
105 static int __subdev_get_format(struct vpfe_device *vpfe,
106 struct v4l2_mbus_framefmt *fmt);
107 static int vpfe_calc_format_size(struct vpfe_device *vpfe,
108 const struct vpfe_fmt *fmt,
109 struct v4l2_format *f);
110
111 static struct vpfe_fmt *find_format_by_code(struct vpfe_device *vpfe,
112 unsigned int code)
113 {
114 struct vpfe_fmt *fmt;
115 unsigned int k;
116
117 for (k = 0; k < vpfe->num_active_fmt; k++) {
118 fmt = vpfe->active_fmt[k];
119 if (fmt->code == code)
120 return fmt;
121 }
122
123 return NULL;
124 }
125
126 static struct vpfe_fmt *find_format_by_pix(struct vpfe_device *vpfe,
127 unsigned int pixelformat)
128 {
129 struct vpfe_fmt *fmt;
130 unsigned int k;
131
132 for (k = 0; k < vpfe->num_active_fmt; k++) {
133 fmt = vpfe->active_fmt[k];
134 if (fmt->fourcc == pixelformat)
135 return fmt;
136 }
137
138 return NULL;
139 }
140
141 static unsigned int __get_bytesperpixel(struct vpfe_device *vpfe,
142 const struct vpfe_fmt *fmt)
143 {
144 struct vpfe_subdev_info *sdinfo = vpfe->current_subdev;
145 unsigned int bus_width = sdinfo->vpfe_param.bus_width;
146 u32 bpp, bus_width_bytes, clocksperpixel;
147
148 bus_width_bytes = ALIGN(bus_width, 8) >> 3;
149 clocksperpixel = DIV_ROUND_UP(fmt->bitsperpixel, bus_width);
150 bpp = clocksperpixel * bus_width_bytes;
151
152 return bpp;
153 }
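/*
 * Worked example (illustrative only, not from the original source): on an
 * 8-bit bus with V4L2_PIX_FMT_YUYV (16 bits per pixel), bus_width_bytes = 1
 * and clocksperpixel = DIV_ROUND_UP(16, 8) = 2, so bpp = 2 bytes per pixel;
 * an 8-bit raw Bayer format on the same bus gives bpp = 1.
 */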
154
155 /* Print Four-character-code (FOURCC) */
156 static char *print_fourcc(u32 fmt)
157 {
158 static char code[5];
159
160 code[0] = (unsigned char)(fmt & 0xff);
161 code[1] = (unsigned char)((fmt >> 8) & 0xff);
162 code[2] = (unsigned char)((fmt >> 16) & 0xff);
163 code[3] = (unsigned char)((fmt >> 24) & 0xff);
164 code[4] = '\0';
165
166 return code;
167 }
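/*
 * Illustration: print_fourcc(V4L2_PIX_FMT_YUYV) yields the string "YUYV".
 * Because the buffer is static, the returned string is only valid until
 * the next call, which is sufficient for the debug prints in this file.
 */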
168
169 static inline u32 vpfe_reg_read(struct vpfe_ccdc *ccdc, u32 offset)
170 {
171 return ioread32(ccdc->ccdc_cfg.base_addr + offset);
172 }
173
174 static inline void vpfe_reg_write(struct vpfe_ccdc *ccdc, u32 val, u32 offset)
175 {
176 iowrite32(val, ccdc->ccdc_cfg.base_addr + offset);
177 }
178
179 static inline struct vpfe_device *to_vpfe(struct vpfe_ccdc *ccdc)
180 {
181 return container_of(ccdc, struct vpfe_device, ccdc);
182 }
183
184 static inline
185 struct vpfe_cap_buffer *to_vpfe_buffer(struct vb2_v4l2_buffer *vb)
186 {
187 return container_of(vb, struct vpfe_cap_buffer, vb);
188 }
189
190 static inline void vpfe_pcr_enable(struct vpfe_ccdc *ccdc, int flag)
191 {
192 vpfe_reg_write(ccdc, !!flag, VPFE_PCR);
193 }
194
195 static void vpfe_config_enable(struct vpfe_ccdc *ccdc, int flag)
196 {
197 unsigned int cfg;
198
199 if (!flag) {
200 cfg = vpfe_reg_read(ccdc, VPFE_CONFIG);
201 cfg &= ~(VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT);
202 } else {
203 cfg = VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT;
204 }
205
206 vpfe_reg_write(ccdc, cfg, VPFE_CONFIG);
207 }
208
209 static void vpfe_ccdc_setwin(struct vpfe_ccdc *ccdc,
210 struct v4l2_rect *image_win,
211 enum ccdc_frmfmt frm_fmt,
212 int bpp)
213 {
214 int horz_start, horz_nr_pixels;
215 int vert_start, vert_nr_lines;
216 int val, mid_img;
217
218 /*
219 * ppc - per pixel count. Indicates how many pixels per cell are
220 * output to SDRAM. For example, for YCbCr it is one Y and one C, so 2;
221 * for raw capture this is 1.
222 */
223 horz_start = image_win->left * bpp;
224 horz_nr_pixels = (image_win->width * bpp) - 1;
225 vpfe_reg_write(ccdc, (horz_start << VPFE_HORZ_INFO_SPH_SHIFT) |
226 horz_nr_pixels, VPFE_HORZ_INFO);
227
228 vert_start = image_win->top;
229
230 if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
231 vert_nr_lines = (image_win->height >> 1) - 1;
232 vert_start >>= 1;
233 /* configure VDINT0 */
234 val = (vert_start << VPFE_VDINT_VDINT0_SHIFT);
235 } else {
236 vert_nr_lines = image_win->height - 1;
237 /*
238 * configure VDINT0 and VDINT1. VDINT1 will be at half
239 * of image height
240 */
241 mid_img = vert_start + (image_win->height / 2);
242 val = (vert_start << VPFE_VDINT_VDINT0_SHIFT) |
243 (mid_img & VPFE_VDINT_VDINT1_MASK);
244 }
245
246 vpfe_reg_write(ccdc, val, VPFE_VDINT);
247
248 vpfe_reg_write(ccdc, (vert_start << VPFE_VERT_START_SLV0_SHIFT) |
249 vert_start, VPFE_VERT_START);
250 vpfe_reg_write(ccdc, vert_nr_lines, VPFE_VERT_LINES);
251 }
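/*
 * Example (assumed values, for illustration): a 720x480 interlaced window
 * at (0,0) with bpp = 2 programs horz_start = 0, horz_nr_pixels =
 * 720 * 2 - 1 = 1439 and vert_nr_lines = (480 >> 1) - 1 = 239, with
 * VDINT0 set to the halved vertical start line (0 here).
 */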
252
253 static void vpfe_reg_dump(struct vpfe_ccdc *ccdc)
254 {
255 struct vpfe_device *vpfe = to_vpfe(ccdc);
256
257 vpfe_dbg(3, vpfe, "ALAW: 0x%x\n", vpfe_reg_read(ccdc, VPFE_ALAW));
258 vpfe_dbg(3, vpfe, "CLAMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_CLAMP));
259 vpfe_dbg(3, vpfe, "DCSUB: 0x%x\n", vpfe_reg_read(ccdc, VPFE_DCSUB));
260 vpfe_dbg(3, vpfe, "BLKCMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_BLKCMP));
261 vpfe_dbg(3, vpfe, "COLPTN: 0x%x\n", vpfe_reg_read(ccdc, VPFE_COLPTN));
262 vpfe_dbg(3, vpfe, "SDOFST: 0x%x\n", vpfe_reg_read(ccdc, VPFE_SDOFST));
263 vpfe_dbg(3, vpfe, "SYN_MODE: 0x%x\n",
264 vpfe_reg_read(ccdc, VPFE_SYNMODE));
265 vpfe_dbg(3, vpfe, "HSIZE_OFF: 0x%x\n",
266 vpfe_reg_read(ccdc, VPFE_HSIZE_OFF));
267 vpfe_dbg(3, vpfe, "HORZ_INFO: 0x%x\n",
268 vpfe_reg_read(ccdc, VPFE_HORZ_INFO));
269 vpfe_dbg(3, vpfe, "VERT_START: 0x%x\n",
270 vpfe_reg_read(ccdc, VPFE_VERT_START));
271 vpfe_dbg(3, vpfe, "VERT_LINES: 0x%x\n",
272 vpfe_reg_read(ccdc, VPFE_VERT_LINES));
273 }
274
275 static int
276 vpfe_ccdc_validate_param(struct vpfe_ccdc *ccdc,
277 struct vpfe_ccdc_config_params_raw *ccdcparam)
278 {
279 struct vpfe_device *vpfe = to_vpfe(ccdc);
280 u8 max_gamma, max_data;
281
282 if (!ccdcparam->alaw.enable)
283 return 0;
284
285 max_gamma = ccdc_gamma_width_max_bit(ccdcparam->alaw.gamma_wd);
286 max_data = ccdc_data_size_max_bit(ccdcparam->data_sz);
287
288 if (ccdcparam->alaw.gamma_wd > VPFE_CCDC_GAMMA_BITS_09_0 ||
289 ccdcparam->data_sz > VPFE_CCDC_DATA_8BITS ||
290 max_gamma > max_data) {
291 vpfe_dbg(1, vpfe, "Invalid data line select\n");
292 return -EINVAL;
293 }
294
295 return 0;
296 }
297
298 static void
299 vpfe_ccdc_update_raw_params(struct vpfe_ccdc *ccdc,
300 struct vpfe_ccdc_config_params_raw *raw_params)
301 {
302 struct vpfe_ccdc_config_params_raw *config_params =
303 &ccdc->ccdc_cfg.bayer.config_params;
304
305 *config_params = *raw_params;
306 }
307
308 /*
309 * vpfe_ccdc_restore_defaults()
310 * This function will write defaults to all CCDC registers
311 */
312 static void vpfe_ccdc_restore_defaults(struct vpfe_ccdc *ccdc)
313 {
314 int i;
315
316 /* Disable CCDC */
317 vpfe_pcr_enable(ccdc, 0);
318
319 /* set all registers to default value */
320 for (i = 4; i <= 0x94; i += 4)
321 vpfe_reg_write(ccdc, 0, i);
322
323 vpfe_reg_write(ccdc, VPFE_NO_CULLING, VPFE_CULLING);
324 vpfe_reg_write(ccdc, VPFE_CCDC_GAMMA_BITS_11_2, VPFE_ALAW);
325 }
326
327 static int vpfe_ccdc_close(struct vpfe_ccdc *ccdc, struct device *dev)
328 {
329 struct vpfe_device *vpfe = to_vpfe(ccdc);
330 u32 dma_cntl, pcr;
331
332 pcr = vpfe_reg_read(ccdc, VPFE_PCR);
333 if (pcr)
334 vpfe_dbg(1, vpfe, "VPFE_PCR is still set (%x)", pcr);
335
336 dma_cntl = vpfe_reg_read(ccdc, VPFE_DMA_CNTL);
337 if ((dma_cntl & VPFE_DMA_CNTL_OVERFLOW))
338 vpfe_dbg(1, vpfe, "VPFE_DMA_CNTL_OVERFLOW is still set (%x)",
339 dma_cntl);
340
341 /* Disable CCDC by resetting all registers to default POR values */
342 vpfe_ccdc_restore_defaults(ccdc);
343
344 /* Disable the module at the CONFIG level */
345 vpfe_config_enable(ccdc, 0);
346
347 pm_runtime_put_sync(dev);
348 return 0;
349 }
350
351 static int vpfe_ccdc_set_params(struct vpfe_ccdc *ccdc, void __user *params)
352 {
353 struct vpfe_device *vpfe = to_vpfe(ccdc);
354 struct vpfe_ccdc_config_params_raw raw_params;
355 int x;
356
357 if (ccdc->ccdc_cfg.if_type != VPFE_RAW_BAYER)
358 return -EINVAL;
359
360 x = copy_from_user(&raw_params, params, sizeof(raw_params));
361 if (x) {
362 vpfe_dbg(1, vpfe,
363 "%s: error in copying ccdc params, %d\n",
364 __func__, x);
365 return -EFAULT;
366 }
367
368 if (!vpfe_ccdc_validate_param(ccdc, &raw_params)) {
369 vpfe_ccdc_update_raw_params(ccdc, &raw_params);
370 return 0;
371 }
372
373 return -EINVAL;
374 }
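/*
 * Note: copy_from_user() returns the number of bytes it could not copy,
 * so any non-zero value is reported as -EFAULT above. Raw parameters are
 * only meaningful for the Bayer interface, hence the early -EINVAL for
 * every other interface type.
 */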
375
376 /*
377 * vpfe_ccdc_config_ycbcr()
378 * This function will configure CCDC for YCbCr video capture
379 */
380 static void vpfe_ccdc_config_ycbcr(struct vpfe_ccdc *ccdc)
381 {
382 struct ccdc_params_ycbcr *params = &ccdc->ccdc_cfg.ycbcr;
383 u32 syn_mode;
384
385 /*
386 * first restore the CCDC registers to default values
387 * This is important since we assume default values to be set in
388 * a lot of registers that we didn't touch
389 */
390 vpfe_ccdc_restore_defaults(ccdc);
391
392 /*
393 * configure pixel format, frame format, configure video frame
394 * format, enable output to SDRAM, enable internal timing generator
395 * and 8bit pack mode
396 */
397 syn_mode = (((params->pix_fmt & VPFE_SYN_MODE_INPMOD_MASK) <<
398 VPFE_SYN_MODE_INPMOD_SHIFT) |
399 ((params->frm_fmt & VPFE_SYN_FLDMODE_MASK) <<
400 VPFE_SYN_FLDMODE_SHIFT) | VPFE_VDHDEN_ENABLE |
401 VPFE_WEN_ENABLE | VPFE_DATA_PACK_ENABLE);
402
403 /* setup BT.656 sync mode */
404 if (params->bt656_enable) {
405 vpfe_reg_write(ccdc, VPFE_REC656IF_BT656_EN, VPFE_REC656IF);
406
407 /*
408 * configure the FID, VD, HD pin polarity,
409 * fld,hd pol positive, vd negative, 8-bit data
410 */
411 syn_mode |= VPFE_SYN_MODE_VD_POL_NEGATIVE;
412 if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT)
413 syn_mode |= VPFE_SYN_MODE_10BITS;
414 else
415 syn_mode |= VPFE_SYN_MODE_8BITS;
416 } else {
417 /* y/c external sync mode */
418 syn_mode |= (((params->fid_pol & VPFE_FID_POL_MASK) <<
419 VPFE_FID_POL_SHIFT) |
420 ((params->hd_pol & VPFE_HD_POL_MASK) <<
421 VPFE_HD_POL_SHIFT) |
422 ((params->vd_pol & VPFE_VD_POL_MASK) <<
423 VPFE_VD_POL_SHIFT));
424 }
425 vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE);
426
427 /* configure video window */
428 vpfe_ccdc_setwin(ccdc, &params->win,
429 params->frm_fmt, params->bytesperpixel);
430
431 /*
432 * configure the order of y cb cr in SDRAM, and disable latch
433 * internal register on vsync
434 */
435 if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT)
436 vpfe_reg_write(ccdc,
437 (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) |
438 VPFE_LATCH_ON_VSYNC_DISABLE |
439 VPFE_CCDCFG_BW656_10BIT, VPFE_CCDCFG);
440 else
441 vpfe_reg_write(ccdc,
442 (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) |
443 VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG);
444
445 /*
446 * configure the horizontal line offset. This should be on a
447 * 32 byte boundary, so clear the 5 LSBs
448 */
449 vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF);
450
451 /* configure the memory line offset */
452 if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)
453 /* two fields are interleaved in memory */
454 vpfe_reg_write(ccdc, VPFE_SDOFST_FIELD_INTERLEAVED,
455 VPFE_SDOFST);
456 }
457
458 static void
459 vpfe_ccdc_config_black_clamp(struct vpfe_ccdc *ccdc,
460 struct vpfe_ccdc_black_clamp *bclamp)
461 {
462 u32 val;
463
464 if (!bclamp->enable) {
465 /* configure DCSub */
466 val = (bclamp->dc_sub) & VPFE_BLK_DC_SUB_MASK;
467 vpfe_reg_write(ccdc, val, VPFE_DCSUB);
468 vpfe_reg_write(ccdc, VPFE_CLAMP_DEFAULT_VAL, VPFE_CLAMP);
469 return;
470 }
471 /*
472 * Configure gain, start pixel, number of lines to be averaged,
473 * number of pixels/line to be averaged, and enable black clamping
474 */
475 val = ((bclamp->sgain & VPFE_BLK_SGAIN_MASK) |
476 ((bclamp->start_pixel & VPFE_BLK_ST_PXL_MASK) <<
477 VPFE_BLK_ST_PXL_SHIFT) |
478 ((bclamp->sample_ln & VPFE_BLK_SAMPLE_LINE_MASK) <<
479 VPFE_BLK_SAMPLE_LINE_SHIFT) |
480 ((bclamp->sample_pixel & VPFE_BLK_SAMPLE_LN_MASK) <<
481 VPFE_BLK_SAMPLE_LN_SHIFT) | VPFE_BLK_CLAMP_ENABLE);
482 vpfe_reg_write(ccdc, val, VPFE_CLAMP);
483 /* If black clamping is enabled then make dcsub 0 */
484 vpfe_reg_write(ccdc, VPFE_DCSUB_DEFAULT_VAL, VPFE_DCSUB);
485 }
486
487 static void
488 vpfe_ccdc_config_black_compense(struct vpfe_ccdc *ccdc,
489 struct vpfe_ccdc_black_compensation *bcomp)
490 {
491 u32 val;
492
493 val = ((bcomp->b & VPFE_BLK_COMP_MASK) |
494 ((bcomp->gb & VPFE_BLK_COMP_MASK) <<
495 VPFE_BLK_COMP_GB_COMP_SHIFT) |
496 ((bcomp->gr & VPFE_BLK_COMP_MASK) <<
497 VPFE_BLK_COMP_GR_COMP_SHIFT) |
498 ((bcomp->r & VPFE_BLK_COMP_MASK) <<
499 VPFE_BLK_COMP_R_COMP_SHIFT));
500 vpfe_reg_write(ccdc, val, VPFE_BLKCMP);
501 }
502
503 /*
504 * vpfe_ccdc_config_raw()
505 * This function will configure CCDC for Raw capture mode
506 */
507 static void vpfe_ccdc_config_raw(struct vpfe_ccdc *ccdc)
508 {
509 struct vpfe_device *vpfe = to_vpfe(ccdc);
510 struct vpfe_ccdc_config_params_raw *config_params =
511 &ccdc->ccdc_cfg.bayer.config_params;
512 struct ccdc_params_raw *params = &ccdc->ccdc_cfg.bayer;
513 unsigned int syn_mode;
514 unsigned int val;
515
516 /* Reset CCDC */
517 vpfe_ccdc_restore_defaults(ccdc);
518
519 /* Disable latching function registers on VSYNC */
520 vpfe_reg_write(ccdc, VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG);
521
522 /*
523 * Configure the vertical sync polarity(SYN_MODE.VDPOL),
524 * horizontal sync polarity (SYN_MODE.HDPOL), frame id polarity
525 * (SYN_MODE.FLDPOL), frame format(progressive or interlace),
526 * data size (SYNMODE.DATSIZ), & pixel format (Input mode), output to
527 * SDRAM, enable internal timing generator
528 */
529 syn_mode = (((params->vd_pol & VPFE_VD_POL_MASK) << VPFE_VD_POL_SHIFT) |
530 ((params->hd_pol & VPFE_HD_POL_MASK) << VPFE_HD_POL_SHIFT) |
531 ((params->fid_pol & VPFE_FID_POL_MASK) <<
532 VPFE_FID_POL_SHIFT) | ((params->frm_fmt &
533 VPFE_FRM_FMT_MASK) << VPFE_FRM_FMT_SHIFT) |
534 ((config_params->data_sz & VPFE_DATA_SZ_MASK) <<
535 VPFE_DATA_SZ_SHIFT) | ((params->pix_fmt &
536 VPFE_PIX_FMT_MASK) << VPFE_PIX_FMT_SHIFT) |
537 VPFE_WEN_ENABLE | VPFE_VDHDEN_ENABLE);
538
539 /* Enable and configure aLaw register if needed */
540 if (config_params->alaw.enable) {
541 val = ((config_params->alaw.gamma_wd &
542 VPFE_ALAW_GAMMA_WD_MASK) | VPFE_ALAW_ENABLE);
543 vpfe_reg_write(ccdc, val, VPFE_ALAW);
544 vpfe_dbg(3, vpfe, "\nWriting 0x%x to ALAW...\n", val);
545 }
546
547 /* Configure video window */
548 vpfe_ccdc_setwin(ccdc, &params->win, params->frm_fmt,
549 params->bytesperpixel);
550
551 /* Configure Black Clamp */
552 vpfe_ccdc_config_black_clamp(ccdc, &config_params->blk_clamp);
553
554 /* Configure Black level compensation */
555 vpfe_ccdc_config_black_compense(ccdc, &config_params->blk_comp);
556
557 /* If data size is 8 bit then pack the data */
558 if ((config_params->data_sz == VPFE_CCDC_DATA_8BITS) ||
559 config_params->alaw.enable)
560 syn_mode |= VPFE_DATA_PACK_ENABLE;
561
562 /*
563 * Configure Horizontal offset register. If pack 8 is enabled then
564 * 1 pixel will take 1 byte
565 */
566 vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF);
567
568 vpfe_dbg(3, vpfe, "Writing %d (%x) to HSIZE_OFF\n",
569 params->bytesperline, params->bytesperline);
570
571 /* Set value for SDOFST */
572 if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
573 if (params->image_invert_enable) {
574 /* For interlace inverse mode */
575 vpfe_reg_write(ccdc, VPFE_INTERLACED_IMAGE_INVERT,
576 VPFE_SDOFST);
577 } else {
578 /* For interlace non inverse mode */
579 vpfe_reg_write(ccdc, VPFE_INTERLACED_NO_IMAGE_INVERT,
580 VPFE_SDOFST);
581 }
582 } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
583 vpfe_reg_write(ccdc, VPFE_PROGRESSIVE_NO_IMAGE_INVERT,
584 VPFE_SDOFST);
585 }
586
587 vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE);
588
589 vpfe_reg_dump(ccdc);
590 }
591
592 static inline int
593 vpfe_ccdc_set_buftype(struct vpfe_ccdc *ccdc,
594 enum ccdc_buftype buf_type)
595 {
596 if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
597 ccdc->ccdc_cfg.bayer.buf_type = buf_type;
598 else
599 ccdc->ccdc_cfg.ycbcr.buf_type = buf_type;
600
601 return 0;
602 }
603
604 static inline enum ccdc_buftype vpfe_ccdc_get_buftype(struct vpfe_ccdc *ccdc)
605 {
606 if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
607 return ccdc->ccdc_cfg.bayer.buf_type;
608
609 return ccdc->ccdc_cfg.ycbcr.buf_type;
610 }
611
612 static int vpfe_ccdc_set_pixel_format(struct vpfe_ccdc *ccdc, u32 pixfmt)
613 {
614 struct vpfe_device *vpfe = to_vpfe(ccdc);
615
616 vpfe_dbg(1, vpfe, "%s: if_type: %d, pixfmt:%s\n",
617 __func__, ccdc->ccdc_cfg.if_type, print_fourcc(pixfmt));
618
619 if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
620 ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
621 /*
622 * Need to clear it in case it was left on
623 * after the last capture.
624 */
625 ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 0;
626
627 switch (pixfmt) {
628 case V4L2_PIX_FMT_SBGGR8:
629 ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 1;
630 break;
631
632 case V4L2_PIX_FMT_YUYV:
633 case V4L2_PIX_FMT_UYVY:
634 case V4L2_PIX_FMT_YUV420:
635 case V4L2_PIX_FMT_NV12:
636 case V4L2_PIX_FMT_RGB565X:
637 break;
638
639 case V4L2_PIX_FMT_SBGGR16:
640 default:
641 return -EINVAL;
642 }
643 } else {
644 switch (pixfmt) {
645 case V4L2_PIX_FMT_YUYV:
646 ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
647 break;
648
649 case V4L2_PIX_FMT_UYVY:
650 ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
651 break;
652
653 default:
654 return -EINVAL;
655 }
656 }
657
658 return 0;
659 }
660
661 static u32 vpfe_ccdc_get_pixel_format(struct vpfe_ccdc *ccdc)
662 {
663 u32 pixfmt;
664
665 if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
666 pixfmt = V4L2_PIX_FMT_YUYV;
667 } else {
668 if (ccdc->ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
669 pixfmt = V4L2_PIX_FMT_YUYV;
670 else
671 pixfmt = V4L2_PIX_FMT_UYVY;
672 }
673
674 return pixfmt;
675 }
676
677 static int
678 vpfe_ccdc_set_image_window(struct vpfe_ccdc *ccdc,
679 struct v4l2_rect *win, unsigned int bpp)
680 {
681 if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
682 ccdc->ccdc_cfg.bayer.win = *win;
683 ccdc->ccdc_cfg.bayer.bytesperpixel = bpp;
684 ccdc->ccdc_cfg.bayer.bytesperline = ALIGN(win->width * bpp, 32);
685 } else {
686 ccdc->ccdc_cfg.ycbcr.win = *win;
687 ccdc->ccdc_cfg.ycbcr.bytesperpixel = bpp;
688 ccdc->ccdc_cfg.ycbcr.bytesperline = ALIGN(win->width * bpp, 32);
689 }
690
691 return 0;
692 }
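/*
 * Example (illustrative numbers): a 640-pixel wide window with bpp = 1
 * gives bytesperline = ALIGN(640, 32) = 640, while a 642-pixel wide
 * window is padded up to 672 bytes per line.
 */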
693
694 static inline void
695 vpfe_ccdc_get_image_window(struct vpfe_ccdc *ccdc,
696 struct v4l2_rect *win)
697 {
698 if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
699 *win = ccdc->ccdc_cfg.bayer.win;
700 else
701 *win = ccdc->ccdc_cfg.ycbcr.win;
702 }
703
704 static inline unsigned int vpfe_ccdc_get_line_length(struct vpfe_ccdc *ccdc)
705 {
706 if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
707 return ccdc->ccdc_cfg.bayer.bytesperline;
708
709 return ccdc->ccdc_cfg.ycbcr.bytesperline;
710 }
711
712 static inline int
713 vpfe_ccdc_set_frame_format(struct vpfe_ccdc *ccdc,
714 enum ccdc_frmfmt frm_fmt)
715 {
716 if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
717 ccdc->ccdc_cfg.bayer.frm_fmt = frm_fmt;
718 else
719 ccdc->ccdc_cfg.ycbcr.frm_fmt = frm_fmt;
720
721 return 0;
722 }
723
724 static inline enum ccdc_frmfmt
725 vpfe_ccdc_get_frame_format(struct vpfe_ccdc *ccdc)
726 {
727 if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
728 return ccdc->ccdc_cfg.bayer.frm_fmt;
729
730 return ccdc->ccdc_cfg.ycbcr.frm_fmt;
731 }
732
733 static inline int vpfe_ccdc_getfid(struct vpfe_ccdc *ccdc)
734 {
735 return (vpfe_reg_read(ccdc, VPFE_SYNMODE) >> 15) & 1;
736 }
737
738 static inline void vpfe_set_sdr_addr(struct vpfe_ccdc *ccdc, unsigned long addr)
739 {
740 vpfe_reg_write(ccdc, addr & 0xffffffe0, VPFE_SDR_ADDR);
741 }
742
743 static int vpfe_ccdc_set_hw_if_params(struct vpfe_ccdc *ccdc,
744 struct vpfe_hw_if_param *params)
745 {
746 struct vpfe_device *vpfe = to_vpfe(ccdc);
747
748 ccdc->ccdc_cfg.if_type = params->if_type;
749
750 switch (params->if_type) {
751 case VPFE_BT656:
752 case VPFE_YCBCR_SYNC_16:
753 case VPFE_YCBCR_SYNC_8:
754 case VPFE_BT656_10BIT:
755 ccdc->ccdc_cfg.ycbcr.vd_pol = params->vdpol;
756 ccdc->ccdc_cfg.ycbcr.hd_pol = params->hdpol;
757 break;
758
759 case VPFE_RAW_BAYER:
760 ccdc->ccdc_cfg.bayer.vd_pol = params->vdpol;
761 ccdc->ccdc_cfg.bayer.hd_pol = params->hdpol;
762 if (params->bus_width == 10)
763 ccdc->ccdc_cfg.bayer.config_params.data_sz =
764 VPFE_CCDC_DATA_10BITS;
765 else
766 ccdc->ccdc_cfg.bayer.config_params.data_sz =
767 VPFE_CCDC_DATA_8BITS;
768 vpfe_dbg(1, vpfe, "params.bus_width: %d\n",
769 params->bus_width);
770 vpfe_dbg(1, vpfe, "config_params.data_sz: %d\n",
771 ccdc->ccdc_cfg.bayer.config_params.data_sz);
772 break;
773
774 default:
775 return -EINVAL;
776 }
777
778 return 0;
779 }
780
781 static void vpfe_clear_intr(struct vpfe_ccdc *ccdc, int vdint)
782 {
783 unsigned int vpfe_int_status;
784
785 vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS);
786
787 switch (vdint) {
788 /* VD0 interrupt */
789 case VPFE_VDINT0:
790 vpfe_int_status &= ~VPFE_VDINT0;
791 vpfe_int_status |= VPFE_VDINT0;
792 break;
793
794 /* VD1 interrupt */
795 case VPFE_VDINT1:
796 vpfe_int_status &= ~VPFE_VDINT1;
797 vpfe_int_status |= VPFE_VDINT1;
798 break;
799
800 /* VD2 interrupt */
801 case VPFE_VDINT2:
802 vpfe_int_status &= ~VPFE_VDINT2;
803 vpfe_int_status |= VPFE_VDINT2;
804 break;
805
806 /* Clear all interrupts */
807 default:
808 vpfe_int_status &= ~(VPFE_VDINT0 |
809 VPFE_VDINT1 |
810 VPFE_VDINT2);
811 vpfe_int_status |= (VPFE_VDINT0 |
812 VPFE_VDINT1 |
813 VPFE_VDINT2);
814 break;
815 }
816 /* Clear specific VDINT from the status register */
817 vpfe_reg_write(ccdc, vpfe_int_status, VPFE_IRQ_STS);
818
819 vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS);
820
821 /* Acknowledge that we are done with all interrupts */
822 vpfe_reg_write(ccdc, 1, VPFE_IRQ_EOI);
823 }
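/*
 * The mask-then-set sequence above simply ensures the selected VDINT
 * bit(s) are written back as 1 to VPFE_IRQ_STS (presumably a
 * write-1-to-clear status register), and the final write to VPFE_IRQ_EOI
 * signals end-of-interrupt to the hardware.
 */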
824
825 static void vpfe_ccdc_config_defaults(struct vpfe_ccdc *ccdc)
826 {
827 ccdc->ccdc_cfg.if_type = VPFE_RAW_BAYER;
828
829 ccdc->ccdc_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT;
830 ccdc->ccdc_cfg.ycbcr.frm_fmt = CCDC_FRMFMT_INTERLACED;
831 ccdc->ccdc_cfg.ycbcr.fid_pol = VPFE_PINPOL_POSITIVE;
832 ccdc->ccdc_cfg.ycbcr.vd_pol = VPFE_PINPOL_POSITIVE;
833 ccdc->ccdc_cfg.ycbcr.hd_pol = VPFE_PINPOL_POSITIVE;
834 ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
835 ccdc->ccdc_cfg.ycbcr.buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED;
836
837 ccdc->ccdc_cfg.ycbcr.win.left = 0;
838 ccdc->ccdc_cfg.ycbcr.win.top = 0;
839 ccdc->ccdc_cfg.ycbcr.win.width = 720;
840 ccdc->ccdc_cfg.ycbcr.win.height = 576;
841 ccdc->ccdc_cfg.ycbcr.bt656_enable = 1;
842
843 ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
844 ccdc->ccdc_cfg.bayer.frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
845 ccdc->ccdc_cfg.bayer.fid_pol = VPFE_PINPOL_POSITIVE;
846 ccdc->ccdc_cfg.bayer.vd_pol = VPFE_PINPOL_POSITIVE;
847 ccdc->ccdc_cfg.bayer.hd_pol = VPFE_PINPOL_POSITIVE;
848
849 ccdc->ccdc_cfg.bayer.win.left = 0;
850 ccdc->ccdc_cfg.bayer.win.top = 0;
851 ccdc->ccdc_cfg.bayer.win.width = 800;
852 ccdc->ccdc_cfg.bayer.win.height = 600;
853 ccdc->ccdc_cfg.bayer.config_params.data_sz = VPFE_CCDC_DATA_8BITS;
854 ccdc->ccdc_cfg.bayer.config_params.alaw.gamma_wd =
855 VPFE_CCDC_GAMMA_BITS_09_0;
856 }
857
858 /*
859 * vpfe_get_ccdc_image_format - Get image parameters based on CCDC settings
860 */
861 static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe,
862 struct v4l2_format *f)
863 {
864 struct v4l2_rect image_win;
865 enum ccdc_buftype buf_type;
866 enum ccdc_frmfmt frm_fmt;
867
868 memset(f, 0, sizeof(*f));
869 f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
870 vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
871 f->fmt.pix.width = image_win.width;
872 f->fmt.pix.height = image_win.height;
873 f->fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc);
874 f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
875 f->fmt.pix.height;
876 buf_type = vpfe_ccdc_get_buftype(&vpfe->ccdc);
877 f->fmt.pix.pixelformat = vpfe_ccdc_get_pixel_format(&vpfe->ccdc);
878 frm_fmt = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
879
880 if (frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
881 f->fmt.pix.field = V4L2_FIELD_NONE;
882 } else if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
883 if (buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) {
884 f->fmt.pix.field = V4L2_FIELD_INTERLACED;
885 } else if (buf_type == CCDC_BUFTYPE_FLD_SEPARATED) {
886 f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
887 } else {
888 vpfe_err(vpfe, "Invalid buf_type\n");
889 return -EINVAL;
890 }
891 } else {
892 vpfe_err(vpfe, "Invalid frm_fmt\n");
893 return -EINVAL;
894 }
895 return 0;
896 }
897
898 static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
899 {
900 enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED;
901 u32 bpp;
902 int ret = 0;
903
904 vpfe_dbg(1, vpfe, "pixelformat: %s\n",
905 print_fourcc(vpfe->fmt.fmt.pix.pixelformat));
906
907 if (vpfe_ccdc_set_pixel_format(&vpfe->ccdc,
908 vpfe->fmt.fmt.pix.pixelformat) < 0) {
909 vpfe_err(vpfe, "couldn't set pix format in ccdc\n");
910 return -EINVAL;
911 }
912
913 /* configure the image window */
914 bpp = __get_bytesperpixel(vpfe, vpfe->current_vpfe_fmt);
915 vpfe_ccdc_set_image_window(&vpfe->ccdc, &vpfe->crop, bpp);
916
917 switch (vpfe->fmt.fmt.pix.field) {
918 case V4L2_FIELD_INTERLACED:
919 /* do nothing, since it is default */
920 ret = vpfe_ccdc_set_buftype(
921 &vpfe->ccdc,
922 CCDC_BUFTYPE_FLD_INTERLEAVED);
923 break;
924
925 case V4L2_FIELD_NONE:
926 frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
927 /* buffer type only applicable for interlaced scan */
928 break;
929
930 case V4L2_FIELD_SEQ_TB:
931 ret = vpfe_ccdc_set_buftype(
932 &vpfe->ccdc,
933 CCDC_BUFTYPE_FLD_SEPARATED);
934 break;
935
936 default:
937 return -EINVAL;
938 }
939
940 if (ret)
941 return ret;
942
943 return vpfe_ccdc_set_frame_format(&vpfe->ccdc, frm_fmt);
944 }
945
946 /*
947 * vpfe_config_image_format()
948 * For a given standard, this function sets up the default
949 * pix format & crop values in the vpfe device and ccdc. It
950 * starts with default values from the standard table.
951 * It then checks if the sub device supports get_fmt and overrides the
952 * values based on that. Crop values are set to match the scan resolution
953 * starting at 0,0. It calls vpfe_config_ccdc_image_format() to set the
954 * values in the ccdc.
955 */
956 static int vpfe_config_image_format(struct vpfe_device *vpfe,
957 v4l2_std_id std_id)
958 {
959 struct vpfe_fmt *fmt;
960 struct v4l2_mbus_framefmt mbus_fmt;
961 int i, ret;
962
963 for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
964 if (vpfe_standards[i].std_id & std_id) {
965 vpfe->std_info.active_pixels =
966 vpfe_standards[i].width;
967 vpfe->std_info.active_lines =
968 vpfe_standards[i].height;
969 vpfe->std_info.frame_format =
970 vpfe_standards[i].frame_format;
971 vpfe->std_index = i;
972
973 break;
974 }
975 }
976
977 if (i == ARRAY_SIZE(vpfe_standards)) {
978 vpfe_err(vpfe, "standard not supported\n");
979 return -EINVAL;
980 }
981
982 ret = __subdev_get_format(vpfe, &mbus_fmt);
983 if (ret)
984 return ret;
985
986 fmt = find_format_by_code(vpfe, mbus_fmt.code);
987 if (!fmt) {
988 vpfe_dbg(3, vpfe, "mbus code format (0x%08x) not found.\n",
989 mbus_fmt.code);
990 return -EINVAL;
991 }
992
993 /* Save current subdev format */
994 v4l2_fill_pix_format(&vpfe->fmt.fmt.pix, &mbus_fmt);
995 vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
996 vpfe->fmt.fmt.pix.pixelformat = fmt->fourcc;
997 vpfe_calc_format_size(vpfe, fmt, &vpfe->fmt);
998 vpfe->current_vpfe_fmt = fmt;
999
1000 /* Update the crop window based on found values */
1001 vpfe->crop.top = 0;
1002 vpfe->crop.left = 0;
1003 vpfe->crop.width = mbus_fmt.width;
1004 vpfe->crop.height = mbus_fmt.height;
1005
1006 return vpfe_config_ccdc_image_format(vpfe);
1007 }
1008
1009 static int vpfe_initialize_device(struct vpfe_device *vpfe)
1010 {
1011 struct vpfe_subdev_info *sdinfo;
1012 int ret;
1013
1014 sdinfo = &vpfe->cfg->sub_devs[0];
1015 sdinfo->sd = vpfe->sd[0];
1016 vpfe->current_input = 0;
1017 vpfe->std_index = 0;
1018 /* Configure the default format information */
1019 ret = vpfe_config_image_format(vpfe,
1020 vpfe_standards[vpfe->std_index].std_id);
1021 if (ret)
1022 return ret;
1023
1024 ret = pm_runtime_resume_and_get(vpfe->pdev);
1025 if (ret < 0)
1026 return ret;
1027
1028 vpfe_config_enable(&vpfe->ccdc, 1);
1029
1030 vpfe_ccdc_restore_defaults(&vpfe->ccdc);
1031
1032 /* Clear all VPFE interrupts */
1033 vpfe_clear_intr(&vpfe->ccdc, -1);
1034
1035 return ret;
1036 }
1037
1038 /*
1039 * vpfe_release : This function is based on the vb2_fop_release
1040 * helper function.
1041 * It has been augmented to handle module power management,
1042 * by disabling/enabling h/w module fcntl clock when necessary.
1043 */
1044 static int vpfe_release(struct file *file)
1045 {
1046 struct vpfe_device *vpfe = video_drvdata(file);
1047 bool fh_singular;
1048 int ret;
1049
1050 mutex_lock(&vpfe->lock);
1051
1052 /* Save the singular status before we call the clean-up helper */
1053 fh_singular = v4l2_fh_is_singular_file(file);
1054
1055 /* the release helper will clean up any ongoing streaming */
1056 ret = _vb2_fop_release(file, NULL);
1057
1058 /*
1059 * If this was the last open file.
1060 * Then de-initialize hw module.
1061 */
1062 if (fh_singular)
1063 vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);
1064
1065 mutex_unlock(&vpfe->lock);
1066
1067 return ret;
1068 }
1069
1070 /*
1071 * vpfe_open : This function is based on the v4l2_fh_open helper function.
1072 * It has been augmented to handle module power management,
1073 * by disabling/enabling h/w module fcntl clock when necessary.
1074 */
1075 static int vpfe_open(struct file *file)
1076 {
1077 struct vpfe_device *vpfe = video_drvdata(file);
1078 int ret;
1079
1080 mutex_lock(&vpfe->lock);
1081
1082 ret = v4l2_fh_open(file);
1083 if (ret) {
1084 vpfe_err(vpfe, "v4l2_fh_open failed\n");
1085 goto unlock;
1086 }
1087
1088 if (!v4l2_fh_is_singular_file(file))
1089 goto unlock;
1090
1091 if (vpfe_initialize_device(vpfe)) {
1092 v4l2_fh_release(file);
1093 ret = -ENODEV;
1094 }
1095
1096 unlock:
1097 mutex_unlock(&vpfe->lock);
1098 return ret;
1099 }
1100
1101 /**
1102 * vpfe_schedule_next_buffer: set next buffer address for capture
1103 * @vpfe : ptr to vpfe device
1104 *
1105 * This function will get next buffer from the dma queue and
1106 * set the buffer address in the vpfe register for capture.
1107 * the buffer is marked active
1108 */
1109 static void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
1110 {
1111 dma_addr_t addr;
1112
1113 spin_lock(&vpfe->dma_queue_lock);
1114 if (list_empty(&vpfe->dma_queue)) {
1115 spin_unlock(&vpfe->dma_queue_lock);
1116 return;
1117 }
1118
1119 vpfe->next_frm = list_entry(vpfe->dma_queue.next,
1120 struct vpfe_cap_buffer, list);
1121 list_del(&vpfe->next_frm->list);
1122 spin_unlock(&vpfe->dma_queue_lock);
1123
1124 addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0);
1125 vpfe_set_sdr_addr(&vpfe->ccdc, addr);
1126 }
1127
1128 static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe)
1129 {
1130 dma_addr_t addr;
1131
1132 addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0) +
1133 vpfe->field_off;
1134
1135 vpfe_set_sdr_addr(&vpfe->ccdc, addr);
1136 }
1137
1138 /*
1139 * vpfe_process_buffer_complete: process a completed buffer
1140 * @vpfe : ptr to vpfe device
1141 *
1142 * This function time stamps the buffer and marks it as DONE. It also
1143 * wakes up any process waiting on the QUEUE and sets the next buffer
1144 * as current
1145 */
1146 static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
1147 {
1148 vpfe->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
1149 vpfe->cur_frm->vb.field = vpfe->fmt.fmt.pix.field;
1150 vpfe->cur_frm->vb.sequence = vpfe->sequence++;
1151 vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
1152 vpfe->cur_frm = vpfe->next_frm;
1153 }
1154
1155 static void vpfe_handle_interlaced_irq(struct vpfe_device *vpfe,
1156 enum v4l2_field field)
1157 {
1158 int fid;
1159
1160 /* For interlaced or TB capture, check which field
1161 * we are in from the hardware
1162 */
1163 fid = vpfe_ccdc_getfid(&vpfe->ccdc);
1164
1165 /* switch the software maintained field id */
1166 vpfe->field ^= 1;
1167 if (fid == vpfe->field) {
1168 /* we are in sync here, continue */
1169 if (fid == 0) {
1170 /*
1171 * One frame is just being captured. If the
1172 * next frame is available, release the
1173 * current frame and move on
1174 */
1175 if (vpfe->cur_frm != vpfe->next_frm)
1176 vpfe_process_buffer_complete(vpfe);
1177
1178 if (vpfe->stopping)
1179 return;
1180
1181 /*
1182 * based on whether the two fields are stored
1183 * interleave or separately in memory,
1184 * reconfigure the CCDC memory address
1185 */
1186 if (field == V4L2_FIELD_SEQ_TB)
1187 vpfe_schedule_bottom_field(vpfe);
1188 } else {
1189 /*
1190 * if one field is just being captured, configure
1191 * the next frame: get the next frame from the empty
1192 * queue; if no frame is available, hold on to the
1193 * current buffer
1194 */
1195 if (vpfe->cur_frm == vpfe->next_frm)
1196 vpfe_schedule_next_buffer(vpfe);
1197 }
1198 } else if (fid == 0) {
1199 /*
1200 * out of sync. Recover from any hardware out-of-sync.
1201 * May lose one frame
1202 */
1203 vpfe->field = fid;
1204 }
1205 }
1206
1207 /*
1208 * vpfe_isr : ISR handler for vpfe capture (VINT0)
1209 * @irq: irq number
1210 * @dev_id: dev_id ptr
1211 *
1212 * It changes status of the captured buffer, takes next buffer from the queue
1213 * and sets its address in VPFE registers
1214 */
1215 static irqreturn_t vpfe_isr(int irq, void *dev)
1216 {
1217 struct vpfe_device *vpfe = (struct vpfe_device *)dev;
1218 enum v4l2_field field = vpfe->fmt.fmt.pix.field;
1219 int intr_status, stopping = vpfe->stopping;
1220
1221 intr_status = vpfe_reg_read(&vpfe->ccdc, VPFE_IRQ_STS);
1222
1223 if (intr_status & VPFE_VDINT0) {
1224 if (field == V4L2_FIELD_NONE) {
1225 if (vpfe->cur_frm != vpfe->next_frm)
1226 vpfe_process_buffer_complete(vpfe);
1227 } else {
1228 vpfe_handle_interlaced_irq(vpfe, field);
1229 }
1230 if (stopping) {
1231 vpfe->stopping = false;
1232 complete(&vpfe->capture_stop);
1233 }
1234 }
1235
1236 if (intr_status & VPFE_VDINT1 && !stopping) {
1237 if (field == V4L2_FIELD_NONE &&
1238 vpfe->cur_frm == vpfe->next_frm)
1239 vpfe_schedule_next_buffer(vpfe);
1240 }
1241
1242 vpfe_clear_intr(&vpfe->ccdc, intr_status);
1243
1244 return IRQ_HANDLED;
1245 }
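/*
 * Rough flow, as inferred from the code above: VDINT0 fires at the end of
 * a frame/field and completes the current buffer (or defers to
 * vpfe_handle_interlaced_irq), while VDINT1, programmed at half the image
 * height for progressive capture, is used to queue the next DMA address
 * ahead of the following frame.
 */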
1246
1247 static inline void vpfe_detach_irq(struct vpfe_device *vpfe)
1248 {
1249 unsigned int intr = VPFE_VDINT0;
1250 enum ccdc_frmfmt frame_format;
1251
1252 frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
1253 if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
1254 intr |= VPFE_VDINT1;
1255
1256 vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_CLR);
1257 }
1258
1259 static inline void vpfe_attach_irq(struct vpfe_device *vpfe)
1260 {
1261 unsigned int intr = VPFE_VDINT0;
1262 enum ccdc_frmfmt frame_format;
1263
1264 frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
1265 if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
1266 intr |= VPFE_VDINT1;
1267
1268 vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_SET);
1269 }
1270
1271 static int vpfe_querycap(struct file *file, void *priv,
1272 struct v4l2_capability *cap)
1273 {
1274 strscpy(cap->driver, VPFE_MODULE_NAME, sizeof(cap->driver));
1275 strscpy(cap->card, "TI AM437x VPFE", sizeof(cap->card));
1276 return 0;
1277 }
1278
1279 /* get the format set at output pad of the adjacent subdev */
1280 static int __subdev_get_format(struct vpfe_device *vpfe,
1281 struct v4l2_mbus_framefmt *fmt)
1282 {
1283 struct v4l2_subdev *sd = vpfe->current_subdev->sd;
1284 struct v4l2_subdev_format sd_fmt = {
1285 .which = V4L2_SUBDEV_FORMAT_ACTIVE,
1286 .pad = 0,
1287 };
1288 struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
1289 int ret;
1290
1291 ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt);
1292 if (ret)
1293 return ret;
1294
1295 *fmt = *mbus_fmt;
1296
1297 vpfe_dbg(1, vpfe, "%s: %dx%d code:%04X\n", __func__,
1298 fmt->width, fmt->height, fmt->code);
1299
1300 return 0;
1301 }
1302
1303 /* set the format at output pad of the adjacent subdev */
1304 static int __subdev_set_format(struct vpfe_device *vpfe,
1305 struct v4l2_mbus_framefmt *fmt)
1306 {
1307 struct v4l2_subdev *sd = vpfe->current_subdev->sd;
1308 struct v4l2_subdev_format sd_fmt = {
1309 .which = V4L2_SUBDEV_FORMAT_ACTIVE,
1310 .pad = 0,
1311 };
1312 struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
1313 int ret;
1314
1315 *mbus_fmt = *fmt;
1316
1317 ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &sd_fmt);
1318 if (ret)
1319 return ret;
1320
1321 vpfe_dbg(1, vpfe, "%s %dx%d code:%04X\n", __func__,
1322 fmt->width, fmt->height, fmt->code);
1323
1324 return 0;
1325 }
1326
1327 static int vpfe_calc_format_size(struct vpfe_device *vpfe,
1328 const struct vpfe_fmt *fmt,
1329 struct v4l2_format *f)
1330 {
1331 u32 bpp;
1332
1333 if (!fmt) {
1334 vpfe_dbg(3, vpfe, "No vpfe_fmt provided!\n");
1335 return -EINVAL;
1336 }
1337
1338 bpp = __get_bytesperpixel(vpfe, fmt);
1339
1340 /* pitch should be 32-byte aligned */
1341 f->fmt.pix.bytesperline = ALIGN(f->fmt.pix.width * bpp, 32);
1342 f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
1343 f->fmt.pix.height;
1344
1345 vpfe_dbg(3, vpfe, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n",
1346 __func__, print_fourcc(f->fmt.pix.pixelformat),
1347 f->fmt.pix.width, f->fmt.pix.height,
1348 f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
1349
1350 return 0;
1351 }
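/*
 * Example (assuming an 8-bit bus): 640x480 V4L2_PIX_FMT_YUYV gives
 * bpp = 2, bytesperline = ALIGN(640 * 2, 32) = 1280 and
 * sizeimage = 1280 * 480 = 614400 bytes.
 */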
1352
1353 static int vpfe_g_fmt(struct file *file, void *priv,
1354 struct v4l2_format *fmt)
1355 {
1356 struct vpfe_device *vpfe = video_drvdata(file);
1357
1358 *fmt = vpfe->fmt;
1359
1360 return 0;
1361 }
1362
1363 static int vpfe_enum_fmt(struct file *file, void *priv,
1364 struct v4l2_fmtdesc *f)
1365 {
1366 struct vpfe_device *vpfe = video_drvdata(file);
1367 struct vpfe_subdev_info *sdinfo;
1368 struct vpfe_fmt *fmt;
1369
1370 sdinfo = vpfe->current_subdev;
1371 if (!sdinfo->sd)
1372 return -EINVAL;
1373
1374 if (f->index >= vpfe->num_active_fmt)
1375 return -EINVAL;
1376
1377 fmt = vpfe->active_fmt[f->index];
1378
1379 f->pixelformat = fmt->fourcc;
1380
1381 vpfe_dbg(1, vpfe, "%s: mbus index: %d code: %x pixelformat: %s\n",
1382 __func__, f->index, fmt->code, print_fourcc(fmt->fourcc));
1383
1384 return 0;
1385 }
1386
1387 static int vpfe_try_fmt(struct file *file, void *priv,
1388 struct v4l2_format *f)
1389 {
1390 struct vpfe_device *vpfe = video_drvdata(file);
1391 struct v4l2_subdev *sd = vpfe->current_subdev->sd;
1392 const struct vpfe_fmt *fmt;
1393 struct v4l2_subdev_frame_size_enum fse = {
1394 .which = V4L2_SUBDEV_FORMAT_ACTIVE,
1395 };
1396 int ret, found;
1397
1398 fmt = find_format_by_pix(vpfe, f->fmt.pix.pixelformat);
1399 if (!fmt) {
1400 /* default to first entry */
1401 vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
1402 f->fmt.pix.pixelformat);
1403 fmt = vpfe->active_fmt[0];
1404 f->fmt.pix.pixelformat = fmt->fourcc;
1405 }
1406
1407 f->fmt.pix.field = vpfe->fmt.fmt.pix.field;
1408
1409 /* check for/find a valid width/height */
1410 ret = 0;
1411 found = false;
1412 fse.pad = 0;
1413 fse.code = fmt->code;
1414 for (fse.index = 0; ; fse.index++) {
1415 ret = v4l2_subdev_call(sd, pad, enum_frame_size,
1416 NULL, &fse);
1417 if (ret)
1418 break;
1419
1420 if (f->fmt.pix.width == fse.max_width &&
1421 f->fmt.pix.height == fse.max_height) {
1422 found = true;
1423 break;
1424 } else if (f->fmt.pix.width >= fse.min_width &&
1425 f->fmt.pix.width <= fse.max_width &&
1426 f->fmt.pix.height >= fse.min_height &&
1427 f->fmt.pix.height <= fse.max_height) {
1428 found = true;
1429 break;
1430 }
1431 }
1432
1433 if (!found) {
1434 /* use existing values as default */
1435 f->fmt.pix.width = vpfe->fmt.fmt.pix.width;
1436 f->fmt.pix.height = vpfe->fmt.fmt.pix.height;
1437 }
1438
1439 /*
1440 * Use current colorspace for now, it will get
1441 * updated properly during s_fmt
1442 */
1443 f->fmt.pix.colorspace = vpfe->fmt.fmt.pix.colorspace;
1444 return vpfe_calc_format_size(vpfe, fmt, f);
1445 }
1446
1447 static int vpfe_s_fmt(struct file *file, void *priv,
1448 struct v4l2_format *fmt)
1449 {
1450 struct vpfe_device *vpfe = video_drvdata(file);
1451 struct vpfe_fmt *f;
1452 struct v4l2_mbus_framefmt mbus_fmt;
1453 int ret;
1454
1455 /* If streaming is started, return error */
1456 if (vb2_is_busy(&vpfe->buffer_queue)) {
1457 vpfe_err(vpfe, "%s device busy\n", __func__);
1458 return -EBUSY;
1459 }
1460
1461 ret = vpfe_try_fmt(file, priv, fmt);
1462 if (ret < 0)
1463 return ret;
1464
1465 f = find_format_by_pix(vpfe, fmt->fmt.pix.pixelformat);
1466
1467 v4l2_fill_mbus_format(&mbus_fmt, &fmt->fmt.pix, f->code);
1468
1469 ret = __subdev_set_format(vpfe, &mbus_fmt);
1470 if (ret)
1471 return ret;
1472
1473 /* Just double check nothing has gone wrong */
1474 if (mbus_fmt.code != f->code) {
1475 vpfe_dbg(3, vpfe,
1476 "%s subdev changed format on us, this should not happen\n",
1477 __func__);
1478 return -EINVAL;
1479 }
1480
1481 v4l2_fill_pix_format(&vpfe->fmt.fmt.pix, &mbus_fmt);
1482 vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1483 vpfe->fmt.fmt.pix.pixelformat = f->fourcc;
1484 vpfe_calc_format_size(vpfe, f, &vpfe->fmt);
1485 *fmt = vpfe->fmt;
1486 vpfe->current_vpfe_fmt = f;
1487
1488 /* Update the crop window based on found values */
1489 vpfe->crop.width = fmt->fmt.pix.width;
1490 vpfe->crop.height = fmt->fmt.pix.height;
1491
1492 /* set image capture parameters in the ccdc */
1493 return vpfe_config_ccdc_image_format(vpfe);
1494 }
1495
1496 static int vpfe_enum_size(struct file *file, void *priv,
1497 struct v4l2_frmsizeenum *fsize)
1498 {
1499 struct vpfe_device *vpfe = video_drvdata(file);
1500 struct v4l2_subdev_frame_size_enum fse = {
1501 .which = V4L2_SUBDEV_FORMAT_ACTIVE,
1502 };
1503 struct v4l2_subdev *sd = vpfe->current_subdev->sd;
1504 struct vpfe_fmt *fmt;
1505 int ret;
1506
1507 /* check for valid format */
1508 fmt = find_format_by_pix(vpfe, fsize->pixel_format);
1509 if (!fmt) {
1510 vpfe_dbg(3, vpfe, "Invalid pixel code: %x\n",
1511 fsize->pixel_format);
1512 return -EINVAL;
1513 }
1514
1515 memset(fsize->reserved, 0x0, sizeof(fsize->reserved));
1516
1517 fse.index = fsize->index;
1518 fse.pad = 0;
1519 fse.code = fmt->code;
1520 ret = v4l2_subdev_call(sd, pad, enum_frame_size, NULL, &fse);
1521 if (ret)
1522 return ret;
1523
1524 vpfe_dbg(1, vpfe, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
1525 __func__, fse.index, fse.code, fse.min_width, fse.max_width,
1526 fse.min_height, fse.max_height);
1527
1528 fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1529 fsize->discrete.width = fse.max_width;
1530 fsize->discrete.height = fse.max_height;
1531
1532 vpfe_dbg(1, vpfe, "%s: index: %d pixformat: %s size: %dx%d\n",
1533 __func__, fsize->index, print_fourcc(fsize->pixel_format),
1534 fsize->discrete.width, fsize->discrete.height);
1535
1536 return 0;
1537 }
1538
1539 /*
1540 * vpfe_get_subdev_input_index - Get subdev index and subdev input index for a
1541 * given app input index
1542 */
1543 static int
1544 vpfe_get_subdev_input_index(struct vpfe_device *vpfe,
1545 int *subdev_index,
1546 int *subdev_input_index,
1547 int app_input_index)
1548 {
1549 int i, j = 0;
1550
1551 for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
1552 if (app_input_index < (j + 1)) {
1553 *subdev_index = i;
1554 *subdev_input_index = app_input_index - j;
1555 return 0;
1556 }
1557 j++;
1558 }
1559 return -EINVAL;
1560 }
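/*
 * As written, the loop counts exactly one input per async subdev (j is
 * incremented once per iteration), so the application input index maps
 * one-to-one onto the subdev index and *subdev_input_index is always 0.
 */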
1561
1562 /*
1563 * vpfe_get_app_input_index - Get app input index for a given subdev input index
1564 * The driver stores the input index of the current sub device and translates it
1565 * when the application requests the current input
1566 */
1567 static int vpfe_get_app_input_index(struct vpfe_device *vpfe,
1568 int *app_input_index)
1569 {
1570 struct vpfe_config *cfg = vpfe->cfg;
1571 struct vpfe_subdev_info *sdinfo;
1572 struct i2c_client *client;
1573 struct i2c_client *curr_client;
1574 int i, j = 0;
1575
1576 curr_client = v4l2_get_subdevdata(vpfe->current_subdev->sd);
1577 for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
1578 sdinfo = &cfg->sub_devs[i];
1579 client = v4l2_get_subdevdata(sdinfo->sd);
1580 if (client->addr == curr_client->addr &&
1581 client->adapter->nr == curr_client->adapter->nr) {
1582 if (vpfe->current_input >= 1)
1583 return -1;
1584 *app_input_index = j + vpfe->current_input;
1585 return 0;
1586 }
1587 j++;
1588 }
1589 return -EINVAL;
1590 }
1591
1592 static int vpfe_enum_input(struct file *file, void *priv,
1593 struct v4l2_input *inp)
1594 {
1595 struct vpfe_device *vpfe = video_drvdata(file);
1596 struct vpfe_subdev_info *sdinfo;
1597 int subdev, index;
1598
1599 if (vpfe_get_subdev_input_index(vpfe, &subdev, &index,
1600 inp->index) < 0) {
1601 vpfe_dbg(1, vpfe,
1602 "input information not found for the subdev\n");
1603 return -EINVAL;
1604 }
1605 sdinfo = &vpfe->cfg->sub_devs[subdev];
1606 *inp = sdinfo->inputs[index];
1607
1608 return 0;
1609 }
1610
1611 static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
1612 {
1613 struct vpfe_device *vpfe = video_drvdata(file);
1614
1615 return vpfe_get_app_input_index(vpfe, index);
1616 }
1617
1618 /* Assumes caller is holding vpfe_dev->lock */
1619 static int vpfe_set_input(struct vpfe_device *vpfe, unsigned int index)
1620 {
1621 int subdev_index = 0, inp_index = 0;
1622 struct vpfe_subdev_info *sdinfo;
1623 struct vpfe_route *route;
1624 u32 input, output;
1625 int ret;
1626
1627 /* If streaming is started, return error */
1628 if (vb2_is_busy(&vpfe->buffer_queue)) {
1629 vpfe_err(vpfe, "%s device busy\n", __func__);
1630 return -EBUSY;
1631 }
1632 ret = vpfe_get_subdev_input_index(vpfe,
1633 &subdev_index,
1634 &inp_index,
1635 index);
1636 if (ret < 0) {
1637 vpfe_err(vpfe, "invalid input index: %d\n", index);
1638 goto get_out;
1639 }
1640
1641 sdinfo = &vpfe->cfg->sub_devs[subdev_index];
1642 sdinfo->sd = vpfe->sd[subdev_index];
1643 route = &sdinfo->routes[inp_index];
1644 if (route && sdinfo->can_route) {
1645 input = route->input;
1646 output = route->output;
1647 if (sdinfo->sd) {
1648 ret = v4l2_subdev_call(sdinfo->sd, video,
1649 s_routing, input, output, 0);
1650 if (ret) {
1651 vpfe_err(vpfe, "s_routing failed\n");
1652 ret = -EINVAL;
1653 goto get_out;
1654 }
1655 }
1656
1657 }
1658
1659 vpfe->current_subdev = sdinfo;
1660 if (sdinfo->sd)
1661 vpfe->v4l2_dev.ctrl_handler = sdinfo->sd->ctrl_handler;
1662 vpfe->current_input = index;
1663 vpfe->std_index = 0;
1664
1665 /* set the bus/interface parameter for the sub device in ccdc */
1666 ret = vpfe_ccdc_set_hw_if_params(&vpfe->ccdc, &sdinfo->vpfe_param);
1667 if (ret)
1668 return ret;
1669
1670 /* set the default image parameters in the device */
1671 return vpfe_config_image_format(vpfe,
1672 vpfe_standards[vpfe->std_index].std_id);
1673
1674 get_out:
1675 return ret;
1676 }
1677
1678 static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
1679 {
1680 struct vpfe_device *vpfe = video_drvdata(file);
1681
1682 return vpfe_set_input(vpfe, index);
1683 }
1684
1685 static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
1686 {
1687 struct vpfe_device *vpfe = video_drvdata(file);
1688 struct vpfe_subdev_info *sdinfo;
1689
1690 sdinfo = vpfe->current_subdev;
1691 if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
1692 return -ENODATA;
1693
1694 /* Call querystd function of decoder device */
1695 return v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id,
1696 video, querystd, std_id);
1697 }
1698
1699 static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
1700 {
1701 struct vpfe_device *vpfe = video_drvdata(file);
1702 struct vpfe_subdev_info *sdinfo;
1703 int ret;
1704
1705 sdinfo = vpfe->current_subdev;
1706 if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
1707 return -ENODATA;
1708
1709 /* if trying to set the same std then nothing to do */
1710 if (vpfe_standards[vpfe->std_index].std_id == std_id)
1711 return 0;
1712
1713 /* If streaming is started, return error */
1714 if (vb2_is_busy(&vpfe->buffer_queue)) {
1715 vpfe_err(vpfe, "%s device busy\n", __func__);
1716 ret = -EBUSY;
1717 return ret;
1718 }
1719
1720 ret = v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id,
1721 video, s_std, std_id);
1722 if (ret < 0) {
1723 vpfe_err(vpfe, "Failed to set standard\n");
1724 return ret;
1725 }
1726 ret = vpfe_config_image_format(vpfe, std_id);
1727
1728 return ret;
1729 }
1730
1731 static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
1732 {
1733 struct vpfe_device *vpfe = video_drvdata(file);
1734 struct vpfe_subdev_info *sdinfo;
1735
1736 sdinfo = vpfe->current_subdev;
1737 if (sdinfo->inputs[0].capabilities != V4L2_IN_CAP_STD)
1738 return -ENODATA;
1739
1740 *std_id = vpfe_standards[vpfe->std_index].std_id;
1741
1742 return 0;
1743 }
1744
1745 /*
1746 * vpfe_calculate_offsets : This function calculates buffers offset
1747 * for top and bottom field
1748 */
1749 static void vpfe_calculate_offsets(struct vpfe_device *vpfe)
1750 {
1751 struct v4l2_rect image_win;
1752
1753 vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
1754 vpfe->field_off = image_win.height * image_win.width;
1755 }
1756
1757 /*
1758 * vpfe_queue_setup - Callback function for buffer setup.
1759 * @vq: vb2_queue ptr
1760 * @nbuffers: ptr to number of buffers requested by application
1761 * @nplanes: contains number of distinct video planes needed to hold a frame
1762 * @sizes[]: contains the size (in bytes) of each plane.
1763 * @alloc_devs: ptr to allocation context
1764 *
1765 * This callback function is called when reqbuf() is called to adjust
1766 * the buffer count and buffer size
1767 */
1768 static int vpfe_queue_setup(struct vb2_queue *vq,
1769 unsigned int *nbuffers, unsigned int *nplanes,
1770 unsigned int sizes[], struct device *alloc_devs[])
1771 {
1772 struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
1773 unsigned size = vpfe->fmt.fmt.pix.sizeimage;
1774 unsigned int q_num_bufs = vb2_get_num_buffers(vq);
1775
1776 if (q_num_bufs + *nbuffers < 3)
1777 *nbuffers = 3 - q_num_bufs;
1778
1779 if (*nplanes) {
1780 if (sizes[0] < size)
1781 return -EINVAL;
1782 size = sizes[0];
1783 }
1784
1785 *nplanes = 1;
1786 sizes[0] = size;
1787
1788 vpfe_dbg(1, vpfe,
1789 "nbuffers=%d, size=%u\n", *nbuffers, sizes[0]);
1790
1791 /* Calculate field offset */
1792 vpfe_calculate_offsets(vpfe);
1793
1794 return 0;
1795 }
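/*
 * Example: with sizeimage = 614400 and no buffers allocated yet, a
 * REQBUFS request for 2 buffers is bumped to 3 single-plane buffers of
 * 614400 bytes each; a request for 3 or more is left unchanged.
 */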
1796
1797 /*
1798 * vpfe_buffer_prepare : callback function for buffer prepare
1799 * @vb: ptr to vb2_buffer
1800 *
1801 * This is the callback function for buffer prepare when vb2_qbuf()
1802 * is called. The plane payload is set to the negotiated image size and
1803 * checked against the allocated plane size
1804 */
1805 static int vpfe_buffer_prepare(struct vb2_buffer *vb)
1806 {
1807 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1808 struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
1809
1810 vb2_set_plane_payload(vb, 0, vpfe->fmt.fmt.pix.sizeimage);
1811
1812 if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
1813 return -EINVAL;
1814
1815 vbuf->field = vpfe->fmt.fmt.pix.field;
1816
1817 return 0;
1818 }
1819
1820 /*
1821 * vpfe_buffer_queue : Callback function to add buffer to DMA queue
1822 * @vb: ptr to vb2_buffer
1823 */
1824 static void vpfe_buffer_queue(struct vb2_buffer *vb)
1825 {
1826 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1827 struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
1828 struct vpfe_cap_buffer *buf = to_vpfe_buffer(vbuf);
1829 unsigned long flags = 0;
1830
1831 /* add the buffer to the DMA queue */
1832 spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
1833 list_add_tail(&buf->list, &vpfe->dma_queue);
1834 spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
1835 }
1836
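/*
 * vpfe_return_all_buffers : Return all queued buffers, including the
 * current and next frames, to videobuf2 in the given state
 */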
1837 static void vpfe_return_all_buffers(struct vpfe_device *vpfe,
1838 enum vb2_buffer_state state)
1839 {
1840 struct vpfe_cap_buffer *buf, *node;
1841 unsigned long flags;
1842
1843 spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
1844 list_for_each_entry_safe(buf, node, &vpfe->dma_queue, list) {
1845 vb2_buffer_done(&buf->vb.vb2_buf, state);
1846 list_del(&buf->list);
1847 }
1848
1849 if (vpfe->cur_frm)
1850 vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf, state);
1851
1852 if (vpfe->next_frm && vpfe->next_frm != vpfe->cur_frm)
1853 vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf, state);
1854
1855 vpfe->cur_frm = NULL;
1856 vpfe->next_frm = NULL;
1857 spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
1858 }
1859
1860 /*
1861 * vpfe_start_streaming : Starts the DMA engine for streaming
1862 * @vq: ptr to vb2_queue
1863 * @count: number of buffers
1864 */
1865 static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
1866 {
1867 struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
1868 struct vpfe_subdev_info *sdinfo;
1869 unsigned long flags;
1870 unsigned long addr;
1871 int ret;
1872
1873 spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
1874
1875 vpfe->field = 0;
1876 vpfe->sequence = 0;
1877
1878 sdinfo = vpfe->current_subdev;
1879
1880 vpfe_attach_irq(vpfe);
1881
1882 vpfe->stopping = false;
1883 init_completion(&vpfe->capture_stop);
1884
1885 if (vpfe->ccdc.ccdc_cfg.if_type == VPFE_RAW_BAYER)
1886 vpfe_ccdc_config_raw(&vpfe->ccdc);
1887 else
1888 vpfe_ccdc_config_ycbcr(&vpfe->ccdc);
1889
1890 /* Get the next frame from the buffer queue */
1891 vpfe->next_frm = list_entry(vpfe->dma_queue.next,
1892 struct vpfe_cap_buffer, list);
1893 vpfe->cur_frm = vpfe->next_frm;
1894 /* Remove buffer from the buffer queue */
1895 list_del(&vpfe->cur_frm->list);
1896 spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
1897
1898 addr = vb2_dma_contig_plane_dma_addr(&vpfe->cur_frm->vb.vb2_buf, 0);
1899
1900 vpfe_set_sdr_addr(&vpfe->ccdc, (unsigned long)(addr));
1901
1902 vpfe_pcr_enable(&vpfe->ccdc, 1);
1903
1904 ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 1);
1905 if (ret < 0) {
1906 vpfe_err(vpfe, "Error in attaching interrupt handle\n");
1907 goto err;
1908 }
1909
1910 return 0;
1911
1912 err:
1913 vpfe_return_all_buffers(vpfe, VB2_BUF_STATE_QUEUED);
1914 vpfe_pcr_enable(&vpfe->ccdc, 0);
1915 return ret;
1916 }
1917
1918 /*
1919 * vpfe_stop_streaming : Stop the DMA engine
1920 * @vq: ptr to vb2_queue
1921 *
1922 * This callback stops the DMA engine and releases any remaining
1923 * buffers in the DMA queue.
1924 */
1925 static void vpfe_stop_streaming(struct vb2_queue *vq)
1926 {
1927 struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
1928 struct vpfe_subdev_info *sdinfo;
1929 int ret;
1930
1931 vpfe_pcr_enable(&vpfe->ccdc, 0);
1932
1933 /* Wait for the last frame to be captured */
1934 vpfe->stopping = true;
1935 wait_for_completion_timeout(&vpfe->capture_stop,
1936 msecs_to_jiffies(250));
1937
1938 vpfe_detach_irq(vpfe);
1939
1940 sdinfo = vpfe->current_subdev;
1941 ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 0);
1942 if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
1943 vpfe_dbg(1, vpfe, "stream off failed in subdev\n");
1944
1945 /* release all active buffers */
1946 vpfe_return_all_buffers(vpfe, VB2_BUF_STATE_ERROR);
1947 }
1948
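/*
 * vpfe_g_pixelaspect : VIDIOC_G_PIXELASPECT handler; reports the pixel
 * aspect ratio of the active video standard
 */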
1949 static int vpfe_g_pixelaspect(struct file *file, void *priv,
1950 int type, struct v4l2_fract *f)
1951 {
1952 struct vpfe_device *vpfe = video_drvdata(file);
1953
1954 if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1955 vpfe->std_index >= ARRAY_SIZE(vpfe_standards))
1956 return -EINVAL;
1957
1958 *f = vpfe_standards[vpfe->std_index].pixelaspect;
1959
1960 return 0;
1961 }
1962
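/*
 * vpfe_g_selection : VIDIOC_G_SELECTION handler; reports the crop bounds
 * and default from the active standard and the current crop rectangle
 */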
1963 static int
1964 vpfe_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
1965 {
1966 struct vpfe_device *vpfe = video_drvdata(file);
1967
1968 if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1969 vpfe->std_index >= ARRAY_SIZE(vpfe_standards))
1970 return -EINVAL;
1971
1972 switch (s->target) {
1973 case V4L2_SEL_TGT_CROP_BOUNDS:
1974 case V4L2_SEL_TGT_CROP_DEFAULT:
1975 s->r.left = 0;
1976 s->r.top = 0;
1977 s->r.width = vpfe_standards[vpfe->std_index].width;
1978 s->r.height = vpfe_standards[vpfe->std_index].height;
1979 break;
1980
1981 case V4L2_SEL_TGT_CROP:
1982 s->r = vpfe->crop;
1983 break;
1984
1985 default:
1986 return -EINVAL;
1987 }
1988
1989 return 0;
1990 }
1991
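/*
 * vpfe_s_selection : VIDIOC_S_SELECTION handler; clamps the requested
 * crop rectangle, programs the CCDC image window and updates the
 * resulting pixel format
 */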
1992 static int
1993 vpfe_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
1994 {
1995 struct vpfe_device *vpfe = video_drvdata(file);
1996 struct v4l2_rect cr = vpfe->crop;
1997 struct v4l2_rect r = s->r;
1998 u32 bpp;
1999
2000 /* If streaming is started, return error */
2001 if (vb2_is_busy(&vpfe->buffer_queue)) {
2002 vpfe_err(vpfe, "%s device busy\n", __func__);
2003 return -EBUSY;
2004 }
2005
2006 if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
2007 s->target != V4L2_SEL_TGT_CROP)
2008 return -EINVAL;
2009
2010 v4l_bound_align_image(&r.width, 0, cr.width, 0,
2011 &r.height, 0, cr.height, 0, 0);
2012
2013 r.left = clamp_t(unsigned int, r.left, 0, cr.width - r.width);
2014 r.top = clamp_t(unsigned int, r.top, 0, cr.height - r.height);
2015
2016 if (s->flags & V4L2_SEL_FLAG_LE && !v4l2_rect_enclosed(&r, &s->r))
2017 return -ERANGE;
2018
2019 if (s->flags & V4L2_SEL_FLAG_GE && !v4l2_rect_enclosed(&s->r, &r))
2020 return -ERANGE;
2021
2022 s->r = vpfe->crop = r;
2023
2024 bpp = __get_bytesperpixel(vpfe, vpfe->current_vpfe_fmt);
2025 vpfe_ccdc_set_image_window(&vpfe->ccdc, &r, bpp);
2026 vpfe->fmt.fmt.pix.width = r.width;
2027 vpfe->fmt.fmt.pix.height = r.height;
2028 vpfe->fmt.fmt.pix.bytesperline =
2029 vpfe_ccdc_get_line_length(&vpfe->ccdc);
2030 vpfe->fmt.fmt.pix.sizeimage = vpfe->fmt.fmt.pix.bytesperline *
2031 vpfe->fmt.fmt.pix.height;
2032
2033 vpfe_dbg(1, vpfe, "cropped (%d,%d)/%dx%d of %dx%d\n",
2034 r.left, r.top, r.width, r.height, cr.width, cr.height);
2035
2036 return 0;
2037 }
2038
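/*
 * vpfe_ioctl_default : Handler for the driver-private
 * VIDIOC_AM437X_CCDC_CFG ioctl; passes raw CCDC parameters from user
 * space to the CCDC and refreshes the image format
 */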
2039 static long vpfe_ioctl_default(struct file *file, void *priv,
2040 bool valid_prio, unsigned int cmd, void *param)
2041 {
2042 struct vpfe_device *vpfe = video_drvdata(file);
2043 int ret;
2044
2045 if (!valid_prio) {
2046 vpfe_err(vpfe, "%s: priority check failed\n", __func__);
2047 return -EBUSY;
2048 }
2049
2050 /* If streaming is started, return error */
2051 if (vb2_is_busy(&vpfe->buffer_queue)) {
2052 vpfe_err(vpfe, "%s device busy\n", __func__);
2053 return -EBUSY;
2054 }
2055
2056 switch (cmd) {
2057 case VIDIOC_AM437X_CCDC_CFG:
2058 ret = vpfe_ccdc_set_params(&vpfe->ccdc, (void __user *)param);
2059 if (ret) {
2060 vpfe_dbg(2, vpfe,
2061 "Error setting parameters in CCDC\n");
2062 return ret;
2063 }
2064 ret = vpfe_get_ccdc_image_format(vpfe,
2065 &vpfe->fmt);
2066 if (ret < 0) {
2067 vpfe_dbg(2, vpfe,
2068 "Invalid image format at CCDC\n");
2069 return ret;
2070 }
2071 break;
2072
2073 default:
2074 ret = -ENOTTY;
2075 break;
2076 }
2077
2078 return ret;
2079 }
2080
2081 static const struct vb2_ops vpfe_video_qops = {
2082 .wait_prepare = vb2_ops_wait_prepare,
2083 .wait_finish = vb2_ops_wait_finish,
2084 .queue_setup = vpfe_queue_setup,
2085 .buf_prepare = vpfe_buffer_prepare,
2086 .buf_queue = vpfe_buffer_queue,
2087 .start_streaming = vpfe_start_streaming,
2088 .stop_streaming = vpfe_stop_streaming,
2089 };
2090
2091 /* vpfe capture driver file operations */
2092 static const struct v4l2_file_operations vpfe_fops = {
2093 .owner = THIS_MODULE,
2094 .open = vpfe_open,
2095 .release = vpfe_release,
2096 .read = vb2_fop_read,
2097 .poll = vb2_fop_poll,
2098 .unlocked_ioctl = video_ioctl2,
2099 .mmap = vb2_fop_mmap,
2100 };
2101
2102 /* vpfe capture ioctl operations */
2103 static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
2104 .vidioc_querycap = vpfe_querycap,
2105 .vidioc_enum_fmt_vid_cap = vpfe_enum_fmt,
2106 .vidioc_g_fmt_vid_cap = vpfe_g_fmt,
2107 .vidioc_s_fmt_vid_cap = vpfe_s_fmt,
2108 .vidioc_try_fmt_vid_cap = vpfe_try_fmt,
2109
2110 .vidioc_enum_framesizes = vpfe_enum_size,
2111
2112 .vidioc_enum_input = vpfe_enum_input,
2113 .vidioc_g_input = vpfe_g_input,
2114 .vidioc_s_input = vpfe_s_input,
2115
2116 .vidioc_querystd = vpfe_querystd,
2117 .vidioc_s_std = vpfe_s_std,
2118 .vidioc_g_std = vpfe_g_std,
2119
2120 .vidioc_reqbufs = vb2_ioctl_reqbufs,
2121 .vidioc_create_bufs = vb2_ioctl_create_bufs,
2122 .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
2123 .vidioc_querybuf = vb2_ioctl_querybuf,
2124 .vidioc_qbuf = vb2_ioctl_qbuf,
2125 .vidioc_dqbuf = vb2_ioctl_dqbuf,
2126 .vidioc_expbuf = vb2_ioctl_expbuf,
2127 .vidioc_streamon = vb2_ioctl_streamon,
2128 .vidioc_streamoff = vb2_ioctl_streamoff,
2129
2130 .vidioc_log_status = v4l2_ctrl_log_status,
2131 .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
2132 .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
2133
2134 .vidioc_g_pixelaspect = vpfe_g_pixelaspect,
2135 .vidioc_g_selection = vpfe_g_selection,
2136 .vidioc_s_selection = vpfe_s_selection,
2137
2138 .vidioc_default = vpfe_ioctl_default,
2139 };
2140
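/*
 * vpfe_async_bound : Async notifier bound callback; matches the subdev
 * against the configured fwnodes and builds the list of active formats
 * from the mbus codes the subdev supports
 */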
2141 static int
2142 vpfe_async_bound(struct v4l2_async_notifier *notifier,
2143 struct v4l2_subdev *subdev,
2144 struct v4l2_async_connection *asd)
2145 {
2146 struct vpfe_device *vpfe = container_of(notifier->v4l2_dev,
2147 struct vpfe_device, v4l2_dev);
2148 struct vpfe_subdev_info *sdinfo;
2149 struct vpfe_fmt *fmt;
2150 int ret = 0;
2151 bool found = false;
2152 int i, j, k;
2153
2154 for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
2155 if (vpfe->cfg->asd[i]->match.fwnode ==
2156 asd[i].match.fwnode) {
2157 sdinfo = &vpfe->cfg->sub_devs[i];
2158 vpfe->sd[i] = subdev;
2159 vpfe->sd[i]->grp_id = sdinfo->grp_id;
2160 found = true;
2161 break;
2162 }
2163 }
2164
2165 if (!found) {
2166 vpfe_info(vpfe, "sub device (%s) not matched\n", subdev->name);
2167 return -EINVAL;
2168 }
2169
2170 vpfe->video_dev.tvnorms |= sdinfo->inputs[0].std;
2171
2172 vpfe->num_active_fmt = 0;
2173 for (j = 0, i = 0; (ret != -EINVAL); ++j) {
2174 struct v4l2_subdev_mbus_code_enum mbus_code = {
2175 .index = j,
2176 .which = V4L2_SUBDEV_FORMAT_ACTIVE,
2177 };
2178
2179 ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
2180 NULL, &mbus_code);
2181 if (ret)
2182 continue;
2183
2184 vpfe_dbg(3, vpfe,
2185 "subdev %s: code: %04x idx: %d\n",
2186 subdev->name, mbus_code.code, j);
2187
2188 for (k = 0; k < ARRAY_SIZE(formats); k++) {
2189 fmt = &formats[k];
2190 if (mbus_code.code != fmt->code)
2191 continue;
2192 vpfe->active_fmt[i] = fmt;
2193 vpfe_dbg(3, vpfe,
2194 "matched fourcc: %s code: %04x idx: %d\n",
2195 print_fourcc(fmt->fourcc), mbus_code.code, i);
2196 vpfe->num_active_fmt = ++i;
2197 }
2198 }
2199
2200 if (!i) {
2201 vpfe_err(vpfe, "No suitable format reported by subdev %s\n",
2202 subdev->name);
2203 return -EINVAL;
2204 }
2205 return 0;
2206 }
2207
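/*
 * vpfe_probe_complete : Final part of probe; sets up the default input,
 * initializes the vb2 queue and registers the video device
 */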
2208 static int vpfe_probe_complete(struct vpfe_device *vpfe)
2209 {
2210 struct video_device *vdev;
2211 struct vb2_queue *q;
2212 int err;
2213
2214 spin_lock_init(&vpfe->dma_queue_lock);
2215 mutex_init(&vpfe->lock);
2216
2217 vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
2218
2219 /* set first sub device as current one */
2220 vpfe->current_subdev = &vpfe->cfg->sub_devs[0];
2221 vpfe->v4l2_dev.ctrl_handler = vpfe->sd[0]->ctrl_handler;
2222
2223 err = vpfe_set_input(vpfe, 0);
2224 if (err)
2225 goto probe_out;
2226
2227 /* Initialize videobuf2 queue as per the buffer type */
2228 q = &vpfe->buffer_queue;
2229 q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
2230 q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
2231 q->drv_priv = vpfe;
2232 q->ops = &vpfe_video_qops;
2233 q->mem_ops = &vb2_dma_contig_memops;
2234 q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
2235 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
2236 q->lock = &vpfe->lock;
2237 q->min_queued_buffers = 1;
2238 q->dev = vpfe->pdev;
2239
2240 err = vb2_queue_init(q);
2241 if (err) {
2242 vpfe_err(vpfe, "vb2_queue_init() failed\n");
2243 goto probe_out;
2244 }
2245
2246 INIT_LIST_HEAD(&vpfe->dma_queue);
2247
2248 vdev = &vpfe->video_dev;
2249 strscpy(vdev->name, VPFE_MODULE_NAME, sizeof(vdev->name));
2250 vdev->release = video_device_release_empty;
2251 vdev->fops = &vpfe_fops;
2252 vdev->ioctl_ops = &vpfe_ioctl_ops;
2253 vdev->v4l2_dev = &vpfe->v4l2_dev;
2254 vdev->vfl_dir = VFL_DIR_RX;
2255 vdev->queue = q;
2256 vdev->lock = &vpfe->lock;
2257 vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
2258 V4L2_CAP_READWRITE;
2259 video_set_drvdata(vdev, vpfe);
2260 err = video_register_device(&vpfe->video_dev, VFL_TYPE_VIDEO, -1);
2261 if (err) {
2262 vpfe_err(vpfe,
2263 "Unable to register video device.\n");
2264 goto probe_out;
2265 }
2266
2267 return 0;
2268
2269 probe_out:
2270 v4l2_device_unregister(&vpfe->v4l2_dev);
2271 return err;
2272 }
2273
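/*
 * vpfe_async_complete : Async notifier complete callback; finishes
 * device setup once all subdevs are bound
 */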
2274 static int vpfe_async_complete(struct v4l2_async_notifier *notifier)
2275 {
2276 struct vpfe_device *vpfe = container_of(notifier->v4l2_dev,
2277 struct vpfe_device, v4l2_dev);
2278
2279 return vpfe_probe_complete(vpfe);
2280 }
2281
2282 static const struct v4l2_async_notifier_operations vpfe_async_ops = {
2283 .bound = vpfe_async_bound,
2284 .complete = vpfe_async_complete,
2285 };
2286
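/*
 * vpfe_get_pdata : Build the platform configuration, either from legacy
 * platform data or by parsing the device tree endpoints and registering
 * async connections for the remote subdevs
 */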
2287 static struct vpfe_config *
2288 vpfe_get_pdata(struct vpfe_device *vpfe)
2289 {
2290 struct device_node *endpoint;
2291 struct device *dev = vpfe->pdev;
2292 struct vpfe_subdev_info *sdinfo;
2293 struct vpfe_config *pdata;
2294 unsigned int flags;
2295 unsigned int i;
2296 int err;
2297
2298 dev_dbg(dev, "vpfe_get_pdata\n");
2299
2300 v4l2_async_nf_init(&vpfe->notifier, &vpfe->v4l2_dev);
2301
2302 if (!IS_ENABLED(CONFIG_OF) || !dev->of_node)
2303 return dev->platform_data;
2304
2305 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2306 if (!pdata)
2307 return NULL;
2308
2309 i = 0;
2310 for_each_endpoint_of_node(dev->of_node, endpoint) {
2311 struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = 0 };
2312 struct device_node *rem;
2313
2314 sdinfo = &pdata->sub_devs[i];
2315 sdinfo->grp_id = 0;
2316
2317 /* we only support camera */
2318 sdinfo->inputs[0].index = i;
2319 strscpy(sdinfo->inputs[0].name, "Camera",
2320 sizeof(sdinfo->inputs[0].name));
2321 sdinfo->inputs[0].type = V4L2_INPUT_TYPE_CAMERA;
2322 sdinfo->inputs[0].std = V4L2_STD_ALL;
2323 sdinfo->inputs[0].capabilities = V4L2_IN_CAP_STD;
2324
2325 sdinfo->can_route = 0;
2326 sdinfo->routes = NULL;
2327
2328 of_property_read_u32(endpoint, "ti,am437x-vpfe-interface",
2329 &sdinfo->vpfe_param.if_type);
2330 if (sdinfo->vpfe_param.if_type < 0 ||
2331 sdinfo->vpfe_param.if_type > 4) {
2332 sdinfo->vpfe_param.if_type = VPFE_RAW_BAYER;
2333 }
2334
2335 err = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint),
2336 &bus_cfg);
2337 if (err) {
2338 dev_err(dev, "Could not parse the endpoint\n");
2339 goto cleanup;
2340 }
2341
2342 sdinfo->vpfe_param.bus_width = bus_cfg.bus.parallel.bus_width;
2343
2344 if (sdinfo->vpfe_param.bus_width < 8 ||
2345 sdinfo->vpfe_param.bus_width > 16) {
2346 dev_err(dev, "Invalid bus width.\n");
2347 goto cleanup;
2348 }
2349
2350 flags = bus_cfg.bus.parallel.flags;
2351
2352 if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
2353 sdinfo->vpfe_param.hdpol = 1;
2354
2355 if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
2356 sdinfo->vpfe_param.vdpol = 1;
2357
2358 rem = of_graph_get_remote_port_parent(endpoint);
2359 if (!rem) {
2360 dev_err(dev, "Remote device at %pOF not found\n",
2361 endpoint);
2362 goto cleanup;
2363 }
2364
2365 pdata->asd[i] = v4l2_async_nf_add_fwnode(&vpfe->notifier,
2366 of_fwnode_handle(rem),
2367 struct v4l2_async_connection);
2368 of_node_put(rem);
2369 if (IS_ERR(pdata->asd[i]))
2370 goto cleanup;
2371
2372 i++;
2373 }
2374
2375 return pdata;
2376
2377 cleanup:
2378 v4l2_async_nf_cleanup(&vpfe->notifier);
2379 of_node_put(endpoint);
2380 return NULL;
2381 }
2382
2383 /*
2384 * vpfe_probe : This function creates device entries by registering
2385 * itself with the V4L2 driver and initializes the fields of each
2386 * device object
2387 */
2388 static int vpfe_probe(struct platform_device *pdev)
2389 {
2390 struct vpfe_config *vpfe_cfg;
2391 struct vpfe_device *vpfe;
2392 struct vpfe_ccdc *ccdc;
2393 int ret;
2394
2395 vpfe = devm_kzalloc(&pdev->dev, sizeof(*vpfe), GFP_KERNEL);
2396 if (!vpfe)
2397 return -ENOMEM;
2398
2399 vpfe->pdev = &pdev->dev;
2400
2401 ret = v4l2_device_register(&pdev->dev, &vpfe->v4l2_dev);
2402 if (ret) {
2403 vpfe_err(vpfe, "Unable to register v4l2 device.\n");
2404 return ret;
2405 }
2406
2407 vpfe_cfg = vpfe_get_pdata(vpfe);
2408 if (!vpfe_cfg) {
2409 dev_err(&pdev->dev, "No platform data\n");
2410 ret = -EINVAL;
2411 goto probe_out_cleanup;
2412 }
2413
2414 vpfe->cfg = vpfe_cfg;
2415 ccdc = &vpfe->ccdc;
2416
2417 ccdc->ccdc_cfg.base_addr = devm_platform_ioremap_resource(pdev, 0);
2418 if (IS_ERR(ccdc->ccdc_cfg.base_addr)) {
2419 ret = PTR_ERR(ccdc->ccdc_cfg.base_addr);
2420 goto probe_out_cleanup;
2421 }
2422
2423 ret = platform_get_irq(pdev, 0);
2424 if (ret < 0)
2425 goto probe_out_cleanup;
2426 vpfe->irq = ret;
2427
2428 ret = devm_request_irq(vpfe->pdev, vpfe->irq, vpfe_isr, 0,
2429 "vpfe_capture0", vpfe);
2430 if (ret) {
2431 dev_err(&pdev->dev, "Unable to request interrupt\n");
2432 ret = -EINVAL;
2433 goto probe_out_cleanup;
2434 }
2435
2436 /* set the driver data in platform device */
2437 platform_set_drvdata(pdev, vpfe);
2438 /* Enabling module functional clock */
2439 pm_runtime_enable(&pdev->dev);
2440
2441 /* for now just enable it here instead of waiting for the open */
2442 ret = pm_runtime_resume_and_get(&pdev->dev);
2443 if (ret < 0) {
2444 vpfe_err(vpfe, "Unable to resume device.\n");
2445 goto probe_out_cleanup;
2446 }
2447
2448 vpfe_ccdc_config_defaults(ccdc);
2449
2450 pm_runtime_put_sync(&pdev->dev);
2451
2452 vpfe->sd = devm_kcalloc(&pdev->dev,
2453 ARRAY_SIZE(vpfe->cfg->asd),
2454 sizeof(struct v4l2_subdev *),
2455 GFP_KERNEL);
2456 if (!vpfe->sd) {
2457 ret = -ENOMEM;
2458 goto probe_out_cleanup;
2459 }
2460
2461 vpfe->notifier.ops = &vpfe_async_ops;
2462 ret = v4l2_async_nf_register(&vpfe->notifier);
2463 if (ret) {
2464 vpfe_err(vpfe, "Error registering async notifier\n");
2465 ret = -EINVAL;
2466 goto probe_out_cleanup;
2467 }
2468
2469 return 0;
2470
2471 probe_out_cleanup:
2472 v4l2_async_nf_cleanup(&vpfe->notifier);
2473 v4l2_device_unregister(&vpfe->v4l2_dev);
2474 return ret;
2475 }
2476
2477 /*
2478 * vpfe_remove : This function un-registers the device from the V4L2 driver
2479 */
2480 static void vpfe_remove(struct platform_device *pdev)
2481 {
2482 struct vpfe_device *vpfe = platform_get_drvdata(pdev);
2483
2484 pm_runtime_disable(&pdev->dev);
2485
2486 v4l2_async_nf_unregister(&vpfe->notifier);
2487 v4l2_async_nf_cleanup(&vpfe->notifier);
2488 video_unregister_device(&vpfe->video_dev);
2489 v4l2_device_unregister(&vpfe->v4l2_dev);
2490 }
2491
2492 #ifdef CONFIG_PM_SLEEP
2493
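/* vpfe_save_context : Save the CCDC register context before suspend */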
2494 static void vpfe_save_context(struct vpfe_ccdc *ccdc)
2495 {
2496 ccdc->ccdc_ctx[VPFE_PCR >> 2] = vpfe_reg_read(ccdc, VPFE_PCR);
2497 ccdc->ccdc_ctx[VPFE_SYNMODE >> 2] = vpfe_reg_read(ccdc, VPFE_SYNMODE);
2498 ccdc->ccdc_ctx[VPFE_SDOFST >> 2] = vpfe_reg_read(ccdc, VPFE_SDOFST);
2499 ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2] = vpfe_reg_read(ccdc, VPFE_SDR_ADDR);
2500 ccdc->ccdc_ctx[VPFE_CLAMP >> 2] = vpfe_reg_read(ccdc, VPFE_CLAMP);
2501 ccdc->ccdc_ctx[VPFE_DCSUB >> 2] = vpfe_reg_read(ccdc, VPFE_DCSUB);
2502 ccdc->ccdc_ctx[VPFE_COLPTN >> 2] = vpfe_reg_read(ccdc, VPFE_COLPTN);
2503 ccdc->ccdc_ctx[VPFE_BLKCMP >> 2] = vpfe_reg_read(ccdc, VPFE_BLKCMP);
2504 ccdc->ccdc_ctx[VPFE_VDINT >> 2] = vpfe_reg_read(ccdc, VPFE_VDINT);
2505 ccdc->ccdc_ctx[VPFE_ALAW >> 2] = vpfe_reg_read(ccdc, VPFE_ALAW);
2506 ccdc->ccdc_ctx[VPFE_REC656IF >> 2] = vpfe_reg_read(ccdc, VPFE_REC656IF);
2507 ccdc->ccdc_ctx[VPFE_CCDCFG >> 2] = vpfe_reg_read(ccdc, VPFE_CCDCFG);
2508 ccdc->ccdc_ctx[VPFE_CULLING >> 2] = vpfe_reg_read(ccdc, VPFE_CULLING);
2509 ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2] = vpfe_reg_read(ccdc,
2510 VPFE_HD_VD_WID);
2511 ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2] = vpfe_reg_read(ccdc,
2512 VPFE_PIX_LINES);
2513 ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2] = vpfe_reg_read(ccdc,
2514 VPFE_HORZ_INFO);
2515 ccdc->ccdc_ctx[VPFE_VERT_START >> 2] = vpfe_reg_read(ccdc,
2516 VPFE_VERT_START);
2517 ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2] = vpfe_reg_read(ccdc,
2518 VPFE_VERT_LINES);
2519 ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2] = vpfe_reg_read(ccdc,
2520 VPFE_HSIZE_OFF);
2521 }
2522
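/*
 * vpfe_suspend : PM suspend handler; if streaming was started, save the
 * CCDC context and disable the CCDC, then select the sleep pin state
 */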
2523 static int vpfe_suspend(struct device *dev)
2524 {
2525 struct vpfe_device *vpfe = dev_get_drvdata(dev);
2526 struct vpfe_ccdc *ccdc = &vpfe->ccdc;
2527
2528 /* only do full suspend if streaming has started */
2529 if (vb2_start_streaming_called(&vpfe->buffer_queue)) {
2530 /*
2531 * ignore RPM resume errors here, as it is already too late.
2532 * A check like that should happen earlier, either at
2533 * open() or just before starting streaming.
2534 */
2535 pm_runtime_get_sync(dev);
2536 vpfe_config_enable(ccdc, 1);
2537
2538 /* Save VPFE context */
2539 vpfe_save_context(ccdc);
2540
2541 /* Disable CCDC */
2542 vpfe_pcr_enable(ccdc, 0);
2543 vpfe_config_enable(ccdc, 0);
2544
2545 /* Disable both master and slave clock */
2546 pm_runtime_put_sync(dev);
2547 }
2548
2549 /* Select sleep pin state */
2550 pinctrl_pm_select_sleep_state(dev);
2551
2552 return 0;
2553 }
2554
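/* vpfe_restore_context : Restore the CCDC register context on resume */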
2555 static void vpfe_restore_context(struct vpfe_ccdc *ccdc)
2556 {
2557 vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SYNMODE >> 2], VPFE_SYNMODE);
2558 vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CULLING >> 2], VPFE_CULLING);
2559 vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDOFST >> 2], VPFE_SDOFST);
2560 vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2], VPFE_SDR_ADDR);
2561 vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CLAMP >> 2], VPFE_CLAMP);
2562 vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_DCSUB >> 2], VPFE_DCSUB);
2563 vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_COLPTN >> 2], VPFE_COLPTN);
2564 vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_BLKCMP >> 2], VPFE_BLKCMP);
2565 vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VDINT >> 2], VPFE_VDINT);
2566 vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_ALAW >> 2], VPFE_ALAW);
2567 vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_REC656IF >> 2], VPFE_REC656IF);
2568 vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CCDCFG >> 2], VPFE_CCDCFG);
2569 vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PCR >> 2], VPFE_PCR);
2570 vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2],
2571 VPFE_HD_VD_WID);
2572 vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2],
2573 VPFE_PIX_LINES);
2574 vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2],
2575 VPFE_HORZ_INFO);
2576 vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_START >> 2],
2577 VPFE_VERT_START);
2578 vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2],
2579 VPFE_VERT_LINES);
2580 vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2],
2581 VPFE_HSIZE_OFF);
2582 }
2583
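/*
 * vpfe_resume : PM resume handler; if streaming was started, restore the
 * CCDC context, then select the default pin state
 */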
2584 static int vpfe_resume(struct device *dev)
2585 {
2586 struct vpfe_device *vpfe = dev_get_drvdata(dev);
2587 struct vpfe_ccdc *ccdc = &vpfe->ccdc;
2588
2589 /* only do full resume if streaming has started */
2590 if (vb2_start_streaming_called(&vpfe->buffer_queue)) {
2591 /* Enable both master and slave clock */
2592 pm_runtime_get_sync(dev);
2593 vpfe_config_enable(ccdc, 1);
2594
2595 /* Restore VPFE context */
2596 vpfe_restore_context(ccdc);
2597
2598 vpfe_config_enable(ccdc, 0);
2599 pm_runtime_put_sync(dev);
2600 }
2601
2602 /* Select default pin state */
2603 pinctrl_pm_select_default_state(dev);
2604
2605 return 0;
2606 }
2607
2608 #endif
2609
2610 static SIMPLE_DEV_PM_OPS(vpfe_pm_ops, vpfe_suspend, vpfe_resume);
2611
2612 static const struct of_device_id vpfe_of_match[] = {
2613 { .compatible = "ti,am437x-vpfe", },
2614 { /* sentinel */ },
2615 };
2616 MODULE_DEVICE_TABLE(of, vpfe_of_match);
2617
2618 static struct platform_driver vpfe_driver = {
2619 .probe = vpfe_probe,
2620 .remove_new = vpfe_remove,
2621 .driver = {
2622 .name = VPFE_MODULE_NAME,
2623 .pm = &vpfe_pm_ops,
2624 .of_match_table = vpfe_of_match,
2625 },
2626 };
2627
2628 module_platform_driver(vpfe_driver);
2629
2630 MODULE_AUTHOR("Texas Instruments");
2631 MODULE_DESCRIPTION("TI AM437x VPFE driver");
2632 MODULE_LICENSE("GPL");
2633 MODULE_VERSION(VPFE_VERSION);
2634