// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI CSI2RX Shim Wrapper Driver
 *
 * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
 *
 * Author: Pratyush Yadav <p.yadav@ti.com>
 * Author: Jai Luthra <j-luthra@ti.com>
 */

#include <linux/bitfield.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/property.h>

#include <media/cadence/cdns-csi2rx.h>
#include <media/mipi-csi2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
#include <media/videobuf2-dma-contig.h>

#define TI_CSI2RX_MODULE_NAME		"j721e-csi2rx"

#define SHIM_CNTL			0x10
#define SHIM_CNTL_PIX_RST		BIT(0)

#define SHIM_DMACNTX			0x20
#define SHIM_DMACNTX_EN			BIT(31)
#define SHIM_DMACNTX_YUV422		GENMASK(27, 26)
#define SHIM_DMACNTX_DUAL_PCK_CFG	BIT(24)
#define SHIM_DMACNTX_SIZE		GENMASK(21, 20)
#define SHIM_DMACNTX_FMT		GENMASK(5, 0)
#define SHIM_DMACNTX_YUV422_MODE_11	3
#define SHIM_DMACNTX_SIZE_8		0
#define SHIM_DMACNTX_SIZE_16		1
#define SHIM_DMACNTX_SIZE_32		2

#define SHIM_PSI_CFG0			0x24
#define SHIM_PSI_CFG0_SRC_TAG		GENMASK(15, 0)
#define SHIM_PSI_CFG0_DST_TAG		GENMASK(31, 16)
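
/*
 * Example SHIM_DMACNTX value (a sketch, not taken from the TRM): a RAW10
 * stream unpacked to 16 bits per pixel at 1 pixel per clock would be
 * programmed roughly as
 *
 *   SHIM_DMACNTX_EN |
 *   FIELD_PREP(SHIM_DMACNTX_SIZE, SHIM_DMACNTX_SIZE_16) |
 *   FIELD_PREP(SHIM_DMACNTX_FMT, MIPI_CSI2_DT_RAW10)
 *
 * i.e. 0x8010002b, given MIPI_CSI2_DT_RAW10 == 0x2b. See
 * ti_csi2rx_setup_shim() for the full programming sequence.
 */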

#define TI_CSI2RX_MAX_PIX_PER_CLK	4
#define PSIL_WORD_SIZE_BYTES		16
/*
 * There are no hard limits on the width or height. The DMA engine can handle
 * all sizes. The max width and height are arbitrary numbers for this driver.
 * Use 16K * 16K as the arbitrary limit. It is large enough that it is unlikely
 * the limit will be hit in practice.
 */
#define MAX_WIDTH_BYTES			SZ_16K
#define MAX_HEIGHT_LINES		SZ_16K

#define DRAIN_TIMEOUT_MS		50
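/*
 * Size of the scratch buffer used to drain stale data from the PSI-L
 * endpoint. The required size is not spelled out here; SZ_32K is presumably
 * a comfortable upper bound on the residue left behind.
 */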
#define DRAIN_BUFFER_SIZE		SZ_32K

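/*
 * Source pad index on the Cadence CSI2RX bridge subdev that feeds this
 * capture context (pad 0 is the bridge's CSI-2 sink).
 */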
#define CSI2RX_BRIDGE_SOURCE_PAD	1

struct ti_csi2rx_fmt {
	u32				fourcc;	/* Four character code. */
	u32				code;	/* Mbus code. */
	u32				csi_dt;	/* CSI Data type. */
	u8				bpp;	/* Bits per pixel. */
	u8				size;	/* Data size shift when unpacking. */
};

struct ti_csi2rx_buffer {
	/* Common v4l2 buffer. Must be first. */
	struct vb2_v4l2_buffer		vb;
	struct list_head		list;
	struct ti_csi2rx_dev		*csi;
};

enum ti_csi2rx_dma_state {
	TI_CSI2RX_DMA_STOPPED,	/* Streaming not started yet. */
	TI_CSI2RX_DMA_IDLE,	/* Streaming but no pending DMA operation. */
	TI_CSI2RX_DMA_ACTIVE,	/* Streaming and pending DMA operation. */
};

struct ti_csi2rx_dma {
	/* Protects all fields in this struct. */
	spinlock_t			lock;
	struct dma_chan			*chan;
	/* Buffers queued to the driver, waiting to be processed by DMA. */
	struct list_head		queue;
	enum ti_csi2rx_dma_state	state;
	/* Queue of buffers submitted to DMA engine. */
	struct list_head		submitted;
	/* Buffer to drain stale data from PSI-L endpoint */
	struct {
		void			*vaddr;
		dma_addr_t		paddr;
		size_t			len;
	} drain;
};

struct ti_csi2rx_dev {
	struct device			*dev;
	void __iomem			*shim;
	struct v4l2_device		v4l2_dev;
	struct video_device		vdev;
	struct media_device		mdev;
	struct media_pipeline		pipe;
	struct media_pad		pad;
	struct v4l2_async_notifier	notifier;
	struct v4l2_subdev		*source;
	struct vb2_queue		vidq;
	struct mutex			mutex; /* To serialize ioctls. */
	struct v4l2_format		v_fmt;
	struct ti_csi2rx_dma		dma;
	u32				sequence;
	u8				pix_per_clk;
};

static const struct ti_csi2rx_fmt ti_csi2rx_formats[] = {
	{
		.fourcc			= V4L2_PIX_FMT_YUYV,
		.code			= MEDIA_BUS_FMT_YUYV8_1X16,
		.csi_dt			= MIPI_CSI2_DT_YUV422_8B,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_UYVY,
		.code			= MEDIA_BUS_FMT_UYVY8_1X16,
		.csi_dt			= MIPI_CSI2_DT_YUV422_8B,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_YVYU,
		.code			= MEDIA_BUS_FMT_YVYU8_1X16,
		.csi_dt			= MIPI_CSI2_DT_YUV422_8B,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_VYUY,
		.code			= MEDIA_BUS_FMT_VYUY8_1X16,
		.csi_dt			= MIPI_CSI2_DT_YUV422_8B,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_SBGGR8,
		.code			= MEDIA_BUS_FMT_SBGGR8_1X8,
		.csi_dt			= MIPI_CSI2_DT_RAW8,
		.bpp			= 8,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGBRG8,
		.code			= MEDIA_BUS_FMT_SGBRG8_1X8,
		.csi_dt			= MIPI_CSI2_DT_RAW8,
		.bpp			= 8,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGRBG8,
		.code			= MEDIA_BUS_FMT_SGRBG8_1X8,
		.csi_dt			= MIPI_CSI2_DT_RAW8,
		.bpp			= 8,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_SRGGB8,
		.code			= MEDIA_BUS_FMT_SRGGB8_1X8,
		.csi_dt			= MIPI_CSI2_DT_RAW8,
		.bpp			= 8,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_GREY,
		.code			= MEDIA_BUS_FMT_Y8_1X8,
		.csi_dt			= MIPI_CSI2_DT_RAW8,
		.bpp			= 8,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_SBGGR10,
		.code			= MEDIA_BUS_FMT_SBGGR10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGBRG10,
		.code			= MEDIA_BUS_FMT_SGBRG10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGRBG10,
		.code			= MEDIA_BUS_FMT_SGRBG10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SRGGB10,
		.code			= MEDIA_BUS_FMT_SRGGB10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_RGB565X,
		.code			= MEDIA_BUS_FMT_RGB565_1X16,
		.csi_dt			= MIPI_CSI2_DT_RGB565,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_XBGR32,
		.code			= MEDIA_BUS_FMT_RGB888_1X24,
		.csi_dt			= MIPI_CSI2_DT_RGB888,
		.bpp			= 32,
		.size			= SHIM_DMACNTX_SIZE_32,
	}, {
		.fourcc			= V4L2_PIX_FMT_RGBX32,
		.code			= MEDIA_BUS_FMT_BGR888_1X24,
		.csi_dt			= MIPI_CSI2_DT_RGB888,
		.bpp			= 32,
		.size			= SHIM_DMACNTX_SIZE_32,
	},

	/* More formats can be supported but they are not listed for now. */
};
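
/*
 * Note: every mbus code in the table above maps to exactly one fourcc.
 * ti_csi2rx_enum_fmt_vid_cap() below relies on this 1-to-1 mapping.
 */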

/* Forward declaration needed by ti_csi2rx_dma_callback. */
static int ti_csi2rx_start_dma(struct ti_csi2rx_dev *csi,
			       struct ti_csi2rx_buffer *buf);

static const struct ti_csi2rx_fmt *find_format_by_fourcc(u32 pixelformat)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ti_csi2rx_formats); i++) {
		if (ti_csi2rx_formats[i].fourcc == pixelformat)
			return &ti_csi2rx_formats[i];
	}

	return NULL;
}

static const struct ti_csi2rx_fmt *find_format_by_code(u32 code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ti_csi2rx_formats); i++) {
		if (ti_csi2rx_formats[i].code == code)
			return &ti_csi2rx_formats[i];
	}

	return NULL;
}

static void ti_csi2rx_fill_fmt(const struct ti_csi2rx_fmt *csi_fmt,
			       struct v4l2_format *v4l2_fmt)
{
	struct v4l2_pix_format *pix = &v4l2_fmt->fmt.pix;
	unsigned int pixels_in_word;

	pixels_in_word = PSIL_WORD_SIZE_BYTES * 8 / csi_fmt->bpp;
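	/*
	 * e.g. for a 16 bpp format: 16 bytes * 8 / 16 = 8 pixels per PSI-L
	 * word, so the width below is clamped and rounded to a multiple of
	 * 8 pixels.
	 */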

	/* Clamp width and height to sensible maximums (16K bytes x 16K lines) */
	pix->width = clamp_t(unsigned int, pix->width,
			     pixels_in_word,
			     MAX_WIDTH_BYTES * 8 / csi_fmt->bpp);
	pix->height = clamp_t(unsigned int, pix->height, 1, MAX_HEIGHT_LINES);

	/* Width should be a multiple of transfer word-size */
	pix->width = rounddown(pix->width, pixels_in_word);

	v4l2_fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	pix->pixelformat = csi_fmt->fourcc;
	pix->bytesperline = pix->width * (csi_fmt->bpp / 8);
	pix->sizeimage = pix->bytesperline * pix->height;
}

static int ti_csi2rx_querycap(struct file *file, void *priv,
			      struct v4l2_capability *cap)
{
	strscpy(cap->driver, TI_CSI2RX_MODULE_NAME, sizeof(cap->driver));
	strscpy(cap->card, TI_CSI2RX_MODULE_NAME, sizeof(cap->card));

	return 0;
}

static int ti_csi2rx_enum_fmt_vid_cap(struct file *file, void *priv,
				      struct v4l2_fmtdesc *f)
{
	const struct ti_csi2rx_fmt *fmt = NULL;

	if (f->mbus_code) {
		/* 1-to-1 mapping between bus formats and pixel formats */
		if (f->index > 0)
			return -EINVAL;

		fmt = find_format_by_code(f->mbus_code);
	} else {
		if (f->index >= ARRAY_SIZE(ti_csi2rx_formats))
			return -EINVAL;

		fmt = &ti_csi2rx_formats[f->index];
	}

	if (!fmt)
		return -EINVAL;

	f->pixelformat = fmt->fourcc;
	memset(f->reserved, 0, sizeof(f->reserved));
	f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	return 0;
}

static int ti_csi2rx_g_fmt_vid_cap(struct file *file, void *priv,
				   struct v4l2_format *f)
{
	struct ti_csi2rx_dev *csi = video_drvdata(file);

	*f = csi->v_fmt;

	return 0;
}

static int ti_csi2rx_try_fmt_vid_cap(struct file *file, void *priv,
				     struct v4l2_format *f)
{
	const struct ti_csi2rx_fmt *fmt;

	/*
	 * Default to the first format if the requested pixel format code isn't
	 * supported.
	 */
	fmt = find_format_by_fourcc(f->fmt.pix.pixelformat);
	if (!fmt)
		fmt = &ti_csi2rx_formats[0];

	/* Interlaced formats are not supported. */
	f->fmt.pix.field = V4L2_FIELD_NONE;

	ti_csi2rx_fill_fmt(fmt, f);

	return 0;
}

static int ti_csi2rx_s_fmt_vid_cap(struct file *file, void *priv,
				   struct v4l2_format *f)
{
	struct ti_csi2rx_dev *csi = video_drvdata(file);
	struct vb2_queue *q = &csi->vidq;
	int ret;

	if (vb2_is_busy(q))
		return -EBUSY;

	ret = ti_csi2rx_try_fmt_vid_cap(file, priv, f);
	if (ret < 0)
		return ret;

	csi->v_fmt = *f;

	return 0;
}

static int ti_csi2rx_enum_framesizes(struct file *file, void *fh,
				     struct v4l2_frmsizeenum *fsize)
{
	const struct ti_csi2rx_fmt *fmt;
	unsigned int pixels_in_word;

	fmt = find_format_by_fourcc(fsize->pixel_format);
	if (!fmt || fsize->index != 0)
		return -EINVAL;

	/*
	 * Number of pixels in one PSI-L word. The transfer happens in multiples
	 * of PSI-L word sizes.
	 */
	pixels_in_word = PSIL_WORD_SIZE_BYTES * 8 / fmt->bpp;

	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
	fsize->stepwise.min_width = pixels_in_word;
	fsize->stepwise.max_width = rounddown(MAX_WIDTH_BYTES * 8 / fmt->bpp,
					      pixels_in_word);
	fsize->stepwise.step_width = pixels_in_word;
	fsize->stepwise.min_height = 1;
	fsize->stepwise.max_height = MAX_HEIGHT_LINES;
	fsize->stepwise.step_height = 1;

	return 0;
}

static const struct v4l2_ioctl_ops csi_ioctl_ops = {
	.vidioc_querycap      = ti_csi2rx_querycap,
	.vidioc_enum_fmt_vid_cap = ti_csi2rx_enum_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap = ti_csi2rx_try_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap = ti_csi2rx_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap = ti_csi2rx_s_fmt_vid_cap,
	.vidioc_enum_framesizes = ti_csi2rx_enum_framesizes,
	.vidioc_reqbufs       = vb2_ioctl_reqbufs,
	.vidioc_create_bufs   = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf   = vb2_ioctl_prepare_buf,
	.vidioc_querybuf      = vb2_ioctl_querybuf,
	.vidioc_qbuf          = vb2_ioctl_qbuf,
	.vidioc_dqbuf         = vb2_ioctl_dqbuf,
	.vidioc_expbuf        = vb2_ioctl_expbuf,
	.vidioc_streamon      = vb2_ioctl_streamon,
	.vidioc_streamoff     = vb2_ioctl_streamoff,
};

static const struct v4l2_file_operations csi_fops = {
	.owner = THIS_MODULE,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.read = vb2_fop_read,
	.poll = vb2_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = vb2_fop_mmap,
};

static int csi_async_notifier_bound(struct v4l2_async_notifier *notifier,
				    struct v4l2_subdev *subdev,
				    struct v4l2_async_connection *asc)
{
	struct ti_csi2rx_dev *csi = dev_get_drvdata(notifier->v4l2_dev->dev);

	csi->source = subdev;

	return 0;
}

static int csi_async_notifier_complete(struct v4l2_async_notifier *notifier)
{
	struct ti_csi2rx_dev *csi = dev_get_drvdata(notifier->v4l2_dev->dev);
	struct video_device *vdev = &csi->vdev;
	int ret;

	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (ret)
		return ret;

	ret = media_create_pad_link(&csi->source->entity, CSI2RX_BRIDGE_SOURCE_PAD,
				    &vdev->entity, csi->pad.index,
				    MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret) {
		video_unregister_device(vdev);
		return ret;
	}

	ret = v4l2_device_register_subdev_nodes(&csi->v4l2_dev);
	if (ret)
		video_unregister_device(vdev);

	return ret;
}

static const struct v4l2_async_notifier_operations csi_async_notifier_ops = {
	.bound = csi_async_notifier_bound,
	.complete = csi_async_notifier_complete,
};

static int ti_csi2rx_notifier_register(struct ti_csi2rx_dev *csi)
{
	struct fwnode_handle *fwnode;
	struct v4l2_async_connection *asc;
	int ret;

	fwnode = fwnode_get_named_child_node(csi->dev->fwnode, "csi-bridge");
	if (!fwnode)
		return -EINVAL;

	v4l2_async_nf_init(&csi->notifier, &csi->v4l2_dev);
	csi->notifier.ops = &csi_async_notifier_ops;

	asc = v4l2_async_nf_add_fwnode(&csi->notifier, fwnode,
				       struct v4l2_async_connection);
	/*
	 * Calling v4l2_async_nf_add_fwnode grabs a refcount,
	 * so drop the one we got in fwnode_get_named_child_node
	 */
	fwnode_handle_put(fwnode);

	if (IS_ERR(asc)) {
		v4l2_async_nf_cleanup(&csi->notifier);
		return PTR_ERR(asc);
	}

	ret = v4l2_async_nf_register(&csi->notifier);
	if (ret) {
		v4l2_async_nf_cleanup(&csi->notifier);
		return ret;
	}

	return 0;
}

/* Request maximum possible pixels per clock from the bridge */
static void ti_csi2rx_request_max_ppc(struct ti_csi2rx_dev *csi)
{
	u8 ppc = TI_CSI2RX_MAX_PIX_PER_CLK;
	struct media_pad *pad;
	int ret;

	pad = media_entity_remote_source_pad_unique(&csi->vdev.entity);
	if (IS_ERR(pad))
		return;

	ret = cdns_csi2rx_negotiate_ppc(csi->source, pad->index, &ppc);
	if (ret) {
		dev_warn(csi->dev, "NUM_PIXELS negotiation failed: %d\n", ret);
		csi->pix_per_clk = 1;
	} else {
		csi->pix_per_clk = ppc;
	}
}

static void ti_csi2rx_setup_shim(struct ti_csi2rx_dev *csi)
{
	const struct ti_csi2rx_fmt *fmt;
	unsigned int reg;

	fmt = find_format_by_fourcc(csi->v_fmt.fmt.pix.pixelformat);

	/* De-assert the pixel interface reset. */
	reg = SHIM_CNTL_PIX_RST;
	writel(reg, csi->shim + SHIM_CNTL);

	/* Negotiate pixel count from the source */
	ti_csi2rx_request_max_ppc(csi);

	reg = SHIM_DMACNTX_EN;
	reg |= FIELD_PREP(SHIM_DMACNTX_FMT, fmt->csi_dt);

	/*
	 * The hardware assumes incoming YUV422 8-bit data on MIPI CSI2 bus
	 * follows the spec and is packed in the order U0 -> Y0 -> V0 -> Y1 ->
	 * ...
	 *
	 * There is an option to swap the bytes around before storing in
	 * memory, to achieve different pixel formats:
	 *
	 * Byte3 <----------- Byte0
	 * [ Y1 ][ V0 ][ Y0 ][ U0 ]	MODE 11
	 * [ Y1 ][ U0 ][ Y0 ][ V0 ]	MODE 10
	 * [ V0 ][ Y1 ][ U0 ][ Y0 ]	MODE 01
	 * [ U0 ][ Y1 ][ V0 ][ Y0 ]	MODE 00
	 *
	 * We don't have any requirement to change pixelformat from what is
	 * coming from the source, so we keep it in MODE 11, which does not
	 * swap any bytes when storing in memory.
	 */
	switch (fmt->fourcc) {
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_VYUY:
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_YVYU:
		reg |= FIELD_PREP(SHIM_DMACNTX_YUV422,
				  SHIM_DMACNTX_YUV422_MODE_11);
		/* Multiple pixels are handled differently for packed YUV */
		if (csi->pix_per_clk == 2)
			reg |= SHIM_DMACNTX_DUAL_PCK_CFG;
		reg |= FIELD_PREP(SHIM_DMACNTX_SIZE, fmt->size);
		break;
	default:
		/* By default we change the shift size for multiple pixels */
		reg |= FIELD_PREP(SHIM_DMACNTX_SIZE,
				  fmt->size + (csi->pix_per_clk >> 1));
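		/*
		 * i.e. each doubling of pixels per clock bumps the unpack
		 * size one step: an 8-bit format at 4 pixels per clock uses
		 * SHIM_DMACNTX_SIZE_8 + (4 >> 1) == SHIM_DMACNTX_SIZE_32.
		 */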
		break;
	}

	writel(reg, csi->shim + SHIM_DMACNTX);

	reg = FIELD_PREP(SHIM_PSI_CFG0_SRC_TAG, 0) |
	      FIELD_PREP(SHIM_PSI_CFG0_DST_TAG, 0);
	writel(reg, csi->shim + SHIM_PSI_CFG0);
}

static void ti_csi2rx_drain_callback(void *param)
{
	struct completion *drain_complete = param;

	complete(drain_complete);
}

/*
 * Drain the stale data left at the PSI-L endpoint.
 *
 * This might happen if no buffers are queued in time but the source is still
 * streaming. In multi-stream scenarios this can happen when one stream is
 * stopped but another is still streaming, and thus the module-level pixel
 * reset is not asserted.
 *
 * To prevent that stale data from corrupting subsequent transactions, DMA
 * requests must be issued to drain it out.
 */
static int ti_csi2rx_drain_dma(struct ti_csi2rx_dev *csi)
{
	struct dma_async_tx_descriptor *desc;
	struct completion drain_complete;
	dma_cookie_t cookie;
	int ret;

	init_completion(&drain_complete);

	desc = dmaengine_prep_slave_single(csi->dma.chan, csi->dma.drain.paddr,
					   csi->dma.drain.len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EIO;
		goto out;
	}

	desc->callback = ti_csi2rx_drain_callback;
	desc->callback_param = &drain_complete;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		goto out;

	dma_async_issue_pending(csi->dma.chan);

	if (!wait_for_completion_timeout(&drain_complete,
					 msecs_to_jiffies(DRAIN_TIMEOUT_MS))) {
		dmaengine_terminate_sync(csi->dma.chan);
		dev_dbg(csi->dev, "DMA transfer timed out for drain buffer\n");
		ret = -ETIMEDOUT;
		goto out;
	}
out:
	return ret;
}

static void ti_csi2rx_dma_callback(void *param)
{
	struct ti_csi2rx_buffer *buf = param;
	struct ti_csi2rx_dev *csi = buf->csi;
	struct ti_csi2rx_dma *dma = &csi->dma;
	unsigned long flags;

	/*
	 * TODO: Derive the sequence number from the CSI2RX frame number
	 * hardware monitor registers.
	 */
	buf->vb.vb2_buf.timestamp = ktime_get_ns();
	buf->vb.sequence = csi->sequence++;

	spin_lock_irqsave(&dma->lock, flags);

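	/*
	 * Transfers complete in submission order, so the finished buffer is
	 * expected at the head of the submitted list.
	 */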
	WARN_ON(!list_is_first(&buf->list, &dma->submitted));
	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
	list_del(&buf->list);

	/* If there are more buffers to process then start their transfer. */
	while (!list_empty(&dma->queue)) {
		buf = list_entry(dma->queue.next, struct ti_csi2rx_buffer, list);

		if (ti_csi2rx_start_dma(csi, buf)) {
			dev_err(csi->dev, "Failed to queue the next buffer for DMA\n");
			list_del(&buf->list);
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		} else {
			list_move_tail(&buf->list, &dma->submitted);
		}
	}

	if (list_empty(&dma->submitted))
		dma->state = TI_CSI2RX_DMA_IDLE;

	spin_unlock_irqrestore(&dma->lock, flags);
}

static int ti_csi2rx_start_dma(struct ti_csi2rx_dev *csi,
			       struct ti_csi2rx_buffer *buf)
{
	dma_addr_t addr;
	struct dma_async_tx_descriptor *desc;
	size_t len = csi->v_fmt.fmt.pix.sizeimage;
	dma_cookie_t cookie;
	int ret = 0;

	addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
	desc = dmaengine_prep_slave_single(csi->dma.chan, addr, len,
					   DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EIO;

	desc->callback = ti_csi2rx_dma_callback;
	desc->callback_param = buf;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		return ret;

	dma_async_issue_pending(csi->dma.chan);

	return 0;
}

static void ti_csi2rx_stop_dma(struct ti_csi2rx_dev *csi)
{
	struct ti_csi2rx_dma *dma = &csi->dma;
	enum ti_csi2rx_dma_state state;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma->lock, flags);
	state = csi->dma.state;
	dma->state = TI_CSI2RX_DMA_STOPPED;
	spin_unlock_irqrestore(&dma->lock, flags);

	if (state != TI_CSI2RX_DMA_STOPPED) {
		/*
		 * Normal DMA termination does not clean up pending data on
		 * the endpoint if multiple streams are running and only one
		 * is stopped, as the module-level pixel reset cannot be
		 * enforced before terminating DMA.
		 */
		ret = ti_csi2rx_drain_dma(csi);
		if (ret && ret != -ETIMEDOUT)
			dev_warn(csi->dev,
				 "Failed to drain DMA. Next frame might be bogus\n");
	}

	ret = dmaengine_terminate_sync(csi->dma.chan);
	if (ret)
		dev_err(csi->dev, "Failed to stop DMA: %d\n", ret);
}

static void ti_csi2rx_cleanup_buffers(struct ti_csi2rx_dev *csi,
				      enum vb2_buffer_state state)
{
	struct ti_csi2rx_dma *dma = &csi->dma;
	struct ti_csi2rx_buffer *buf, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);
	list_for_each_entry_safe(buf, tmp, &csi->dma.queue, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
	list_for_each_entry_safe(buf, tmp, &csi->dma.submitted, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
	spin_unlock_irqrestore(&dma->lock, flags);
}

static int ti_csi2rx_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
				 unsigned int *nplanes, unsigned int sizes[],
				 struct device *alloc_devs[])
{
	struct ti_csi2rx_dev *csi = vb2_get_drv_priv(q);
	unsigned int size = csi->v_fmt.fmt.pix.sizeimage;

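	/*
	 * A non-zero *nplanes means VIDIOC_CREATE_BUFS passed in its own
	 * plane sizes; accept them only if they fit the current format.
	 */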
	if (*nplanes) {
		if (sizes[0] < size)
			return -EINVAL;
		size = sizes[0];
	}

	*nplanes = 1;
	sizes[0] = size;

	return 0;
}

static int ti_csi2rx_buffer_prepare(struct vb2_buffer *vb)
{
	struct ti_csi2rx_dev *csi = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long size = csi->v_fmt.fmt.pix.sizeimage;

	if (vb2_plane_size(vb, 0) < size) {
		dev_err(csi->dev, "Data will not fit into plane\n");
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, size);
	return 0;
}

static void ti_csi2rx_buffer_queue(struct vb2_buffer *vb)
{
	struct ti_csi2rx_dev *csi = vb2_get_drv_priv(vb->vb2_queue);
	struct ti_csi2rx_buffer *buf;
	struct ti_csi2rx_dma *dma = &csi->dma;
	bool restart_dma = false;
	unsigned long flags = 0;
	int ret;

	buf = container_of(vb, struct ti_csi2rx_buffer, vb.vb2_buf);
	buf->csi = csi;

	spin_lock_irqsave(&dma->lock, flags);
	/*
	 * Usually the DMA callback takes care of queueing the pending buffers.
	 * But if DMA has stalled due to lack of buffers, restart it now.
	 */
	if (dma->state == TI_CSI2RX_DMA_IDLE) {
		/*
		 * Do not restart DMA with the lock held because
		 * ti_csi2rx_drain_dma() might block for completion.
		 * There won't be a race on queueing DMA anyway since the
		 * callback is not being fired.
		 */
		restart_dma = true;
		dma->state = TI_CSI2RX_DMA_ACTIVE;
	} else {
		list_add_tail(&buf->list, &dma->queue);
	}
	spin_unlock_irqrestore(&dma->lock, flags);

	if (restart_dma) {
		/*
		 * Once frames start dropping, some data gets stuck in the DMA
		 * pipeline somewhere. So the first DMA transfer after frame
		 * drops gives a partial frame. This is obviously not useful to
		 * the application and will only confuse it. Issue a DMA
		 * transaction to drain that up.
		 */
		ret = ti_csi2rx_drain_dma(csi);
		if (ret && ret != -ETIMEDOUT)
			dev_warn(csi->dev,
				 "Failed to drain DMA. Next frame might be bogus\n");

		spin_lock_irqsave(&dma->lock, flags);
		ret = ti_csi2rx_start_dma(csi, buf);
		if (ret) {
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
			dma->state = TI_CSI2RX_DMA_IDLE;
			spin_unlock_irqrestore(&dma->lock, flags);
			dev_err(csi->dev, "Failed to start DMA: %d\n", ret);
		} else {
			list_add_tail(&buf->list, &dma->submitted);
			spin_unlock_irqrestore(&dma->lock, flags);
		}
	}
}

static int ti_csi2rx_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct ti_csi2rx_dev *csi = vb2_get_drv_priv(vq);
	struct ti_csi2rx_dma *dma = &csi->dma;
	struct ti_csi2rx_buffer *buf;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dma->lock, flags);
	if (list_empty(&dma->queue))
		ret = -EIO;
	spin_unlock_irqrestore(&dma->lock, flags);
	if (ret)
		return ret;

	ret = video_device_pipeline_start(&csi->vdev, &csi->pipe);
	if (ret)
		goto err;

	ti_csi2rx_setup_shim(csi);

	csi->sequence = 0;

	spin_lock_irqsave(&dma->lock, flags);
	buf = list_entry(dma->queue.next, struct ti_csi2rx_buffer, list);

	ret = ti_csi2rx_start_dma(csi, buf);
	if (ret) {
		dev_err(csi->dev, "Failed to start DMA: %d\n", ret);
		spin_unlock_irqrestore(&dma->lock, flags);
		goto err_pipeline;
	}

	list_move_tail(&buf->list, &dma->submitted);
	dma->state = TI_CSI2RX_DMA_ACTIVE;
	spin_unlock_irqrestore(&dma->lock, flags);

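	/* Start the source only once DMA is armed, so no frames are missed. */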
	ret = v4l2_subdev_call(csi->source, video, s_stream, 1);
	if (ret)
		goto err_dma;

	return 0;

err_dma:
	ti_csi2rx_stop_dma(csi);
err_pipeline:
	video_device_pipeline_stop(&csi->vdev);
	writel(0, csi->shim + SHIM_CNTL);
	writel(0, csi->shim + SHIM_DMACNTX);
err:
	ti_csi2rx_cleanup_buffers(csi, VB2_BUF_STATE_QUEUED);
	return ret;
}

static void ti_csi2rx_stop_streaming(struct vb2_queue *vq)
{
	struct ti_csi2rx_dev *csi = vb2_get_drv_priv(vq);
	int ret;

	video_device_pipeline_stop(&csi->vdev);

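	/* Assert the pixel interface reset and disable the DMA context. */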
	writel(0, csi->shim + SHIM_CNTL);
	writel(0, csi->shim + SHIM_DMACNTX);

	ret = v4l2_subdev_call(csi->source, video, s_stream, 0);
	if (ret)
		dev_err(csi->dev, "Failed to stop subdev stream\n");

	ti_csi2rx_stop_dma(csi);
	ti_csi2rx_cleanup_buffers(csi, VB2_BUF_STATE_ERROR);
}

static const struct vb2_ops csi_vb2_qops = {
	.queue_setup = ti_csi2rx_queue_setup,
	.buf_prepare = ti_csi2rx_buffer_prepare,
	.buf_queue = ti_csi2rx_buffer_queue,
	.start_streaming = ti_csi2rx_start_streaming,
	.stop_streaming = ti_csi2rx_stop_streaming,
};

static int ti_csi2rx_init_vb2q(struct ti_csi2rx_dev *csi)
{
	struct vb2_queue *q = &csi->vidq;
	int ret;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->drv_priv = csi;
	q->buf_struct_size = sizeof(struct ti_csi2rx_buffer);
	q->ops = &csi_vb2_qops;
	q->mem_ops = &vb2_dma_contig_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->dev = dmaengine_get_dma_device(csi->dma.chan);
	q->lock = &csi->mutex;
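	/*
	 * vb2 only calls start_streaming() once at least this many buffers
	 * are queued, which ti_csi2rx_start_streaming() relies on.
	 */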
	q->min_queued_buffers = 1;
	q->allow_cache_hints = 1;

	ret = vb2_queue_init(q);
	if (ret)
		return ret;

	csi->vdev.queue = q;

	return 0;
}

static int ti_csi2rx_link_validate(struct media_link *link)
{
	struct media_entity *entity = link->sink->entity;
	struct video_device *vdev = media_entity_to_video_device(entity);
	struct ti_csi2rx_dev *csi = container_of(vdev, struct ti_csi2rx_dev, vdev);
	struct v4l2_pix_format *csi_fmt = &csi->v_fmt.fmt.pix;
	struct v4l2_subdev_format source_fmt = {
		.which	= V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad	= link->source->index,
	};
	const struct ti_csi2rx_fmt *ti_fmt;
	int ret;

	ret = v4l2_subdev_call_state_active(csi->source, pad,
					    get_fmt, &source_fmt);
	if (ret)
		return ret;

	if (source_fmt.format.width != csi_fmt->width) {
		dev_dbg(csi->dev, "Width does not match (source %u, sink %u)\n",
			source_fmt.format.width, csi_fmt->width);
		return -EPIPE;
	}

	if (source_fmt.format.height != csi_fmt->height) {
		dev_dbg(csi->dev, "Height does not match (source %u, sink %u)\n",
			source_fmt.format.height, csi_fmt->height);
		return -EPIPE;
	}

	if (source_fmt.format.field != csi_fmt->field &&
	    csi_fmt->field != V4L2_FIELD_NONE) {
		dev_dbg(csi->dev, "Field does not match (source %u, sink %u)\n",
			source_fmt.format.field, csi_fmt->field);
		return -EPIPE;
	}

	ti_fmt = find_format_by_code(source_fmt.format.code);
	if (!ti_fmt) {
		dev_dbg(csi->dev, "Media bus format 0x%x not supported\n",
			source_fmt.format.code);
		return -EPIPE;
	}

	if (ti_fmt->fourcc != csi_fmt->pixelformat) {
		dev_dbg(csi->dev,
			"Cannot transform source fmt 0x%x to sink fmt 0x%x\n",
			ti_fmt->fourcc, csi_fmt->pixelformat);
		return -EPIPE;
	}

	return 0;
}

static const struct media_entity_operations ti_csi2rx_video_entity_ops = {
	.link_validate = ti_csi2rx_link_validate,
};

static int ti_csi2rx_init_dma(struct ti_csi2rx_dev *csi)
{
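	/*
	 * The PSI-L endpoint presents data in 16-byte words, so the DMA
	 * slave bus width is set to match PSIL_WORD_SIZE_BYTES.
	 */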
	struct dma_slave_config cfg = {
		.src_addr_width = DMA_SLAVE_BUSWIDTH_16_BYTES,
	};
	int ret;

	INIT_LIST_HEAD(&csi->dma.queue);
	INIT_LIST_HEAD(&csi->dma.submitted);
	spin_lock_init(&csi->dma.lock);

	csi->dma.state = TI_CSI2RX_DMA_STOPPED;

	csi->dma.chan = dma_request_chan(csi->dev, "rx0");
	if (IS_ERR(csi->dma.chan))
		return PTR_ERR(csi->dma.chan);

	ret = dmaengine_slave_config(csi->dma.chan, &cfg);
	if (ret) {
		dma_release_channel(csi->dma.chan);
		return ret;
	}

	csi->dma.drain.len = DRAIN_BUFFER_SIZE;
	csi->dma.drain.vaddr = dma_alloc_coherent(csi->dev, csi->dma.drain.len,
						  &csi->dma.drain.paddr,
						  GFP_KERNEL);
	if (!csi->dma.drain.vaddr)
		return -ENOMEM;

	return 0;
}

static int ti_csi2rx_v4l2_init(struct ti_csi2rx_dev *csi)
{
	struct media_device *mdev = &csi->mdev;
	struct video_device *vdev = &csi->vdev;
	const struct ti_csi2rx_fmt *fmt;
	struct v4l2_pix_format *pix_fmt = &csi->v_fmt.fmt.pix;
	int ret;

	fmt = find_format_by_fourcc(V4L2_PIX_FMT_UYVY);
	if (!fmt)
		return -EINVAL;

	pix_fmt->width = 640;
	pix_fmt->height = 480;
	pix_fmt->field = V4L2_FIELD_NONE;
	pix_fmt->colorspace = V4L2_COLORSPACE_SRGB;
	pix_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
	pix_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
	pix_fmt->xfer_func = V4L2_XFER_FUNC_SRGB;

	ti_csi2rx_fill_fmt(fmt, &csi->v_fmt);

	mdev->dev = csi->dev;
	mdev->hw_revision = 1;
	strscpy(mdev->model, "TI-CSI2RX", sizeof(mdev->model));

	media_device_init(mdev);

	strscpy(vdev->name, TI_CSI2RX_MODULE_NAME, sizeof(vdev->name));
	vdev->v4l2_dev = &csi->v4l2_dev;
	vdev->vfl_dir = VFL_DIR_RX;
	vdev->fops = &csi_fops;
	vdev->ioctl_ops = &csi_ioctl_ops;
	vdev->release = video_device_release_empty;
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
			    V4L2_CAP_IO_MC;
	vdev->lock = &csi->mutex;
	video_set_drvdata(vdev, csi);

	csi->pad.flags = MEDIA_PAD_FL_SINK;
	vdev->entity.ops = &ti_csi2rx_video_entity_ops;
	ret = media_entity_pads_init(&csi->vdev.entity, 1, &csi->pad);
	if (ret)
		return ret;

	csi->v4l2_dev.mdev = mdev;

	ret = v4l2_device_register(csi->dev, &csi->v4l2_dev);
	if (ret)
		return ret;

	ret = media_device_register(mdev);
	if (ret) {
		v4l2_device_unregister(&csi->v4l2_dev);
		media_device_cleanup(mdev);
		return ret;
	}

	return 0;
}

static void ti_csi2rx_cleanup_dma(struct ti_csi2rx_dev *csi)
{
	dma_free_coherent(csi->dev, csi->dma.drain.len,
			  csi->dma.drain.vaddr, csi->dma.drain.paddr);
	csi->dma.drain.vaddr = NULL;
	dma_release_channel(csi->dma.chan);
}

static void ti_csi2rx_cleanup_v4l2(struct ti_csi2rx_dev *csi)
{
	media_device_unregister(&csi->mdev);
	v4l2_device_unregister(&csi->v4l2_dev);
	media_device_cleanup(&csi->mdev);
}

static void ti_csi2rx_cleanup_subdev(struct ti_csi2rx_dev *csi)
{
	v4l2_async_nf_unregister(&csi->notifier);
	v4l2_async_nf_cleanup(&csi->notifier);
}

static void ti_csi2rx_cleanup_vb2q(struct ti_csi2rx_dev *csi)
{
	vb2_queue_release(&csi->vidq);
}

static int ti_csi2rx_probe(struct platform_device *pdev)
{
	struct ti_csi2rx_dev *csi;
	int ret;

	csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
	if (!csi)
		return -ENOMEM;

	csi->dev = &pdev->dev;
	platform_set_drvdata(pdev, csi);

	mutex_init(&csi->mutex);
	csi->shim = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(csi->shim)) {
		ret = PTR_ERR(csi->shim);
		goto err_mutex;
	}

	ret = ti_csi2rx_init_dma(csi);
	if (ret)
		goto err_mutex;

	ret = ti_csi2rx_v4l2_init(csi);
	if (ret)
		goto err_dma;

	ret = ti_csi2rx_init_vb2q(csi);
	if (ret)
		goto err_v4l2;

	ret = ti_csi2rx_notifier_register(csi);
	if (ret)
		goto err_vb2q;

	ret = devm_of_platform_populate(csi->dev);
	if (ret) {
		dev_err(csi->dev, "Failed to create children: %d\n", ret);
		goto err_subdev;
	}

	return 0;

err_subdev:
	ti_csi2rx_cleanup_subdev(csi);
err_vb2q:
	ti_csi2rx_cleanup_vb2q(csi);
err_v4l2:
	ti_csi2rx_cleanup_v4l2(csi);
err_dma:
	ti_csi2rx_cleanup_dma(csi);
err_mutex:
	mutex_destroy(&csi->mutex);
	return ret;
}

static void ti_csi2rx_remove(struct platform_device *pdev)
{
	struct ti_csi2rx_dev *csi = platform_get_drvdata(pdev);

	video_unregister_device(&csi->vdev);

	ti_csi2rx_cleanup_vb2q(csi);
	ti_csi2rx_cleanup_subdev(csi);
	ti_csi2rx_cleanup_v4l2(csi);
	ti_csi2rx_cleanup_dma(csi);

	mutex_destroy(&csi->mutex);
}

static const struct of_device_id ti_csi2rx_of_match[] = {
	{ .compatible = "ti,j721e-csi2rx-shim", },
	{ },
};
MODULE_DEVICE_TABLE(of, ti_csi2rx_of_match);

static struct platform_driver ti_csi2rx_pdrv = {
	.probe = ti_csi2rx_probe,
	.remove = ti_csi2rx_remove,
	.driver = {
		.name = TI_CSI2RX_MODULE_NAME,
		.of_match_table = ti_csi2rx_of_match,
	},
};

module_platform_driver(ti_csi2rx_pdrv);

MODULE_DESCRIPTION("TI J721E CSI2 RX Driver");
MODULE_AUTHOR("Jai Luthra <j-luthra@ti.com>");
MODULE_LICENSE("GPL");