xref: /linux/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c (revision 6e9a12f85a7567bb9a41d5230468886bd6a27b20)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * TI CSI2RX Shim Wrapper Driver
4  *
5  * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
6  *
7  * Author: Pratyush Yadav <p.yadav@ti.com>
8  * Author: Jai Luthra <j-luthra@ti.com>
9  */
10 
11 #include <linux/bitfield.h>
12 #include <linux/dmaengine.h>
13 #include <linux/module.h>
14 #include <linux/of_platform.h>
15 #include <linux/platform_device.h>
16 
17 #include <media/mipi-csi2.h>
18 #include <media/v4l2-device.h>
19 #include <media/v4l2-ioctl.h>
20 #include <media/v4l2-mc.h>
21 #include <media/videobuf2-dma-contig.h>
22 
23 #define TI_CSI2RX_MODULE_NAME		"j721e-csi2rx"
24 
25 #define SHIM_CNTL			0x10
26 #define SHIM_CNTL_PIX_RST		BIT(0)
27 
28 #define SHIM_DMACNTX			0x20
29 #define SHIM_DMACNTX_EN			BIT(31)
30 #define SHIM_DMACNTX_YUV422		GENMASK(27, 26)
31 #define SHIM_DMACNTX_SIZE		GENMASK(21, 20)
32 #define SHIM_DMACNTX_FMT		GENMASK(5, 0)
33 #define SHIM_DMACNTX_YUV422_MODE_11	3
34 #define SHIM_DMACNTX_SIZE_8		0
35 #define SHIM_DMACNTX_SIZE_16		1
36 #define SHIM_DMACNTX_SIZE_32		2
37 
38 #define SHIM_PSI_CFG0			0x24
39 #define SHIM_PSI_CFG0_SRC_TAG		GENMASK(15, 0)
40 #define SHIM_PSI_CFG0_DST_TAG		GENMASK(31, 16)
41 
42 #define PSIL_WORD_SIZE_BYTES		16
43 /*
44  * There are no hard limits on the width or height. The DMA engine can handle
45  * all sizes. The max width and height are arbitrary numbers for this driver.
46  * Use 16K * 16K as the arbitrary limit. It is large enough that it is unlikely
47  * the limit will be hit in practice.
48  */
49 #define MAX_WIDTH_BYTES			SZ_16K
50 #define MAX_HEIGHT_LINES		SZ_16K
51 
52 #define DRAIN_TIMEOUT_MS		50
53 #define DRAIN_BUFFER_SIZE		SZ_32K
54 
/* Describes one pixel format the shim DMA path can capture. */
struct ti_csi2rx_fmt {
	u32				fourcc;	/* Four character code. */
	u32				code;	/* Mbus code. */
	u32				csi_dt;	/* CSI Data type. */
	u8				bpp;	/* Bits per pixel. */
	u8				size;	/* Data size shift when unpacking. */
};
62 
/* Driver-private wrapper around a vb2 buffer. */
struct ti_csi2rx_buffer {
	/* Common v4l2 buffer. Must be first. */
	struct vb2_v4l2_buffer		vb;
	struct list_head		list;	/* Entry in dma->queue or dma->submitted. */
	struct ti_csi2rx_dev		*csi;	/* Back-pointer used by the DMA callback. */
};
69 
/* State of the capture DMA engine, tracked under ti_csi2rx_dma.lock. */
enum ti_csi2rx_dma_state {
	TI_CSI2RX_DMA_STOPPED,	/* Streaming not started yet. */
	TI_CSI2RX_DMA_IDLE,	/* Streaming but no pending DMA operation. */
	TI_CSI2RX_DMA_ACTIVE,	/* Streaming and pending DMA operation. */
};
75 
/* Everything related to the capture DMA channel. */
struct ti_csi2rx_dma {
	/* Protects all fields in this struct. */
	spinlock_t			lock;
	struct dma_chan			*chan;
	/* Buffers queued to the driver, waiting to be processed by DMA. */
	struct list_head		queue;
	enum ti_csi2rx_dma_state	state;
	/*
	 * Queue of buffers submitted to DMA engine. The head is the
	 * transfer expected to complete next.
	 */
	struct list_head		submitted;
	/* Buffer to drain stale data from PSI-L endpoint */
	struct {
		void			*vaddr;	/* CPU address of the drain buffer. */
		dma_addr_t		paddr;	/* DMA address of the drain buffer. */
		size_t			len;	/* Size in bytes (DRAIN_BUFFER_SIZE). */
	} drain;
};
94 
/* Per-device context for one CSI2RX shim instance. */
struct ti_csi2rx_dev {
	struct device			*dev;
	void __iomem			*shim;	/* Shim register window (SHIM_* offsets). */
	struct v4l2_device		v4l2_dev;
	struct video_device		vdev;
	struct media_device		mdev;
	struct media_pipeline		pipe;
	struct media_pad		pad;	/* Single sink pad of the video device. */
	struct v4l2_async_notifier	notifier;
	struct v4l2_subdev		*source;	/* Bound CSI bridge subdev. */
	struct vb2_queue		vidq;
	struct mutex			mutex; /* To serialize ioctls. */
	struct v4l2_format		v_fmt;	/* Currently active capture format. */
	struct ti_csi2rx_dma		dma;
	u32				sequence;	/* Frame sequence counter for vb2. */
};
111 
/*
 * Table of supported capture formats. Each entry maps one media bus code
 * to exactly one pixel format (this 1-to-1 mapping is relied upon by
 * ti_csi2rx_enum_fmt_vid_cap() and ti_csi2rx_link_validate()). The first
 * entry is used as the fallback in ti_csi2rx_try_fmt_vid_cap().
 */
static const struct ti_csi2rx_fmt ti_csi2rx_formats[] = {
	{
		.fourcc			= V4L2_PIX_FMT_YUYV,
		.code			= MEDIA_BUS_FMT_YUYV8_1X16,
		.csi_dt			= MIPI_CSI2_DT_YUV422_8B,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_UYVY,
		.code			= MEDIA_BUS_FMT_UYVY8_1X16,
		.csi_dt			= MIPI_CSI2_DT_YUV422_8B,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_YVYU,
		.code			= MEDIA_BUS_FMT_YVYU8_1X16,
		.csi_dt			= MIPI_CSI2_DT_YUV422_8B,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_VYUY,
		.code			= MEDIA_BUS_FMT_VYUY8_1X16,
		.csi_dt			= MIPI_CSI2_DT_YUV422_8B,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_SBGGR8,
		.code			= MEDIA_BUS_FMT_SBGGR8_1X8,
		.csi_dt			= MIPI_CSI2_DT_RAW8,
		.bpp			= 8,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGBRG8,
		.code			= MEDIA_BUS_FMT_SGBRG8_1X8,
		.csi_dt			= MIPI_CSI2_DT_RAW8,
		.bpp			= 8,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGRBG8,
		.code			= MEDIA_BUS_FMT_SGRBG8_1X8,
		.csi_dt			= MIPI_CSI2_DT_RAW8,
		.bpp			= 8,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_SRGGB8,
		.code			= MEDIA_BUS_FMT_SRGGB8_1X8,
		.csi_dt			= MIPI_CSI2_DT_RAW8,
		.bpp			= 8,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_GREY,
		.code			= MEDIA_BUS_FMT_Y8_1X8,
		.csi_dt			= MIPI_CSI2_DT_RAW8,
		.bpp			= 8,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_SBGGR10,
		.code			= MEDIA_BUS_FMT_SBGGR10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGBRG10,
		.code			= MEDIA_BUS_FMT_SGBRG10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGRBG10,
		.code			= MEDIA_BUS_FMT_SGRBG10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SRGGB10,
		.code			= MEDIA_BUS_FMT_SRGGB10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_RGB565X,
		.code			= MEDIA_BUS_FMT_RGB565_1X16,
		.csi_dt			= MIPI_CSI2_DT_RGB565,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_XBGR32,
		.code			= MEDIA_BUS_FMT_RGB888_1X24,
		.csi_dt			= MIPI_CSI2_DT_RGB888,
		.bpp			= 32,
		.size			= SHIM_DMACNTX_SIZE_32,
	}, {
		.fourcc			= V4L2_PIX_FMT_RGBX32,
		.code			= MEDIA_BUS_FMT_BGR888_1X24,
		.csi_dt			= MIPI_CSI2_DT_RGB888,
		.bpp			= 32,
		.size			= SHIM_DMACNTX_SIZE_32,
	},

	/* More formats can be supported but they are not listed for now. */
};
213 
214 /* Forward declaration needed by ti_csi2rx_dma_callback. */
215 static int ti_csi2rx_start_dma(struct ti_csi2rx_dev *csi,
216 			       struct ti_csi2rx_buffer *buf);
217 
218 static const struct ti_csi2rx_fmt *find_format_by_fourcc(u32 pixelformat)
219 {
220 	unsigned int i;
221 
222 	for (i = 0; i < ARRAY_SIZE(ti_csi2rx_formats); i++) {
223 		if (ti_csi2rx_formats[i].fourcc == pixelformat)
224 			return &ti_csi2rx_formats[i];
225 	}
226 
227 	return NULL;
228 }
229 
230 static const struct ti_csi2rx_fmt *find_format_by_code(u32 code)
231 {
232 	unsigned int i;
233 
234 	for (i = 0; i < ARRAY_SIZE(ti_csi2rx_formats); i++) {
235 		if (ti_csi2rx_formats[i].code == code)
236 			return &ti_csi2rx_formats[i];
237 	}
238 
239 	return NULL;
240 }
241 
242 static void ti_csi2rx_fill_fmt(const struct ti_csi2rx_fmt *csi_fmt,
243 			       struct v4l2_format *v4l2_fmt)
244 {
245 	struct v4l2_pix_format *pix = &v4l2_fmt->fmt.pix;
246 	unsigned int pixels_in_word;
247 
248 	pixels_in_word = PSIL_WORD_SIZE_BYTES * 8 / csi_fmt->bpp;
249 
250 	/* Clamp width and height to sensible maximums (16K x 16K) */
251 	pix->width = clamp_t(unsigned int, pix->width,
252 			     pixels_in_word,
253 			     MAX_WIDTH_BYTES * 8 / csi_fmt->bpp);
254 	pix->height = clamp_t(unsigned int, pix->height, 1, MAX_HEIGHT_LINES);
255 
256 	/* Width should be a multiple of transfer word-size */
257 	pix->width = rounddown(pix->width, pixels_in_word);
258 
259 	v4l2_fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
260 	pix->pixelformat = csi_fmt->fourcc;
261 	pix->bytesperline = pix->width * (csi_fmt->bpp / 8);
262 	pix->sizeimage = pix->bytesperline * pix->height;
263 }
264 
265 static int ti_csi2rx_querycap(struct file *file, void *priv,
266 			      struct v4l2_capability *cap)
267 {
268 	strscpy(cap->driver, TI_CSI2RX_MODULE_NAME, sizeof(cap->driver));
269 	strscpy(cap->card, TI_CSI2RX_MODULE_NAME, sizeof(cap->card));
270 
271 	return 0;
272 }
273 
274 static int ti_csi2rx_enum_fmt_vid_cap(struct file *file, void *priv,
275 				      struct v4l2_fmtdesc *f)
276 {
277 	const struct ti_csi2rx_fmt *fmt = NULL;
278 
279 	if (f->mbus_code) {
280 		/* 1-to-1 mapping between bus formats and pixel formats */
281 		if (f->index > 0)
282 			return -EINVAL;
283 
284 		fmt = find_format_by_code(f->mbus_code);
285 	} else {
286 		if (f->index >= ARRAY_SIZE(ti_csi2rx_formats))
287 			return -EINVAL;
288 
289 		fmt = &ti_csi2rx_formats[f->index];
290 	}
291 
292 	if (!fmt)
293 		return -EINVAL;
294 
295 	f->pixelformat = fmt->fourcc;
296 	memset(f->reserved, 0, sizeof(f->reserved));
297 	f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
298 
299 	return 0;
300 }
301 
302 static int ti_csi2rx_g_fmt_vid_cap(struct file *file, void *prov,
303 				   struct v4l2_format *f)
304 {
305 	struct ti_csi2rx_dev *csi = video_drvdata(file);
306 
307 	*f = csi->v_fmt;
308 
309 	return 0;
310 }
311 
312 static int ti_csi2rx_try_fmt_vid_cap(struct file *file, void *priv,
313 				     struct v4l2_format *f)
314 {
315 	const struct ti_csi2rx_fmt *fmt;
316 
317 	/*
318 	 * Default to the first format if the requested pixel format code isn't
319 	 * supported.
320 	 */
321 	fmt = find_format_by_fourcc(f->fmt.pix.pixelformat);
322 	if (!fmt)
323 		fmt = &ti_csi2rx_formats[0];
324 
325 	/* Interlaced formats are not supported. */
326 	f->fmt.pix.field = V4L2_FIELD_NONE;
327 
328 	ti_csi2rx_fill_fmt(fmt, f);
329 
330 	return 0;
331 }
332 
333 static int ti_csi2rx_s_fmt_vid_cap(struct file *file, void *priv,
334 				   struct v4l2_format *f)
335 {
336 	struct ti_csi2rx_dev *csi = video_drvdata(file);
337 	struct vb2_queue *q = &csi->vidq;
338 	int ret;
339 
340 	if (vb2_is_busy(q))
341 		return -EBUSY;
342 
343 	ret = ti_csi2rx_try_fmt_vid_cap(file, priv, f);
344 	if (ret < 0)
345 		return ret;
346 
347 	csi->v_fmt = *f;
348 
349 	return 0;
350 }
351 
352 static int ti_csi2rx_enum_framesizes(struct file *file, void *fh,
353 				     struct v4l2_frmsizeenum *fsize)
354 {
355 	const struct ti_csi2rx_fmt *fmt;
356 	unsigned int pixels_in_word;
357 
358 	fmt = find_format_by_fourcc(fsize->pixel_format);
359 	if (!fmt || fsize->index != 0)
360 		return -EINVAL;
361 
362 	/*
363 	 * Number of pixels in one PSI-L word. The transfer happens in multiples
364 	 * of PSI-L word sizes.
365 	 */
366 	pixels_in_word = PSIL_WORD_SIZE_BYTES * 8 / fmt->bpp;
367 
368 	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
369 	fsize->stepwise.min_width = pixels_in_word;
370 	fsize->stepwise.max_width = rounddown(MAX_WIDTH_BYTES * 8 / fmt->bpp,
371 					      pixels_in_word);
372 	fsize->stepwise.step_width = pixels_in_word;
373 	fsize->stepwise.min_height = 1;
374 	fsize->stepwise.max_height = MAX_HEIGHT_LINES;
375 	fsize->stepwise.step_height = 1;
376 
377 	return 0;
378 }
379 
/* ioctl handlers: format ops are custom, buffer ops delegate to vb2. */
static const struct v4l2_ioctl_ops csi_ioctl_ops = {
	.vidioc_querycap      = ti_csi2rx_querycap,
	.vidioc_enum_fmt_vid_cap = ti_csi2rx_enum_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap = ti_csi2rx_try_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap = ti_csi2rx_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap = ti_csi2rx_s_fmt_vid_cap,
	.vidioc_enum_framesizes = ti_csi2rx_enum_framesizes,
	.vidioc_reqbufs       = vb2_ioctl_reqbufs,
	.vidioc_create_bufs   = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf   = vb2_ioctl_prepare_buf,
	.vidioc_querybuf      = vb2_ioctl_querybuf,
	.vidioc_qbuf          = vb2_ioctl_qbuf,
	.vidioc_dqbuf         = vb2_ioctl_dqbuf,
	.vidioc_expbuf        = vb2_ioctl_expbuf,
	.vidioc_streamon      = vb2_ioctl_streamon,
	.vidioc_streamoff     = vb2_ioctl_streamoff,
};
397 
/* File operations: all I/O paths are serviced by the vb2 helpers. */
static const struct v4l2_file_operations csi_fops = {
	.owner = THIS_MODULE,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.read = vb2_fop_read,
	.poll = vb2_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = vb2_fop_mmap,
};
407 
408 static int csi_async_notifier_bound(struct v4l2_async_notifier *notifier,
409 				    struct v4l2_subdev *subdev,
410 				    struct v4l2_async_connection *asc)
411 {
412 	struct ti_csi2rx_dev *csi = dev_get_drvdata(notifier->v4l2_dev->dev);
413 
414 	csi->source = subdev;
415 
416 	return 0;
417 }
418 
/*
 * Async notifier .complete: all subdevs are bound. Register the video
 * device, link the source subdev to our sink pad, and create the subdev
 * device nodes. The video device is unregistered again on any failure.
 */
static int csi_async_notifier_complete(struct v4l2_async_notifier *notifier)
{
	struct ti_csi2rx_dev *csi = dev_get_drvdata(notifier->v4l2_dev->dev);
	struct video_device *vdev = &csi->vdev;
	int ret;

	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (ret)
		return ret;

	/* Immutable, enabled link: the pipeline topology is fixed. */
	ret = v4l2_create_fwnode_links_to_pad(csi->source, &csi->pad,
					      MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);

	if (ret) {
		video_unregister_device(vdev);
		return ret;
	}

	ret = v4l2_device_register_subdev_nodes(&csi->v4l2_dev);
	if (ret)
		video_unregister_device(vdev);

	return ret;
}
443 
/* Async notifier callbacks for binding the CSI bridge subdev. */
static const struct v4l2_async_notifier_operations csi_async_notifier_ops = {
	.bound = csi_async_notifier_bound,
	.complete = csi_async_notifier_complete,
};
448 
/*
 * Set up and register the async notifier that waits for the "csi-bridge"
 * child node's subdev to probe. Returns 0 on success or a negative errno;
 * the notifier is cleaned up on every failure path.
 */
static int ti_csi2rx_notifier_register(struct ti_csi2rx_dev *csi)
{
	struct fwnode_handle *fwnode;
	struct v4l2_async_connection *asc;
	struct device_node *node;
	int ret;

	node = of_get_child_by_name(csi->dev->of_node, "csi-bridge");
	if (!node)
		return -EINVAL;

	fwnode = of_fwnode_handle(node);
	if (!fwnode) {
		of_node_put(node);
		return -EINVAL;
	}

	v4l2_async_nf_init(&csi->notifier, &csi->v4l2_dev);
	csi->notifier.ops = &csi_async_notifier_ops;

	asc = v4l2_async_nf_add_fwnode(&csi->notifier, fwnode,
				       struct v4l2_async_connection);
	/* The fwnode was referenced by the notifier; drop our node ref. */
	of_node_put(node);
	if (IS_ERR(asc)) {
		v4l2_async_nf_cleanup(&csi->notifier);
		return PTR_ERR(asc);
	}

	ret = v4l2_async_nf_register(&csi->notifier);
	if (ret) {
		v4l2_async_nf_cleanup(&csi->notifier);
		return ret;
	}

	return 0;
}
485 
/*
 * Program the shim registers for the active capture format: release the
 * pixel interface reset, enable the DMA context with the CSI data type,
 * YUV byte-swap mode and unpacking size, and zero the PSI-L tags.
 *
 * The active format was validated by s_fmt (and the default set in
 * ti_csi2rx_v4l2_init() is in the table), so the lookup cannot fail here.
 */
static void ti_csi2rx_setup_shim(struct ti_csi2rx_dev *csi)
{
	const struct ti_csi2rx_fmt *fmt;
	unsigned int reg;

	fmt = find_format_by_fourcc(csi->v_fmt.fmt.pix.pixelformat);

	/* De-assert the pixel interface reset. */
	reg = SHIM_CNTL_PIX_RST;
	writel(reg, csi->shim + SHIM_CNTL);

	reg = SHIM_DMACNTX_EN;
	reg |= FIELD_PREP(SHIM_DMACNTX_FMT, fmt->csi_dt);

	/*
	 * The hardware assumes incoming YUV422 8-bit data on MIPI CSI2 bus
	 * follows the spec and is packed in the order U0 -> Y0 -> V0 -> Y1 ->
	 * ...
	 *
	 * There is an option to swap the bytes around before storing in
	 * memory, to achieve different pixel formats:
	 *
	 * Byte3 <----------- Byte0
	 * [ Y1 ][ V0 ][ Y0 ][ U0 ]	MODE 11
	 * [ Y1 ][ U0 ][ Y0 ][ V0 ]	MODE 10
	 * [ V0 ][ Y1 ][ U0 ][ Y0 ]	MODE 01
	 * [ U0 ][ Y1 ][ V0 ][ Y0 ]	MODE 00
	 *
	 * We don't have any requirement to change pixelformat from what is
	 * coming from the source, so we keep it in MODE 11, which does not
	 * swap any bytes when storing in memory.
	 */
	switch (fmt->fourcc) {
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_VYUY:
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_YVYU:
		reg |= FIELD_PREP(SHIM_DMACNTX_YUV422,
				  SHIM_DMACNTX_YUV422_MODE_11);
		break;
	default:
		/* Ignore if not YUV 4:2:2 */
		break;
	}

	reg |= FIELD_PREP(SHIM_DMACNTX_SIZE, fmt->size);

	writel(reg, csi->shim + SHIM_DMACNTX);

	/* No routing: source and destination PSI-L tags are left at zero. */
	reg = FIELD_PREP(SHIM_PSI_CFG0_SRC_TAG, 0) |
	      FIELD_PREP(SHIM_PSI_CFG0_DST_TAG, 0);
	writel(reg, csi->shim + SHIM_PSI_CFG0);
}
539 
/* DMA completion callback: wake the waiter in ti_csi2rx_drain_dma(). */
static void ti_csi2rx_drain_callback(void *param)
{
	complete((struct completion *)param);
}
546 
547 /*
548  * Drain the stale data left at the PSI-L endpoint.
549  *
550  * This might happen if no buffers are queued in time but source is still
551  * streaming. In multi-stream scenarios this can happen when one stream is
552  * stopped but other is still streaming, and thus module-level pixel reset is
553  * not asserted.
554  *
555  * To prevent that stale data corrupting the subsequent transactions, it is
556  * required to issue DMA requests to drain it out.
557  */
/*
 * Issue one DMA transfer into the dedicated drain buffer and wait (up to
 * DRAIN_TIMEOUT_MS) for it to complete, flushing stale data out of the
 * PSI-L endpoint. Returns 0 on success, -EIO/-ETIMEDOUT or a submit
 * error otherwise. Must be called without dma->lock held: it sleeps.
 */
static int ti_csi2rx_drain_dma(struct ti_csi2rx_dev *csi)
{
	struct dma_async_tx_descriptor *desc;
	struct completion drain_complete;
	dma_cookie_t cookie;
	int ret;

	init_completion(&drain_complete);

	desc = dmaengine_prep_slave_single(csi->dma.chan, csi->dma.drain.paddr,
					   csi->dma.drain.len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EIO;
		goto out;
	}

	desc->callback = ti_csi2rx_drain_callback;
	desc->callback_param = &drain_complete;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		goto out;

	dma_async_issue_pending(csi->dma.chan);

	if (!wait_for_completion_timeout(&drain_complete,
					 msecs_to_jiffies(DRAIN_TIMEOUT_MS))) {
		/* Nothing was pending to drain; cancel the transfer. */
		dmaengine_terminate_sync(csi->dma.chan);
		dev_dbg(csi->dev, "DMA transfer timed out for drain buffer\n");
		ret = -ETIMEDOUT;
		goto out;
	}
out:
	return ret;
}
595 
/*
 * DMA completion callback for a capture buffer: hand the finished frame
 * back to vb2, then submit any buffers that were queued in the meantime.
 * Runs in the dmaengine's callback context, so dma->lock is taken with
 * IRQs saved.
 */
static void ti_csi2rx_dma_callback(void *param)
{
	struct ti_csi2rx_buffer *buf = param;
	struct ti_csi2rx_dev *csi = buf->csi;
	struct ti_csi2rx_dma *dma = &csi->dma;
	unsigned long flags;

	/*
	 * TODO: Derive the sequence number from the CSI2RX frame number
	 * hardware monitor registers.
	 */
	buf->vb.vb2_buf.timestamp = ktime_get_ns();
	buf->vb.sequence = csi->sequence++;

	spin_lock_irqsave(&dma->lock, flags);

	/* Completions must arrive in submission order. */
	WARN_ON(!list_is_first(&buf->list, &dma->submitted));
	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
	list_del(&buf->list);

	/* If there are more buffers to process then start their transfer. */
	while (!list_empty(&dma->queue)) {
		buf = list_entry(dma->queue.next, struct ti_csi2rx_buffer, list);

		if (ti_csi2rx_start_dma(csi, buf)) {
			dev_err(csi->dev, "Failed to queue the next buffer for DMA\n");
			list_del(&buf->list);
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		} else {
			list_move_tail(&buf->list, &dma->submitted);
		}
	}

	/* No transfers in flight: buffer_queue() must restart DMA later. */
	if (list_empty(&dma->submitted))
		dma->state = TI_CSI2RX_DMA_IDLE;

	spin_unlock_irqrestore(&dma->lock, flags);
}
634 
635 static int ti_csi2rx_start_dma(struct ti_csi2rx_dev *csi,
636 			       struct ti_csi2rx_buffer *buf)
637 {
638 	unsigned long addr;
639 	struct dma_async_tx_descriptor *desc;
640 	size_t len = csi->v_fmt.fmt.pix.sizeimage;
641 	dma_cookie_t cookie;
642 	int ret = 0;
643 
644 	addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
645 	desc = dmaengine_prep_slave_single(csi->dma.chan, addr, len,
646 					   DMA_DEV_TO_MEM,
647 					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
648 	if (!desc)
649 		return -EIO;
650 
651 	desc->callback = ti_csi2rx_dma_callback;
652 	desc->callback_param = buf;
653 
654 	cookie = dmaengine_submit(desc);
655 	ret = dma_submit_error(cookie);
656 	if (ret)
657 		return ret;
658 
659 	dma_async_issue_pending(csi->dma.chan);
660 
661 	return 0;
662 }
663 
/*
 * Stop capture DMA: mark the channel stopped, drain any stale PSI-L data
 * if streaming was active, then terminate the channel. Sleeps (drain and
 * terminate_sync), so must not be called in atomic context.
 */
static void ti_csi2rx_stop_dma(struct ti_csi2rx_dev *csi)
{
	struct ti_csi2rx_dma *dma = &csi->dma;
	enum ti_csi2rx_dma_state state;
	unsigned long flags;
	int ret;

	/* Flip the state first so the callback stops requeueing buffers. */
	spin_lock_irqsave(&dma->lock, flags);
	state = csi->dma.state;
	dma->state = TI_CSI2RX_DMA_STOPPED;
	spin_unlock_irqrestore(&dma->lock, flags);

	if (state != TI_CSI2RX_DMA_STOPPED) {
		/*
		 * Normal DMA termination does not clean up pending data on
		 * the endpoint if multiple streams are running and only one
		 * is stopped, as the module-level pixel reset cannot be
		 * enforced before terminating DMA.
		 */
		ret = ti_csi2rx_drain_dma(csi);
		/* A drain timeout just means there was nothing to drain. */
		if (ret && ret != -ETIMEDOUT)
			dev_warn(csi->dev,
				 "Failed to drain DMA. Next frame might be bogus\n");
	}

	ret = dmaengine_terminate_sync(csi->dma.chan);
	if (ret)
		dev_err(csi->dev, "Failed to stop DMA: %d\n", ret);
}
693 
694 static void ti_csi2rx_cleanup_buffers(struct ti_csi2rx_dev *csi,
695 				      enum vb2_buffer_state state)
696 {
697 	struct ti_csi2rx_dma *dma = &csi->dma;
698 	struct ti_csi2rx_buffer *buf, *tmp;
699 	unsigned long flags;
700 
701 	spin_lock_irqsave(&dma->lock, flags);
702 	list_for_each_entry_safe(buf, tmp, &csi->dma.queue, list) {
703 		list_del(&buf->list);
704 		vb2_buffer_done(&buf->vb.vb2_buf, state);
705 	}
706 	list_for_each_entry_safe(buf, tmp, &csi->dma.submitted, list) {
707 		list_del(&buf->list);
708 		vb2_buffer_done(&buf->vb.vb2_buf, state);
709 	}
710 	spin_unlock_irqrestore(&dma->lock, flags);
711 }
712 
713 static int ti_csi2rx_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
714 				 unsigned int *nplanes, unsigned int sizes[],
715 				 struct device *alloc_devs[])
716 {
717 	struct ti_csi2rx_dev *csi = vb2_get_drv_priv(q);
718 	unsigned int size = csi->v_fmt.fmt.pix.sizeimage;
719 
720 	if (*nplanes) {
721 		if (sizes[0] < size)
722 			return -EINVAL;
723 		size = sizes[0];
724 	}
725 
726 	*nplanes = 1;
727 	sizes[0] = size;
728 
729 	return 0;
730 }
731 
732 static int ti_csi2rx_buffer_prepare(struct vb2_buffer *vb)
733 {
734 	struct ti_csi2rx_dev *csi = vb2_get_drv_priv(vb->vb2_queue);
735 	unsigned long size = csi->v_fmt.fmt.pix.sizeimage;
736 
737 	if (vb2_plane_size(vb, 0) < size) {
738 		dev_err(csi->dev, "Data will not fit into plane\n");
739 		return -EINVAL;
740 	}
741 
742 	vb2_set_plane_payload(vb, 0, size);
743 	return 0;
744 }
745 
/*
 * vb2 .buf_queue: hand a buffer to the driver. Normally it is appended to
 * the pending queue and the DMA callback submits it; if DMA has gone idle
 * (it ran out of buffers), drain stale data and restart DMA with this
 * buffer directly.
 */
static void ti_csi2rx_buffer_queue(struct vb2_buffer *vb)
{
	struct ti_csi2rx_dev *csi = vb2_get_drv_priv(vb->vb2_queue);
	struct ti_csi2rx_buffer *buf;
	struct ti_csi2rx_dma *dma = &csi->dma;
	bool restart_dma = false;
	unsigned long flags = 0;
	int ret;

	buf = container_of(vb, struct ti_csi2rx_buffer, vb.vb2_buf);
	buf->csi = csi;

	spin_lock_irqsave(&dma->lock, flags);
	/*
	 * Usually the DMA callback takes care of queueing the pending buffers.
	 * But if DMA has stalled due to lack of buffers, restart it now.
	 */
	if (dma->state == TI_CSI2RX_DMA_IDLE) {
		/*
		 * Do not restart DMA with the lock held because
		 * ti_csi2rx_drain_dma() might block for completion.
		 * There won't be a race on queueing DMA anyway since the
		 * callback is not being fired.
		 */
		restart_dma = true;
		/* Claim ACTIVE now so a concurrent queue takes the else path. */
		dma->state = TI_CSI2RX_DMA_ACTIVE;
	} else {
		list_add_tail(&buf->list, &dma->queue);
	}
	spin_unlock_irqrestore(&dma->lock, flags);

	if (restart_dma) {
		/*
		 * Once frames start dropping, some data gets stuck in the DMA
		 * pipeline somewhere. So the first DMA transfer after frame
		 * drops gives a partial frame. This is obviously not useful to
		 * the application and will only confuse it. Issue a DMA
		 * transaction to drain that up.
		 */
		ret = ti_csi2rx_drain_dma(csi);
		if (ret && ret != -ETIMEDOUT)
			dev_warn(csi->dev,
				 "Failed to drain DMA. Next frame might be bogus\n");

		spin_lock_irqsave(&dma->lock, flags);
		ret = ti_csi2rx_start_dma(csi, buf);
		if (ret) {
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
			dma->state = TI_CSI2RX_DMA_IDLE;
			spin_unlock_irqrestore(&dma->lock, flags);
			dev_err(csi->dev, "Failed to start DMA: %d\n", ret);
		} else {
			list_add_tail(&buf->list, &dma->submitted);
			spin_unlock_irqrestore(&dma->lock, flags);
		}
	}
}
803 
/*
 * vb2 .start_streaming: start the media pipeline, program the shim,
 * submit the first queued buffer to DMA and start the source subdev
 * stream. On failure all queued buffers are returned in QUEUED state.
 */
static int ti_csi2rx_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct ti_csi2rx_dev *csi = vb2_get_drv_priv(vq);
	struct ti_csi2rx_dma *dma = &csi->dma;
	struct ti_csi2rx_buffer *buf;
	unsigned long flags;
	int ret = 0;

	/* min_queued_buffers guarantees at least one buffer is queued. */
	spin_lock_irqsave(&dma->lock, flags);
	if (list_empty(&dma->queue))
		ret = -EIO;
	spin_unlock_irqrestore(&dma->lock, flags);
	if (ret)
		return ret;

	ret = video_device_pipeline_start(&csi->vdev, &csi->pipe);
	if (ret)
		goto err;

	ti_csi2rx_setup_shim(csi);

	csi->sequence = 0;

	spin_lock_irqsave(&dma->lock, flags);
	buf = list_entry(dma->queue.next, struct ti_csi2rx_buffer, list);

	ret = ti_csi2rx_start_dma(csi, buf);
	if (ret) {
		dev_err(csi->dev, "Failed to start DMA: %d\n", ret);
		spin_unlock_irqrestore(&dma->lock, flags);
		goto err_pipeline;
	}

	list_move_tail(&buf->list, &dma->submitted);
	dma->state = TI_CSI2RX_DMA_ACTIVE;
	spin_unlock_irqrestore(&dma->lock, flags);

	/* DMA must be ready before the source starts pushing data. */
	ret = v4l2_subdev_call(csi->source, video, s_stream, 1);
	if (ret)
		goto err_dma;

	return 0;

err_dma:
	ti_csi2rx_stop_dma(csi);
err_pipeline:
	video_device_pipeline_stop(&csi->vdev);
	/* Re-assert pixel reset and disable the DMA context. */
	writel(0, csi->shim + SHIM_CNTL);
	writel(0, csi->shim + SHIM_DMACNTX);
err:
	ti_csi2rx_cleanup_buffers(csi, VB2_BUF_STATE_QUEUED);
	return ret;
}
857 
/*
 * vb2 .stop_streaming: tear down the pipeline, reset the shim, stop the
 * source subdev and the DMA channel, and return all buffers in ERROR
 * state as required by vb2.
 */
static void ti_csi2rx_stop_streaming(struct vb2_queue *vq)
{
	struct ti_csi2rx_dev *csi = vb2_get_drv_priv(vq);
	int ret;

	video_device_pipeline_stop(&csi->vdev);

	/* Assert pixel reset and disable the DMA context. */
	writel(0, csi->shim + SHIM_CNTL);
	writel(0, csi->shim + SHIM_DMACNTX);

	ret = v4l2_subdev_call(csi->source, video, s_stream, 0);
	if (ret)
		dev_err(csi->dev, "Failed to stop subdev stream\n");

	ti_csi2rx_stop_dma(csi);
	ti_csi2rx_cleanup_buffers(csi, VB2_BUF_STATE_ERROR);
}
875 
/* videobuf2 queue operations for the capture queue. */
static const struct vb2_ops csi_vb2_qops = {
	.queue_setup = ti_csi2rx_queue_setup,
	.buf_prepare = ti_csi2rx_buffer_prepare,
	.buf_queue = ti_csi2rx_buffer_queue,
	.start_streaming = ti_csi2rx_start_streaming,
	.stop_streaming = ti_csi2rx_stop_streaming,
};
883 
/*
 * Initialize the vb2 capture queue with dma-contig allocations made
 * against the DMA engine's device, and attach it to the video device.
 * Requires csi->dma.chan, so must run after ti_csi2rx_init_dma().
 */
static int ti_csi2rx_init_vb2q(struct ti_csi2rx_dev *csi)
{
	struct vb2_queue *q = &csi->vidq;
	int ret;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->drv_priv = csi;
	q->buf_struct_size = sizeof(struct ti_csi2rx_buffer);
	q->ops = &csi_vb2_qops;
	q->mem_ops = &vb2_dma_contig_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	/* Allocate against the DMA device so buffers are reachable by it. */
	q->dev = dmaengine_get_dma_device(csi->dma.chan);
	q->lock = &csi->mutex;
	/* start_streaming needs at least one buffer queued. */
	q->min_queued_buffers = 1;
	q->allow_cache_hints = 1;

	ret = vb2_queue_init(q);
	if (ret)
		return ret;

	csi->vdev.queue = q;

	return 0;
}
909 
/*
 * Media link validation: the source subdev's active pad format must match
 * the video device's configured capture format in width, height, field
 * and (via the 1-to-1 bus-code mapping) pixel format. Returns 0 on match
 * or -EPIPE on any mismatch.
 */
static int ti_csi2rx_link_validate(struct media_link *link)
{
	struct media_entity *entity = link->sink->entity;
	struct video_device *vdev = media_entity_to_video_device(entity);
	struct ti_csi2rx_dev *csi = container_of(vdev, struct ti_csi2rx_dev, vdev);
	struct v4l2_pix_format *csi_fmt = &csi->v_fmt.fmt.pix;
	struct v4l2_subdev_format source_fmt = {
		.which	= V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad	= link->source->index,
	};
	const struct ti_csi2rx_fmt *ti_fmt;
	int ret;

	ret = v4l2_subdev_call_state_active(csi->source, pad,
					    get_fmt, &source_fmt);
	if (ret)
		return ret;

	if (source_fmt.format.width != csi_fmt->width) {
		dev_dbg(csi->dev, "Width does not match (source %u, sink %u)\n",
			source_fmt.format.width, csi_fmt->width);
		return -EPIPE;
	}

	if (source_fmt.format.height != csi_fmt->height) {
		dev_dbg(csi->dev, "Height does not match (source %u, sink %u)\n",
			source_fmt.format.height, csi_fmt->height);
		return -EPIPE;
	}

	/* Our format is always V4L2_FIELD_NONE (set in try_fmt). */
	if (source_fmt.format.field != csi_fmt->field &&
	    csi_fmt->field != V4L2_FIELD_NONE) {
		dev_dbg(csi->dev, "Field does not match (source %u, sink %u)\n",
			source_fmt.format.field, csi_fmt->field);
		return -EPIPE;
	}

	ti_fmt = find_format_by_code(source_fmt.format.code);
	if (!ti_fmt) {
		dev_dbg(csi->dev, "Media bus format 0x%x not supported\n",
			source_fmt.format.code);
		return -EPIPE;
	}

	if (ti_fmt->fourcc != csi_fmt->pixelformat) {
		dev_dbg(csi->dev,
			"Cannot transform source fmt 0x%x to sink fmt 0x%x\n",
			ti_fmt->fourcc, csi_fmt->pixelformat);
		return -EPIPE;
	}

	return 0;
}
963 
/* Media entity operations for the video device's sink pad. */
static const struct media_entity_operations ti_csi2rx_video_entity_ops = {
	.link_validate = ti_csi2rx_link_validate,
};
967 
968 static int ti_csi2rx_init_dma(struct ti_csi2rx_dev *csi)
969 {
970 	struct dma_slave_config cfg = {
971 		.src_addr_width = DMA_SLAVE_BUSWIDTH_16_BYTES,
972 	};
973 	int ret;
974 
975 	INIT_LIST_HEAD(&csi->dma.queue);
976 	INIT_LIST_HEAD(&csi->dma.submitted);
977 	spin_lock_init(&csi->dma.lock);
978 
979 	csi->dma.state = TI_CSI2RX_DMA_STOPPED;
980 
981 	csi->dma.chan = dma_request_chan(csi->dev, "rx0");
982 	if (IS_ERR(csi->dma.chan))
983 		return PTR_ERR(csi->dma.chan);
984 
985 	ret = dmaengine_slave_config(csi->dma.chan, &cfg);
986 	if (ret) {
987 		dma_release_channel(csi->dma.chan);
988 		return ret;
989 	}
990 
991 	csi->dma.drain.len = DRAIN_BUFFER_SIZE;
992 	csi->dma.drain.vaddr = dma_alloc_coherent(csi->dev, csi->dma.drain.len,
993 						  &csi->dma.drain.paddr,
994 						  GFP_KERNEL);
995 	if (!csi->dma.drain.vaddr)
996 		return -ENOMEM;
997 
998 	return 0;
999 }
1000 
1001 static int ti_csi2rx_v4l2_init(struct ti_csi2rx_dev *csi)
1002 {
1003 	struct media_device *mdev = &csi->mdev;
1004 	struct video_device *vdev = &csi->vdev;
1005 	const struct ti_csi2rx_fmt *fmt;
1006 	struct v4l2_pix_format *pix_fmt = &csi->v_fmt.fmt.pix;
1007 	int ret;
1008 
1009 	fmt = find_format_by_fourcc(V4L2_PIX_FMT_UYVY);
1010 	if (!fmt)
1011 		return -EINVAL;
1012 
1013 	pix_fmt->width = 640;
1014 	pix_fmt->height = 480;
1015 	pix_fmt->field = V4L2_FIELD_NONE;
1016 	pix_fmt->colorspace = V4L2_COLORSPACE_SRGB;
1017 	pix_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
1018 	pix_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
1019 	pix_fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
1020 
1021 	ti_csi2rx_fill_fmt(fmt, &csi->v_fmt);
1022 
1023 	mdev->dev = csi->dev;
1024 	mdev->hw_revision = 1;
1025 	strscpy(mdev->model, "TI-CSI2RX", sizeof(mdev->model));
1026 
1027 	media_device_init(mdev);
1028 
1029 	strscpy(vdev->name, TI_CSI2RX_MODULE_NAME, sizeof(vdev->name));
1030 	vdev->v4l2_dev = &csi->v4l2_dev;
1031 	vdev->vfl_dir = VFL_DIR_RX;
1032 	vdev->fops = &csi_fops;
1033 	vdev->ioctl_ops = &csi_ioctl_ops;
1034 	vdev->release = video_device_release_empty;
1035 	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
1036 			    V4L2_CAP_IO_MC;
1037 	vdev->lock = &csi->mutex;
1038 	video_set_drvdata(vdev, csi);
1039 
1040 	csi->pad.flags = MEDIA_PAD_FL_SINK;
1041 	vdev->entity.ops = &ti_csi2rx_video_entity_ops;
1042 	ret = media_entity_pads_init(&csi->vdev.entity, 1, &csi->pad);
1043 	if (ret)
1044 		return ret;
1045 
1046 	csi->v4l2_dev.mdev = mdev;
1047 
1048 	ret = v4l2_device_register(csi->dev, &csi->v4l2_dev);
1049 	if (ret)
1050 		return ret;
1051 
1052 	ret = media_device_register(mdev);
1053 	if (ret) {
1054 		v4l2_device_unregister(&csi->v4l2_dev);
1055 		media_device_cleanup(mdev);
1056 		return ret;
1057 	}
1058 
1059 	return 0;
1060 }
1061 
1062 static void ti_csi2rx_cleanup_dma(struct ti_csi2rx_dev *csi)
1063 {
1064 	dma_free_coherent(csi->dev, csi->dma.drain.len,
1065 			  csi->dma.drain.vaddr, csi->dma.drain.paddr);
1066 	csi->dma.drain.vaddr = NULL;
1067 	dma_release_channel(csi->dma.chan);
1068 }
1069 
/* Undo ti_csi2rx_v4l2_init(): unregister before cleanup, per the media API. */
static void ti_csi2rx_cleanup_v4l2(struct ti_csi2rx_dev *csi)
{
	media_device_unregister(&csi->mdev);
	v4l2_device_unregister(&csi->v4l2_dev);
	media_device_cleanup(&csi->mdev);
}
1076 
/* Undo ti_csi2rx_notifier_register(). */
static void ti_csi2rx_cleanup_subdev(struct ti_csi2rx_dev *csi)
{
	v4l2_async_nf_unregister(&csi->notifier);
	v4l2_async_nf_cleanup(&csi->notifier);
}
1082 
/* Undo ti_csi2rx_init_vb2q(). */
static void ti_csi2rx_cleanup_vb2q(struct ti_csi2rx_dev *csi)
{
	vb2_queue_release(&csi->vidq);
}
1087 
/*
 * Probe: map the shim registers, then bring up DMA, the v4l2/media
 * devices, the vb2 queue and the async notifier, and finally populate
 * child devices (the CSI bridge). Teardown on error mirrors init order.
 */
static int ti_csi2rx_probe(struct platform_device *pdev)
{
	struct ti_csi2rx_dev *csi;
	int ret;

	csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
	if (!csi)
		return -ENOMEM;

	csi->dev = &pdev->dev;
	platform_set_drvdata(pdev, csi);

	mutex_init(&csi->mutex);
	csi->shim = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(csi->shim)) {
		ret = PTR_ERR(csi->shim);
		goto err_mutex;
	}

	ret = ti_csi2rx_init_dma(csi);
	if (ret)
		goto err_mutex;

	ret = ti_csi2rx_v4l2_init(csi);
	if (ret)
		goto err_dma;

	ret = ti_csi2rx_init_vb2q(csi);
	if (ret)
		goto err_v4l2;

	ret = ti_csi2rx_notifier_register(csi);
	if (ret)
		goto err_vb2q;

	/* Instantiate the csi-bridge child so its subdev can bind. */
	ret = of_platform_populate(csi->dev->of_node, NULL, NULL, csi->dev);
	if (ret) {
		dev_err(csi->dev, "Failed to create children: %d\n", ret);
		goto err_subdev;
	}

	return 0;

err_subdev:
	ti_csi2rx_cleanup_subdev(csi);
err_vb2q:
	ti_csi2rx_cleanup_vb2q(csi);
err_v4l2:
	ti_csi2rx_cleanup_v4l2(csi);
err_dma:
	ti_csi2rx_cleanup_dma(csi);
err_mutex:
	mutex_destroy(&csi->mutex);
	return ret;
}
1143 
/* Remove: tear everything down in reverse order of probe. */
static void ti_csi2rx_remove(struct platform_device *pdev)
{
	struct ti_csi2rx_dev *csi = platform_get_drvdata(pdev);

	/* Registered from the notifier's .complete, so unregister here. */
	video_unregister_device(&csi->vdev);

	ti_csi2rx_cleanup_vb2q(csi);
	ti_csi2rx_cleanup_subdev(csi);
	ti_csi2rx_cleanup_v4l2(csi);
	ti_csi2rx_cleanup_dma(csi);

	mutex_destroy(&csi->mutex);
}
1157 
/* Device tree match table. */
static const struct of_device_id ti_csi2rx_of_match[] = {
	{ .compatible = "ti,j721e-csi2rx-shim", },
	{ },
};
1162 MODULE_DEVICE_TABLE(of, ti_csi2rx_of_match);
1163 
1164 static struct platform_driver ti_csi2rx_pdrv = {
1165 	.probe = ti_csi2rx_probe,
1166 	.remove = ti_csi2rx_remove,
1167 	.driver = {
1168 		.name = TI_CSI2RX_MODULE_NAME,
1169 		.of_match_table = ti_csi2rx_of_match,
1170 	},
1171 };
1172 
1173 module_platform_driver(ti_csi2rx_pdrv);
1174 
1175 MODULE_DESCRIPTION("TI J721E CSI2 RX Driver");
1176 MODULE_AUTHOR("Jai Luthra <j-luthra@ti.com>");
1177 MODULE_LICENSE("GPL");
1178