// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI CSI2RX Shim Wrapper Driver
 *
 * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
 *
 * Author: Pratyush Yadav <p.yadav@ti.com>
 * Author: Jai Luthra <j-luthra@ti.com>
 */

#include <linux/bitfield.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

#include <media/mipi-csi2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
#include <media/videobuf2-dma-contig.h>

#define TI_CSI2RX_MODULE_NAME		"j721e-csi2rx"

#define SHIM_CNTL			0x10
#define SHIM_CNTL_PIX_RST		BIT(0)

#define SHIM_DMACNTX			0x20
#define SHIM_DMACNTX_EN			BIT(31)
#define SHIM_DMACNTX_YUV422		GENMASK(27, 26)
#define SHIM_DMACNTX_SIZE		GENMASK(21, 20)
#define SHIM_DMACNTX_FMT		GENMASK(5, 0)
#define SHIM_DMACNTX_YUV422_MODE_11	3
#define SHIM_DMACNTX_SIZE_8		0
#define SHIM_DMACNTX_SIZE_16		1
#define SHIM_DMACNTX_SIZE_32		2

#define SHIM_PSI_CFG0			0x24
#define SHIM_PSI_CFG0_SRC_TAG		GENMASK(15, 0)
#define SHIM_PSI_CFG0_DST_TAG		GENMASK(31, 16)

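/*
 * DMA transfers to memory happen in multiples of one PSI-L word. Both the
 * minimum line width and the width alignment used below derive from this.
 */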
#define PSIL_WORD_SIZE_BYTES		16
/*
 * There are no hard limits on the width or height. The DMA engine can handle
 * all sizes. The max width and height are arbitrary numbers for this driver.
 * Use 16K * 16K as the arbitrary limit. It is large enough that it is unlikely
 * the limit will be hit in practice.
 */
#define MAX_WIDTH_BYTES			SZ_16K
#define MAX_HEIGHT_LINES		SZ_16K

#define DRAIN_TIMEOUT_MS		50
#define DRAIN_BUFFER_SIZE		SZ_32K

struct ti_csi2rx_fmt {
	u32				fourcc;	/* Four character code. */
	u32				code;	/* Mbus code. */
	u32				csi_dt;	/* CSI Data type. */
	u8				bpp;	/* Bits per pixel. */
	u8				size;	/* Data size shift when unpacking. */
};
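/*
 * Example entry from ti_csi2rx_formats below: 10-bit Bayer data arrives on
 * the bus as MIPI_CSI2_DT_RAW10 but is stored in a 16-bit container in
 * memory, so bpp is 16 and the shim unpacks with SHIM_DMACNTX_SIZE_16.
 */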

struct ti_csi2rx_buffer {
	/* Common v4l2 buffer. Must be first. */
	struct vb2_v4l2_buffer		vb;
	struct list_head		list;
	struct ti_csi2rx_dev		*csi;
};

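/*
 * DMA state machine: STOPPED -> ACTIVE in start_streaming(), ACTIVE -> IDLE
 * in the DMA callback once no submitted buffers remain, IDLE -> ACTIVE when
 * a new buffer is queued, and any state -> STOPPED in stop_dma().
 */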
enum ti_csi2rx_dma_state {
	TI_CSI2RX_DMA_STOPPED,	/* Streaming not started yet. */
	TI_CSI2RX_DMA_IDLE,	/* Streaming but no pending DMA operation. */
	TI_CSI2RX_DMA_ACTIVE,	/* Streaming and pending DMA operation. */
};

struct ti_csi2rx_dma {
	/* Protects all fields in this struct. */
	spinlock_t			lock;
	struct dma_chan			*chan;
	/* Buffers queued to the driver, waiting to be processed by DMA. */
	struct list_head		queue;
	enum ti_csi2rx_dma_state	state;
	/* Buffers submitted to the DMA engine, awaiting completion. */
	struct list_head		submitted;
	/* Buffer to drain stale data from the PSI-L endpoint. */
	struct {
		void			*vaddr;
		dma_addr_t		paddr;
		size_t			len;
	} drain;
};

struct ti_csi2rx_dev {
	struct device			*dev;
	void __iomem			*shim;
	struct v4l2_device		v4l2_dev;
	struct video_device		vdev;
	struct media_device		mdev;
	struct media_pipeline		pipe;
	struct media_pad		pad;
	struct v4l2_async_notifier	notifier;
	struct v4l2_subdev		*source;
	struct vb2_queue		vidq;
	struct mutex			mutex; /* To serialize ioctls. */
	struct v4l2_format		v_fmt;
	struct ti_csi2rx_dma		dma;
	u32				sequence;
};

static const struct ti_csi2rx_fmt ti_csi2rx_formats[] = {
	{
		.fourcc			= V4L2_PIX_FMT_YUYV,
		.code			= MEDIA_BUS_FMT_YUYV8_1X16,
		.csi_dt			= MIPI_CSI2_DT_YUV422_8B,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_UYVY,
		.code			= MEDIA_BUS_FMT_UYVY8_1X16,
		.csi_dt			= MIPI_CSI2_DT_YUV422_8B,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_YVYU,
		.code			= MEDIA_BUS_FMT_YVYU8_1X16,
		.csi_dt			= MIPI_CSI2_DT_YUV422_8B,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_VYUY,
		.code			= MEDIA_BUS_FMT_VYUY8_1X16,
		.csi_dt			= MIPI_CSI2_DT_YUV422_8B,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_SBGGR8,
		.code			= MEDIA_BUS_FMT_SBGGR8_1X8,
		.csi_dt			= MIPI_CSI2_DT_RAW8,
		.bpp			= 8,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGBRG8,
		.code			= MEDIA_BUS_FMT_SGBRG8_1X8,
		.csi_dt			= MIPI_CSI2_DT_RAW8,
		.bpp			= 8,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGRBG8,
		.code			= MEDIA_BUS_FMT_SGRBG8_1X8,
		.csi_dt			= MIPI_CSI2_DT_RAW8,
		.bpp			= 8,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_SRGGB8,
		.code			= MEDIA_BUS_FMT_SRGGB8_1X8,
		.csi_dt			= MIPI_CSI2_DT_RAW8,
		.bpp			= 8,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_GREY,
		.code			= MEDIA_BUS_FMT_Y8_1X8,
		.csi_dt			= MIPI_CSI2_DT_RAW8,
		.bpp			= 8,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_SBGGR10,
		.code			= MEDIA_BUS_FMT_SBGGR10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGBRG10,
		.code			= MEDIA_BUS_FMT_SGBRG10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGRBG10,
		.code			= MEDIA_BUS_FMT_SGRBG10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SRGGB10,
		.code			= MEDIA_BUS_FMT_SRGGB10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_RGB565X,
		.code			= MEDIA_BUS_FMT_RGB565_1X16,
		.csi_dt			= MIPI_CSI2_DT_RGB565,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_XBGR32,
		.code			= MEDIA_BUS_FMT_RGB888_1X24,
		.csi_dt			= MIPI_CSI2_DT_RGB888,
		.bpp			= 32,
		.size			= SHIM_DMACNTX_SIZE_32,
	}, {
		.fourcc			= V4L2_PIX_FMT_RGBX32,
		.code			= MEDIA_BUS_FMT_BGR888_1X24,
		.csi_dt			= MIPI_CSI2_DT_RGB888,
		.bpp			= 32,
		.size			= SHIM_DMACNTX_SIZE_32,
	},

	/* More formats can be supported but they are not listed for now. */
};

/* Forward declaration needed by ti_csi2rx_dma_callback. */
static int ti_csi2rx_start_dma(struct ti_csi2rx_dev *csi,
			       struct ti_csi2rx_buffer *buf);

static const struct ti_csi2rx_fmt *find_format_by_fourcc(u32 pixelformat)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ti_csi2rx_formats); i++) {
		if (ti_csi2rx_formats[i].fourcc == pixelformat)
			return &ti_csi2rx_formats[i];
	}

	return NULL;
}

static const struct ti_csi2rx_fmt *find_format_by_code(u32 code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ti_csi2rx_formats); i++) {
		if (ti_csi2rx_formats[i].code == code)
			return &ti_csi2rx_formats[i];
	}

	return NULL;
}

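/*
 * Worked example: for a 16 bpp format such as YUYV, pixels_in_word =
 * 16 * 8 / 16 = 8, so the width is clamped to the range [8, 8192] pixels
 * (8192 = MAX_WIDTH_BYTES * 8 / 16) and then rounded down to a multiple of 8.
 */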
static void ti_csi2rx_fill_fmt(const struct ti_csi2rx_fmt *csi_fmt,
			       struct v4l2_format *v4l2_fmt)
{
	struct v4l2_pix_format *pix = &v4l2_fmt->fmt.pix;
	unsigned int pixels_in_word;

	pixels_in_word = PSIL_WORD_SIZE_BYTES * 8 / csi_fmt->bpp;

	/* Clamp width and height to sensible maximums (16K bytes/line x 16K lines). */
	pix->width = clamp_t(unsigned int, pix->width,
			     pixels_in_word,
			     MAX_WIDTH_BYTES * 8 / csi_fmt->bpp);
	pix->height = clamp_t(unsigned int, pix->height, 1, MAX_HEIGHT_LINES);

	/* Width should be a multiple of the transfer word size. */
	pix->width = rounddown(pix->width, pixels_in_word);

	v4l2_fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	pix->pixelformat = csi_fmt->fourcc;
	pix->bytesperline = pix->width * (csi_fmt->bpp / 8);
	pix->sizeimage = pix->bytesperline * pix->height;
}

static int ti_csi2rx_querycap(struct file *file, void *priv,
			      struct v4l2_capability *cap)
{
	strscpy(cap->driver, TI_CSI2RX_MODULE_NAME, sizeof(cap->driver));
	strscpy(cap->card, TI_CSI2RX_MODULE_NAME, sizeof(cap->card));

	return 0;
}

static int ti_csi2rx_enum_fmt_vid_cap(struct file *file, void *priv,
				      struct v4l2_fmtdesc *f)
{
	const struct ti_csi2rx_fmt *fmt = NULL;

	if (f->mbus_code) {
		/* 1-to-1 mapping between bus formats and pixel formats */
		if (f->index > 0)
			return -EINVAL;

		fmt = find_format_by_code(f->mbus_code);
	} else {
		if (f->index >= ARRAY_SIZE(ti_csi2rx_formats))
			return -EINVAL;

		fmt = &ti_csi2rx_formats[f->index];
	}

	if (!fmt)
		return -EINVAL;

	f->pixelformat = fmt->fourcc;
	memset(f->reserved, 0, sizeof(f->reserved));
	f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	return 0;
}

static int ti_csi2rx_g_fmt_vid_cap(struct file *file, void *priv,
				   struct v4l2_format *f)
{
	struct ti_csi2rx_dev *csi = video_drvdata(file);

	*f = csi->v_fmt;

	return 0;
}

static int ti_csi2rx_try_fmt_vid_cap(struct file *file, void *priv,
				     struct v4l2_format *f)
{
	const struct ti_csi2rx_fmt *fmt;

	/*
	 * Default to the first format if the requested pixel format code isn't
	 * supported.
	 */
	fmt = find_format_by_fourcc(f->fmt.pix.pixelformat);
	if (!fmt)
		fmt = &ti_csi2rx_formats[0];

	/* Interlaced formats are not supported. */
	f->fmt.pix.field = V4L2_FIELD_NONE;

	ti_csi2rx_fill_fmt(fmt, f);

	return 0;
}

static int ti_csi2rx_s_fmt_vid_cap(struct file *file, void *priv,
				   struct v4l2_format *f)
{
	struct ti_csi2rx_dev *csi = video_drvdata(file);
	struct vb2_queue *q = &csi->vidq;
	int ret;

	if (vb2_is_busy(q))
		return -EBUSY;

	ret = ti_csi2rx_try_fmt_vid_cap(file, priv, f);
	if (ret < 0)
		return ret;

	csi->v_fmt = *f;

	return 0;
}

static int ti_csi2rx_enum_framesizes(struct file *file, void *fh,
				     struct v4l2_frmsizeenum *fsize)
{
	const struct ti_csi2rx_fmt *fmt;
	unsigned int pixels_in_word;

	fmt = find_format_by_fourcc(fsize->pixel_format);
	if (!fmt || fsize->index != 0)
		return -EINVAL;

	/*
	 * Number of pixels in one PSI-L word. The transfer happens in multiples
	 * of PSI-L word sizes.
	 */
	pixels_in_word = PSIL_WORD_SIZE_BYTES * 8 / fmt->bpp;

	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
	fsize->stepwise.min_width = pixels_in_word;
	fsize->stepwise.max_width = rounddown(MAX_WIDTH_BYTES * 8 / fmt->bpp,
					      pixels_in_word);
	fsize->stepwise.step_width = pixels_in_word;
	fsize->stepwise.min_height = 1;
	fsize->stepwise.max_height = MAX_HEIGHT_LINES;
	fsize->stepwise.step_height = 1;

	return 0;
}

static const struct v4l2_ioctl_ops csi_ioctl_ops = {
	.vidioc_querycap      = ti_csi2rx_querycap,
	.vidioc_enum_fmt_vid_cap = ti_csi2rx_enum_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap = ti_csi2rx_try_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap = ti_csi2rx_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap = ti_csi2rx_s_fmt_vid_cap,
	.vidioc_enum_framesizes = ti_csi2rx_enum_framesizes,
	.vidioc_reqbufs       = vb2_ioctl_reqbufs,
	.vidioc_create_bufs   = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf   = vb2_ioctl_prepare_buf,
	.vidioc_querybuf      = vb2_ioctl_querybuf,
	.vidioc_qbuf          = vb2_ioctl_qbuf,
	.vidioc_dqbuf         = vb2_ioctl_dqbuf,
	.vidioc_expbuf        = vb2_ioctl_expbuf,
	.vidioc_streamon      = vb2_ioctl_streamon,
	.vidioc_streamoff     = vb2_ioctl_streamoff,
};

static const struct v4l2_file_operations csi_fops = {
	.owner = THIS_MODULE,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.read = vb2_fop_read,
	.poll = vb2_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = vb2_fop_mmap,
};

static int csi_async_notifier_bound(struct v4l2_async_notifier *notifier,
				    struct v4l2_subdev *subdev,
				    struct v4l2_async_connection *asc)
{
	struct ti_csi2rx_dev *csi = dev_get_drvdata(notifier->v4l2_dev->dev);

	csi->source = subdev;

	return 0;
}

static int csi_async_notifier_complete(struct v4l2_async_notifier *notifier)
{
	struct ti_csi2rx_dev *csi = dev_get_drvdata(notifier->v4l2_dev->dev);
	struct video_device *vdev = &csi->vdev;
	int ret;

	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (ret)
		return ret;

	ret = v4l2_create_fwnode_links_to_pad(csi->source, &csi->pad,
					      MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret) {
		video_unregister_device(vdev);
		return ret;
	}

	ret = v4l2_device_register_subdev_nodes(&csi->v4l2_dev);
	if (ret)
		video_unregister_device(vdev);

	return ret;
}

static const struct v4l2_async_notifier_operations csi_async_notifier_ops = {
	.bound = csi_async_notifier_bound,
	.complete = csi_async_notifier_complete,
};

static int ti_csi2rx_notifier_register(struct ti_csi2rx_dev *csi)
{
	struct fwnode_handle *fwnode;
	struct v4l2_async_connection *asc;
	struct device_node *node;
	int ret;

	node = of_get_child_by_name(csi->dev->of_node, "csi-bridge");
	if (!node)
		return -EINVAL;

	fwnode = of_fwnode_handle(node);
	if (!fwnode) {
		of_node_put(node);
		return -EINVAL;
	}

	v4l2_async_nf_init(&csi->notifier, &csi->v4l2_dev);
	csi->notifier.ops = &csi_async_notifier_ops;

	asc = v4l2_async_nf_add_fwnode(&csi->notifier, fwnode,
				       struct v4l2_async_connection);
	of_node_put(node);
	if (IS_ERR(asc)) {
		v4l2_async_nf_cleanup(&csi->notifier);
		return PTR_ERR(asc);
	}

	ret = v4l2_async_nf_register(&csi->notifier);
	if (ret) {
		v4l2_async_nf_cleanup(&csi->notifier);
		return ret;
	}

	return 0;
}

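/*
 * Program the shim for the active format: de-assert the module-level pixel
 * reset, then enable the DMA context with the CSI-2 data type, the YUV422
 * byte-order mode and the unpacking size, and finally zero the PSI-L
 * source/destination tags.
 */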
static void ti_csi2rx_setup_shim(struct ti_csi2rx_dev *csi)
{
	const struct ti_csi2rx_fmt *fmt;
	unsigned int reg;

	fmt = find_format_by_fourcc(csi->v_fmt.fmt.pix.pixelformat);

	/* De-assert the pixel interface reset. */
	reg = SHIM_CNTL_PIX_RST;
	writel(reg, csi->shim + SHIM_CNTL);

	reg = SHIM_DMACNTX_EN;
	reg |= FIELD_PREP(SHIM_DMACNTX_FMT, fmt->csi_dt);

	/*
	 * The hardware assumes incoming YUV422 8-bit data on the MIPI CSI-2
	 * bus follows the spec and is packed in the order U0 -> Y0 -> V0 ->
	 * Y1 -> ...
	 *
	 * There is an option to swap the bytes around before storing in
	 * memory, to achieve different pixel formats:
	 *
	 * Byte3 <----------- Byte0
	 * [ Y1 ][ V0 ][ Y0 ][ U0 ]	MODE 11
	 * [ Y1 ][ U0 ][ Y0 ][ V0 ]	MODE 10
	 * [ V0 ][ Y1 ][ U0 ][ Y0 ]	MODE 01
	 * [ U0 ][ Y1 ][ V0 ][ Y0 ]	MODE 00
	 *
	 * There is no requirement to change the pixel format from what the
	 * source delivers, so keep MODE 11, which does not swap any bytes
	 * when storing to memory.
	 */
	switch (fmt->fourcc) {
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_VYUY:
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_YVYU:
		reg |= FIELD_PREP(SHIM_DMACNTX_YUV422,
				  SHIM_DMACNTX_YUV422_MODE_11);
		break;
	default:
		/* Ignore if not YUV 4:2:2. */
		break;
	}

	reg |= FIELD_PREP(SHIM_DMACNTX_SIZE, fmt->size);

	writel(reg, csi->shim + SHIM_DMACNTX);

	reg = FIELD_PREP(SHIM_PSI_CFG0_SRC_TAG, 0) |
	      FIELD_PREP(SHIM_PSI_CFG0_DST_TAG, 0);
	writel(reg, csi->shim + SHIM_PSI_CFG0);
}

static void ti_csi2rx_drain_callback(void *param)
{
	struct completion *drain_complete = param;

	complete(drain_complete);
}

/*
 * Drain the stale data left at the PSI-L endpoint.
 *
 * This might happen if no buffers are queued in time but the source is still
 * streaming. In multi-stream scenarios this can happen when one stream is
 * stopped but another is still streaming, and thus the module-level pixel
 * reset is not asserted.
 *
 * To prevent that stale data from corrupting subsequent transactions, issue
 * DMA requests to drain it out.
 */
static int ti_csi2rx_drain_dma(struct ti_csi2rx_dev *csi)
{
	struct dma_async_tx_descriptor *desc;
	struct completion drain_complete;
	dma_cookie_t cookie;
	int ret;

	init_completion(&drain_complete);

	desc = dmaengine_prep_slave_single(csi->dma.chan, csi->dma.drain.paddr,
					   csi->dma.drain.len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EIO;
		goto out;
	}

	desc->callback = ti_csi2rx_drain_callback;
	desc->callback_param = &drain_complete;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		goto out;

	dma_async_issue_pending(csi->dma.chan);

	if (!wait_for_completion_timeout(&drain_complete,
					 msecs_to_jiffies(DRAIN_TIMEOUT_MS))) {
		dmaengine_terminate_sync(csi->dma.chan);
		dev_dbg(csi->dev, "DMA transfer timed out for drain buffer\n");
		ret = -ETIMEDOUT;
		goto out;
	}
out:
	return ret;
}

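/*
 * Completion callback for one frame transfer: time-stamp and hand the
 * finished buffer back to vb2, start transfers for any buffers queued in the
 * meantime, and mark the engine idle once nothing is left in flight.
 */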
static void ti_csi2rx_dma_callback(void *param)
{
	struct ti_csi2rx_buffer *buf = param;
	struct ti_csi2rx_dev *csi = buf->csi;
	struct ti_csi2rx_dma *dma = &csi->dma;
	unsigned long flags;

	/*
	 * TODO: Derive the sequence number from the CSI2RX frame number
	 * hardware monitor registers.
	 */
	buf->vb.vb2_buf.timestamp = ktime_get_ns();
	buf->vb.sequence = csi->sequence++;

	spin_lock_irqsave(&dma->lock, flags);

	WARN_ON(!list_is_first(&buf->list, &dma->submitted));
	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
	list_del(&buf->list);

	/* If there are more buffers to process, then start their transfer. */
	while (!list_empty(&dma->queue)) {
		buf = list_entry(dma->queue.next, struct ti_csi2rx_buffer, list);

		if (ti_csi2rx_start_dma(csi, buf)) {
			dev_err(csi->dev, "Failed to queue the next buffer for DMA\n");
			/* Drop the buffer from the queue so the loop makes progress. */
			list_del(&buf->list);
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		} else {
			list_move_tail(&buf->list, &dma->submitted);
		}
	}

	if (list_empty(&dma->submitted))
		dma->state = TI_CSI2RX_DMA_IDLE;

	spin_unlock_irqrestore(&dma->lock, flags);
}

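/*
 * Prepare and submit a single slave transfer of sizeimage bytes into the vb2
 * buffer. The caller is responsible for moving the buffer onto the submitted
 * list under dma->lock.
 */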
static int ti_csi2rx_start_dma(struct ti_csi2rx_dev *csi,
			       struct ti_csi2rx_buffer *buf)
{
	dma_addr_t addr;
	struct dma_async_tx_descriptor *desc;
	size_t len = csi->v_fmt.fmt.pix.sizeimage;
	dma_cookie_t cookie;
	int ret = 0;

	addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
	desc = dmaengine_prep_slave_single(csi->dma.chan, addr, len,
					   DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EIO;

	desc->callback = ti_csi2rx_dma_callback;
	desc->callback_param = buf;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		return ret;

	dma_async_issue_pending(csi->dma.chan);

	return 0;
}

static void ti_csi2rx_stop_dma(struct ti_csi2rx_dev *csi)
{
	struct ti_csi2rx_dma *dma = &csi->dma;
	enum ti_csi2rx_dma_state state;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma->lock, flags);
	state = csi->dma.state;
	dma->state = TI_CSI2RX_DMA_STOPPED;
	spin_unlock_irqrestore(&dma->lock, flags);

	if (state != TI_CSI2RX_DMA_STOPPED) {
		/*
		 * Normal DMA termination does not clean up pending data on
		 * the endpoint if multiple streams are running and only one
		 * is stopped, as the module-level pixel reset cannot be
		 * enforced before terminating DMA.
		 */
		ret = ti_csi2rx_drain_dma(csi);
		if (ret && ret != -ETIMEDOUT)
			dev_warn(csi->dev,
				 "Failed to drain DMA. Next frame might be bogus\n");
	}

	ret = dmaengine_terminate_sync(csi->dma.chan);
	if (ret)
		dev_err(csi->dev, "Failed to stop DMA: %d\n", ret);
}

static void ti_csi2rx_cleanup_buffers(struct ti_csi2rx_dev *csi,
				      enum vb2_buffer_state state)
{
	struct ti_csi2rx_dma *dma = &csi->dma;
	struct ti_csi2rx_buffer *buf, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);
	list_for_each_entry_safe(buf, tmp, &csi->dma.queue, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
	list_for_each_entry_safe(buf, tmp, &csi->dma.submitted, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
	spin_unlock_irqrestore(&dma->lock, flags);
}

static int ti_csi2rx_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
				 unsigned int *nplanes, unsigned int sizes[],
				 struct device *alloc_devs[])
{
	struct ti_csi2rx_dev *csi = vb2_get_drv_priv(q);
	unsigned int size = csi->v_fmt.fmt.pix.sizeimage;

	if (*nplanes) {
		if (sizes[0] < size)
			return -EINVAL;
		size = sizes[0];
	}

	*nplanes = 1;
	sizes[0] = size;

	return 0;
}

static int ti_csi2rx_buffer_prepare(struct vb2_buffer *vb)
{
	struct ti_csi2rx_dev *csi = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long size = csi->v_fmt.fmt.pix.sizeimage;

	if (vb2_plane_size(vb, 0) < size) {
		dev_err(csi->dev, "Data will not fit into plane\n");
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, size);
	return 0;
}

static void ti_csi2rx_buffer_queue(struct vb2_buffer *vb)
{
	struct ti_csi2rx_dev *csi = vb2_get_drv_priv(vb->vb2_queue);
	struct ti_csi2rx_buffer *buf;
	struct ti_csi2rx_dma *dma = &csi->dma;
	bool restart_dma = false;
	unsigned long flags;
	int ret;

	buf = container_of(vb, struct ti_csi2rx_buffer, vb.vb2_buf);
	buf->csi = csi;

	spin_lock_irqsave(&dma->lock, flags);
	/*
	 * Usually the DMA callback takes care of queueing the pending buffers.
	 * But if DMA has stalled due to lack of buffers, restart it now.
	 */
	if (dma->state == TI_CSI2RX_DMA_IDLE) {
		/*
		 * Do not restart DMA with the lock held because
		 * ti_csi2rx_drain_dma() might block for completion.
		 * There won't be a race on queueing DMA anyway since the
		 * callback is not being fired.
		 */
		restart_dma = true;
		dma->state = TI_CSI2RX_DMA_ACTIVE;
	} else {
		list_add_tail(&buf->list, &dma->queue);
	}
	spin_unlock_irqrestore(&dma->lock, flags);

	if (restart_dma) {
		/*
		 * Once frames start dropping, some data gets stuck in the DMA
		 * pipeline somewhere. So the first DMA transfer after frame
		 * drops gives a partial frame. This is obviously not useful to
		 * the application and will only confuse it. Issue a DMA
		 * transaction to drain it out.
		 */
		ret = ti_csi2rx_drain_dma(csi);
		if (ret && ret != -ETIMEDOUT)
			dev_warn(csi->dev,
				 "Failed to drain DMA. Next frame might be bogus\n");

		ret = ti_csi2rx_start_dma(csi, buf);
		if (ret) {
			dev_err(csi->dev, "Failed to start DMA: %d\n", ret);
			spin_lock_irqsave(&dma->lock, flags);
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
			dma->state = TI_CSI2RX_DMA_IDLE;
			spin_unlock_irqrestore(&dma->lock, flags);
		} else {
			spin_lock_irqsave(&dma->lock, flags);
			list_add_tail(&buf->list, &dma->submitted);
			spin_unlock_irqrestore(&dma->lock, flags);
		}
	}
}

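/*
 * Note the bring-up order: the pipeline and shim are configured and the first
 * DMA transfer is submitted before the source subdev starts streaming, so a
 * transfer is already in flight when pixel data first arrives.
 */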
static int ti_csi2rx_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct ti_csi2rx_dev *csi = vb2_get_drv_priv(vq);
	struct ti_csi2rx_dma *dma = &csi->dma;
	struct ti_csi2rx_buffer *buf;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dma->lock, flags);
	if (list_empty(&dma->queue))
		ret = -EIO;
	spin_unlock_irqrestore(&dma->lock, flags);
	if (ret)
		return ret;

	ret = video_device_pipeline_start(&csi->vdev, &csi->pipe);
	if (ret)
		goto err;

	ti_csi2rx_setup_shim(csi);

	csi->sequence = 0;

	spin_lock_irqsave(&dma->lock, flags);
	buf = list_entry(dma->queue.next, struct ti_csi2rx_buffer, list);

	ret = ti_csi2rx_start_dma(csi, buf);
	if (ret) {
		dev_err(csi->dev, "Failed to start DMA: %d\n", ret);
		spin_unlock_irqrestore(&dma->lock, flags);
		goto err_pipeline;
	}

	list_move_tail(&buf->list, &dma->submitted);
	dma->state = TI_CSI2RX_DMA_ACTIVE;
	spin_unlock_irqrestore(&dma->lock, flags);

	ret = v4l2_subdev_call(csi->source, video, s_stream, 1);
	if (ret)
		goto err_dma;

	return 0;

err_dma:
	ti_csi2rx_stop_dma(csi);
err_pipeline:
	video_device_pipeline_stop(&csi->vdev);
	writel(0, csi->shim + SHIM_CNTL);
	writel(0, csi->shim + SHIM_DMACNTX);
err:
	ti_csi2rx_cleanup_buffers(csi, VB2_BUF_STATE_QUEUED);
	return ret;
}

static void ti_csi2rx_stop_streaming(struct vb2_queue *vq)
{
	struct ti_csi2rx_dev *csi = vb2_get_drv_priv(vq);
	int ret;

	video_device_pipeline_stop(&csi->vdev);

	writel(0, csi->shim + SHIM_CNTL);
	writel(0, csi->shim + SHIM_DMACNTX);

	ret = v4l2_subdev_call(csi->source, video, s_stream, 0);
	if (ret)
		dev_err(csi->dev, "Failed to stop subdev stream\n");

	ti_csi2rx_stop_dma(csi);
	ti_csi2rx_cleanup_buffers(csi, VB2_BUF_STATE_ERROR);
}

static const struct vb2_ops csi_vb2_qops = {
	.queue_setup = ti_csi2rx_queue_setup,
	.buf_prepare = ti_csi2rx_buffer_prepare,
	.buf_queue = ti_csi2rx_buffer_queue,
	.start_streaming = ti_csi2rx_start_streaming,
	.stop_streaming = ti_csi2rx_stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};

static int ti_csi2rx_init_vb2q(struct ti_csi2rx_dev *csi)
{
	struct vb2_queue *q = &csi->vidq;
	int ret;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->drv_priv = csi;
	q->buf_struct_size = sizeof(struct ti_csi2rx_buffer);
	q->ops = &csi_vb2_qops;
	q->mem_ops = &vb2_dma_contig_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->dev = dmaengine_get_dma_device(csi->dma.chan);
	q->lock = &csi->mutex;
	q->min_queued_buffers = 1;

	ret = vb2_queue_init(q);
	if (ret)
		return ret;

	csi->vdev.queue = q;

	return 0;
}

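/*
 * Validate the immutable source -> sink link: the subdev's active pad format
 * must match the configured capture format in width, height and field, and
 * its media bus code must map to the configured pixel format.
 */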
static int ti_csi2rx_link_validate(struct media_link *link)
{
	struct media_entity *entity = link->sink->entity;
	struct video_device *vdev = media_entity_to_video_device(entity);
	struct ti_csi2rx_dev *csi = container_of(vdev, struct ti_csi2rx_dev, vdev);
	struct v4l2_pix_format *csi_fmt = &csi->v_fmt.fmt.pix;
	struct v4l2_subdev_format source_fmt = {
		.which	= V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad	= link->source->index,
	};
	const struct ti_csi2rx_fmt *ti_fmt;
	int ret;

	ret = v4l2_subdev_call_state_active(csi->source, pad,
					    get_fmt, &source_fmt);
	if (ret)
		return ret;

	if (source_fmt.format.width != csi_fmt->width) {
		dev_dbg(csi->dev, "Width does not match (source %u, sink %u)\n",
			source_fmt.format.width, csi_fmt->width);
		return -EPIPE;
	}

	if (source_fmt.format.height != csi_fmt->height) {
		dev_dbg(csi->dev, "Height does not match (source %u, sink %u)\n",
			source_fmt.format.height, csi_fmt->height);
		return -EPIPE;
	}

	if (source_fmt.format.field != csi_fmt->field &&
	    csi_fmt->field != V4L2_FIELD_NONE) {
		dev_dbg(csi->dev, "Field does not match (source %u, sink %u)\n",
			source_fmt.format.field, csi_fmt->field);
		return -EPIPE;
	}

	ti_fmt = find_format_by_code(source_fmt.format.code);
	if (!ti_fmt) {
		dev_dbg(csi->dev, "Media bus format 0x%x not supported\n",
			source_fmt.format.code);
		return -EPIPE;
	}

	if (ti_fmt->fourcc != csi_fmt->pixelformat) {
		dev_dbg(csi->dev,
			"Cannot transform source fmt 0x%x to sink fmt 0x%x\n",
			ti_fmt->fourcc, csi_fmt->pixelformat);
		return -EPIPE;
	}

	return 0;
}

static const struct media_entity_operations ti_csi2rx_video_entity_ops = {
	.link_validate = ti_csi2rx_link_validate,
};

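/*
 * The 16-byte slave bus width matches the PSI-L word size. A 32 KiB coherent
 * drain buffer is allocated up front so stale endpoint data can be flushed
 * without needing a vb2 buffer.
 */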
static int ti_csi2rx_init_dma(struct ti_csi2rx_dev *csi)
{
	struct dma_slave_config cfg = {
		.src_addr_width = DMA_SLAVE_BUSWIDTH_16_BYTES,
	};
	int ret;

	INIT_LIST_HEAD(&csi->dma.queue);
	INIT_LIST_HEAD(&csi->dma.submitted);
	spin_lock_init(&csi->dma.lock);

	csi->dma.state = TI_CSI2RX_DMA_STOPPED;

	csi->dma.chan = dma_request_chan(csi->dev, "rx0");
	if (IS_ERR(csi->dma.chan))
		return PTR_ERR(csi->dma.chan);

	ret = dmaengine_slave_config(csi->dma.chan, &cfg);
	if (ret) {
		dma_release_channel(csi->dma.chan);
		return ret;
	}

	csi->dma.drain.len = DRAIN_BUFFER_SIZE;
	csi->dma.drain.vaddr = dma_alloc_coherent(csi->dev, csi->dma.drain.len,
						  &csi->dma.drain.paddr,
						  GFP_KERNEL);
	if (!csi->dma.drain.vaddr) {
		dma_release_channel(csi->dma.chan);
		return -ENOMEM;
	}

	return 0;
}

static int ti_csi2rx_v4l2_init(struct ti_csi2rx_dev *csi)
{
	struct media_device *mdev = &csi->mdev;
	struct video_device *vdev = &csi->vdev;
	const struct ti_csi2rx_fmt *fmt;
	struct v4l2_pix_format *pix_fmt = &csi->v_fmt.fmt.pix;
	int ret;

	fmt = find_format_by_fourcc(V4L2_PIX_FMT_UYVY);
	if (!fmt)
		return -EINVAL;

	pix_fmt->width = 640;
	pix_fmt->height = 480;
	pix_fmt->field = V4L2_FIELD_NONE;
	pix_fmt->colorspace = V4L2_COLORSPACE_SRGB;
	pix_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
	pix_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
	pix_fmt->xfer_func = V4L2_XFER_FUNC_SRGB;

	ti_csi2rx_fill_fmt(fmt, &csi->v_fmt);

	mdev->dev = csi->dev;
	mdev->hw_revision = 1;
	strscpy(mdev->model, "TI-CSI2RX", sizeof(mdev->model));

	media_device_init(mdev);

	strscpy(vdev->name, TI_CSI2RX_MODULE_NAME, sizeof(vdev->name));
	vdev->v4l2_dev = &csi->v4l2_dev;
	vdev->vfl_dir = VFL_DIR_RX;
	vdev->fops = &csi_fops;
	vdev->ioctl_ops = &csi_ioctl_ops;
	vdev->release = video_device_release_empty;
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
			    V4L2_CAP_IO_MC;
	vdev->lock = &csi->mutex;
	video_set_drvdata(vdev, csi);

	csi->pad.flags = MEDIA_PAD_FL_SINK;
	vdev->entity.ops = &ti_csi2rx_video_entity_ops;
	ret = media_entity_pads_init(&csi->vdev.entity, 1, &csi->pad);
	if (ret) {
		media_device_cleanup(mdev);
		return ret;
	}

	csi->v4l2_dev.mdev = mdev;

	ret = v4l2_device_register(csi->dev, &csi->v4l2_dev);
	if (ret) {
		media_device_cleanup(mdev);
		return ret;
	}

	ret = media_device_register(mdev);
	if (ret) {
		v4l2_device_unregister(&csi->v4l2_dev);
		media_device_cleanup(mdev);
		return ret;
	}

	return 0;
}

static void ti_csi2rx_cleanup_dma(struct ti_csi2rx_dev *csi)
{
	dma_free_coherent(csi->dev, csi->dma.drain.len,
			  csi->dma.drain.vaddr, csi->dma.drain.paddr);
	csi->dma.drain.vaddr = NULL;
	dma_release_channel(csi->dma.chan);
}

static void ti_csi2rx_cleanup_v4l2(struct ti_csi2rx_dev *csi)
{
	media_device_unregister(&csi->mdev);
	v4l2_device_unregister(&csi->v4l2_dev);
	media_device_cleanup(&csi->mdev);
}

static void ti_csi2rx_cleanup_subdev(struct ti_csi2rx_dev *csi)
{
	v4l2_async_nf_unregister(&csi->notifier);
	v4l2_async_nf_cleanup(&csi->notifier);
}

static void ti_csi2rx_cleanup_vb2q(struct ti_csi2rx_dev *csi)
{
	vb2_queue_release(&csi->vidq);
}

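/*
 * Probe order: map the shim registers, set up the DMA channel and drain
 * buffer, register the V4L2/media devices and the vb2 queue, then register
 * the async notifier and populate the child "csi-bridge" device. The video
 * node itself is only registered once the notifier completes.
 */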
static int ti_csi2rx_probe(struct platform_device *pdev)
{
	struct ti_csi2rx_dev *csi;
	int ret;

	csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
	if (!csi)
		return -ENOMEM;

	csi->dev = &pdev->dev;
	platform_set_drvdata(pdev, csi);

	mutex_init(&csi->mutex);
	csi->shim = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(csi->shim)) {
		ret = PTR_ERR(csi->shim);
		goto err_mutex;
	}

	ret = ti_csi2rx_init_dma(csi);
	if (ret)
		goto err_mutex;

	ret = ti_csi2rx_v4l2_init(csi);
	if (ret)
		goto err_dma;

	ret = ti_csi2rx_init_vb2q(csi);
	if (ret)
		goto err_v4l2;

	ret = ti_csi2rx_notifier_register(csi);
	if (ret)
		goto err_vb2q;

	ret = of_platform_populate(csi->dev->of_node, NULL, NULL, csi->dev);
	if (ret) {
		dev_err(csi->dev, "Failed to create children: %d\n", ret);
		goto err_subdev;
	}

	return 0;

err_subdev:
	ti_csi2rx_cleanup_subdev(csi);
err_vb2q:
	ti_csi2rx_cleanup_vb2q(csi);
err_v4l2:
	ti_csi2rx_cleanup_v4l2(csi);
err_dma:
	ti_csi2rx_cleanup_dma(csi);
err_mutex:
	mutex_destroy(&csi->mutex);
	return ret;
}

static void ti_csi2rx_remove(struct platform_device *pdev)
{
	struct ti_csi2rx_dev *csi = platform_get_drvdata(pdev);

	video_unregister_device(&csi->vdev);

	ti_csi2rx_cleanup_vb2q(csi);
	ti_csi2rx_cleanup_subdev(csi);
	ti_csi2rx_cleanup_v4l2(csi);
	ti_csi2rx_cleanup_dma(csi);

	mutex_destroy(&csi->mutex);
}

static const struct of_device_id ti_csi2rx_of_match[] = {
	{ .compatible = "ti,j721e-csi2rx-shim", },
	{ },
};
MODULE_DEVICE_TABLE(of, ti_csi2rx_of_match);

static struct platform_driver ti_csi2rx_pdrv = {
	.probe = ti_csi2rx_probe,
	.remove_new = ti_csi2rx_remove,
	.driver = {
		.name = TI_CSI2RX_MODULE_NAME,
		.of_match_table = ti_csi2rx_of_match,
	},
};

module_platform_driver(ti_csi2rx_pdrv);

MODULE_DESCRIPTION("TI J721E CSI2 RX Driver");
MODULE_AUTHOR("Jai Luthra <j-luthra@ti.com>");
MODULE_LICENSE("GPL");