xref: /linux/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c (revision 876f5ebd58a9ac42f48a7ead3d5b274a314e0ace)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * RP1 Camera Front End Driver
4  *
5  * Copyright (c) 2021-2024 Raspberry Pi Ltd.
6  * Copyright (c) 2023-2024 Ideas on Board Oy
7  */
8 
9 #include <linux/clk.h>
10 #include <linux/debugfs.h>
11 #include <linux/delay.h>
12 #include <linux/device.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/err.h>
15 #include <linux/init.h>
16 #include <linux/interrupt.h>
17 #include <linux/io.h>
18 #include <linux/lcm.h>
19 #include <linux/math.h>
20 #include <linux/module.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/property.h>
24 #include <linux/seq_file.h>
25 #include <linux/slab.h>
26 #include <linux/uaccess.h>
27 #include <linux/videodev2.h>
28 
29 #include <media/v4l2-async.h>
30 #include <media/v4l2-common.h>
31 #include <media/v4l2-ctrls.h>
32 #include <media/v4l2-dev.h>
33 #include <media/v4l2-device.h>
34 #include <media/v4l2-event.h>
35 #include <media/v4l2-fwnode.h>
36 #include <media/v4l2-ioctl.h>
37 #include <media/v4l2-mc.h>
38 #include <media/videobuf2-dma-contig.h>
39 
40 #include <linux/media/raspberrypi/pisp_fe_config.h>
41 #include <linux/media/raspberrypi/pisp_fe_statistics.h>
42 
43 #include "cfe-fmts.h"
44 #include "cfe.h"
45 #include "csi2.h"
46 #include "pisp-fe.h"
47 
48 #define CREATE_TRACE_POINTS
49 #include "cfe-trace.h"
50 
#define CFE_MODULE_NAME	"rp1-cfe"
#define CFE_VERSION	"1.0"

/* Logging helpers, all routed through the parent platform device. */
#define cfe_dbg(cfe, fmt, arg...) dev_dbg(&(cfe)->pdev->dev, fmt, ##arg)
#define cfe_info(cfe, fmt, arg...) dev_info(&(cfe)->pdev->dev, fmt, ##arg)
#define cfe_err(cfe, fmt, arg...) dev_err(&(cfe)->pdev->dev, fmt, ##arg)

/* MIPICFG registers (offsets into the mipi_cfg_base mapping) */
#define MIPICFG_CFG		0x004
#define MIPICFG_INTR		0x028
#define MIPICFG_INTE		0x02c
#define MIPICFG_INTF		0x030
#define MIPICFG_INTS		0x034

/* MIPICFG_CFG bits */
#define MIPICFG_CFG_SEL_CSI	BIT(0)

/* MIPICFG interrupt status/enable bits, tested against MIPICFG_INTS */
#define MIPICFG_INT_CSI_DMA	BIT(0)
#define MIPICFG_INT_CSI_HOST	BIT(2)
#define MIPICFG_INT_PISP_FE	BIT(4)

/* Line stride alignment and maximum stride/buffer size accepted by the HW */
#define BPL_ALIGNMENT 16
#define MAX_BYTESPERLINE 0xffffff00
#define MAX_BUFFER_SIZE  0xffffff00
/*
 * Max width is therefore determined by the max stride divided by the number of
 * bits per pixel.
 *
 * However, to avoid overflow issues let's use a 16k maximum. This lets us
 * calculate 16k * 16k * 4 with 32bits. If we need higher maximums, a careful
 * review and adjustment of the code is needed so that it will deal with
 * overflows correctly.
 */
#define MAX_WIDTH 16384
#define MAX_HEIGHT MAX_WIDTH
/* Define a nominal minimum image size */
#define MIN_WIDTH 16
#define MIN_HEIGHT 16

/* Minimum dimensions accepted for metadata (embedded data) planes */
#define MIN_META_WIDTH 4
#define MIN_META_HEIGHT 1
91 
/* Default media-bus format: 640x480, 10-bit RGGB Bayer, full-range RAW. */
const struct v4l2_mbus_framefmt cfe_default_format = {
	.width = 640,
	.height = 480,
	.code = MEDIA_BUS_FMT_SRGGB10_1X10,
	.field = V4L2_FIELD_NONE,
	.colorspace = V4L2_COLORSPACE_RAW,
	.ycbcr_enc = V4L2_YCBCR_ENC_601,
	.quantization = V4L2_QUANTIZATION_FULL_RANGE,
	.xfer_func = V4L2_XFER_FUNC_NONE,
};
102 
/*
 * Identifiers for the video device nodes exposed by the driver. The ordering
 * is significant: entries below FE_OUT0 map 1:1 onto CSI2 DMA channels, and
 * code distinguishes the two groups with is_fe_node()/is_csi2_node().
 */
enum node_ids {
	/* CSI2 HW output nodes first. */
	CSI2_CH0,
	CSI2_CH1,
	CSI2_CH2,
	CSI2_CH3,
	/* FE only nodes from here on. */
	FE_OUT0,
	FE_OUT1,
	FE_STATS,
	FE_CONFIG,
	NUM_NODES
};
116 
/* Static, per-node properties used to create and link the video devices. */
struct node_description {
	enum node_ids id;
	/* Name suffix for the video device */
	const char *name;
	/* V4L2 capabilities (V4L2_CAP_*) the node may expose */
	unsigned int caps;
	/* Media pad flags for the node's single pad */
	unsigned int pad_flags;
	/* Remote subdev pad (CSI2 or FE) this node links to */
	unsigned int link_pad;
};
124 
/*
 * Static description of every video device node. Must match the ordering of
 * enum node_ids, as code indexes this table with a node's id.
 */
static const struct node_description node_desc[NUM_NODES] = {
	[CSI2_CH0] = {
		.name = "csi2-ch0",
		.caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE,
		.pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = CSI2_PAD_FIRST_SOURCE + 0
	},
	/*
	 * At the moment the main userspace component (libcamera) doesn't
	 * support metadata with video nodes that support both video and
	 * metadata. So for the time being this node is set to only support
	 * V4L2_CAP_META_CAPTURE.
	 */
	[CSI2_CH1] = {
		.name = "csi2-ch1",
		.caps = V4L2_CAP_META_CAPTURE,
		.pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = CSI2_PAD_FIRST_SOURCE + 1
	},
	[CSI2_CH2] = {
		.name = "csi2-ch2",
		.caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE,
		.pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = CSI2_PAD_FIRST_SOURCE + 2
	},
	[CSI2_CH3] = {
		.name = "csi2-ch3",
		.caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE,
		.pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = CSI2_PAD_FIRST_SOURCE + 3
	},
	[FE_OUT0] = {
		.name = "fe-image0",
		.caps = V4L2_CAP_VIDEO_CAPTURE,
		.pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = FE_OUTPUT0_PAD
	},
	[FE_OUT1] = {
		.name = "fe-image1",
		.caps = V4L2_CAP_VIDEO_CAPTURE,
		.pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = FE_OUTPUT1_PAD
	},
	[FE_STATS] = {
		.name = "fe-stats",
		.caps = V4L2_CAP_META_CAPTURE,
		.pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = FE_STATS_PAD
	},
	/* The only OUTPUT (userspace -> driver) node: the FE configuration. */
	[FE_CONFIG] = {
		.name = "fe-config",
		.caps = V4L2_CAP_META_OUTPUT,
		.pad_flags = MEDIA_PAD_FL_SOURCE | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = FE_CONFIG_PAD
	},
};
182 
/* Node classification, keyed on the enum node_ids ordering. */
#define is_fe_node(node) (((node)->id) >= FE_OUT0)
#define is_csi2_node(node) (!is_fe_node(node))

/* What a node *may* be, from the static capability table. */
#define node_supports_image_output(node) \
	(node_desc[(node)->id].caps & V4L2_CAP_VIDEO_CAPTURE)
#define node_supports_meta_output(node) \
	(node_desc[(node)->id].caps & V4L2_CAP_META_CAPTURE)
#define node_supports_image_input(node) \
	(node_desc[(node)->id].caps & V4L2_CAP_VIDEO_OUTPUT)
#define node_supports_meta_input(node) \
	(node_desc[(node)->id].caps & V4L2_CAP_META_OUTPUT)
#define node_supports_image(node) \
	(node_supports_image_output(node) || node_supports_image_input(node))
#define node_supports_meta(node) \
	(node_supports_meta_output(node) || node_supports_meta_input(node))

/* What a node currently *is*, from its vb2 queue type. */
#define is_image_output_node(node) \
	((node)->buffer_queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
#define is_image_input_node(node) \
	((node)->buffer_queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
#define is_image_node(node) \
	(is_image_output_node(node) || is_image_input_node(node))
#define is_meta_output_node(node) \
	((node)->buffer_queue.type == V4L2_BUF_TYPE_META_CAPTURE)
#define is_meta_input_node(node) \
	((node)->buffer_queue.type == V4L2_BUF_TYPE_META_OUTPUT)
#define is_meta_node(node) \
	(is_meta_output_node(node) || is_meta_input_node(node))

/*
 * To track state across all nodes. Each node owns NUM_STATES consecutive
 * bits in cfe_device::node_flags; see check_state()/set_state()/clear_state().
 */
#define NODE_REGISTERED		BIT(0)
#define NODE_ENABLED		BIT(1)
#define NODE_STREAMING		BIT(2)
#define FS_INT			BIT(3)
#define FE_INT			BIT(4)
#define NUM_STATES		5
219 
/* Driver buffer wrapper: vb2 buffer plus the per-node DMA queue linkage. */
struct cfe_buffer {
	struct vb2_v4l2_buffer vb;
	struct list_head list;
};
224 
/*
 * FE_CONFIG node buffer: carries a kernel-side copy of the userspace PiSP FE
 * configuration, captured in cfe_buffer_prepare().
 */
struct cfe_config_buffer {
	struct cfe_buffer buf;
	struct pisp_fe_config config;
};
229 
/* Get the cfe_buffer wrapping a vb2_buffer. */
static inline struct cfe_buffer *to_cfe_buffer(struct vb2_buffer *vb)
{
	return container_of(vb, struct cfe_buffer, vb.vb2_buf);
}
234 
/* Get the cfe_config_buffer wrapping a cfe_buffer (FE_CONFIG node only). */
static inline
struct cfe_config_buffer *to_cfe_config_buffer(struct cfe_buffer *buf)
{
	return container_of(buf, struct cfe_config_buffer, buf);
}
240 
/* Per-video-device state: one instance for each entry of enum node_ids. */
struct cfe_node {
	/* Node id, also the index into cfe_device::node[] and node_desc[] */
	enum node_ids id;
	/* Buffer the hardware is currently filling (completed at EOF) */
	struct cfe_buffer *cur_frm;
	/* Buffer already programmed as the next DMA target */
	struct cfe_buffer *next_frm;
	/* Current pixel format (image nodes) */
	struct v4l2_format vid_fmt;
	/* Current metadata format (meta nodes) */
	struct v4l2_format meta_fmt;
	/* vb2 buffer queue */
	struct vb2_queue buffer_queue;
	/* Queue of buffers waiting to be given to the hardware */
	struct list_head dma_queue;
	/* lock used to access this structure */
	struct mutex lock;
	/* Identifies video device for this channel */
	struct video_device video_dev;
	/* Pointer to the parent handle */
	struct cfe_device *cfe;
	/* Media pad for this node */
	struct media_pad pad;
	/* Frame-start counter */
	unsigned int fs_count;
	/* Timestamp of the current buffer */
	u64 ts;
};
269 
/* Top-level device state, one instance per RP1 CFE. */
struct cfe_device {
	struct dentry *debugfs;
	struct kref kref;

	/* peripheral base address */
	void __iomem *mipi_cfg_base;

	/* Peripheral clock */
	struct clk *clk;

	/* V4l2 device */
	struct v4l2_device v4l2_dev;
	struct media_device mdev;
	struct media_pipeline pipe;

	/* IRQ lock for node state and DMA queues */
	spinlock_t state_lock;
	/* True when every enabled node has a buffer queued for the next job */
	bool job_ready;
	/* True when a job has been programmed but not yet started (no FS seen) */
	bool job_queued;

	/* parent device */
	struct platform_device *pdev;
	/* subdevice async Notifier */
	struct v4l2_async_notifier notifier;

	/* Source sub device */
	struct v4l2_subdev *source_sd;
	/* Source subdev's pad */
	u32 source_pad;

	struct cfe_node node[NUM_NODES];
	/* Per-node state bits, NUM_STATES bits per node; see check_state() */
	DECLARE_BITMAP(node_flags, NUM_STATES * NUM_NODES);

	/* The CSI2 receiver and PiSP front end sub-devices */
	struct csi2_device csi2;
	struct pisp_fe_device fe;

	/* CSI2 channel feeding the FE, or -1 when the FE is not in use */
	int fe_csi2_channel;

	/* Mask of enabled streams */
	u64 streams_mask;
};
310 
/* The front end is in use iff it has been assigned a CSI2 channel. */
static inline bool is_fe_enabled(struct cfe_device *cfe)
{
	return cfe->fe_csi2_channel != -1;
}
315 
/* Get the cfe_device embedding a v4l2_device. */
static inline struct cfe_device *to_cfe_device(struct v4l2_device *v4l2_dev)
{
	return container_of(v4l2_dev, struct cfe_device, v4l2_dev);
}
320 
/* Read a MIPICFG register at the given byte offset. */
static inline u32 cfg_reg_read(struct cfe_device *cfe, u32 offset)
{
	return readl(cfe->mipi_cfg_base + offset);
}
325 
/* Write a MIPICFG register at the given byte offset. */
static inline void cfg_reg_write(struct cfe_device *cfe, u32 offset, u32 val)
{
	writel(val, cfe->mipi_cfg_base + offset);
}
330 
331 static bool check_state(struct cfe_device *cfe, unsigned long state,
332 			unsigned int node_id)
333 {
334 	unsigned long bit;
335 
336 	for_each_set_bit(bit, &state, sizeof(state)) {
337 		if (!test_bit(bit + (node_id * NUM_STATES), cfe->node_flags))
338 			return false;
339 	}
340 
341 	return true;
342 }
343 
344 static void set_state(struct cfe_device *cfe, unsigned long state,
345 		      unsigned int node_id)
346 {
347 	unsigned long bit;
348 
349 	for_each_set_bit(bit, &state, sizeof(state))
350 		set_bit(bit + (node_id * NUM_STATES), cfe->node_flags);
351 }
352 
353 static void clear_state(struct cfe_device *cfe, unsigned long state,
354 			unsigned int node_id)
355 {
356 	unsigned long bit;
357 
358 	for_each_set_bit(bit, &state, sizeof(state))
359 		clear_bit(bit + (node_id * NUM_STATES), cfe->node_flags);
360 }
361 
362 static bool test_any_node(struct cfe_device *cfe, unsigned long cond)
363 {
364 	for (unsigned int i = 0; i < NUM_NODES; i++) {
365 		if (check_state(cfe, cond, i))
366 			return true;
367 	}
368 
369 	return false;
370 }
371 
372 static bool test_all_nodes(struct cfe_device *cfe, unsigned long precond,
373 			   unsigned long cond)
374 {
375 	for (unsigned int i = 0; i < NUM_NODES; i++) {
376 		if (check_state(cfe, precond, i)) {
377 			if (!check_state(cfe, cond, i))
378 				return false;
379 		}
380 	}
381 
382 	return true;
383 }
384 
385 static int mipi_cfg_regs_show(struct seq_file *s, void *data)
386 {
387 	struct cfe_device *cfe = s->private;
388 	int ret;
389 
390 	ret = pm_runtime_resume_and_get(&cfe->pdev->dev);
391 	if (ret)
392 		return ret;
393 
394 #define DUMP(reg) seq_printf(s, #reg " \t0x%08x\n", cfg_reg_read(cfe, reg))
395 	DUMP(MIPICFG_CFG);
396 	DUMP(MIPICFG_INTR);
397 	DUMP(MIPICFG_INTE);
398 	DUMP(MIPICFG_INTF);
399 	DUMP(MIPICFG_INTS);
400 #undef DUMP
401 
402 	pm_runtime_put(&cfe->pdev->dev);
403 
404 	return 0;
405 }
406 
407 DEFINE_SHOW_ATTRIBUTE(mipi_cfg_regs);
408 
409 /* Format setup functions */
410 const struct cfe_fmt *find_format_by_code(u32 code)
411 {
412 	for (unsigned int i = 0; i < ARRAY_SIZE(formats); i++) {
413 		if (formats[i].code == code)
414 			return &formats[i];
415 	}
416 
417 	return NULL;
418 }
419 
420 const struct cfe_fmt *find_format_by_pix(u32 pixelformat)
421 {
422 	for (unsigned int i = 0; i < ARRAY_SIZE(formats); i++) {
423 		if (formats[i].fourcc == pixelformat)
424 			return &formats[i];
425 	}
426 
427 	return NULL;
428 }
429 
430 static const struct cfe_fmt *find_format_by_code_and_fourcc(u32 code,
431 							    u32 fourcc)
432 {
433 	for (unsigned int i = 0; i < ARRAY_SIZE(formats); i++) {
434 		if (formats[i].code == code && formats[i].fourcc == fourcc)
435 			return &formats[i];
436 	}
437 
438 	return NULL;
439 }
440 
441 /*
442  * Given the mbus code, find the 16 bit remapped code. Returns 0 if no remap
443  * possible.
444  */
445 u32 cfe_find_16bit_code(u32 code)
446 {
447 	const struct cfe_fmt *cfe_fmt;
448 
449 	cfe_fmt = find_format_by_code(code);
450 
451 	if (!cfe_fmt || !cfe_fmt->remap[CFE_REMAP_16BIT])
452 		return 0;
453 
454 	cfe_fmt = find_format_by_pix(cfe_fmt->remap[CFE_REMAP_16BIT]);
455 	if (!cfe_fmt)
456 		return 0;
457 
458 	return cfe_fmt->code;
459 }
460 
461 /*
462  * Given the mbus code, find the 8 bit compressed code. Returns 0 if no remap
463  * possible.
464  */
465 u32 cfe_find_compressed_code(u32 code)
466 {
467 	const struct cfe_fmt *cfe_fmt;
468 
469 	cfe_fmt = find_format_by_code(code);
470 
471 	if (!cfe_fmt || !cfe_fmt->remap[CFE_REMAP_COMPRESSED])
472 		return 0;
473 
474 	cfe_fmt = find_format_by_pix(cfe_fmt->remap[CFE_REMAP_COMPRESSED]);
475 	if (!cfe_fmt)
476 		return 0;
477 
478 	return cfe_fmt->code;
479 }
480 
481 static void cfe_calc_vid_format_size_bpl(struct cfe_device *cfe,
482 					 const struct cfe_fmt *fmt,
483 					 struct v4l2_format *f)
484 {
485 	unsigned int min_bytesperline;
486 
487 	v4l_bound_align_image(&f->fmt.pix.width, MIN_WIDTH, MAX_WIDTH, 2,
488 			      &f->fmt.pix.height, MIN_HEIGHT, MAX_HEIGHT, 0, 0);
489 
490 	min_bytesperline =
491 		ALIGN((f->fmt.pix.width * fmt->depth) >> 3, BPL_ALIGNMENT);
492 
493 	if (f->fmt.pix.bytesperline > min_bytesperline &&
494 	    f->fmt.pix.bytesperline <= MAX_BYTESPERLINE)
495 		f->fmt.pix.bytesperline =
496 			ALIGN(f->fmt.pix.bytesperline, BPL_ALIGNMENT);
497 	else
498 		f->fmt.pix.bytesperline = min_bytesperline;
499 
500 	f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
501 
502 	cfe_dbg(cfe, "%s: %p4cc size: %ux%u bpl:%u img_size:%u\n", __func__,
503 		&f->fmt.pix.pixelformat, f->fmt.pix.width, f->fmt.pix.height,
504 		f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
505 }
506 
507 static void cfe_calc_meta_format_size_bpl(struct cfe_device *cfe,
508 					  const struct cfe_fmt *fmt,
509 					  struct v4l2_format *f)
510 {
511 	v4l_bound_align_image(&f->fmt.meta.width, MIN_META_WIDTH, MAX_WIDTH, 2,
512 			      &f->fmt.meta.height, MIN_META_HEIGHT, MAX_HEIGHT,
513 			      0, 0);
514 
515 	f->fmt.meta.bytesperline = (f->fmt.meta.width * fmt->depth) >> 3;
516 	f->fmt.meta.buffersize = f->fmt.meta.height * f->fmt.pix.bytesperline;
517 
518 	cfe_dbg(cfe, "%s: %p4cc size: %ux%u bpl:%u buf_size:%u\n", __func__,
519 		&f->fmt.meta.dataformat, f->fmt.meta.width, f->fmt.meta.height,
520 		f->fmt.meta.bytesperline, f->fmt.meta.buffersize);
521 }
522 
/*
 * Program the next DMA buffer into each streaming CSI2 channel: pop the head
 * of every streaming CSI2 node's dma_queue and hand its address to the CSI2
 * hardware.
 *
 * Called with cfe->state_lock held. Each streaming node's dma_queue must be
 * non-empty; cfe_check_job_ready() guarantees this before a job is queued.
 */
static void cfe_schedule_next_csi2_job(struct cfe_device *cfe)
{
	struct cfe_buffer *buf;
	dma_addr_t addr;

	for (unsigned int i = 0; i < CSI2_NUM_CHANNELS; i++) {
		struct cfe_node *node = &cfe->node[i];
		unsigned int stride, size;

		if (!check_state(cfe, NODE_STREAMING, i))
			continue;

		buf = list_first_entry(&node->dma_queue, struct cfe_buffer,
				       list);
		node->next_frm = buf;
		list_del(&buf->list);

		trace_cfe_csi2_schedule(node->id, &buf->vb.vb2_buf);

		if (is_meta_node(node)) {
			size = node->meta_fmt.fmt.meta.buffersize;
			/* We use CSI2_CH_CTRL_PACK_BYTES, so stride == 0 */
			stride = 0;
		} else {
			size = node->vid_fmt.fmt.pix.sizeimage;
			stride = node->vid_fmt.fmt.pix.bytesperline;
		}

		addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
		csi2_set_buffer(&cfe->csi2, node->id, addr, stride, size);
	}
}
555 
/*
 * Submit the next front end job: pop the head buffer of every streaming FE
 * node's dma_queue and pass the set to the FE together with the FE_CONFIG
 * buffer's validated configuration.
 *
 * Called with cfe->state_lock held, after cfe_schedule_next_csi2_job() has
 * populated next_frm for the CSI2 nodes; FE_CONFIG's next_frm is set in the
 * loop below since FE_CONFIG is one of the FE nodes.
 */
static void cfe_schedule_next_pisp_job(struct cfe_device *cfe)
{
	struct vb2_buffer *vb2_bufs[FE_NUM_PADS] = { 0 };
	struct cfe_config_buffer *config_buf;
	struct cfe_buffer *buf;

	for (unsigned int i = CSI2_NUM_CHANNELS; i < NUM_NODES; i++) {
		struct cfe_node *node = &cfe->node[i];

		if (!check_state(cfe, NODE_STREAMING, i))
			continue;

		buf = list_first_entry(&node->dma_queue, struct cfe_buffer,
				       list);

		trace_cfe_fe_schedule(node->id, &buf->vb.vb2_buf);

		node->next_frm = buf;
		vb2_bufs[node_desc[i].link_pad] = &buf->vb.vb2_buf;
		list_del(&buf->list);
	}

	config_buf = to_cfe_config_buffer(cfe->node[FE_CONFIG].next_frm);
	pisp_fe_submit_job(&cfe->fe, vb2_bufs, &config_buf->config);
}
581 
582 static bool cfe_check_job_ready(struct cfe_device *cfe)
583 {
584 	for (unsigned int i = 0; i < NUM_NODES; i++) {
585 		struct cfe_node *node = &cfe->node[i];
586 
587 		if (!check_state(cfe, NODE_ENABLED, i))
588 			continue;
589 
590 		if (list_empty(&node->dma_queue))
591 			return false;
592 	}
593 
594 	return true;
595 }
596 
/*
 * Program the hardware with the buffers for the next frame (CSI2 channels
 * and, when enabled, the front end), then recompute job_ready for the frame
 * after this one. Called with cfe->state_lock held and cfe->job_ready true.
 */
static void cfe_prepare_next_job(struct cfe_device *cfe)
{
	trace_cfe_prepare_next_job(is_fe_enabled(cfe));

	cfe->job_queued = true;
	cfe_schedule_next_csi2_job(cfe);
	if (is_fe_enabled(cfe))
		cfe_schedule_next_pisp_job(cfe);

	/* Flag if another job is ready after this. */
	cfe->job_ready = cfe_check_job_ready(cfe);
}
609 
/*
 * Complete the node's current buffer and return it to vb2 with @state.
 * fs_count has already been advanced for the next frame, hence the -1 for
 * this buffer's sequence number. Caller must ensure cur_frm is non-NULL.
 */
static void cfe_process_buffer_complete(struct cfe_node *node,
					enum vb2_buffer_state state)
{
	trace_cfe_buffer_complete(node->id, &node->cur_frm->vb);

	node->cur_frm->vb.sequence = node->fs_count - 1;
	vb2_buffer_done(&node->cur_frm->vb.vb2_buf, state);
}
618 
/*
 * Queue a V4L2_EVENT_FRAME_SYNC event carrying the sequence number of the
 * frame that just started (fs_count was already incremented).
 */
static void cfe_queue_event_sof(struct cfe_node *node)
{
	struct v4l2_event event = {
		.type = V4L2_EVENT_FRAME_SYNC,
		.u.frame_sync.frame_sequence = node->fs_count - 1,
	};

	v4l2_event_queue(&node->video_dev, &event);
}
628 
/*
 * Frame-start handler for one node: promote next_frm to cur_frm, advance the
 * frame counter, synchronise the frame timestamp across nodes, and clear
 * job_queued once every streaming node has seen this frame's FS.
 *
 * Called from cfe_isr() with cfe->state_lock held.
 */
static void cfe_sof_isr(struct cfe_node *node)
{
	struct cfe_device *cfe = node->cfe;
	bool matching_fs = true;

	trace_cfe_frame_start(node->id, node->fs_count);

	/*
	 * If the sensor is producing unexpected frame event ordering over a
	 * sustained period of time, guard against the possibility of coming
	 * here and orphaning the cur_frm if it's not been dequeued already.
	 * Unfortunately, there is not enough hardware state to tell if this
	 * may have occurred.
	 */
	if (WARN(node->cur_frm, "%s: [%s] Orphaned frame at seq %u\n",
		 __func__, node_desc[node->id].name, node->fs_count))
		cfe_process_buffer_complete(node, VB2_BUF_STATE_ERROR);

	node->cur_frm = node->next_frm;
	node->next_frm = NULL;
	node->fs_count++;

	node->ts = ktime_get_ns();
	for (unsigned int i = 0; i < NUM_NODES; i++) {
		if (!check_state(cfe, NODE_STREAMING, i) || i == node->id)
			continue;
		/*
		 * This checks if any other node has seen a FS. If yes, use the
		 * same timestamp, eventually across all node buffers.
		 */
		if (cfe->node[i].fs_count >= node->fs_count)
			node->ts = cfe->node[i].ts;
		/*
		 * This checks if all other node have seen a matching FS. If
		 * yes, we can flag another job to be queued.
		 */
		if (matching_fs && cfe->node[i].fs_count != node->fs_count)
			matching_fs = false;
	}

	if (matching_fs)
		cfe->job_queued = false;

	if (node->cur_frm)
		node->cur_frm->vb.vb2_buf.timestamp = node->ts;

	set_state(cfe, FS_INT, node->id);
	clear_state(cfe, FE_INT, node->id);

	if (is_image_output_node(node))
		cfe_queue_event_sof(node);
}
681 
/*
 * Frame-end handler for one node: complete cur_frm (if any) as DONE and
 * record that the FE interrupt was seen so the FS/FE ordering logic in
 * cfe_isr() can detect missing events.
 *
 * Called from cfe_isr() with cfe->state_lock held.
 */
static void cfe_eof_isr(struct cfe_node *node)
{
	struct cfe_device *cfe = node->cfe;

	trace_cfe_frame_end(node->id, node->fs_count - 1);

	if (node->cur_frm)
		cfe_process_buffer_complete(node, VB2_BUF_STATE_DONE);

	node->cur_frm = NULL;
	set_state(cfe, FE_INT, node->id);
	clear_state(cfe, FS_INT, node->id);
}
695 
/*
 * Shared interrupt handler. Demultiplexes the MIPICFG status into per-node
 * frame-start (sof) and frame-end (eof) flags via the CSI2 and FE
 * sub-handlers, then runs the SOF/EOF state machine for every streaming
 * node, taking care of the possible FS/FE event orderings, and finally
 * queues the next job if one is ready.
 */
static irqreturn_t cfe_isr(int irq, void *dev)
{
	struct cfe_device *cfe = dev;
	bool sof[NUM_NODES] = { 0 }, eof[NUM_NODES] = { 0 };
	u32 sts;

	sts = cfg_reg_read(cfe, MIPICFG_INTS);

	if (sts & MIPICFG_INT_CSI_DMA)
		csi2_isr(&cfe->csi2, sof, eof);

	if (sts & MIPICFG_INT_PISP_FE)
		pisp_fe_isr(&cfe->fe, sof + CSI2_NUM_CHANNELS,
			    eof + CSI2_NUM_CHANNELS);

	spin_lock(&cfe->state_lock);

	for (unsigned int i = 0; i < NUM_NODES; i++) {
		struct cfe_node *node = &cfe->node[i];

		/*
		 * The check_state(NODE_STREAMING) is to ensure we do not loop
		 * over the CSI2_CHx nodes when the FE is active since they
		 * generate interrupts even though the node is not streaming.
		 */
		if (!check_state(cfe, NODE_STREAMING, i) || !(sof[i] || eof[i]))
			continue;

		/*
		 * There are 3 cases where we could get FS + FE_ACK at
		 * the same time:
		 * 1) FE of the current frame, and FS of the next frame.
		 * 2) FS + FE of the same frame.
		 * 3) FE of the current frame, and FS + FE of the next
		 *    frame. To handle this, see the sof handler below.
		 *
		 * (1) is handled implicitly by the ordering of the FE and FS
		 * handlers below.
		 */
		if (eof[i]) {
			/*
			 * The condition below tests for (2). Run the FS handler
			 * first before the FE handler, both for the current
			 * frame.
			 */
			if (sof[i] && !check_state(cfe, FS_INT, i)) {
				cfe_sof_isr(node);
				sof[i] = false;
			}

			cfe_eof_isr(node);
		}

		if (sof[i]) {
			/*
			 * The condition below tests for (3). In such cases, we
			 * come in here with FS flag set in the node state from
			 * the previous frame since it only gets cleared in
			 * cfe_eof_isr(). Handle the FE for the previous
			 * frame first before the FS handler for the current
			 * frame.
			 */
			if (check_state(cfe, FS_INT, node->id) &&
			    !check_state(cfe, FE_INT, node->id)) {
				cfe_dbg(cfe, "%s: [%s] Handling missing previous FE interrupt\n",
					__func__, node_desc[node->id].name);
				cfe_eof_isr(node);
			}

			cfe_sof_isr(node);
		}

		if (!cfe->job_queued && cfe->job_ready)
			cfe_prepare_next_job(cfe);
	}

	spin_unlock(&cfe->state_lock);

	return IRQ_HANDLED;
}
776 
777 /*
778  * Stream helpers
779  */
780 
/*
 * Fallback when the source subdev cannot report a frame descriptor: assume
 * virtual channel 0 and derive the CSI-2 data type from the format currently
 * set on the CSI2 sink pad. Requires the CSI2 subdev active state to be
 * locked by the caller.
 */
static int cfe_get_vc_dt_fallback(struct cfe_device *cfe, u8 *vc, u8 *dt)
{
	struct v4l2_subdev_state *state;
	struct v4l2_mbus_framefmt *fmt;
	const struct cfe_fmt *cfe_fmt;

	state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd);

	fmt = v4l2_subdev_state_get_format(state, CSI2_PAD_SINK, 0);
	if (!fmt)
		return -EINVAL;

	cfe_fmt = find_format_by_code(fmt->code);
	if (!cfe_fmt)
		return -EINVAL;

	/* Assume virtual channel 0 when the source cannot tell us. */
	*vc = 0;
	*dt = cfe_fmt->csi_dt;

	return 0;
}
802 
/*
 * Determine the CSI-2 virtual channel and data type for the stream feeding
 * CSI2 output @channel: walk the CSI2 routing back to the sink stream and
 * look that stream up in the source subdev's frame descriptor. Falls back to
 * cfe_get_vc_dt_fallback() when the source does not implement
 * get_frame_desc. Requires the CSI2 subdev active state to be locked.
 */
static int cfe_get_vc_dt(struct cfe_device *cfe, unsigned int channel, u8 *vc,
			 u8 *dt)
{
	struct v4l2_mbus_frame_desc remote_desc;
	struct v4l2_subdev_state *state;
	u32 sink_stream;
	unsigned int i;
	int ret;

	state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd);

	/* Map the CSI2 source pad for @channel back to its sink stream. */
	ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
		CSI2_PAD_FIRST_SOURCE + channel, 0, NULL, &sink_stream);
	if (ret)
		return ret;

	ret = v4l2_subdev_call(cfe->source_sd, pad, get_frame_desc,
			       cfe->source_pad, &remote_desc);
	if (ret == -ENOIOCTLCMD) {
		cfe_dbg(cfe, "source does not support get_frame_desc, use fallback\n");
		return cfe_get_vc_dt_fallback(cfe, vc, dt);
	} else if (ret) {
		cfe_err(cfe, "Failed to get frame descriptor\n");
		return ret;
	}

	if (remote_desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
		cfe_err(cfe, "Frame descriptor does not describe CSI-2 link");
		return -EINVAL;
	}

	/* Find the descriptor entry matching the routed sink stream. */
	for (i = 0; i < remote_desc.num_entries; i++) {
		if (remote_desc.entry[i].stream == sink_stream)
			break;
	}

	if (i == remote_desc.num_entries) {
		cfe_err(cfe, "Stream %u not found in remote frame desc\n",
			sink_stream);
		return -EINVAL;
	}

	*vc = remote_desc.entry[i].bus.csi2.vc;
	*dt = remote_desc.entry[i].bus.csi2.dt;

	return 0;
}
850 
/*
 * Start the hardware path feeding @node. When this is the last enabled node
 * to start streaming and the FE is in use, also start the FE and its
 * dedicated CSI2 channel. For CSI2 nodes, start the node's own CSI2 channel
 * in the appropriate mode (normal/remap/compressed). Finally, queue the
 * first job if all enabled nodes are streaming and buffers are available.
 *
 * The CSI2 subdev active state must be locked by the caller.
 */
static int cfe_start_channel(struct cfe_node *node)
{
	struct cfe_device *cfe = node->cfe;
	struct v4l2_subdev_state *state;
	struct v4l2_mbus_framefmt *source_fmt;
	const struct cfe_fmt *fmt;
	unsigned long flags;
	bool start_fe;
	int ret;

	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);

	/* The FE starts with the last enabled node that begins streaming. */
	start_fe = is_fe_enabled(cfe) &&
		   test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING);

	state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd);

	if (start_fe) {
		unsigned int width, height;
		u8 vc, dt;

		cfe_dbg(cfe, "%s: %s using csi2 channel %d\n", __func__,
			node_desc[FE_OUT0].name, cfe->fe_csi2_channel);

		ret = cfe_get_vc_dt(cfe, cfe->fe_csi2_channel, &vc, &dt);
		if (ret)
			return ret;

		source_fmt = v4l2_subdev_state_get_format(state,
			node_desc[cfe->fe_csi2_channel].link_pad);
		/*
		 * NOTE(review): fmt is dereferenced below without a NULL
		 * check; presumably format validation earlier guarantees the
		 * code is always present in the table — confirm.
		 */
		fmt = find_format_by_code(source_fmt->code);

		width = source_fmt->width;
		height = source_fmt->height;

		/* Must have a valid CSI2 datatype. */
		WARN_ON(!fmt->csi_dt);

		/*
		 * Start the associated CSI2 Channel as well.
		 *
		 * Must write to the ADDR register to latch the ctrl values
		 * even if we are connected to the front end. Once running,
		 * this is handled by the CSI2 AUTO_ARM mode.
		 */
		csi2_start_channel(&cfe->csi2, cfe->fe_csi2_channel,
				   CSI2_MODE_FE_STREAMING,
				   true, false, width, height, vc, dt);
		csi2_set_buffer(&cfe->csi2, cfe->fe_csi2_channel, 0, 0, -1);
		pisp_fe_start(&cfe->fe);
	}

	if (is_csi2_node(node)) {
		unsigned int width = 0, height = 0;
		u8 vc, dt;

		ret = cfe_get_vc_dt(cfe, node->id, &vc, &dt);
		if (ret) {
			/* Undo the FE start done above before bailing out. */
			if (start_fe) {
				csi2_stop_channel(&cfe->csi2,
						  cfe->fe_csi2_channel);
				pisp_fe_stop(&cfe->fe);
			}

			return ret;
		}

		u32 mode = CSI2_MODE_NORMAL;

		source_fmt = v4l2_subdev_state_get_format(state,
			node_desc[node->id].link_pad);
		fmt = find_format_by_code(source_fmt->code);

		/* Must have a valid CSI2 datatype. */
		WARN_ON(!fmt->csi_dt);

		if (is_image_output_node(node)) {
			u32  pixfmt;

			width = source_fmt->width;
			height = source_fmt->height;

			pixfmt = node->vid_fmt.fmt.pix.pixelformat;

			/* Pick the channel mode from the requested fourcc. */
			if (pixfmt == fmt->remap[CFE_REMAP_16BIT]) {
				mode = CSI2_MODE_REMAP;
			} else if (pixfmt == fmt->remap[CFE_REMAP_COMPRESSED]) {
				mode = CSI2_MODE_COMPRESSED;
				csi2_set_compression(&cfe->csi2, node->id,
						     CSI2_COMPRESSION_DELTA, 0,
						     0);
			}
		}
		/* Unconditionally start this CSI2 channel. */
		csi2_start_channel(&cfe->csi2, node->id,
				   mode,
				   /* Auto arm */
				   false,
				   /* Pack bytes */
				   is_meta_node(node) ? true : false,
				   width, height, vc, dt);
	}

	spin_lock_irqsave(&cfe->state_lock, flags);
	if (cfe->job_ready && test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING))
		cfe_prepare_next_job(cfe);
	spin_unlock_irqrestore(&cfe->state_lock, flags);

	return 0;
}
961 
962 static void cfe_stop_channel(struct cfe_node *node, bool fe_stop)
963 {
964 	struct cfe_device *cfe = node->cfe;
965 
966 	cfe_dbg(cfe, "%s: [%s] fe_stop %u\n", __func__,
967 		node_desc[node->id].name, fe_stop);
968 
969 	if (fe_stop) {
970 		csi2_stop_channel(&cfe->csi2, cfe->fe_csi2_channel);
971 		pisp_fe_stop(&cfe->fe);
972 	}
973 
974 	if (is_csi2_node(node))
975 		csi2_stop_channel(&cfe->csi2, node->id);
976 }
977 
/*
 * Return every buffer owned by @node back to vb2 with @state: everything
 * still on the DMA queue, plus cur_frm and next_frm if set. Used on stream
 * stop and on start-streaming failure paths.
 */
static void cfe_return_buffers(struct cfe_node *node,
			       enum vb2_buffer_state state)
{
	struct cfe_device *cfe = node->cfe;
	struct cfe_buffer *buf, *tmp;
	unsigned long flags;

	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);

	spin_lock_irqsave(&cfe->state_lock, flags);
	list_for_each_entry_safe(buf, tmp, &node->dma_queue, list) {
		list_del(&buf->list);
		trace_cfe_return_buffer(node->id, buf->vb.vb2_buf.index, 2);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}

	if (node->cur_frm) {
		trace_cfe_return_buffer(node->id,
					node->cur_frm->vb.vb2_buf.index, 0);
		vb2_buffer_done(&node->cur_frm->vb.vb2_buf, state);
	}
	/* Avoid a double completion when cur_frm and next_frm are the same. */
	if (node->next_frm && node->cur_frm != node->next_frm) {
		trace_cfe_return_buffer(node->id,
					node->next_frm->vb.vb2_buf.index, 1);
		vb2_buffer_done(&node->next_frm->vb.vb2_buf, state);
	}

	node->cur_frm = NULL;
	node->next_frm = NULL;
	spin_unlock_irqrestore(&cfe->state_lock, flags);
}
1009 
1010 /*
1011  * vb2 ops
1012  */
1013 
/*
 * vb2 queue_setup: single plane, sized from the node's current image or
 * metadata format. Accepts a larger user-requested plane size; rejects a
 * smaller one.
 */
static int cfe_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
			   unsigned int *nplanes, unsigned int sizes[],
			   struct device *alloc_devs[])
{
	struct cfe_node *node = vb2_get_drv_priv(vq);
	struct cfe_device *cfe = node->cfe;
	unsigned int size = is_image_node(node) ?
				    node->vid_fmt.fmt.pix.sizeimage :
				    node->meta_fmt.fmt.meta.buffersize;

	cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name,
		node->buffer_queue.type);

	/*
	 * Enforce a minimum of 3 buffers overall.
	 * NOTE(review): this compares against vq->max_num_buffers (the queue
	 * limit); other drivers compare against the number of buffers already
	 * allocated — confirm max_num_buffers is the intended field here.
	 */
	if (vq->max_num_buffers + *nbuffers < 3)
		*nbuffers = 3 - vq->max_num_buffers;

	if (*nplanes) {
		if (sizes[0] < size) {
			cfe_err(cfe, "sizes[0] %i < size %u\n", sizes[0], size);
			return -EINVAL;
		}
		size = sizes[0];
	}

	*nplanes = 1;
	sizes[0] = size;

	return 0;
}
1043 
/*
 * vb2 buf_prepare: verify the plane is large enough for the current format
 * and set its payload. For FE_CONFIG buffers, additionally snapshot the
 * userspace PiSP configuration into the kernel copy and validate it against
 * the FE output formats.
 */
static int cfe_buffer_prepare(struct vb2_buffer *vb)
{
	struct cfe_node *node = vb2_get_drv_priv(vb->vb2_queue);
	struct cfe_device *cfe = node->cfe;
	struct cfe_buffer *buf = to_cfe_buffer(vb);
	unsigned long size;

	trace_cfe_buffer_prepare(node->id, vb);

	size = is_image_node(node) ? node->vid_fmt.fmt.pix.sizeimage :
				     node->meta_fmt.fmt.meta.buffersize;
	if (vb2_plane_size(vb, 0) < size) {
		cfe_err(cfe, "data will not fit into plane (%lu < %lu)\n",
			vb2_plane_size(vb, 0), size);
		return -EINVAL;
	}

	vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);

	if (node->id == FE_CONFIG) {
		struct cfe_config_buffer *b = to_cfe_config_buffer(buf);
		void *addr = vb2_plane_vaddr(vb, 0);

		/* Copy out now so userspace cannot change it after validation. */
		memcpy(&b->config, addr, sizeof(struct pisp_fe_config));
		return pisp_fe_validate_config(&cfe->fe, &b->config,
					       &cfe->node[FE_OUT0].vid_fmt,
					       &cfe->node[FE_OUT1].vid_fmt);
	}

	return 0;
}
1075 
/*
 * Add a buffer to the node's DMA queue and, if every streaming node now has
 * a buffer available, program the next hardware job immediately.
 */
static void cfe_buffer_queue(struct vb2_buffer *vb)
{
	struct cfe_node *node = vb2_get_drv_priv(vb->vb2_queue);
	struct cfe_device *cfe = node->cfe;
	struct cfe_buffer *buf = to_cfe_buffer(vb);
	unsigned long flags;
	bool schedule_now;

	spin_lock_irqsave(&cfe->state_lock, flags);

	list_add_tail(&buf->list, &node->dma_queue);

	/* Re-evaluate job readiness now that a new buffer is available. */
	if (!cfe->job_ready)
		cfe->job_ready = cfe_check_job_ready(cfe);

	/*
	 * Schedule right away only if no job is already queued and all
	 * enabled nodes are streaming; otherwise the IRQ path picks it up.
	 */
	schedule_now = !cfe->job_queued && cfe->job_ready &&
		       test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING);

	trace_cfe_buffer_queue(node->id, vb, schedule_now);

	if (schedule_now)
		cfe_prepare_next_job(cfe);

	spin_unlock_irqrestore(&cfe->state_lock, flags);
}
1101 
/*
 * Return the CSI-2 link frequency of the source subdev, or a negative errno
 * on failure. Must be called with the CSI-2 subdev active state locked.
 */
static s64 cfe_get_source_link_freq(struct cfe_device *cfe)
{
	struct media_pad *src_pad =
		&cfe->source_sd->entity.pads[cfe->source_pad];
	struct v4l2_subdev_state *state;
	s64 link_freq;
	u32 bpp;

	state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd);

	/*
	 * v4l2_get_link_freq() uses V4L2_CID_LINK_FREQ first, and falls back
	 * to V4L2_CID_PIXEL_RATE if V4L2_CID_LINK_FREQ is not available.
	 *
	 * With multistream input there is no single pixel rate, and thus we
	 * cannot use V4L2_CID_PIXEL_RATE, so we pass 0 as the bpp which
	 * causes v4l2_get_link_freq() to return an error if it falls back to
	 * V4L2_CID_PIXEL_RATE.
	 */

	if (state->routing.num_routes == 1) {
		struct v4l2_subdev_route *route = &state->routing.routes[0];
		struct v4l2_mbus_framefmt *source_fmt;
		const struct cfe_fmt *fmt;

		/* Single route: derive bpp from the sink stream's format. */
		source_fmt = v4l2_subdev_state_get_format(state,
							  route->sink_pad,
							  route->sink_stream);

		fmt = find_format_by_code(source_fmt->code);
		if (!fmt)
			return -EINVAL;

		bpp = fmt->depth;
	} else {
		bpp = 0;
	}

	link_freq = v4l2_get_link_freq(src_pad, bpp,
				       2 * cfe->csi2.dphy.active_lanes);
	if (link_freq < 0)
		cfe_err(cfe, "failed to get link freq for subdev '%s'\n",
			cfe->source_sd->name);

	return link_freq;
}
1148 
1149 static int cfe_start_streaming(struct vb2_queue *vq, unsigned int count)
1150 {
1151 	struct v4l2_mbus_config mbus_config = { 0 };
1152 	struct cfe_node *node = vb2_get_drv_priv(vq);
1153 	struct cfe_device *cfe = node->cfe;
1154 	struct v4l2_subdev_state *state;
1155 	struct v4l2_subdev_route *route;
1156 	s64 link_freq;
1157 	int ret;
1158 
1159 	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1160 
1161 	if (!check_state(cfe, NODE_ENABLED, node->id)) {
1162 		cfe_err(cfe, "%s node link is not enabled.\n",
1163 			node_desc[node->id].name);
1164 		ret = -EINVAL;
1165 		goto err_streaming;
1166 	}
1167 
1168 	ret = pm_runtime_resume_and_get(&cfe->pdev->dev);
1169 	if (ret < 0) {
1170 		cfe_err(cfe, "pm_runtime_resume_and_get failed\n");
1171 		goto err_streaming;
1172 	}
1173 
1174 	/* When using the Frontend, we must enable the FE_CONFIG node. */
1175 	if (is_fe_enabled(cfe) &&
1176 	    !check_state(cfe, NODE_ENABLED, cfe->node[FE_CONFIG].id)) {
1177 		cfe_err(cfe, "FE enabled, but FE_CONFIG node is not\n");
1178 		ret = -EINVAL;
1179 		goto err_pm_put;
1180 	}
1181 
1182 	ret = media_pipeline_start(&node->pad, &cfe->pipe);
1183 	if (ret < 0) {
1184 		cfe_err(cfe, "Failed to start media pipeline: %d\n", ret);
1185 		goto err_pm_put;
1186 	}
1187 
1188 	state = v4l2_subdev_lock_and_get_active_state(&cfe->csi2.sd);
1189 
1190 	clear_state(cfe, FS_INT | FE_INT, node->id);
1191 	set_state(cfe, NODE_STREAMING, node->id);
1192 	node->fs_count = 0;
1193 
1194 	ret = cfe_start_channel(node);
1195 	if (ret)
1196 		goto err_unlock_state;
1197 
1198 	if (!test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING)) {
1199 		cfe_dbg(cfe, "Streaming on hold, as all nodes are not set to streaming yet\n");
1200 		v4l2_subdev_unlock_state(state);
1201 		return 0;
1202 	}
1203 
1204 	cfg_reg_write(cfe, MIPICFG_CFG, MIPICFG_CFG_SEL_CSI);
1205 	cfg_reg_write(cfe, MIPICFG_INTE,
1206 		      MIPICFG_INT_CSI_DMA | MIPICFG_INT_PISP_FE);
1207 
1208 	ret = v4l2_subdev_call(cfe->source_sd, pad, get_mbus_config, 0,
1209 			       &mbus_config);
1210 	if (ret < 0 && ret != -ENOIOCTLCMD) {
1211 		cfe_err(cfe, "g_mbus_config failed\n");
1212 		goto err_clear_inte;
1213 	}
1214 
1215 	cfe->csi2.dphy.active_lanes = mbus_config.bus.mipi_csi2.num_data_lanes;
1216 	if (!cfe->csi2.dphy.active_lanes)
1217 		cfe->csi2.dphy.active_lanes = cfe->csi2.dphy.max_lanes;
1218 	if (cfe->csi2.dphy.active_lanes > cfe->csi2.dphy.max_lanes) {
1219 		cfe_err(cfe, "Device has requested %u data lanes, which is >%u configured in DT\n",
1220 			cfe->csi2.dphy.active_lanes, cfe->csi2.dphy.max_lanes);
1221 		ret = -EINVAL;
1222 		goto err_clear_inte;
1223 	}
1224 
1225 	link_freq = cfe_get_source_link_freq(cfe);
1226 	if (link_freq < 0)
1227 		goto err_clear_inte;
1228 
1229 	cfe->csi2.dphy.dphy_rate = div_s64(link_freq * 2, 1000000);
1230 	csi2_open_rx(&cfe->csi2);
1231 
1232 	cfe->streams_mask = 0;
1233 
1234 	for_each_active_route(&state->routing, route)
1235 		cfe->streams_mask |= BIT_ULL(route->sink_stream);
1236 
1237 	ret = v4l2_subdev_enable_streams(cfe->source_sd, cfe->source_pad,
1238 					 cfe->streams_mask);
1239 	if (ret) {
1240 		cfe_err(cfe, "stream on failed in subdev\n");
1241 		goto err_disable_cfe;
1242 	}
1243 
1244 	cfe_dbg(cfe, "Streaming enabled\n");
1245 
1246 	v4l2_subdev_unlock_state(state);
1247 
1248 	return 0;
1249 
1250 err_disable_cfe:
1251 	csi2_close_rx(&cfe->csi2);
1252 err_clear_inte:
1253 	cfg_reg_write(cfe, MIPICFG_INTE, 0);
1254 
1255 	cfe_stop_channel(node,
1256 			 is_fe_enabled(cfe) && test_all_nodes(cfe, NODE_ENABLED,
1257 							      NODE_STREAMING));
1258 err_unlock_state:
1259 	v4l2_subdev_unlock_state(state);
1260 	media_pipeline_stop(&node->pad);
1261 err_pm_put:
1262 	pm_runtime_put(&cfe->pdev->dev);
1263 err_streaming:
1264 	cfe_return_buffers(node, VB2_BUF_STATE_QUEUED);
1265 	clear_state(cfe, NODE_STREAMING, node->id);
1266 
1267 	return ret;
1268 }
1269 
/*
 * Stop streaming on a video node. The shared hardware (source subdev, CSI-2
 * receiver, interrupts) is only torn down when the last streaming node stops.
 */
static void cfe_stop_streaming(struct vb2_queue *vq)
{
	struct cfe_node *node = vb2_get_drv_priv(vq);
	struct cfe_device *cfe = node->cfe;
	unsigned long flags;
	bool fe_stop;

	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);

	spin_lock_irqsave(&cfe->state_lock, flags);
	/*
	 * Decide whether the frontend must be stopped before this node's
	 * STREAMING flag is cleared, as clearing it changes the result of
	 * test_all_nodes().
	 */
	fe_stop = is_fe_enabled(cfe) &&
		  test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING);

	cfe->job_ready = false;
	clear_state(cfe, NODE_STREAMING, node->id);
	spin_unlock_irqrestore(&cfe->state_lock, flags);

	cfe_stop_channel(node, fe_stop);

	/* Last node out turns off the shared receive path. */
	if (!test_any_node(cfe, NODE_STREAMING)) {
		struct v4l2_subdev_state *state;
		int ret;

		state = v4l2_subdev_lock_and_get_active_state(&cfe->csi2.sd);

		ret = v4l2_subdev_disable_streams(cfe->source_sd,
						  cfe->source_pad,
						  cfe->streams_mask);
		if (ret)
			cfe_err(cfe, "stream disable failed in subdev\n");

		v4l2_subdev_unlock_state(state);

		csi2_close_rx(&cfe->csi2);

		cfg_reg_write(cfe, MIPICFG_INTE, 0);

		cfe_dbg(cfe, "%s: Streaming disabled\n", __func__);
	}

	media_pipeline_stop(&node->pad);

	/* Clear all queued buffers for the node */
	cfe_return_buffers(node, VB2_BUF_STATE_ERROR);

	pm_runtime_put(&cfe->pdev->dev);
}
1317 
/* videobuf2 queue operations shared by all CFE video nodes. */
static const struct vb2_ops cfe_video_qops = {
	.queue_setup = cfe_queue_setup,
	.buf_prepare = cfe_buffer_prepare,
	.buf_queue = cfe_buffer_queue,
	.start_streaming = cfe_start_streaming,
	.stop_streaming = cfe_stop_streaming,
};
1325 
1326 /*
1327  * v4l2 ioctl ops
1328  */
1329 
1330 static int cfe_querycap(struct file *file, void *priv,
1331 			struct v4l2_capability *cap)
1332 {
1333 	strscpy(cap->driver, CFE_MODULE_NAME, sizeof(cap->driver));
1334 	strscpy(cap->card, CFE_MODULE_NAME, sizeof(cap->card));
1335 
1336 	cap->capabilities |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE |
1337 			     V4L2_CAP_META_OUTPUT;
1338 
1339 	return 0;
1340 }
1341 
1342 static int cfe_enum_fmt_vid_cap(struct file *file, void *priv,
1343 				struct v4l2_fmtdesc *f)
1344 {
1345 	struct cfe_node *node = video_drvdata(file);
1346 	struct cfe_device *cfe = node->cfe;
1347 	unsigned int i, j;
1348 
1349 	if (!node_supports_image_output(node))
1350 		return -EINVAL;
1351 
1352 	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1353 
1354 	for (i = 0, j = 0; i < ARRAY_SIZE(formats); i++) {
1355 		if (f->mbus_code && formats[i].code != f->mbus_code)
1356 			continue;
1357 
1358 		if (formats[i].flags & CFE_FORMAT_FLAG_META_OUT ||
1359 		    formats[i].flags & CFE_FORMAT_FLAG_META_CAP)
1360 			continue;
1361 
1362 		if (is_fe_node(node) &&
1363 		    !(formats[i].flags & CFE_FORMAT_FLAG_FE_OUT))
1364 			continue;
1365 
1366 		if (j == f->index) {
1367 			f->pixelformat = formats[i].fourcc;
1368 			f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1369 			return 0;
1370 		}
1371 		j++;
1372 	}
1373 
1374 	return -EINVAL;
1375 }
1376 
1377 static int cfe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
1378 {
1379 	struct cfe_node *node = video_drvdata(file);
1380 
1381 	if (!node_supports_image(node))
1382 		return -EINVAL;
1383 
1384 	*f = node->vid_fmt;
1385 
1386 	return 0;
1387 }
1388 
/*
 * Adjust an image format to something the hardware supports: fall back to a
 * default pixel format if the requested one is unknown, remap to the 16-bit
 * variant on frontend nodes, and recompute bytesperline/sizeimage.
 */
static int cfe_validate_fmt_vid_cap(struct cfe_node *node,
				    struct v4l2_format *f)
{
	struct cfe_device *cfe = node->cfe;
	const struct cfe_fmt *fmt;

	cfe_dbg(cfe, "%s: [%s] %ux%u, V4L2 pix %p4cc\n", __func__,
		node_desc[node->id].name, f->fmt.pix.width, f->fmt.pix.height,
		&f->fmt.pix.pixelformat);

	if (!node_supports_image_output(node))
		return -EINVAL;

	/*
	 * Default to a format that works for both CSI2 and FE.
	 */
	fmt = find_format_by_pix(f->fmt.pix.pixelformat);
	if (!fmt)
		fmt = find_format_by_code(MEDIA_BUS_FMT_SBGGR10_1X10);

	f->fmt.pix.pixelformat = fmt->fourcc;

	/* The frontend writes 16-bit samples; use the remapped format. */
	if (is_fe_node(node) && fmt->remap[CFE_REMAP_16BIT]) {
		f->fmt.pix.pixelformat = fmt->remap[CFE_REMAP_16BIT];
		fmt = find_format_by_pix(f->fmt.pix.pixelformat);
	}

	f->fmt.pix.field = V4L2_FIELD_NONE;

	cfe_calc_vid_format_size_bpl(cfe, fmt, f);

	return 0;
}
1422 
1423 static int cfe_s_fmt_vid_cap(struct file *file, void *priv,
1424 			     struct v4l2_format *f)
1425 {
1426 	struct cfe_node *node = video_drvdata(file);
1427 	struct cfe_device *cfe = node->cfe;
1428 	struct vb2_queue *q = &node->buffer_queue;
1429 	int ret;
1430 
1431 	if (vb2_is_busy(q))
1432 		return -EBUSY;
1433 
1434 	ret = cfe_validate_fmt_vid_cap(node, f);
1435 	if (ret)
1436 		return ret;
1437 
1438 	node->vid_fmt = *f;
1439 
1440 	cfe_dbg(cfe, "%s: Set %ux%u, V4L2 pix %p4cc\n", __func__,
1441 		node->vid_fmt.fmt.pix.width, node->vid_fmt.fmt.pix.height,
1442 		&node->vid_fmt.fmt.pix.pixelformat);
1443 
1444 	return 0;
1445 }
1446 
1447 static int cfe_try_fmt_vid_cap(struct file *file, void *priv,
1448 			       struct v4l2_format *f)
1449 {
1450 	struct cfe_node *node = video_drvdata(file);
1451 	struct cfe_device *cfe = node->cfe;
1452 
1453 	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1454 
1455 	return cfe_validate_fmt_vid_cap(node, f);
1456 }
1457 
1458 static int cfe_enum_fmt_meta(struct file *file, void *priv,
1459 			     struct v4l2_fmtdesc *f)
1460 {
1461 	struct cfe_node *node = video_drvdata(file);
1462 	struct cfe_device *cfe = node->cfe;
1463 
1464 	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1465 
1466 	if (!node_supports_meta(node))
1467 		return -EINVAL;
1468 
1469 	switch (node->id) {
1470 	case CSI2_CH0...CSI2_CH3:
1471 		f->flags = V4L2_FMT_FLAG_META_LINE_BASED;
1472 
1473 		switch (f->index) {
1474 		case 0:
1475 			f->pixelformat = V4L2_META_FMT_GENERIC_8;
1476 			return 0;
1477 		case 1:
1478 			f->pixelformat = V4L2_META_FMT_GENERIC_CSI2_10;
1479 			return 0;
1480 		case 2:
1481 			f->pixelformat = V4L2_META_FMT_GENERIC_CSI2_12;
1482 			return 0;
1483 		default:
1484 			return -EINVAL;
1485 		}
1486 	default:
1487 		break;
1488 	}
1489 
1490 	if (f->index != 0)
1491 		return -EINVAL;
1492 
1493 	switch (node->id) {
1494 	case FE_STATS:
1495 		f->pixelformat = V4L2_META_FMT_RPI_FE_STATS;
1496 		return 0;
1497 	case FE_CONFIG:
1498 		f->pixelformat = V4L2_META_FMT_RPI_FE_CFG;
1499 		return 0;
1500 	default:
1501 		return -EINVAL;
1502 	}
1503 }
1504 
/*
 * Adjust a metadata format to what the node supports: CSI-2 channels take a
 * generic line-based meta format (with fallback), while the FE stats/config
 * nodes have a single fixed format and buffer size.
 */
static int cfe_validate_fmt_meta(struct cfe_node *node, struct v4l2_format *f)
{
	struct cfe_device *cfe = node->cfe;
	const struct cfe_fmt *fmt;

	switch (node->id) {
	case CSI2_CH0...CSI2_CH3:
		cfe_dbg(cfe, "%s: [%s] %ux%u, V4L2 meta %p4cc\n", __func__,
			node_desc[node->id].name, f->fmt.meta.width,
			f->fmt.meta.height, &f->fmt.meta.dataformat);
		break;
	case FE_STATS:
	case FE_CONFIG:
		cfe_dbg(cfe, "%s: [%s] %u bytes, V4L2 meta %p4cc\n", __func__,
			node_desc[node->id].name, f->fmt.meta.buffersize,
			&f->fmt.meta.dataformat);
		break;
	default:
		return -EINVAL;
	}

	if (!node_supports_meta(node))
		return -EINVAL;

	switch (node->id) {
	case CSI2_CH0...CSI2_CH3:
		/* Fall back to a generic 10-bit format if unsupported. */
		fmt = find_format_by_pix(f->fmt.meta.dataformat);
		if (!fmt || !(fmt->flags & CFE_FORMAT_FLAG_META_CAP))
			fmt = find_format_by_pix(V4L2_META_FMT_GENERIC_CSI2_10);

		f->fmt.meta.dataformat = fmt->fourcc;

		cfe_calc_meta_format_size_bpl(cfe, fmt, f);

		return 0;
	case FE_STATS:
		/* Fixed format: one pisp_statistics record per buffer. */
		f->fmt.meta.dataformat = V4L2_META_FMT_RPI_FE_STATS;
		f->fmt.meta.buffersize = sizeof(struct pisp_statistics);
		return 0;
	case FE_CONFIG:
		/* Fixed format: one pisp_fe_config record per buffer. */
		f->fmt.meta.dataformat = V4L2_META_FMT_RPI_FE_CFG;
		f->fmt.meta.buffersize = sizeof(struct pisp_fe_config);
		return 0;
	default:
		return -EINVAL;
	}
}
1552 
1553 static int cfe_g_fmt_meta(struct file *file, void *priv, struct v4l2_format *f)
1554 {
1555 	struct cfe_node *node = video_drvdata(file);
1556 	struct cfe_device *cfe = node->cfe;
1557 
1558 	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1559 
1560 	if (!node_supports_meta(node))
1561 		return -EINVAL;
1562 
1563 	*f = node->meta_fmt;
1564 
1565 	return 0;
1566 }
1567 
1568 static int cfe_s_fmt_meta(struct file *file, void *priv, struct v4l2_format *f)
1569 {
1570 	struct cfe_node *node = video_drvdata(file);
1571 	struct cfe_device *cfe = node->cfe;
1572 	struct vb2_queue *q = &node->buffer_queue;
1573 	int ret;
1574 
1575 	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1576 
1577 	if (vb2_is_busy(q))
1578 		return -EBUSY;
1579 
1580 	if (!node_supports_meta(node))
1581 		return -EINVAL;
1582 
1583 	ret = cfe_validate_fmt_meta(node, f);
1584 	if (ret)
1585 		return ret;
1586 
1587 	node->meta_fmt = *f;
1588 
1589 	cfe_dbg(cfe, "%s: Set %p4cc\n", __func__,
1590 		&node->meta_fmt.fmt.meta.dataformat);
1591 
1592 	return 0;
1593 }
1594 
1595 static int cfe_try_fmt_meta(struct file *file, void *priv,
1596 			    struct v4l2_format *f)
1597 {
1598 	struct cfe_node *node = video_drvdata(file);
1599 	struct cfe_device *cfe = node->cfe;
1600 
1601 	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1602 	return cfe_validate_fmt_meta(node, f);
1603 }
1604 
1605 static int cfe_enum_framesizes(struct file *file, void *priv,
1606 			       struct v4l2_frmsizeenum *fsize)
1607 {
1608 	struct cfe_node *node = video_drvdata(file);
1609 	struct cfe_device *cfe = node->cfe;
1610 	const struct cfe_fmt *fmt;
1611 
1612 	cfe_dbg(cfe, "%s [%s]\n", __func__, node_desc[node->id].name);
1613 
1614 	if (fsize->index > 0)
1615 		return -EINVAL;
1616 
1617 	/* check for valid format */
1618 	fmt = find_format_by_pix(fsize->pixel_format);
1619 	if (!fmt) {
1620 		cfe_dbg(cfe, "Invalid pixel code: %x\n", fsize->pixel_format);
1621 		return -EINVAL;
1622 	}
1623 
1624 	/* TODO: Do we have limits on the step_width? */
1625 
1626 	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
1627 	fsize->stepwise.min_width = MIN_WIDTH;
1628 	fsize->stepwise.max_width = MAX_WIDTH;
1629 	fsize->stepwise.step_width = 2;
1630 	fsize->stepwise.min_height = MIN_HEIGHT;
1631 	fsize->stepwise.max_height = MAX_HEIGHT;
1632 	fsize->stepwise.step_height = 1;
1633 
1634 	return 0;
1635 }
1636 
1637 static int cfe_vb2_ioctl_reqbufs(struct file *file, void *priv,
1638 				 struct v4l2_requestbuffers *p)
1639 {
1640 	struct video_device *vdev = video_devdata(file);
1641 	struct cfe_node *node = video_get_drvdata(vdev);
1642 	struct cfe_device *cfe = node->cfe;
1643 	int ret;
1644 
1645 	cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name,
1646 		p->type);
1647 
1648 	if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
1649 	    p->type != V4L2_BUF_TYPE_META_CAPTURE &&
1650 	    p->type != V4L2_BUF_TYPE_META_OUTPUT)
1651 		return -EINVAL;
1652 
1653 	ret = vb2_queue_change_type(vdev->queue, p->type);
1654 	if (ret)
1655 		return ret;
1656 
1657 	return vb2_ioctl_reqbufs(file, priv, p);
1658 }
1659 
1660 static int cfe_vb2_ioctl_create_bufs(struct file *file, void *priv,
1661 				     struct v4l2_create_buffers *p)
1662 {
1663 	struct video_device *vdev = video_devdata(file);
1664 	struct cfe_node *node = video_get_drvdata(vdev);
1665 	struct cfe_device *cfe = node->cfe;
1666 	int ret;
1667 
1668 	cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name,
1669 		p->format.type);
1670 
1671 	if (p->format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
1672 	    p->format.type != V4L2_BUF_TYPE_META_CAPTURE &&
1673 	    p->format.type != V4L2_BUF_TYPE_META_OUTPUT)
1674 		return -EINVAL;
1675 
1676 	ret = vb2_queue_change_type(vdev->queue, p->format.type);
1677 	if (ret)
1678 		return ret;
1679 
1680 	return vb2_ioctl_create_bufs(file, priv, p);
1681 }
1682 
1683 static int cfe_subscribe_event(struct v4l2_fh *fh,
1684 			       const struct v4l2_event_subscription *sub)
1685 {
1686 	struct cfe_node *node = video_get_drvdata(fh->vdev);
1687 
1688 	switch (sub->type) {
1689 	case V4L2_EVENT_FRAME_SYNC:
1690 		if (!node_supports_image_output(node))
1691 			break;
1692 
1693 		return v4l2_event_subscribe(fh, sub, 2, NULL);
1694 	case V4L2_EVENT_SOURCE_CHANGE:
1695 		if (!node_supports_image_output(node) &&
1696 		    !node_supports_meta_output(node))
1697 			break;
1698 
1699 		return v4l2_event_subscribe(fh, sub, 4, NULL);
1700 	}
1701 
1702 	return v4l2_ctrl_subscribe_event(fh, sub);
1703 }
1704 
/* ioctl operations shared by all CFE video nodes; the meta handlers are
 * shared between meta capture and meta output node types.
 */
static const struct v4l2_ioctl_ops cfe_ioctl_ops = {
	.vidioc_querycap = cfe_querycap,
	.vidioc_enum_fmt_vid_cap = cfe_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap = cfe_g_fmt,
	.vidioc_s_fmt_vid_cap = cfe_s_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap = cfe_try_fmt_vid_cap,

	.vidioc_enum_fmt_meta_cap = cfe_enum_fmt_meta,
	.vidioc_g_fmt_meta_cap = cfe_g_fmt_meta,
	.vidioc_s_fmt_meta_cap = cfe_s_fmt_meta,
	.vidioc_try_fmt_meta_cap = cfe_try_fmt_meta,

	.vidioc_enum_fmt_meta_out = cfe_enum_fmt_meta,
	.vidioc_g_fmt_meta_out = cfe_g_fmt_meta,
	.vidioc_s_fmt_meta_out = cfe_s_fmt_meta,
	.vidioc_try_fmt_meta_out = cfe_try_fmt_meta,

	.vidioc_enum_framesizes = cfe_enum_framesizes,

	.vidioc_reqbufs = cfe_vb2_ioctl_reqbufs,
	.vidioc_create_bufs = cfe_vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,

	.vidioc_subscribe_event = cfe_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
1737 
1738 static void cfe_notify(struct v4l2_subdev *sd, unsigned int notification,
1739 		       void *arg)
1740 {
1741 	struct cfe_device *cfe = to_cfe_device(sd->v4l2_dev);
1742 
1743 	switch (notification) {
1744 	case V4L2_DEVICE_NOTIFY_EVENT:
1745 		for (unsigned int i = 0; i < NUM_NODES; i++) {
1746 			struct cfe_node *node = &cfe->node[i];
1747 
1748 			if (check_state(cfe, NODE_REGISTERED, i))
1749 				continue;
1750 
1751 			v4l2_event_queue(&node->video_dev, arg);
1752 		}
1753 		break;
1754 	default:
1755 		break;
1756 	}
1757 }
1758 
1759 /* cfe capture driver file operations */
static const struct v4l2_file_operations cfe_fops = {
	.owner = THIS_MODULE,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.poll = vb2_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = vb2_fop_mmap,
};
1768 
/*
 * Validate a subdev -> video-node link: the remote pad's media bus format
 * must match the node's configured image or metadata format in size and in
 * code/fourcc pairing.
 */
static int cfe_video_link_validate(struct media_link *link)
{
	struct video_device *vd = container_of(link->sink->entity,
					       struct video_device, entity);
	struct cfe_node *node = container_of(vd, struct cfe_node, video_dev);
	struct cfe_device *cfe = node->cfe;
	struct v4l2_mbus_framefmt *source_fmt;
	struct v4l2_subdev_state *state;
	struct v4l2_subdev *source_sd;
	int ret = 0;

	cfe_dbg(cfe, "%s: [%s] link \"%s\":%u -> \"%s\":%u\n", __func__,
		node_desc[node->id].name,
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index);

	/* The video node must have exactly one connected source pad. */
	if (!media_entity_remote_source_pad_unique(link->sink->entity)) {
		cfe_err(cfe, "video node %s pad not connected\n", vd->name);
		return -ENOTCONN;
	}

	source_sd = media_entity_to_v4l2_subdev(link->source->entity);

	state = v4l2_subdev_lock_and_get_active_state(source_sd);

	source_fmt = v4l2_subdev_state_get_format(state, link->source->index);
	if (!source_fmt) {
		ret = -EINVAL;
		goto out;
	}

	if (is_image_output_node(node)) {
		struct v4l2_pix_format *pix_fmt = &node->vid_fmt.fmt.pix;
		const struct cfe_fmt *fmt;

		/* Dimensions must agree between subdev pad and node. */
		if (source_fmt->width != pix_fmt->width ||
		    source_fmt->height != pix_fmt->height) {
			cfe_err(cfe, "Wrong width or height %ux%u (remote pad set to %ux%u)\n",
				pix_fmt->width, pix_fmt->height,
				source_fmt->width, source_fmt->height);
			ret = -EINVAL;
			goto out;
		}

		/* The mbus code must be producible as the node's fourcc. */
		fmt = find_format_by_code_and_fourcc(source_fmt->code,
						     pix_fmt->pixelformat);
		if (!fmt) {
			cfe_err(cfe, "Format mismatch!\n");
			ret = -EINVAL;
			goto out;
		}
	} else if (is_csi2_node(node) && is_meta_output_node(node)) {
		struct v4l2_meta_format *meta_fmt = &node->meta_fmt.fmt.meta;
		const struct cfe_fmt *fmt;

		/* Same checks, against the node's metadata format. */
		if (source_fmt->width != meta_fmt->width ||
		    source_fmt->height != meta_fmt->height) {
			cfe_err(cfe, "Wrong width or height %ux%u (remote pad set to %ux%u)\n",
				meta_fmt->width, meta_fmt->height,
				source_fmt->width, source_fmt->height);
			ret = -EINVAL;
			goto out;
		}

		fmt = find_format_by_code_and_fourcc(source_fmt->code,
						     meta_fmt->dataformat);
		if (!fmt) {
			cfe_err(cfe, "Format mismatch!\n");
			ret = -EINVAL;
			goto out;
		}
	}

out:
	v4l2_subdev_unlock_state(state);

	return ret;
}
1847 
/* Media entity operations for the video nodes. */
static const struct media_entity_operations cfe_media_entity_ops = {
	.link_validate = cfe_video_link_validate,
};
1851 
/*
 * Track link state changes: mirror video-node link enable/disable into the
 * NODE_ENABLED state bits, and record which CSI-2 channel (if any) feeds the
 * frontend via the CSI2 -> FE:0 link.
 */
static int cfe_video_link_notify(struct media_link *link, u32 flags,
				 unsigned int notification)
{
	struct media_device *mdev = link->graph_obj.mdev;
	struct cfe_device *cfe = container_of(mdev, struct cfe_device, mdev);
	struct media_entity *fe = &cfe->fe.sd.entity;
	struct media_entity *csi2 = &cfe->csi2.sd.entity;
	unsigned long lock_flags;

	if (notification != MEDIA_DEV_NOTIFY_POST_LINK_CH)
		return 0;

	cfe_dbg(cfe, "%s: %s[%u] -> %s[%u] 0x%x", __func__,
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index, flags);

	spin_lock_irqsave(&cfe->state_lock, lock_flags);

	/* Update the enabled state of the node at either end of the link. */
	for (unsigned int i = 0; i < NUM_NODES; i++) {
		if (link->sink->entity != &cfe->node[i].video_dev.entity &&
		    link->source->entity != &cfe->node[i].video_dev.entity)
			continue;

		if (link->flags & MEDIA_LNK_FL_ENABLED)
			set_state(cfe, NODE_ENABLED, i);
		else
			clear_state(cfe, NODE_ENABLED, i);

		break;
	}

	spin_unlock_irqrestore(&cfe->state_lock, lock_flags);

	/* Only the CSI2 -> FE sink-pad-0 link affects the FE channel. */
	if (link->source->entity != csi2)
		return 0;
	if (link->sink->entity != fe)
		return 0;
	if (link->sink->index != 0)
		return 0;

	cfe->fe_csi2_channel = -1;
	if (link->flags & MEDIA_LNK_FL_ENABLED) {
		if (link->source->index == node_desc[CSI2_CH0].link_pad)
			cfe->fe_csi2_channel = CSI2_CH0;
		else if (link->source->index == node_desc[CSI2_CH1].link_pad)
			cfe->fe_csi2_channel = CSI2_CH1;
		else if (link->source->index == node_desc[CSI2_CH2].link_pad)
			cfe->fe_csi2_channel = CSI2_CH2;
		else if (link->source->index == node_desc[CSI2_CH3].link_pad)
			cfe->fe_csi2_channel = CSI2_CH3;
	}

	if (is_fe_enabled(cfe))
		cfe_dbg(cfe, "%s: Found CSI2:%d -> FE:0 link\n", __func__,
			cfe->fe_csi2_channel);
	else
		cfe_dbg(cfe, "%s: Unable to find CSI2:x -> FE:0 link\n",
			__func__);

	return 0;
}
1913 
/* Media device operations: observe link changes on the whole graph. */
static const struct media_device_ops cfe_media_device_ops = {
	.link_notify = cfe_video_link_notify,
};
1917 
/* kref release: final teardown once the last reference is dropped. */
static void cfe_release(struct kref *kref)
{
	struct cfe_device *cfe = container_of(kref, struct cfe_device, kref);

	media_device_cleanup(&cfe->mdev);

	kfree(cfe);
}
1926 
/* Drop a reference on the device; frees it when the count hits zero. */
static void cfe_put(struct cfe_device *cfe)
{
	kref_put(&cfe->kref, cfe_release);
}
1931 
/* Take an additional reference on the device. */
static void cfe_get(struct cfe_device *cfe)
{
	kref_get(&cfe->kref);
}
1936 
/* video_device release: drop the node's reference on the parent device. */
static void cfe_node_release(struct video_device *vdev)
{
	struct cfe_node *node = video_get_drvdata(vdev);

	cfe_put(node->cfe);
}
1943 
/*
 * Initialise and register one video node: set up its default image and/or
 * metadata format, its vb2 queue and its video_device, then register the
 * device node. On success the node holds a reference on the cfe device.
 */
static int cfe_register_node(struct cfe_device *cfe, int id)
{
	struct video_device *vdev;
	const struct cfe_fmt *fmt;
	struct vb2_queue *q;
	struct cfe_node *node = &cfe->node[id];
	int ret;

	node->cfe = cfe;
	node->id = id;

	if (node_supports_image(node)) {
		if (node_supports_image_output(node))
			node->vid_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		else
			node->vid_fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;

		/* Start from the driver-wide default format. */
		fmt = find_format_by_code(cfe_default_format.code);
		if (!fmt) {
			cfe_err(cfe, "Failed to find format code\n");
			return -EINVAL;
		}

		node->vid_fmt.fmt.pix.pixelformat = fmt->fourcc;
		v4l2_fill_pix_format(&node->vid_fmt.fmt.pix,
				     &cfe_default_format);

		ret = cfe_validate_fmt_vid_cap(node, &node->vid_fmt);
		if (ret)
			return ret;
	}

	if (node_supports_meta(node)) {
		if (node_supports_meta_output(node))
			node->meta_fmt.type = V4L2_BUF_TYPE_META_CAPTURE;
		else
			node->meta_fmt.type = V4L2_BUF_TYPE_META_OUTPUT;

		ret = cfe_validate_fmt_meta(node, &node->meta_fmt);
		if (ret)
			return ret;
	}

	mutex_init(&node->lock);

	q = &node->buffer_queue;
	/* Nodes supporting both start on the image type (see reqbufs). */
	q->type = node_supports_image(node) ? node->vid_fmt.type :
					      node->meta_fmt.type;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->drv_priv = node;
	q->ops = &cfe_video_qops;
	q->mem_ops = &vb2_dma_contig_memops;
	/* FE_CONFIG buffers carry an extra config snapshot. */
	q->buf_struct_size = id == FE_CONFIG ? sizeof(struct cfe_config_buffer)
					     : sizeof(struct cfe_buffer);
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = &node->lock;
	q->min_queued_buffers = 1;
	q->dev = &cfe->pdev->dev;

	ret = vb2_queue_init(q);
	if (ret) {
		cfe_err(cfe, "vb2_queue_init() failed\n");
		return ret;
	}

	INIT_LIST_HEAD(&node->dma_queue);

	vdev = &node->video_dev;
	vdev->release = cfe_node_release;
	vdev->fops = &cfe_fops;
	vdev->ioctl_ops = &cfe_ioctl_ops;
	vdev->entity.ops = &cfe_media_entity_ops;
	vdev->v4l2_dev = &cfe->v4l2_dev;
	vdev->vfl_dir = (node_supports_image_output(node) ||
			 node_supports_meta_output(node)) ?
				VFL_DIR_RX :
				VFL_DIR_TX;
	vdev->queue = q;
	vdev->lock = &node->lock;
	vdev->device_caps = node_desc[id].caps;
	vdev->device_caps |= V4L2_CAP_STREAMING | V4L2_CAP_IO_MC;

	/* Define the device names */
	snprintf(vdev->name, sizeof(vdev->name), "%s-%s", CFE_MODULE_NAME,
		 node_desc[id].name);

	video_set_drvdata(vdev, node);
	node->pad.flags = node_desc[id].pad_flags;
	media_entity_pads_init(&vdev->entity, 1, &node->pad);

	/* Frame size/interval enumeration is image-only. */
	if (!node_supports_image(node)) {
		v4l2_disable_ioctl(&node->video_dev,
				   VIDIOC_ENUM_FRAMEINTERVALS);
		v4l2_disable_ioctl(&node->video_dev, VIDIOC_ENUM_FRAMESIZES);
	}

	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (ret) {
		cfe_err(cfe, "Unable to register video device %s\n",
			vdev->name);
		return ret;
	}

	cfe_info(cfe, "Registered [%s] node id %d as /dev/video%u\n",
		 vdev->name, id, vdev->num);

	/*
	 * Acquire a reference to cfe, which will be released when the video
	 * device will be unregistered and userspace will have closed all open
	 * file handles.
	 */
	cfe_get(cfe);
	set_state(cfe, NODE_REGISTERED, id);

	return 0;
}
2060 
2061 static void cfe_unregister_nodes(struct cfe_device *cfe)
2062 {
2063 	for (unsigned int i = 0; i < NUM_NODES; i++) {
2064 		struct cfe_node *node = &cfe->node[i];
2065 
2066 		if (check_state(cfe, NODE_REGISTERED, i)) {
2067 			clear_state(cfe, NODE_REGISTERED, i);
2068 			video_unregister_device(&node->video_dev);
2069 		}
2070 	}
2071 }
2072 
/*
 * Create the media graph links: source subdev -> CSI-2 sink, CSI-2 channels
 * -> their video nodes (and optionally the frontend), and frontend pads ->
 * the FE video nodes.
 */
static int cfe_link_node_pads(struct cfe_device *cfe)
{
	struct media_pad *remote_pad;
	int ret;

	/* Source -> CSI2 */

	ret = v4l2_create_fwnode_links_to_pad(cfe->source_sd,
					      &cfe->csi2.pad[CSI2_PAD_SINK],
					      MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);

	if (ret) {
		cfe_err(cfe, "Failed to create links to the source: %d\n", ret);
		return ret;
	}

	/* Remember which source pad feeds the CSI-2 receiver. */
	remote_pad = media_pad_remote_pad_unique(&cfe->csi2.pad[CSI2_PAD_SINK]);
	if (IS_ERR(remote_pad)) {
		ret = PTR_ERR(remote_pad);
		cfe_err(cfe, "Failed to get unique remote source pad: %d\n",
			ret);
		return ret;
	}

	cfe->source_pad = remote_pad->index;

	for (unsigned int i = 0; i < CSI2_NUM_CHANNELS; i++) {
		struct cfe_node *node = &cfe->node[i];

		if (!check_state(cfe, NODE_REGISTERED, i))
			continue;

		/* CSI2 channel # -> /dev/video# */
		ret = media_create_pad_link(&cfe->csi2.sd.entity,
					    node_desc[i].link_pad,
					    &node->video_dev.entity, 0, 0);
		if (ret)
			return ret;

		if (node_supports_image(node)) {
			/* CSI2 channel # -> FE Input */
			ret = media_create_pad_link(&cfe->csi2.sd.entity,
						    node_desc[i].link_pad,
						    &cfe->fe.sd.entity,
						    FE_STREAM_PAD, 0);
			if (ret)
				return ret;
		}
	}

	/* Frontend nodes: link direction depends on the pad direction. */
	for (unsigned int i = CSI2_NUM_CHANNELS; i < NUM_NODES; i++) {
		struct cfe_node *node = &cfe->node[i];
		struct media_entity *src, *dst;
		unsigned int src_pad, dst_pad;

		if (node_desc[i].pad_flags & MEDIA_PAD_FL_SINK) {
			/* FE -> /dev/video# */
			src = &cfe->fe.sd.entity;
			src_pad = node_desc[i].link_pad;
			dst = &node->video_dev.entity;
			dst_pad = 0;
		} else {
			/* /dev/video# -> FE */
			dst = &cfe->fe.sd.entity;
			dst_pad = node_desc[i].link_pad;
			src = &node->video_dev.entity;
			src_pad = 0;
		}

		ret = media_create_pad_link(src, src_pad, dst, dst_pad, 0);
		if (ret)
			return ret;
	}

	return 0;
}
2149 
/*
 * Finish bring-up once the source subdev has bound (called from the async
 * notifier's .complete): register all video nodes, link the media pads,
 * and expose the subdev device nodes. On any failure, already-registered
 * nodes are unregistered before returning the error.
 */
static int cfe_probe_complete(struct cfe_device *cfe)
{
	int ret;

	cfe->v4l2_dev.notify = cfe_notify;

	for (unsigned int i = 0; i < NUM_NODES; i++) {
		ret = cfe_register_node(cfe, i);
		if (ret) {
			cfe_err(cfe, "Unable to register video node %u.\n", i);
			goto unregister;
		}
	}

	ret = cfe_link_node_pads(cfe);
	if (ret) {
		cfe_err(cfe, "Unable to link node pads.\n");
		goto unregister;
	}

	ret = v4l2_device_register_subdev_nodes(&cfe->v4l2_dev);
	if (ret) {
		cfe_err(cfe, "Unable to register subdev nodes.\n");
		goto unregister;
	}

	return 0;

unregister:
	cfe_unregister_nodes(cfe);
	return ret;
}
2182 
2183 static int cfe_async_bound(struct v4l2_async_notifier *notifier,
2184 			   struct v4l2_subdev *subdev,
2185 			   struct v4l2_async_connection *asd)
2186 {
2187 	struct cfe_device *cfe = to_cfe_device(notifier->v4l2_dev);
2188 
2189 	if (cfe->source_sd) {
2190 		cfe_err(cfe, "Rejecting subdev %s (Already set!!)",
2191 			subdev->name);
2192 		return 0;
2193 	}
2194 
2195 	cfe->source_sd = subdev;
2196 
2197 	cfe_dbg(cfe, "Using source %s for capture\n", subdev->name);
2198 
2199 	return 0;
2200 }
2201 
2202 static int cfe_async_complete(struct v4l2_async_notifier *notifier)
2203 {
2204 	struct cfe_device *cfe = to_cfe_device(notifier->v4l2_dev);
2205 
2206 	return cfe_probe_complete(cfe);
2207 }
2208 
/* Async notifier callbacks: capture the source subdev, then complete probe. */
static const struct v4l2_async_notifier_operations cfe_async_ops = {
	.bound = cfe_async_bound,
	.complete = cfe_async_complete,
};
2213 
2214 static int cfe_register_async_nf(struct cfe_device *cfe)
2215 {
2216 	struct platform_device *pdev = cfe->pdev;
2217 	struct v4l2_fwnode_endpoint ep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
2218 	struct fwnode_handle *local_ep_fwnode;
2219 	struct v4l2_async_connection *asd;
2220 	int ret;
2221 
2222 	local_ep_fwnode = fwnode_graph_get_endpoint_by_id(pdev->dev.fwnode, 0,
2223 							  0, 0);
2224 	if (!local_ep_fwnode) {
2225 		cfe_err(cfe, "Failed to find local endpoint fwnode\n");
2226 		return -ENODEV;
2227 	}
2228 
2229 	/* Parse the local endpoint and validate its configuration. */
2230 	ret = v4l2_fwnode_endpoint_parse(local_ep_fwnode, &ep);
2231 	if (ret) {
2232 		cfe_err(cfe, "Failed to find remote endpoint fwnode\n");
2233 		goto err_put_local_fwnode;
2234 	}
2235 
2236 	for (unsigned int lane = 0; lane < ep.bus.mipi_csi2.num_data_lanes;
2237 	     lane++) {
2238 		if (ep.bus.mipi_csi2.data_lanes[lane] != lane + 1) {
2239 			cfe_err(cfe, "Data lanes reordering not supported\n");
2240 			ret = -EINVAL;
2241 			goto err_put_local_fwnode;
2242 		}
2243 	}
2244 
2245 	cfe->csi2.dphy.max_lanes = ep.bus.mipi_csi2.num_data_lanes;
2246 	cfe->csi2.bus_flags = ep.bus.mipi_csi2.flags;
2247 
2248 	/* Initialize and register the async notifier. */
2249 	v4l2_async_nf_init(&cfe->notifier, &cfe->v4l2_dev);
2250 	cfe->notifier.ops = &cfe_async_ops;
2251 
2252 	asd = v4l2_async_nf_add_fwnode_remote(&cfe->notifier, local_ep_fwnode,
2253 					      struct v4l2_async_connection);
2254 	if (IS_ERR(asd)) {
2255 		ret = PTR_ERR(asd);
2256 		cfe_err(cfe, "Error adding subdevice: %d\n", ret);
2257 		goto err_put_local_fwnode;
2258 	}
2259 
2260 	ret = v4l2_async_nf_register(&cfe->notifier);
2261 	if (ret) {
2262 		cfe_err(cfe, "Error registering async notifier: %d\n", ret);
2263 		goto err_nf_cleanup;
2264 	}
2265 
2266 	fwnode_handle_put(local_ep_fwnode);
2267 
2268 	return 0;
2269 
2270 err_nf_cleanup:
2271 	v4l2_async_nf_cleanup(&cfe->notifier);
2272 err_put_local_fwnode:
2273 	fwnode_handle_put(local_ep_fwnode);
2274 
2275 	return ret;
2276 }
2277 
/*
 * Probe: allocate the refcounted driver context, map the four register
 * blocks, request the interrupt, configure DMA, register the media and
 * v4l2 devices, initialize the CSI-2 receiver and PISP front end, and
 * finally register the async notifier for the sensor. Error paths unwind
 * in reverse order via gotos.
 */
static int cfe_probe(struct platform_device *pdev)
{
	struct cfe_device *cfe;
	char debugfs_name[32];
	int ret;

	/*
	 * Not devm-allocated: the context is kref-counted and freed via
	 * cfe_put() once the last reference (driver or open node) is gone.
	 */
	cfe = kzalloc(sizeof(*cfe), GFP_KERNEL);
	if (!cfe)
		return -ENOMEM;

	platform_set_drvdata(pdev, cfe);

	kref_init(&cfe->kref);
	cfe->pdev = pdev;
	cfe->fe_csi2_channel = -1;
	spin_lock_init(&cfe->state_lock);

	/* Map the four MMIO regions: CSI-2, D-PHY, MIPI cfg and PISP FE. */
	cfe->csi2.base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(cfe->csi2.base)) {
		dev_err(&pdev->dev, "Failed to get dma io block\n");
		ret = PTR_ERR(cfe->csi2.base);
		goto err_cfe_put;
	}

	cfe->csi2.dphy.base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(cfe->csi2.dphy.base)) {
		dev_err(&pdev->dev, "Failed to get host io block\n");
		ret = PTR_ERR(cfe->csi2.dphy.base);
		goto err_cfe_put;
	}

	cfe->mipi_cfg_base = devm_platform_ioremap_resource(pdev, 2);
	if (IS_ERR(cfe->mipi_cfg_base)) {
		dev_err(&pdev->dev, "Failed to get mipi cfg io block\n");
		ret = PTR_ERR(cfe->mipi_cfg_base);
		goto err_cfe_put;
	}

	cfe->fe.base = devm_platform_ioremap_resource(pdev, 3);
	if (IS_ERR(cfe->fe.base)) {
		dev_err(&pdev->dev, "Failed to get pisp fe io block\n");
		ret = PTR_ERR(cfe->fe.base);
		goto err_cfe_put;
	}

	/* On success, ret holds the IRQ number used by devm_request_irq(). */
	ret = platform_get_irq(pdev, 0);
	if (ret <= 0) {
		ret = -EINVAL;
		goto err_cfe_put;
	}

	ret = devm_request_irq(&pdev->dev, ret, cfe_isr, 0, "rp1-cfe", cfe);
	if (ret) {
		dev_err(&pdev->dev, "Unable to request interrupt\n");
		ret = -EINVAL;
		goto err_cfe_put;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "DMA enable failed\n");
		goto err_cfe_put;
	}

	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, UINT_MAX);
	if (ret)
		goto err_cfe_put;

	/* TODO: Enable clock only when running. */
	cfe->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(cfe->clk)) {
		ret = dev_err_probe(&pdev->dev, PTR_ERR(cfe->clk),
				    "clock not found\n");
		goto err_cfe_put;
	}

	/* Populate the media device descriptors before registering it. */
	cfe->mdev.dev = &pdev->dev;
	cfe->mdev.ops = &cfe_media_device_ops;
	strscpy(cfe->mdev.model, CFE_MODULE_NAME, sizeof(cfe->mdev.model));
	strscpy(cfe->mdev.serial, "", sizeof(cfe->mdev.serial));
	snprintf(cfe->mdev.bus_info, sizeof(cfe->mdev.bus_info), "platform:%s",
		 dev_name(&pdev->dev));

	media_device_init(&cfe->mdev);

	cfe->v4l2_dev.mdev = &cfe->mdev;

	ret = v4l2_device_register(&pdev->dev, &cfe->v4l2_dev);
	if (ret) {
		cfe_err(cfe, "Unable to register v4l2 device.\n");
		goto err_cfe_put;
	}

	snprintf(debugfs_name, sizeof(debugfs_name), "rp1-cfe:%s",
		 dev_name(&pdev->dev));
	cfe->debugfs = debugfs_create_dir(debugfs_name, NULL);
	debugfs_create_file("regs", 0440, cfe->debugfs, cfe,
			    &mipi_cfg_regs_fops);

	/* Enable the block power domain */
	pm_runtime_enable(&pdev->dev);

	/* Hold the device powered while csi2/fe probe the hardware. */
	ret = pm_runtime_resume_and_get(&cfe->pdev->dev);
	if (ret)
		goto err_runtime_disable;

	cfe->csi2.v4l2_dev = &cfe->v4l2_dev;
	ret = csi2_init(&cfe->csi2, cfe->debugfs);
	if (ret) {
		cfe_err(cfe, "Failed to init csi2 (%d)\n", ret);
		goto err_runtime_put;
	}

	cfe->fe.v4l2_dev = &cfe->v4l2_dev;
	ret = pisp_fe_init(&cfe->fe, cfe->debugfs);
	if (ret) {
		cfe_err(cfe, "Failed to init pisp fe (%d)\n", ret);
		goto err_csi2_uninit;
	}

	cfe->mdev.hw_revision = cfe->fe.hw_revision;
	ret = media_device_register(&cfe->mdev);
	if (ret < 0) {
		cfe_err(cfe, "Unable to register media-controller device.\n");
		goto err_pisp_fe_uninit;
	}

	ret = cfe_register_async_nf(cfe);
	if (ret) {
		cfe_err(cfe, "Failed to connect subdevs\n");
		goto err_media_unregister;
	}

	/* Probe-time power reference dropped; device idles until streaming. */
	pm_runtime_put(&cfe->pdev->dev);

	return 0;

err_media_unregister:
	media_device_unregister(&cfe->mdev);
err_pisp_fe_uninit:
	pisp_fe_uninit(&cfe->fe);
err_csi2_uninit:
	csi2_uninit(&cfe->csi2);
err_runtime_put:
	pm_runtime_put(&cfe->pdev->dev);
err_runtime_disable:
	pm_runtime_disable(&pdev->dev);
	debugfs_remove(cfe->debugfs);
	v4l2_device_unregister(&cfe->v4l2_dev);
err_cfe_put:
	cfe_put(cfe);

	return ret;
}
2432 
/*
 * Remove: tear down in roughly reverse probe order. The final cfe_put()
 * drops the probe-time reference; per cfe_register_node(), each open
 * video node holds its own reference, so the context survives until
 * userspace closes all file handles.
 */
static void cfe_remove(struct platform_device *pdev)
{
	struct cfe_device *cfe = platform_get_drvdata(pdev);

	debugfs_remove(cfe->debugfs);

	/* Stop the async notifier before unregistering the entities. */
	v4l2_async_nf_unregister(&cfe->notifier);
	v4l2_async_nf_cleanup(&cfe->notifier);

	media_device_unregister(&cfe->mdev);
	cfe_unregister_nodes(cfe);

	pisp_fe_uninit(&cfe->fe);
	csi2_uninit(&cfe->csi2);

	pm_runtime_disable(&pdev->dev);

	v4l2_device_unregister(&cfe->v4l2_dev);

	cfe_put(cfe);
}
2454 
2455 static int cfe_runtime_suspend(struct device *dev)
2456 {
2457 	struct platform_device *pdev = to_platform_device(dev);
2458 	struct cfe_device *cfe = platform_get_drvdata(pdev);
2459 
2460 	clk_disable_unprepare(cfe->clk);
2461 
2462 	return 0;
2463 }
2464 
2465 static int cfe_runtime_resume(struct device *dev)
2466 {
2467 	struct platform_device *pdev = to_platform_device(dev);
2468 	struct cfe_device *cfe = platform_get_drvdata(pdev);
2469 	int ret;
2470 
2471 	ret = clk_prepare_enable(cfe->clk);
2472 	if (ret) {
2473 		dev_err(dev, "Unable to enable clock\n");
2474 		return ret;
2475 	}
2476 
2477 	return 0;
2478 }
2479 
/*
 * Runtime PM gates the block clock; late system sleep reuses the runtime
 * PM callbacks via the force_suspend/force_resume helpers.
 */
static const struct dev_pm_ops cfe_pm_ops = {
	SET_RUNTIME_PM_OPS(cfe_runtime_suspend, cfe_runtime_resume, NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
};
2485 
/* Device tree match table. */
static const struct of_device_id cfe_of_match[] = {
	{ .compatible = "raspberrypi,rp1-cfe" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cfe_of_match);
2491 
/* Platform driver glue and module metadata. */
static struct platform_driver cfe_driver = {
	.probe		= cfe_probe,
	.remove		= cfe_remove,
	.driver = {
		.name	= CFE_MODULE_NAME,
		.of_match_table = cfe_of_match,
		.pm = &cfe_pm_ops,
	},
};

module_platform_driver(cfe_driver);

MODULE_AUTHOR("Naushir Patuck <naush@raspberrypi.com>");
MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>");
MODULE_DESCRIPTION("Raspberry Pi RP1 Camera Front End driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CFE_VERSION);
2509