xref: /linux/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * RP1 Camera Front End Driver
4  *
5  * Copyright (c) 2021-2024 Raspberry Pi Ltd.
6  * Copyright (c) 2023-2024 Ideas on Board Oy
7  */
8 
9 #include <linux/clk.h>
10 #include <linux/debugfs.h>
11 #include <linux/delay.h>
12 #include <linux/device.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/err.h>
15 #include <linux/fwnode.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/lcm.h>
20 #include <linux/math.h>
21 #include <linux/module.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/property.h>
25 #include <linux/seq_file.h>
26 #include <linux/slab.h>
27 #include <linux/uaccess.h>
28 #include <linux/videodev2.h>
29 
30 #include <media/v4l2-async.h>
31 #include <media/v4l2-common.h>
32 #include <media/v4l2-ctrls.h>
33 #include <media/v4l2-dev.h>
34 #include <media/v4l2-device.h>
35 #include <media/v4l2-event.h>
36 #include <media/v4l2-fwnode.h>
37 #include <media/v4l2-ioctl.h>
38 #include <media/v4l2-mc.h>
39 #include <media/videobuf2-dma-contig.h>
40 
41 #include <linux/media/raspberrypi/pisp_fe_config.h>
42 #include <linux/media/raspberrypi/pisp_fe_statistics.h>
43 
44 #include "cfe-fmts.h"
45 #include "cfe.h"
46 #include "csi2.h"
47 #include "pisp-fe.h"
48 
49 #define CREATE_TRACE_POINTS
50 #include "cfe-trace.h"
51 
#define CFE_MODULE_NAME	"rp1-cfe"
#define CFE_VERSION	"1.0"

/* Logging helpers that prefix messages with the platform device. */
#define cfe_dbg(cfe, fmt, arg...) dev_dbg(&(cfe)->pdev->dev, fmt, ##arg)
#define cfe_info(cfe, fmt, arg...) dev_info(&(cfe)->pdev->dev, fmt, ##arg)
#define cfe_err(cfe, fmt, arg...) dev_err(&(cfe)->pdev->dev, fmt, ##arg)

/* MIPICFG registers */
#define MIPICFG_CFG		0x004
#define MIPICFG_INTR		0x028
#define MIPICFG_INTE		0x02c
#define MIPICFG_INTF		0x030
#define MIPICFG_INTS		0x034

#define MIPICFG_CFG_SEL_CSI	BIT(0)

/* Interrupt sources in the MIPICFG interrupt status/enable registers. */
#define MIPICFG_INT_CSI_DMA	BIT(0)
#define MIPICFG_INT_CSI_HOST	BIT(2)
#define MIPICFG_INT_PISP_FE	BIT(4)

/* Required bytes-per-line (stride) alignment for DMA. */
#define BPL_ALIGNMENT 16
#define MAX_BYTESPERLINE 0xffffff00
#define MAX_BUFFER_SIZE  0xffffff00
/*
 * Max width is therefore determined by the max stride divided by the number of
 * bits per pixel.
 *
 * However, to avoid overflow issues let's use a 16k maximum. This lets us
 * calculate 16k * 16k * 4 with 32bits. If we need higher maximums, a careful
 * review and adjustment of the code is needed so that it will deal with
 * overflows correctly.
 */
#define MAX_WIDTH 16384
#define MAX_HEIGHT MAX_WIDTH
/* Define a nominal minimum image size */
#define MIN_WIDTH 16
#define MIN_HEIGHT 16

/* Minimum dimensions accepted for metadata/embedded-data formats. */
#define MIN_META_WIDTH 4
#define MIN_META_HEIGHT 1
92 
/* Default media bus format used to initialise the pads/nodes. */
const struct v4l2_mbus_framefmt cfe_default_format = {
	.width = 640,
	.height = 480,
	.code = MEDIA_BUS_FMT_SRGGB10_1X10,
	.field = V4L2_FIELD_NONE,
	.colorspace = V4L2_COLORSPACE_RAW,
	.ycbcr_enc = V4L2_YCBCR_ENC_601,
	.quantization = V4L2_QUANTIZATION_FULL_RANGE,
	.xfer_func = V4L2_XFER_FUNC_NONE,
};
103 
/*
 * Node indices. Used as array indices into cfe_device::node and to derive
 * each node's bit offset in cfe_device::node_flags.
 */
enum node_ids {
	/* CSI2 HW output nodes first. */
	CSI2_CH0,
	CSI2_CH1,
	CSI2_CH2,
	CSI2_CH3,
	/* FE only nodes from here on. */
	FE_OUT0,
	FE_OUT1,
	FE_STATS,
	FE_CONFIG,
	NUM_NODES
};
117 
/* Static description of one video device node exposed by the driver. */
struct node_description {
	/* Node identifier, same as the node's index in cfe_device::node. */
	enum node_ids id;
	/* Human-readable name for the node. */
	const char *name;
	/* V4L2 capabilities (V4L2_CAP_*) the node may expose. */
	unsigned int caps;
	/* Media pad flags for the node's single pad. */
	unsigned int pad_flags;
	/* CSI-2 / FE subdev pad this node links to. */
	unsigned int link_pad;
};
125 
/* Must match the ordering of enum ids */
static const struct node_description node_desc[NUM_NODES] = {
	[CSI2_CH0] = {
		.name = "csi2-ch0",
		.caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE,
		.pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = CSI2_PAD_FIRST_SOURCE + 0
	},
	/*
	 * At the moment the main userspace component (libcamera) doesn't
	 * support metadata with video nodes that support both video and
	 * metadata. So for the time being this node is set to only support
	 * V4L2_CAP_META_CAPTURE.
	 */
	[CSI2_CH1] = {
		.name = "csi2-ch1",
		.caps = V4L2_CAP_META_CAPTURE,
		.pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = CSI2_PAD_FIRST_SOURCE + 1
	},
	[CSI2_CH2] = {
		.name = "csi2-ch2",
		.caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE,
		.pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = CSI2_PAD_FIRST_SOURCE + 2
	},
	[CSI2_CH3] = {
		.name = "csi2-ch3",
		.caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE,
		.pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = CSI2_PAD_FIRST_SOURCE + 3
	},
	/* PISP Front End image outputs. */
	[FE_OUT0] = {
		.name = "fe-image0",
		.caps = V4L2_CAP_VIDEO_CAPTURE,
		.pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = FE_OUTPUT0_PAD
	},
	[FE_OUT1] = {
		.name = "fe-image1",
		.caps = V4L2_CAP_VIDEO_CAPTURE,
		.pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = FE_OUTPUT1_PAD
	},
	/* FE statistics capture node. */
	[FE_STATS] = {
		.name = "fe-stats",
		.caps = V4L2_CAP_META_CAPTURE,
		.pad_flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = FE_STATS_PAD
	},
	/* FE configuration node; userspace feeds configs to the hardware. */
	[FE_CONFIG] = {
		.name = "fe-config",
		.caps = V4L2_CAP_META_OUTPUT,
		.pad_flags = MEDIA_PAD_FL_SOURCE | MEDIA_PAD_FL_MUST_CONNECT,
		.link_pad = FE_CONFIG_PAD
	},
};
183 
/* FE nodes are the ids at and after FE_OUT0; the rest are CSI-2 channels. */
#define is_fe_node(node) (((node)->id) >= FE_OUT0)
#define is_csi2_node(node) (!is_fe_node(node))

/* What a node *may* support, from its static capability flags. */
#define node_supports_image_output(node) \
	(node_desc[(node)->id].caps & V4L2_CAP_VIDEO_CAPTURE)
#define node_supports_meta_output(node) \
	(node_desc[(node)->id].caps & V4L2_CAP_META_CAPTURE)
#define node_supports_image_input(node) \
	(node_desc[(node)->id].caps & V4L2_CAP_VIDEO_OUTPUT)
#define node_supports_meta_input(node) \
	(node_desc[(node)->id].caps & V4L2_CAP_META_OUTPUT)
#define node_supports_image(node) \
	(node_supports_image_output(node) || node_supports_image_input(node))
#define node_supports_meta(node) \
	(node_supports_meta_output(node) || node_supports_meta_input(node))

/* What a node is currently configured as, from its vb2 queue type. */
#define is_image_output_node(node) \
	((node)->buffer_queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
#define is_image_input_node(node) \
	((node)->buffer_queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
#define is_image_node(node) \
	(is_image_output_node(node) || is_image_input_node(node))
#define is_meta_output_node(node) \
	((node)->buffer_queue.type == V4L2_BUF_TYPE_META_CAPTURE)
#define is_meta_input_node(node) \
	((node)->buffer_queue.type == V4L2_BUF_TYPE_META_OUTPUT)
#define is_meta_node(node) \
	(is_meta_output_node(node) || is_meta_input_node(node))

/* To track state across all nodes. */
#define NODE_REGISTERED		BIT(0)
#define NODE_ENABLED		BIT(1)
#define NODE_STREAMING		BIT(2)
#define FS_INT			BIT(3)
#define FE_INT			BIT(4)
#define NUM_STATES		5
220 
/* A vb2 buffer plus the list head used to queue it for DMA. */
struct cfe_buffer {
	struct vb2_v4l2_buffer vb;
	struct list_head list;
};

/* FE_CONFIG buffers carry a kernel-side copy of the FE configuration. */
struct cfe_config_buffer {
	struct cfe_buffer buf;
	struct pisp_fe_config config;
};

/* Convert a vb2 buffer to its enclosing cfe_buffer. */
static inline struct cfe_buffer *to_cfe_buffer(struct vb2_buffer *vb)
{
	return container_of(vb, struct cfe_buffer, vb.vb2_buf);
}

/* Convert a cfe_buffer to its enclosing cfe_config_buffer. */
static inline
struct cfe_config_buffer *to_cfe_config_buffer(struct cfe_buffer *buf)
{
	return container_of(buf, struct cfe_config_buffer, buf);
}
241 
/* Per-video-device-node runtime state. */
struct cfe_node {
	/* Node id */
	enum node_ids id;
	/* Pointer pointing to current v4l2_buffer */
	struct cfe_buffer *cur_frm;
	/* Pointer pointing to next v4l2_buffer */
	struct cfe_buffer *next_frm;
	/* Used to store current pixel format */
	struct v4l2_format vid_fmt;
	/* Used to store current meta format */
	struct v4l2_format meta_fmt;
	/* Buffer queue used in video-buf */
	struct vb2_queue buffer_queue;
	/* Queue of filled frames */
	struct list_head dma_queue;
	/* lock used to access this structure */
	struct mutex lock;
	/* Identifies video device for this channel */
	struct video_device video_dev;
	/* Pointer to the parent handle */
	struct cfe_device *cfe;
	/* Media pad for this node */
	struct media_pad pad;
	/* Frame-start counter */
	unsigned int fs_count;
	/* Timestamp of the current buffer */
	u64 ts;
};
270 
/* Global state for one CFE instance. */
struct cfe_device {
	struct dentry *debugfs;
	struct kref kref;

	/* peripheral base address */
	void __iomem *mipi_cfg_base;

	struct clk *clk;

	/* V4l2 device */
	struct v4l2_device v4l2_dev;
	struct media_device mdev;
	struct media_pipeline pipe;

	/* IRQ lock for node state and DMA queues */
	spinlock_t state_lock;
	/* Every enabled node has at least one buffer queued */
	bool job_ready;
	/* A job has been programmed into the hardware */
	bool job_queued;

	/* parent device */
	struct platform_device *pdev;
	/* subdevice async Notifier */
	struct v4l2_async_notifier notifier;

	/* Source sub device */
	struct v4l2_subdev *source_sd;
	/* Source subdev's pad */
	u32 source_pad;

	/* One entry per enum node_ids value */
	struct cfe_node node[NUM_NODES];
	/* NUM_STATES flag bits per node, grouped by node id */
	DECLARE_BITMAP(node_flags, NUM_STATES * NUM_NODES);

	struct csi2_device csi2;
	struct pisp_fe_device fe;

	/* CSI-2 channel feeding the FE, or -1 when the FE is not in use */
	int fe_csi2_channel;

	/* Mask of enabled streams */
	u64 streams_mask;
};
311 
/* The FE is in use only when a CSI-2 channel has been routed to it. */
static inline bool is_fe_enabled(struct cfe_device *cfe)
{
	return cfe->fe_csi2_channel != -1;
}

/* Convert a v4l2_device to its enclosing cfe_device. */
static inline struct cfe_device *to_cfe_device(struct v4l2_device *v4l2_dev)
{
	return container_of(v4l2_dev, struct cfe_device, v4l2_dev);
}

/* Accessors for the shared MIPICFG register block. */
static inline u32 cfg_reg_read(struct cfe_device *cfe, u32 offset)
{
	return readl(cfe->mipi_cfg_base + offset);
}

static inline void cfg_reg_write(struct cfe_device *cfe, u32 offset, u32 val)
{
	writel(val, cfe->mipi_cfg_base + offset);
}
331 
332 static bool check_state(struct cfe_device *cfe, unsigned long state,
333 			unsigned int node_id)
334 {
335 	unsigned long bit;
336 
337 	for_each_set_bit(bit, &state, sizeof(state)) {
338 		if (!test_bit(bit + (node_id * NUM_STATES), cfe->node_flags))
339 			return false;
340 	}
341 
342 	return true;
343 }
344 
345 static void set_state(struct cfe_device *cfe, unsigned long state,
346 		      unsigned int node_id)
347 {
348 	unsigned long bit;
349 
350 	for_each_set_bit(bit, &state, sizeof(state))
351 		set_bit(bit + (node_id * NUM_STATES), cfe->node_flags);
352 }
353 
354 static void clear_state(struct cfe_device *cfe, unsigned long state,
355 			unsigned int node_id)
356 {
357 	unsigned long bit;
358 
359 	for_each_set_bit(bit, &state, sizeof(state))
360 		clear_bit(bit + (node_id * NUM_STATES), cfe->node_flags);
361 }
362 
363 static bool test_any_node(struct cfe_device *cfe, unsigned long cond)
364 {
365 	for (unsigned int i = 0; i < NUM_NODES; i++) {
366 		if (check_state(cfe, cond, i))
367 			return true;
368 	}
369 
370 	return false;
371 }
372 
373 static bool test_all_nodes(struct cfe_device *cfe, unsigned long precond,
374 			   unsigned long cond)
375 {
376 	for (unsigned int i = 0; i < NUM_NODES; i++) {
377 		if (check_state(cfe, precond, i)) {
378 			if (!check_state(cfe, cond, i))
379 				return false;
380 		}
381 	}
382 
383 	return true;
384 }
385 
/* debugfs: dump the MIPICFG register block. */
static int mipi_cfg_regs_show(struct seq_file *s, void *data)
{
	struct cfe_device *cfe = s->private;
	int ret;

	/* Registers are only accessible while the device is powered. */
	ret = pm_runtime_resume_and_get(&cfe->pdev->dev);
	if (ret)
		return ret;

#define DUMP(reg) seq_printf(s, #reg " \t0x%08x\n", cfg_reg_read(cfe, reg))
	DUMP(MIPICFG_CFG);
	DUMP(MIPICFG_INTR);
	DUMP(MIPICFG_INTE);
	DUMP(MIPICFG_INTF);
	DUMP(MIPICFG_INTS);
#undef DUMP

	pm_runtime_put(&cfe->pdev->dev);

	return 0;
}
407 
408 DEFINE_SHOW_ATTRIBUTE(mipi_cfg_regs);
409 
410 /* Format setup functions */
411 const struct cfe_fmt *find_format_by_code(u32 code)
412 {
413 	for (unsigned int i = 0; i < ARRAY_SIZE(formats); i++) {
414 		if (formats[i].code == code)
415 			return &formats[i];
416 	}
417 
418 	return NULL;
419 }
420 
421 const struct cfe_fmt *find_format_by_pix(u32 pixelformat)
422 {
423 	for (unsigned int i = 0; i < ARRAY_SIZE(formats); i++) {
424 		if (formats[i].fourcc == pixelformat)
425 			return &formats[i];
426 	}
427 
428 	return NULL;
429 }
430 
431 static const struct cfe_fmt *find_format_by_code_and_fourcc(u32 code,
432 							    u32 fourcc)
433 {
434 	for (unsigned int i = 0; i < ARRAY_SIZE(formats); i++) {
435 		if (formats[i].code == code && formats[i].fourcc == fourcc)
436 			return &formats[i];
437 	}
438 
439 	return NULL;
440 }
441 
442 /*
443  * Given the mbus code, find the 16 bit remapped code. Returns 0 if no remap
444  * possible.
445  */
446 u32 cfe_find_16bit_code(u32 code)
447 {
448 	const struct cfe_fmt *cfe_fmt;
449 
450 	cfe_fmt = find_format_by_code(code);
451 
452 	if (!cfe_fmt || !cfe_fmt->remap[CFE_REMAP_16BIT])
453 		return 0;
454 
455 	cfe_fmt = find_format_by_pix(cfe_fmt->remap[CFE_REMAP_16BIT]);
456 	if (!cfe_fmt)
457 		return 0;
458 
459 	return cfe_fmt->code;
460 }
461 
462 /*
463  * Given the mbus code, find the 8 bit compressed code. Returns 0 if no remap
464  * possible.
465  */
466 u32 cfe_find_compressed_code(u32 code)
467 {
468 	const struct cfe_fmt *cfe_fmt;
469 
470 	cfe_fmt = find_format_by_code(code);
471 
472 	if (!cfe_fmt || !cfe_fmt->remap[CFE_REMAP_COMPRESSED])
473 		return 0;
474 
475 	cfe_fmt = find_format_by_pix(cfe_fmt->remap[CFE_REMAP_COMPRESSED]);
476 	if (!cfe_fmt)
477 		return 0;
478 
479 	return cfe_fmt->code;
480 }
481 
/*
 * Clamp/align the requested image size, then derive bytesperline and
 * sizeimage. A caller-supplied bytesperline larger than the minimum is
 * honoured (after alignment) to allow padded strides.
 */
static void cfe_calc_vid_format_size_bpl(struct cfe_device *cfe,
					 const struct cfe_fmt *fmt,
					 struct v4l2_format *f)
{
	unsigned int min_bytesperline;

	v4l_bound_align_image(&f->fmt.pix.width, MIN_WIDTH, MAX_WIDTH, 2,
			      &f->fmt.pix.height, MIN_HEIGHT, MAX_HEIGHT, 0, 0);

	/* Tightest stride for this width at fmt->depth bits per pixel. */
	min_bytesperline =
		ALIGN((f->fmt.pix.width * fmt->depth) >> 3, BPL_ALIGNMENT);

	if (f->fmt.pix.bytesperline > min_bytesperline &&
	    f->fmt.pix.bytesperline <= MAX_BYTESPERLINE)
		f->fmt.pix.bytesperline =
			ALIGN(f->fmt.pix.bytesperline, BPL_ALIGNMENT);
	else
		f->fmt.pix.bytesperline = min_bytesperline;

	f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;

	cfe_dbg(cfe, "%s: %p4cc size: %ux%u bpl:%u img_size:%u\n", __func__,
		&f->fmt.pix.pixelformat, f->fmt.pix.width, f->fmt.pix.height,
		f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
}
507 
508 static void cfe_calc_meta_format_size_bpl(struct cfe_device *cfe,
509 					  const struct cfe_fmt *fmt,
510 					  struct v4l2_format *f)
511 {
512 	v4l_bound_align_image(&f->fmt.meta.width, MIN_META_WIDTH, MAX_WIDTH, 2,
513 			      &f->fmt.meta.height, MIN_META_HEIGHT, MAX_HEIGHT,
514 			      0, 0);
515 
516 	f->fmt.meta.bytesperline = (f->fmt.meta.width * fmt->depth) >> 3;
517 	f->fmt.meta.buffersize = f->fmt.meta.height * f->fmt.pix.bytesperline;
518 
519 	cfe_dbg(cfe, "%s: %p4cc size: %ux%u bpl:%u buf_size:%u\n", __func__,
520 		&f->fmt.meta.dataformat, f->fmt.meta.width, f->fmt.meta.height,
521 		f->fmt.meta.bytesperline, f->fmt.meta.buffersize);
522 }
523 
/*
 * Pop the head of each streaming CSI-2 node's DMA queue and program it as
 * that channel's next buffer. Called with cfe->state_lock held; the queues
 * are non-empty because the caller checked cfe->job_ready first.
 */
static void cfe_schedule_next_csi2_job(struct cfe_device *cfe)
{
	struct cfe_buffer *buf;
	dma_addr_t addr;

	for (unsigned int i = 0; i < CSI2_NUM_CHANNELS; i++) {
		struct cfe_node *node = &cfe->node[i];
		unsigned int stride, size;

		if (!check_state(cfe, NODE_STREAMING, i))
			continue;

		buf = list_first_entry(&node->dma_queue, struct cfe_buffer,
				       list);
		node->next_frm = buf;
		list_del(&buf->list);

		trace_cfe_csi2_schedule(node->id, &buf->vb.vb2_buf);

		if (is_meta_node(node)) {
			size = node->meta_fmt.fmt.meta.buffersize;
			/* We use CSI2_CH_CTRL_PACK_BYTES, so stride == 0 */
			stride = 0;
		} else {
			size = node->vid_fmt.fmt.pix.sizeimage;
			stride = node->vid_fmt.fmt.pix.bytesperline;
		}

		addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
		csi2_set_buffer(&cfe->csi2, node->id, addr, stride, size);
	}
}
556 
/*
 * Collect the next queued buffer from each streaming FE node and submit
 * them as one job to the front end. Called with cfe->state_lock held.
 *
 * NOTE(review): this dereferences cfe->node[FE_CONFIG].next_frm, so it
 * assumes the FE_CONFIG node is streaming and had a buffer popped in the
 * loop above whenever the FE is enabled — confirm against the callers.
 */
static void cfe_schedule_next_pisp_job(struct cfe_device *cfe)
{
	struct vb2_buffer *vb2_bufs[FE_NUM_PADS] = { 0 };
	struct cfe_config_buffer *config_buf;
	struct cfe_buffer *buf;

	for (unsigned int i = CSI2_NUM_CHANNELS; i < NUM_NODES; i++) {
		struct cfe_node *node = &cfe->node[i];

		if (!check_state(cfe, NODE_STREAMING, i))
			continue;

		buf = list_first_entry(&node->dma_queue, struct cfe_buffer,
				       list);

		trace_cfe_fe_schedule(node->id, &buf->vb.vb2_buf);

		node->next_frm = buf;
		vb2_bufs[node_desc[i].link_pad] = &buf->vb.vb2_buf;
		list_del(&buf->list);
	}

	config_buf = to_cfe_config_buffer(cfe->node[FE_CONFIG].next_frm);
	pisp_fe_submit_job(&cfe->fe, vb2_bufs, &config_buf->config);
}
582 
583 static bool cfe_check_job_ready(struct cfe_device *cfe)
584 {
585 	for (unsigned int i = 0; i < NUM_NODES; i++) {
586 		struct cfe_node *node = &cfe->node[i];
587 
588 		if (!check_state(cfe, NODE_ENABLED, i))
589 			continue;
590 
591 		if (list_empty(&node->dma_queue))
592 			return false;
593 	}
594 
595 	return true;
596 }
597 
/*
 * Program the next job into the CSI-2 channels and, when in use, the FE.
 * Called with cfe->state_lock held and only after cfe->job_ready was
 * confirmed by the caller.
 */
static void cfe_prepare_next_job(struct cfe_device *cfe)
{
	trace_cfe_prepare_next_job(is_fe_enabled(cfe));

	cfe->job_queued = true;
	cfe_schedule_next_csi2_job(cfe);
	if (is_fe_enabled(cfe))
		cfe_schedule_next_pisp_job(cfe);

	/* Flag if another job is ready after this. */
	cfe->job_ready = cfe_check_job_ready(cfe);
}
610 
/*
 * Hand the node's current buffer back to vb2. fs_count was already
 * incremented at frame start, hence the sequence number is fs_count - 1.
 */
static void cfe_process_buffer_complete(struct cfe_node *node,
					enum vb2_buffer_state state)
{
	trace_cfe_buffer_complete(node->id, &node->cur_frm->vb);

	node->cur_frm->vb.sequence = node->fs_count - 1;
	vb2_buffer_done(&node->cur_frm->vb.vb2_buf, state);
}
619 
620 static void cfe_queue_event_sof(struct cfe_node *node)
621 {
622 	struct v4l2_event event = {
623 		.type = V4L2_EVENT_FRAME_SYNC,
624 		.u.frame_sync.frame_sequence = node->fs_count - 1,
625 	};
626 
627 	v4l2_event_queue(&node->video_dev, &event);
628 }
629 
/*
 * Per-node frame-start handler. Called from cfe_isr() with
 * cfe->state_lock held.
 */
static void cfe_sof_isr(struct cfe_node *node)
{
	struct cfe_device *cfe = node->cfe;
	bool matching_fs = true;

	trace_cfe_frame_start(node->id, node->fs_count);

	/*
	 * If the sensor is producing unexpected frame event ordering over a
	 * sustained period of time, guard against the possibility of coming
	 * here and orphaning the cur_frm if it's not been dequeued already.
	 * Unfortunately, there is not enough hardware state to tell if this
	 * may have occurred.
	 */
	if (WARN(node->cur_frm, "%s: [%s] Orphaned frame at seq %u\n",
		 __func__, node_desc[node->id].name, node->fs_count))
		cfe_process_buffer_complete(node, VB2_BUF_STATE_ERROR);

	/* The buffer scheduled for this frame becomes the current one. */
	node->cur_frm = node->next_frm;
	node->next_frm = NULL;
	node->fs_count++;

	node->ts = ktime_get_ns();
	for (unsigned int i = 0; i < NUM_NODES; i++) {
		if (!check_state(cfe, NODE_STREAMING, i) || i == node->id)
			continue;
		/*
		 * This checks if any other node has seen a FS. If yes, use the
		 * same timestamp, eventually across all node buffers.
		 */
		if (cfe->node[i].fs_count >= node->fs_count)
			node->ts = cfe->node[i].ts;
		/*
		 * This checks if all other node have seen a matching FS. If
		 * yes, we can flag another job to be queued.
		 */
		if (matching_fs && cfe->node[i].fs_count != node->fs_count)
			matching_fs = false;
	}

	if (matching_fs)
		cfe->job_queued = false;

	if (node->cur_frm)
		node->cur_frm->vb.vb2_buf.timestamp = node->ts;

	set_state(cfe, FS_INT, node->id);
	clear_state(cfe, FE_INT, node->id);

	/* FRAME_SYNC events are only delivered on image capture nodes. */
	if (is_image_output_node(node))
		cfe_queue_event_sof(node);
}
682 
/*
 * Per-node frame-end handler: complete the current buffer. Called from
 * cfe_isr() with cfe->state_lock held.
 */
static void cfe_eof_isr(struct cfe_node *node)
{
	struct cfe_device *cfe = node->cfe;

	trace_cfe_frame_end(node->id, node->fs_count - 1);

	if (node->cur_frm)
		cfe_process_buffer_complete(node, VB2_BUF_STATE_DONE);

	node->cur_frm = NULL;
	set_state(cfe, FE_INT, node->id);
	clear_state(cfe, FS_INT, node->id);
}
696 
/*
 * Top-level interrupt handler: demultiplex CSI-2 DMA and PISP FE
 * interrupts into per-node frame-start / frame-end events.
 */
static irqreturn_t cfe_isr(int irq, void *dev)
{
	struct cfe_device *cfe = dev;
	bool sof[NUM_NODES] = { 0 }, eof[NUM_NODES] = { 0 };
	u32 sts;

	sts = cfg_reg_read(cfe, MIPICFG_INTS);

	if (sts & MIPICFG_INT_CSI_DMA)
		csi2_isr(&cfe->csi2, sof, eof);

	/* FE nodes occupy the array slots after the CSI-2 channels. */
	if (sts & MIPICFG_INT_PISP_FE)
		pisp_fe_isr(&cfe->fe, sof + CSI2_NUM_CHANNELS,
			    eof + CSI2_NUM_CHANNELS);

	spin_lock(&cfe->state_lock);

	for (unsigned int i = 0; i < NUM_NODES; i++) {
		struct cfe_node *node = &cfe->node[i];

		/*
		 * The check_state(NODE_STREAMING) is to ensure we do not loop
		 * over the CSI2_CHx nodes when the FE is active since they
		 * generate interrupts even though the node is not streaming.
		 */
		if (!check_state(cfe, NODE_STREAMING, i) || !(sof[i] || eof[i]))
			continue;

		/*
		 * There are 3 cases where we could get FS + FE_ACK at
		 * the same time:
		 * 1) FE of the current frame, and FS of the next frame.
		 * 2) FS + FE of the same frame.
		 * 3) FE of the current frame, and FS + FE of the next
		 *    frame. To handle this, see the sof handler below.
		 *
		 * (1) is handled implicitly by the ordering of the FE and FS
		 * handlers below.
		 */
		if (eof[i]) {
			/*
			 * The condition below tests for (2). Run the FS handler
			 * first before the FE handler, both for the current
			 * frame.
			 */
			if (sof[i] && !check_state(cfe, FS_INT, i)) {
				cfe_sof_isr(node);
				sof[i] = false;
			}

			cfe_eof_isr(node);
		}

		if (sof[i]) {
			/*
			 * The condition below tests for (3). In such cases, we
			 * come in here with FS flag set in the node state from
			 * the previous frame since it only gets cleared in
			 * cfe_eof_isr(). Handle the FE for the previous
			 * frame first before the FS handler for the current
			 * frame.
			 */
			if (check_state(cfe, FS_INT, node->id) &&
			    !check_state(cfe, FE_INT, node->id)) {
				cfe_dbg(cfe, "%s: [%s] Handling missing previous FE interrupt\n",
					__func__, node_desc[node->id].name);
				cfe_eof_isr(node);
			}

			cfe_sof_isr(node);
		}

		/* Program the next job as soon as one is ready and none is in flight. */
		if (!cfe->job_queued && cfe->job_ready)
			cfe_prepare_next_job(cfe);
	}

	spin_unlock(&cfe->state_lock);

	return IRQ_HANDLED;
}
777 
778 /*
779  * Stream helpers
780  */
781 
/*
 * Fallback when the source cannot report a frame descriptor: use virtual
 * channel 0 and the datatype implied by the CSI-2 sink format.
 */
static int cfe_get_vc_dt_fallback(struct cfe_device *cfe, u8 *vc, u8 *dt)
{
	struct v4l2_subdev_state *state;
	struct v4l2_mbus_framefmt *fmt;
	const struct cfe_fmt *cfe_fmt;

	state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd);

	fmt = v4l2_subdev_state_get_format(state, CSI2_PAD_SINK, 0);
	if (!fmt)
		return -EINVAL;

	cfe_fmt = find_format_by_code(fmt->code);
	if (!cfe_fmt)
		return -EINVAL;

	*vc = 0;
	*dt = cfe_fmt->csi_dt;

	return 0;
}
803 
/*
 * Determine the CSI-2 virtual channel and datatype feeding CSI-2 channel
 * @channel by mapping the channel's source pad back through the active
 * routing to the source subdev's frame descriptor. Falls back to
 * cfe_get_vc_dt_fallback() when the source lacks get_frame_desc.
 */
static int cfe_get_vc_dt(struct cfe_device *cfe, unsigned int channel, u8 *vc,
			 u8 *dt)
{
	struct v4l2_mbus_frame_desc remote_desc;
	struct v4l2_subdev_state *state;
	u32 sink_stream;
	unsigned int i;
	int ret;

	state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd);

	ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
		CSI2_PAD_FIRST_SOURCE + channel, 0, NULL, &sink_stream);
	if (ret)
		return ret;

	ret = v4l2_subdev_call(cfe->source_sd, pad, get_frame_desc,
			       cfe->source_pad, &remote_desc);
	if (ret == -ENOIOCTLCMD) {
		cfe_dbg(cfe, "source does not support get_frame_desc, use fallback\n");
		return cfe_get_vc_dt_fallback(cfe, vc, dt);
	} else if (ret) {
		cfe_err(cfe, "Failed to get frame descriptor\n");
		return ret;
	}

	if (remote_desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
		cfe_err(cfe, "Frame descriptor does not describe CSI-2 link");
		return -EINVAL;
	}

	/* Find the frame desc entry matching the routed sink stream. */
	for (i = 0; i < remote_desc.num_entries; i++) {
		if (remote_desc.entry[i].stream == sink_stream)
			break;
	}

	if (i == remote_desc.num_entries) {
		cfe_err(cfe, "Stream %u not found in remote frame desc\n",
			sink_stream);
		return -EINVAL;
	}

	*vc = remote_desc.entry[i].bus.csi2.vc;
	*dt = remote_desc.entry[i].bus.csi2.dt;

	return 0;
}
851 
/*
 * Start streaming for @node. When this node completes the set of enabled
 * nodes and a CSI-2 channel is routed to the FE, also start the FE and
 * its dedicated CSI-2 channel.
 */
static int cfe_start_channel(struct cfe_node *node)
{
	struct cfe_device *cfe = node->cfe;
	struct v4l2_subdev_state *state;
	struct v4l2_mbus_framefmt *source_fmt;
	const struct cfe_fmt *fmt;
	unsigned long flags;
	bool start_fe;
	int ret;

	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);

	start_fe = is_fe_enabled(cfe) &&
		   test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING);

	state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd);

	if (start_fe) {
		unsigned int width, height;
		u8 vc, dt;

		cfe_dbg(cfe, "%s: %s using csi2 channel %d\n", __func__,
			node_desc[FE_OUT0].name, cfe->fe_csi2_channel);

		ret = cfe_get_vc_dt(cfe, cfe->fe_csi2_channel, &vc, &dt);
		if (ret)
			return ret;

		source_fmt = v4l2_subdev_state_get_format(state,
			node_desc[cfe->fe_csi2_channel].link_pad);
		/*
		 * NOTE(review): find_format_by_code() can return NULL, and
		 * fmt->csi_dt below would dereference it - presumably the
		 * pad format is always one of the supported codes; confirm.
		 */
		fmt = find_format_by_code(source_fmt->code);

		width = source_fmt->width;
		height = source_fmt->height;

		/* Must have a valid CSI2 datatype. */
		WARN_ON(!fmt->csi_dt);

		/*
		 * Start the associated CSI2 Channel as well.
		 *
		 * Must write to the ADDR register to latch the ctrl values
		 * even if we are connected to the front end. Once running,
		 * this is handled by the CSI2 AUTO_ARM mode.
		 */
		csi2_start_channel(&cfe->csi2, cfe->fe_csi2_channel,
				   CSI2_MODE_FE_STREAMING,
				   true, false, width, height, vc, dt);
		csi2_set_buffer(&cfe->csi2, cfe->fe_csi2_channel, 0, 0, -1);
		pisp_fe_start(&cfe->fe);
	}

	if (is_csi2_node(node)) {
		unsigned int width = 0, height = 0;
		u8 vc, dt;

		ret = cfe_get_vc_dt(cfe, node->id, &vc, &dt);
		if (ret) {
			/* Undo the FE start done above before failing. */
			if (start_fe) {
				csi2_stop_channel(&cfe->csi2,
						  cfe->fe_csi2_channel);
				pisp_fe_stop(&cfe->fe);
			}

			return ret;
		}

		u32 mode = CSI2_MODE_NORMAL;

		source_fmt = v4l2_subdev_state_get_format(state,
			node_desc[node->id].link_pad);
		fmt = find_format_by_code(source_fmt->code);

		/* Must have a valid CSI2 datatype. */
		WARN_ON(!fmt->csi_dt);

		if (is_image_output_node(node)) {
			u32  pixfmt;

			width = source_fmt->width;
			height = source_fmt->height;

			pixfmt = node->vid_fmt.fmt.pix.pixelformat;

			/* Pick remap/compression mode from the chosen fourcc. */
			if (pixfmt == fmt->remap[CFE_REMAP_16BIT]) {
				mode = CSI2_MODE_REMAP;
			} else if (pixfmt == fmt->remap[CFE_REMAP_COMPRESSED]) {
				mode = CSI2_MODE_COMPRESSED;
				csi2_set_compression(&cfe->csi2, node->id,
						     CSI2_COMPRESSION_DELTA, 0,
						     0);
			}
		}
		/* Unconditionally start this CSI2 channel. */
		csi2_start_channel(&cfe->csi2, node->id,
				   mode,
				   /* Auto arm */
				   false,
				   /* Pack bytes */
				   is_meta_node(node) ? true : false,
				   width, height, vc, dt);
	}

	spin_lock_irqsave(&cfe->state_lock, flags);
	if (cfe->job_ready && test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING))
		cfe_prepare_next_job(cfe);
	spin_unlock_irqrestore(&cfe->state_lock, flags);

	return 0;
}
962 
963 static void cfe_stop_channel(struct cfe_node *node, bool fe_stop)
964 {
965 	struct cfe_device *cfe = node->cfe;
966 
967 	cfe_dbg(cfe, "%s: [%s] fe_stop %u\n", __func__,
968 		node_desc[node->id].name, fe_stop);
969 
970 	if (fe_stop) {
971 		csi2_stop_channel(&cfe->csi2, cfe->fe_csi2_channel);
972 		pisp_fe_stop(&cfe->fe);
973 	}
974 
975 	if (is_csi2_node(node))
976 		csi2_stop_channel(&cfe->csi2, node->id);
977 }
978 
/*
 * Return every buffer owned by @node (queued, current and next) to vb2
 * with the given state. Used when streaming stops or fails to start.
 */
static void cfe_return_buffers(struct cfe_node *node,
			       enum vb2_buffer_state state)
{
	struct cfe_device *cfe = node->cfe;
	struct cfe_buffer *buf, *tmp;
	unsigned long flags;

	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);

	spin_lock_irqsave(&cfe->state_lock, flags);
	list_for_each_entry_safe(buf, tmp, &node->dma_queue, list) {
		list_del(&buf->list);
		trace_cfe_return_buffer(node->id, buf->vb.vb2_buf.index, 2);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}

	if (node->cur_frm) {
		trace_cfe_return_buffer(node->id,
					node->cur_frm->vb.vb2_buf.index, 0);
		vb2_buffer_done(&node->cur_frm->vb.vb2_buf, state);
	}
	/* cur_frm and next_frm may alias; avoid a double vb2_buffer_done(). */
	if (node->next_frm && node->cur_frm != node->next_frm) {
		trace_cfe_return_buffer(node->id,
					node->next_frm->vb.vb2_buf.index, 1);
		vb2_buffer_done(&node->next_frm->vb.vb2_buf, state);
	}

	node->cur_frm = NULL;
	node->next_frm = NULL;
	spin_unlock_irqrestore(&cfe->state_lock, flags);
}
1010 
1011 /*
1012  * vb2 ops
1013  */
1014 
/*
 * vb2 queue_setup: validate or derive the plane count and size, and
 * ensure a minimum number of buffers.
 */
static int cfe_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
			   unsigned int *nplanes, unsigned int sizes[],
			   struct device *alloc_devs[])
{
	struct cfe_node *node = vb2_get_drv_priv(vq);
	struct cfe_device *cfe = node->cfe;
	unsigned int size = is_image_node(node) ?
				    node->vid_fmt.fmt.pix.sizeimage :
				    node->meta_fmt.fmt.meta.buffersize;

	cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name,
		node->buffer_queue.type);

	/*
	 * NOTE(review): this reads vq->max_num_buffers (the queue's buffer
	 * capacity), not the number of buffers currently allocated - verify
	 * vb2_get_num_buffers() was not intended here.
	 */
	if (vq->max_num_buffers + *nbuffers < 3)
		*nbuffers = 3 - vq->max_num_buffers;

	if (*nplanes) {
		/* Caller proposed a size: accept only if large enough. */
		if (sizes[0] < size) {
			cfe_err(cfe, "sizes[0] %i < size %u\n", sizes[0], size);
			return -EINVAL;
		}
		size = sizes[0];
	}

	*nplanes = 1;
	sizes[0] = size;

	return 0;
}
1044 
/*
 * vb2 buf_prepare: check the plane fits the negotiated format and, for
 * FE_CONFIG buffers, copy in and validate the FE configuration.
 */
static int cfe_buffer_prepare(struct vb2_buffer *vb)
{
	struct cfe_node *node = vb2_get_drv_priv(vb->vb2_queue);
	struct cfe_device *cfe = node->cfe;
	struct cfe_buffer *buf = to_cfe_buffer(vb);
	unsigned long size;

	trace_cfe_buffer_prepare(node->id, vb);

	size = is_image_node(node) ? node->vid_fmt.fmt.pix.sizeimage :
				     node->meta_fmt.fmt.meta.buffersize;
	if (vb2_plane_size(vb, 0) < size) {
		cfe_err(cfe, "data will not fit into plane (%lu < %lu)\n",
			vb2_plane_size(vb, 0), size);
		return -EINVAL;
	}

	vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);

	if (node->id == FE_CONFIG) {
		struct cfe_config_buffer *b = to_cfe_config_buffer(buf);
		void *addr = vb2_plane_vaddr(vb, 0);

		/* Snapshot the config into kernel memory before validating. */
		memcpy(&b->config, addr, sizeof(struct pisp_fe_config));
		return pisp_fe_validate_config(&cfe->fe, &b->config,
					       &cfe->node[FE_OUT0].vid_fmt,
					       &cfe->node[FE_OUT1].vid_fmt);
	}

	return 0;
}
1076 
/*
 * vb2 .buf_queue: append the buffer to the node's DMA queue and, when every
 * enabled node is streaming, all nodes have buffers available and no job is
 * currently queued, schedule the next job immediately. Runs under
 * state_lock since the IRQ handler walks the same queues.
 */
static void cfe_buffer_queue(struct vb2_buffer *vb)
{
	struct cfe_node *node = vb2_get_drv_priv(vb->vb2_queue);
	struct cfe_device *cfe = node->cfe;
	struct cfe_buffer *buf = to_cfe_buffer(vb);
	unsigned long flags;
	bool schedule_now;

	spin_lock_irqsave(&cfe->state_lock, flags);

	list_add_tail(&buf->list, &node->dma_queue);

	/* Only re-evaluate readiness when not already marked ready. */
	if (!cfe->job_ready)
		cfe->job_ready = cfe_check_job_ready(cfe);

	schedule_now = !cfe->job_queued && cfe->job_ready &&
		       test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING);

	trace_cfe_buffer_queue(node->id, vb, schedule_now);

	if (schedule_now)
		cfe_prepare_next_job(cfe);

	spin_unlock_irqrestore(&cfe->state_lock, flags);
}
1102 
/*
 * Query the CSI-2 link frequency from the source subdev.
 *
 * Returns the link frequency in Hz on success, or a negative error code.
 * Must be called with the CSI-2 subdev's active state locked, as it reads
 * the routing table and stream formats from it.
 */
static s64 cfe_get_source_link_freq(struct cfe_device *cfe)
{
	struct v4l2_subdev_state *state;
	s64 link_freq;
	u32 bpp;

	state = v4l2_subdev_get_locked_active_state(&cfe->csi2.sd);

	/*
	 * v4l2_get_link_freq() uses V4L2_CID_LINK_FREQ first, and falls back
	 * to V4L2_CID_PIXEL_RATE if V4L2_CID_LINK_FREQ is not available.
	 *
	 * With multistream input there is no single pixel rate, and thus we
	 * cannot use V4L2_CID_PIXEL_RATE, so we pass 0 as the bpp which
	 * causes v4l2_get_link_freq() to return an error if it falls back to
	 * V4L2_CID_PIXEL_RATE.
	 */

	if (state->routing.num_routes == 1) {
		struct v4l2_subdev_route *route = &state->routing.routes[0];
		struct v4l2_mbus_framefmt *source_fmt;
		const struct cfe_fmt *fmt;

		/* Single stream: derive bpp from the sink stream's format. */
		source_fmt = v4l2_subdev_state_get_format(state,
							  route->sink_pad,
							  route->sink_stream);

		fmt = find_format_by_code(source_fmt->code);
		if (!fmt)
			return -EINVAL;

		bpp = fmt->depth;
	} else {
		bpp = 0;
	}

	link_freq = v4l2_get_link_freq(cfe->source_sd->ctrl_handler, bpp,
				       2 * cfe->csi2.dphy.active_lanes);
	if (link_freq < 0)
		cfe_err(cfe, "failed to get link freq for subdev '%s'\n",
			cfe->source_sd->name);

	return link_freq;
}
1147 
1148 static int cfe_start_streaming(struct vb2_queue *vq, unsigned int count)
1149 {
1150 	struct v4l2_mbus_config mbus_config = { 0 };
1151 	struct cfe_node *node = vb2_get_drv_priv(vq);
1152 	struct cfe_device *cfe = node->cfe;
1153 	struct v4l2_subdev_state *state;
1154 	struct v4l2_subdev_route *route;
1155 	s64 link_freq;
1156 	int ret;
1157 
1158 	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1159 
1160 	if (!check_state(cfe, NODE_ENABLED, node->id)) {
1161 		cfe_err(cfe, "%s node link is not enabled.\n",
1162 			node_desc[node->id].name);
1163 		ret = -EINVAL;
1164 		goto err_streaming;
1165 	}
1166 
1167 	ret = pm_runtime_resume_and_get(&cfe->pdev->dev);
1168 	if (ret < 0) {
1169 		cfe_err(cfe, "pm_runtime_resume_and_get failed\n");
1170 		goto err_streaming;
1171 	}
1172 
1173 	/* When using the Frontend, we must enable the FE_CONFIG node. */
1174 	if (is_fe_enabled(cfe) &&
1175 	    !check_state(cfe, NODE_ENABLED, cfe->node[FE_CONFIG].id)) {
1176 		cfe_err(cfe, "FE enabled, but FE_CONFIG node is not\n");
1177 		ret = -EINVAL;
1178 		goto err_pm_put;
1179 	}
1180 
1181 	ret = media_pipeline_start(&node->pad, &cfe->pipe);
1182 	if (ret < 0) {
1183 		cfe_err(cfe, "Failed to start media pipeline: %d\n", ret);
1184 		goto err_pm_put;
1185 	}
1186 
1187 	state = v4l2_subdev_lock_and_get_active_state(&cfe->csi2.sd);
1188 
1189 	clear_state(cfe, FS_INT | FE_INT, node->id);
1190 	set_state(cfe, NODE_STREAMING, node->id);
1191 	node->fs_count = 0;
1192 
1193 	ret = cfe_start_channel(node);
1194 	if (ret)
1195 		goto err_unlock_state;
1196 
1197 	if (!test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING)) {
1198 		cfe_dbg(cfe, "Streaming on hold, as all nodes are not set to streaming yet\n");
1199 		v4l2_subdev_unlock_state(state);
1200 		return 0;
1201 	}
1202 
1203 	cfg_reg_write(cfe, MIPICFG_CFG, MIPICFG_CFG_SEL_CSI);
1204 	cfg_reg_write(cfe, MIPICFG_INTE,
1205 		      MIPICFG_INT_CSI_DMA | MIPICFG_INT_PISP_FE);
1206 
1207 	ret = v4l2_subdev_call(cfe->source_sd, pad, get_mbus_config, 0,
1208 			       &mbus_config);
1209 	if (ret < 0 && ret != -ENOIOCTLCMD) {
1210 		cfe_err(cfe, "g_mbus_config failed\n");
1211 		goto err_clear_inte;
1212 	}
1213 
1214 	cfe->csi2.dphy.active_lanes = mbus_config.bus.mipi_csi2.num_data_lanes;
1215 	if (!cfe->csi2.dphy.active_lanes)
1216 		cfe->csi2.dphy.active_lanes = cfe->csi2.dphy.max_lanes;
1217 	if (cfe->csi2.dphy.active_lanes > cfe->csi2.dphy.max_lanes) {
1218 		cfe_err(cfe, "Device has requested %u data lanes, which is >%u configured in DT\n",
1219 			cfe->csi2.dphy.active_lanes, cfe->csi2.dphy.max_lanes);
1220 		ret = -EINVAL;
1221 		goto err_clear_inte;
1222 	}
1223 
1224 	link_freq = cfe_get_source_link_freq(cfe);
1225 	if (link_freq < 0)
1226 		goto err_clear_inte;
1227 
1228 	cfe->csi2.dphy.dphy_rate = div_s64(link_freq * 2, 1000000);
1229 	csi2_open_rx(&cfe->csi2);
1230 
1231 	cfe->streams_mask = 0;
1232 
1233 	for_each_active_route(&state->routing, route)
1234 		cfe->streams_mask |= BIT_ULL(route->sink_stream);
1235 
1236 	ret = v4l2_subdev_enable_streams(cfe->source_sd, cfe->source_pad,
1237 					 cfe->streams_mask);
1238 	if (ret) {
1239 		cfe_err(cfe, "stream on failed in subdev\n");
1240 		goto err_disable_cfe;
1241 	}
1242 
1243 	cfe_dbg(cfe, "Streaming enabled\n");
1244 
1245 	v4l2_subdev_unlock_state(state);
1246 
1247 	return 0;
1248 
1249 err_disable_cfe:
1250 	csi2_close_rx(&cfe->csi2);
1251 err_clear_inte:
1252 	cfg_reg_write(cfe, MIPICFG_INTE, 0);
1253 
1254 	cfe_stop_channel(node,
1255 			 is_fe_enabled(cfe) && test_all_nodes(cfe, NODE_ENABLED,
1256 							      NODE_STREAMING));
1257 err_unlock_state:
1258 	v4l2_subdev_unlock_state(state);
1259 	media_pipeline_stop(&node->pad);
1260 err_pm_put:
1261 	pm_runtime_put(&cfe->pdev->dev);
1262 err_streaming:
1263 	cfe_return_buffers(node, VB2_BUF_STATE_QUEUED);
1264 	clear_state(cfe, NODE_STREAMING, node->id);
1265 
1266 	return ret;
1267 }
1268 
/*
 * vb2 .stop_streaming: stop this node's DMA channel and return its queued
 * buffers. When the last streaming node stops, also disable the source
 * subdev streams, close the CSI-2 RX and mask the interrupts — the mirror
 * of the "last node in" bring-up done in cfe_start_streaming().
 */
static void cfe_stop_streaming(struct vb2_queue *vq)
{
	struct cfe_node *node = vb2_get_drv_priv(vq);
	struct cfe_device *cfe = node->cfe;
	unsigned long flags;
	bool fe_stop;

	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);

	spin_lock_irqsave(&cfe->state_lock, flags);
	/* Sample "FE still fully streaming" before clearing our own state. */
	fe_stop = is_fe_enabled(cfe) &&
		  test_all_nodes(cfe, NODE_ENABLED, NODE_STREAMING);

	cfe->job_ready = false;
	clear_state(cfe, NODE_STREAMING, node->id);
	spin_unlock_irqrestore(&cfe->state_lock, flags);

	cfe_stop_channel(node, fe_stop);

	if (!test_any_node(cfe, NODE_STREAMING)) {
		struct v4l2_subdev_state *state;
		int ret;

		state = v4l2_subdev_lock_and_get_active_state(&cfe->csi2.sd);

		ret = v4l2_subdev_disable_streams(cfe->source_sd,
						  cfe->source_pad,
						  cfe->streams_mask);
		if (ret)
			cfe_err(cfe, "stream disable failed in subdev\n");

		v4l2_subdev_unlock_state(state);

		csi2_close_rx(&cfe->csi2);

		/* Mask all CFE interrupts. */
		cfg_reg_write(cfe, MIPICFG_INTE, 0);

		cfe_dbg(cfe, "%s: Streaming disabled\n", __func__);
	}

	media_pipeline_stop(&node->pad);

	/* Clear all queued buffers for the node */
	cfe_return_buffers(node, VB2_BUF_STATE_ERROR);

	pm_runtime_put(&cfe->pdev->dev);
}
1316 
/* vb2 queue operations shared by all CFE video and metadata nodes. */
static const struct vb2_ops cfe_video_qops = {
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.queue_setup = cfe_queue_setup,
	.buf_prepare = cfe_buffer_prepare,
	.buf_queue = cfe_buffer_queue,
	.start_streaming = cfe_start_streaming,
	.stop_streaming = cfe_stop_streaming,
};
1326 
1327 /*
1328  * v4l2 ioctl ops
1329  */
1330 
1331 static int cfe_querycap(struct file *file, void *priv,
1332 			struct v4l2_capability *cap)
1333 {
1334 	strscpy(cap->driver, CFE_MODULE_NAME, sizeof(cap->driver));
1335 	strscpy(cap->card, CFE_MODULE_NAME, sizeof(cap->card));
1336 
1337 	cap->capabilities |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_META_CAPTURE |
1338 			     V4L2_CAP_META_OUTPUT;
1339 
1340 	return 0;
1341 }
1342 
/*
 * VIDIOC_ENUM_FMT (video capture): enumerate the image formats this node
 * supports, optionally filtered by mbus code. Metadata-only formats are
 * skipped, and FE nodes only report formats the Frontend can write.
 */
static int cfe_enum_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_fmtdesc *f)
{
	struct cfe_node *node = video_drvdata(file);
	struct cfe_device *cfe = node->cfe;
	unsigned int i, j;

	if (!node_supports_image_output(node))
		return -EINVAL;

	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);

	/* j counts only the entries that survive the filters below. */
	for (i = 0, j = 0; i < ARRAY_SIZE(formats); i++) {
		if (f->mbus_code && formats[i].code != f->mbus_code)
			continue;

		if (formats[i].flags & CFE_FORMAT_FLAG_META_OUT ||
		    formats[i].flags & CFE_FORMAT_FLAG_META_CAP)
			continue;

		if (is_fe_node(node) &&
		    !(formats[i].flags & CFE_FORMAT_FLAG_FE_OUT))
			continue;

		if (j == f->index) {
			f->pixelformat = formats[i].fourcc;
			f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
			return 0;
		}
		j++;
	}

	return -EINVAL;
}
1377 
1378 static int cfe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
1379 {
1380 	struct cfe_node *node = video_drvdata(file);
1381 
1382 	if (!node_supports_image(node))
1383 		return -EINVAL;
1384 
1385 	*f = node->vid_fmt;
1386 
1387 	return 0;
1388 }
1389 
/*
 * Validate and adjust an image format in place. Unknown pixel formats fall
 * back to a 10-bit Bayer default that works for both the CSI-2 DMA and the
 * Frontend; FE nodes are additionally remapped to the 16-bit variant of the
 * format when one exists. Size and bytesperline are recomputed at the end.
 */
static int cfe_validate_fmt_vid_cap(struct cfe_node *node,
				    struct v4l2_format *f)
{
	struct cfe_device *cfe = node->cfe;
	const struct cfe_fmt *fmt;

	cfe_dbg(cfe, "%s: [%s] %ux%u, V4L2 pix %p4cc\n", __func__,
		node_desc[node->id].name, f->fmt.pix.width, f->fmt.pix.height,
		&f->fmt.pix.pixelformat);

	if (!node_supports_image_output(node))
		return -EINVAL;

	/*
	 * Default to a format that works for both CSI2 and FE.
	 */
	fmt = find_format_by_pix(f->fmt.pix.pixelformat);
	if (!fmt)
		fmt = find_format_by_code(MEDIA_BUS_FMT_SBGGR10_1X10);

	f->fmt.pix.pixelformat = fmt->fourcc;

	/* The FE writes 16-bit samples; switch to the 16-bit remap if any. */
	if (is_fe_node(node) && fmt->remap[CFE_REMAP_16BIT]) {
		f->fmt.pix.pixelformat = fmt->remap[CFE_REMAP_16BIT];
		fmt = find_format_by_pix(f->fmt.pix.pixelformat);
	}

	f->fmt.pix.field = V4L2_FIELD_NONE;

	cfe_calc_vid_format_size_bpl(cfe, fmt, f);

	return 0;
}
1423 
1424 static int cfe_s_fmt_vid_cap(struct file *file, void *priv,
1425 			     struct v4l2_format *f)
1426 {
1427 	struct cfe_node *node = video_drvdata(file);
1428 	struct cfe_device *cfe = node->cfe;
1429 	struct vb2_queue *q = &node->buffer_queue;
1430 	int ret;
1431 
1432 	if (vb2_is_busy(q))
1433 		return -EBUSY;
1434 
1435 	ret = cfe_validate_fmt_vid_cap(node, f);
1436 	if (ret)
1437 		return ret;
1438 
1439 	node->vid_fmt = *f;
1440 
1441 	cfe_dbg(cfe, "%s: Set %ux%u, V4L2 pix %p4cc\n", __func__,
1442 		node->vid_fmt.fmt.pix.width, node->vid_fmt.fmt.pix.height,
1443 		&node->vid_fmt.fmt.pix.pixelformat);
1444 
1445 	return 0;
1446 }
1447 
1448 static int cfe_try_fmt_vid_cap(struct file *file, void *priv,
1449 			       struct v4l2_format *f)
1450 {
1451 	struct cfe_node *node = video_drvdata(file);
1452 	struct cfe_device *cfe = node->cfe;
1453 
1454 	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1455 
1456 	return cfe_validate_fmt_vid_cap(node, f);
1457 }
1458 
/*
 * VIDIOC_ENUM_FMT (meta capture/output): CSI-2 channel nodes expose three
 * line-based generic metadata formats; the FE stats and config nodes each
 * expose exactly one fixed format.
 */
static int cfe_enum_fmt_meta(struct file *file, void *priv,
			     struct v4l2_fmtdesc *f)
{
	struct cfe_node *node = video_drvdata(file);
	struct cfe_device *cfe = node->cfe;

	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);

	if (!node_supports_meta(node))
		return -EINVAL;

	switch (node->id) {
	case CSI2_CH0...CSI2_CH3:
		/* Generic CSI-2 metadata, with width/height semantics. */
		f->flags = V4L2_FMT_FLAG_META_LINE_BASED;

		switch (f->index) {
		case 0:
			f->pixelformat = V4L2_META_FMT_GENERIC_8;
			return 0;
		case 1:
			f->pixelformat = V4L2_META_FMT_GENERIC_CSI2_10;
			return 0;
		case 2:
			f->pixelformat = V4L2_META_FMT_GENERIC_CSI2_12;
			return 0;
		default:
			return -EINVAL;
		}
	default:
		break;
	}

	/* FE stats/config nodes have a single fixed format. */
	if (f->index != 0)
		return -EINVAL;

	switch (node->id) {
	case FE_STATS:
		f->pixelformat = V4L2_META_FMT_RPI_FE_STATS;
		return 0;
	case FE_CONFIG:
		f->pixelformat = V4L2_META_FMT_RPI_FE_CFG;
		return 0;
	default:
		return -EINVAL;
	}
}
1505 
/*
 * Validate and adjust a metadata format in place. CSI-2 channel nodes get a
 * line-based generic format (falling back to GENERIC_CSI2_10) with computed
 * size/stride; FE stats and config nodes have fixed formats whose buffer
 * size matches the corresponding PiSP structure.
 */
static int cfe_validate_fmt_meta(struct cfe_node *node, struct v4l2_format *f)
{
	struct cfe_device *cfe = node->cfe;
	const struct cfe_fmt *fmt;

	/* Per-node-type debug trace of the requested format. */
	switch (node->id) {
	case CSI2_CH0...CSI2_CH3:
		cfe_dbg(cfe, "%s: [%s] %ux%u, V4L2 meta %p4cc\n", __func__,
			node_desc[node->id].name, f->fmt.meta.width,
			f->fmt.meta.height, &f->fmt.meta.dataformat);
		break;
	case FE_STATS:
	case FE_CONFIG:
		cfe_dbg(cfe, "%s: [%s] %u bytes, V4L2 meta %p4cc\n", __func__,
			node_desc[node->id].name, f->fmt.meta.buffersize,
			&f->fmt.meta.dataformat);
		break;
	default:
		return -EINVAL;
	}

	if (!node_supports_meta(node))
		return -EINVAL;

	switch (node->id) {
	case CSI2_CH0...CSI2_CH3:
		/* Fall back to a safe default for unknown meta formats. */
		fmt = find_format_by_pix(f->fmt.meta.dataformat);
		if (!fmt || !(fmt->flags & CFE_FORMAT_FLAG_META_CAP))
			fmt = find_format_by_pix(V4L2_META_FMT_GENERIC_CSI2_10);

		f->fmt.meta.dataformat = fmt->fourcc;

		cfe_calc_meta_format_size_bpl(cfe, fmt, f);

		return 0;
	case FE_STATS:
		f->fmt.meta.dataformat = V4L2_META_FMT_RPI_FE_STATS;
		f->fmt.meta.buffersize = sizeof(struct pisp_statistics);
		return 0;
	case FE_CONFIG:
		f->fmt.meta.dataformat = V4L2_META_FMT_RPI_FE_CFG;
		f->fmt.meta.buffersize = sizeof(struct pisp_fe_config);
		return 0;
	default:
		return -EINVAL;
	}
}
1553 
1554 static int cfe_g_fmt_meta(struct file *file, void *priv, struct v4l2_format *f)
1555 {
1556 	struct cfe_node *node = video_drvdata(file);
1557 	struct cfe_device *cfe = node->cfe;
1558 
1559 	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1560 
1561 	if (!node_supports_meta(node))
1562 		return -EINVAL;
1563 
1564 	*f = node->meta_fmt;
1565 
1566 	return 0;
1567 }
1568 
1569 static int cfe_s_fmt_meta(struct file *file, void *priv, struct v4l2_format *f)
1570 {
1571 	struct cfe_node *node = video_drvdata(file);
1572 	struct cfe_device *cfe = node->cfe;
1573 	struct vb2_queue *q = &node->buffer_queue;
1574 	int ret;
1575 
1576 	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1577 
1578 	if (vb2_is_busy(q))
1579 		return -EBUSY;
1580 
1581 	if (!node_supports_meta(node))
1582 		return -EINVAL;
1583 
1584 	ret = cfe_validate_fmt_meta(node, f);
1585 	if (ret)
1586 		return ret;
1587 
1588 	node->meta_fmt = *f;
1589 
1590 	cfe_dbg(cfe, "%s: Set %p4cc\n", __func__,
1591 		&node->meta_fmt.fmt.meta.dataformat);
1592 
1593 	return 0;
1594 }
1595 
1596 static int cfe_try_fmt_meta(struct file *file, void *priv,
1597 			    struct v4l2_format *f)
1598 {
1599 	struct cfe_node *node = video_drvdata(file);
1600 	struct cfe_device *cfe = node->cfe;
1601 
1602 	cfe_dbg(cfe, "%s: [%s]\n", __func__, node_desc[node->id].name);
1603 	return cfe_validate_fmt_meta(node, f);
1604 }
1605 
1606 static int cfe_enum_framesizes(struct file *file, void *priv,
1607 			       struct v4l2_frmsizeenum *fsize)
1608 {
1609 	struct cfe_node *node = video_drvdata(file);
1610 	struct cfe_device *cfe = node->cfe;
1611 	const struct cfe_fmt *fmt;
1612 
1613 	cfe_dbg(cfe, "%s [%s]\n", __func__, node_desc[node->id].name);
1614 
1615 	if (fsize->index > 0)
1616 		return -EINVAL;
1617 
1618 	/* check for valid format */
1619 	fmt = find_format_by_pix(fsize->pixel_format);
1620 	if (!fmt) {
1621 		cfe_dbg(cfe, "Invalid pixel code: %x\n", fsize->pixel_format);
1622 		return -EINVAL;
1623 	}
1624 
1625 	/* TODO: Do we have limits on the step_width? */
1626 
1627 	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
1628 	fsize->stepwise.min_width = MIN_WIDTH;
1629 	fsize->stepwise.max_width = MAX_WIDTH;
1630 	fsize->stepwise.step_width = 2;
1631 	fsize->stepwise.min_height = MIN_HEIGHT;
1632 	fsize->stepwise.max_height = MAX_HEIGHT;
1633 	fsize->stepwise.step_height = 1;
1634 
1635 	return 0;
1636 }
1637 
1638 static int cfe_vb2_ioctl_reqbufs(struct file *file, void *priv,
1639 				 struct v4l2_requestbuffers *p)
1640 {
1641 	struct video_device *vdev = video_devdata(file);
1642 	struct cfe_node *node = video_get_drvdata(vdev);
1643 	struct cfe_device *cfe = node->cfe;
1644 	int ret;
1645 
1646 	cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name,
1647 		p->type);
1648 
1649 	if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
1650 	    p->type != V4L2_BUF_TYPE_META_CAPTURE &&
1651 	    p->type != V4L2_BUF_TYPE_META_OUTPUT)
1652 		return -EINVAL;
1653 
1654 	ret = vb2_queue_change_type(vdev->queue, p->type);
1655 	if (ret)
1656 		return ret;
1657 
1658 	return vb2_ioctl_reqbufs(file, priv, p);
1659 }
1660 
1661 static int cfe_vb2_ioctl_create_bufs(struct file *file, void *priv,
1662 				     struct v4l2_create_buffers *p)
1663 {
1664 	struct video_device *vdev = video_devdata(file);
1665 	struct cfe_node *node = video_get_drvdata(vdev);
1666 	struct cfe_device *cfe = node->cfe;
1667 	int ret;
1668 
1669 	cfe_dbg(cfe, "%s: [%s] type:%u\n", __func__, node_desc[node->id].name,
1670 		p->format.type);
1671 
1672 	if (p->format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
1673 	    p->format.type != V4L2_BUF_TYPE_META_CAPTURE &&
1674 	    p->format.type != V4L2_BUF_TYPE_META_OUTPUT)
1675 		return -EINVAL;
1676 
1677 	ret = vb2_queue_change_type(vdev->queue, p->format.type);
1678 	if (ret)
1679 		return ret;
1680 
1681 	return vb2_ioctl_create_bufs(file, priv, p);
1682 }
1683 
1684 static int cfe_subscribe_event(struct v4l2_fh *fh,
1685 			       const struct v4l2_event_subscription *sub)
1686 {
1687 	struct cfe_node *node = video_get_drvdata(fh->vdev);
1688 
1689 	switch (sub->type) {
1690 	case V4L2_EVENT_FRAME_SYNC:
1691 		if (!node_supports_image_output(node))
1692 			break;
1693 
1694 		return v4l2_event_subscribe(fh, sub, 2, NULL);
1695 	case V4L2_EVENT_SOURCE_CHANGE:
1696 		if (!node_supports_image_output(node) &&
1697 		    !node_supports_meta_output(node))
1698 			break;
1699 
1700 		return v4l2_event_subscribe(fh, sub, 4, NULL);
1701 	}
1702 
1703 	return v4l2_ctrl_subscribe_event(fh, sub);
1704 }
1705 
/*
 * Shared ioctl table for all CFE nodes. Meta capture and meta output share
 * the same handlers; inapplicable ioctls are disabled per-node at
 * registration time (see cfe_register_node()).
 */
static const struct v4l2_ioctl_ops cfe_ioctl_ops = {
	.vidioc_querycap = cfe_querycap,
	.vidioc_enum_fmt_vid_cap = cfe_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap = cfe_g_fmt,
	.vidioc_s_fmt_vid_cap = cfe_s_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap = cfe_try_fmt_vid_cap,

	.vidioc_enum_fmt_meta_cap = cfe_enum_fmt_meta,
	.vidioc_g_fmt_meta_cap = cfe_g_fmt_meta,
	.vidioc_s_fmt_meta_cap = cfe_s_fmt_meta,
	.vidioc_try_fmt_meta_cap = cfe_try_fmt_meta,

	.vidioc_enum_fmt_meta_out = cfe_enum_fmt_meta,
	.vidioc_g_fmt_meta_out = cfe_g_fmt_meta,
	.vidioc_s_fmt_meta_out = cfe_s_fmt_meta,
	.vidioc_try_fmt_meta_out = cfe_try_fmt_meta,

	.vidioc_enum_framesizes = cfe_enum_framesizes,

	.vidioc_reqbufs = cfe_vb2_ioctl_reqbufs,
	.vidioc_create_bufs = cfe_vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,

	.vidioc_subscribe_event = cfe_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
1738 
1739 static void cfe_notify(struct v4l2_subdev *sd, unsigned int notification,
1740 		       void *arg)
1741 {
1742 	struct cfe_device *cfe = to_cfe_device(sd->v4l2_dev);
1743 
1744 	switch (notification) {
1745 	case V4L2_DEVICE_NOTIFY_EVENT:
1746 		for (unsigned int i = 0; i < NUM_NODES; i++) {
1747 			struct cfe_node *node = &cfe->node[i];
1748 
1749 			if (check_state(cfe, NODE_REGISTERED, i))
1750 				continue;
1751 
1752 			v4l2_event_queue(&node->video_dev, arg);
1753 		}
1754 		break;
1755 	default:
1756 		break;
1757 	}
1758 }
1759 
/* File operations shared by all CFE video nodes; vb2 handles the I/O. */
static const struct v4l2_file_operations cfe_fops = {
	.owner = THIS_MODULE,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.poll = vb2_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = vb2_fop_mmap,
};
1769 
/*
 * Media link validation for the video nodes: the node's configured V4L2
 * format (image or line-based metadata) must match the format set on the
 * remote subdev source pad in both size and mbus-code/fourcc pairing.
 * FE stats/config nodes carry no per-frame geometry and always validate.
 */
static int cfe_video_link_validate(struct media_link *link)
{
	struct video_device *vd = container_of(link->sink->entity,
					       struct video_device, entity);
	struct cfe_node *node = container_of(vd, struct cfe_node, video_dev);
	struct cfe_device *cfe = node->cfe;
	struct v4l2_mbus_framefmt *source_fmt;
	struct v4l2_subdev_state *state;
	struct v4l2_subdev *source_sd;
	int ret = 0;

	cfe_dbg(cfe, "%s: [%s] link \"%s\":%u -> \"%s\":%u\n", __func__,
		node_desc[node->id].name,
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index);

	if (!media_entity_remote_source_pad_unique(link->sink->entity)) {
		cfe_err(cfe, "video node %s pad not connected\n", vd->name);
		return -ENOTCONN;
	}

	source_sd = media_entity_to_v4l2_subdev(link->source->entity);

	state = v4l2_subdev_lock_and_get_active_state(source_sd);

	source_fmt = v4l2_subdev_state_get_format(state, link->source->index);
	if (!source_fmt) {
		ret = -EINVAL;
		goto out;
	}

	if (is_image_output_node(node)) {
		struct v4l2_pix_format *pix_fmt = &node->vid_fmt.fmt.pix;
		const struct cfe_fmt *fmt;

		if (source_fmt->width != pix_fmt->width ||
		    source_fmt->height != pix_fmt->height) {
			cfe_err(cfe, "Wrong width or height %ux%u (remote pad set to %ux%u)\n",
				pix_fmt->width, pix_fmt->height,
				source_fmt->width, source_fmt->height);
			ret = -EINVAL;
			goto out;
		}

		/* The mbus code and fourcc must form a known pairing. */
		fmt = find_format_by_code_and_fourcc(source_fmt->code,
						     pix_fmt->pixelformat);
		if (!fmt) {
			cfe_err(cfe, "Format mismatch!\n");
			ret = -EINVAL;
			goto out;
		}
	} else if (is_csi2_node(node) && is_meta_output_node(node)) {
		struct v4l2_meta_format *meta_fmt = &node->meta_fmt.fmt.meta;
		const struct cfe_fmt *fmt;

		if (source_fmt->width != meta_fmt->width ||
		    source_fmt->height != meta_fmt->height) {
			cfe_err(cfe, "Wrong width or height %ux%u (remote pad set to %ux%u)\n",
				meta_fmt->width, meta_fmt->height,
				source_fmt->width, source_fmt->height);
			ret = -EINVAL;
			goto out;
		}

		fmt = find_format_by_code_and_fourcc(source_fmt->code,
						     meta_fmt->dataformat);
		if (!fmt) {
			cfe_err(cfe, "Format mismatch!\n");
			ret = -EINVAL;
			goto out;
		}
	}

out:
	v4l2_subdev_unlock_state(state);

	return ret;
}
1848 
/* Entity operations for the video nodes: custom link validation only. */
static const struct media_entity_operations cfe_media_entity_ops = {
	.link_validate = cfe_video_link_validate,
};
1852 
/*
 * Media link-change notifier. Two jobs:
 *  - mirror the ENABLED flag of each video-node link into the per-node
 *    driver state, and
 *  - track which CSI-2 channel feeds the Frontend (fe_csi2_channel, -1
 *    when no CSI2 -> FE:0 link is enabled).
 */
static int cfe_video_link_notify(struct media_link *link, u32 flags,
				 unsigned int notification)
{
	struct media_device *mdev = link->graph_obj.mdev;
	struct cfe_device *cfe = container_of(mdev, struct cfe_device, mdev);
	struct media_entity *fe = &cfe->fe.sd.entity;
	struct media_entity *csi2 = &cfe->csi2.sd.entity;
	unsigned long lock_flags;

	if (notification != MEDIA_DEV_NOTIFY_POST_LINK_CH)
		return 0;

	cfe_dbg(cfe, "%s: %s[%u] -> %s[%u] 0x%x", __func__,
		link->source->entity->name, link->source->index,
		link->sink->entity->name, link->sink->index, flags);

	spin_lock_irqsave(&cfe->state_lock, lock_flags);

	/* Sync the NODE_ENABLED bit for whichever node this link touches. */
	for (unsigned int i = 0; i < NUM_NODES; i++) {
		if (link->sink->entity != &cfe->node[i].video_dev.entity &&
		    link->source->entity != &cfe->node[i].video_dev.entity)
			continue;

		if (link->flags & MEDIA_LNK_FL_ENABLED)
			set_state(cfe, NODE_ENABLED, i);
		else
			clear_state(cfe, NODE_ENABLED, i);

		break;
	}

	spin_unlock_irqrestore(&cfe->state_lock, lock_flags);

	/* Only CSI2 -> FE sink-pad-0 links affect the FE channel below. */
	if (link->source->entity != csi2)
		return 0;
	if (link->sink->entity != fe)
		return 0;
	if (link->sink->index != 0)
		return 0;

	cfe->fe_csi2_channel = -1;
	if (link->flags & MEDIA_LNK_FL_ENABLED) {
		if (link->source->index == node_desc[CSI2_CH0].link_pad)
			cfe->fe_csi2_channel = CSI2_CH0;
		else if (link->source->index == node_desc[CSI2_CH1].link_pad)
			cfe->fe_csi2_channel = CSI2_CH1;
		else if (link->source->index == node_desc[CSI2_CH2].link_pad)
			cfe->fe_csi2_channel = CSI2_CH2;
		else if (link->source->index == node_desc[CSI2_CH3].link_pad)
			cfe->fe_csi2_channel = CSI2_CH3;
	}

	if (is_fe_enabled(cfe))
		cfe_dbg(cfe, "%s: Found CSI2:%d -> FE:0 link\n", __func__,
			cfe->fe_csi2_channel);
	else
		cfe_dbg(cfe, "%s: Unable to find CSI2:x -> FE:0 link\n",
			__func__);

	return 0;
}
1914 
/* Media device operations: only link-change notification is needed. */
static const struct media_device_ops cfe_media_device_ops = {
	.link_notify = cfe_video_link_notify,
};
1918 
/*
 * kref release callback: runs when the last reference to the cfe_device is
 * dropped (all video nodes unregistered and all file handles closed).
 */
static void cfe_release(struct kref *kref)
{
	struct cfe_device *cfe = container_of(kref, struct cfe_device, kref);

	media_device_cleanup(&cfe->mdev);

	kfree(cfe);
}
1927 
/* Drop a cfe_device reference; frees the device via cfe_release(). */
static void cfe_put(struct cfe_device *cfe)
{
	kref_put(&cfe->kref, cfe_release);
}
1932 
/* Take an additional cfe_device reference. */
static void cfe_get(struct cfe_device *cfe)
{
	kref_get(&cfe->kref);
}
1937 
/*
 * video_device release callback: drops the cfe_device reference taken in
 * cfe_register_node() once userspace has closed all file handles.
 */
static void cfe_node_release(struct video_device *vdev)
{
	struct cfe_node *node = video_get_drvdata(vdev);

	cfe_put(node->cfe);
}
1944 
/*
 * Initialise and register one CFE video node: set up default image and/or
 * metadata formats, initialise the vb2 queue and media pad, and register
 * the video device. Returns 0 on success or a negative error code; on
 * success a cfe_device reference is held until cfe_node_release().
 */
static int cfe_register_node(struct cfe_device *cfe, int id)
{
	struct video_device *vdev;
	const struct cfe_fmt *fmt;
	struct vb2_queue *q;
	struct cfe_node *node = &cfe->node[id];
	int ret;

	node->cfe = cfe;
	node->id = id;

	/* Seed a validated default image format for image-capable nodes. */
	if (node_supports_image(node)) {
		if (node_supports_image_output(node))
			node->vid_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		else
			node->vid_fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;

		fmt = find_format_by_code(cfe_default_format.code);
		if (!fmt) {
			cfe_err(cfe, "Failed to find format code\n");
			return -EINVAL;
		}

		node->vid_fmt.fmt.pix.pixelformat = fmt->fourcc;
		v4l2_fill_pix_format(&node->vid_fmt.fmt.pix,
				     &cfe_default_format);

		ret = cfe_validate_fmt_vid_cap(node, &node->vid_fmt);
		if (ret)
			return ret;
	}

	/* Likewise for the metadata format on meta-capable nodes. */
	if (node_supports_meta(node)) {
		if (node_supports_meta_output(node))
			node->meta_fmt.type = V4L2_BUF_TYPE_META_CAPTURE;
		else
			node->meta_fmt.type = V4L2_BUF_TYPE_META_OUTPUT;

		ret = cfe_validate_fmt_meta(node, &node->meta_fmt);
		if (ret)
			return ret;
	}

	mutex_init(&node->lock);

	/* vb2 queue setup; FE_CONFIG buffers carry an extra config copy. */
	q = &node->buffer_queue;
	q->type = node_supports_image(node) ? node->vid_fmt.type :
					      node->meta_fmt.type;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->drv_priv = node;
	q->ops = &cfe_video_qops;
	q->mem_ops = &vb2_dma_contig_memops;
	q->buf_struct_size = id == FE_CONFIG ? sizeof(struct cfe_config_buffer)
					     : sizeof(struct cfe_buffer);
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = &node->lock;
	q->min_queued_buffers = 1;
	q->dev = &cfe->pdev->dev;

	ret = vb2_queue_init(q);
	if (ret) {
		cfe_err(cfe, "vb2_queue_init() failed\n");
		return ret;
	}

	INIT_LIST_HEAD(&node->dma_queue);

	/* Video device setup. */
	vdev = &node->video_dev;
	vdev->release = cfe_node_release;
	vdev->fops = &cfe_fops;
	vdev->ioctl_ops = &cfe_ioctl_ops;
	vdev->entity.ops = &cfe_media_entity_ops;
	vdev->v4l2_dev = &cfe->v4l2_dev;
	vdev->vfl_dir = (node_supports_image_output(node) ||
			 node_supports_meta_output(node)) ?
				VFL_DIR_RX :
				VFL_DIR_TX;
	vdev->queue = q;
	vdev->lock = &node->lock;
	vdev->device_caps = node_desc[id].caps;
	vdev->device_caps |= V4L2_CAP_STREAMING | V4L2_CAP_IO_MC;

	/* Define the device names */
	snprintf(vdev->name, sizeof(vdev->name), "%s-%s", CFE_MODULE_NAME,
		 node_desc[id].name);

	video_set_drvdata(vdev, node);
	node->pad.flags = node_desc[id].pad_flags;
	media_entity_pads_init(&vdev->entity, 1, &node->pad);

	/* Frame-size/interval enumeration makes no sense on meta nodes. */
	if (!node_supports_image(node)) {
		v4l2_disable_ioctl(&node->video_dev,
				   VIDIOC_ENUM_FRAMEINTERVALS);
		v4l2_disable_ioctl(&node->video_dev, VIDIOC_ENUM_FRAMESIZES);
	}

	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (ret) {
		cfe_err(cfe, "Unable to register video device %s\n",
			vdev->name);
		return ret;
	}

	cfe_info(cfe, "Registered [%s] node id %d as /dev/video%u\n",
		 vdev->name, id, vdev->num);

	/*
	 * Acquire a reference to cfe, which will be released when the video
	 * device will be unregistered and userspace will have closed all open
	 * file handles.
	 */
	cfe_get(cfe);
	set_state(cfe, NODE_REGISTERED, id);

	return 0;
}
2061 
2062 static void cfe_unregister_nodes(struct cfe_device *cfe)
2063 {
2064 	for (unsigned int i = 0; i < NUM_NODES; i++) {
2065 		struct cfe_node *node = &cfe->node[i];
2066 
2067 		if (check_state(cfe, NODE_REGISTERED, i)) {
2068 			clear_state(cfe, NODE_REGISTERED, i);
2069 			video_unregister_device(&node->video_dev);
2070 		}
2071 	}
2072 }
2073 
/*
 * Create the media graph: source subdev -> CSI-2 sink, then the CSI-2
 * channel pads to their video nodes (and to the Frontend input for image
 * channels), and finally the FE stats/config pads to their video nodes.
 * Also records the unique source pad index in cfe->source_pad.
 */
static int cfe_link_node_pads(struct cfe_device *cfe)
{
	struct media_pad *remote_pad;
	int ret;

	/* Source -> CSI2 */

	ret = v4l2_create_fwnode_links_to_pad(cfe->source_sd,
					      &cfe->csi2.pad[CSI2_PAD_SINK],
					      MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);

	if (ret) {
		cfe_err(cfe, "Failed to create links to the source: %d\n", ret);
		return ret;
	}

	remote_pad = media_pad_remote_pad_unique(&cfe->csi2.pad[CSI2_PAD_SINK]);
	if (IS_ERR(remote_pad)) {
		ret = PTR_ERR(remote_pad);
		cfe_err(cfe, "Failed to get unique remote source pad: %d\n",
			ret);
		return ret;
	}

	cfe->source_pad = remote_pad->index;

	for (unsigned int i = 0; i < CSI2_NUM_CHANNELS; i++) {
		struct cfe_node *node = &cfe->node[i];

		if (!check_state(cfe, NODE_REGISTERED, i))
			continue;

		/* CSI2 channel # -> /dev/video# */
		ret = media_create_pad_link(&cfe->csi2.sd.entity,
					    node_desc[i].link_pad,
					    &node->video_dev.entity, 0, 0);
		if (ret)
			return ret;

		if (node_supports_image(node)) {
			/* CSI2 channel # -> FE Input */
			ret = media_create_pad_link(&cfe->csi2.sd.entity,
						    node_desc[i].link_pad,
						    &cfe->fe.sd.entity,
						    FE_STREAM_PAD, 0);
			if (ret)
				return ret;
		}
	}

	/* FE stats/config nodes: link direction depends on the pad flags. */
	for (unsigned int i = CSI2_NUM_CHANNELS; i < NUM_NODES; i++) {
		struct cfe_node *node = &cfe->node[i];
		struct media_entity *src, *dst;
		unsigned int src_pad, dst_pad;

		if (node_desc[i].pad_flags & MEDIA_PAD_FL_SINK) {
			/* FE -> /dev/video# */
			src = &cfe->fe.sd.entity;
			src_pad = node_desc[i].link_pad;
			dst = &node->video_dev.entity;
			dst_pad = 0;
		} else {
			/* /dev/video# -> FE */
			dst = &cfe->fe.sd.entity;
			dst_pad = node_desc[i].link_pad;
			src = &node->video_dev.entity;
			src_pad = 0;
		}

		ret = media_create_pad_link(src, src_pad, dst, dst_pad, 0);
		if (ret)
			return ret;
	}

	return 0;
}
2150 
2151 static int cfe_probe_complete(struct cfe_device *cfe)
2152 {
2153 	int ret;
2154 
2155 	cfe->v4l2_dev.notify = cfe_notify;
2156 
2157 	for (unsigned int i = 0; i < NUM_NODES; i++) {
2158 		ret = cfe_register_node(cfe, i);
2159 		if (ret) {
2160 			cfe_err(cfe, "Unable to register video node %u.\n", i);
2161 			goto unregister;
2162 		}
2163 	}
2164 
2165 	ret = cfe_link_node_pads(cfe);
2166 	if (ret) {
2167 		cfe_err(cfe, "Unable to link node pads.\n");
2168 		goto unregister;
2169 	}
2170 
2171 	ret = v4l2_device_register_subdev_nodes(&cfe->v4l2_dev);
2172 	if (ret) {
2173 		cfe_err(cfe, "Unable to register subdev nodes.\n");
2174 		goto unregister;
2175 	}
2176 
2177 	return 0;
2178 
2179 unregister:
2180 	cfe_unregister_nodes(cfe);
2181 	return ret;
2182 }
2183 
2184 static int cfe_async_bound(struct v4l2_async_notifier *notifier,
2185 			   struct v4l2_subdev *subdev,
2186 			   struct v4l2_async_connection *asd)
2187 {
2188 	struct cfe_device *cfe = to_cfe_device(notifier->v4l2_dev);
2189 
2190 	if (cfe->source_sd) {
2191 		cfe_err(cfe, "Rejecting subdev %s (Already set!!)",
2192 			subdev->name);
2193 		return 0;
2194 	}
2195 
2196 	cfe->source_sd = subdev;
2197 
2198 	cfe_dbg(cfe, "Using source %s for capture\n", subdev->name);
2199 
2200 	return 0;
2201 }
2202 
2203 static int cfe_async_complete(struct v4l2_async_notifier *notifier)
2204 {
2205 	struct cfe_device *cfe = to_cfe_device(notifier->v4l2_dev);
2206 
2207 	return cfe_probe_complete(cfe);
2208 }
2209 
/* Async notifier callbacks: capture the source subdev, then finish probe. */
static const struct v4l2_async_notifier_operations cfe_async_ops = {
	.bound = cfe_async_bound,
	.complete = cfe_async_complete,
};
2214 
2215 static int cfe_register_async_nf(struct cfe_device *cfe)
2216 {
2217 	struct platform_device *pdev = cfe->pdev;
2218 	struct v4l2_fwnode_endpoint ep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
2219 	struct fwnode_handle *local_ep_fwnode;
2220 	struct v4l2_async_connection *asd;
2221 	int ret;
2222 
2223 	local_ep_fwnode = fwnode_graph_get_endpoint_by_id(pdev->dev.fwnode, 0,
2224 							  0, 0);
2225 	if (!local_ep_fwnode) {
2226 		cfe_err(cfe, "Failed to find local endpoint fwnode\n");
2227 		return -ENODEV;
2228 	}
2229 
2230 	/* Parse the local endpoint and validate its configuration. */
2231 	ret = v4l2_fwnode_endpoint_parse(local_ep_fwnode, &ep);
2232 	if (ret) {
2233 		cfe_err(cfe, "Failed to find remote endpoint fwnode\n");
2234 		goto err_put_local_fwnode;
2235 	}
2236 
2237 	for (unsigned int lane = 0; lane < ep.bus.mipi_csi2.num_data_lanes;
2238 	     lane++) {
2239 		if (ep.bus.mipi_csi2.data_lanes[lane] != lane + 1) {
2240 			cfe_err(cfe, "Data lanes reordering not supported\n");
2241 			ret = -EINVAL;
2242 			goto err_put_local_fwnode;
2243 		}
2244 	}
2245 
2246 	cfe->csi2.dphy.max_lanes = ep.bus.mipi_csi2.num_data_lanes;
2247 	cfe->csi2.bus_flags = ep.bus.mipi_csi2.flags;
2248 
2249 	/* Initialize and register the async notifier. */
2250 	v4l2_async_nf_init(&cfe->notifier, &cfe->v4l2_dev);
2251 	cfe->notifier.ops = &cfe_async_ops;
2252 
2253 	asd = v4l2_async_nf_add_fwnode_remote(&cfe->notifier, local_ep_fwnode,
2254 					      struct v4l2_async_connection);
2255 	if (IS_ERR(asd)) {
2256 		ret = PTR_ERR(asd);
2257 		cfe_err(cfe, "Error adding subdevice: %d\n", ret);
2258 		goto err_put_local_fwnode;
2259 	}
2260 
2261 	ret = v4l2_async_nf_register(&cfe->notifier);
2262 	if (ret) {
2263 		cfe_err(cfe, "Error registering async notifier: %d\n", ret);
2264 		goto err_nf_cleanup;
2265 	}
2266 
2267 	fwnode_handle_put(local_ep_fwnode);
2268 
2269 	return 0;
2270 
2271 err_nf_cleanup:
2272 	v4l2_async_nf_cleanup(&cfe->notifier);
2273 err_put_local_fwnode:
2274 	fwnode_handle_put(local_ep_fwnode);
2275 
2276 	return ret;
2277 }
2278 
2279 static int cfe_probe(struct platform_device *pdev)
2280 {
2281 	struct cfe_device *cfe;
2282 	char debugfs_name[32];
2283 	int ret;
2284 
2285 	cfe = kzalloc(sizeof(*cfe), GFP_KERNEL);
2286 	if (!cfe)
2287 		return -ENOMEM;
2288 
2289 	platform_set_drvdata(pdev, cfe);
2290 
2291 	kref_init(&cfe->kref);
2292 	cfe->pdev = pdev;
2293 	cfe->fe_csi2_channel = -1;
2294 	spin_lock_init(&cfe->state_lock);
2295 
2296 	cfe->csi2.base = devm_platform_ioremap_resource(pdev, 0);
2297 	if (IS_ERR(cfe->csi2.base)) {
2298 		dev_err(&pdev->dev, "Failed to get dma io block\n");
2299 		ret = PTR_ERR(cfe->csi2.base);
2300 		goto err_cfe_put;
2301 	}
2302 
2303 	cfe->csi2.dphy.base = devm_platform_ioremap_resource(pdev, 1);
2304 	if (IS_ERR(cfe->csi2.dphy.base)) {
2305 		dev_err(&pdev->dev, "Failed to get host io block\n");
2306 		ret = PTR_ERR(cfe->csi2.dphy.base);
2307 		goto err_cfe_put;
2308 	}
2309 
2310 	cfe->mipi_cfg_base = devm_platform_ioremap_resource(pdev, 2);
2311 	if (IS_ERR(cfe->mipi_cfg_base)) {
2312 		dev_err(&pdev->dev, "Failed to get mipi cfg io block\n");
2313 		ret = PTR_ERR(cfe->mipi_cfg_base);
2314 		goto err_cfe_put;
2315 	}
2316 
2317 	cfe->fe.base = devm_platform_ioremap_resource(pdev, 3);
2318 	if (IS_ERR(cfe->fe.base)) {
2319 		dev_err(&pdev->dev, "Failed to get pisp fe io block\n");
2320 		ret = PTR_ERR(cfe->fe.base);
2321 		goto err_cfe_put;
2322 	}
2323 
2324 	ret = platform_get_irq(pdev, 0);
2325 	if (ret <= 0) {
2326 		ret = -EINVAL;
2327 		goto err_cfe_put;
2328 	}
2329 
2330 	ret = devm_request_irq(&pdev->dev, ret, cfe_isr, 0, "rp1-cfe", cfe);
2331 	if (ret) {
2332 		dev_err(&pdev->dev, "Unable to request interrupt\n");
2333 		ret = -EINVAL;
2334 		goto err_cfe_put;
2335 	}
2336 
2337 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2338 	if (ret) {
2339 		dev_err(&pdev->dev, "DMA enable failed\n");
2340 		goto err_cfe_put;
2341 	}
2342 
2343 	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, UINT_MAX);
2344 	if (ret)
2345 		goto err_cfe_put;
2346 
2347 	/* TODO: Enable clock only when running. */
2348 	cfe->clk = devm_clk_get(&pdev->dev, NULL);
2349 	if (IS_ERR(cfe->clk)) {
2350 		ret = dev_err_probe(&pdev->dev, PTR_ERR(cfe->clk),
2351 				    "clock not found\n");
2352 		goto err_cfe_put;
2353 	}
2354 
2355 	cfe->mdev.dev = &pdev->dev;
2356 	cfe->mdev.ops = &cfe_media_device_ops;
2357 	strscpy(cfe->mdev.model, CFE_MODULE_NAME, sizeof(cfe->mdev.model));
2358 	strscpy(cfe->mdev.serial, "", sizeof(cfe->mdev.serial));
2359 	snprintf(cfe->mdev.bus_info, sizeof(cfe->mdev.bus_info), "platform:%s",
2360 		 dev_name(&pdev->dev));
2361 
2362 	media_device_init(&cfe->mdev);
2363 
2364 	cfe->v4l2_dev.mdev = &cfe->mdev;
2365 
2366 	ret = v4l2_device_register(&pdev->dev, &cfe->v4l2_dev);
2367 	if (ret) {
2368 		cfe_err(cfe, "Unable to register v4l2 device.\n");
2369 		goto err_cfe_put;
2370 	}
2371 
2372 	snprintf(debugfs_name, sizeof(debugfs_name), "rp1-cfe:%s",
2373 		 dev_name(&pdev->dev));
2374 	cfe->debugfs = debugfs_create_dir(debugfs_name, NULL);
2375 	debugfs_create_file("regs", 0440, cfe->debugfs, cfe,
2376 			    &mipi_cfg_regs_fops);
2377 
2378 	/* Enable the block power domain */
2379 	pm_runtime_enable(&pdev->dev);
2380 
2381 	ret = pm_runtime_resume_and_get(&cfe->pdev->dev);
2382 	if (ret)
2383 		goto err_runtime_disable;
2384 
2385 	cfe->csi2.v4l2_dev = &cfe->v4l2_dev;
2386 	ret = csi2_init(&cfe->csi2, cfe->debugfs);
2387 	if (ret) {
2388 		cfe_err(cfe, "Failed to init csi2 (%d)\n", ret);
2389 		goto err_runtime_put;
2390 	}
2391 
2392 	cfe->fe.v4l2_dev = &cfe->v4l2_dev;
2393 	ret = pisp_fe_init(&cfe->fe, cfe->debugfs);
2394 	if (ret) {
2395 		cfe_err(cfe, "Failed to init pisp fe (%d)\n", ret);
2396 		goto err_csi2_uninit;
2397 	}
2398 
2399 	cfe->mdev.hw_revision = cfe->fe.hw_revision;
2400 	ret = media_device_register(&cfe->mdev);
2401 	if (ret < 0) {
2402 		cfe_err(cfe, "Unable to register media-controller device.\n");
2403 		goto err_pisp_fe_uninit;
2404 	}
2405 
2406 	ret = cfe_register_async_nf(cfe);
2407 	if (ret) {
2408 		cfe_err(cfe, "Failed to connect subdevs\n");
2409 		goto err_media_unregister;
2410 	}
2411 
2412 	pm_runtime_put(&cfe->pdev->dev);
2413 
2414 	return 0;
2415 
2416 err_media_unregister:
2417 	media_device_unregister(&cfe->mdev);
2418 err_pisp_fe_uninit:
2419 	pisp_fe_uninit(&cfe->fe);
2420 err_csi2_uninit:
2421 	csi2_uninit(&cfe->csi2);
2422 err_runtime_put:
2423 	pm_runtime_put(&cfe->pdev->dev);
2424 err_runtime_disable:
2425 	pm_runtime_disable(&pdev->dev);
2426 	debugfs_remove(cfe->debugfs);
2427 	v4l2_device_unregister(&cfe->v4l2_dev);
2428 err_cfe_put:
2429 	cfe_put(cfe);
2430 
2431 	return ret;
2432 }
2433 
/*
 * Remove: tear down in reverse order of probe. The final cfe_put() drops
 * the probe-time kref; the cfe structure itself is freed only once all
 * userspace file handles on the video nodes are closed as well.
 */
static void cfe_remove(struct platform_device *pdev)
{
	struct cfe_device *cfe = platform_get_drvdata(pdev);

	debugfs_remove(cfe->debugfs);

	/* Stop async matching before unregistering the devices it feeds. */
	v4l2_async_nf_unregister(&cfe->notifier);
	v4l2_async_nf_cleanup(&cfe->notifier);

	media_device_unregister(&cfe->mdev);
	cfe_unregister_nodes(cfe);

	pisp_fe_uninit(&cfe->fe);
	csi2_uninit(&cfe->csi2);

	pm_runtime_disable(&pdev->dev);

	v4l2_device_unregister(&cfe->v4l2_dev);

	cfe_put(cfe);
}
2455 
2456 static int cfe_runtime_suspend(struct device *dev)
2457 {
2458 	struct platform_device *pdev = to_platform_device(dev);
2459 	struct cfe_device *cfe = platform_get_drvdata(pdev);
2460 
2461 	clk_disable_unprepare(cfe->clk);
2462 
2463 	return 0;
2464 }
2465 
2466 static int cfe_runtime_resume(struct device *dev)
2467 {
2468 	struct platform_device *pdev = to_platform_device(dev);
2469 	struct cfe_device *cfe = platform_get_drvdata(pdev);
2470 	int ret;
2471 
2472 	ret = clk_prepare_enable(cfe->clk);
2473 	if (ret) {
2474 		dev_err(dev, "Unable to enable clock\n");
2475 		return ret;
2476 	}
2477 
2478 	return 0;
2479 }
2480 
/*
 * Runtime PM gates the clock; system sleep reuses the runtime callbacks
 * via the force_suspend/force_resume helpers at the late/early phase.
 */
static const struct dev_pm_ops cfe_pm_ops = {
	SET_RUNTIME_PM_OPS(cfe_runtime_suspend, cfe_runtime_resume, NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
};
2486 
/* Device-tree match table; also exported for module autoloading. */
static const struct of_device_id cfe_of_match[] = {
	{ .compatible = "raspberrypi,rp1-cfe" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cfe_of_match);
2492 
/* Platform driver glue: probe/remove plus PM ops and OF matching. */
static struct platform_driver cfe_driver = {
	.probe		= cfe_probe,
	.remove		= cfe_remove,
	.driver = {
		.name	= CFE_MODULE_NAME,
		.of_match_table = cfe_of_match,
		.pm = &cfe_pm_ops,
	},
};
2502 
/* Module registration and metadata. */
module_platform_driver(cfe_driver);

MODULE_AUTHOR("Naushir Patuck <naush@raspberrypi.com>");
MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>");
MODULE_DESCRIPTION("Raspberry Pi RP1 Camera Front End driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CFE_VERSION);
2510