// SPDX-License-Identifier: GPL-2.0
/*
 * camss-vfe-480.c
 *
 * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v480 (SM8250)
 *
 * Copyright (C) 2020-2021 Linaro Ltd.
 * Copyright (C) 2021 Jonathan Marek
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#include "camss.h"
#include "camss-vfe.h"

/* VFE 2/3 are lite and have a different register layout */
#define IS_LITE		(vfe->id >= 2 ? 1 : 0)

#define VFE_HW_VERSION			(0x00)

#define VFE_GLOBAL_RESET_CMD		(IS_LITE ? 0x0c : 0x1c)
#define	    GLOBAL_RESET_HW_AND_REG	(IS_LITE ? BIT(1) : BIT(0))

#define VFE_REG_UPDATE_CMD		(IS_LITE ? 0x20 : 0x34)
static inline int reg_update_rdi(struct vfe_device *vfe, int n)
{
	return IS_LITE ? BIT(n) : BIT(1 + (n));
}

#define	    REG_UPDATE_RDI		reg_update_rdi
#define VFE_IRQ_CMD			(IS_LITE ? 0x24 : 0x38)
#define     IRQ_CMD_GLOBAL_CLEAR	BIT(0)

#define VFE_IRQ_MASK(n)			((IS_LITE ? 0x28 : 0x3c) + (n) * 4)
#define	    IRQ_MASK_0_RESET_ACK	(IS_LITE ? BIT(17) : BIT(0))
#define	    IRQ_MASK_0_BUS_TOP_IRQ	(IS_LITE ? BIT(4) : BIT(7))
#define VFE_IRQ_CLEAR(n)		((IS_LITE ? 0x34 : 0x48) + (n) * 4)
#define VFE_IRQ_STATUS(n)		((IS_LITE ? 0x40 : 0x54) + (n) * 4)

#define BUS_REG_BASE			(IS_LITE ? 0x1a00 : 0xaa00)

#define VFE_BUS_WM_CGC_OVERRIDE		(BUS_REG_BASE + 0x08)
#define		WM_CGC_OVERRIDE_ALL	(0x3FFFFFF)

#define VFE_BUS_WM_TEST_BUS_CTRL	(BUS_REG_BASE + 0xdc)

#define VFE_BUS_IRQ_MASK(n)		(BUS_REG_BASE + 0x18 + (n) * 4)
static inline int bus_irq_mask_0_rdi_rup(struct vfe_device *vfe, int n)
{
	return IS_LITE ? BIT(n) : BIT(3 + (n));
}

#define     BUS_IRQ_MASK_0_RDI_RUP	bus_irq_mask_0_rdi_rup
static inline int bus_irq_mask_0_comp_done(struct vfe_device *vfe, int n)
{
	return IS_LITE ? BIT(4 + (n)) : BIT(6 + (n));
}

#define     BUS_IRQ_MASK_0_COMP_DONE	bus_irq_mask_0_comp_done
#define VFE_BUS_IRQ_CLEAR(n)		(BUS_REG_BASE + 0x20 + (n) * 4)
#define VFE_BUS_IRQ_STATUS(n)		(BUS_REG_BASE + 0x28 + (n) * 4)
#define VFE_BUS_IRQ_CLEAR_GLOBAL	(BUS_REG_BASE + 0x30)

#define VFE_BUS_WM_CFG(n)		(BUS_REG_BASE + 0x200 + (n) * 0x100)
#define		WM_CFG_EN			(0)
#define		WM_CFG_MODE			(16)
#define			MODE_QCOM_PLAIN	(0)
#define			MODE_MIPI_RAW	(1)
#define VFE_BUS_WM_IMAGE_ADDR(n)	(BUS_REG_BASE + 0x204 + (n) * 0x100)
#define VFE_BUS_WM_FRAME_INCR(n)	(BUS_REG_BASE + 0x208 + (n) * 0x100)
#define VFE_BUS_WM_IMAGE_CFG_0(n)	(BUS_REG_BASE + 0x20c + (n) * 0x100)
#define		WM_IMAGE_CFG_0_DEFAULT_WIDTH	(0xFFFF)
#define VFE_BUS_WM_IMAGE_CFG_1(n)	(BUS_REG_BASE + 0x210 + (n) * 0x100)
#define VFE_BUS_WM_IMAGE_CFG_2(n)	(BUS_REG_BASE + 0x214 + (n) * 0x100)
#define VFE_BUS_WM_PACKER_CFG(n)	(BUS_REG_BASE + 0x218 + (n) * 0x100)
#define VFE_BUS_WM_HEADER_ADDR(n)	(BUS_REG_BASE + 0x220 + (n) * 0x100)
#define VFE_BUS_WM_HEADER_INCR(n)	(BUS_REG_BASE + 0x224 + (n) * 0x100)
#define VFE_BUS_WM_HEADER_CFG(n)	(BUS_REG_BASE + 0x228 + (n) * 0x100)

#define VFE_BUS_WM_IRQ_SUBSAMPLE_PERIOD(n)	(BUS_REG_BASE + 0x230 + (n) * 0x100)
#define VFE_BUS_WM_IRQ_SUBSAMPLE_PATTERN(n)	(BUS_REG_BASE + 0x234 + (n) * 0x100)
#define VFE_BUS_WM_FRAMEDROP_PERIOD(n)		(BUS_REG_BASE + 0x238 + (n) * 0x100)
#define VFE_BUS_WM_FRAMEDROP_PATTERN(n)		(BUS_REG_BASE + 0x23c + (n) * 0x100)

#define VFE_BUS_WM_SYSTEM_CACHE_CFG(n)	(BUS_REG_BASE + 0x260 + (n) * 0x100)
#define VFE_BUS_WM_BURST_LIMIT(n)	(BUS_REG_BASE + 0x264 + (n) * 0x100)

/*
 * For Titan 480, each bus client is hardcoded to a specific path
 * and each bus client is part of a hardcoded "comp group".
 */
#define RDI_WM(n)			((IS_LITE ? 0 : 23) + (n))
#define RDI_COMP_GROUP(n)		((IS_LITE ? 0 : 11) + (n))

#define MAX_VFE_OUTPUT_LINES	4

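/*
 * vfe_hw_version - Read the VFE hardware version register
 * @vfe: VFE device
 *
 * Return the raw hardware version value
 */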
static u32 vfe_hw_version(struct vfe_device *vfe)
{
	u32 hw_version = readl_relaxed(vfe->base + VFE_HW_VERSION);

	u32 gen = (hw_version >> 28) & 0xF;
	u32 rev = (hw_version >> 16) & 0xFFF;
	u32 step = hw_version & 0xFFFF;

	dev_dbg(vfe->camss->dev, "VFE HW Version = %u.%u.%u\n", gen, rev, step);

	return hw_version;
}

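/*
 * vfe_global_reset - Trigger a global hardware and register reset
 * @vfe: VFE device
 *
 * Completion is signalled by the reset ack interrupt.
 */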
static void vfe_global_reset(struct vfe_device *vfe)
{
	writel_relaxed(IRQ_MASK_0_RESET_ACK, vfe->base + VFE_IRQ_MASK(0));
	writel_relaxed(GLOBAL_RESET_HW_AND_REG, vfe->base + VFE_GLOBAL_RESET_CMD);
}

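/*
 * vfe_wm_start - Configure and enable a write master for an RDI output
 * @vfe: VFE device
 * @wm: RDI index, mapped to the actual write master via RDI_WM()
 * @line: VFE line
 */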
static void vfe_wm_start(struct vfe_device *vfe, u8 wm, struct vfe_line *line)
{
	struct v4l2_pix_format_mplane *pix =
		&line->video_out.active_fmt.fmt.pix_mp;

	wm = RDI_WM(wm); /* map to actual WM used (from wm=RDI index) */

	/* no clock gating at bus input */
	writel_relaxed(WM_CGC_OVERRIDE_ALL, vfe->base + VFE_BUS_WM_CGC_OVERRIDE);

	writel_relaxed(0x0, vfe->base + VFE_BUS_WM_TEST_BUS_CTRL);

	writel_relaxed(pix->plane_fmt[0].bytesperline * pix->height,
		       vfe->base + VFE_BUS_WM_FRAME_INCR(wm));
	writel_relaxed(0xf, vfe->base + VFE_BUS_WM_BURST_LIMIT(wm));
	writel_relaxed(WM_IMAGE_CFG_0_DEFAULT_WIDTH,
		       vfe->base + VFE_BUS_WM_IMAGE_CFG_0(wm));
	writel_relaxed(pix->plane_fmt[0].bytesperline,
		       vfe->base + VFE_BUS_WM_IMAGE_CFG_2(wm));
	writel_relaxed(0, vfe->base + VFE_BUS_WM_PACKER_CFG(wm));

	/* no dropped frames, one irq per frame */
	writel_relaxed(0, vfe->base + VFE_BUS_WM_FRAMEDROP_PERIOD(wm));
	writel_relaxed(1, vfe->base + VFE_BUS_WM_FRAMEDROP_PATTERN(wm));
	writel_relaxed(0, vfe->base + VFE_BUS_WM_IRQ_SUBSAMPLE_PERIOD(wm));
	writel_relaxed(1, vfe->base + VFE_BUS_WM_IRQ_SUBSAMPLE_PATTERN(wm));

	writel_relaxed(1 << WM_CFG_EN | MODE_MIPI_RAW << WM_CFG_MODE,
		       vfe->base + VFE_BUS_WM_CFG(wm));
}

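/*
 * vfe_wm_stop - Disable a write master
 * @vfe: VFE device
 * @wm: RDI index, mapped to the actual write master via RDI_WM()
 */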
static void vfe_wm_stop(struct vfe_device *vfe, u8 wm)
{
	wm = RDI_WM(wm); /* map to actual WM used (from wm=RDI index) */
	writel_relaxed(0, vfe->base + VFE_BUS_WM_CFG(wm));
}

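/*
 * vfe_wm_update - Set the next buffer address for a write master
 * @vfe: VFE device
 * @wm: RDI index, mapped to the actual write master via RDI_WM()
 * @addr: DMA address of the buffer
 * @line: VFE line
 */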
static void vfe_wm_update(struct vfe_device *vfe, u8 wm, u32 addr,
			  struct vfe_line *line)
{
	wm = RDI_WM(wm); /* map to actual WM used (from wm=RDI index) */
	writel_relaxed(addr, vfe->base + VFE_BUS_WM_IMAGE_ADDR(wm));
}

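/*
 * vfe_reg_update - Request a register update for a VFE line
 * @vfe: VFE device
 * @line_id: VFE line
 *
 * Pending updates are tracked in vfe->reg_update and cleared again via
 * vfe_reg_update_clear().
 */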
static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
{
	vfe->reg_update |= REG_UPDATE_RDI(vfe, line_id);
	writel_relaxed(vfe->reg_update, vfe->base + VFE_REG_UPDATE_CMD);
}

static inline void vfe_reg_update_clear(struct vfe_device *vfe,
					enum vfe_line_id line_id)
{
	vfe->reg_update &= ~REG_UPDATE_RDI(vfe, line_id);
}

static void vfe_enable_irq_common(struct vfe_device *vfe)
{
	/* enable reset ack IRQ and top BUS status IRQ */
	writel_relaxed(IRQ_MASK_0_RESET_ACK | IRQ_MASK_0_BUS_TOP_IRQ,
		       vfe->base + VFE_IRQ_MASK(0));
}

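/*
 * vfe_enable_lines_irq - Enable bus IRQs for all reserved and running lines
 * @vfe: VFE device
 */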
static void vfe_enable_lines_irq(struct vfe_device *vfe)
{
	int i;
	u32 bus_irq_mask = 0;

	for (i = 0; i < MAX_VFE_OUTPUT_LINES; i++) {
		/* Enable IRQ for newly added lines, but also keep already running lines' IRQ */
		if (vfe->line[i].output.state == VFE_OUTPUT_RESERVED ||
		    vfe->line[i].output.state == VFE_OUTPUT_ON) {
			bus_irq_mask |= BUS_IRQ_MASK_0_RDI_RUP(vfe, i)
					| BUS_IRQ_MASK_0_COMP_DONE(vfe, RDI_COMP_GROUP(i));
		}
	}

	writel_relaxed(bus_irq_mask, vfe->base + VFE_BUS_IRQ_MASK(0));
}

static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id);
static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm);

/*
 * vfe_isr - VFE module interrupt handler
 * @irq: Interrupt line
 * @dev: VFE device
 *
 * Return IRQ_HANDLED on success
 */
static irqreturn_t vfe_isr(int irq, void *dev)
{
	struct vfe_device *vfe = dev;
	u32 status;
	int i;

	status = readl_relaxed(vfe->base + VFE_IRQ_STATUS(0));
	writel_relaxed(status, vfe->base + VFE_IRQ_CLEAR(0));
	writel_relaxed(IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_IRQ_CMD);

	if (status & IRQ_MASK_0_RESET_ACK)
		vfe_isr_reset_ack(vfe);

	if (status & IRQ_MASK_0_BUS_TOP_IRQ) {
		u32 bus_status = readl_relaxed(vfe->base + VFE_BUS_IRQ_STATUS(0));

		writel_relaxed(bus_status, vfe->base + VFE_BUS_IRQ_CLEAR(0));
		writel_relaxed(1, vfe->base + VFE_BUS_IRQ_CLEAR_GLOBAL);

		/* Loop through the IRQs of all write masters */
		for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++) {
			if (bus_status & BUS_IRQ_MASK_0_RDI_RUP(vfe, i))
				vfe_isr_reg_update(vfe, i);

			if (bus_status & BUS_IRQ_MASK_0_COMP_DONE(vfe, RDI_COMP_GROUP(i)))
				vfe_isr_wm_done(vfe, i);
		}
	}

	return IRQ_HANDLED;
}

/*
 * vfe_halt - Trigger halt on VFE module and wait to complete
 * @vfe: VFE device
 *
 * Return 0 on success or a negative error code otherwise
 */
static int vfe_halt(struct vfe_device *vfe)
{
	/* rely on vfe_disable_output() to stop the VFE */
	return 0;
}

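/*
 * vfe_get_output - Reserve the output and write master of a VFE line
 * @line: VFE line
 *
 * Return 0 on success or a negative error code otherwise
 */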
static int vfe_get_output(struct vfe_line *line)
{
	struct vfe_device *vfe = to_vfe(line);
	struct vfe_output *output;
	unsigned long flags;

	spin_lock_irqsave(&vfe->output_lock, flags);

	output = &line->output;
	if (output->state > VFE_OUTPUT_RESERVED) {
		dev_err(vfe->camss->dev, "Output is running\n");
		goto error;
	}

	output->wm_num = 1;

	/*
	 * Correspondence between VFE line number and WM number:
	 * line 0 -> RDI 0, line 1 -> RDI 1, line 2 -> RDI 2, line 3 -> PIX/RDI 3
	 * Note this 1:1 mapping will not work for PIX streams.
	 */
	output->wm_idx[0] = line->id;
	vfe->wm_output_map[line->id] = line->id;

	output->drop_update_idx = 0;

	spin_unlock_irqrestore(&vfe->output_lock, flags);

	return 0;

error:
	spin_unlock_irqrestore(&vfe->output_lock, flags);
	output->state = VFE_OUTPUT_OFF;

	return -EINVAL;
}

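/*
 * vfe_enable_output - Start the write master and prime up to two buffers
 * @line: VFE line
 *
 * Return 0 on success or a negative error code otherwise
 */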
static int vfe_enable_output(struct vfe_line *line)
{
	struct vfe_device *vfe = to_vfe(line);
	struct vfe_output *output = &line->output;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&vfe->output_lock, flags);

	vfe_reg_update_clear(vfe, line->id);

	if (output->state > VFE_OUTPUT_RESERVED) {
		dev_err(vfe->camss->dev, "Output is not in reserved state %d\n",
			output->state);
		spin_unlock_irqrestore(&vfe->output_lock, flags);
		return -EINVAL;
	}

	WARN_ON(output->gen2.active_num);

	output->state = VFE_OUTPUT_ON;

	output->sequence = 0;
	output->wait_reg_update = 0;
	reinit_completion(&output->reg_update);

	vfe_wm_start(vfe, output->wm_idx[0], line);

	for (i = 0; i < 2; i++) {
		output->buf[i] = vfe_buf_get_pending(output);
		if (!output->buf[i])
			break;
		output->gen2.active_num++;
		vfe_wm_update(vfe, output->wm_idx[0], output->buf[i]->addr[0], line);
	}

	vfe_reg_update(vfe, line->id);

	spin_unlock_irqrestore(&vfe->output_lock, flags);

	return 0;
}

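/*
 * vfe_disable_output - Wait for the output to go idle and stop its write masters
 * @line: VFE line
 *
 * Return 0 on success or a negative error code otherwise
 */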
static int vfe_disable_output(struct vfe_line *line)
{
	struct vfe_device *vfe = to_vfe(line);
	struct vfe_output *output = &line->output;
	unsigned long flags;
	unsigned int i;
	bool done;
	int timeout = 0;

	do {
		spin_lock_irqsave(&vfe->output_lock, flags);
		done = !output->gen2.active_num;
		spin_unlock_irqrestore(&vfe->output_lock, flags);
		usleep_range(10000, 20000);

		if (timeout++ == 100) {
			dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
			vfe_reset(vfe);
			output->gen2.active_num = 0;
			return 0;
		}
	} while (!done);

	spin_lock_irqsave(&vfe->output_lock, flags);
	for (i = 0; i < output->wm_num; i++)
		vfe_wm_stop(vfe, output->wm_idx[i]);
	spin_unlock_irqrestore(&vfe->output_lock, flags);

	return 0;
}

/*
 * vfe_enable - Enable streaming on VFE line
 * @line: VFE line
 *
 * Return 0 on success or a negative error code otherwise
 */
static int vfe_enable(struct vfe_line *line)
{
	struct vfe_device *vfe = to_vfe(line);
	int ret;

	mutex_lock(&vfe->stream_lock);

	if (!vfe->stream_count)
		vfe_enable_irq_common(vfe);

	vfe->stream_count++;

	vfe_enable_lines_irq(vfe);

	mutex_unlock(&vfe->stream_lock);

	ret = vfe_get_output(line);
	if (ret < 0)
		goto error_get_output;

	ret = vfe_enable_output(line);
	if (ret < 0)
		goto error_enable_output;

	vfe->was_streaming = 1;

	return 0;

error_enable_output:
	vfe_put_output(line);

error_get_output:
	mutex_lock(&vfe->stream_lock);

	vfe->stream_count--;

	mutex_unlock(&vfe->stream_lock);

	return ret;
}

/*
 * vfe_disable - Disable streaming on VFE line
 * @line: VFE line
 *
 * Return 0 on success or a negative error code otherwise
 */
static int vfe_disable(struct vfe_line *line)
{
	struct vfe_device *vfe = to_vfe(line);

	vfe_disable_output(line);

	vfe_put_output(line);

	mutex_lock(&vfe->stream_lock);

	vfe->stream_count--;

	mutex_unlock(&vfe->stream_lock);

	return 0;
}

/*
 * vfe_isr_reg_update - Process reg update interrupt
 * @vfe: VFE Device
 * @line_id: VFE line
 */
static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
{
	struct vfe_output *output;
	unsigned long flags;

	spin_lock_irqsave(&vfe->output_lock, flags);
	vfe_reg_update_clear(vfe, line_id);

	output = &vfe->line[line_id].output;

	if (output->wait_reg_update) {
		output->wait_reg_update = 0;
		complete(&output->reg_update);
	}

	spin_unlock_irqrestore(&vfe->output_lock, flags);
}

/*
 * vfe_isr_wm_done - Process write master done interrupt
 * @vfe: VFE Device
 * @wm: Write master id
 */
static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
{
	struct vfe_line *line = &vfe->line[vfe->wm_output_map[wm]];
	struct camss_buffer *ready_buf;
	struct vfe_output *output;
	unsigned long flags;
	u32 index;
	u64 ts = ktime_get_ns();

	spin_lock_irqsave(&vfe->output_lock, flags);

	if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
		dev_err_ratelimited(vfe->camss->dev,
				    "Received wm done for unmapped index\n");
		goto out_unlock;
	}
	output = &vfe->line[vfe->wm_output_map[wm]].output;

	ready_buf = output->buf[0];
	if (!ready_buf) {
		dev_err_ratelimited(vfe->camss->dev,
				    "Missing ready buf %d!\n", output->state);
		goto out_unlock;
	}

	ready_buf->vb.vb2_buf.timestamp = ts;
	ready_buf->vb.sequence = output->sequence++;

	index = 0;
	output->buf[0] = output->buf[1];
	if (output->buf[0])
		index = 1;

	output->buf[index] = vfe_buf_get_pending(output);

	if (output->buf[index])
		vfe_wm_update(vfe, output->wm_idx[0], output->buf[index]->addr[0], line);
	else
		output->gen2.active_num--;

	spin_unlock_irqrestore(&vfe->output_lock, flags);

	vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);

	return;

out_unlock:
	spin_unlock_irqrestore(&vfe->output_lock, flags);
}

/*
 * vfe_pm_domain_off - Disable power domains specific to this VFE.
 * @vfe: VFE Device
 */
static void vfe_pm_domain_off(struct vfe_device *vfe)
{
	struct camss *camss = vfe->camss;

	if (vfe->id >= camss->vfe_num)
		return;

	device_link_del(camss->genpd_link[vfe->id]);
}

/*
 * vfe_pm_domain_on - Enable power domains specific to this VFE.
 * @vfe: VFE Device
 */
static int vfe_pm_domain_on(struct vfe_device *vfe)
{
	struct camss *camss = vfe->camss;
	enum vfe_line_id id = vfe->id;

	if (id >= camss->vfe_num)
		return 0;

	camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id],
						DL_FLAG_STATELESS |
						DL_FLAG_PM_RUNTIME |
						DL_FLAG_RPM_ACTIVE);
	if (!camss->genpd_link[id])
		return -EINVAL;

	return 0;
}

/*
 * vfe_queue_buffer - Add empty buffer
 * @vid: Video device structure
 * @buf: Buffer to be enqueued
 *
 * Add an empty buffer - depending on the current number of buffers it will be
 * put in pending buffer queue or directly given to the hardware to be filled.
 *
 * Return 0 on success or a negative error code otherwise
 */
static int vfe_queue_buffer(struct camss_video *vid,
			    struct camss_buffer *buf)
{
	struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
	struct vfe_device *vfe = to_vfe(line);
	struct vfe_output *output;
	unsigned long flags;

	output = &line->output;

	spin_lock_irqsave(&vfe->output_lock, flags);

	if (output->state == VFE_OUTPUT_ON && output->gen2.active_num < 2) {
		output->buf[output->gen2.active_num++] = buf;
		vfe_wm_update(vfe, output->wm_idx[0], buf->addr[0], line);
	} else {
		vfe_buf_add_pending(output, buf);
	}

	spin_unlock_irqrestore(&vfe->output_lock, flags);

	return 0;
}

static const struct camss_video_ops vfe_video_ops_480 = {
	.queue_buffer = vfe_queue_buffer,
	.flush_buffers = vfe_flush_buffers,
};

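/*
 * vfe_subdev_init - Set up VFE v480 specific video ops and output line count
 * @dev: Device
 * @vfe: VFE device
 */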
static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
{
	vfe->video_ops = vfe_video_ops_480;
	vfe->line_num = MAX_VFE_OUTPUT_LINES;
}

const struct vfe_hw_ops vfe_ops_480 = {
	.global_reset = vfe_global_reset,
	.hw_version = vfe_hw_version,
	.isr = vfe_isr,
	.pm_domain_off = vfe_pm_domain_off,
	.pm_domain_on = vfe_pm_domain_on,
	.subdev_init = vfe_subdev_init,
	.vfe_disable = vfe_disable,
	.vfe_enable = vfe_enable,
	.vfe_halt = vfe_halt,
};