xref: /linux/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c (revision fcad9bbf9e1a7de6c53908954ba1b1a1ab11ef1e)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Driver for Renesas RZ/G2L CRU
4  *
5  * Copyright (C) 2022 Renesas Electronics Corp.
6  *
7  * Based on Renesas R-Car VIN
8  * Copyright (C) 2016 Renesas Electronics Corp.
9  * Copyright (C) 2011-2013 Renesas Solutions Corp.
10  * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
11  * Copyright (C) 2008 Magnus Damm
12  */
13 
14 #include <linux/clk.h>
15 #include <linux/delay.h>
16 #include <linux/pm_runtime.h>
17 
18 #include <media/mipi-csi2.h>
19 #include <media/v4l2-ioctl.h>
20 #include <media/videobuf2-dma-contig.h>
21 
22 #include "rzg2l-cru.h"
23 #include "rzg2l-cru-regs.h"
24 
25 #define RZG2L_TIMEOUT_MS		100
26 #define RZG2L_RETRIES			10
27 
28 #define RZG2L_CRU_DEFAULT_FORMAT	V4L2_PIX_FMT_UYVY
29 #define RZG2L_CRU_DEFAULT_WIDTH		RZG2L_CRU_MIN_INPUT_WIDTH
30 #define RZG2L_CRU_DEFAULT_HEIGHT	RZG2L_CRU_MIN_INPUT_HEIGHT
31 #define RZG2L_CRU_DEFAULT_FIELD		V4L2_FIELD_NONE
32 #define RZG2L_CRU_DEFAULT_COLORSPACE	V4L2_COLORSPACE_SRGB
33 
34 #define RZG2L_CRU_STRIDE_MAX		32640
35 #define RZG2L_CRU_STRIDE_ALIGN		128
36 
37 struct rzg2l_cru_buffer {
38 	struct vb2_v4l2_buffer vb;
39 	struct list_head list;
40 };
41 
42 #define to_buf_list(vb2_buffer) \
43 	(&container_of(vb2_buffer, struct rzg2l_cru_buffer, vb)->list)
44 
45 /* -----------------------------------------------------------------------------
46  * DMA operations
47  */
48 static void __rzg2l_cru_write(struct rzg2l_cru_dev *cru, u32 offset, u32 value)
49 {
50 	const u16 *regs = cru->info->regs;
51 
52 	/*
53 	 * CRUnCTRL is the first register (offset 0) on all supported SoCs, so for
54 	 * any other register a zero entry in cru->info->regs marks it as absent.
55 	 */
56 	if (WARN_ON(offset >= RZG2L_CRU_MAX_REG) ||
57 	    WARN_ON(offset != CRUnCTRL && regs[offset] == 0))
58 		return;
59 
60 	iowrite32(value, cru->base + regs[offset]);
61 }
62 
63 static u32 __rzg2l_cru_read(struct rzg2l_cru_dev *cru, u32 offset)
64 {
65 	const u16 *regs = cru->info->regs;
66 
67 	/*
68 	 * CRUnCTRL is the first register (offset 0) on all supported SoCs, so for
69 	 * any other register a zero entry in cru->info->regs marks it as absent.
70 	 */
71 	if (WARN_ON(offset >= RZG2L_CRU_MAX_REG) ||
72 	    WARN_ON(offset != CRUnCTRL && regs[offset] == 0))
73 		return 0;
74 
75 	return ioread32(cru->base + regs[offset]);
76 }
77 
78 static __always_inline void
79 __rzg2l_cru_write_constant(struct rzg2l_cru_dev *cru, u32 offset, u32 value)
80 {
81 	const u16 *regs = cru->info->regs;
82 
83 	BUILD_BUG_ON(offset >= RZG2L_CRU_MAX_REG);
84 
85 	iowrite32(value, cru->base + regs[offset]);
86 }
87 
88 static __always_inline u32
89 __rzg2l_cru_read_constant(struct rzg2l_cru_dev *cru, u32 offset)
90 {
91 	const u16 *regs = cru->info->regs;
92 
93 	BUILD_BUG_ON(offset >= RZG2L_CRU_MAX_REG);
94 
95 	return ioread32(cru->base + regs[offset]);
96 }
97 
98 #define rzg2l_cru_write(cru, offset, value) \
99 	(__builtin_constant_p(offset) ? \
100 	 __rzg2l_cru_write_constant(cru, offset, value) : \
101 	 __rzg2l_cru_write(cru, offset, value))
102 
103 #define rzg2l_cru_read(cru, offset) \
104 	(__builtin_constant_p(offset) ? \
105 	 __rzg2l_cru_read_constant(cru, offset) : \
106 	 __rzg2l_cru_read(cru, offset))
107 
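/*
 * Illustrative note: with a compile-time constant offset such as CRUnCTRL,
 * __builtin_constant_p() picks the *_constant helpers above and the range
 * check collapses into a BUILD_BUG_ON(); a runtime offset, e.g.
 * AMnMBxADDRL(slot) with a variable slot, falls back to the
 * WARN_ON()-guarded accessors.
 */
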
108 /* Need to hold qlock before calling */
109 static void return_unused_buffers(struct rzg2l_cru_dev *cru,
110 				  enum vb2_buffer_state state)
111 {
112 	struct rzg2l_cru_buffer *buf, *node;
113 	unsigned long flags;
114 	unsigned int i;
115 
116 	spin_lock_irqsave(&cru->qlock, flags);
117 	for (i = 0; i < cru->num_buf; i++) {
118 		if (cru->queue_buf[i]) {
119 			vb2_buffer_done(&cru->queue_buf[i]->vb2_buf,
120 					state);
121 			cru->queue_buf[i] = NULL;
122 		}
123 	}
124 
125 	list_for_each_entry_safe(buf, node, &cru->buf_list, list) {
126 		vb2_buffer_done(&buf->vb.vb2_buf, state);
127 		list_del(&buf->list);
128 	}
129 	spin_unlock_irqrestore(&cru->qlock, flags);
130 }
131 
132 static int rzg2l_cru_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
133 				 unsigned int *nplanes, unsigned int sizes[],
134 				 struct device *alloc_devs[])
135 {
136 	struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vq);
137 
138 	/* Make sure the image size is large enough. */
139 	if (*nplanes)
140 		return sizes[0] < cru->format.sizeimage ? -EINVAL : 0;
141 
142 	*nplanes = 1;
143 	sizes[0] = cru->format.sizeimage;
144 
145 	return 0;
146 }
147 
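/*
 * Note on rzg2l_cru_queue_setup() above (illustrative): when *nplanes is
 * zero (the VIDIOC_REQBUFS path) the driver reports a single plane of
 * format.sizeimage bytes; when userspace supplies its own sizes (the
 * VIDIOC_CREATE_BUFS path) they are only validated against the currently
 * configured image size.
 */
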
148 static int rzg2l_cru_buffer_prepare(struct vb2_buffer *vb)
149 {
150 	struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vb->vb2_queue);
151 	unsigned long size = cru->format.sizeimage;
152 
153 	if (vb2_plane_size(vb, 0) < size) {
154 		dev_err(cru->dev, "buffer too small (%lu < %lu)\n",
155 			vb2_plane_size(vb, 0), size);
156 		return -EINVAL;
157 	}
158 
159 	vb2_set_plane_payload(vb, 0, size);
160 
161 	return 0;
162 }
163 
164 static void rzg2l_cru_buffer_queue(struct vb2_buffer *vb)
165 {
166 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
167 	struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vb->vb2_queue);
168 	unsigned long flags;
169 
170 	spin_lock_irqsave(&cru->qlock, flags);
171 
172 	list_add_tail(to_buf_list(vbuf), &cru->buf_list);
173 
174 	spin_unlock_irqrestore(&cru->qlock, flags);
175 }
176 
177 static void rzg2l_cru_set_slot_addr(struct rzg2l_cru_dev *cru,
178 				    int slot, dma_addr_t addr)
179 {
180 	/*
181 	 * The address needs to be 512-byte aligned. The driver should never have
182 	 * accepted a configuration that violates this in the first place.
183 	 */
184 	if (WARN_ON((addr) & RZG2L_CRU_HW_BUFFER_MASK))
185 		return;
186 
187 	/* Only 32-bit buffer addresses are used for now, so the high word is 0. */
188 	rzg2l_cru_write(cru, AMnMBxADDRL(slot), addr);
189 	rzg2l_cru_write(cru, AMnMBxADDRH(slot), 0);
190 
191 	cru->buf_addr[slot] = addr;
192 }
193 
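/*
 * Illustrative example for rzg2l_cru_set_slot_addr() above, assuming
 * RZG2L_CRU_HW_BUFFER_MASK covers the low nine address bits: a DMA address
 * of 0x58000200 passes the 512-byte alignment check, while 0x58000100
 * would trip the WARN_ON() and be ignored.
 */
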
194 /*
195  * Moves a buffer from the queue to the HW slot. If no buffer is
196  * available, use the scratch buffer. The scratch buffer is never
197  * returned to userspace; its only purpose is to keep the capture
198  * loop running.
199  */
200 static void rzg2l_cru_fill_hw_slot(struct rzg2l_cru_dev *cru, int slot)
201 {
202 	struct vb2_v4l2_buffer *vbuf;
203 	struct rzg2l_cru_buffer *buf;
204 	dma_addr_t phys_addr;
205 
206 	/* An already populated slot must never be overwritten. */
207 	if (WARN_ON(cru->queue_buf[slot]))
208 		return;
209 
210 	dev_dbg(cru->dev, "Filling HW slot: %d\n", slot);
211 
212 	if (list_empty(&cru->buf_list)) {
213 		cru->queue_buf[slot] = NULL;
214 		phys_addr = cru->scratch_phys;
215 	} else {
216 		/* Keep track of buffer we give to HW */
217 		buf = list_entry(cru->buf_list.next,
218 				 struct rzg2l_cru_buffer, list);
219 		vbuf = &buf->vb;
220 		list_del_init(to_buf_list(vbuf));
221 		cru->queue_buf[slot] = vbuf;
222 
223 		/* Setup DMA */
224 		phys_addr = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
225 	}
226 
227 	rzg2l_cru_set_slot_addr(cru, slot, phys_addr);
228 }
229 
230 static void rzg2l_cru_initialize_axi(struct rzg2l_cru_dev *cru)
231 {
232 	const struct rzg2l_cru_info *info = cru->info;
233 	unsigned int slot;
234 	u32 amnaxiattr;
235 
236 	/*
237 	 * Set the image data memory banks: banks 0 to (num_buf - 1) are
238 	 * marked as valid.
239 	 */
240 	rzg2l_cru_write(cru, AMnMBVALID, AMnMBVALID_MBVALID(cru->num_buf - 1));
241 
242 	for (slot = 0; slot < cru->num_buf; slot++)
243 		rzg2l_cru_fill_hw_slot(cru, slot);
244 
245 	if (info->has_stride) {
246 		u32 stride = cru->format.bytesperline;
247 		u32 amnis;
248 
249 		stride /= RZG2L_CRU_STRIDE_ALIGN;
250 		amnis = rzg2l_cru_read(cru, AMnIS) & ~AMnIS_IS_MASK;
251 		rzg2l_cru_write(cru, AMnIS, amnis | AMnIS_IS(stride));
252 	}
253 
254 	/* Set AXI burst max length to recommended setting */
255 	amnaxiattr = rzg2l_cru_read(cru, AMnAXIATTR) & ~AMnAXIATTR_AXILEN_MASK;
256 	amnaxiattr |= AMnAXIATTR_AXILEN;
257 	rzg2l_cru_write(cru, AMnAXIATTR, amnaxiattr);
258 }
259 
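/*
 * Stride example for rzg2l_cru_initialize_axi() above (illustrative): a
 * 1920-pixel-wide UYVY capture has bytesperline = 3840, so on SoCs with
 * has_stride the AMnIS field is programmed with
 * 3840 / RZG2L_CRU_STRIDE_ALIGN = 30.
 */
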
260 void rzg3e_cru_csi2_setup(struct rzg2l_cru_dev *cru,
261 			  const struct rzg2l_cru_ip_format *ip_fmt,
262 			  u8 csi_vc)
263 {
264 	const struct rzg2l_cru_info *info = cru->info;
265 	u32 icnmc = ICnMC_INF(ip_fmt->datatype);
266 
267 	icnmc |= rzg2l_cru_read(cru, info->image_conv) & ~ICnMC_INF_MASK;
268 
269 	/* Select the CSI-2 virtual channel */
270 	icnmc |= ICnMC_VCSEL(csi_vc);
271 
272 	rzg2l_cru_write(cru, ICnSVCNUM, csi_vc);
273 	rzg2l_cru_write(cru, ICnSVC, ICnSVC_SVC0(0) | ICnSVC_SVC1(1) |
274 			ICnSVC_SVC2(2) | ICnSVC_SVC3(3));
275 	rzg2l_cru_write(cru, info->image_conv, icnmc);
276 }
277 
278 void rzg2l_cru_csi2_setup(struct rzg2l_cru_dev *cru,
279 			  const struct rzg2l_cru_ip_format *ip_fmt,
280 			  u8 csi_vc)
281 {
282 	const struct rzg2l_cru_info *info = cru->info;
283 	u32 icnmc = ICnMC_INF(ip_fmt->datatype);
284 
285 	icnmc |= rzg2l_cru_read(cru, info->image_conv) & ~ICnMC_INF_MASK;
286 
287 	/* Select the CSI-2 virtual channel */
288 	icnmc |= ICnMC_VCSEL(csi_vc);
289 
290 	rzg2l_cru_write(cru, info->image_conv, icnmc);
291 }
292 
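/*
 * Note: compared with rzg3e_cru_csi2_setup(), the RZ/G2L variant above only
 * selects the CSI-2 virtual channel through ICnMC_VCSEL and does not
 * program ICnSVCNUM/ICnSVC.
 */
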
293 static int rzg2l_cru_initialize_image_conv(struct rzg2l_cru_dev *cru,
294 					   struct v4l2_mbus_framefmt *ip_sd_fmt,
295 					   u8 csi_vc)
296 {
297 	const struct rzg2l_cru_info *info = cru->info;
298 	const struct rzg2l_cru_ip_format *cru_video_fmt;
299 	const struct rzg2l_cru_ip_format *cru_ip_fmt;
300 
301 	cru_ip_fmt = rzg2l_cru_ip_code_to_fmt(ip_sd_fmt->code);
302 	info->csi_setup(cru, cru_ip_fmt, csi_vc);
303 
304 	/* Output format */
305 	cru_video_fmt = rzg2l_cru_ip_format_to_fmt(cru->format.pixelformat);
306 	if (!cru_video_fmt) {
307 		dev_err(cru->dev, "Invalid pixelformat (0x%x)\n",
308 			cru->format.pixelformat);
309 		return -EINVAL;
310 	}
311 
312 	/* If input and output use same colorspace, do bypass mode */
313 	/* If input and output use the same colorspace, enable bypass mode */
314 		rzg2l_cru_write(cru, info->image_conv,
315 				rzg2l_cru_read(cru, info->image_conv) | ICnMC_CSCTHR);
316 	else
317 		rzg2l_cru_write(cru, info->image_conv,
318 				rzg2l_cru_read(cru, info->image_conv) & ~ICnMC_CSCTHR);
319 
320 	/* Set output data format */
321 	rzg2l_cru_write(cru, ICnDMR, cru_video_fmt->icndmr);
322 
323 	return 0;
324 }
325 
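/*
 * Example for rzg2l_cru_initialize_image_conv() above (illustrative):
 * capturing a YUV media bus code such as MEDIA_BUS_FMT_UYVY8_1X16 into
 * V4L2_PIX_FMT_UYVY keeps both sides YUV, so ICnMC_CSCTHR is set and the
 * colour space converter is bypassed; capturing the same input into an RGB
 * pixel format clears the bit and enables conversion.
 */
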
326 bool rz3e_fifo_empty(struct rzg2l_cru_dev *cru)
327 {
328 	u32 amnfifopntr = rzg2l_cru_read(cru, AMnFIFOPNTR);
329 
330 	return (((amnfifopntr & AMnFIFOPNTR_FIFORPNTR_B1) >> 24) ==
331 		((amnfifopntr & AMnFIFOPNTR_FIFOWPNTR_B1) >> 8)) &&
332 	       (((amnfifopntr & AMnFIFOPNTR_FIFORPNTR_B0) >> 16) ==
333 		(amnfifopntr & AMnFIFOPNTR_FIFOWPNTR_B0));
337 }
338 
339 bool rzg2l_fifo_empty(struct rzg2l_cru_dev *cru)
340 {
341 	u32 amnfifopntr, amnfifopntr_w, amnfifopntr_r_y;
342 
343 	amnfifopntr = rzg2l_cru_read(cru, AMnFIFOPNTR);
344 
345 	amnfifopntr_w = amnfifopntr & AMnFIFOPNTR_FIFOWPNTR;
346 	amnfifopntr_r_y = (amnfifopntr & AMnFIFOPNTR_FIFORPNTR_Y) >> 16;
347 
348 	return amnfifopntr_w == amnfifopntr_r_y;
352 }
353 
354 void rzg2l_cru_stop_image_processing(struct rzg2l_cru_dev *cru)
355 {
356 	unsigned int retries = 0;
357 	unsigned long flags;
358 	u32 icnms;
359 
360 	spin_lock_irqsave(&cru->qlock, flags);
361 
362 	/* Disable and clear the interrupt */
363 	cru->info->disable_interrupts(cru);
364 
365 	/* Stop the operation of image conversion */
366 	rzg2l_cru_write(cru, ICnEN, 0);
367 
368 	/* Wait for streaming to stop */
369 	while ((rzg2l_cru_read(cru, ICnMS) & ICnMS_IA) && retries++ < RZG2L_RETRIES) {
370 		spin_unlock_irqrestore(&cru->qlock, flags);
371 		msleep(RZG2L_TIMEOUT_MS);
372 		spin_lock_irqsave(&cru->qlock, flags);
373 	}
374 
375 	icnms = rzg2l_cru_read(cru, ICnMS) & ICnMS_IA;
376 	if (icnms)
377 		dev_err(cru->dev, "Failed to stop HW, something is seriously broken\n");
378 
379 	cru->state = RZG2L_CRU_DMA_STOPPED;
380 
381 	/* Wait until the FIFO becomes empty */
382 	for (retries = 5; retries > 0; retries--) {
383 		if (cru->info->fifo_empty(cru))
384 			break;
385 
386 		usleep_range(10, 20);
387 	}
388 
389 	/* Report if the FIFO did not drain */
390 	if (!retries)
391 		dev_err(cru->dev, "Failed to empty FIFO\n");
392 
393 	/* Stop AXI bus */
394 	rzg2l_cru_write(cru, AMnAXISTP, AMnAXISTP_AXI_STOP);
395 
396 	/* Wait until the AXI bus has stopped */
397 	for (retries = 5; retries > 0; retries--) {
398 		if (rzg2l_cru_read(cru, AMnAXISTPACK) &
399 			AMnAXISTPACK_AXI_STOP_ACK)
400 			break;
401 
402 		usleep_range(10, 20);
403 	}
404 
405 	/* Report if the AXI bus could not be stopped */
406 	if (!retries)
407 		dev_err(cru->dev, "Failed to stop AXI bus\n");
408 
409 	/* Cancel the AXI bus stop request */
410 	rzg2l_cru_write(cru, AMnAXISTP, 0);
411 
412 	/* Reset the CRU (AXI-master) */
413 	reset_control_assert(cru->aresetn);
414 
415 	/* Reset the image processing module */
416 	rzg2l_cru_write(cru, CRUnRST, 0);
417 
418 	spin_unlock_irqrestore(&cru->qlock, flags);
419 }
420 
421 static int rzg2l_cru_get_virtual_channel(struct rzg2l_cru_dev *cru)
422 {
423 	struct v4l2_mbus_frame_desc fd = { };
424 	struct media_pad *remote_pad;
425 	int ret;
426 
427 	remote_pad = media_pad_remote_pad_unique(&cru->ip.pads[RZG2L_CRU_IP_SINK]);
428 	ret = v4l2_subdev_call(cru->ip.remote, pad, get_frame_desc, remote_pad->index, &fd);
429 	if (ret < 0 && ret != -ENOIOCTLCMD) {
430 		dev_err(cru->dev, "get_frame_desc failed on IP remote subdev\n");
431 		return ret;
432 	}
433 	/* If the remote subdev does not implement .get_frame_desc, default to VC0. */
434 	if (ret == -ENOIOCTLCMD)
435 		return 0;
436 
437 	if (fd.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
438 		dev_err(cru->dev, "get_frame_desc returned invalid bus type %d\n", fd.type);
439 		return -EINVAL;
440 	}
441 
442 	if (!fd.num_entries) {
443 		dev_err(cru->dev, "get_frame_desc returned zero entries\n");
444 		return -EINVAL;
445 	}
446 
447 	return fd.entry[0].bus.csi2.vc;
448 }
449 
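/*
 * Illustrative example for rzg2l_cru_get_virtual_channel() above: if the
 * connected CSI-2 subdev reports entry[0].bus.csi2.vc == 2 in its frame
 * descriptor, capture listens on virtual channel 2; a subdev without
 * .get_frame_desc ends up on VC0.
 */
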
450 void rzg3e_cru_enable_interrupts(struct rzg2l_cru_dev *cru)
451 {
452 	rzg2l_cru_write(cru, CRUnIE2, CRUnIE2_FSxE(cru->svc_channel));
453 	rzg2l_cru_write(cru, CRUnIE2, CRUnIE2_FExE(cru->svc_channel));
454 }
455 
456 void rzg3e_cru_disable_interrupts(struct rzg2l_cru_dev *cru)
457 {
458 	rzg2l_cru_write(cru, CRUnIE, 0);
459 	rzg2l_cru_write(cru, CRUnIE2, 0);
460 	rzg2l_cru_write(cru, CRUnINTS, rzg2l_cru_read(cru, CRUnINTS));
461 	rzg2l_cru_write(cru, CRUnINTS2, rzg2l_cru_read(cru, CRUnINTS2));
462 }
463 
464 void rzg2l_cru_enable_interrupts(struct rzg2l_cru_dev *cru)
465 {
466 	rzg2l_cru_write(cru, CRUnIE, CRUnIE_EFE);
467 }
468 
469 void rzg2l_cru_disable_interrupts(struct rzg2l_cru_dev *cru)
470 {
471 	rzg2l_cru_write(cru, CRUnIE, 0);
472 	rzg2l_cru_write(cru, CRUnINTS, 0x001f000f);
473 }
474 
475 int rzg2l_cru_start_image_processing(struct rzg2l_cru_dev *cru)
476 {
477 	struct v4l2_mbus_framefmt *fmt = rzg2l_cru_ip_get_src_fmt(cru);
478 	unsigned long flags;
479 	u8 csi_vc;
480 	int ret;
481 
482 	ret = rzg2l_cru_get_virtual_channel(cru);
483 	if (ret < 0)
484 		return ret;
485 	csi_vc = ret;
486 	cru->svc_channel = csi_vc;
487 
488 	spin_lock_irqsave(&cru->qlock, flags);
489 
490 	/* Select a video input */
491 	rzg2l_cru_write(cru, CRUnCTRL, CRUnCTRL_VINSEL(0));
492 
493 	/* Cancel the software reset for image processing block */
494 	rzg2l_cru_write(cru, CRUnRST, CRUnRST_VRESETN);
495 
496 	/* Disable and clear the interrupt before using */
497 	cru->info->disable_interrupts(cru);
498 
499 	/* Initialize the AXI master */
500 	rzg2l_cru_initialize_axi(cru);
501 
502 	/* Initialize image convert */
503 	ret = rzg2l_cru_initialize_image_conv(cru, fmt, csi_vc);
504 	if (ret) {
505 		spin_unlock_irqrestore(&cru->qlock, flags);
506 		return ret;
507 	}
508 
509 	/* Enable interrupt */
510 	cru->info->enable_interrupts(cru);
511 
512 	/* Enable image processing reception */
513 	rzg2l_cru_write(cru, ICnEN, ICnEN_ICEN);
514 
515 	spin_unlock_irqrestore(&cru->qlock, flags);
516 
517 	return 0;
518 }
519 
520 static int rzg2l_cru_set_stream(struct rzg2l_cru_dev *cru, int on)
521 {
522 	struct media_pipeline *pipe;
523 	struct v4l2_subdev *sd;
524 	struct media_pad *pad;
525 	int ret;
526 
527 	pad = media_pad_remote_pad_first(&cru->pad);
528 	if (!pad)
529 		return -EPIPE;
530 
531 	sd = media_entity_to_v4l2_subdev(pad->entity);
532 
533 	if (!on) {
534 		int stream_off_ret = 0;
535 
536 		ret = v4l2_subdev_call(sd, video, s_stream, 0);
537 		if (ret)
538 			stream_off_ret = ret;
539 
540 		ret = v4l2_subdev_call(sd, video, post_streamoff);
541 		if (ret == -ENOIOCTLCMD)
542 			ret = 0;
543 		if (ret && !stream_off_ret)
544 			stream_off_ret = ret;
545 
546 		video_device_pipeline_stop(&cru->vdev);
547 
548 		return stream_off_ret;
549 	}
550 
551 	pipe = media_entity_pipeline(&sd->entity) ? : &cru->vdev.pipe;
552 	ret = video_device_pipeline_start(&cru->vdev, pipe);
553 	if (ret)
554 		return ret;
555 
556 	ret = v4l2_subdev_call(sd, video, pre_streamon, 0);
557 	if (ret && ret != -ENOIOCTLCMD)
558 		goto pipe_line_stop;
559 
560 	ret = v4l2_subdev_call(sd, video, s_stream, 1);
561 	if (ret && ret != -ENOIOCTLCMD)
562 		goto err_s_stream;
563 
564 	return 0;
565 
566 err_s_stream:
567 	v4l2_subdev_call(sd, video, post_streamoff);
568 
569 pipe_line_stop:
570 	video_device_pipeline_stop(&cru->vdev);
571 
572 	return ret;
573 }
574 
575 static void rzg2l_cru_stop_streaming(struct rzg2l_cru_dev *cru)
576 {
577 	cru->state = RZG2L_CRU_DMA_STOPPING;
578 
579 	rzg2l_cru_set_stream(cru, 0);
580 }
581 
582 irqreturn_t rzg2l_cru_irq(int irq, void *data)
583 {
584 	struct rzg2l_cru_dev *cru = data;
585 	unsigned int handled = 0;
586 	unsigned long flags;
587 	u32 irq_status;
588 	u32 amnmbs;
589 	int slot;
590 
591 	spin_lock_irqsave(&cru->qlock, flags);
592 
593 	irq_status = rzg2l_cru_read(cru, CRUnINTS);
594 	if (!irq_status)
595 		goto done;
596 
597 	handled = 1;
598 
599 	rzg2l_cru_write(cru, CRUnINTS, rzg2l_cru_read(cru, CRUnINTS));
600 
601 	/* Nothing to do if capture status is 'RZG2L_CRU_DMA_STOPPED' */
602 	if (cru->state == RZG2L_CRU_DMA_STOPPED) {
603 		dev_dbg(cru->dev, "IRQ while state stopped\n");
604 		goto done;
605 	}
606 
607 	/* While stopping, just log frame-start interrupts; no frame is handled. */
608 	if (cru->state == RZG2L_CRU_DMA_STOPPING) {
609 		if (irq_status & CRUnINTS_SFS)
610 			dev_dbg(cru->dev, "IRQ while state stopping\n");
611 		goto done;
612 	}
613 
614 	/* Prepare for capture and update state */
615 	amnmbs = rzg2l_cru_read(cru, AMnMBS);
616 	slot = amnmbs & AMnMBS_MBSTS;
617 
618 	/*
619 	 * AMnMBS.MBSTS indicates the destination of Memory Bank (MB).
620 	 * Recalculate to get the current transfer complete MB.
621 	 */
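	/*
	 * For example (illustrative, with num_buf == 4): MBSTS == 0 means the
	 * next transfer targets bank 0, so the bank that just completed is
	 * bank 3 (num_buf - 1); MBSTS == 2 means bank 1 just completed.
	 */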
622 	if (slot == 0)
623 		slot = cru->num_buf - 1;
624 	else
625 		slot--;
626 
627 	/*
628 	 * To hand buffers back to userspace in a known order, start
629 	 * capturing from slot 0 first.
630 	 */
631 	if (cru->state == RZG2L_CRU_DMA_STARTING) {
632 		if (slot != 0) {
633 			dev_dbg(cru->dev, "Starting sync slot: %d\n", slot);
634 			goto done;
635 		}
636 
637 		dev_dbg(cru->dev, "Capture start synced!\n");
638 		cru->state = RZG2L_CRU_DMA_RUNNING;
639 	}
640 
641 	/* Capture frame */
642 	if (cru->queue_buf[slot]) {
643 		cru->queue_buf[slot]->field = cru->format.field;
644 		cru->queue_buf[slot]->sequence = cru->sequence;
645 		cru->queue_buf[slot]->vb2_buf.timestamp = ktime_get_ns();
646 		vb2_buffer_done(&cru->queue_buf[slot]->vb2_buf,
647 				VB2_BUF_STATE_DONE);
648 		cru->queue_buf[slot] = NULL;
649 	} else {
650 		/* Scratch buffer was used, dropping frame. */
651 		dev_dbg(cru->dev, "Dropping frame %u\n", cru->sequence);
652 	}
653 
654 	cru->sequence++;
655 
656 	/* Prepare for next frame */
657 	rzg2l_cru_fill_hw_slot(cru, slot);
658 
659 done:
660 	spin_unlock_irqrestore(&cru->qlock, flags);
661 
662 	return IRQ_RETVAL(handled);
663 }
664 
665 static int rzg3e_cru_get_current_slot(struct rzg2l_cru_dev *cru)
666 {
667 	u64 amnmadrs;
668 	int slot;
669 
670 	/*
671 	 * Reading AMnMADRSL latches the higher-order address bits into
672 	 * AMnMADRSH, so AMnMADRSH must be read after AMnMADRSL.
673 	 */
676 	amnmadrs = rzg2l_cru_read(cru, AMnMADRSL);
677 	amnmadrs |= (u64)rzg2l_cru_read(cru, AMnMADRSH) << 32;
678 
679 	/* Ensure amnmadrs is within this buffer range */
680 	for (slot = 0; slot < cru->num_buf; slot++) {
681 		if (amnmadrs >= cru->buf_addr[slot] &&
682 		    amnmadrs < cru->buf_addr[slot] + cru->format.sizeimage)
683 			return slot;
684 	}
685 
686 	dev_err(cru->dev, "Invalid MB address 0x%llx (out of range)\n", amnmadrs);
687 	return -EINVAL;
688 }
689 
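/*
 * Illustrative example for rzg3e_cru_get_current_slot() above: with two
 * 614400-byte buffers at 0x58000000 and 0x58100000, a latched address of
 * 0x58101000 falls inside the second buffer, so slot 1 is reported as the
 * bank that was just written.
 */
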
690 irqreturn_t rzg3e_cru_irq(int irq, void *data)
691 {
692 	struct rzg2l_cru_dev *cru = data;
693 	u32 irq_status;
694 	int slot;
695 
696 	scoped_guard(spinlock, &cru->qlock) {
697 		irq_status = rzg2l_cru_read(cru, CRUnINTS2);
698 		if (!irq_status)
699 			return IRQ_NONE;
700 
701 		dev_dbg(cru->dev, "CRUnINTS2 0x%x\n", irq_status);
702 
703 		rzg2l_cru_write(cru, CRUnINTS2, rzg2l_cru_read(cru, CRUnINTS2));
704 
705 		/* Nothing to do if capture status is 'RZG2L_CRU_DMA_STOPPED' */
706 		if (cru->state == RZG2L_CRU_DMA_STOPPED) {
707 			dev_dbg(cru->dev, "IRQ while state stopped\n");
708 			return IRQ_HANDLED;
709 		}
710 
711 		if (cru->state == RZG2L_CRU_DMA_STOPPING) {
712 			if (irq_status & CRUnINTS2_FSxS(0) ||
713 			    irq_status & CRUnINTS2_FSxS(1) ||
714 			    irq_status & CRUnINTS2_FSxS(2) ||
715 			    irq_status & CRUnINTS2_FSxS(3))
716 				dev_dbg(cru->dev, "IRQ while state stopping\n");
717 			return IRQ_HANDLED;
718 		}
719 
720 		slot = rzg3e_cru_get_current_slot(cru);
721 		if (slot < 0)
722 			return IRQ_HANDLED;
723 
724 		dev_dbg(cru->dev, "Current written slot: %d\n", slot);
725 		cru->buf_addr[slot] = 0;
726 
727 		/*
728 		 * To hand buffers back to userspace in a known order, start
729 		 * capturing from slot 0 first.
730 		 */
731 		if (cru->state == RZG2L_CRU_DMA_STARTING) {
732 			if (slot != 0) {
733 				dev_dbg(cru->dev, "Starting sync slot: %d\n", slot);
734 				return IRQ_HANDLED;
735 			}
736 			dev_dbg(cru->dev, "Capture start synced!\n");
737 			cru->state = RZG2L_CRU_DMA_RUNNING;
738 		}
739 
740 		/* Capture frame */
741 		if (cru->queue_buf[slot]) {
742 			struct vb2_v4l2_buffer *buf = cru->queue_buf[slot];
743 
744 			buf->field = cru->format.field;
745 			buf->sequence = cru->sequence;
746 			buf->vb2_buf.timestamp = ktime_get_ns();
747 			vb2_buffer_done(&buf->vb2_buf, VB2_BUF_STATE_DONE);
748 			cru->queue_buf[slot] = NULL;
749 		} else {
750 			/* Scratch buffer was used, dropping frame. */
751 			dev_dbg(cru->dev, "Dropping frame %u\n", cru->sequence);
752 		}
753 
754 		cru->sequence++;
755 
756 		/* Prepare for next frame */
757 		rzg2l_cru_fill_hw_slot(cru, slot);
758 	}
759 
760 	return IRQ_HANDLED;
761 }
762 
763 static int rzg2l_cru_start_streaming_vq(struct vb2_queue *vq, unsigned int count)
764 {
765 	struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vq);
766 	int ret;
767 
768 	ret = pm_runtime_resume_and_get(cru->dev);
769 	if (ret)
770 		return ret;
771 
772 	ret = clk_prepare_enable(cru->vclk);
773 	if (ret)
774 		goto err_pm_put;
775 
776 	/* Release reset state */
777 	ret = reset_control_deassert(cru->aresetn);
778 	if (ret) {
779 		dev_err(cru->dev, "failed to deassert aresetn\n");
780 		goto err_vclk_disable;
781 	}
782 
783 	ret = reset_control_deassert(cru->presetn);
784 	if (ret) {
785 		reset_control_assert(cru->aresetn);
786 		dev_err(cru->dev, "failed to deassert presetn\n");
787 		goto assert_aresetn;
788 	}
789 
790 	/* Allocate scratch buffer */
791 	cru->scratch = dma_alloc_coherent(cru->dev, cru->format.sizeimage,
792 					  &cru->scratch_phys, GFP_KERNEL);
793 	if (!cru->scratch) {
794 		return_unused_buffers(cru, VB2_BUF_STATE_QUEUED);
795 		dev_err(cru->dev, "Failed to allocate scratch buffer\n");
796 		ret = -ENOMEM;
797 		goto assert_presetn;
798 	}
799 
800 	cru->sequence = 0;
801 
802 	ret = rzg2l_cru_set_stream(cru, 1);
803 	if (ret) {
804 		return_unused_buffers(cru, VB2_BUF_STATE_QUEUED);
805 		goto out;
806 	}
807 
808 	cru->state = RZG2L_CRU_DMA_STARTING;
809 	dev_dbg(cru->dev, "Starting to capture\n");
810 	return 0;
811 
812 out:
813 	if (ret)
814 		dma_free_coherent(cru->dev, cru->format.sizeimage, cru->scratch,
815 				  cru->scratch_phys);
816 assert_presetn:
817 	reset_control_assert(cru->presetn);
818 
819 assert_aresetn:
820 	reset_control_assert(cru->aresetn);
821 
822 err_vclk_disable:
823 	clk_disable_unprepare(cru->vclk);
824 
825 err_pm_put:
826 	pm_runtime_put_sync(cru->dev);
827 
828 	return ret;
829 }
830 
831 static void rzg2l_cru_stop_streaming_vq(struct vb2_queue *vq)
832 {
833 	struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vq);
834 
835 	rzg2l_cru_stop_streaming(cru);
836 
837 	/* Free scratch buffer */
838 	dma_free_coherent(cru->dev, cru->format.sizeimage,
839 			  cru->scratch, cru->scratch_phys);
840 
841 	return_unused_buffers(cru, VB2_BUF_STATE_ERROR);
842 
843 	reset_control_assert(cru->presetn);
844 	clk_disable_unprepare(cru->vclk);
845 	pm_runtime_put_sync(cru->dev);
846 }
847 
848 static const struct vb2_ops rzg2l_cru_qops = {
849 	.queue_setup		= rzg2l_cru_queue_setup,
850 	.buf_prepare		= rzg2l_cru_buffer_prepare,
851 	.buf_queue		= rzg2l_cru_buffer_queue,
852 	.start_streaming	= rzg2l_cru_start_streaming_vq,
853 	.stop_streaming		= rzg2l_cru_stop_streaming_vq,
854 };
855 
856 void rzg2l_cru_dma_unregister(struct rzg2l_cru_dev *cru)
857 {
858 	mutex_destroy(&cru->lock);
859 
860 	v4l2_device_unregister(&cru->v4l2_dev);
861 	vb2_queue_release(&cru->queue);
862 }
863 
864 int rzg2l_cru_dma_register(struct rzg2l_cru_dev *cru)
865 {
866 	struct vb2_queue *q = &cru->queue;
867 	unsigned int i;
868 	int ret;
869 
870 	/* Initialize the top-level structure */
871 	ret = v4l2_device_register(cru->dev, &cru->v4l2_dev);
872 	if (ret)
873 		return ret;
874 
875 	mutex_init(&cru->lock);
876 	INIT_LIST_HEAD(&cru->buf_list);
877 
878 	spin_lock_init(&cru->qlock);
879 
880 	cru->state = RZG2L_CRU_DMA_STOPPED;
881 
882 	for (i = 0; i < RZG2L_CRU_HW_BUFFER_MAX; i++)
883 		cru->queue_buf[i] = NULL;
884 
885 	/* buffer queue */
886 	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
887 	q->io_modes = VB2_MMAP | VB2_DMABUF;
888 	q->lock = &cru->lock;
889 	q->drv_priv = cru;
890 	q->buf_struct_size = sizeof(struct rzg2l_cru_buffer);
891 	q->ops = &rzg2l_cru_qops;
892 	q->mem_ops = &vb2_dma_contig_memops;
893 	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
894 	q->min_queued_buffers = 4;
895 	q->dev = cru->dev;
896 
897 	ret = vb2_queue_init(q);
898 	if (ret < 0) {
899 		dev_err(cru->dev, "failed to initialize VB2 queue\n");
900 		goto error;
901 	}
902 
903 	return 0;
904 
905 error:
906 	mutex_destroy(&cru->lock);
907 	v4l2_device_unregister(&cru->v4l2_dev);
908 	return ret;
909 }
910 
911 /* -----------------------------------------------------------------------------
912  * V4L2 ioctl operations and format handling
913  */
914 
915 static void rzg2l_cru_format_align(struct rzg2l_cru_dev *cru,
916 				   struct v4l2_pix_format *pix)
917 {
918 	const struct rzg2l_cru_info *info = cru->info;
919 	const struct rzg2l_cru_ip_format *fmt;
920 
921 	fmt = rzg2l_cru_ip_format_to_fmt(pix->pixelformat);
922 	if (!fmt) {
923 		pix->pixelformat = RZG2L_CRU_DEFAULT_FORMAT;
924 		fmt = rzg2l_cru_ip_format_to_fmt(pix->pixelformat);
925 	}
926 
927 	switch (pix->field) {
928 	case V4L2_FIELD_TOP:
929 	case V4L2_FIELD_BOTTOM:
930 	case V4L2_FIELD_NONE:
931 	case V4L2_FIELD_INTERLACED_TB:
932 	case V4L2_FIELD_INTERLACED_BT:
933 	case V4L2_FIELD_INTERLACED:
934 		break;
935 	default:
936 		pix->field = RZG2L_CRU_DEFAULT_FIELD;
937 		break;
938 	}
939 
940 	/* Limit to CRU capabilities */
941 	v4l_bound_align_image(&pix->width, 320, info->max_width, 1,
942 			      &pix->height, 240, info->max_height, 2, 0);
943 
944 	if (info->has_stride) {
945 		u32 stride = clamp(pix->bytesperline, pix->width * fmt->bpp,
946 				   RZG2L_CRU_STRIDE_MAX);
947 		pix->bytesperline = round_up(stride, RZG2L_CRU_STRIDE_ALIGN);
948 	} else {
949 		pix->bytesperline = pix->width * fmt->bpp;
950 	}
951 
952 	pix->sizeimage = pix->bytesperline * pix->height;
953 
954 	dev_dbg(cru->dev, "Format %ux%u bpl: %u size: %u\n",
955 		pix->width, pix->height, pix->bytesperline, pix->sizeimage);
956 }
957 
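/*
 * Worked example for rzg2l_cru_format_align() above (illustrative): a
 * 640x480 UYVY request gives bytesperline = 640 * 2 = 1280, which is
 * already a multiple of RZG2L_CRU_STRIDE_ALIGN on SoCs with has_stride,
 * and sizeimage = 1280 * 480 = 614400 bytes.
 */
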
958 static void rzg2l_cru_try_format(struct rzg2l_cru_dev *cru,
959 				 struct v4l2_pix_format *pix)
960 {
961 	/*
962 	 * The V4L2 specification clearly documents the colorspace fields
963 	 * as being set by drivers for capture devices. Using the values
964 	 * supplied by userspace thus wouldn't comply with the API. Until
965 	 * the API is updated force fixed values.
966 	 */
967 	pix->colorspace = RZG2L_CRU_DEFAULT_COLORSPACE;
968 	pix->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix->colorspace);
969 	pix->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix->colorspace);
970 	pix->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true, pix->colorspace,
971 							  pix->ycbcr_enc);
972 
973 	rzg2l_cru_format_align(cru, pix);
974 }
975 
976 static int rzg2l_cru_querycap(struct file *file, void *priv,
977 			      struct v4l2_capability *cap)
978 {
979 	strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
980 	strscpy(cap->card, "RZG2L_CRU", sizeof(cap->card));
981 
982 	return 0;
983 }
984 
985 static int rzg2l_cru_try_fmt_vid_cap(struct file *file, void *priv,
986 				     struct v4l2_format *f)
987 {
988 	struct rzg2l_cru_dev *cru = video_drvdata(file);
989 
990 	rzg2l_cru_try_format(cru, &f->fmt.pix);
991 
992 	return 0;
993 }
994 
995 static int rzg2l_cru_s_fmt_vid_cap(struct file *file, void *priv,
996 				   struct v4l2_format *f)
997 {
998 	struct rzg2l_cru_dev *cru = video_drvdata(file);
999 
1000 	if (vb2_is_busy(&cru->queue))
1001 		return -EBUSY;
1002 
1003 	rzg2l_cru_try_format(cru, &f->fmt.pix);
1004 
1005 	cru->format = f->fmt.pix;
1006 
1007 	return 0;
1008 }
1009 
1010 static int rzg2l_cru_g_fmt_vid_cap(struct file *file, void *priv,
1011 				   struct v4l2_format *f)
1012 {
1013 	struct rzg2l_cru_dev *cru = video_drvdata(file);
1014 
1015 	f->fmt.pix = cru->format;
1016 
1017 	return 0;
1018 }
1019 
1020 static int rzg2l_cru_enum_fmt_vid_cap(struct file *file, void *priv,
1021 				      struct v4l2_fmtdesc *f)
1022 {
1023 	const struct rzg2l_cru_ip_format *fmt;
1024 
1025 	fmt = rzg2l_cru_ip_index_to_fmt(f->index);
1026 	if (!fmt)
1027 		return -EINVAL;
1028 
1029 	f->pixelformat = fmt->format;
1030 
1031 	return 0;
1032 }
1033 
1034 static const struct v4l2_ioctl_ops rzg2l_cru_ioctl_ops = {
1035 	.vidioc_querycap		= rzg2l_cru_querycap,
1036 	.vidioc_try_fmt_vid_cap		= rzg2l_cru_try_fmt_vid_cap,
1037 	.vidioc_g_fmt_vid_cap		= rzg2l_cru_g_fmt_vid_cap,
1038 	.vidioc_s_fmt_vid_cap		= rzg2l_cru_s_fmt_vid_cap,
1039 	.vidioc_enum_fmt_vid_cap	= rzg2l_cru_enum_fmt_vid_cap,
1040 
1041 	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
1042 	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
1043 	.vidioc_querybuf		= vb2_ioctl_querybuf,
1044 	.vidioc_qbuf			= vb2_ioctl_qbuf,
1045 	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
1046 	.vidioc_expbuf			= vb2_ioctl_expbuf,
1047 	.vidioc_prepare_buf		= vb2_ioctl_prepare_buf,
1048 	.vidioc_streamon		= vb2_ioctl_streamon,
1049 	.vidioc_streamoff		= vb2_ioctl_streamoff,
1050 };
1051 
1052 /* -----------------------------------------------------------------------------
1053  * Media controller file operations
1054  */
1055 
1056 static int rzg2l_cru_open(struct file *file)
1057 {
1058 	struct rzg2l_cru_dev *cru = video_drvdata(file);
1059 	int ret;
1060 
1061 	ret = mutex_lock_interruptible(&cru->lock);
1062 	if (ret)
1063 		return ret;
1064 
1065 	file->private_data = cru;
1066 	ret = v4l2_fh_open(file);
1067 	if (ret)
1068 		goto err_unlock;
1069 
1070 	mutex_unlock(&cru->lock);
1071 
1072 	return 0;
1073 
1074 err_unlock:
1075 	mutex_unlock(&cru->lock);
1076 
1077 	return ret;
1078 }
1079 
1080 static int rzg2l_cru_release(struct file *file)
1081 {
1082 	struct rzg2l_cru_dev *cru = video_drvdata(file);
1083 	int ret;
1084 
1085 	mutex_lock(&cru->lock);
1086 
1087 	/* The release helper will clean up any ongoing streaming. */
1088 	ret = _vb2_fop_release(file, NULL);
1089 
1090 	mutex_unlock(&cru->lock);
1091 
1092 	return ret;
1093 }
1094 
1095 static const struct v4l2_file_operations rzg2l_cru_fops = {
1096 	.owner		= THIS_MODULE,
1097 	.unlocked_ioctl	= video_ioctl2,
1098 	.open		= rzg2l_cru_open,
1099 	.release	= rzg2l_cru_release,
1100 	.poll		= vb2_fop_poll,
1101 	.mmap		= vb2_fop_mmap,
1102 	.read		= vb2_fop_read,
1103 };
1104 
1105 /* -----------------------------------------------------------------------------
1106  * Media entity operations
1107  */
1108 
1109 static int rzg2l_cru_video_link_validate(struct media_link *link)
1110 {
1111 	struct v4l2_subdev_format fmt = {
1112 		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
1113 	};
1114 	const struct rzg2l_cru_ip_format *video_fmt;
1115 	struct v4l2_subdev *subdev;
1116 	struct rzg2l_cru_dev *cru;
1117 	int ret;
1118 
1119 	subdev = media_entity_to_v4l2_subdev(link->source->entity);
1120 	fmt.pad = link->source->index;
1121 	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
1122 	if (ret < 0)
1123 		return ret == -ENOIOCTLCMD ? -EINVAL : ret;
1124 
1125 	cru = container_of(media_entity_to_video_device(link->sink->entity),
1126 			   struct rzg2l_cru_dev, vdev);
1127 	video_fmt = rzg2l_cru_ip_format_to_fmt(cru->format.pixelformat);
1128 
1129 	if (fmt.format.width != cru->format.width ||
1130 	    fmt.format.height != cru->format.height ||
1131 	    fmt.format.field != cru->format.field ||
1132 	    video_fmt->code != fmt.format.code)
1133 		return -EPIPE;
1134 
1135 	return 0;
1136 }
1137 
1138 static const struct media_entity_operations rzg2l_cru_video_media_ops = {
1139 	.link_validate = rzg2l_cru_video_link_validate,
1140 };
1141 
1142 static void rzg2l_cru_v4l2_init(struct rzg2l_cru_dev *cru)
1143 {
1144 	struct video_device *vdev = &cru->vdev;
1145 
1146 	vdev->v4l2_dev = &cru->v4l2_dev;
1147 	vdev->queue = &cru->queue;
1148 	snprintf(vdev->name, sizeof(vdev->name), "CRU output");
1149 	vdev->release = video_device_release_empty;
1150 	vdev->lock = &cru->lock;
1151 	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
1152 	vdev->device_caps |= V4L2_CAP_IO_MC;
1153 	vdev->entity.ops = &rzg2l_cru_video_media_ops;
1154 	vdev->fops = &rzg2l_cru_fops;
1155 	vdev->ioctl_ops = &rzg2l_cru_ioctl_ops;
1156 
1157 	/* Set a default format */
1158 	cru->format.pixelformat	= RZG2L_CRU_DEFAULT_FORMAT;
1159 	cru->format.width = RZG2L_CRU_DEFAULT_WIDTH;
1160 	cru->format.height = RZG2L_CRU_DEFAULT_HEIGHT;
1161 	cru->format.field = RZG2L_CRU_DEFAULT_FIELD;
1162 	cru->format.colorspace = RZG2L_CRU_DEFAULT_COLORSPACE;
1163 	rzg2l_cru_format_align(cru, &cru->format);
1164 }
1165 
1166 void rzg2l_cru_video_unregister(struct rzg2l_cru_dev *cru)
1167 {
1168 	media_device_unregister(&cru->mdev);
1169 	video_unregister_device(&cru->vdev);
1170 }
1171 
1172 int rzg2l_cru_video_register(struct rzg2l_cru_dev *cru)
1173 {
1174 	struct video_device *vdev = &cru->vdev;
1175 	int ret;
1176 
1177 	if (video_is_registered(&cru->vdev)) {
1178 		struct media_entity *entity;
1179 
1180 		entity = &cru->vdev.entity;
1181 		if (!entity->graph_obj.mdev)
1182 			entity->graph_obj.mdev = &cru->mdev;
1183 		return 0;
1184 	}
1185 
1186 	rzg2l_cru_v4l2_init(cru);
1187 	video_set_drvdata(vdev, cru);
1188 	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
1189 	if (ret) {
1190 		dev_err(cru->dev, "Failed to register video device\n");
1191 		return ret;
1192 	}
1193 
1194 	ret = media_device_register(&cru->mdev);
1195 	if (ret) {
1196 		video_unregister_device(&cru->vdev);
1197 		return ret;
1198 	}
1199 
1200 	return 0;
1201 }
1202