1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Hantro VPU codec driver
4  *
5  * Copyright (C) 2018 Collabora, Ltd.
6  * Copyright 2018 Google LLC.
7  *	Tomasz Figa <tfiga@chromium.org>
8  *
9  * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
10  * Copyright (C) 2011 Samsung Electronics Co., Ltd.
11  */
12 
13 #include <linux/clk.h>
14 #include <linux/module.h>
15 #include <linux/of.h>
16 #include <linux/platform_device.h>
17 #include <linux/pm.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/slab.h>
20 #include <linux/videodev2.h>
21 #include <linux/workqueue.h>
22 #include <media/v4l2-event.h>
23 #include <media/v4l2-mem2mem.h>
24 #include <media/videobuf2-core.h>
25 #include <media/videobuf2-vmalloc.h>
26 
27 #include "hantro_v4l2.h"
28 #include "hantro.h"
29 #include "hantro_hw.h"
30 
31 #define DRIVER_NAME "hantro-vpu"
32 
33 int hantro_debug;
34 module_param_named(debug, hantro_debug, int, 0644);
35 MODULE_PARM_DESC(debug,
36 		 "Debug level - higher value produces more verbose messages");
37 
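/*
 * Look up a control on the context's handler and return a pointer to its
 * current payload, or NULL if the control does not exist.
 */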
38 void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id)
39 {
40 	struct v4l2_ctrl *ctrl;
41 
42 	ctrl = v4l2_ctrl_find(&ctx->ctrl_handler, id);
43 	return ctrl ? ctrl->p_cur.p : NULL;
44 }
45 
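/*
 * Look up a reference frame by timestamp on the capture queue and return
 * the DMA address of its decoded buffer, or 0 if no buffer matches.
 */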
46 dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
47 {
48 	struct vb2_queue *q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
49 	struct vb2_buffer *buf;
50 
51 	buf = vb2_find_buffer(q, ts);
52 	if (!buf)
53 		return 0;
54 	return hantro_get_dec_buf_addr(ctx, buf);
55 }
56 
57 static const struct v4l2_event hantro_eos_event = {
58 	.type = V4L2_EVENT_EOS
59 };
60 
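/*
 * Finish the current job without releasing runtime PM or clocks: assign
 * sequence numbers, flag the last capture buffer and signal EOS when the
 * last draining source buffer is consumed, and return both buffers with
 * the given state.
 */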
61 static void hantro_job_finish_no_pm(struct hantro_dev *vpu,
62 				    struct hantro_ctx *ctx,
63 				    enum vb2_buffer_state result)
64 {
65 	struct vb2_v4l2_buffer *src, *dst;
66 
67 	src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
68 	dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
69 
70 	if (WARN_ON(!src))
71 		return;
72 	if (WARN_ON(!dst))
73 		return;
74 
75 	src->sequence = ctx->sequence_out++;
76 	dst->sequence = ctx->sequence_cap++;
77 
78 	if (v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx, src)) {
79 		dst->flags |= V4L2_BUF_FLAG_LAST;
80 		v4l2_event_queue_fh(&ctx->fh, &hantro_eos_event);
81 		v4l2_m2m_mark_stopped(ctx->fh.m2m_ctx);
82 	}
83 
84 	v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
85 					 result);
86 }
87 
88 static void hantro_job_finish(struct hantro_dev *vpu,
89 			      struct hantro_ctx *ctx,
90 			      enum vb2_buffer_state result)
91 {
92 	pm_runtime_mark_last_busy(vpu->dev);
93 	pm_runtime_put_autosuspend(vpu->dev);
94 
95 	clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
96 
97 	hantro_job_finish_no_pm(vpu, ctx, result);
98 }
99 
100 void hantro_irq_done(struct hantro_dev *vpu,
101 		     enum vb2_buffer_state result)
102 {
103 	struct hantro_ctx *ctx =
104 		v4l2_m2m_get_curr_priv(vpu->m2m_dev);
105 
106 	/*
107 	 * If cancel_delayed_work() returns false, the timeout has already
108 	 * expired and the watchdog is running; it will take care of
109 	 * finishing the job.
110 	 */
111 	if (cancel_delayed_work(&vpu->watchdog_work)) {
112 		if (result == VB2_BUF_STATE_DONE && ctx->codec_ops->done)
113 			ctx->codec_ops->done(ctx);
114 		hantro_job_finish(vpu, ctx, result);
115 	}
116 }
117 
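/*
 * Watchdog work, armed in hantro_end_prepare_run(); if it fires, the
 * hardware never signalled completion, so reset the codec and fail the
 * current job.
 */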
118 void hantro_watchdog(struct work_struct *work)
119 {
120 	struct hantro_dev *vpu;
121 	struct hantro_ctx *ctx;
122 
123 	vpu = container_of(to_delayed_work(work),
124 			   struct hantro_dev, watchdog_work);
125 	ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
126 	if (ctx) {
127 		vpu_err("frame processing timed out!\n");
128 		if (ctx->codec_ops->reset)
129 			ctx->codec_ops->reset(ctx);
130 		hantro_job_finish(vpu, ctx, VB2_BUF_STATE_ERROR);
131 	}
132 }
133 
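/*
 * First step of run preparation: apply the controls carried by the source
 * buffer's media request and, unless the variant programs the
 * post-processor late, enable or disable it for this run.
 */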
134 void hantro_start_prepare_run(struct hantro_ctx *ctx)
135 {
136 	struct vb2_v4l2_buffer *src_buf;
137 
138 	src_buf = hantro_get_src_buf(ctx);
139 	v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
140 				&ctx->ctrl_handler);
141 
142 	if (!ctx->is_encoder && !ctx->dev->variant->late_postproc) {
143 		if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
144 			hantro_postproc_enable(ctx);
145 		else
146 			hantro_postproc_disable(ctx);
147 	}
148 }
149 
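/*
 * Final step before the codec is kicked: optionally configure the
 * post-processor (for variants that set it up late), complete the control
 * request and arm the watchdog.
 */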
150 void hantro_end_prepare_run(struct hantro_ctx *ctx)
151 {
152 	struct vb2_v4l2_buffer *src_buf;
153 
154 	if (!ctx->is_encoder && ctx->dev->variant->late_postproc) {
155 		if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
156 			hantro_postproc_enable(ctx);
157 		else
158 			hantro_postproc_disable(ctx);
159 	}
160 
161 	src_buf = hantro_get_src_buf(ctx);
162 	v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
163 				   &ctx->ctrl_handler);
164 
165 	/* Kick the watchdog. */
166 	schedule_delayed_work(&ctx->dev->watchdog_work,
167 			      msecs_to_jiffies(2000));
168 }
169 
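/*
 * mem2mem device_run hook: resume the device, enable the clocks, copy the
 * source buffer's metadata to the destination and hand control to the
 * per-codec run() callback; any failure cancels the job with an error.
 */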
170 static void device_run(void *priv)
171 {
172 	struct hantro_ctx *ctx = priv;
173 	struct vb2_v4l2_buffer *src, *dst;
174 	int ret;
175 
176 	src = hantro_get_src_buf(ctx);
177 	dst = hantro_get_dst_buf(ctx);
178 
179 	ret = pm_runtime_resume_and_get(ctx->dev->dev);
180 	if (ret < 0)
181 		goto err_cancel_job;
182 
183 	ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
184 	if (ret)
185 		goto err_cancel_job;
186 
187 	v4l2_m2m_buf_copy_metadata(src, dst, true);
188 
189 	if (ctx->codec_ops->run(ctx))
190 		goto err_cancel_job;
191 
192 	return;
193 
194 err_cancel_job:
195 	hantro_job_finish_no_pm(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
196 }
197 
198 static const struct v4l2_m2m_ops vpu_m2m_ops = {
199 	.device_run = device_run,
200 };
201 
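/*
 * Set up the OUTPUT and CAPTURE vb2 queues of a new context. Both use
 * DMA-contig memory; the capture queue is bidirectional so the hardware
 * can also read from it (e.g. reference frames for the decoders).
 */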
202 static int
203 queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
204 {
205 	struct hantro_ctx *ctx = priv;
206 	int ret;
207 
208 	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
209 	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
210 	src_vq->drv_priv = ctx;
211 	src_vq->ops = &hantro_queue_ops;
212 	src_vq->mem_ops = &vb2_dma_contig_memops;
213 
214 	/*
215 	 * The driver does mostly sequential access, so sacrifice TLB
216 	 * efficiency for faster allocation. Also, there is no CPU access to
217 	 * the source queue, so no kernel mapping is needed.
218 	 */
219 	src_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
220 			    DMA_ATTR_NO_KERNEL_MAPPING;
221 	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
222 	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
223 	src_vq->lock = &ctx->dev->vpu_mutex;
224 	src_vq->dev = ctx->dev->v4l2_dev.dev;
225 	src_vq->supports_requests = true;
226 
227 	ret = vb2_queue_init(src_vq);
228 	if (ret)
229 		return ret;
230 
231 	dst_vq->bidirectional = true;
232 	dst_vq->mem_ops = &vb2_dma_contig_memops;
233 	dst_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
234 	/*
235 	 * The kernel needs a mapping of the JPEG destination buffer so the
236 	 * JPEG encoder can fill in the JPEG headers.
237 	 */
238 	if (!ctx->is_encoder) {
239 		dst_vq->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
240 		dst_vq->max_num_buffers = MAX_POSTPROC_BUFFERS;
241 	}
242 
243 	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
244 	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
245 	dst_vq->drv_priv = ctx;
246 	dst_vq->ops = &hantro_queue_ops;
247 	dst_vq->buf_struct_size = sizeof(struct hantro_decoded_buffer);
248 	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
249 	dst_vq->lock = &ctx->dev->vpu_mutex;
250 	dst_vq->dev = ctx->dev->v4l2_dev.dev;
251 
252 	return vb2_queue_init(dst_vq);
253 }
254 
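/*
 * Reject codec-specific control values the hardware cannot handle
 * (unsupported chroma formats, bit depths or profiles) before they are
 * applied.
 */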
255 static int hantro_try_ctrl(struct v4l2_ctrl *ctrl)
256 {
257 	if (ctrl->id == V4L2_CID_STATELESS_H264_SPS) {
258 		const struct v4l2_ctrl_h264_sps *sps = ctrl->p_new.p_h264_sps;
259 
260 		if (sps->chroma_format_idc > 1)
261 			/* Only 4:0:0 and 4:2:0 are supported */
262 			return -EINVAL;
263 		if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
264 			/* Luma and chroma bit depth mismatch */
265 			return -EINVAL;
266 		if (sps->bit_depth_luma_minus8 != 0)
267 			/* Only 8-bit is supported */
268 			return -EINVAL;
269 	} else if (ctrl->id == V4L2_CID_STATELESS_HEVC_SPS) {
270 		const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps;
271 
272 		if (sps->bit_depth_luma_minus8 != 0 && sps->bit_depth_luma_minus8 != 2)
273 			/* Only 8-bit and 10-bit are supported */
274 			return -EINVAL;
275 	} else if (ctrl->id == V4L2_CID_STATELESS_VP9_FRAME) {
276 		const struct v4l2_ctrl_vp9_frame *dec_params = ctrl->p_new.p_vp9_frame;
277 
278 		/* We only support profile 0 */
279 		if (dec_params->profile != 0)
280 			return -EINVAL;
281 	} else if (ctrl->id == V4L2_CID_STATELESS_AV1_SEQUENCE) {
282 		const struct v4l2_ctrl_av1_sequence *sequence = ctrl->p_new.p_av1_sequence;
283 
284 		if (sequence->bit_depth != 8 && sequence->bit_depth != 10)
285 			return -EINVAL;
286 	}
287 
288 	return 0;
289 }
290 
291 static int hantro_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
292 {
293 	struct hantro_ctx *ctx;
294 
295 	ctx = container_of(ctrl->handler,
296 			   struct hantro_ctx, ctrl_handler);
297 
298 	vpu_debug(1, "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);
299 
300 	switch (ctrl->id) {
301 	case V4L2_CID_JPEG_COMPRESSION_QUALITY:
302 		ctx->jpeg_quality = ctrl->val;
303 		break;
304 	default:
305 		return -EINVAL;
306 	}
307 
308 	return 0;
309 }
310 
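/*
 * The VP9, HEVC and AV1 s_ctrl handlers below re-derive the raw pixel
 * format whenever the bitstream bit depth (or, for AV1, the film grain
 * post-processing requirement) changes.
 */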
311 static int hantro_vp9_s_ctrl(struct v4l2_ctrl *ctrl)
312 {
313 	struct hantro_ctx *ctx;
314 
315 	ctx = container_of(ctrl->handler,
316 			   struct hantro_ctx, ctrl_handler);
317 
318 	switch (ctrl->id) {
319 	case V4L2_CID_STATELESS_VP9_FRAME: {
320 		int bit_depth = ctrl->p_new.p_vp9_frame->bit_depth;
321 
322 		if (ctx->bit_depth == bit_depth)
323 			return 0;
324 
325 		return hantro_reset_raw_fmt(ctx, bit_depth, HANTRO_AUTO_POSTPROC);
326 	}
327 	default:
328 		return -EINVAL;
329 	}
330 
331 	return 0;
332 }
333 
334 static int hantro_hevc_s_ctrl(struct v4l2_ctrl *ctrl)
335 {
336 	struct hantro_ctx *ctx;
337 
338 	ctx = container_of(ctrl->handler,
339 			   struct hantro_ctx, ctrl_handler);
340 
341 	switch (ctrl->id) {
342 	case V4L2_CID_STATELESS_HEVC_SPS: {
343 		const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps;
344 		int bit_depth = sps->bit_depth_luma_minus8 + 8;
345 
346 		if (ctx->bit_depth == bit_depth)
347 			return 0;
348 
349 		return hantro_reset_raw_fmt(ctx, bit_depth, HANTRO_AUTO_POSTPROC);
350 	}
351 	default:
352 		return -EINVAL;
353 	}
354 
355 	return 0;
356 }
357 
358 static int hantro_av1_s_ctrl(struct v4l2_ctrl *ctrl)
359 {
360 	struct hantro_ctx *ctx;
361 
362 	ctx = container_of(ctrl->handler,
363 			   struct hantro_ctx, ctrl_handler);
364 
365 	switch (ctrl->id) {
366 	case V4L2_CID_STATELESS_AV1_SEQUENCE:
367 	{
368 		int bit_depth = ctrl->p_new.p_av1_sequence->bit_depth;
369 		bool need_postproc = HANTRO_AUTO_POSTPROC;
370 
371 		if (ctrl->p_new.p_av1_sequence->flags
372 		    & V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT)
373 			need_postproc = HANTRO_FORCE_POSTPROC;
374 
375 		if (ctx->bit_depth == bit_depth &&
376 		    ctx->need_postproc == need_postproc)
377 			return 0;
378 
379 		return hantro_reset_raw_fmt(ctx, bit_depth, need_postproc);
380 	}
381 	default:
382 		return -EINVAL;
383 	}
384 
385 	return 0;
386 }
387 
388 static const struct v4l2_ctrl_ops hantro_ctrl_ops = {
389 	.try_ctrl = hantro_try_ctrl,
390 };
391 
392 static const struct v4l2_ctrl_ops hantro_jpeg_ctrl_ops = {
393 	.s_ctrl = hantro_jpeg_s_ctrl,
394 };
395 
396 static const struct v4l2_ctrl_ops hantro_vp9_ctrl_ops = {
397 	.s_ctrl = hantro_vp9_s_ctrl,
398 };
399 
400 static const struct v4l2_ctrl_ops hantro_hevc_ctrl_ops = {
401 	.try_ctrl = hantro_try_ctrl,
402 	.s_ctrl = hantro_hevc_s_ctrl,
403 };
404 
405 static const struct v4l2_ctrl_ops hantro_av1_ctrl_ops = {
406 	.try_ctrl = hantro_try_ctrl,
407 	.s_ctrl = hantro_av1_s_ctrl,
408 };
409 
410 #define HANTRO_JPEG_ACTIVE_MARKERS	(V4L2_JPEG_ACTIVE_MARKER_APP0 | \
411 					 V4L2_JPEG_ACTIVE_MARKER_COM | \
412 					 V4L2_JPEG_ACTIVE_MARKER_DQT | \
413 					 V4L2_JPEG_ACTIVE_MARKER_DHT)
414 
415 static const struct hantro_ctrl controls[] = {
416 	{
417 		.codec = HANTRO_JPEG_ENCODER,
418 		.cfg = {
419 			.id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
420 			.min = 5,
421 			.max = 100,
422 			.step = 1,
423 			.def = 50,
424 			.ops = &hantro_jpeg_ctrl_ops,
425 		},
426 	}, {
427 		.codec = HANTRO_JPEG_ENCODER,
428 		.cfg = {
429 			.id = V4L2_CID_JPEG_ACTIVE_MARKER,
430 			.max = HANTRO_JPEG_ACTIVE_MARKERS,
431 			.def = HANTRO_JPEG_ACTIVE_MARKERS,
432 			/*
433 			 * Changing the set of active markers/segments also
434 			 * messes up the alignment of the JPEG header, which
435 			 * is needed to allow the hardware to write directly
436 			 * to the output buffer. Implementing this introduces
437 			 * a lot of complexity for little gain, as the enabled
438 			 * markers are already the minimum required set.
439 			 */
440 			.flags = V4L2_CTRL_FLAG_READ_ONLY,
441 		},
442 	}, {
443 		.codec = HANTRO_MPEG2_DECODER,
444 		.cfg = {
445 			.id = V4L2_CID_STATELESS_MPEG2_SEQUENCE,
446 		},
447 	}, {
448 		.codec = HANTRO_MPEG2_DECODER,
449 		.cfg = {
450 			.id = V4L2_CID_STATELESS_MPEG2_PICTURE,
451 		},
452 	}, {
453 		.codec = HANTRO_MPEG2_DECODER,
454 		.cfg = {
455 			.id = V4L2_CID_STATELESS_MPEG2_QUANTISATION,
456 		},
457 	}, {
458 		.codec = HANTRO_VP8_DECODER,
459 		.cfg = {
460 			.id = V4L2_CID_STATELESS_VP8_FRAME,
461 		},
462 	}, {
463 		.codec = HANTRO_H264_DECODER,
464 		.cfg = {
465 			.id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
466 		},
467 	}, {
468 		.codec = HANTRO_H264_DECODER,
469 		.cfg = {
470 			.id = V4L2_CID_STATELESS_H264_SPS,
471 			.ops = &hantro_ctrl_ops,
472 		},
473 	}, {
474 		.codec = HANTRO_H264_DECODER,
475 		.cfg = {
476 			.id = V4L2_CID_STATELESS_H264_PPS,
477 		},
478 	}, {
479 		.codec = HANTRO_H264_DECODER,
480 		.cfg = {
481 			.id = V4L2_CID_STATELESS_H264_SCALING_MATRIX,
482 		},
483 	}, {
484 		.codec = HANTRO_H264_DECODER,
485 		.cfg = {
486 			.id = V4L2_CID_STATELESS_H264_DECODE_MODE,
487 			.min = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
488 			.def = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
489 			.max = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
490 		},
491 	}, {
492 		.codec = HANTRO_H264_DECODER,
493 		.cfg = {
494 			.id = V4L2_CID_STATELESS_H264_START_CODE,
495 			.min = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
496 			.def = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
497 			.max = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
498 		},
499 	}, {
500 		.codec = HANTRO_H264_DECODER,
501 		.cfg = {
502 			.id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
503 			.min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
504 			.max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
505 			.menu_skip_mask =
506 			BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
507 			.def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
508 		}
509 	}, {
510 		.codec = HANTRO_HEVC_DECODER,
511 		.cfg = {
512 			.id = V4L2_CID_STATELESS_HEVC_DECODE_MODE,
513 			.min = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
514 			.max = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
515 			.def = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
516 		},
517 	}, {
518 		.codec = HANTRO_HEVC_DECODER,
519 		.cfg = {
520 			.id = V4L2_CID_STATELESS_HEVC_START_CODE,
521 			.min = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
522 			.max = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
523 			.def = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
524 		},
525 	}, {
526 		.codec = HANTRO_HEVC_DECODER,
527 		.cfg = {
528 			.id = V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
529 			.min = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
530 			.max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
531 			.def = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
532 		},
533 	}, {
534 		.codec = HANTRO_HEVC_DECODER,
535 		.cfg = {
536 			.id = V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
537 			.min = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
538 			.max = V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1,
539 		},
540 	}, {
541 		.codec = HANTRO_HEVC_DECODER,
542 		.cfg = {
543 			.id = V4L2_CID_STATELESS_HEVC_SPS,
544 			.ops = &hantro_hevc_ctrl_ops,
545 		},
546 	}, {
547 		.codec = HANTRO_HEVC_DECODER,
548 		.cfg = {
549 			.id = V4L2_CID_STATELESS_HEVC_PPS,
550 		},
551 	}, {
552 		.codec = HANTRO_HEVC_DECODER,
553 		.cfg = {
554 			.id = V4L2_CID_STATELESS_HEVC_DECODE_PARAMS,
555 		},
556 	}, {
557 		.codec = HANTRO_HEVC_DECODER,
558 		.cfg = {
559 			.id = V4L2_CID_STATELESS_HEVC_SCALING_MATRIX,
560 		},
561 	}, {
562 		.codec = HANTRO_VP9_DECODER,
563 		.cfg = {
564 			.id = V4L2_CID_STATELESS_VP9_FRAME,
565 			.ops = &hantro_vp9_ctrl_ops,
566 		},
567 	}, {
568 		.codec = HANTRO_VP9_DECODER,
569 		.cfg = {
570 			.id = V4L2_CID_STATELESS_VP9_COMPRESSED_HDR,
571 		},
572 	}, {
573 		.codec = HANTRO_AV1_DECODER,
574 		.cfg = {
575 			.id = V4L2_CID_STATELESS_AV1_FRAME,
576 		},
577 	}, {
578 		.codec = HANTRO_AV1_DECODER,
579 		.cfg = {
580 			.id = V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY,
581 			.dims = { V4L2_AV1_MAX_TILE_COUNT },
582 		},
583 	}, {
584 		.codec = HANTRO_AV1_DECODER,
585 		.cfg = {
586 			.id = V4L2_CID_STATELESS_AV1_SEQUENCE,
587 			.ops = &hantro_av1_ctrl_ops,
588 		},
589 	}, {
590 		.codec = HANTRO_AV1_DECODER,
591 		.cfg = {
592 			.id = V4L2_CID_STATELESS_AV1_FILM_GRAIN,
593 		},
594 	},
595 };
596 
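/*
 * Register the entries of controls[] that match the codecs supported by
 * this instance and apply their initial values.
 */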
597 static int hantro_ctrls_setup(struct hantro_dev *vpu,
598 			      struct hantro_ctx *ctx,
599 			      int allowed_codecs)
600 {
601 	int i, num_ctrls = ARRAY_SIZE(controls);
602 
603 	v4l2_ctrl_handler_init(&ctx->ctrl_handler, num_ctrls);
604 
605 	for (i = 0; i < num_ctrls; i++) {
606 		if (!(allowed_codecs & controls[i].codec))
607 			continue;
608 
609 		v4l2_ctrl_new_custom(&ctx->ctrl_handler,
610 				     &controls[i].cfg, NULL);
611 		if (ctx->ctrl_handler.error) {
612 			vpu_err("Adding control (%d) failed %d\n",
613 				controls[i].cfg.id,
614 				ctx->ctrl_handler.error);
615 			v4l2_ctrl_handler_free(&ctx->ctrl_handler);
616 			return ctx->ctrl_handler.error;
617 		}
618 	}
619 	return v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
620 }
621 
622 /*
623  * V4L2 file operations.
624  */
625 
626 static int hantro_open(struct file *filp)
627 {
628 	struct hantro_dev *vpu = video_drvdata(filp);
629 	struct video_device *vdev = video_devdata(filp);
630 	struct hantro_func *func = hantro_vdev_to_func(vdev);
631 	struct hantro_ctx *ctx;
632 	int allowed_codecs, ret;
633 
634 	/*
635 	 * We do not need any extra locking here, because we only operate on
636 	 * local data, except for reading a few fields from dev, which do not
637 	 * change throughout the device's lifetime (guaranteed by the module
638 	 * reference taken in open()), and on V4L2 internal objects (such as
639 	 * vdev and ctx->fh), which do their own locking in the respective
640 	 * helper functions used here.
641 	 */
642 
643 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
644 	if (!ctx)
645 		return -ENOMEM;
646 
647 	ctx->dev = vpu;
648 	if (func->id == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
649 		allowed_codecs = vpu->variant->codec & HANTRO_ENCODERS;
650 		ctx->is_encoder = true;
651 	} else if (func->id == MEDIA_ENT_F_PROC_VIDEO_DECODER) {
652 		allowed_codecs = vpu->variant->codec & HANTRO_DECODERS;
653 		ctx->is_encoder = false;
654 	} else {
655 		ret = -ENODEV;
656 		goto err_ctx_free;
657 	}
658 
659 	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx, queue_init);
660 	if (IS_ERR(ctx->fh.m2m_ctx)) {
661 		ret = PTR_ERR(ctx->fh.m2m_ctx);
662 		goto err_ctx_free;
663 	}
664 
665 	v4l2_fh_init(&ctx->fh, vdev);
666 	filp->private_data = &ctx->fh;
667 	v4l2_fh_add(&ctx->fh);
668 
669 	hantro_reset_fmts(ctx);
670 
671 	ret = hantro_ctrls_setup(vpu, ctx, allowed_codecs);
672 	if (ret) {
673 		vpu_err("Failed to set up controls\n");
674 		goto err_fh_free;
675 	}
676 	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
677 
678 	return 0;
679 
680 err_fh_free:
681 	v4l2_fh_del(&ctx->fh);
682 	v4l2_fh_exit(&ctx->fh);
683 err_ctx_free:
684 	kfree(ctx);
685 	return ret;
686 }
687 
688 static int hantro_release(struct file *filp)
689 {
690 	struct hantro_ctx *ctx =
691 		container_of(filp->private_data, struct hantro_ctx, fh);
692 
693 	/*
694 	 * No need for extra locking because this was the last reference
695 	 * to this file.
696 	 */
697 	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
698 	v4l2_fh_del(&ctx->fh);
699 	v4l2_fh_exit(&ctx->fh);
700 	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
701 	kfree(ctx);
702 
703 	return 0;
704 }
705 
706 static const struct v4l2_file_operations hantro_fops = {
707 	.owner = THIS_MODULE,
708 	.open = hantro_open,
709 	.release = hantro_release,
710 	.poll = v4l2_m2m_fop_poll,
711 	.unlocked_ioctl = video_ioctl2,
712 	.mmap = v4l2_m2m_fop_mmap,
713 };
714 
715 static const struct of_device_id of_hantro_match[] = {
716 #ifdef CONFIG_VIDEO_HANTRO_ROCKCHIP
717 	{ .compatible = "rockchip,px30-vpu",   .data = &px30_vpu_variant, },
718 	{ .compatible = "rockchip,rk3036-vpu", .data = &rk3036_vpu_variant, },
719 	{ .compatible = "rockchip,rk3066-vpu", .data = &rk3066_vpu_variant, },
720 	{ .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, },
721 	{ .compatible = "rockchip,rk3328-vpu", .data = &rk3328_vpu_variant, },
722 	{ .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
723 	{ .compatible = "rockchip,rk3568-vepu", .data = &rk3568_vepu_variant, },
724 	{ .compatible = "rockchip,rk3568-vpu", .data = &rk3568_vpu_variant, },
725 	{ .compatible = "rockchip,rk3588-av1-vpu", .data = &rk3588_vpu981_variant, },
726 #endif
727 #ifdef CONFIG_VIDEO_HANTRO_IMX8M
728 	{ .compatible = "nxp,imx8mm-vpu-g1", .data = &imx8mm_vpu_g1_variant, },
729 	{ .compatible = "nxp,imx8mq-vpu", .data = &imx8mq_vpu_variant, },
730 	{ .compatible = "nxp,imx8mq-vpu-g1", .data = &imx8mq_vpu_g1_variant },
731 	{ .compatible = "nxp,imx8mq-vpu-g2", .data = &imx8mq_vpu_g2_variant },
732 #endif
733 #ifdef CONFIG_VIDEO_HANTRO_SAMA5D4
734 	{ .compatible = "microchip,sama5d4-vdec", .data = &sama5d4_vdec_variant, },
735 #endif
736 #ifdef CONFIG_VIDEO_HANTRO_SUNXI
737 	{ .compatible = "allwinner,sun50i-h6-vpu-g2", .data = &sunxi_vpu_variant, },
738 #endif
739 #ifdef CONFIG_VIDEO_HANTRO_STM32MP25
740 	{ .compatible = "st,stm32mp25-vdec", .data = &stm32mp25_vdec_variant, },
741 	{ .compatible = "st,stm32mp25-venc", .data = &stm32mp25_venc_variant, },
742 #endif
743 	{ /* sentinel */ }
744 };
745 MODULE_DEVICE_TABLE(of, of_hantro_match);
746 
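/*
 * Initialize and register a single media entity, named after the video
 * device it belongs to, with the given pads and function.
 */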
747 static int hantro_register_entity(struct media_device *mdev,
748 				  struct media_entity *entity,
749 				  const char *entity_name,
750 				  struct media_pad *pads, int num_pads,
751 				  int function, struct video_device *vdev)
752 {
753 	char *name;
754 	int ret;
755 
756 	entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
757 	if (function == MEDIA_ENT_F_IO_V4L) {
758 		entity->info.dev.major = VIDEO_MAJOR;
759 		entity->info.dev.minor = vdev->minor;
760 	}
761 
762 	name = devm_kasprintf(mdev->dev, GFP_KERNEL, "%s-%s", vdev->name,
763 			      entity_name);
764 	if (!name)
765 		return -ENOMEM;
766 
767 	entity->name = name;
768 	entity->function = function;
769 
770 	ret = media_entity_pads_init(entity, num_pads, pads);
771 	if (ret)
772 		return ret;
773 
774 	ret = media_device_register_entity(mdev, entity);
775 	if (ret)
776 		return ret;
777 
778 	return 0;
779 }
780 
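/*
 * Build the media controller topology for one codec function: a source
 * I/O entity, a processing entity and a sink I/O entity, linked together
 * and exposed through a V4L interface node.
 */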
781 static int hantro_attach_func(struct hantro_dev *vpu,
782 			      struct hantro_func *func)
783 {
784 	struct media_device *mdev = &vpu->mdev;
785 	struct media_link *link;
786 	int ret;
787 
788 	/* Create the three entities (source, proc and sink) with their pads */
789 	func->source_pad.flags = MEDIA_PAD_FL_SOURCE;
790 	ret = hantro_register_entity(mdev, &func->vdev.entity, "source",
791 				     &func->source_pad, 1, MEDIA_ENT_F_IO_V4L,
792 				     &func->vdev);
793 	if (ret)
794 		return ret;
795 
796 	func->proc_pads[0].flags = MEDIA_PAD_FL_SINK;
797 	func->proc_pads[1].flags = MEDIA_PAD_FL_SOURCE;
798 	ret = hantro_register_entity(mdev, &func->proc, "proc",
799 				     func->proc_pads, 2, func->id,
800 				     &func->vdev);
801 	if (ret)
802 		goto err_rel_entity0;
803 
804 	func->sink_pad.flags = MEDIA_PAD_FL_SINK;
805 	ret = hantro_register_entity(mdev, &func->sink, "sink",
806 				     &func->sink_pad, 1, MEDIA_ENT_F_IO_V4L,
807 				     &func->vdev);
808 	if (ret)
809 		goto err_rel_entity1;
810 
811 	/* Connect the three entities */
812 	ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 0,
813 				    MEDIA_LNK_FL_IMMUTABLE |
814 				    MEDIA_LNK_FL_ENABLED);
815 	if (ret)
816 		goto err_rel_entity2;
817 
818 	ret = media_create_pad_link(&func->proc, 1, &func->sink, 0,
819 				    MEDIA_LNK_FL_IMMUTABLE |
820 				    MEDIA_LNK_FL_ENABLED);
821 	if (ret)
822 		goto err_rm_links0;
823 
824 	/* Create video interface */
825 	func->intf_devnode = media_devnode_create(mdev, MEDIA_INTF_T_V4L_VIDEO,
826 						  0, VIDEO_MAJOR,
827 						  func->vdev.minor);
828 	if (!func->intf_devnode) {
829 		ret = -ENOMEM;
830 		goto err_rm_links1;
831 	}
832 
833 	/* Connect the two DMA engines to the interface */
834 	link = media_create_intf_link(&func->vdev.entity,
835 				      &func->intf_devnode->intf,
836 				      MEDIA_LNK_FL_IMMUTABLE |
837 				      MEDIA_LNK_FL_ENABLED);
838 	if (!link) {
839 		ret = -ENOMEM;
840 		goto err_rm_devnode;
841 	}
842 
843 	link = media_create_intf_link(&func->sink, &func->intf_devnode->intf,
844 				      MEDIA_LNK_FL_IMMUTABLE |
845 				      MEDIA_LNK_FL_ENABLED);
846 	if (!link) {
847 		ret = -ENOMEM;
848 		goto err_rm_devnode;
849 	}
850 	return 0;
851 
852 err_rm_devnode:
853 	media_devnode_remove(func->intf_devnode);
854 
855 err_rm_links1:
856 	media_entity_remove_links(&func->sink);
857 
858 err_rm_links0:
859 	media_entity_remove_links(&func->proc);
860 	media_entity_remove_links(&func->vdev.entity);
861 
862 err_rel_entity2:
863 	media_device_unregister_entity(&func->sink);
864 
865 err_rel_entity1:
866 	media_device_unregister_entity(&func->proc);
867 
868 err_rel_entity0:
869 	media_device_unregister_entity(&func->vdev.entity);
870 	return ret;
871 }
872 
873 static void hantro_detach_func(struct hantro_func *func)
874 {
875 	media_devnode_remove(func->intf_devnode);
876 	media_entity_remove_links(&func->sink);
877 	media_entity_remove_links(&func->proc);
878 	media_entity_remove_links(&func->vdev.entity);
879 	media_device_unregister_entity(&func->sink);
880 	media_device_unregister_entity(&func->proc);
881 	media_device_unregister_entity(&func->vdev.entity);
882 }
883 
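/*
 * Create, register and attach the video device for one codec function
 * (encoder or decoder).
 */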
884 static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid)
885 {
886 	const struct of_device_id *match;
887 	struct hantro_func *func;
888 	struct video_device *vfd;
889 	int ret;
890 
891 	match = of_match_node(of_hantro_match, vpu->dev->of_node);
892 	func = devm_kzalloc(vpu->dev, sizeof(*func), GFP_KERNEL);
893 	if (!func) {
894 		v4l2_err(&vpu->v4l2_dev, "Failed to allocate video device\n");
895 		return -ENOMEM;
896 	}
897 
898 	func->id = funcid;
899 
900 	vfd = &func->vdev;
901 	vfd->fops = &hantro_fops;
902 	vfd->release = video_device_release_empty;
903 	vfd->lock = &vpu->vpu_mutex;
904 	vfd->v4l2_dev = &vpu->v4l2_dev;
905 	vfd->vfl_dir = VFL_DIR_M2M;
906 	vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
907 	vfd->ioctl_ops = &hantro_ioctl_ops;
908 	strscpy(vfd->name, match->compatible, sizeof(vfd->name));
909 	strlcat(vfd->name, funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER ?
910 		"-enc" : "-dec", sizeof(vfd->name));
911 
912 	if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
913 		vpu->encoder = func;
914 		v4l2_disable_ioctl(vfd, VIDIOC_TRY_DECODER_CMD);
915 		v4l2_disable_ioctl(vfd, VIDIOC_DECODER_CMD);
916 	} else {
917 		vpu->decoder = func;
918 		v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
919 		v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD);
920 	}
921 
922 	video_set_drvdata(vfd, vpu);
923 
924 	ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
925 	if (ret) {
926 		v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
927 		return ret;
928 	}
929 
930 	ret = hantro_attach_func(vpu, func);
931 	if (ret) {
932 		v4l2_err(&vpu->v4l2_dev,
933 			 "Failed to attach functionality to the media device\n");
934 		goto err_unreg_dev;
935 	}
936 
937 	v4l2_info(&vpu->v4l2_dev, "registered %s as /dev/video%d\n", vfd->name,
938 		  vfd->num);
939 
940 	return 0;
941 
942 err_unreg_dev:
943 	video_unregister_device(vfd);
944 	return ret;
945 }
946 
947 static int hantro_add_enc_func(struct hantro_dev *vpu)
948 {
949 	if (!vpu->variant->enc_fmts)
950 		return 0;
951 
952 	return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
953 }
954 
955 static int hantro_add_dec_func(struct hantro_dev *vpu)
956 {
957 	if (!vpu->variant->dec_fmts)
958 		return 0;
959 
960 	return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
961 }
962 
963 static void hantro_remove_func(struct hantro_dev *vpu,
964 			       unsigned int funcid)
965 {
966 	struct hantro_func *func;
967 
968 	if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER)
969 		func = vpu->encoder;
970 	else
971 		func = vpu->decoder;
972 
973 	if (!func)
974 		return;
975 
976 	hantro_detach_func(func);
977 	video_unregister_device(&func->vdev);
978 }
979 
980 static void hantro_remove_enc_func(struct hantro_dev *vpu)
981 {
982 	hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
983 }
984 
985 static void hantro_remove_dec_func(struct hantro_dev *vpu)
986 {
987 	hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
988 }
989 
990 static const struct media_device_ops hantro_m2m_media_ops = {
991 	.req_validate = vb2_request_validate,
992 	.req_queue = v4l2_m2m_request_queue,
993 };
994 
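/*
 * Probe: resolve the SoC variant, acquire clocks, resets, register ranges
 * and IRQs, set up runtime PM, then register the V4L2, mem2mem and media
 * devices together with the encoder and/or decoder video nodes.
 */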
995 static int hantro_probe(struct platform_device *pdev)
996 {
997 	const struct of_device_id *match;
998 	struct hantro_dev *vpu;
999 	int num_bases;
1000 	int i, ret;
1001 
1002 	vpu = devm_kzalloc(&pdev->dev, sizeof(*vpu), GFP_KERNEL);
1003 	if (!vpu)
1004 		return -ENOMEM;
1005 
1006 	vpu->dev = &pdev->dev;
1007 	vpu->pdev = pdev;
1008 	mutex_init(&vpu->vpu_mutex);
1009 	spin_lock_init(&vpu->irqlock);
1010 
1011 	match = of_match_node(of_hantro_match, pdev->dev.of_node);
1012 	vpu->variant = match->data;
1013 
1014 	/*
1015 	 * Support for nxp,imx8mq-vpu is kept for backwards compatibility
1016 	 * but it's deprecated. Please update your DTS file to use
1017 	 * nxp,imx8mq-vpu-g1 or nxp,imx8mq-vpu-g2 instead.
1018 	 */
1019 	if (of_device_is_compatible(pdev->dev.of_node, "nxp,imx8mq-vpu"))
1020 		dev_warn(&pdev->dev, "%s compatible is deprecated\n",
1021 			 match->compatible);
1022 
1023 	INIT_DELAYED_WORK(&vpu->watchdog_work, hantro_watchdog);
1024 
1025 	vpu->clocks = devm_kcalloc(&pdev->dev, vpu->variant->num_clocks,
1026 				   sizeof(*vpu->clocks), GFP_KERNEL);
1027 	if (!vpu->clocks)
1028 		return -ENOMEM;
1029 
1030 	if (vpu->variant->num_clocks > 1) {
1031 		for (i = 0; i < vpu->variant->num_clocks; i++)
1032 			vpu->clocks[i].id = vpu->variant->clk_names[i];
1033 
1034 		ret = devm_clk_bulk_get(&pdev->dev, vpu->variant->num_clocks,
1035 					vpu->clocks);
1036 		if (ret)
1037 			return ret;
1038 	} else {
1039 		/*
1040 		 * If the driver has a single clk, chances are there will be no
1041 		 * actual name in the DT bindings.
1042 		 */
1043 		vpu->clocks[0].clk = devm_clk_get(&pdev->dev, NULL);
1044 		if (IS_ERR(vpu->clocks[0].clk))
1045 			return PTR_ERR(vpu->clocks[0].clk);
1046 	}
1047 
1048 	vpu->resets = devm_reset_control_array_get_optional_exclusive(&pdev->dev);
1049 	if (IS_ERR(vpu->resets))
1050 		return PTR_ERR(vpu->resets);
1051 
1052 	num_bases = vpu->variant->num_regs ?: 1;
1053 	vpu->reg_bases = devm_kcalloc(&pdev->dev, num_bases,
1054 				      sizeof(*vpu->reg_bases), GFP_KERNEL);
1055 	if (!vpu->reg_bases)
1056 		return -ENOMEM;
1057 
1058 	for (i = 0; i < num_bases; i++) {
1059 		vpu->reg_bases[i] = vpu->variant->reg_names ?
1060 		      devm_platform_ioremap_resource_byname(pdev, vpu->variant->reg_names[i]) :
1061 		      devm_platform_ioremap_resource(pdev, 0);
1062 		if (IS_ERR(vpu->reg_bases[i]))
1063 			return PTR_ERR(vpu->reg_bases[i]);
1064 	}
1065 	vpu->enc_base = vpu->reg_bases[0] + vpu->variant->enc_offset;
1066 	vpu->dec_base = vpu->reg_bases[0] + vpu->variant->dec_offset;
1067 
1068 	/*
1069 	 * TODO: Eventually allow taking advantage of full 64-bit address space.
1070 	 * Until then we assume the MSB portion of buffers' base addresses is
1071 	 * always 0 due to this masking operation.
1072 	 */
1073 	ret = dma_set_coherent_mask(vpu->dev, DMA_BIT_MASK(32));
1074 	if (ret) {
1075 		dev_err(vpu->dev, "Could not set DMA coherent mask.\n");
1076 		return ret;
1077 	}
1078 	vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
1079 
1080 	for (i = 0; i < vpu->variant->num_irqs; i++) {
1081 		const char *irq_name;
1082 		int irq;
1083 
1084 		if (!vpu->variant->irqs[i].handler)
1085 			continue;
1086 
1087 		if (vpu->variant->num_irqs > 1) {
1088 			irq_name = vpu->variant->irqs[i].name;
1089 			irq = platform_get_irq_byname(vpu->pdev, irq_name);
1090 		} else {
1091 			/*
1092 			 * If the driver has a single IRQ, chances are there
1093 			 * will be no actual name in the DT bindings.
1094 			 */
1095 			irq_name = "default";
1096 			irq = platform_get_irq(vpu->pdev, 0);
1097 		}
1098 		if (irq < 0)
1099 			return irq;
1100 
1101 		ret = devm_request_irq(vpu->dev, irq,
1102 				       vpu->variant->irqs[i].handler, 0,
1103 				       dev_name(vpu->dev), vpu);
1104 		if (ret) {
1105 			dev_err(vpu->dev, "Could not request %s IRQ.\n",
1106 				irq_name);
1107 			return ret;
1108 		}
1109 	}
1110 
1111 	if (vpu->variant->init) {
1112 		ret = vpu->variant->init(vpu);
1113 		if (ret) {
1114 			dev_err(&pdev->dev, "Failed to init VPU hardware\n");
1115 			return ret;
1116 		}
1117 	}
1118 
1119 	pm_runtime_set_autosuspend_delay(vpu->dev, 100);
1120 	pm_runtime_use_autosuspend(vpu->dev);
1121 	pm_runtime_enable(vpu->dev);
1122 
1123 	ret = reset_control_deassert(vpu->resets);
1124 	if (ret) {
1125 		dev_err(&pdev->dev, "Failed to deassert resets\n");
1126 		goto err_pm_disable;
1127 	}
1128 
1129 	ret = clk_bulk_prepare(vpu->variant->num_clocks, vpu->clocks);
1130 	if (ret) {
1131 		dev_err(&pdev->dev, "Failed to prepare clocks\n");
1132 		goto err_rst_assert;
1133 	}
1134 
1135 	ret = v4l2_device_register(&pdev->dev, &vpu->v4l2_dev);
1136 	if (ret) {
1137 		dev_err(&pdev->dev, "Failed to register v4l2 device\n");
1138 		goto err_clk_unprepare;
1139 	}
1140 	platform_set_drvdata(pdev, vpu);
1141 
1142 	vpu->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
1143 	if (IS_ERR(vpu->m2m_dev)) {
1144 		v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem device\n");
1145 		ret = PTR_ERR(vpu->m2m_dev);
1146 		goto err_v4l2_unreg;
1147 	}
1148 
1149 	vpu->mdev.dev = vpu->dev;
1150 	strscpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
1151 	media_device_init(&vpu->mdev);
1152 	vpu->mdev.ops = &hantro_m2m_media_ops;
1153 	vpu->v4l2_dev.mdev = &vpu->mdev;
1154 
1155 	ret = hantro_add_enc_func(vpu);
1156 	if (ret) {
1157 		dev_err(&pdev->dev, "Failed to register encoder\n");
1158 		goto err_m2m_rel;
1159 	}
1160 
1161 	ret = hantro_add_dec_func(vpu);
1162 	if (ret) {
1163 		dev_err(&pdev->dev, "Failed to register decoder\n");
1164 		goto err_rm_enc_func;
1165 	}
1166 
1167 	ret = media_device_register(&vpu->mdev);
1168 	if (ret) {
1169 		v4l2_err(&vpu->v4l2_dev, "Failed to register mem2mem media device\n");
1170 		goto err_rm_dec_func;
1171 	}
1172 
1173 	return 0;
1174 
1175 err_rm_dec_func:
1176 	hantro_remove_dec_func(vpu);
1177 err_rm_enc_func:
1178 	hantro_remove_enc_func(vpu);
1179 err_m2m_rel:
1180 	media_device_cleanup(&vpu->mdev);
1181 	v4l2_m2m_release(vpu->m2m_dev);
1182 err_v4l2_unreg:
1183 	v4l2_device_unregister(&vpu->v4l2_dev);
1184 err_clk_unprepare:
1185 	clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
1186 err_rst_assert:
1187 	reset_control_assert(vpu->resets);
1188 err_pm_disable:
1189 	pm_runtime_dont_use_autosuspend(vpu->dev);
1190 	pm_runtime_disable(vpu->dev);
1191 	return ret;
1192 }
1193 
1194 static void hantro_remove(struct platform_device *pdev)
1195 {
1196 	struct hantro_dev *vpu = platform_get_drvdata(pdev);
1197 
1198 	v4l2_info(&vpu->v4l2_dev, "Removing %s\n", pdev->name);
1199 
1200 	media_device_unregister(&vpu->mdev);
1201 	hantro_remove_dec_func(vpu);
1202 	hantro_remove_enc_func(vpu);
1203 	media_device_cleanup(&vpu->mdev);
1204 	v4l2_m2m_release(vpu->m2m_dev);
1205 	v4l2_device_unregister(&vpu->v4l2_dev);
1206 	clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
1207 	reset_control_assert(vpu->resets);
1208 	pm_runtime_dont_use_autosuspend(vpu->dev);
1209 	pm_runtime_disable(vpu->dev);
1210 }
1211 
1212 #ifdef CONFIG_PM
1213 static int hantro_runtime_resume(struct device *dev)
1214 {
1215 	struct hantro_dev *vpu = dev_get_drvdata(dev);
1216 
1217 	if (vpu->variant->runtime_resume)
1218 		return vpu->variant->runtime_resume(vpu);
1219 
1220 	return 0;
1221 }
1222 #endif
1223 
1224 static const struct dev_pm_ops hantro_pm_ops = {
1225 	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1226 				pm_runtime_force_resume)
1227 	SET_RUNTIME_PM_OPS(NULL, hantro_runtime_resume, NULL)
1228 };
1229 
1230 static struct platform_driver hantro_driver = {
1231 	.probe = hantro_probe,
1232 	.remove_new = hantro_remove,
1233 	.driver = {
1234 		   .name = DRIVER_NAME,
1235 		   .of_match_table = of_hantro_match,
1236 		   .pm = &hantro_pm_ops,
1237 	},
1238 };
1239 module_platform_driver(hantro_driver);
1240 
1241 MODULE_LICENSE("GPL v2");
1242 MODULE_AUTHOR("Alpha Lin <Alpha.Lin@Rock-Chips.com>");
1243 MODULE_AUTHOR("Tomasz Figa <tfiga@chromium.org>");
1244 MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
1245 MODULE_DESCRIPTION("Hantro VPU codec driver");
1246