1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Hantro VPU codec driver
4 *
5 * Copyright (C) 2018 Collabora, Ltd.
6 * Copyright 2018 Google LLC.
7 * Tomasz Figa <tfiga@chromium.org>
8 *
9 * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
10 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
11 */
12
13 #include <linux/clk.h>
14 #include <linux/module.h>
15 #include <linux/of.h>
16 #include <linux/platform_device.h>
17 #include <linux/pm.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/slab.h>
20 #include <linux/videodev2.h>
21 #include <linux/workqueue.h>
22 #include <media/v4l2-event.h>
23 #include <media/v4l2-mem2mem.h>
24 #include <media/videobuf2-core.h>
25 #include <media/videobuf2-vmalloc.h>
26
27 #include "hantro_v4l2.h"
28 #include "hantro.h"
29 #include "hantro_hw.h"
30
#define DRIVER_NAME "hantro-vpu"

/* Module-wide debug verbosity, settable at runtime via the "debug" param. */
int hantro_debug;
module_param_named(debug, hantro_debug, int, 0644);
MODULE_PARM_DESC(debug,
		 "Debug level - higher value produces more verbose messages");
37
/*
 * Look up a control on the context's handler and return a pointer to its
 * current payload, or NULL when no such control exists.
 */
void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id)
{
	struct v4l2_ctrl *ctrl = v4l2_ctrl_find(&ctx->ctrl_handler, id);

	if (!ctrl)
		return NULL;

	return ctrl->p_cur.p;
}
45
hantro_get_ref(struct hantro_ctx * ctx,u64 ts)46 dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
47 {
48 struct vb2_queue *q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
49 struct vb2_buffer *buf;
50
51 buf = vb2_find_buffer(q, ts);
52 if (!buf)
53 return 0;
54 return hantro_get_dec_buf_addr(ctx, buf);
55 }
56
/* Event queued to userspace when the last buffer of a drain is completed. */
static const struct v4l2_event hantro_eos_event = {
	.type = V4L2_EVENT_EOS
};
60
hantro_job_finish_no_pm(struct hantro_dev * vpu,struct hantro_ctx * ctx,enum vb2_buffer_state result)61 static void hantro_job_finish_no_pm(struct hantro_dev *vpu,
62 struct hantro_ctx *ctx,
63 enum vb2_buffer_state result)
64 {
65 struct vb2_v4l2_buffer *src, *dst;
66
67 src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
68 dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
69
70 if (WARN_ON(!src))
71 return;
72 if (WARN_ON(!dst))
73 return;
74
75 src->sequence = ctx->sequence_out++;
76 dst->sequence = ctx->sequence_cap++;
77
78 if (v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx, src)) {
79 dst->flags |= V4L2_BUF_FLAG_LAST;
80 v4l2_event_queue_fh(&ctx->fh, &hantro_eos_event);
81 v4l2_m2m_mark_stopped(ctx->fh.m2m_ctx);
82 }
83
84 v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
85 result);
86 }
87
/*
 * Complete the current job and release the resources acquired in
 * device_run(): drop the runtime-PM reference (autosuspend) and gate the
 * clocks, then finish the job itself.
 */
static void hantro_job_finish(struct hantro_dev *vpu,
			      struct hantro_ctx *ctx,
			      enum vb2_buffer_state result)
{
	/* Balance the pm_runtime_resume_and_get() from device_run(). */
	pm_runtime_put_autosuspend(vpu->dev);

	/* Balance the clk_bulk_enable() from device_run(). */
	clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);

	hantro_job_finish_no_pm(vpu, ctx, result);
}
98
hantro_irq_done(struct hantro_dev * vpu,enum vb2_buffer_state result)99 void hantro_irq_done(struct hantro_dev *vpu,
100 enum vb2_buffer_state result)
101 {
102 struct hantro_ctx *ctx =
103 v4l2_m2m_get_curr_priv(vpu->m2m_dev);
104
105 /*
106 * If cancel_delayed_work returns false
107 * the timeout expired. The watchdog is running,
108 * and will take care of finishing the job.
109 */
110 if (cancel_delayed_work(&vpu->watchdog_work)) {
111 if (result == VB2_BUF_STATE_DONE && ctx->codec_ops->done)
112 ctx->codec_ops->done(ctx);
113 hantro_job_finish(vpu, ctx, result);
114 }
115 }
116
hantro_watchdog(struct work_struct * work)117 void hantro_watchdog(struct work_struct *work)
118 {
119 struct hantro_dev *vpu;
120 struct hantro_ctx *ctx;
121
122 vpu = container_of(to_delayed_work(work),
123 struct hantro_dev, watchdog_work);
124 ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
125 if (ctx) {
126 vpu_err("frame processing timed out!\n");
127 if (ctx->codec_ops->reset)
128 ctx->codec_ops->reset(ctx);
129 hantro_job_finish(vpu, ctx, VB2_BUF_STATE_ERROR);
130 }
131 }
132
hantro_start_prepare_run(struct hantro_ctx * ctx)133 void hantro_start_prepare_run(struct hantro_ctx *ctx)
134 {
135 struct vb2_v4l2_buffer *src_buf;
136
137 src_buf = hantro_get_src_buf(ctx);
138 v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
139 &ctx->ctrl_handler);
140
141 if (!ctx->is_encoder && !ctx->dev->variant->late_postproc) {
142 if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
143 hantro_postproc_enable(ctx);
144 else
145 hantro_postproc_disable(ctx);
146 }
147 }
148
hantro_end_prepare_run(struct hantro_ctx * ctx)149 void hantro_end_prepare_run(struct hantro_ctx *ctx)
150 {
151 struct vb2_v4l2_buffer *src_buf;
152
153 if (!ctx->is_encoder && ctx->dev->variant->late_postproc) {
154 if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
155 hantro_postproc_enable(ctx);
156 else
157 hantro_postproc_disable(ctx);
158 }
159
160 src_buf = hantro_get_src_buf(ctx);
161 v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
162 &ctx->ctrl_handler);
163
164 /* Kick the watchdog. */
165 schedule_delayed_work(&ctx->dev->watchdog_work,
166 msecs_to_jiffies(2000));
167 }
168
device_run(void * priv)169 static void device_run(void *priv)
170 {
171 struct hantro_ctx *ctx = priv;
172 struct vb2_v4l2_buffer *src, *dst;
173 int ret;
174
175 src = hantro_get_src_buf(ctx);
176 dst = hantro_get_dst_buf(ctx);
177
178 ret = pm_runtime_resume_and_get(ctx->dev->dev);
179 if (ret < 0)
180 goto err_cancel_job;
181
182 ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
183 if (ret)
184 goto err_cancel_job;
185
186 v4l2_m2m_buf_copy_metadata(src, dst, true);
187
188 if (ctx->codec_ops->run(ctx))
189 goto err_cancel_job;
190
191 return;
192
193 err_cancel_job:
194 hantro_job_finish_no_pm(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
195 }
196
/* mem2mem framework hooks: job scheduling only. */
static const struct v4l2_m2m_ops vpu_m2m_ops = {
	.device_run = device_run,
};
200
201 static int
queue_init(void * priv,struct vb2_queue * src_vq,struct vb2_queue * dst_vq)202 queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
203 {
204 struct hantro_ctx *ctx = priv;
205 int ret;
206
207 src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
208 src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
209 src_vq->drv_priv = ctx;
210 src_vq->ops = &hantro_queue_ops;
211 src_vq->mem_ops = &vb2_dma_contig_memops;
212
213 /*
214 * Driver does mostly sequential access, so sacrifice TLB efficiency
215 * for faster allocation. Also, no CPU access on the source queue,
216 * so no kernel mapping needed.
217 */
218 src_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
219 DMA_ATTR_NO_KERNEL_MAPPING;
220 src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
221 src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
222 src_vq->lock = &ctx->dev->vpu_mutex;
223 src_vq->dev = ctx->dev->v4l2_dev.dev;
224 src_vq->supports_requests = true;
225
226 ret = vb2_queue_init(src_vq);
227 if (ret)
228 return ret;
229
230 dst_vq->bidirectional = true;
231 dst_vq->mem_ops = &vb2_dma_contig_memops;
232 dst_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
233 /*
234 * The Kernel needs access to the JPEG destination buffer for the
235 * JPEG encoder to fill in the JPEG headers.
236 */
237 if (!ctx->is_encoder) {
238 dst_vq->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
239 dst_vq->max_num_buffers = MAX_POSTPROC_BUFFERS;
240 }
241
242 dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
243 dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
244 dst_vq->drv_priv = ctx;
245 dst_vq->ops = &hantro_queue_ops;
246 dst_vq->buf_struct_size = sizeof(struct hantro_decoded_buffer);
247 dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
248 dst_vq->lock = &ctx->dev->vpu_mutex;
249 dst_vq->dev = ctx->dev->v4l2_dev.dev;
250
251 return vb2_queue_init(dst_vq);
252 }
253
hantro_try_ctrl(struct v4l2_ctrl * ctrl)254 static int hantro_try_ctrl(struct v4l2_ctrl *ctrl)
255 {
256 if (ctrl->id == V4L2_CID_STATELESS_H264_SPS) {
257 const struct v4l2_ctrl_h264_sps *sps = ctrl->p_new.p_h264_sps;
258
259 if (sps->chroma_format_idc > 1)
260 /* Only 4:0:0 and 4:2:0 are supported */
261 return -EINVAL;
262 if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
263 /* Luma and chroma bit depth mismatch */
264 return -EINVAL;
265 if (sps->bit_depth_luma_minus8 != 0)
266 /* Only 8-bit is supported */
267 return -EINVAL;
268 } else if (ctrl->id == V4L2_CID_STATELESS_HEVC_SPS) {
269 const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps;
270
271 if (sps->bit_depth_luma_minus8 != 0 && sps->bit_depth_luma_minus8 != 2)
272 /* Only 8-bit and 10-bit are supported */
273 return -EINVAL;
274 } else if (ctrl->id == V4L2_CID_STATELESS_VP9_FRAME) {
275 const struct v4l2_ctrl_vp9_frame *dec_params = ctrl->p_new.p_vp9_frame;
276
277 /* We only support profile 0 */
278 if (dec_params->profile != 0)
279 return -EINVAL;
280 } else if (ctrl->id == V4L2_CID_STATELESS_AV1_SEQUENCE) {
281 const struct v4l2_ctrl_av1_sequence *sequence = ctrl->p_new.p_av1_sequence;
282
283 if (sequence->bit_depth != 8 && sequence->bit_depth != 10)
284 return -EINVAL;
285 }
286
287 return 0;
288 }
289
hantro_jpeg_s_ctrl(struct v4l2_ctrl * ctrl)290 static int hantro_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
291 {
292 struct hantro_ctx *ctx;
293
294 ctx = container_of(ctrl->handler,
295 struct hantro_ctx, ctrl_handler);
296
297 vpu_debug(1, "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);
298
299 switch (ctrl->id) {
300 case V4L2_CID_JPEG_COMPRESSION_QUALITY:
301 ctx->jpeg_quality = ctrl->val;
302 break;
303 default:
304 return -EINVAL;
305 }
306
307 return 0;
308 }
309
hantro_vp9_s_ctrl(struct v4l2_ctrl * ctrl)310 static int hantro_vp9_s_ctrl(struct v4l2_ctrl *ctrl)
311 {
312 struct hantro_ctx *ctx;
313
314 ctx = container_of(ctrl->handler,
315 struct hantro_ctx, ctrl_handler);
316
317 switch (ctrl->id) {
318 case V4L2_CID_STATELESS_VP9_FRAME: {
319 int bit_depth = ctrl->p_new.p_vp9_frame->bit_depth;
320
321 if (ctx->bit_depth == bit_depth)
322 return 0;
323
324 return hantro_reset_raw_fmt(ctx, bit_depth, HANTRO_AUTO_POSTPROC);
325 }
326 default:
327 return -EINVAL;
328 }
329
330 return 0;
331 }
332
hantro_hevc_s_ctrl(struct v4l2_ctrl * ctrl)333 static int hantro_hevc_s_ctrl(struct v4l2_ctrl *ctrl)
334 {
335 struct hantro_ctx *ctx;
336
337 ctx = container_of(ctrl->handler,
338 struct hantro_ctx, ctrl_handler);
339
340 switch (ctrl->id) {
341 case V4L2_CID_STATELESS_HEVC_SPS: {
342 const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps;
343 int bit_depth = sps->bit_depth_luma_minus8 + 8;
344
345 if (ctx->bit_depth == bit_depth)
346 return 0;
347
348 return hantro_reset_raw_fmt(ctx, bit_depth, HANTRO_AUTO_POSTPROC);
349 }
350 default:
351 return -EINVAL;
352 }
353
354 return 0;
355 }
356
hantro_av1_s_ctrl(struct v4l2_ctrl * ctrl)357 static int hantro_av1_s_ctrl(struct v4l2_ctrl *ctrl)
358 {
359 struct hantro_ctx *ctx;
360
361 ctx = container_of(ctrl->handler,
362 struct hantro_ctx, ctrl_handler);
363
364 switch (ctrl->id) {
365 case V4L2_CID_STATELESS_AV1_SEQUENCE:
366 {
367 int bit_depth = ctrl->p_new.p_av1_sequence->bit_depth;
368 bool need_postproc = HANTRO_AUTO_POSTPROC;
369
370 if (ctrl->p_new.p_av1_sequence->flags
371 & V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT)
372 need_postproc = HANTRO_FORCE_POSTPROC;
373
374 if (ctx->bit_depth == bit_depth &&
375 ctx->need_postproc == need_postproc)
376 return 0;
377
378 return hantro_reset_raw_fmt(ctx, bit_depth, need_postproc);
379 }
380 default:
381 return -EINVAL;
382 }
383
384 return 0;
385 }
386
/* Validation-only ops for controls with hardware constraints. */
static const struct v4l2_ctrl_ops hantro_ctrl_ops = {
	.try_ctrl = hantro_try_ctrl,
};

/* Ops for the JPEG encoder quality control. */
static const struct v4l2_ctrl_ops hantro_jpeg_ctrl_ops = {
	.s_ctrl = hantro_jpeg_s_ctrl,
};

/* Ops for the VP9 frame control (bit-depth tracking). */
static const struct v4l2_ctrl_ops hantro_vp9_ctrl_ops = {
	.s_ctrl = hantro_vp9_s_ctrl,
};

/* Ops for the HEVC SPS control (validation + bit-depth tracking). */
static const struct v4l2_ctrl_ops hantro_hevc_ctrl_ops = {
	.try_ctrl = hantro_try_ctrl,
	.s_ctrl = hantro_hevc_s_ctrl,
};

/* Ops for the AV1 sequence control (validation + bit-depth/film-grain). */
static const struct v4l2_ctrl_ops hantro_av1_ctrl_ops = {
	.try_ctrl = hantro_try_ctrl,
	.s_ctrl = hantro_av1_s_ctrl,
};
408
/* Marker segments the JPEG encoder emits in the stream header. */
#define HANTRO_JPEG_ACTIVE_MARKERS	(V4L2_JPEG_ACTIVE_MARKER_APP0 | \
					 V4L2_JPEG_ACTIVE_MARKER_COM | \
					 V4L2_JPEG_ACTIVE_MARKER_DQT | \
					 V4L2_JPEG_ACTIVE_MARKER_DHT)

/*
 * All controls the driver may expose; hantro_ctrls_setup() registers only
 * those whose .codec mask intersects the codecs the variant supports.
 */
static const struct hantro_ctrl controls[] = {
	{
		.codec = HANTRO_JPEG_ENCODER,
		.cfg = {
			.id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
			.min = 5,
			.max = 100,
			.step = 1,
			.def = 50,
			.ops = &hantro_jpeg_ctrl_ops,
		},
	}, {
		.codec = HANTRO_JPEG_ENCODER,
		.cfg = {
			.id = V4L2_CID_JPEG_ACTIVE_MARKER,
			.max = HANTRO_JPEG_ACTIVE_MARKERS,
			.def = HANTRO_JPEG_ACTIVE_MARKERS,
			/*
			 * Changing the set of active markers/segments also
			 * messes up the alignment of the JPEG header, which
			 * is needed to allow the hardware to write directly
			 * to the output buffer. Implementing this introduces
			 * a lot of complexity for little gain, as the markers
			 * enabled is already the minimum required set.
			 */
			.flags = V4L2_CTRL_FLAG_READ_ONLY,
		},
	}, {
		.codec = HANTRO_MPEG2_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_MPEG2_SEQUENCE,
		},
	}, {
		.codec = HANTRO_MPEG2_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_MPEG2_PICTURE,
		},
	}, {
		.codec = HANTRO_MPEG2_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_MPEG2_QUANTISATION,
		},
	}, {
		.codec = HANTRO_VP8_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_VP8_FRAME,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_SPS,
			.ops = &hantro_ctrl_ops,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_PPS,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_H264_SCALING_MATRIX,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			/* Only frame-based decoding is supported. */
			.id = V4L2_CID_STATELESS_H264_DECODE_MODE,
			.min = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
			.def = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
			.max = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			/* Annex-B start codes are required in the bitstream. */
			.id = V4L2_CID_STATELESS_H264_START_CODE,
			.min = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
			.def = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
			.max = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
		},
	}, {
		.codec = HANTRO_H264_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
			.min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
			.max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
			.menu_skip_mask =
			BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
			.def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
		}
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_DECODE_MODE,
			.min = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
			.max = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
			.def = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_START_CODE,
			.min = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
			.max = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
			.def = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
			.min = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
			.max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
			.def = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
			.min = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
			.max = V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_SPS,
			.ops = &hantro_hevc_ctrl_ops,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_PPS,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_DECODE_PARAMS,
		},
	}, {
		.codec = HANTRO_HEVC_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_HEVC_SCALING_MATRIX,
		},
	}, {
		.codec = HANTRO_VP9_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_VP9_FRAME,
			.ops = &hantro_vp9_ctrl_ops,
		},
	}, {
		.codec = HANTRO_VP9_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_VP9_COMPRESSED_HDR,
		},
	}, {
		.codec = HANTRO_AV1_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_AV1_FRAME,
		},
	}, {
		.codec = HANTRO_AV1_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY,
			.dims = { V4L2_AV1_MAX_TILE_COUNT },
		},
	}, {
		.codec = HANTRO_AV1_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_AV1_SEQUENCE,
			.ops = &hantro_av1_ctrl_ops,
		},
	}, {
		.codec = HANTRO_AV1_DECODER,
		.cfg = {
			.id = V4L2_CID_STATELESS_AV1_FILM_GRAIN,
		},
	},
};
595
hantro_ctrls_setup(struct hantro_dev * vpu,struct hantro_ctx * ctx,int allowed_codecs)596 static int hantro_ctrls_setup(struct hantro_dev *vpu,
597 struct hantro_ctx *ctx,
598 int allowed_codecs)
599 {
600 int i, num_ctrls = ARRAY_SIZE(controls);
601
602 v4l2_ctrl_handler_init(&ctx->ctrl_handler, num_ctrls);
603
604 for (i = 0; i < num_ctrls; i++) {
605 if (!(allowed_codecs & controls[i].codec))
606 continue;
607
608 v4l2_ctrl_new_custom(&ctx->ctrl_handler,
609 &controls[i].cfg, NULL);
610 if (ctx->ctrl_handler.error) {
611 vpu_err("Adding control (%d) failed %d\n",
612 controls[i].cfg.id,
613 ctx->ctrl_handler.error);
614 v4l2_ctrl_handler_free(&ctx->ctrl_handler);
615 return ctx->ctrl_handler.error;
616 }
617 }
618 return v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
619 }
620
621 /*
622 * V4L2 file operations.
623 */
624
hantro_open(struct file * filp)625 static int hantro_open(struct file *filp)
626 {
627 struct hantro_dev *vpu = video_drvdata(filp);
628 struct video_device *vdev = video_devdata(filp);
629 struct hantro_func *func = hantro_vdev_to_func(vdev);
630 struct hantro_ctx *ctx;
631 int allowed_codecs, ret;
632
633 /*
634 * We do not need any extra locking here, because we operate only
635 * on local data here, except reading few fields from dev, which
636 * do not change through device's lifetime (which is guaranteed by
637 * reference on module from open()) and V4L2 internal objects (such
638 * as vdev and ctx->fh), which have proper locking done in respective
639 * helper functions used here.
640 */
641
642 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
643 if (!ctx)
644 return -ENOMEM;
645
646 ctx->dev = vpu;
647 if (func->id == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
648 allowed_codecs = vpu->variant->codec & HANTRO_ENCODERS;
649 ctx->is_encoder = true;
650 } else if (func->id == MEDIA_ENT_F_PROC_VIDEO_DECODER) {
651 allowed_codecs = vpu->variant->codec & HANTRO_DECODERS;
652 ctx->is_encoder = false;
653 } else {
654 ret = -ENODEV;
655 goto err_ctx_free;
656 }
657
658 ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx, queue_init);
659 if (IS_ERR(ctx->fh.m2m_ctx)) {
660 ret = PTR_ERR(ctx->fh.m2m_ctx);
661 goto err_ctx_free;
662 }
663
664 v4l2_fh_init(&ctx->fh, vdev);
665 filp->private_data = &ctx->fh;
666 v4l2_fh_add(&ctx->fh);
667
668 hantro_reset_fmts(ctx);
669
670 ret = hantro_ctrls_setup(vpu, ctx, allowed_codecs);
671 if (ret) {
672 vpu_err("Failed to set up controls\n");
673 goto err_fh_free;
674 }
675 ctx->fh.ctrl_handler = &ctx->ctrl_handler;
676
677 return 0;
678
679 err_fh_free:
680 v4l2_fh_del(&ctx->fh);
681 v4l2_fh_exit(&ctx->fh);
682 err_ctx_free:
683 kfree(ctx);
684 return ret;
685 }
686
/*
 * Release a file handle: tear down the m2m context, file handle and
 * control handler created in hantro_open(), in reverse order.
 */
static int hantro_release(struct file *filp)
{
	struct hantro_ctx *ctx =
		container_of(filp->private_data, struct hantro_ctx, fh);

	/*
	 * No need for extra locking because this was the last reference
	 * to this file.
	 */
	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
	kfree(ctx);

	return 0;
}
704
/* File operations shared by the encoder and decoder video nodes. */
static const struct v4l2_file_operations hantro_fops = {
	.owner = THIS_MODULE,
	.open = hantro_open,
	.release = hantro_release,
	.poll = v4l2_m2m_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = v4l2_m2m_fop_mmap,
};
713
/* Supported SoCs, grouped per vendor; .data points at the variant config. */
static const struct of_device_id of_hantro_match[] = {
#ifdef CONFIG_VIDEO_HANTRO_ROCKCHIP
	{ .compatible = "rockchip,px30-vpu", .data = &px30_vpu_variant, },
	{ .compatible = "rockchip,rk3036-vpu", .data = &rk3036_vpu_variant, },
	{ .compatible = "rockchip,rk3066-vpu", .data = &rk3066_vpu_variant, },
	{ .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, },
	{ .compatible = "rockchip,rk3328-vpu", .data = &rk3328_vpu_variant, },
	{ .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
	{ .compatible = "rockchip,rk3568-vepu", .data = &rk3568_vepu_variant, },
	{ .compatible = "rockchip,rk3568-vpu", .data = &rk3568_vpu_variant, },
	{ .compatible = "rockchip,rk3588-vepu121", .data = &rk3568_vepu_variant, },
	{ .compatible = "rockchip,rk3588-av1-vpu", .data = &rk3588_vpu981_variant, },
#endif
#ifdef CONFIG_VIDEO_HANTRO_IMX8M
	{ .compatible = "nxp,imx8mm-vpu-g1", .data = &imx8mm_vpu_g1_variant, },
	{ .compatible = "nxp,imx8mq-vpu", .data = &imx8mq_vpu_variant, },
	{ .compatible = "nxp,imx8mq-vpu-g1", .data = &imx8mq_vpu_g1_variant },
	{ .compatible = "nxp,imx8mq-vpu-g2", .data = &imx8mq_vpu_g2_variant },
#endif
#ifdef CONFIG_VIDEO_HANTRO_SAMA5D4
	{ .compatible = "microchip,sama5d4-vdec", .data = &sama5d4_vdec_variant, },
#endif
#ifdef CONFIG_VIDEO_HANTRO_SUNXI
	{ .compatible = "allwinner,sun50i-h6-vpu-g2", .data = &sunxi_vpu_variant, },
#endif
#ifdef CONFIG_VIDEO_HANTRO_STM32MP25
	{ .compatible = "st,stm32mp25-vdec", .data = &stm32mp25_vdec_variant, },
	{ .compatible = "st,stm32mp25-venc", .data = &stm32mp25_venc_variant, },
#endif
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_hantro_match);
746
hantro_register_entity(struct media_device * mdev,struct media_entity * entity,const char * entity_name,struct media_pad * pads,int num_pads,int function,struct video_device * vdev)747 static int hantro_register_entity(struct media_device *mdev,
748 struct media_entity *entity,
749 const char *entity_name,
750 struct media_pad *pads, int num_pads,
751 int function, struct video_device *vdev)
752 {
753 char *name;
754 int ret;
755
756 entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
757 if (function == MEDIA_ENT_F_IO_V4L) {
758 entity->info.dev.major = VIDEO_MAJOR;
759 entity->info.dev.minor = vdev->minor;
760 }
761
762 name = devm_kasprintf(mdev->dev, GFP_KERNEL, "%s-%s", vdev->name,
763 entity_name);
764 if (!name)
765 return -ENOMEM;
766
767 entity->name = name;
768 entity->function = function;
769
770 ret = media_entity_pads_init(entity, num_pads, pads);
771 if (ret)
772 return ret;
773
774 ret = media_device_register_entity(mdev, entity);
775 if (ret)
776 return ret;
777
778 return 0;
779 }
780
/*
 * Build the media-controller graph for one hardware function: a source I/O
 * entity, a processing entity and a sink I/O entity, all backed by the same
 * video node, linked source -> proc -> sink, plus the interface links from
 * the devnode to both I/O entities. On failure, everything created so far
 * is torn down in reverse order via the goto chain.
 */
static int hantro_attach_func(struct hantro_dev *vpu,
			      struct hantro_func *func)
{
	struct media_device *mdev = &vpu->mdev;
	struct media_link *link;
	int ret;

	/* Create the three entities with their pads */
	func->source_pad.flags = MEDIA_PAD_FL_SOURCE;
	ret = hantro_register_entity(mdev, &func->vdev.entity, "source",
				     &func->source_pad, 1, MEDIA_ENT_F_IO_V4L,
				     &func->vdev);
	if (ret)
		return ret;

	func->proc_pads[0].flags = MEDIA_PAD_FL_SINK;
	func->proc_pads[1].flags = MEDIA_PAD_FL_SOURCE;
	ret = hantro_register_entity(mdev, &func->proc, "proc",
				     func->proc_pads, 2, func->id,
				     &func->vdev);
	if (ret)
		goto err_rel_entity0;

	func->sink_pad.flags = MEDIA_PAD_FL_SINK;
	ret = hantro_register_entity(mdev, &func->sink, "sink",
				     &func->sink_pad, 1, MEDIA_ENT_F_IO_V4L,
				     &func->vdev);
	if (ret)
		goto err_rel_entity1;

	/* Connect the three entities */
	ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 0,
				    MEDIA_LNK_FL_IMMUTABLE |
				    MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rel_entity2;

	ret = media_create_pad_link(&func->proc, 1, &func->sink, 0,
				    MEDIA_LNK_FL_IMMUTABLE |
				    MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rm_links0;

	/* Create video interface */
	func->intf_devnode = media_devnode_create(mdev, MEDIA_INTF_T_V4L_VIDEO,
						  0, VIDEO_MAJOR,
						  func->vdev.minor);
	if (!func->intf_devnode) {
		ret = -ENOMEM;
		goto err_rm_links1;
	}

	/* Connect the two DMA engines to the interface */
	link = media_create_intf_link(&func->vdev.entity,
				      &func->intf_devnode->intf,
				      MEDIA_LNK_FL_IMMUTABLE |
				      MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}

	link = media_create_intf_link(&func->sink, &func->intf_devnode->intf,
				      MEDIA_LNK_FL_IMMUTABLE |
				      MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}
	return 0;

	/* Error unwind: strictly reverse order of construction. */
err_rm_devnode:
	media_devnode_remove(func->intf_devnode);

err_rm_links1:
	media_entity_remove_links(&func->sink);

err_rm_links0:
	media_entity_remove_links(&func->proc);
	media_entity_remove_links(&func->vdev.entity);

err_rel_entity2:
	media_device_unregister_entity(&func->sink);

err_rel_entity1:
	media_device_unregister_entity(&func->proc);

err_rel_entity0:
	media_device_unregister_entity(&func->vdev.entity);
	return ret;
}
872
/*
 * Undo hantro_attach_func(): remove the devnode, links and entities in
 * reverse order of their creation.
 */
static void hantro_detach_func(struct hantro_func *func)
{
	media_devnode_remove(func->intf_devnode);
	media_entity_remove_links(&func->sink);
	media_entity_remove_links(&func->proc);
	media_entity_remove_links(&func->vdev.entity);
	media_device_unregister_entity(&func->sink);
	media_device_unregister_entity(&func->proc);
	media_device_unregister_entity(&func->vdev.entity);
}
883
hantro_add_func(struct hantro_dev * vpu,unsigned int funcid)884 static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid)
885 {
886 const struct of_device_id *match;
887 struct hantro_func *func;
888 struct video_device *vfd;
889 int ret;
890
891 match = of_match_node(of_hantro_match, vpu->dev->of_node);
892 func = devm_kzalloc(vpu->dev, sizeof(*func), GFP_KERNEL);
893 if (!func) {
894 v4l2_err(&vpu->v4l2_dev, "Failed to allocate video device\n");
895 return -ENOMEM;
896 }
897
898 func->id = funcid;
899
900 vfd = &func->vdev;
901 vfd->fops = &hantro_fops;
902 vfd->release = video_device_release_empty;
903 vfd->lock = &vpu->vpu_mutex;
904 vfd->v4l2_dev = &vpu->v4l2_dev;
905 vfd->vfl_dir = VFL_DIR_M2M;
906 vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
907 vfd->ioctl_ops = &hantro_ioctl_ops;
908 strscpy(vfd->name, match->compatible, sizeof(vfd->name));
909 strlcat(vfd->name, funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER ?
910 "-enc" : "-dec", sizeof(vfd->name));
911
912 if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
913 vpu->encoder = func;
914 v4l2_disable_ioctl(vfd, VIDIOC_TRY_DECODER_CMD);
915 v4l2_disable_ioctl(vfd, VIDIOC_DECODER_CMD);
916 } else {
917 vpu->decoder = func;
918 v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
919 v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD);
920 }
921
922 video_set_drvdata(vfd, vpu);
923
924 ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
925 if (ret) {
926 v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
927 return ret;
928 }
929
930 ret = hantro_attach_func(vpu, func);
931 if (ret) {
932 v4l2_err(&vpu->v4l2_dev,
933 "Failed to attach functionality to the media device\n");
934 goto err_unreg_dev;
935 }
936
937 v4l2_info(&vpu->v4l2_dev, "registered %s as /dev/video%d\n", vfd->name,
938 vfd->num);
939
940 return 0;
941
942 err_unreg_dev:
943 video_unregister_device(vfd);
944 return ret;
945 }
946
hantro_add_enc_func(struct hantro_dev * vpu)947 static int hantro_add_enc_func(struct hantro_dev *vpu)
948 {
949 if (!vpu->variant->enc_fmts)
950 return 0;
951
952 return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
953 }
954
hantro_add_dec_func(struct hantro_dev * vpu)955 static int hantro_add_dec_func(struct hantro_dev *vpu)
956 {
957 if (!vpu->variant->dec_fmts)
958 return 0;
959
960 return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
961 }
962
hantro_remove_func(struct hantro_dev * vpu,unsigned int funcid)963 static void hantro_remove_func(struct hantro_dev *vpu,
964 unsigned int funcid)
965 {
966 struct hantro_func *func;
967
968 if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER)
969 func = vpu->encoder;
970 else
971 func = vpu->decoder;
972
973 if (!func)
974 return;
975
976 hantro_detach_func(func);
977 video_unregister_device(&func->vdev);
978 }
979
/* Remove the encoder function registered by hantro_add_enc_func(). */
static void hantro_remove_enc_func(struct hantro_dev *vpu)
{
	hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
}
984
/* Remove the decoder function registered by hantro_add_dec_func(). */
static void hantro_remove_dec_func(struct hantro_dev *vpu)
{
	hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
}
989
/* Media request API hooks: validate and queue requests for m2m jobs. */
static const struct media_device_ops hantro_m2m_media_ops = {
	.req_validate = vb2_request_validate,
	.req_queue = v4l2_m2m_request_queue,
};
994
995 /*
996 * Some SoCs, like RK3588 have multiple identical Hantro cores, but the
997 * kernel is currently missing support for multi-core handling. Exposing
998 * separate devices for each core to userspace is bad, since that does
999 * not allow scheduling tasks properly (and creates ABI). With this workaround
1000 * the driver will only probe for the first core and early exit for the other
1001 * cores. Once the driver gains multi-core support, the same technique
1002 * for detecting the main core can be used to cluster all cores together.
1003 */
hantro_disable_multicore(struct hantro_dev * vpu)1004 static int hantro_disable_multicore(struct hantro_dev *vpu)
1005 {
1006 struct device_node *node = NULL;
1007 const char *compatible;
1008 bool is_main_core;
1009 int ret;
1010
1011 /* Intentionally ignores the fallback strings */
1012 ret = of_property_read_string(vpu->dev->of_node, "compatible", &compatible);
1013 if (ret)
1014 return ret;
1015
1016 /* The first compatible and available node found is considered the main core */
1017 do {
1018 node = of_find_compatible_node(node, NULL, compatible);
1019 if (of_device_is_available(node))
1020 break;
1021 } while (node);
1022
1023 if (!node)
1024 return -EINVAL;
1025
1026 is_main_core = (vpu->dev->of_node == node);
1027
1028 of_node_put(node);
1029
1030 if (!is_main_core) {
1031 dev_info(vpu->dev, "missing multi-core support, ignoring this instance\n");
1032 return -ENODEV;
1033 }
1034
1035 return 0;
1036 }
1037
/*
 * Probe one Hantro VPU instance: allocate the driver context, map clocks,
 * resets, register spaces and IRQs from the variant description, then
 * bring up runtime PM and register the V4L2 mem2mem, encoder/decoder and
 * media-controller entities.  Error paths unwind in strict reverse order
 * of acquisition via the goto chain at the bottom.
 */
static int hantro_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct hantro_dev *vpu;
	int num_bases;
	int i, ret;

	vpu = devm_kzalloc(&pdev->dev, sizeof(*vpu), GFP_KERNEL);
	if (!vpu)
		return -ENOMEM;

	vpu->dev = &pdev->dev;
	vpu->pdev = pdev;
	mutex_init(&vpu->vpu_mutex);
	spin_lock_init(&vpu->irqlock);

	/* The variant describes per-SoC clocks, regs, IRQs and codec hooks. */
	match = of_match_node(of_hantro_match, pdev->dev.of_node);
	vpu->variant = match->data;

	/* Bail out early on secondary cores of a multi-core VPU. */
	ret = hantro_disable_multicore(vpu);
	if (ret)
		return ret;

	/*
	 * Support for nxp,imx8mq-vpu is kept for backwards compatibility
	 * but it's deprecated. Please update your DTS file to use
	 * nxp,imx8mq-vpu-g1 or nxp,imx8mq-vpu-g2 instead.
	 */
	if (of_device_is_compatible(pdev->dev.of_node, "nxp,imx8mq-vpu"))
		dev_warn(&pdev->dev, "%s compatible is deprecated\n",
			 match->compatible);

	INIT_DELAYED_WORK(&vpu->watchdog_work, hantro_watchdog);

	vpu->clocks = devm_kcalloc(&pdev->dev, vpu->variant->num_clocks,
				   sizeof(*vpu->clocks), GFP_KERNEL);
	if (!vpu->clocks)
		return -ENOMEM;

	if (vpu->variant->num_clocks > 1) {
		for (i = 0; i < vpu->variant->num_clocks; i++)
			vpu->clocks[i].id = vpu->variant->clk_names[i];

		ret = devm_clk_bulk_get(&pdev->dev, vpu->variant->num_clocks,
					vpu->clocks);
		if (ret)
			return ret;
	} else {
		/*
		 * If the driver has a single clk, chances are there will be no
		 * actual name in the DT bindings.
		 */
		vpu->clocks[0].clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(vpu->clocks[0].clk))
			return PTR_ERR(vpu->clocks[0].clk);
	}

	/* Resets are optional: vpu->resets may be a valid no-op control. */
	vpu->resets = devm_reset_control_array_get_optional_exclusive(&pdev->dev);
	if (IS_ERR(vpu->resets))
		return PTR_ERR(vpu->resets);

	/* Variants with num_regs == 0 still have one unnamed register bank. */
	num_bases = vpu->variant->num_regs ?: 1;
	vpu->reg_bases = devm_kcalloc(&pdev->dev, num_bases,
				      sizeof(*vpu->reg_bases), GFP_KERNEL);
	if (!vpu->reg_bases)
		return -ENOMEM;

	for (i = 0; i < num_bases; i++) {
		vpu->reg_bases[i] = vpu->variant->reg_names ?
		      devm_platform_ioremap_resource_byname(pdev, vpu->variant->reg_names[i]) :
		      devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(vpu->reg_bases[i]))
			return PTR_ERR(vpu->reg_bases[i]);
	}
	/* Encoder/decoder blocks live at fixed offsets inside bank 0. */
	vpu->enc_base = vpu->reg_bases[0] + vpu->variant->enc_offset;
	vpu->dec_base = vpu->reg_bases[0] + vpu->variant->dec_offset;

	/*
	 * TODO: Eventually allow taking advantage of full 64-bit address space.
	 * Until then we assume the MSB portion of buffers' base addresses is
	 * always 0 due to this masking operation.
	 */
	ret = dma_set_coherent_mask(vpu->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(vpu->dev, "Could not set DMA coherent mask.\n");
		return ret;
	}
	vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));

	for (i = 0; i < vpu->variant->num_irqs; i++) {
		const char *irq_name;
		int irq;

		/* Entries without a handler describe unused IRQ lines. */
		if (!vpu->variant->irqs[i].handler)
			continue;

		if (vpu->variant->num_irqs > 1) {
			irq_name = vpu->variant->irqs[i].name;
			irq = platform_get_irq_byname(vpu->pdev, irq_name);
		} else {
			/*
			 * If the driver has a single IRQ, chances are there
			 * will be no actual name in the DT bindings.
			 */
			irq_name = "default";
			irq = platform_get_irq(vpu->pdev, 0);
		}
		if (irq < 0)
			return irq;

		ret = devm_request_irq(vpu->dev, irq,
				       vpu->variant->irqs[i].handler, 0,
				       dev_name(vpu->dev), vpu);
		if (ret) {
			dev_err(vpu->dev, "Could not request %s IRQ.\n",
				irq_name);
			return ret;
		}
	}

	/* Optional variant-specific one-time hardware initialization. */
	if (vpu->variant->init) {
		ret = vpu->variant->init(vpu);
		if (ret) {
			dev_err(&pdev->dev, "Failed to init VPU hardware\n");
			return ret;
		}
	}

	pm_runtime_set_autosuspend_delay(vpu->dev, 100);
	pm_runtime_use_autosuspend(vpu->dev);
	pm_runtime_enable(vpu->dev);

	ret = reset_control_deassert(vpu->resets);
	if (ret) {
		dev_err(&pdev->dev, "Failed to deassert resets\n");
		goto err_pm_disable;
	}

	ret = clk_bulk_prepare(vpu->variant->num_clocks, vpu->clocks);
	if (ret) {
		dev_err(&pdev->dev, "Failed to prepare clocks\n");
		goto err_rst_assert;
	}

	ret = v4l2_device_register(&pdev->dev, &vpu->v4l2_dev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register v4l2 device\n");
		goto err_clk_unprepare;
	}
	platform_set_drvdata(pdev, vpu);

	vpu->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
	if (IS_ERR(vpu->m2m_dev)) {
		v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem device\n");
		ret = PTR_ERR(vpu->m2m_dev);
		goto err_v4l2_unreg;
	}

	/* Media controller device for request API / entity topology. */
	vpu->mdev.dev = vpu->dev;
	strscpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
	media_device_init(&vpu->mdev);
	vpu->mdev.ops = &hantro_m2m_media_ops;
	vpu->v4l2_dev.mdev = &vpu->mdev;

	ret = hantro_add_enc_func(vpu);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register encoder\n");
		goto err_m2m_rel;
	}

	ret = hantro_add_dec_func(vpu);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register decoder\n");
		goto err_rm_enc_func;
	}

	ret = media_device_register(&vpu->mdev);
	if (ret) {
		v4l2_err(&vpu->v4l2_dev, "Failed to register mem2mem media device\n");
		goto err_rm_dec_func;
	}

	return 0;

	/* Unwind strictly in reverse order of the setup above. */
err_rm_dec_func:
	hantro_remove_dec_func(vpu);
err_rm_enc_func:
	hantro_remove_enc_func(vpu);
err_m2m_rel:
	media_device_cleanup(&vpu->mdev);
	v4l2_m2m_release(vpu->m2m_dev);
err_v4l2_unreg:
	v4l2_device_unregister(&vpu->v4l2_dev);
err_clk_unprepare:
	clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
err_rst_assert:
	reset_control_assert(vpu->resets);
err_pm_disable:
	pm_runtime_dont_use_autosuspend(vpu->dev);
	pm_runtime_disable(vpu->dev);
	return ret;
}
1240
/*
 * Tear down one VPU instance.  The sequence mirrors the tail of
 * hantro_probe() in exact reverse order: media device, codec functions,
 * mem2mem, v4l2 device, clocks, resets and finally runtime PM.  Do not
 * reorder these calls.
 */
static void hantro_remove(struct platform_device *pdev)
{
	struct hantro_dev *vpu = platform_get_drvdata(pdev);

	v4l2_info(&vpu->v4l2_dev, "Removing %s\n", pdev->name);

	media_device_unregister(&vpu->mdev);
	hantro_remove_dec_func(vpu);
	hantro_remove_enc_func(vpu);
	media_device_cleanup(&vpu->mdev);
	v4l2_m2m_release(vpu->m2m_dev);
	v4l2_device_unregister(&vpu->v4l2_dev);
	clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
	reset_control_assert(vpu->resets);
	pm_runtime_dont_use_autosuspend(vpu->dev);
	pm_runtime_disable(vpu->dev);
}
1258
1259 #ifdef CONFIG_PM
hantro_runtime_resume(struct device * dev)1260 static int hantro_runtime_resume(struct device *dev)
1261 {
1262 struct hantro_dev *vpu = dev_get_drvdata(dev);
1263
1264 if (vpu->variant->runtime_resume)
1265 return vpu->variant->runtime_resume(vpu);
1266
1267 return 0;
1268 }
1269 #endif
1270
/*
 * PM operations: system sleep is routed through the runtime-PM helpers,
 * and only a runtime resume callback is provided (runtime suspend and
 * idle are NULL, so no work is done on suspend).
 */
static const struct dev_pm_ops hantro_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(NULL, hantro_runtime_resume, NULL)
};
1276
/* Platform driver glue: matched against the OF table declared elsewhere. */
static struct platform_driver hantro_driver = {
	.probe = hantro_probe,
	.remove = hantro_remove,
	.driver = {
		   .name = DRIVER_NAME,
		   .of_match_table = of_hantro_match,
		   .pm = &hantro_pm_ops,
	},
};
/* Generates module init/exit that register/unregister the driver. */
module_platform_driver(hantro_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Alpha Lin <Alpha.Lin@Rock-Chips.com>");
MODULE_AUTHOR("Tomasz Figa <tfiga@chromium.org>");
MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
MODULE_DESCRIPTION("Hantro VPU codec driver");