// SPDX-License-Identifier: GPL-2.0
/*
 * Rockchip Video Decoder driver
 *
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Based on rkvdec driver by Google LLC. (Tomasz Figa <tfiga@chromium.org>)
 * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/workqueue.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>

#include "rkvdec.h"
#include "rkvdec-regs.h"

static bool rkvdec_image_fmt_match(enum rkvdec_image_fmt fmt1,
				   enum rkvdec_image_fmt fmt2)
{
	return fmt1 == fmt2 || fmt2 == RKVDEC_IMG_FMT_ANY ||
	       fmt1 == RKVDEC_IMG_FMT_ANY;
}

static bool rkvdec_image_fmt_changed(struct rkvdec_ctx *ctx,
				     enum rkvdec_image_fmt image_fmt)
{
	if (image_fmt == RKVDEC_IMG_FMT_ANY)
		return false;

	return ctx->image_fmt != image_fmt;
}

static u32 rkvdec_enum_decoded_fmt(struct rkvdec_ctx *ctx, int index,
				   enum rkvdec_image_fmt image_fmt)
{
	const struct rkvdec_coded_fmt_desc *desc = ctx->coded_fmt_desc;
	int fmt_idx = -1;
	unsigned int i;

	if (WARN_ON(!desc))
		return 0;

	for (i = 0; i < desc->num_decoded_fmts; i++) {
		if (!rkvdec_image_fmt_match(desc->decoded_fmts[i].image_fmt,
					    image_fmt))
			continue;
		fmt_idx++;
		if (index == fmt_idx)
			return desc->decoded_fmts[i].fourcc;
	}

	return 0;
}

static bool rkvdec_is_valid_fmt(struct rkvdec_ctx *ctx, u32 fourcc,
				enum rkvdec_image_fmt image_fmt)
{
	const struct rkvdec_coded_fmt_desc *desc = ctx->coded_fmt_desc;
	unsigned int i;

	for (i = 0; i < desc->num_decoded_fmts; i++) {
		if (rkvdec_image_fmt_match(desc->decoded_fmts[i].image_fmt,
					   image_fmt) &&
		    desc->decoded_fmts[i].fourcc == fourcc)
			return true;
	}

	return false;
}
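
/*
 * The 128 bytes per 16x16 macroblock added to sizeimage below presumably
 * give the hardware room to store per-macroblock metadata (e.g. colocated
 * motion vectors) after the decoded image; see the codec-specific backends
 * for how this area is used.
 */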
static void rkvdec_fill_decoded_pixfmt(struct rkvdec_ctx *ctx,
				       struct v4l2_pix_format_mplane *pix_mp)
{
	v4l2_fill_pixfmt_mp(pix_mp, pix_mp->pixelformat,
			    pix_mp->width, pix_mp->height);
	pix_mp->plane_fmt[0].sizeimage += 128 *
		DIV_ROUND_UP(pix_mp->width, 16) *
		DIV_ROUND_UP(pix_mp->height, 16);
}

static void rkvdec_reset_fmt(struct rkvdec_ctx *ctx, struct v4l2_format *f,
			     u32 fourcc)
{
	memset(f, 0, sizeof(*f));
	f->fmt.pix_mp.pixelformat = fourcc;
	f->fmt.pix_mp.field = V4L2_FIELD_NONE;
	f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_REC709;
	f->fmt.pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
	f->fmt.pix_mp.quantization = V4L2_QUANTIZATION_DEFAULT;
	f->fmt.pix_mp.xfer_func = V4L2_XFER_FUNC_DEFAULT;
}

static void rkvdec_reset_decoded_fmt(struct rkvdec_ctx *ctx)
{
	struct v4l2_format *f = &ctx->decoded_fmt;
	u32 fourcc;

	fourcc = rkvdec_enum_decoded_fmt(ctx, 0, ctx->image_fmt);
	rkvdec_reset_fmt(ctx, f, fourcc);
	f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	f->fmt.pix_mp.width = ctx->coded_fmt.fmt.pix_mp.width;
	f->fmt.pix_mp.height = ctx->coded_fmt.fmt.pix_mp.height;
	rkvdec_fill_decoded_pixfmt(ctx, &f->fmt.pix_mp);
}

static int rkvdec_try_ctrl(struct v4l2_ctrl *ctrl)
{
	struct rkvdec_ctx *ctx = container_of(ctrl->handler,
					      struct rkvdec_ctx, ctrl_hdl);
	const struct rkvdec_coded_fmt_desc *desc = ctx->coded_fmt_desc;

	if (desc->ops->try_ctrl)
		return desc->ops->try_ctrl(ctx, ctrl);

	return 0;
}

static int rkvdec_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct rkvdec_ctx *ctx = container_of(ctrl->handler, struct rkvdec_ctx, ctrl_hdl);
	const struct rkvdec_coded_fmt_desc *desc = ctx->coded_fmt_desc;
	enum rkvdec_image_fmt image_fmt;
	struct vb2_queue *vq;

	/* Check if this change requires a capture format reset */
	if (!desc->ops->get_image_fmt)
		return 0;

	image_fmt = desc->ops->get_image_fmt(ctx, ctrl);
	if (rkvdec_image_fmt_changed(ctx, image_fmt)) {
		vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
				     V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
		if (vb2_is_busy(vq))
			return -EBUSY;

		ctx->image_fmt = image_fmt;
		rkvdec_reset_decoded_fmt(ctx);
	}

	return 0;
}

static const struct v4l2_ctrl_ops rkvdec_ctrl_ops = {
	.try_ctrl = rkvdec_try_ctrl,
	.s_ctrl = rkvdec_s_ctrl,
};

static const struct rkvdec_ctrl_desc rkvdec_h264_ctrl_descs[] = {
	{
		.cfg.id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
	},
	{
		.cfg.id = V4L2_CID_STATELESS_H264_SPS,
		.cfg.ops = &rkvdec_ctrl_ops,
	},
	{
		.cfg.id = V4L2_CID_STATELESS_H264_PPS,
	},
	{
		.cfg.id = V4L2_CID_STATELESS_H264_SCALING_MATRIX,
	},
	{
		.cfg.id = V4L2_CID_STATELESS_H264_DECODE_MODE,
		.cfg.min = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
		.cfg.max = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
		.cfg.def = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
	},
	{
		.cfg.id = V4L2_CID_STATELESS_H264_START_CODE,
		.cfg.min = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
		.cfg.def = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
		.cfg.max = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
	},
	{
		.cfg.id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
		.cfg.min = V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE,
		.cfg.max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422_INTRA,
		.cfg.menu_skip_mask =
			BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED) |
			BIT(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE),
		.cfg.def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
	},
	{
		.cfg.id = V4L2_CID_MPEG_VIDEO_H264_LEVEL,
		.cfg.min = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
		.cfg.max = V4L2_MPEG_VIDEO_H264_LEVEL_5_1,
	},
};

static const struct rkvdec_ctrls rkvdec_h264_ctrls = {
	.ctrls = rkvdec_h264_ctrl_descs,
	.num_ctrls = ARRAY_SIZE(rkvdec_h264_ctrl_descs),
};

static const struct rkvdec_decoded_fmt_desc rkvdec_h264_decoded_fmts[] = {
	{
		.fourcc = V4L2_PIX_FMT_NV12,
		.image_fmt = RKVDEC_IMG_FMT_420_8BIT,
	},
	{
		.fourcc = V4L2_PIX_FMT_NV15,
		.image_fmt = RKVDEC_IMG_FMT_420_10BIT,
	},
	{
		.fourcc = V4L2_PIX_FMT_NV16,
		.image_fmt = RKVDEC_IMG_FMT_422_8BIT,
	},
	{
		.fourcc = V4L2_PIX_FMT_NV20,
		.image_fmt = RKVDEC_IMG_FMT_422_10BIT,
	},
};

static const struct rkvdec_ctrl_desc rkvdec_vp9_ctrl_descs[] = {
	{
		.cfg.id = V4L2_CID_STATELESS_VP9_FRAME,
	},
	{
		.cfg.id = V4L2_CID_STATELESS_VP9_COMPRESSED_HDR,
	},
	{
		.cfg.id = V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
		.cfg.min = V4L2_MPEG_VIDEO_VP9_PROFILE_0,
		.cfg.max = V4L2_MPEG_VIDEO_VP9_PROFILE_0,
		.cfg.def = V4L2_MPEG_VIDEO_VP9_PROFILE_0,
	},
};

static const struct rkvdec_ctrls rkvdec_vp9_ctrls = {
	.ctrls = rkvdec_vp9_ctrl_descs,
	.num_ctrls = ARRAY_SIZE(rkvdec_vp9_ctrl_descs),
};

static const struct rkvdec_decoded_fmt_desc rkvdec_vp9_decoded_fmts[] = {
	{
		.fourcc = V4L2_PIX_FMT_NV12,
		.image_fmt = RKVDEC_IMG_FMT_420_8BIT,
	},
};
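
/*
 * Per-codec descriptors tying a coded pixelformat to its controls, codec
 * ops and the decoded formats it can produce. The frmsize limits
 * presumably match the capabilities of the RK3399 decoder block.
 */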
static const struct rkvdec_coded_fmt_desc rkvdec_coded_fmts[] = {
	{
		.fourcc = V4L2_PIX_FMT_H264_SLICE,
		.frmsize = {
			.min_width = 64,
			.max_width = 4096,
			.step_width = 64,
			.min_height = 48,
			.max_height = 2560,
			.step_height = 16,
		},
		.ctrls = &rkvdec_h264_ctrls,
		.ops = &rkvdec_h264_fmt_ops,
		.num_decoded_fmts = ARRAY_SIZE(rkvdec_h264_decoded_fmts),
		.decoded_fmts = rkvdec_h264_decoded_fmts,
		.subsystem_flags = VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF,
	},
	{
		.fourcc = V4L2_PIX_FMT_VP9_FRAME,
		.frmsize = {
			.min_width = 64,
			.max_width = 4096,
			.step_width = 64,
			.min_height = 64,
			.max_height = 2304,
			.step_height = 64,
		},
		.ctrls = &rkvdec_vp9_ctrls,
		.ops = &rkvdec_vp9_fmt_ops,
		.num_decoded_fmts = ARRAY_SIZE(rkvdec_vp9_decoded_fmts),
		.decoded_fmts = rkvdec_vp9_decoded_fmts,
	}
};

static const struct rkvdec_coded_fmt_desc *
rkvdec_find_coded_fmt_desc(u32 fourcc)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rkvdec_coded_fmts); i++) {
		if (rkvdec_coded_fmts[i].fourcc == fourcc)
			return &rkvdec_coded_fmts[i];
	}

	return NULL;
}

static void rkvdec_reset_coded_fmt(struct rkvdec_ctx *ctx)
{
	struct v4l2_format *f = &ctx->coded_fmt;

	ctx->coded_fmt_desc = &rkvdec_coded_fmts[0];
	rkvdec_reset_fmt(ctx, f, ctx->coded_fmt_desc->fourcc);

	f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	f->fmt.pix_mp.width = ctx->coded_fmt_desc->frmsize.min_width;
	f->fmt.pix_mp.height = ctx->coded_fmt_desc->frmsize.min_height;

	if (ctx->coded_fmt_desc->ops->adjust_fmt)
		ctx->coded_fmt_desc->ops->adjust_fmt(ctx, f);
}
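
/*
 * A continuous 1..max range is reported here instead of the stepwise
 * hardware limits: the step/alignment constraints are only applied when a
 * format is tried or set (see v4l2_apply_frmsize_constraints() in the
 * try handlers below).
 */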
static int rkvdec_enum_framesizes(struct file *file, void *priv,
				  struct v4l2_frmsizeenum *fsize)
{
	const struct rkvdec_coded_fmt_desc *fmt;

	if (fsize->index != 0)
		return -EINVAL;

	fmt = rkvdec_find_coded_fmt_desc(fsize->pixel_format);
	if (!fmt)
		return -EINVAL;

	fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
	fsize->stepwise.min_width = 1;
	fsize->stepwise.max_width = fmt->frmsize.max_width;
	fsize->stepwise.step_width = 1;
	fsize->stepwise.min_height = 1;
	fsize->stepwise.max_height = fmt->frmsize.max_height;
	fsize->stepwise.step_height = 1;

	return 0;
}

static int rkvdec_querycap(struct file *file, void *priv,
			   struct v4l2_capability *cap)
{
	struct rkvdec_dev *rkvdec = video_drvdata(file);
	struct video_device *vdev = video_devdata(file);

	strscpy(cap->driver, rkvdec->dev->driver->name,
		sizeof(cap->driver));
	strscpy(cap->card, vdev->name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
		 rkvdec->dev->driver->name);
	return 0;
}

static int rkvdec_try_capture_fmt(struct file *file, void *priv,
				  struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
	struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
	const struct rkvdec_coded_fmt_desc *coded_desc;

	/*
	 * The codec context should point to a coded format desc; if the
	 * format on the coded end has not been set yet, it points to the
	 * default value.
	 */
	coded_desc = ctx->coded_fmt_desc;
	if (WARN_ON(!coded_desc))
		return -EINVAL;

	if (!rkvdec_is_valid_fmt(ctx, pix_mp->pixelformat, ctx->image_fmt))
		pix_mp->pixelformat = rkvdec_enum_decoded_fmt(ctx, 0,
							      ctx->image_fmt);

	/* Always apply the frmsize constraint of the coded end. */
	pix_mp->width = max(pix_mp->width, ctx->coded_fmt.fmt.pix_mp.width);
	pix_mp->height = max(pix_mp->height, ctx->coded_fmt.fmt.pix_mp.height);
	v4l2_apply_frmsize_constraints(&pix_mp->width,
				       &pix_mp->height,
				       &coded_desc->frmsize);

	rkvdec_fill_decoded_pixfmt(ctx, pix_mp);
	pix_mp->field = V4L2_FIELD_NONE;

	return 0;
}

static int rkvdec_try_output_fmt(struct file *file, void *priv,
				 struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
	struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
	const struct rkvdec_coded_fmt_desc *desc;

	desc = rkvdec_find_coded_fmt_desc(pix_mp->pixelformat);
	if (!desc) {
		pix_mp->pixelformat = rkvdec_coded_fmts[0].fourcc;
		desc = &rkvdec_coded_fmts[0];
	}

	v4l2_apply_frmsize_constraints(&pix_mp->width,
				       &pix_mp->height,
				       &desc->frmsize);

	pix_mp->field = V4L2_FIELD_NONE;
	/* All coded formats are considered single planar for now. */
	pix_mp->num_planes = 1;

	if (desc->ops->adjust_fmt) {
		int ret;

		ret = desc->ops->adjust_fmt(ctx, f);
		if (ret)
			return ret;
	}

	return 0;
}

static int rkvdec_s_capture_fmt(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
	struct vb2_queue *vq;
	int ret;

	/* Change not allowed if queue is busy */
	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
			     V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
	if (vb2_is_busy(vq))
		return -EBUSY;

	ret = rkvdec_try_capture_fmt(file, priv, f);
	if (ret)
		return ret;

	ctx->decoded_fmt = *f;
	return 0;
}
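
/*
 * Setting the OUTPUT (coded) format drives most of the context state: it
 * selects the codec-specific ops, resets the CAPTURE format to a default
 * compatible with the new codec, and propagates resolution and
 * colorimetry to it.
 */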
static int rkvdec_s_output_fmt(struct file *file, void *priv,
			       struct v4l2_format *f)
{
	struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
	struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
	const struct rkvdec_coded_fmt_desc *desc;
	struct v4l2_format *cap_fmt;
	struct vb2_queue *peer_vq, *vq;
	int ret;

	/*
	 * In order to support dynamic resolution change, the decoder admits
	 * a resolution change, as long as the pixelformat remains the same.
	 * Can't be done if streaming.
	 */
	vq = v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
	if (vb2_is_streaming(vq) ||
	    (vb2_is_busy(vq) &&
	     f->fmt.pix_mp.pixelformat != ctx->coded_fmt.fmt.pix_mp.pixelformat))
		return -EBUSY;

	/*
	 * Since format change on the OUTPUT queue will reset the CAPTURE
	 * queue, we can't allow doing so when the CAPTURE queue has buffers
	 * allocated.
	 */
	peer_vq = v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
	if (vb2_is_busy(peer_vq))
		return -EBUSY;

	ret = rkvdec_try_output_fmt(file, priv, f);
	if (ret)
		return ret;

	desc = rkvdec_find_coded_fmt_desc(f->fmt.pix_mp.pixelformat);
	if (!desc)
		return -EINVAL;
	ctx->coded_fmt_desc = desc;
	ctx->coded_fmt = *f;

	/*
	 * The current decoded format might have become invalid with the
	 * newly selected codec, so reset it to default just to be safe and
	 * keep the internal driver state sane. Userspace is required to set
	 * the decoded format again after we return, so we don't need
	 * anything smarter.
	 *
	 * Note that this will propagate any size changes to the decoded
	 * format.
	 */
	ctx->image_fmt = RKVDEC_IMG_FMT_ANY;
	rkvdec_reset_decoded_fmt(ctx);

	/* Propagate colorspace information to capture. */
	cap_fmt = &ctx->decoded_fmt;
	cap_fmt->fmt.pix_mp.colorspace = f->fmt.pix_mp.colorspace;
	cap_fmt->fmt.pix_mp.xfer_func = f->fmt.pix_mp.xfer_func;
	cap_fmt->fmt.pix_mp.ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
	cap_fmt->fmt.pix_mp.quantization = f->fmt.pix_mp.quantization;

	/* Enable format specific queue features */
	vq->subsystem_flags |= desc->subsystem_flags;

	return 0;
}

static int rkvdec_g_output_fmt(struct file *file, void *priv,
			       struct v4l2_format *f)
{
	struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);

	*f = ctx->coded_fmt;
	return 0;
}

static int rkvdec_g_capture_fmt(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);

	*f = ctx->decoded_fmt;
	return 0;
}

static int rkvdec_enum_output_fmt(struct file *file, void *priv,
				  struct v4l2_fmtdesc *f)
{
	if (f->index >= ARRAY_SIZE(rkvdec_coded_fmts))
		return -EINVAL;

	f->pixelformat = rkvdec_coded_fmts[f->index].fourcc;
	return 0;
}

static int rkvdec_enum_capture_fmt(struct file *file, void *priv,
				   struct v4l2_fmtdesc *f)
{
	struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
	u32 fourcc;

	fourcc = rkvdec_enum_decoded_fmt(ctx, f->index, ctx->image_fmt);
	if (!fourcc)
		return -EINVAL;

	f->pixelformat = fourcc;
	return 0;
}

static const struct v4l2_ioctl_ops rkvdec_ioctl_ops = {
	.vidioc_querycap = rkvdec_querycap,
	.vidioc_enum_framesizes = rkvdec_enum_framesizes,

	.vidioc_try_fmt_vid_cap_mplane = rkvdec_try_capture_fmt,
	.vidioc_try_fmt_vid_out_mplane = rkvdec_try_output_fmt,
	.vidioc_s_fmt_vid_out_mplane = rkvdec_s_output_fmt,
	.vidioc_s_fmt_vid_cap_mplane = rkvdec_s_capture_fmt,
	.vidioc_g_fmt_vid_out_mplane = rkvdec_g_output_fmt,
	.vidioc_g_fmt_vid_cap_mplane = rkvdec_g_capture_fmt,
	.vidioc_enum_fmt_vid_out = rkvdec_enum_output_fmt,
	.vidioc_enum_fmt_vid_cap = rkvdec_enum_capture_fmt,

	.vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
	.vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
	.vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
	.vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
	.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
	.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
	.vidioc_expbuf = v4l2_m2m_ioctl_expbuf,

	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,

	.vidioc_streamon = v4l2_m2m_ioctl_streamon,
	.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,

	.vidioc_decoder_cmd = v4l2_m2m_ioctl_stateless_decoder_cmd,
	.vidioc_try_decoder_cmd = v4l2_m2m_ioctl_stateless_try_decoder_cmd,
};
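
/*
 * vb2 queue_setup hook: validate the plane count/sizes requested by
 * userspace against the negotiated format, or fill them in when the
 * caller leaves the choice to the driver.
 */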
static int rkvdec_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
			      unsigned int *num_planes, unsigned int sizes[],
			      struct device *alloc_devs[])
{
	struct rkvdec_ctx *ctx = vb2_get_drv_priv(vq);
	struct v4l2_format *f;
	unsigned int i;

	if (V4L2_TYPE_IS_OUTPUT(vq->type))
		f = &ctx->coded_fmt;
	else
		f = &ctx->decoded_fmt;

	if (*num_planes) {
		if (*num_planes != f->fmt.pix_mp.num_planes)
			return -EINVAL;

		for (i = 0; i < f->fmt.pix_mp.num_planes; i++) {
			if (sizes[i] < f->fmt.pix_mp.plane_fmt[i].sizeimage)
				return -EINVAL;
		}
	} else {
		*num_planes = f->fmt.pix_mp.num_planes;
		for (i = 0; i < f->fmt.pix_mp.num_planes; i++)
			sizes[i] = f->fmt.pix_mp.plane_fmt[i].sizeimage;
	}

	return 0;
}

static int rkvdec_buf_prepare(struct vb2_buffer *vb)
{
	struct vb2_queue *vq = vb->vb2_queue;
	struct rkvdec_ctx *ctx = vb2_get_drv_priv(vq);
	struct v4l2_format *f;
	unsigned int i;

	if (V4L2_TYPE_IS_OUTPUT(vq->type))
		f = &ctx->coded_fmt;
	else
		f = &ctx->decoded_fmt;

	for (i = 0; i < f->fmt.pix_mp.num_planes; ++i) {
		u32 sizeimage = f->fmt.pix_mp.plane_fmt[i].sizeimage;

		if (vb2_plane_size(vb, i) < sizeimage)
			return -EINVAL;
	}

	/*
	 * Buffer's bytesused must be written by driver for CAPTURE buffers.
	 * (for OUTPUT buffers, if userspace passes 0 bytesused, v4l2-core sets
	 * it to buffer length).
	 */
	if (V4L2_TYPE_IS_CAPTURE(vq->type))
		vb2_set_plane_payload(vb, 0, f->fmt.pix_mp.plane_fmt[0].sizeimage);

	return 0;
}

static void rkvdec_buf_queue(struct vb2_buffer *vb)
{
	struct rkvdec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}

static int rkvdec_buf_out_validate(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vbuf->field = V4L2_FIELD_NONE;
	return 0;
}

static void rkvdec_buf_request_complete(struct vb2_buffer *vb)
{
	struct rkvdec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->ctrl_hdl);
}

static int rkvdec_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct rkvdec_ctx *ctx = vb2_get_drv_priv(q);
	const struct rkvdec_coded_fmt_desc *desc;
	int ret;

	if (V4L2_TYPE_IS_CAPTURE(q->type))
		return 0;

	desc = ctx->coded_fmt_desc;
	if (WARN_ON(!desc))
		return -EINVAL;

	if (desc->ops->start) {
		ret = desc->ops->start(ctx);
		if (ret)
			return ret;
	}

	return 0;
}

static void rkvdec_queue_cleanup(struct vb2_queue *vq, u32 state)
{
	struct rkvdec_ctx *ctx = vb2_get_drv_priv(vq);

	while (true) {
		struct vb2_v4l2_buffer *vbuf;

		if (V4L2_TYPE_IS_OUTPUT(vq->type))
			vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
		else
			vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

		if (!vbuf)
			break;

		v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
					   &ctx->ctrl_hdl);
		v4l2_m2m_buf_done(vbuf, state);
	}
}

static void rkvdec_stop_streaming(struct vb2_queue *q)
{
	struct rkvdec_ctx *ctx = vb2_get_drv_priv(q);

	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		const struct rkvdec_coded_fmt_desc *desc = ctx->coded_fmt_desc;

		if (WARN_ON(!desc))
			return;

		if (desc->ops->stop)
			desc->ops->stop(ctx);
	}

	rkvdec_queue_cleanup(q, VB2_BUF_STATE_ERROR);
}

static const struct vb2_ops rkvdec_queue_ops = {
	.queue_setup = rkvdec_queue_setup,
	.buf_prepare = rkvdec_buf_prepare,
	.buf_queue = rkvdec_buf_queue,
	.buf_out_validate = rkvdec_buf_out_validate,
	.buf_request_complete = rkvdec_buf_request_complete,
	.start_streaming = rkvdec_start_streaming,
	.stop_streaming = rkvdec_stop_streaming,
};
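
/*
 * Stateless decoding requires every media request to carry exactly one
 * OUTPUT buffer, so reject empty requests and requests with more than one
 * buffer before running the generic validation.
 */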
static int rkvdec_request_validate(struct media_request *req)
{
	unsigned int count;

	count = vb2_request_buffer_cnt(req);
	if (!count)
		return -ENOENT;
	else if (count > 1)
		return -EINVAL;

	return vb2_request_validate(req);
}

static const struct media_device_ops rkvdec_media_ops = {
	.req_validate = rkvdec_request_validate,
	.req_queue = v4l2_m2m_request_queue,
};

static void rkvdec_job_finish_no_pm(struct rkvdec_ctx *ctx,
				    enum vb2_buffer_state result)
{
	if (ctx->coded_fmt_desc->ops->done) {
		struct vb2_v4l2_buffer *src_buf, *dst_buf;

		src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
		dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
		ctx->coded_fmt_desc->ops->done(ctx, src_buf, dst_buf, result);
	}

	v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
					 result);
}

static void rkvdec_job_finish(struct rkvdec_ctx *ctx,
			      enum vb2_buffer_state result)
{
	struct rkvdec_dev *rkvdec = ctx->dev;

	pm_runtime_mark_last_busy(rkvdec->dev);
	pm_runtime_put_autosuspend(rkvdec->dev);
	rkvdec_job_finish_no_pm(ctx, result);
}

void rkvdec_run_preamble(struct rkvdec_ctx *ctx, struct rkvdec_run *run)
{
	struct media_request *src_req;

	memset(run, 0, sizeof(*run));

	run->bufs.src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	run->bufs.dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

	/* Apply request(s) controls if needed. */
	src_req = run->bufs.src->vb2_buf.req_obj.req;
	if (src_req)
		v4l2_ctrl_request_setup(src_req, &ctx->ctrl_hdl);

	v4l2_m2m_buf_copy_metadata(run->bufs.src, run->bufs.dst, true);
}

void rkvdec_run_postamble(struct rkvdec_ctx *ctx, struct rkvdec_run *run)
{
	struct media_request *src_req = run->bufs.src->vb2_buf.req_obj.req;

	if (src_req)
		v4l2_ctrl_request_complete(src_req, &ctx->ctrl_hdl);
}
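
/*
 * m2m device_run hook: called with a complete job (one source and one
 * destination buffer) picked. Power the block up and kick the codec
 * backend; the job is finished later from the IRQ handler or, on timeout,
 * from the watchdog work.
 */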
static void rkvdec_device_run(void *priv)
{
	struct rkvdec_ctx *ctx = priv;
	struct rkvdec_dev *rkvdec = ctx->dev;
	const struct rkvdec_coded_fmt_desc *desc = ctx->coded_fmt_desc;
	int ret;

	if (WARN_ON(!desc))
		return;

	ret = pm_runtime_resume_and_get(rkvdec->dev);
	if (ret < 0) {
		rkvdec_job_finish_no_pm(ctx, VB2_BUF_STATE_ERROR);
		return;
	}

	ret = desc->ops->run(ctx);
	if (ret)
		rkvdec_job_finish(ctx, VB2_BUF_STATE_ERROR);
}

static const struct v4l2_m2m_ops rkvdec_m2m_ops = {
	.device_run = rkvdec_device_run,
};

static int rkvdec_queue_init(void *priv,
			     struct vb2_queue *src_vq,
			     struct vb2_queue *dst_vq)
{
	struct rkvdec_ctx *ctx = priv;
	struct rkvdec_dev *rkvdec = ctx->dev;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->drv_priv = ctx;
	src_vq->ops = &rkvdec_queue_ops;
	src_vq->mem_ops = &vb2_dma_contig_memops;

	/*
	 * Driver does mostly sequential access, so sacrifice TLB efficiency
	 * for faster allocation. Also, no CPU access on the source queue,
	 * so no kernel mapping needed.
	 */
	src_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
			    DMA_ATTR_NO_KERNEL_MAPPING;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->lock = &rkvdec->vdev_lock;
	src_vq->dev = rkvdec->v4l2_dev.dev;
	src_vq->supports_requests = true;
	src_vq->requires_requests = true;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->bidirectional = true;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
			    DMA_ATTR_NO_KERNEL_MAPPING;
	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->drv_priv = ctx;
	dst_vq->ops = &rkvdec_queue_ops;
	dst_vq->buf_struct_size = sizeof(struct rkvdec_decoded_buffer);
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->lock = &rkvdec->vdev_lock;
	dst_vq->dev = rkvdec->v4l2_dev.dev;

	return vb2_queue_init(dst_vq);
}

static int rkvdec_add_ctrls(struct rkvdec_ctx *ctx,
			    const struct rkvdec_ctrls *ctrls)
{
	unsigned int i;

	for (i = 0; i < ctrls->num_ctrls; i++) {
		const struct v4l2_ctrl_config *cfg = &ctrls->ctrls[i].cfg;

		v4l2_ctrl_new_custom(&ctx->ctrl_hdl, cfg, ctx);
		if (ctx->ctrl_hdl.error)
			return ctx->ctrl_hdl.error;
	}

	return 0;
}

static int rkvdec_init_ctrls(struct rkvdec_ctx *ctx)
{
	unsigned int i, nctrls = 0;
	int ret;

	for (i = 0; i < ARRAY_SIZE(rkvdec_coded_fmts); i++)
		nctrls += rkvdec_coded_fmts[i].ctrls->num_ctrls;

	v4l2_ctrl_handler_init(&ctx->ctrl_hdl, nctrls);

	for (i = 0; i < ARRAY_SIZE(rkvdec_coded_fmts); i++) {
		ret = rkvdec_add_ctrls(ctx, rkvdec_coded_fmts[i].ctrls);
		if (ret)
			goto err_free_handler;
	}

	ret = v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
	if (ret)
		goto err_free_handler;

	ctx->fh.ctrl_handler = &ctx->ctrl_hdl;
	return 0;

err_free_handler:
	v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
	return ret;
}

static int rkvdec_open(struct file *filp)
{
	struct rkvdec_dev *rkvdec = video_drvdata(filp);
	struct rkvdec_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = rkvdec;
	rkvdec_reset_coded_fmt(ctx);
	rkvdec_reset_decoded_fmt(ctx);
	v4l2_fh_init(&ctx->fh, video_devdata(filp));

	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(rkvdec->m2m_dev, ctx,
					    rkvdec_queue_init);
	if (IS_ERR(ctx->fh.m2m_ctx)) {
		ret = PTR_ERR(ctx->fh.m2m_ctx);
		goto err_free_ctx;
	}

	ret = rkvdec_init_ctrls(ctx);
	if (ret)
		goto err_cleanup_m2m_ctx;

	filp->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);

	return 0;

err_cleanup_m2m_ctx:
	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);

err_free_ctx:
	kfree(ctx);
	return ret;
}

static int rkvdec_release(struct file *filp)
{
	struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(filp->private_data);

	v4l2_fh_del(&ctx->fh);
	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
	v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);

	return 0;
}

static const struct v4l2_file_operations rkvdec_fops = {
	.owner = THIS_MODULE,
	.open = rkvdec_open,
	.release = rkvdec_release,
	.poll = v4l2_m2m_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = v4l2_m2m_fop_mmap,
};
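
/*
 * Bring up the V4L2, mem2mem and media-controller plumbing. The err_*
 * labels unwind whatever has been registered so far, in reverse order.
 */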
static int rkvdec_v4l2_init(struct rkvdec_dev *rkvdec)
{
	int ret;

	ret = v4l2_device_register(rkvdec->dev, &rkvdec->v4l2_dev);
	if (ret) {
		dev_err(rkvdec->dev, "Failed to register V4L2 device\n");
		return ret;
	}

	rkvdec->m2m_dev = v4l2_m2m_init(&rkvdec_m2m_ops);
	if (IS_ERR(rkvdec->m2m_dev)) {
		v4l2_err(&rkvdec->v4l2_dev, "Failed to init mem2mem device\n");
		ret = PTR_ERR(rkvdec->m2m_dev);
		goto err_unregister_v4l2;
	}

	rkvdec->mdev.dev = rkvdec->dev;
	strscpy(rkvdec->mdev.model, "rkvdec", sizeof(rkvdec->mdev.model));
	strscpy(rkvdec->mdev.bus_info, "platform:rkvdec",
		sizeof(rkvdec->mdev.bus_info));
	media_device_init(&rkvdec->mdev);
	rkvdec->mdev.ops = &rkvdec_media_ops;
	rkvdec->v4l2_dev.mdev = &rkvdec->mdev;

	rkvdec->vdev.lock = &rkvdec->vdev_lock;
	rkvdec->vdev.v4l2_dev = &rkvdec->v4l2_dev;
	rkvdec->vdev.fops = &rkvdec_fops;
	rkvdec->vdev.release = video_device_release_empty;
	rkvdec->vdev.vfl_dir = VFL_DIR_M2M;
	rkvdec->vdev.device_caps = V4L2_CAP_STREAMING |
				   V4L2_CAP_VIDEO_M2M_MPLANE;
	rkvdec->vdev.ioctl_ops = &rkvdec_ioctl_ops;
	video_set_drvdata(&rkvdec->vdev, rkvdec);
	strscpy(rkvdec->vdev.name, "rkvdec", sizeof(rkvdec->vdev.name));

	ret = video_register_device(&rkvdec->vdev, VFL_TYPE_VIDEO, -1);
	if (ret) {
		v4l2_err(&rkvdec->v4l2_dev, "Failed to register video device\n");
		goto err_cleanup_mc;
	}

	ret = v4l2_m2m_register_media_controller(rkvdec->m2m_dev, &rkvdec->vdev,
						 MEDIA_ENT_F_PROC_VIDEO_DECODER);
	if (ret) {
		v4l2_err(&rkvdec->v4l2_dev,
			 "Failed to initialize V4L2 M2M media controller\n");
		goto err_unregister_vdev;
	}

	ret = media_device_register(&rkvdec->mdev);
	if (ret) {
		v4l2_err(&rkvdec->v4l2_dev, "Failed to register media device\n");
		goto err_unregister_mc;
	}

	return 0;

err_unregister_mc:
	v4l2_m2m_unregister_media_controller(rkvdec->m2m_dev);

err_unregister_vdev:
	video_unregister_device(&rkvdec->vdev);

err_cleanup_mc:
	media_device_cleanup(&rkvdec->mdev);
	v4l2_m2m_release(rkvdec->m2m_dev);

err_unregister_v4l2:
	v4l2_device_unregister(&rkvdec->v4l2_dev);
	return ret;
}

static void rkvdec_v4l2_cleanup(struct rkvdec_dev *rkvdec)
{
	media_device_unregister(&rkvdec->mdev);
	v4l2_m2m_unregister_media_controller(rkvdec->m2m_dev);
	video_unregister_device(&rkvdec->vdev);
	media_device_cleanup(&rkvdec->mdev);
	v4l2_m2m_release(rkvdec->m2m_dev);
	v4l2_device_unregister(&rkvdec->v4l2_dev);
}

static void rkvdec_iommu_restore(struct rkvdec_dev *rkvdec)
{
	if (rkvdec->empty_domain) {
		/*
		 * To rewrite the mappings in the attached IOMMU core, attach
		 * a new empty domain that will program an empty table, then
		 * detach it to restore the default domain and all cached
		 * mappings.
		 * This is safely done in this interrupt handler to make sure
		 * no memory gets mapped through the IOMMU while the empty
		 * domain is attached.
		 */
		iommu_attach_device(rkvdec->empty_domain, rkvdec->dev);
		iommu_detach_device(rkvdec->empty_domain, rkvdec->dev);
	}
}
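
/*
 * Decode interrupt: read and acknowledge the status, then finish the
 * current job. The frame is only valid when RKVDEC_RDY_STA is set; when
 * the error path ends in a hardware soft reset (RKVDEC_SOFTRESET_RDY),
 * restore the IOMMU mappings the reset presumably dropped.
 */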
static irqreturn_t rkvdec_irq_handler(int irq, void *priv)
{
	struct rkvdec_dev *rkvdec = priv;
	struct rkvdec_ctx *ctx = v4l2_m2m_get_curr_priv(rkvdec->m2m_dev);
	enum vb2_buffer_state state;
	u32 status;

	status = readl(rkvdec->regs + RKVDEC_REG_INTERRUPT);
	writel(0, rkvdec->regs + RKVDEC_REG_INTERRUPT);

	if (status & RKVDEC_RDY_STA) {
		state = VB2_BUF_STATE_DONE;
	} else {
		state = VB2_BUF_STATE_ERROR;
		if (status & RKVDEC_SOFTRESET_RDY)
			rkvdec_iommu_restore(rkvdec);
	}

	if (cancel_delayed_work(&rkvdec->watchdog_work))
		rkvdec_job_finish(ctx, state);

	return IRQ_HANDLED;
}

static void rkvdec_watchdog_func(struct work_struct *work)
{
	struct rkvdec_dev *rkvdec;
	struct rkvdec_ctx *ctx;

	rkvdec = container_of(to_delayed_work(work), struct rkvdec_dev,
			      watchdog_work);
	ctx = v4l2_m2m_get_curr_priv(rkvdec->m2m_dev);
	if (ctx) {
		dev_err(rkvdec->dev, "Frame processing timed out!\n");
		writel(RKVDEC_IRQ_DIS, rkvdec->regs + RKVDEC_REG_INTERRUPT);
		writel(0, rkvdec->regs + RKVDEC_REG_SYSCTRL);
		rkvdec_job_finish(ctx, VB2_BUF_STATE_ERROR);
	}
}
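
/*
 * The compatible string and the clock names below must match the
 * rockchip,rk3399-vdec devicetree binding.
 */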
static const struct of_device_id of_rkvdec_match[] = {
	{ .compatible = "rockchip,rk3399-vdec" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_rkvdec_match);

static const char * const rkvdec_clk_names[] = {
	"axi", "ahb", "cabac", "core"
};

static int rkvdec_probe(struct platform_device *pdev)
{
	struct rkvdec_dev *rkvdec;
	unsigned int i;
	int ret, irq;

	rkvdec = devm_kzalloc(&pdev->dev, sizeof(*rkvdec), GFP_KERNEL);
	if (!rkvdec)
		return -ENOMEM;

	platform_set_drvdata(pdev, rkvdec);
	rkvdec->dev = &pdev->dev;
	mutex_init(&rkvdec->vdev_lock);
	INIT_DELAYED_WORK(&rkvdec->watchdog_work, rkvdec_watchdog_func);

	rkvdec->clocks = devm_kcalloc(&pdev->dev, ARRAY_SIZE(rkvdec_clk_names),
				      sizeof(*rkvdec->clocks), GFP_KERNEL);
	if (!rkvdec->clocks)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(rkvdec_clk_names); i++)
		rkvdec->clocks[i].id = rkvdec_clk_names[i];

	ret = devm_clk_bulk_get(&pdev->dev, ARRAY_SIZE(rkvdec_clk_names),
				rkvdec->clocks);
	if (ret)
		return ret;

	rkvdec->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(rkvdec->regs))
		return PTR_ERR(rkvdec->regs);

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "Could not set DMA coherent mask.\n");
		return ret;
	}

	if (iommu_get_domain_for_dev(&pdev->dev)) {
		rkvdec->empty_domain = iommu_paging_domain_alloc(rkvdec->dev);

		/*
		 * iommu_paging_domain_alloc() returns an ERR_PTR() on
		 * failure, never NULL: clear the pointer so the
		 * rkvdec->empty_domain checks elsewhere stay valid.
		 */
		if (IS_ERR(rkvdec->empty_domain)) {
			dev_warn(rkvdec->dev, "cannot alloc new empty domain\n");
			rkvdec->empty_domain = NULL;
		}
	}

	vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return -ENXIO;

	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
					rkvdec_irq_handler, IRQF_ONESHOT,
					dev_name(&pdev->dev), rkvdec);
	if (ret) {
		dev_err(&pdev->dev, "Could not request vdec IRQ\n");
		return ret;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = rkvdec_v4l2_init(rkvdec);
	if (ret)
		goto err_disable_runtime_pm;

	return 0;

err_disable_runtime_pm:
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static void rkvdec_remove(struct platform_device *pdev)
{
	struct rkvdec_dev *rkvdec = platform_get_drvdata(pdev);

	cancel_delayed_work_sync(&rkvdec->watchdog_work);

	rkvdec_v4l2_cleanup(rkvdec);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	if (rkvdec->empty_domain)
		iommu_domain_free(rkvdec->empty_domain);
}

#ifdef CONFIG_PM
static int rkvdec_runtime_resume(struct device *dev)
{
	struct rkvdec_dev *rkvdec = dev_get_drvdata(dev);

	return clk_bulk_prepare_enable(ARRAY_SIZE(rkvdec_clk_names),
				       rkvdec->clocks);
}

static int rkvdec_runtime_suspend(struct device *dev)
{
	struct rkvdec_dev *rkvdec = dev_get_drvdata(dev);

	clk_bulk_disable_unprepare(ARRAY_SIZE(rkvdec_clk_names),
				   rkvdec->clocks);
	return 0;
}
#endif

static const struct dev_pm_ops rkvdec_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(rkvdec_runtime_suspend, rkvdec_runtime_resume, NULL)
};

static struct platform_driver rkvdec_driver = {
	.probe = rkvdec_probe,
	.remove = rkvdec_remove,
	.driver = {
		.name = "rkvdec",
		.of_match_table = of_rkvdec_match,
		.pm = &rkvdec_pm_ops,
	},
};
module_platform_driver(rkvdec_driver);

MODULE_AUTHOR("Boris Brezillon <boris.brezillon@collabora.com>");
MODULE_DESCRIPTION("Rockchip Video Decoder driver");
MODULE_LICENSE("GPL v2");