// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Cadence MIPI-CSI2 RX Controller v1.3
 *
 * Copyright (C) 2017 Cadence Design Systems Inc.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

#define CSI2RX_DEVICE_CFG_REG			0x000

#define CSI2RX_SOFT_RESET_REG			0x004
#define CSI2RX_SOFT_RESET_PROTOCOL		BIT(1)
#define CSI2RX_SOFT_RESET_FRONT			BIT(0)

#define CSI2RX_STATIC_CFG_REG			0x008
#define CSI2RX_STATIC_CFG_DLANE_MAP(llane, plane)	((plane) << (16 + (llane) * 4))
#define CSI2RX_STATIC_CFG_LANES_MASK		GENMASK(11, 8)

#define CSI2RX_DPHY_LANE_CTRL_REG		0x40
#define CSI2RX_DPHY_CL_RST			BIT(16)
#define CSI2RX_DPHY_DL_RST(i)			BIT((i) + 12)
#define CSI2RX_DPHY_CL_EN			BIT(4)
#define CSI2RX_DPHY_DL_EN(i)			BIT(i)

#define CSI2RX_STREAM_BASE(n)			(((n) + 1) * 0x100)

#define CSI2RX_STREAM_CTRL_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x000)
#define CSI2RX_STREAM_CTRL_SOFT_RST		BIT(4)
#define CSI2RX_STREAM_CTRL_STOP			BIT(1)
#define CSI2RX_STREAM_CTRL_START		BIT(0)

#define CSI2RX_STREAM_STATUS_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x004)
#define CSI2RX_STREAM_STATUS_RDY		BIT(31)

#define CSI2RX_STREAM_DATA_CFG_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x008)
#define CSI2RX_STREAM_DATA_CFG_VC_SELECT(n)	BIT((n) + 16)

#define CSI2RX_STREAM_CFG_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x00c)
#define CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF	(1 << 8)

#define CSI2RX_LANES_MAX			4
#define CSI2RX_STREAMS_MAX			4

enum csi2rx_pads {
	CSI2RX_PAD_SINK,
	CSI2RX_PAD_SOURCE_STREAM0,
	CSI2RX_PAD_SOURCE_STREAM1,
	CSI2RX_PAD_SOURCE_STREAM2,
	CSI2RX_PAD_SOURCE_STREAM3,
	CSI2RX_PAD_MAX,
};

struct csi2rx_fmt {
	u32	code;
	u8	bpp;
};

struct csi2rx_priv {
	struct device			*dev;
	unsigned int			count;

	/*
	 * Used to prevent race conditions between multiple,
	 * concurrent calls to start and stop.
	 */
	struct mutex			lock;

	void __iomem			*base;
	struct clk			*sys_clk;
	struct clk			*p_clk;
	struct clk			*pixel_clk[CSI2RX_STREAMS_MAX];
	struct reset_control		*sys_rst;
	struct reset_control		*p_rst;
	struct reset_control		*pixel_rst[CSI2RX_STREAMS_MAX];
	struct phy			*dphy;

	u8				lanes[CSI2RX_LANES_MAX];
	u8				num_lanes;
	u8				max_lanes;
	u8				max_streams;
	bool				has_internal_dphy;

	struct v4l2_subdev		subdev;
	struct v4l2_async_notifier	notifier;
	struct media_pad		pads[CSI2RX_PAD_MAX];

	/* Remote source */
	struct v4l2_subdev		*source_subdev;
	int				source_pad;
};

static const struct csi2rx_fmt formats[] = {
	{ .code = MEDIA_BUS_FMT_YUYV8_1X16, .bpp = 16, },
	{ .code = MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 16, },
	{ .code = MEDIA_BUS_FMT_YVYU8_1X16, .bpp = 16, },
	{ .code = MEDIA_BUS_FMT_VYUY8_1X16, .bpp = 16, },
	{ .code = MEDIA_BUS_FMT_SBGGR8_1X8, .bpp = 8, },
	{ .code = MEDIA_BUS_FMT_SGBRG8_1X8, .bpp = 8, },
	{ .code = MEDIA_BUS_FMT_SGRBG8_1X8, .bpp = 8, },
	{ .code = MEDIA_BUS_FMT_SRGGB8_1X8, .bpp = 8, },
	{ .code = MEDIA_BUS_FMT_Y8_1X8, .bpp = 8, },
	{ .code = MEDIA_BUS_FMT_SBGGR10_1X10, .bpp = 10, },
	{ .code = MEDIA_BUS_FMT_SGBRG10_1X10, .bpp = 10, },
	{ .code = MEDIA_BUS_FMT_SGRBG10_1X10, .bpp = 10, },
	{ .code = MEDIA_BUS_FMT_SRGGB10_1X10, .bpp = 10, },
	{ .code = MEDIA_BUS_FMT_RGB565_1X16, .bpp = 16, },
	{ .code = MEDIA_BUS_FMT_RGB888_1X24, .bpp = 24, },
	{ .code = MEDIA_BUS_FMT_BGR888_1X24, .bpp = 24, },
};

static const struct csi2rx_fmt *csi2rx_get_fmt_by_code(u32 code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); i++)
		if (formats[i].code == code)
			return &formats[i];

	return NULL;
}

static inline
struct csi2rx_priv *v4l2_subdev_to_csi2rx(struct v4l2_subdev *subdev)
{
	return container_of(subdev, struct csi2rx_priv, subdev);
}
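
/* Pulse the protocol/front-end soft reset and every per-stream soft reset. */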
static void csi2rx_reset(struct csi2rx_priv *csi2rx)
{
	unsigned int i;

	/* Reset module */
	writel(CSI2RX_SOFT_RESET_PROTOCOL | CSI2RX_SOFT_RESET_FRONT,
	       csi2rx->base + CSI2RX_SOFT_RESET_REG);
	/* Reset individual streams. */
	for (i = 0; i < csi2rx->max_streams; i++) {
		writel(CSI2RX_STREAM_CTRL_SOFT_RST,
		       csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
	}

	usleep_range(10, 20);

	/* Clear resets */
	writel(0, csi2rx->base + CSI2RX_SOFT_RESET_REG);
	for (i = 0; i < csi2rx->max_streams; i++)
		writel(0, csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
}

static int csi2rx_configure_ext_dphy(struct csi2rx_priv *csi2rx)
{
	struct media_pad *src_pad =
		&csi2rx->source_subdev->entity.pads[csi2rx->source_pad];
	union phy_configure_opts opts = { };
	struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
	struct v4l2_subdev_format sd_fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = CSI2RX_PAD_SINK,
	};
	const struct csi2rx_fmt *fmt;
	s64 link_freq;
	int ret;

	ret = v4l2_subdev_call_state_active(&csi2rx->subdev, pad, get_fmt,
					    &sd_fmt);
	if (ret < 0)
		return ret;

	fmt = csi2rx_get_fmt_by_code(sd_fmt.format.code);

	link_freq = v4l2_get_link_freq(src_pad,
				       fmt->bpp, 2 * csi2rx->num_lanes);
	if (link_freq < 0)
		return link_freq;

	ret = phy_mipi_dphy_get_default_config_for_hsclk(link_freq,
							 csi2rx->num_lanes, cfg);
	if (ret)
		return ret;

	ret = phy_power_on(csi2rx->dphy);
	if (ret)
		return ret;

	ret = phy_configure(csi2rx->dphy, &opts);
	if (ret) {
		phy_power_off(csi2rx->dphy);
		return ret;
	}

	return 0;
}
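
/*
 * Bring the controller up: program the lane mapping, optionally configure
 * the external D-PHY, then configure and start every stream before asking
 * the remote source to start streaming.
 */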
static int csi2rx_start(struct csi2rx_priv *csi2rx)
{
	unsigned int i;
	unsigned long lanes_used = 0;
	u32 reg;
	int ret;

	ret = clk_prepare_enable(csi2rx->p_clk);
	if (ret)
		return ret;

	reset_control_deassert(csi2rx->p_rst);
	csi2rx_reset(csi2rx);

	reg = csi2rx->num_lanes << 8;
	for (i = 0; i < csi2rx->num_lanes; i++) {
		reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, csi2rx->lanes[i]);
		set_bit(csi2rx->lanes[i], &lanes_used);
	}

	/*
	 * Even the unused lanes need to be mapped. To avoid mapping
	 * the same physical lane twice, keep track of the lanes used
	 * in the previous loop, and only map unused physical lanes to
	 * the rest of our logical lanes.
	 */
	for (i = csi2rx->num_lanes; i < csi2rx->max_lanes; i++) {
		unsigned int idx = find_first_zero_bit(&lanes_used,
						       csi2rx->max_lanes);
		set_bit(idx, &lanes_used);
		reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, i + 1);
	}

	writel(reg, csi2rx->base + CSI2RX_STATIC_CFG_REG);

	/* Enable DPHY clk and data lanes. */
	if (csi2rx->dphy) {
		reg = CSI2RX_DPHY_CL_EN | CSI2RX_DPHY_CL_RST;
		for (i = 0; i < csi2rx->num_lanes; i++) {
			reg |= CSI2RX_DPHY_DL_EN(csi2rx->lanes[i] - 1);
			reg |= CSI2RX_DPHY_DL_RST(csi2rx->lanes[i] - 1);
		}

		writel(reg, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);

		ret = csi2rx_configure_ext_dphy(csi2rx);
		if (ret) {
			dev_err(csi2rx->dev,
				"Failed to configure external DPHY: %d\n", ret);
			goto err_disable_pclk;
		}
	}

	/*
	 * Create a static mapping between the CSI virtual channels
	 * and the output stream.
	 *
	 * This should be enhanced, but v4l2 lacks support for
	 * changing that mapping dynamically.
	 *
	 * We also cannot enable and disable independent streams here,
	 * hence the reference counting.
	 */
	for (i = 0; i < csi2rx->max_streams; i++) {
		ret = clk_prepare_enable(csi2rx->pixel_clk[i]);
		if (ret)
			goto err_disable_pixclk;

		reset_control_deassert(csi2rx->pixel_rst[i]);

		writel(CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF,
		       csi2rx->base + CSI2RX_STREAM_CFG_REG(i));

		/*
		 * Enable one virtual channel. When multiple virtual channels
		 * are supported this will have to be changed.
		 */
		writel(CSI2RX_STREAM_DATA_CFG_VC_SELECT(0),
		       csi2rx->base + CSI2RX_STREAM_DATA_CFG_REG(i));

		writel(CSI2RX_STREAM_CTRL_START,
		       csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
	}

	ret = clk_prepare_enable(csi2rx->sys_clk);
	if (ret)
		goto err_disable_pixclk;

	reset_control_deassert(csi2rx->sys_rst);

	ret = v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, true);
	if (ret)
		goto err_disable_sysclk;

	clk_disable_unprepare(csi2rx->p_clk);

	return 0;

err_disable_sysclk:
	clk_disable_unprepare(csi2rx->sys_clk);
err_disable_pixclk:
	for (; i > 0; i--) {
		reset_control_assert(csi2rx->pixel_rst[i - 1]);
		clk_disable_unprepare(csi2rx->pixel_clk[i - 1]);
	}

	if (csi2rx->dphy) {
		writel(0, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);
		phy_power_off(csi2rx->dphy);
	}
err_disable_pclk:
	clk_disable_unprepare(csi2rx->p_clk);

	return ret;
}

static void csi2rx_stop(struct csi2rx_priv *csi2rx)
{
	unsigned int i;
	u32 val;
	int ret;

	clk_prepare_enable(csi2rx->p_clk);
	reset_control_assert(csi2rx->sys_rst);
	clk_disable_unprepare(csi2rx->sys_clk);

	for (i = 0; i < csi2rx->max_streams; i++) {
		writel(CSI2RX_STREAM_CTRL_STOP,
		       csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));

		ret = readl_relaxed_poll_timeout(csi2rx->base +
						 CSI2RX_STREAM_STATUS_REG(i),
						 val,
						 !(val & CSI2RX_STREAM_STATUS_RDY),
						 10, 10000);
		if (ret)
			dev_warn(csi2rx->dev,
				 "Failed to stop streaming on pad%u\n", i);

		reset_control_assert(csi2rx->pixel_rst[i]);
		clk_disable_unprepare(csi2rx->pixel_clk[i]);
	}

	reset_control_assert(csi2rx->p_rst);
	clk_disable_unprepare(csi2rx->p_clk);

	if (v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, false))
		dev_warn(csi2rx->dev, "Couldn't disable our subdev\n");

	if (csi2rx->dphy) {
		writel(0, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);

		if (phy_power_off(csi2rx->dphy))
			dev_warn(csi2rx->dev, "Couldn't power off DPHY\n");
	}
}
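
/*
 * The controller cannot start and stop streams independently, so the
 * hardware is only touched when the first user starts streaming and when
 * the last user stops.
 */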
static int csi2rx_s_stream(struct v4l2_subdev *subdev, int enable)
{
	struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
	int ret = 0;

	mutex_lock(&csi2rx->lock);

	if (enable) {
		/*
		 * If we're not the first user, there's no need to
		 * enable the whole controller.
		 */
		if (!csi2rx->count) {
			ret = csi2rx_start(csi2rx);
			if (ret)
				goto out;
		}

		csi2rx->count++;
	} else {
		csi2rx->count--;

		/*
		 * Let the last user turn off the lights.
		 */
		if (!csi2rx->count)
			csi2rx_stop(csi2rx);
	}

out:
	mutex_unlock(&csi2rx->lock);
	return ret;
}

static int csi2rx_enum_mbus_code(struct v4l2_subdev *subdev,
				 struct v4l2_subdev_state *state,
				 struct v4l2_subdev_mbus_code_enum *code_enum)
{
	if (code_enum->index >= ARRAY_SIZE(formats))
		return -EINVAL;

	code_enum->code = formats[code_enum->index].code;

	return 0;
}

static int csi2rx_set_fmt(struct v4l2_subdev *subdev,
			  struct v4l2_subdev_state *state,
			  struct v4l2_subdev_format *format)
{
	struct v4l2_mbus_framefmt *fmt;
	unsigned int i;

	/* No transcoding, source and sink formats must match. */
	if (format->pad != CSI2RX_PAD_SINK)
		return v4l2_subdev_get_fmt(subdev, state, format);

	if (!csi2rx_get_fmt_by_code(format->format.code))
		format->format.code = formats[0].code;

	format->format.field = V4L2_FIELD_NONE;

	/* Set sink format */
	fmt = v4l2_subdev_state_get_format(state, format->pad);
	*fmt = format->format;

	/* Propagate to source formats */
	for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++) {
		fmt = v4l2_subdev_state_get_format(state, i);
		*fmt = format->format;
	}

	return 0;
}

static int csi2rx_init_state(struct v4l2_subdev *subdev,
			     struct v4l2_subdev_state *state)
{
	struct v4l2_subdev_format format = {
		.pad = CSI2RX_PAD_SINK,
		.format = {
			.width = 640,
			.height = 480,
			.code = MEDIA_BUS_FMT_UYVY8_1X16,
			.field = V4L2_FIELD_NONE,
			.colorspace = V4L2_COLORSPACE_SRGB,
			.ycbcr_enc = V4L2_YCBCR_ENC_601,
			.quantization = V4L2_QUANTIZATION_LIM_RANGE,
			.xfer_func = V4L2_XFER_FUNC_SRGB,
		},
	};

	return csi2rx_set_fmt(subdev, state, &format);
}

static const struct v4l2_subdev_pad_ops csi2rx_pad_ops = {
	.enum_mbus_code	= csi2rx_enum_mbus_code,
	.get_fmt	= v4l2_subdev_get_fmt,
	.set_fmt	= csi2rx_set_fmt,
};

static const struct v4l2_subdev_video_ops csi2rx_video_ops = {
	.s_stream	= csi2rx_s_stream,
};

static const struct v4l2_subdev_ops csi2rx_subdev_ops = {
	.video	= &csi2rx_video_ops,
	.pad	= &csi2rx_pad_ops,
};

static const struct v4l2_subdev_internal_ops csi2rx_internal_ops = {
	.init_state	= csi2rx_init_state,
};

static const struct media_entity_operations csi2rx_media_ops = {
	.link_validate	= v4l2_subdev_link_validate,
};

static int csi2rx_async_bound(struct v4l2_async_notifier *notifier,
			      struct v4l2_subdev *s_subdev,
			      struct v4l2_async_connection *asd)
{
	struct v4l2_subdev *subdev = notifier->sd;
	struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);

	csi2rx->source_pad = media_entity_get_fwnode_pad(&s_subdev->entity,
							 asd->match.fwnode,
							 MEDIA_PAD_FL_SOURCE);
	if (csi2rx->source_pad < 0) {
		dev_err(csi2rx->dev, "Couldn't find output pad for subdev %s\n",
			s_subdev->name);
		return csi2rx->source_pad;
	}

	csi2rx->source_subdev = s_subdev;

	dev_dbg(csi2rx->dev, "Bound %s pad: %d\n", s_subdev->name,
		csi2rx->source_pad);

	return media_create_pad_link(&csi2rx->source_subdev->entity,
				     csi2rx->source_pad,
				     &csi2rx->subdev.entity, 0,
				     MEDIA_LNK_FL_ENABLED |
				     MEDIA_LNK_FL_IMMUTABLE);
}

static const struct v4l2_async_notifier_operations csi2rx_notifier_ops = {
	.bound		= csi2rx_async_bound,
};
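
/*
 * Grab the register space, clocks, resets and the optional external D-PHY,
 * and read the lane/stream capabilities from the DEVICE_CFG register.
 */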
static int csi2rx_get_resources(struct csi2rx_priv *csi2rx,
				struct platform_device *pdev)
{
	unsigned char i;
	u32 dev_cfg;
	int ret;

	csi2rx->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(csi2rx->base))
		return PTR_ERR(csi2rx->base);

	csi2rx->sys_clk = devm_clk_get(&pdev->dev, "sys_clk");
	if (IS_ERR(csi2rx->sys_clk)) {
		dev_err(&pdev->dev, "Couldn't get sys clock\n");
		return PTR_ERR(csi2rx->sys_clk);
	}

	csi2rx->p_clk = devm_clk_get(&pdev->dev, "p_clk");
	if (IS_ERR(csi2rx->p_clk)) {
		dev_err(&pdev->dev, "Couldn't get P clock\n");
		return PTR_ERR(csi2rx->p_clk);
	}

	csi2rx->sys_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
								    "sys");
	if (IS_ERR(csi2rx->sys_rst))
		return PTR_ERR(csi2rx->sys_rst);

	csi2rx->p_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
								  "reg_bank");
	if (IS_ERR(csi2rx->p_rst))
		return PTR_ERR(csi2rx->p_rst);

	csi2rx->dphy = devm_phy_optional_get(&pdev->dev, "dphy");
	if (IS_ERR(csi2rx->dphy)) {
		dev_err(&pdev->dev, "Couldn't get external D-PHY\n");
		return PTR_ERR(csi2rx->dphy);
	}

	ret = clk_prepare_enable(csi2rx->p_clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't prepare and enable P clock\n");
		return ret;
	}

	dev_cfg = readl(csi2rx->base + CSI2RX_DEVICE_CFG_REG);
	clk_disable_unprepare(csi2rx->p_clk);

	csi2rx->max_lanes = dev_cfg & 7;
	if (csi2rx->max_lanes > CSI2RX_LANES_MAX) {
		dev_err(&pdev->dev, "Invalid number of lanes: %u\n",
			csi2rx->max_lanes);
		return -EINVAL;
	}

	csi2rx->max_streams = (dev_cfg >> 4) & 7;
	if (csi2rx->max_streams > CSI2RX_STREAMS_MAX) {
		dev_err(&pdev->dev, "Invalid number of streams: %u\n",
			csi2rx->max_streams);
		return -EINVAL;
	}

	csi2rx->has_internal_dphy = dev_cfg & BIT(3) ? true : false;

	/*
	 * FIXME: Once we have internal D-PHY support, this check
	 * will need to be removed.
	 */
	if (!csi2rx->dphy && csi2rx->has_internal_dphy) {
		dev_err(&pdev->dev, "Internal D-PHY not supported yet\n");
		return -EINVAL;
	}

	for (i = 0; i < csi2rx->max_streams; i++) {
		char name[16];

		snprintf(name, sizeof(name), "pixel_if%u_clk", i);
		csi2rx->pixel_clk[i] = devm_clk_get(&pdev->dev, name);
		if (IS_ERR(csi2rx->pixel_clk[i])) {
			dev_err(&pdev->dev, "Couldn't get clock %s\n", name);
			return PTR_ERR(csi2rx->pixel_clk[i]);
		}

		snprintf(name, sizeof(name), "pixel_if%u", i);
		csi2rx->pixel_rst[i] =
			devm_reset_control_get_optional_exclusive(&pdev->dev,
								  name);
		if (IS_ERR(csi2rx->pixel_rst[i]))
			return PTR_ERR(csi2rx->pixel_rst[i]);
	}

	return 0;
}

static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
{
	struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 };
	struct v4l2_async_connection *asd;
	struct fwnode_handle *fwh;
	struct device_node *ep;
	int ret;

	ep = of_graph_get_endpoint_by_regs(csi2rx->dev->of_node, 0, 0);
	if (!ep)
		return -EINVAL;

	fwh = of_fwnode_handle(ep);
	ret = v4l2_fwnode_endpoint_parse(fwh, &v4l2_ep);
	if (ret) {
		dev_err(csi2rx->dev, "Could not parse v4l2 endpoint\n");
		of_node_put(ep);
		return ret;
	}

	if (v4l2_ep.bus_type != V4L2_MBUS_CSI2_DPHY) {
		dev_err(csi2rx->dev, "Unsupported media bus type: 0x%x\n",
			v4l2_ep.bus_type);
		of_node_put(ep);
		return -EINVAL;
	}

	memcpy(csi2rx->lanes, v4l2_ep.bus.mipi_csi2.data_lanes,
	       sizeof(csi2rx->lanes));
	csi2rx->num_lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
	if (csi2rx->num_lanes > csi2rx->max_lanes) {
		dev_err(csi2rx->dev, "Unsupported number of data-lanes: %d\n",
			csi2rx->num_lanes);
		of_node_put(ep);
		return -EINVAL;
	}

	v4l2_async_subdev_nf_init(&csi2rx->notifier, &csi2rx->subdev);

	asd = v4l2_async_nf_add_fwnode_remote(&csi2rx->notifier, fwh,
					      struct v4l2_async_connection);
	of_node_put(ep);
	if (IS_ERR(asd)) {
		v4l2_async_nf_cleanup(&csi2rx->notifier);
		return PTR_ERR(asd);
	}

	csi2rx->notifier.ops = &csi2rx_notifier_ops;

	ret = v4l2_async_nf_register(&csi2rx->notifier);
	if (ret)
		v4l2_async_nf_cleanup(&csi2rx->notifier);

	return ret;
}

static int csi2rx_probe(struct platform_device *pdev)
{
	struct csi2rx_priv *csi2rx;
	unsigned int i;
	int ret;

	csi2rx = kzalloc(sizeof(*csi2rx), GFP_KERNEL);
	if (!csi2rx)
		return -ENOMEM;
	platform_set_drvdata(pdev, csi2rx);
	csi2rx->dev = &pdev->dev;
	mutex_init(&csi2rx->lock);

	ret = csi2rx_get_resources(csi2rx, pdev);
	if (ret)
		goto err_free_priv;

	ret = csi2rx_parse_dt(csi2rx);
	if (ret)
		goto err_free_priv;

	csi2rx->subdev.owner = THIS_MODULE;
	csi2rx->subdev.dev = &pdev->dev;
	v4l2_subdev_init(&csi2rx->subdev, &csi2rx_subdev_ops);
	csi2rx->subdev.internal_ops = &csi2rx_internal_ops;
	v4l2_set_subdevdata(&csi2rx->subdev, &pdev->dev);
	snprintf(csi2rx->subdev.name, sizeof(csi2rx->subdev.name),
		 "%s.%s", KBUILD_MODNAME, dev_name(&pdev->dev));

	/* Create our media pads */
	csi2rx->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	csi2rx->pads[CSI2RX_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
	for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++)
		csi2rx->pads[i].flags = MEDIA_PAD_FL_SOURCE;
	csi2rx->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	csi2rx->subdev.entity.ops = &csi2rx_media_ops;

	ret = media_entity_pads_init(&csi2rx->subdev.entity, CSI2RX_PAD_MAX,
				     csi2rx->pads);
	if (ret)
		goto err_cleanup;

	ret = v4l2_subdev_init_finalize(&csi2rx->subdev);
	if (ret)
		goto err_cleanup;

	ret = v4l2_async_register_subdev(&csi2rx->subdev);
	if (ret < 0)
		goto err_free_state;

	dev_info(&pdev->dev,
		 "Probed CSI2RX with %u/%u lanes, %u streams, %s D-PHY\n",
		 csi2rx->num_lanes, csi2rx->max_lanes, csi2rx->max_streams,
		 csi2rx->dphy ? "external" :
		 csi2rx->has_internal_dphy ? "internal" : "no");

	return 0;

err_free_state:
	v4l2_subdev_cleanup(&csi2rx->subdev);
err_cleanup:
	v4l2_async_nf_unregister(&csi2rx->notifier);
	v4l2_async_nf_cleanup(&csi2rx->notifier);
	media_entity_cleanup(&csi2rx->subdev.entity);
err_free_priv:
	kfree(csi2rx);
	return ret;
}

static void csi2rx_remove(struct platform_device *pdev)
{
	struct csi2rx_priv *csi2rx = platform_get_drvdata(pdev);

	v4l2_async_nf_unregister(&csi2rx->notifier);
	v4l2_async_nf_cleanup(&csi2rx->notifier);
	v4l2_async_unregister_subdev(&csi2rx->subdev);
	v4l2_subdev_cleanup(&csi2rx->subdev);
	media_entity_cleanup(&csi2rx->subdev.entity);
	kfree(csi2rx);
}

static const struct of_device_id csi2rx_of_table[] = {
	{ .compatible = "starfive,jh7110-csi2rx" },
	{ .compatible = "cdns,csi2rx" },
	{ },
};
MODULE_DEVICE_TABLE(of, csi2rx_of_table);

static struct platform_driver csi2rx_driver = {
	.probe	= csi2rx_probe,
	.remove	= csi2rx_remove,

	.driver	= {
		.name		= "cdns-csi2rx",
		.of_match_table	= csi2rx_of_table,
	},
};
module_platform_driver(csi2rx_driver);

MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin.com>");
MODULE_DESCRIPTION("Cadence CSI2-RX controller");
MODULE_LICENSE("GPL");