// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Cadence MIPI-CSI2 RX Controller v1.3
 *
 * Copyright (C) 2017 Cadence Design Systems Inc.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

#define CSI2RX_DEVICE_CFG_REG			0x000

#define CSI2RX_SOFT_RESET_REG			0x004
#define CSI2RX_SOFT_RESET_PROTOCOL		BIT(1)
#define CSI2RX_SOFT_RESET_FRONT			BIT(0)

#define CSI2RX_STATIC_CFG_REG			0x008
#define CSI2RX_STATIC_CFG_DLANE_MAP(llane, plane)	((plane) << (16 + (llane) * 4))
#define CSI2RX_STATIC_CFG_LANES_MASK		GENMASK(11, 8)

#define CSI2RX_DPHY_LANE_CTRL_REG		0x40
#define CSI2RX_DPHY_CL_RST			BIT(16)
#define CSI2RX_DPHY_DL_RST(i)			BIT((i) + 12)
#define CSI2RX_DPHY_CL_EN			BIT(4)
#define CSI2RX_DPHY_DL_EN(i)			BIT(i)

#define CSI2RX_STREAM_BASE(n)			(((n) + 1) * 0x100)

#define CSI2RX_STREAM_CTRL_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x000)
#define CSI2RX_STREAM_CTRL_SOFT_RST		BIT(4)
#define CSI2RX_STREAM_CTRL_STOP			BIT(1)
#define CSI2RX_STREAM_CTRL_START		BIT(0)

#define CSI2RX_STREAM_STATUS_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x004)
#define CSI2RX_STREAM_STATUS_RDY		BIT(31)

#define CSI2RX_STREAM_DATA_CFG_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x008)
#define CSI2RX_STREAM_DATA_CFG_VC_SELECT(n)	BIT((n) + 16)

#define CSI2RX_STREAM_CFG_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x00c)
#define CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF	(1 << 8)

#define CSI2RX_LANES_MAX	4
#define CSI2RX_STREAMS_MAX	4

enum csi2rx_pads {
	CSI2RX_PAD_SINK,
	CSI2RX_PAD_SOURCE_STREAM0,
	CSI2RX_PAD_SOURCE_STREAM1,
	CSI2RX_PAD_SOURCE_STREAM2,
	CSI2RX_PAD_SOURCE_STREAM3,
	CSI2RX_PAD_MAX,
};

struct csi2rx_fmt {
	u32	code;
	u8	bpp;
};

struct csi2rx_priv {
	struct device			*dev;
	unsigned int			count;

	/*
	 * Used to prevent race conditions between multiple,
	 * concurrent calls to start and stop.
	 */
	struct mutex			lock;

	void __iomem			*base;
	struct clk			*sys_clk;
	struct clk			*p_clk;
	struct clk			*pixel_clk[CSI2RX_STREAMS_MAX];
	struct reset_control		*sys_rst;
	struct reset_control		*p_rst;
	struct reset_control		*pixel_rst[CSI2RX_STREAMS_MAX];
	struct phy			*dphy;

	u8				lanes[CSI2RX_LANES_MAX];
	u8				num_lanes;
	u8				max_lanes;
	u8				max_streams;
	bool				has_internal_dphy;

	struct v4l2_subdev		subdev;
	struct v4l2_async_notifier	notifier;
	struct media_pad		pads[CSI2RX_PAD_MAX];

	/* Remote source */
	struct v4l2_subdev		*source_subdev;
	int				source_pad;
};
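
/*
 * Formats supported on the sink pad. The bpp value is used to compute the
 * D-PHY link frequency when an external D-PHY needs to be configured.
 */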
static const struct csi2rx_fmt formats[] = {
	{ .code = MEDIA_BUS_FMT_YUYV8_1X16, .bpp = 16, },
	{ .code = MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 16, },
	{ .code = MEDIA_BUS_FMT_YVYU8_1X16, .bpp = 16, },
	{ .code = MEDIA_BUS_FMT_VYUY8_1X16, .bpp = 16, },
	{ .code = MEDIA_BUS_FMT_SBGGR8_1X8, .bpp = 8, },
	{ .code = MEDIA_BUS_FMT_SGBRG8_1X8, .bpp = 8, },
	{ .code = MEDIA_BUS_FMT_SGRBG8_1X8, .bpp = 8, },
	{ .code = MEDIA_BUS_FMT_SRGGB8_1X8, .bpp = 8, },
	{ .code = MEDIA_BUS_FMT_Y8_1X8, .bpp = 8, },
	{ .code = MEDIA_BUS_FMT_SBGGR10_1X10, .bpp = 10, },
	{ .code = MEDIA_BUS_FMT_SGBRG10_1X10, .bpp = 10, },
	{ .code = MEDIA_BUS_FMT_SGRBG10_1X10, .bpp = 10, },
	{ .code = MEDIA_BUS_FMT_SRGGB10_1X10, .bpp = 10, },
	{ .code = MEDIA_BUS_FMT_RGB565_1X16, .bpp = 16, },
	{ .code = MEDIA_BUS_FMT_RGB888_1X24, .bpp = 24, },
	{ .code = MEDIA_BUS_FMT_BGR888_1X24, .bpp = 24, },
};

static const struct csi2rx_fmt *csi2rx_get_fmt_by_code(u32 code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); i++)
		if (formats[i].code == code)
			return &formats[i];

	return NULL;
}

static inline
struct csi2rx_priv *v4l2_subdev_to_csi2rx(struct v4l2_subdev *subdev)
{
	return container_of(subdev, struct csi2rx_priv, subdev);
}
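
/*
 * Assert the protocol/front-end soft reset and the per-stream soft resets,
 * give the IP a moment to settle, then release them all.
 */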
static void csi2rx_reset(struct csi2rx_priv *csi2rx)
{
	unsigned int i;

	/* Reset module */
	writel(CSI2RX_SOFT_RESET_PROTOCOL | CSI2RX_SOFT_RESET_FRONT,
	       csi2rx->base + CSI2RX_SOFT_RESET_REG);
	/* Reset individual streams. */
	for (i = 0; i < csi2rx->max_streams; i++) {
		writel(CSI2RX_STREAM_CTRL_SOFT_RST,
		       csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
	}

	usleep_range(10, 20);

	/* Clear resets */
	writel(0, csi2rx->base + CSI2RX_SOFT_RESET_REG);
	for (i = 0; i < csi2rx->max_streams; i++)
		writel(0, csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
}

static int csi2rx_configure_ext_dphy(struct csi2rx_priv *csi2rx)
{
	union phy_configure_opts opts = { };
	struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
	struct v4l2_subdev_format sd_fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = CSI2RX_PAD_SINK,
	};
	const struct csi2rx_fmt *fmt;
	s64 link_freq;
	int ret;

	ret = v4l2_subdev_call_state_active(&csi2rx->subdev, pad, get_fmt,
					    &sd_fmt);
	if (ret < 0)
		return ret;

	fmt = csi2rx_get_fmt_by_code(sd_fmt.format.code);

	link_freq = v4l2_get_link_freq(csi2rx->source_subdev->ctrl_handler,
				       fmt->bpp, 2 * csi2rx->num_lanes);
	if (link_freq < 0)
		return link_freq;

	ret = phy_mipi_dphy_get_default_config_for_hsclk(link_freq,
							 csi2rx->num_lanes, cfg);
	if (ret)
		return ret;

	ret = phy_power_on(csi2rx->dphy);
	if (ret)
		return ret;

	ret = phy_configure(csi2rx->dphy, &opts);
	if (ret) {
		phy_power_off(csi2rx->dphy);
		return ret;
	}

	return 0;
}
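
/*
 * Power up the controller: program the lane mapping, bring up the optional
 * external D-PHY, start every output stream and finally ask the remote
 * source to start streaming.
 */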
static int csi2rx_start(struct csi2rx_priv *csi2rx)
{
	unsigned int i;
	unsigned long lanes_used = 0;
	u32 reg;
	int ret;

	ret = clk_prepare_enable(csi2rx->p_clk);
	if (ret)
		return ret;

	reset_control_deassert(csi2rx->p_rst);
	csi2rx_reset(csi2rx);

	reg = csi2rx->num_lanes << 8;
	for (i = 0; i < csi2rx->num_lanes; i++) {
		reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, csi2rx->lanes[i]);
		set_bit(csi2rx->lanes[i], &lanes_used);
	}

	/*
	 * Even the unused lanes need to be mapped. To avoid mapping the
	 * same physical lane twice, keep track of the lanes used in the
	 * previous loop, and only map unused physical lanes to the rest
	 * of our logical lanes.
	 */
	for (i = csi2rx->num_lanes; i < csi2rx->max_lanes; i++) {
		unsigned int idx = find_first_zero_bit(&lanes_used,
						       csi2rx->max_lanes);
		set_bit(idx, &lanes_used);
		reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, i + 1);
	}

	writel(reg, csi2rx->base + CSI2RX_STATIC_CFG_REG);

	/* Enable DPHY clk and data lanes. */
	if (csi2rx->dphy) {
		reg = CSI2RX_DPHY_CL_EN | CSI2RX_DPHY_CL_RST;
		for (i = 0; i < csi2rx->num_lanes; i++) {
			reg |= CSI2RX_DPHY_DL_EN(csi2rx->lanes[i] - 1);
			reg |= CSI2RX_DPHY_DL_RST(csi2rx->lanes[i] - 1);
		}

		writel(reg, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);

		ret = csi2rx_configure_ext_dphy(csi2rx);
		if (ret) {
			dev_err(csi2rx->dev,
				"Failed to configure external DPHY: %d\n", ret);
			goto err_disable_pclk;
		}
	}

	/*
	 * Create a static mapping between the CSI virtual channels
	 * and the output streams.
	 *
	 * This should be enhanced, but v4l2 lacks support for changing
	 * that mapping dynamically.
	 *
	 * We also cannot enable and disable independent streams here,
	 * hence the reference counting.
	 */
	for (i = 0; i < csi2rx->max_streams; i++) {
		ret = clk_prepare_enable(csi2rx->pixel_clk[i]);
		if (ret)
			goto err_disable_pixclk;

		reset_control_deassert(csi2rx->pixel_rst[i]);

		writel(CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF,
		       csi2rx->base + CSI2RX_STREAM_CFG_REG(i));

		/*
		 * Enable one virtual channel. When multiple virtual channels
		 * are supported this will have to be changed.
		 */
		writel(CSI2RX_STREAM_DATA_CFG_VC_SELECT(0),
		       csi2rx->base + CSI2RX_STREAM_DATA_CFG_REG(i));

		writel(CSI2RX_STREAM_CTRL_START,
		       csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
	}

	ret = clk_prepare_enable(csi2rx->sys_clk);
	if (ret)
		goto err_disable_pixclk;

	reset_control_deassert(csi2rx->sys_rst);

	ret = v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, true);
	if (ret)
		goto err_disable_sysclk;

	clk_disable_unprepare(csi2rx->p_clk);

	return 0;

err_disable_sysclk:
	clk_disable_unprepare(csi2rx->sys_clk);
err_disable_pixclk:
	for (; i > 0; i--) {
		reset_control_assert(csi2rx->pixel_rst[i - 1]);
		clk_disable_unprepare(csi2rx->pixel_clk[i - 1]);
	}

	if (csi2rx->dphy) {
		writel(0, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);
		phy_power_off(csi2rx->dphy);
	}
err_disable_pclk:
	clk_disable_unprepare(csi2rx->p_clk);

	return ret;
}

static void csi2rx_stop(struct csi2rx_priv *csi2rx)
{
	unsigned int i;
	u32 val;
	int ret;

	clk_prepare_enable(csi2rx->p_clk);
	reset_control_assert(csi2rx->sys_rst);
	clk_disable_unprepare(csi2rx->sys_clk);

	for (i = 0; i < csi2rx->max_streams; i++) {
		writel(CSI2RX_STREAM_CTRL_STOP,
		       csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));

		ret = readl_relaxed_poll_timeout(csi2rx->base +
						 CSI2RX_STREAM_STATUS_REG(i),
						 val,
						 !(val & CSI2RX_STREAM_STATUS_RDY),
						 10, 10000);
		if (ret)
			dev_warn(csi2rx->dev,
				 "Failed to stop streaming on pad%u\n", i);

		reset_control_assert(csi2rx->pixel_rst[i]);
		clk_disable_unprepare(csi2rx->pixel_clk[i]);
	}

	reset_control_assert(csi2rx->p_rst);
	clk_disable_unprepare(csi2rx->p_clk);

	if (v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, false))
		dev_warn(csi2rx->dev, "Couldn't disable our subdev\n");

	if (csi2rx->dphy) {
		writel(0, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);

		if (phy_power_off(csi2rx->dphy))
			dev_warn(csi2rx->dev, "Couldn't power off DPHY\n");
	}
}
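
/*
 * Streams cannot be enabled or disabled independently, so .s_stream is
 * reference counted: the first user powers the controller up, the last
 * one shuts it down.
 */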
static int csi2rx_s_stream(struct v4l2_subdev *subdev, int enable)
{
	struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
	int ret = 0;

	mutex_lock(&csi2rx->lock);

	if (enable) {
		/*
		 * If we're not the first user, there's no need to
		 * enable the whole controller.
		 */
		if (!csi2rx->count) {
			ret = csi2rx_start(csi2rx);
			if (ret)
				goto out;
		}

		csi2rx->count++;
	} else {
		csi2rx->count--;

		/* Let the last user turn off the lights. */
		if (!csi2rx->count)
			csi2rx_stop(csi2rx);
	}

out:
	mutex_unlock(&csi2rx->lock);
	return ret;
}

static int csi2rx_enum_mbus_code(struct v4l2_subdev *subdev,
				 struct v4l2_subdev_state *state,
				 struct v4l2_subdev_mbus_code_enum *code_enum)
{
	if (code_enum->index >= ARRAY_SIZE(formats))
		return -EINVAL;

	code_enum->code = formats[code_enum->index].code;

	return 0;
}

static int csi2rx_set_fmt(struct v4l2_subdev *subdev,
			  struct v4l2_subdev_state *state,
			  struct v4l2_subdev_format *format)
{
	struct v4l2_mbus_framefmt *fmt;
	unsigned int i;

	/* No transcoding, source and sink formats must match. */
	if (format->pad != CSI2RX_PAD_SINK)
		return v4l2_subdev_get_fmt(subdev, state, format);

	if (!csi2rx_get_fmt_by_code(format->format.code))
		format->format.code = formats[0].code;

	format->format.field = V4L2_FIELD_NONE;

	/* Set sink format */
	fmt = v4l2_subdev_state_get_format(state, format->pad);
	*fmt = format->format;

	/* Propagate to source formats */
	for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++) {
		fmt = v4l2_subdev_state_get_format(state, i);
		*fmt = format->format;
	}

	return 0;
}

static int csi2rx_init_state(struct v4l2_subdev *subdev,
			     struct v4l2_subdev_state *state)
{
	struct v4l2_subdev_format format = {
		.pad = CSI2RX_PAD_SINK,
		.format = {
			.width = 640,
			.height = 480,
			.code = MEDIA_BUS_FMT_UYVY8_1X16,
			.field = V4L2_FIELD_NONE,
			.colorspace = V4L2_COLORSPACE_SRGB,
			.ycbcr_enc = V4L2_YCBCR_ENC_601,
			.quantization = V4L2_QUANTIZATION_LIM_RANGE,
			.xfer_func = V4L2_XFER_FUNC_SRGB,
		},
	};

	return csi2rx_set_fmt(subdev, state, &format);
}

static const struct v4l2_subdev_pad_ops csi2rx_pad_ops = {
	.enum_mbus_code	= csi2rx_enum_mbus_code,
	.get_fmt	= v4l2_subdev_get_fmt,
	.set_fmt	= csi2rx_set_fmt,
};

static const struct v4l2_subdev_video_ops csi2rx_video_ops = {
	.s_stream	= csi2rx_s_stream,
};

static const struct v4l2_subdev_ops csi2rx_subdev_ops = {
	.video		= &csi2rx_video_ops,
	.pad		= &csi2rx_pad_ops,
};

static const struct v4l2_subdev_internal_ops csi2rx_internal_ops = {
	.init_state	= csi2rx_init_state,
};

static const struct media_entity_operations csi2rx_media_ops = {
	.link_validate	= v4l2_subdev_link_validate,
};

static int csi2rx_async_bound(struct v4l2_async_notifier *notifier,
			      struct v4l2_subdev *s_subdev,
			      struct v4l2_async_connection *asd)
{
	struct v4l2_subdev *subdev = notifier->sd;
	struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);

	csi2rx->source_pad = media_entity_get_fwnode_pad(&s_subdev->entity,
							 asd->match.fwnode,
							 MEDIA_PAD_FL_SOURCE);
	if (csi2rx->source_pad < 0) {
		dev_err(csi2rx->dev, "Couldn't find output pad for subdev %s\n",
			s_subdev->name);
		return csi2rx->source_pad;
	}

	csi2rx->source_subdev = s_subdev;

	dev_dbg(csi2rx->dev, "Bound %s pad: %d\n", s_subdev->name,
		csi2rx->source_pad);

	return media_create_pad_link(&csi2rx->source_subdev->entity,
				     csi2rx->source_pad,
				     &csi2rx->subdev.entity, 0,
				     MEDIA_LNK_FL_ENABLED |
				     MEDIA_LNK_FL_IMMUTABLE);
}

static const struct v4l2_async_notifier_operations csi2rx_notifier_ops = {
	.bound		= csi2rx_async_bound,
};
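
/*
 * Grab the register space, clocks, optional resets and optional external
 * D-PHY, then read DEVICE_CFG to discover how many lanes and streams this
 * instance of the controller supports.
 */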
static int csi2rx_get_resources(struct csi2rx_priv *csi2rx,
				struct platform_device *pdev)
{
	unsigned char i;
	u32 dev_cfg;
	int ret;

	csi2rx->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(csi2rx->base))
		return PTR_ERR(csi2rx->base);

	csi2rx->sys_clk = devm_clk_get(&pdev->dev, "sys_clk");
	if (IS_ERR(csi2rx->sys_clk)) {
		dev_err(&pdev->dev, "Couldn't get sys clock\n");
		return PTR_ERR(csi2rx->sys_clk);
	}

	csi2rx->p_clk = devm_clk_get(&pdev->dev, "p_clk");
	if (IS_ERR(csi2rx->p_clk)) {
		dev_err(&pdev->dev, "Couldn't get P clock\n");
		return PTR_ERR(csi2rx->p_clk);
	}

	csi2rx->sys_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
								    "sys");
	if (IS_ERR(csi2rx->sys_rst))
		return PTR_ERR(csi2rx->sys_rst);

	csi2rx->p_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
								  "reg_bank");
	if (IS_ERR(csi2rx->p_rst))
		return PTR_ERR(csi2rx->p_rst);

	csi2rx->dphy = devm_phy_optional_get(&pdev->dev, "dphy");
	if (IS_ERR(csi2rx->dphy)) {
		dev_err(&pdev->dev, "Couldn't get external D-PHY\n");
		return PTR_ERR(csi2rx->dphy);
	}

	ret = clk_prepare_enable(csi2rx->p_clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't prepare and enable P clock\n");
		return ret;
	}

	dev_cfg = readl(csi2rx->base + CSI2RX_DEVICE_CFG_REG);
	clk_disable_unprepare(csi2rx->p_clk);

	csi2rx->max_lanes = dev_cfg & 7;
	if (csi2rx->max_lanes > CSI2RX_LANES_MAX) {
		dev_err(&pdev->dev, "Invalid number of lanes: %u\n",
			csi2rx->max_lanes);
		return -EINVAL;
	}

	csi2rx->max_streams = (dev_cfg >> 4) & 7;
	if (csi2rx->max_streams > CSI2RX_STREAMS_MAX) {
		dev_err(&pdev->dev, "Invalid number of streams: %u\n",
			csi2rx->max_streams);
		return -EINVAL;
	}

	csi2rx->has_internal_dphy = dev_cfg & BIT(3) ? true : false;

	/*
	 * FIXME: Once we have internal D-PHY support, this check will
	 * need to be removed.
	 */
	if (!csi2rx->dphy && csi2rx->has_internal_dphy) {
		dev_err(&pdev->dev, "Internal D-PHY not supported yet\n");
		return -EINVAL;
	}

	for (i = 0; i < csi2rx->max_streams; i++) {
		char name[16];

		snprintf(name, sizeof(name), "pixel_if%u_clk", i);
		csi2rx->pixel_clk[i] = devm_clk_get(&pdev->dev, name);
		if (IS_ERR(csi2rx->pixel_clk[i])) {
			dev_err(&pdev->dev, "Couldn't get clock %s\n", name);
			return PTR_ERR(csi2rx->pixel_clk[i]);
		}

		snprintf(name, sizeof(name), "pixel_if%u", i);
		csi2rx->pixel_rst[i] =
			devm_reset_control_get_optional_exclusive(&pdev->dev,
								  name);
		if (IS_ERR(csi2rx->pixel_rst[i]))
			return PTR_ERR(csi2rx->pixel_rst[i]);
	}

	return 0;
}
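
/*
 * Parse the sink endpoint from the device tree and register an async
 * notifier to wait for the remote CSI-2 transmitter to be bound.
 */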
static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
{
	struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 };
	struct v4l2_async_connection *asd;
	struct fwnode_handle *fwh;
	struct device_node *ep;
	int ret;

	ep = of_graph_get_endpoint_by_regs(csi2rx->dev->of_node, 0, 0);
	if (!ep)
		return -EINVAL;

	fwh = of_fwnode_handle(ep);
	ret = v4l2_fwnode_endpoint_parse(fwh, &v4l2_ep);
	if (ret) {
		dev_err(csi2rx->dev, "Could not parse v4l2 endpoint\n");
		of_node_put(ep);
		return ret;
	}

	if (v4l2_ep.bus_type != V4L2_MBUS_CSI2_DPHY) {
		dev_err(csi2rx->dev, "Unsupported media bus type: 0x%x\n",
			v4l2_ep.bus_type);
		of_node_put(ep);
		return -EINVAL;
	}

	memcpy(csi2rx->lanes, v4l2_ep.bus.mipi_csi2.data_lanes,
	       sizeof(csi2rx->lanes));
	csi2rx->num_lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
	if (csi2rx->num_lanes > csi2rx->max_lanes) {
		dev_err(csi2rx->dev, "Unsupported number of data-lanes: %d\n",
			csi2rx->num_lanes);
		of_node_put(ep);
		return -EINVAL;
	}

	v4l2_async_subdev_nf_init(&csi2rx->notifier, &csi2rx->subdev);

	asd = v4l2_async_nf_add_fwnode_remote(&csi2rx->notifier, fwh,
					      struct v4l2_async_connection);
	of_node_put(ep);
	if (IS_ERR(asd)) {
		v4l2_async_nf_cleanup(&csi2rx->notifier);
		return PTR_ERR(asd);
	}

	csi2rx->notifier.ops = &csi2rx_notifier_ops;

	ret = v4l2_async_nf_register(&csi2rx->notifier);
	if (ret)
		v4l2_async_nf_cleanup(&csi2rx->notifier);

	return ret;
}
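
/*
 * Allocate the driver state, discover the controller capabilities, parse
 * the DT graph, then create the media pads and register the subdev
 * asynchronously.
 */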
static int csi2rx_probe(struct platform_device *pdev)
{
	struct csi2rx_priv *csi2rx;
	unsigned int i;
	int ret;

	csi2rx = kzalloc(sizeof(*csi2rx), GFP_KERNEL);
	if (!csi2rx)
		return -ENOMEM;
	platform_set_drvdata(pdev, csi2rx);
	csi2rx->dev = &pdev->dev;
	mutex_init(&csi2rx->lock);

	ret = csi2rx_get_resources(csi2rx, pdev);
	if (ret)
		goto err_free_priv;

	ret = csi2rx_parse_dt(csi2rx);
	if (ret)
		goto err_free_priv;

	csi2rx->subdev.owner = THIS_MODULE;
	csi2rx->subdev.dev = &pdev->dev;
	v4l2_subdev_init(&csi2rx->subdev, &csi2rx_subdev_ops);
	csi2rx->subdev.internal_ops = &csi2rx_internal_ops;
	v4l2_set_subdevdata(&csi2rx->subdev, &pdev->dev);
	snprintf(csi2rx->subdev.name, sizeof(csi2rx->subdev.name),
		 "%s.%s", KBUILD_MODNAME, dev_name(&pdev->dev));

	/* Create our media pads */
	csi2rx->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	csi2rx->pads[CSI2RX_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
	for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++)
		csi2rx->pads[i].flags = MEDIA_PAD_FL_SOURCE;
	csi2rx->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	csi2rx->subdev.entity.ops = &csi2rx_media_ops;

	ret = media_entity_pads_init(&csi2rx->subdev.entity, CSI2RX_PAD_MAX,
				     csi2rx->pads);
	if (ret)
		goto err_cleanup;

	ret = v4l2_subdev_init_finalize(&csi2rx->subdev);
	if (ret)
		goto err_cleanup;

	ret = v4l2_async_register_subdev(&csi2rx->subdev);
	if (ret < 0)
		goto err_free_state;

	dev_info(&pdev->dev,
		 "Probed CSI2RX with %u/%u lanes, %u streams, %s D-PHY\n",
		 csi2rx->num_lanes, csi2rx->max_lanes, csi2rx->max_streams,
		 csi2rx->dphy ? "external" :
		 csi2rx->has_internal_dphy ? "internal" : "no");

	return 0;

err_free_state:
	v4l2_subdev_cleanup(&csi2rx->subdev);
err_cleanup:
	v4l2_async_nf_unregister(&csi2rx->notifier);
	v4l2_async_nf_cleanup(&csi2rx->notifier);
	media_entity_cleanup(&csi2rx->subdev.entity);
err_free_priv:
	kfree(csi2rx);
	return ret;
}

static void csi2rx_remove(struct platform_device *pdev)
{
	struct csi2rx_priv *csi2rx = platform_get_drvdata(pdev);

	v4l2_async_nf_unregister(&csi2rx->notifier);
	v4l2_async_nf_cleanup(&csi2rx->notifier);
	v4l2_async_unregister_subdev(&csi2rx->subdev);
	v4l2_subdev_cleanup(&csi2rx->subdev);
	media_entity_cleanup(&csi2rx->subdev.entity);
	kfree(csi2rx);
}

static const struct of_device_id csi2rx_of_table[] = {
	{ .compatible = "starfive,jh7110-csi2rx" },
	{ .compatible = "cdns,csi2rx" },
	{ },
};
MODULE_DEVICE_TABLE(of, csi2rx_of_table);

static struct platform_driver csi2rx_driver = {
	.probe	= csi2rx_probe,
	.remove_new = csi2rx_remove,

	.driver	= {
		.name		= "cdns-csi2rx",
		.of_match_table	= csi2rx_of_table,
	},
};
module_platform_driver(csi2rx_driver);
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin.com>");
MODULE_DESCRIPTION("Cadence CSI2-RX controller");
MODULE_LICENSE("GPL");