// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Cadence MIPI-CSI2 RX Controller v1.3
 *
 * Copyright (C) 2017 Cadence Design Systems Inc.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

/* Read-only device configuration: lane count, stream count, D-PHY presence */
#define CSI2RX_DEVICE_CFG_REG			0x000

#define CSI2RX_SOFT_RESET_REG			0x004
#define CSI2RX_SOFT_RESET_PROTOCOL		BIT(1)
#define CSI2RX_SOFT_RESET_FRONT			BIT(0)

#define CSI2RX_STATIC_CFG_REG			0x008
/* Maps logical lane 'llane' to physical lane 'plane' (4 bits per lane) */
#define CSI2RX_STATIC_CFG_DLANE_MAP(llane, plane)	((plane) << (16 + (llane) * 4))
#define CSI2RX_STATIC_CFG_LANES_MASK		GENMASK(11, 8)

#define CSI2RX_DPHY_LANE_CTRL_REG		0x40
#define CSI2RX_DPHY_CL_RST			BIT(16)
#define CSI2RX_DPHY_DL_RST(i)			BIT((i) + 12)
#define CSI2RX_DPHY_CL_EN			BIT(4)
#define CSI2RX_DPHY_DL_EN(i)			BIT(i)

/* Per-stream register banks start at 0x100 and are 0x100 apart */
#define CSI2RX_STREAM_BASE(n)		(((n) + 1) * 0x100)

#define CSI2RX_STREAM_CTRL_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x000)
#define CSI2RX_STREAM_CTRL_SOFT_RST		BIT(4)
#define CSI2RX_STREAM_CTRL_STOP			BIT(1)
#define CSI2RX_STREAM_CTRL_START		BIT(0)

#define CSI2RX_STREAM_STATUS_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x004)
#define CSI2RX_STREAM_STATUS_RDY		BIT(31)

#define CSI2RX_STREAM_DATA_CFG_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x008)
#define CSI2RX_STREAM_DATA_CFG_VC_SELECT(n)	BIT((n) + 16)

#define CSI2RX_STREAM_CFG_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x00c)
#define CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF	(1 << 8)

#define CSI2RX_LANES_MAX	4
#define CSI2RX_STREAMS_MAX	4

#define CSI2RX_ERROR_IRQS_REG			0x28
#define CSI2RX_ERROR_IRQS_MASK_REG		0x2C

#define CSI2RX_STREAM3_FIFO_OVERFLOW_IRQ	BIT(19)
#define CSI2RX_STREAM2_FIFO_OVERFLOW_IRQ	BIT(18)
#define CSI2RX_STREAM1_FIFO_OVERFLOW_IRQ	BIT(17)
#define CSI2RX_STREAM0_FIFO_OVERFLOW_IRQ	BIT(16)
#define CSI2RX_FRONT_TRUNC_HDR_IRQ		BIT(12)
#define CSI2RX_PROT_TRUNCATED_PACKET_IRQ	BIT(11)
#define CSI2RX_FRONT_LP_NO_PAYLOAD_IRQ		BIT(10)
#define CSI2RX_SP_INVALID_RCVD_IRQ		BIT(9)
#define CSI2RX_DATA_ID_IRQ			BIT(7)
#define CSI2RX_HEADER_CORRECTED_ECC_IRQ		BIT(6)
#define CSI2RX_HEADER_ECC_IRQ			BIT(5)
#define CSI2RX_PAYLOAD_CRC_IRQ			BIT(4)

/* Aggregate masks always enabled in the error IRQ mask register */
#define CSI2RX_ECC_ERRORS		GENMASK(7, 4)
#define CSI2RX_PACKET_ERRORS		GENMASK(12, 9)

/* One sink pad (CSI-2 input) plus one source pad per output stream */
enum csi2rx_pads {
	CSI2RX_PAD_SINK,
	CSI2RX_PAD_SOURCE_STREAM0,
	CSI2RX_PAD_SOURCE_STREAM1,
	CSI2RX_PAD_SOURCE_STREAM2,
	CSI2RX_PAD_SOURCE_STREAM3,
	CSI2RX_PAD_MAX,
};

struct csi2rx_fmt {
	u32				code;	/* media bus format code */
	u8				bpp;	/* bits per pixel on the bus */
};

/* Pairs an error IRQ bit with a human-readable description for log_status */
struct csi2rx_event {
	u32 mask;
	const char *name;
};

static const struct csi2rx_event csi2rx_events[] = {
	{ CSI2RX_STREAM3_FIFO_OVERFLOW_IRQ, "Overflow of the Stream 3 FIFO detected" },
	{ CSI2RX_STREAM2_FIFO_OVERFLOW_IRQ, "Overflow of the Stream 2 FIFO detected" },
	{ CSI2RX_STREAM1_FIFO_OVERFLOW_IRQ, "Overflow of the Stream 1 FIFO detected" },
	{ CSI2RX_STREAM0_FIFO_OVERFLOW_IRQ, "Overflow of the Stream 0 FIFO detected" },
	{ CSI2RX_FRONT_TRUNC_HDR_IRQ, "A truncated header [short or long] has been received" },
	{ CSI2RX_PROT_TRUNCATED_PACKET_IRQ, "A truncated long packet has been received" },
	{ CSI2RX_FRONT_LP_NO_PAYLOAD_IRQ, "A truncated long packet has been received. No payload" },
	{ CSI2RX_SP_INVALID_RCVD_IRQ, "A reserved or invalid short packet has been received" },
	{ CSI2RX_DATA_ID_IRQ, "Data ID error in the header packet" },
	{ CSI2RX_HEADER_CORRECTED_ECC_IRQ, "ECC error detected and corrected" },
	{ CSI2RX_HEADER_ECC_IRQ, "Unrecoverable ECC error" },
	{ CSI2RX_PAYLOAD_CRC_IRQ, "CRC error" },
};

#define CSI2RX_NUM_EVENTS		ARRAY_SIZE(csi2rx_events)

struct csi2rx_priv {
	struct device			*dev;
	/* Number of active stream users; the controller is started on the
	 * first user and stopped when the count drops back to zero. */
	unsigned int			count;
	/* Optional "error_irq" interrupt; negative when not provided */
	int				error_irq;

	/*
	 * Used to prevent race conditions between multiple,
	 * concurrent calls to start and stop.
	 */
	struct mutex			lock;

	void __iomem			*base;
	struct clk			*sys_clk;
	struct clk			*p_clk;	/* register-bank (APB) clock */
	struct clk			*pixel_clk[CSI2RX_STREAMS_MAX];
	struct reset_control		*sys_rst;
	struct reset_control		*p_rst;
	struct reset_control		*pixel_rst[CSI2RX_STREAMS_MAX];
	struct phy			*dphy;	/* optional external D-PHY */

	/* 1-based physical lane numbers taken from the DT endpoint */
	u8				lanes[CSI2RX_LANES_MAX];
	u8				num_lanes;
	u8				max_lanes;	/* from DEVICE_CFG */
	u8				max_streams;	/* from DEVICE_CFG */
	bool				has_internal_dphy;
	/* Per-event error counters, incremented in the IRQ handler */
	u32				events[CSI2RX_NUM_EVENTS];

	struct v4l2_subdev		subdev;
	struct v4l2_async_notifier	notifier;
	struct media_pad		pads[CSI2RX_PAD_MAX];

	/* Remote source */
	struct v4l2_subdev		*source_subdev;
	int				source_pad;
};

static const struct csi2rx_fmt formats[] = {
	{ .code	= MEDIA_BUS_FMT_YUYV8_1X16, .bpp = 16, },
	{ .code	= MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 16, },
	{ .code	= MEDIA_BUS_FMT_YVYU8_1X16, .bpp = 16, },
	{ .code	= MEDIA_BUS_FMT_VYUY8_1X16, .bpp = 16, },
	{ .code	= MEDIA_BUS_FMT_SBGGR8_1X8, .bpp = 8, },
	{ .code	= MEDIA_BUS_FMT_SGBRG8_1X8, .bpp = 8, },
	{ .code	= MEDIA_BUS_FMT_SGRBG8_1X8, .bpp = 8, },
	{ .code	= MEDIA_BUS_FMT_SRGGB8_1X8, .bpp = 8, },
	{ .code	= MEDIA_BUS_FMT_Y8_1X8, .bpp = 8, },
	{ .code	= MEDIA_BUS_FMT_SBGGR10_1X10, .bpp = 10, },
	{ .code	= MEDIA_BUS_FMT_SGBRG10_1X10, .bpp = 10, },
	{ .code	= MEDIA_BUS_FMT_SGRBG10_1X10, .bpp = 10, },
	{ .code	= MEDIA_BUS_FMT_SRGGB10_1X10, .bpp = 10, },
	{ .code	= MEDIA_BUS_FMT_RGB565_1X16, .bpp = 16, },
	{ .code	= MEDIA_BUS_FMT_RGB888_1X24, .bpp = 24, },
	{ .code	= MEDIA_BUS_FMT_BGR888_1X24, .bpp = 24, },
};

/*
 * Build and program the error interrupt mask: ECC/packet errors are always
 * enabled, FIFO overflow is only enabled for streams whose source pad has an
 * active remote link.
 */
static void csi2rx_configure_error_irq_mask(void __iomem *base,
					    struct csi2rx_priv *csi2rx)
{
	u32 error_irq_mask = 0;

	error_irq_mask |= CSI2RX_ECC_ERRORS;
	error_irq_mask |= CSI2RX_PACKET_ERRORS;

	/*
	 * Iterate through all source pads and check if they are linked
	 * to an active remote pad. If an active remote pad is found,
	 * calculate the corresponding bit position and set it in
	 * mask, enabling the stream overflow error in the mask.
	 */
	for (int i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++) {
		struct media_pad *remote_pad;

		remote_pad = media_pad_remote_pad_first(&csi2rx->pads[i]);
		if (remote_pad) {
			int pad = i - CSI2RX_PAD_SOURCE_STREAM0;
			u32 bit_mask = CSI2RX_STREAM0_FIFO_OVERFLOW_IRQ << pad;

			error_irq_mask |= bit_mask;
		}
	}

	writel(error_irq_mask, base + CSI2RX_ERROR_IRQS_MASK_REG);
}

/*
 * Error interrupt handler: count every asserted-and-unmasked event, then
 * acknowledge all pending bits by writing the status back.
 */
static irqreturn_t csi2rx_irq_handler(int irq, void *dev_id)
{
	struct csi2rx_priv *csi2rx = dev_id;
	int i;
	u32 error_status, error_mask;

	error_status = readl(csi2rx->base + CSI2RX_ERROR_IRQS_REG);
	error_mask = readl(csi2rx->base + CSI2RX_ERROR_IRQS_MASK_REG);

	for (i = 0; i < CSI2RX_NUM_EVENTS; i++)
		if ((error_status & csi2rx_events[i].mask) &&
		    (error_mask & csi2rx_events[i].mask))
			csi2rx->events[i]++;

	/* Ack: write the observed status bits back to clear them */
	writel(error_status, csi2rx->base + CSI2RX_ERROR_IRQS_REG);

	return IRQ_HANDLED;
}

/* Look up a supported format by media bus code; NULL if unsupported. */
static const struct csi2rx_fmt *csi2rx_get_fmt_by_code(u32 code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); i++)
		if (formats[i].code == code)
			return &formats[i];

	return NULL;
}

static inline
struct csi2rx_priv *v4l2_subdev_to_csi2rx(struct v4l2_subdev *subdev)
{
	return container_of(subdev, struct csi2rx_priv, subdev);
}

/*
 * Soft-reset the whole controller (protocol + front end) and each stream,
 * hold the resets briefly, then release them. Requires p_clk to be running.
 */
static void csi2rx_reset(struct csi2rx_priv *csi2rx)
{
	unsigned int i;

	/* Reset module */
	writel(CSI2RX_SOFT_RESET_PROTOCOL | CSI2RX_SOFT_RESET_FRONT,
	       csi2rx->base + CSI2RX_SOFT_RESET_REG);
	/* Reset individual streams. */
	for (i = 0; i < csi2rx->max_streams; i++) {
		writel(CSI2RX_STREAM_CTRL_SOFT_RST,
		       csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
	}

	usleep_range(10, 20);

	/* Clear resets */
	writel(0, csi2rx->base + CSI2RX_SOFT_RESET_REG);
	for (i = 0; i < csi2rx->max_streams; i++)
		writel(0, csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
}

/*
 * Power on and configure the external D-PHY from the active sink format and
 * the source's link frequency. On configure failure the PHY is powered back
 * off before returning.
 */
static int csi2rx_configure_ext_dphy(struct csi2rx_priv *csi2rx)
{
	struct media_pad *src_pad =
		&csi2rx->source_subdev->entity.pads[csi2rx->source_pad];
	union phy_configure_opts opts = { };
	struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
	struct v4l2_subdev_format sd_fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = CSI2RX_PAD_SINK,
	};
	const struct csi2rx_fmt *fmt;
	s64 link_freq;
	int ret;

	ret = v4l2_subdev_call_state_active(&csi2rx->subdev, pad, get_fmt,
					    &sd_fmt);
	if (ret < 0)
		return ret;

	fmt = csi2rx_get_fmt_by_code(sd_fmt.format.code);

	/* 2 samples per clock: D-PHY transfers on both edges */
	link_freq = v4l2_get_link_freq(src_pad,
				       fmt->bpp, 2 * csi2rx->num_lanes);
	if (link_freq < 0)
		return link_freq;

	ret = phy_mipi_dphy_get_default_config_for_hsclk(link_freq,
							 csi2rx->num_lanes, cfg);
	if (ret)
		return ret;

	ret = phy_power_on(csi2rx->dphy);
	if (ret)
		return ret;

	ret = phy_configure(csi2rx->dphy, &opts);
	if (ret) {
		phy_power_off(csi2rx->dphy);
		return ret;
	}

	return 0;
}

/*
 * Bring the whole pipeline up: reset the controller, program the lane
 * mapping, configure the (optional) external D-PHY, start every stream,
 * then ask the remote source to start streaming. On any failure the
 * goto chain unwinds exactly what was enabled so far.
 */
static int csi2rx_start(struct csi2rx_priv *csi2rx)
{
	unsigned int i;
	unsigned long lanes_used = 0;
	u32 reg;
	int ret;

	ret = clk_prepare_enable(csi2rx->p_clk);
	if (ret)
		return ret;

	reset_control_deassert(csi2rx->p_rst);
	csi2rx_reset(csi2rx);

	/* The soft reset above cleared the mask; reprogram it if we have
	 * an error interrupt wired up. */
	if (csi2rx->error_irq >= 0)
		csi2rx_configure_error_irq_mask(csi2rx->base, csi2rx);

	reg = csi2rx->num_lanes << 8;
	for (i = 0; i < csi2rx->num_lanes; i++) {
		reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, csi2rx->lanes[i]);
		set_bit(csi2rx->lanes[i], &lanes_used);
	}

	/*
	 * Even the unused lanes need to be mapped. In order to avoid
	 * to map twice to the same physical lane, keep the lanes used
	 * in the previous loop, and only map unused physical lanes to
	 * the rest of our logical lanes.
	 *
	 * NOTE(review): idx is tracked in lanes_used but the value written
	 * into the map is i + 1 — confirm this matches the intended
	 * physical-lane assignment for non-identity DT lane mappings.
	 */
	for (i = csi2rx->num_lanes; i < csi2rx->max_lanes; i++) {
		unsigned int idx = find_first_zero_bit(&lanes_used,
						       csi2rx->max_lanes);
		set_bit(idx, &lanes_used);
		reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, i + 1);
	}

	writel(reg, csi2rx->base + CSI2RX_STATIC_CFG_REG);

	/* Enable DPHY clk and data lanes. */
	if (csi2rx->dphy) {
		reg = CSI2RX_DPHY_CL_EN | CSI2RX_DPHY_CL_RST;
		for (i = 0; i < csi2rx->num_lanes; i++) {
			/* lanes[] is 1-based; the register bits are 0-based */
			reg |= CSI2RX_DPHY_DL_EN(csi2rx->lanes[i] - 1);
			reg |= CSI2RX_DPHY_DL_RST(csi2rx->lanes[i] - 1);
		}

		writel(reg, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);

		ret = csi2rx_configure_ext_dphy(csi2rx);
		if (ret) {
			dev_err(csi2rx->dev,
				"Failed to configure external DPHY: %d\n", ret);
			goto err_disable_pclk;
		}
	}

	/*
	 * Create a static mapping between the CSI virtual channels
	 * and the output stream.
	 *
	 * This should be enhanced, but v4l2 lacks the support for
	 * changing that mapping dynamically.
	 *
	 * We also cannot enable and disable independent streams here,
	 * hence the reference counting.
	 */
	for (i = 0; i < csi2rx->max_streams; i++) {
		ret = clk_prepare_enable(csi2rx->pixel_clk[i]);
		if (ret)
			goto err_disable_pixclk;

		reset_control_deassert(csi2rx->pixel_rst[i]);

		writel(CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF,
		       csi2rx->base + CSI2RX_STREAM_CFG_REG(i));

		/*
		 * Enable one virtual channel. When multiple virtual channels
		 * are supported this will have to be changed.
		 */
		writel(CSI2RX_STREAM_DATA_CFG_VC_SELECT(0),
		       csi2rx->base + CSI2RX_STREAM_DATA_CFG_REG(i));

		writel(CSI2RX_STREAM_CTRL_START,
		       csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
	}

	ret = clk_prepare_enable(csi2rx->sys_clk);
	if (ret)
		goto err_disable_pixclk;

	reset_control_deassert(csi2rx->sys_rst);

	ret = v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, true);
	if (ret)
		goto err_disable_sysclk;

	/* Register access no longer needed while streaming */
	clk_disable_unprepare(csi2rx->p_clk);

	return 0;

err_disable_sysclk:
	clk_disable_unprepare(csi2rx->sys_clk);
err_disable_pixclk:
	/* i holds the number of streams successfully enabled so far */
	for (; i > 0; i--) {
		reset_control_assert(csi2rx->pixel_rst[i - 1]);
		clk_disable_unprepare(csi2rx->pixel_clk[i - 1]);
	}

	if (csi2rx->dphy) {
		writel(0, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);
		phy_power_off(csi2rx->dphy);
	}
err_disable_pclk:
	clk_disable_unprepare(csi2rx->p_clk);

	return ret;
}

/*
 * Tear the pipeline down: mask error IRQs, stop each stream (polling for
 * the ready bit to clear), release clocks/resets, stop the remote source,
 * and power off the external D-PHY if present. Failures are logged but do
 * not abort the teardown.
 */
static void csi2rx_stop(struct csi2rx_priv *csi2rx)
{
	unsigned int i;
	u32 val;
	int ret;

	/* Registers need p_clk; sys side goes down first */
	clk_prepare_enable(csi2rx->p_clk);
	reset_control_assert(csi2rx->sys_rst);
	clk_disable_unprepare(csi2rx->sys_clk);

	writel(0, csi2rx->base + CSI2RX_ERROR_IRQS_MASK_REG);

	for (i = 0; i < csi2rx->max_streams; i++) {
		writel(CSI2RX_STREAM_CTRL_STOP,
		       csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));

		/* Wait for the stream to report not-ready (max 10 ms) */
		ret = readl_relaxed_poll_timeout(csi2rx->base +
						 CSI2RX_STREAM_STATUS_REG(i),
						 val,
						 !(val & CSI2RX_STREAM_STATUS_RDY),
						 10, 10000);
		if (ret)
			dev_warn(csi2rx->dev,
				 "Failed to stop streaming on pad%u\n", i);

		reset_control_assert(csi2rx->pixel_rst[i]);
		clk_disable_unprepare(csi2rx->pixel_clk[i]);
	}

	reset_control_assert(csi2rx->p_rst);
	clk_disable_unprepare(csi2rx->p_clk);

	if (v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, false))
		dev_warn(csi2rx->dev, "Couldn't disable our subdev\n");

	if (csi2rx->dphy) {
		writel(0, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);

		if (phy_power_off(csi2rx->dphy))
			dev_warn(csi2rx->dev, "Couldn't power off DPHY\n");
	}
}

/* .log_status: dump the non-zero error event counters to the kernel log. */
static int csi2rx_log_status(struct v4l2_subdev *sd)
{
	struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(sd);
	unsigned int i;

	for (i = 0; i < CSI2RX_NUM_EVENTS; i++) {
		if (csi2rx->events[i])
			dev_info(csi2rx->dev, "%s events: %d\n",
				 csi2rx_events[i].name,
				 csi2rx->events[i]);
	}

	return 0;
}

/*
 * .s_stream: reference-counted start/stop. Only the first enable starts the
 * controller and only the last disable stops it, since the hardware cannot
 * start/stop individual streams independently.
 */
static int csi2rx_s_stream(struct v4l2_subdev *subdev, int enable)
{
	struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
	int ret = 0;

	mutex_lock(&csi2rx->lock);

	if (enable) {
		/*
		 * If we're not the first users, there's no need to
		 * enable the whole controller.
		 */
		if (!csi2rx->count) {
			ret = csi2rx_start(csi2rx);
			if (ret)
				goto out;
		}

		csi2rx->count++;
	} else {
		csi2rx->count--;

		/*
		 * Let the last user turn off the lights.
		 */
		if (!csi2rx->count)
			csi2rx_stop(csi2rx);
	}

out:
	mutex_unlock(&csi2rx->lock);
	return ret;
}

/* .enum_mbus_code: enumerate the static list of supported bus formats. */
static int csi2rx_enum_mbus_code(struct v4l2_subdev *subdev,
				 struct v4l2_subdev_state *state,
				 struct v4l2_subdev_mbus_code_enum *code_enum)
{
	if (code_enum->index >= ARRAY_SIZE(formats))
		return -EINVAL;

	code_enum->code = formats[code_enum->index].code;

	return 0;
}

/*
 * .set_fmt: set the sink format (falling back to the first supported code
 * for unknown ones) and propagate it unchanged to all source pads. Source
 * pads themselves are read-only.
 */
static int csi2rx_set_fmt(struct v4l2_subdev *subdev,
			  struct v4l2_subdev_state *state,
			  struct v4l2_subdev_format *format)
{
	struct v4l2_mbus_framefmt *fmt;
	unsigned int i;

	/* No transcoding, source and sink formats must match. */
	if (format->pad != CSI2RX_PAD_SINK)
		return v4l2_subdev_get_fmt(subdev, state, format);

	if (!csi2rx_get_fmt_by_code(format->format.code))
		format->format.code = formats[0].code;

	format->format.field = V4L2_FIELD_NONE;

	/* Set sink format */
	fmt = v4l2_subdev_state_get_format(state, format->pad);
	*fmt = format->format;

	/* Propagate to source formats */
	for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++) {
		fmt = v4l2_subdev_state_get_format(state, i);
		*fmt = format->format;
	}

	return 0;
}

/* .init_state: default all pads to 640x480 UYVY8_1X16, sRGB. */
static int csi2rx_init_state(struct v4l2_subdev *subdev,
			     struct v4l2_subdev_state *state)
{
	struct v4l2_subdev_format format = {
		.pad = CSI2RX_PAD_SINK,
		.format = {
			.width = 640,
			.height = 480,
			.code = MEDIA_BUS_FMT_UYVY8_1X16,
			.field = V4L2_FIELD_NONE,
			.colorspace = V4L2_COLORSPACE_SRGB,
			.ycbcr_enc = V4L2_YCBCR_ENC_601,
			.quantization = V4L2_QUANTIZATION_LIM_RANGE,
			.xfer_func = V4L2_XFER_FUNC_SRGB,
		},
	};

	return csi2rx_set_fmt(subdev, state, &format);
}

static const struct v4l2_subdev_pad_ops csi2rx_pad_ops = {
	.enum_mbus_code	= csi2rx_enum_mbus_code,
	.get_fmt	= v4l2_subdev_get_fmt,
	.set_fmt	= csi2rx_set_fmt,
};

static const struct v4l2_subdev_video_ops csi2rx_video_ops = {
	.s_stream	= csi2rx_s_stream,
};

static const struct v4l2_subdev_core_ops csi2rx_core_ops = {
	.log_status	= csi2rx_log_status,
};

static const struct v4l2_subdev_ops csi2rx_subdev_ops = {
	.core		= &csi2rx_core_ops,
	.video		= &csi2rx_video_ops,
	.pad		= &csi2rx_pad_ops,
};

static const struct v4l2_subdev_internal_ops csi2rx_internal_ops = {
	.init_state	= csi2rx_init_state,
};

static const struct media_entity_operations csi2rx_media_ops = {
	.link_validate = v4l2_subdev_link_validate,
};

/*
 * Async notifier .bound: record the remote source subdev and its output pad,
 * and create an immutable, enabled link from it to our sink pad.
 */
static int csi2rx_async_bound(struct v4l2_async_notifier *notifier,
			      struct v4l2_subdev *s_subdev,
			      struct v4l2_async_connection *asd)
{
	struct v4l2_subdev *subdev = notifier->sd;
	struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);

	csi2rx->source_pad = media_entity_get_fwnode_pad(&s_subdev->entity,
							 asd->match.fwnode,
							 MEDIA_PAD_FL_SOURCE);
	if (csi2rx->source_pad < 0) {
		dev_err(csi2rx->dev, "Couldn't find output pad for subdev %s\n",
			s_subdev->name);
		return csi2rx->source_pad;
	}

	csi2rx->source_subdev = s_subdev;

	dev_dbg(csi2rx->dev, "Bound %s pad: %d\n", s_subdev->name,
		csi2rx->source_pad);

	return media_create_pad_link(&csi2rx->source_subdev->entity,
				     csi2rx->source_pad,
				     &csi2rx->subdev.entity, 0,
				     MEDIA_LNK_FL_ENABLED |
				     MEDIA_LNK_FL_IMMUTABLE);
}

static const struct v4l2_async_notifier_operations csi2rx_notifier_ops = {
	.bound		= csi2rx_async_bound,
};

/*
 * Acquire MMIO, clocks, resets and the optional D-PHY, then read the device
 * configuration register to discover lane/stream counts and internal D-PHY
 * presence. All resources are devm-managed.
 */
static int csi2rx_get_resources(struct csi2rx_priv *csi2rx,
				struct platform_device *pdev)
{
	unsigned char i;
	u32 dev_cfg;
	int ret;

	csi2rx->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(csi2rx->base))
		return PTR_ERR(csi2rx->base);

	csi2rx->sys_clk = devm_clk_get(&pdev->dev, "sys_clk");
	if (IS_ERR(csi2rx->sys_clk)) {
		dev_err(&pdev->dev, "Couldn't get sys clock\n");
		return PTR_ERR(csi2rx->sys_clk);
	}

	csi2rx->p_clk = devm_clk_get(&pdev->dev, "p_clk");
	if (IS_ERR(csi2rx->p_clk)) {
		dev_err(&pdev->dev, "Couldn't get P clock\n");
		return PTR_ERR(csi2rx->p_clk);
	}

	csi2rx->sys_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
								    "sys");
	if (IS_ERR(csi2rx->sys_rst))
		return PTR_ERR(csi2rx->sys_rst);

	csi2rx->p_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
								  "reg_bank");
	if (IS_ERR(csi2rx->p_rst))
		return PTR_ERR(csi2rx->p_rst);

	csi2rx->dphy = devm_phy_optional_get(&pdev->dev, "dphy");
	if (IS_ERR(csi2rx->dphy)) {
		dev_err(&pdev->dev, "Couldn't get external D-PHY\n");
		return PTR_ERR(csi2rx->dphy);
	}

	/* p_clk must be running to read the config register */
	ret = clk_prepare_enable(csi2rx->p_clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't prepare and enable P clock\n");
		return ret;
	}

	dev_cfg = readl(csi2rx->base + CSI2RX_DEVICE_CFG_REG);
	clk_disable_unprepare(csi2rx->p_clk);

	csi2rx->max_lanes = dev_cfg & 7;
	if (csi2rx->max_lanes > CSI2RX_LANES_MAX) {
		dev_err(&pdev->dev, "Invalid number of lanes: %u\n",
			csi2rx->max_lanes);
		return -EINVAL;
	}

	csi2rx->max_streams = (dev_cfg >> 4) & 7;
	if (csi2rx->max_streams > CSI2RX_STREAMS_MAX) {
		dev_err(&pdev->dev, "Invalid number of streams: %u\n",
			csi2rx->max_streams);
		return -EINVAL;
	}

	csi2rx->has_internal_dphy = dev_cfg & BIT(3) ? true : false;

	/*
	 * FIXME: Once we'll have internal D-PHY support, the check
	 * will need to be removed.
	 */
	if (!csi2rx->dphy && csi2rx->has_internal_dphy) {
		dev_err(&pdev->dev, "Internal D-PHY not supported yet\n");
		return -EINVAL;
	}

	for (i = 0; i < csi2rx->max_streams; i++) {
		char name[16];

		snprintf(name, sizeof(name), "pixel_if%u_clk", i);
		csi2rx->pixel_clk[i] = devm_clk_get(&pdev->dev, name);
		if (IS_ERR(csi2rx->pixel_clk[i])) {
			dev_err(&pdev->dev, "Couldn't get clock %s\n", name);
			return PTR_ERR(csi2rx->pixel_clk[i]);
		}

		snprintf(name, sizeof(name), "pixel_if%u", i);
		csi2rx->pixel_rst[i] =
			devm_reset_control_get_optional_exclusive(&pdev->dev,
								  name);
		if (IS_ERR(csi2rx->pixel_rst[i]))
			return PTR_ERR(csi2rx->pixel_rst[i]);
	}

	return 0;
}

/*
 * Parse the DT endpoint (port 0, endpoint 0): validate the bus type, copy
 * the data-lane mapping, then set up and register the async notifier for
 * the remote source. Cleans up the notifier itself on failure.
 */
static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
{
	struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 };
	struct v4l2_async_connection *asd;
	struct fwnode_handle *fwh;
	struct device_node *ep;
	int ret;

	ep = of_graph_get_endpoint_by_regs(csi2rx->dev->of_node, 0, 0);
	if (!ep)
		return -EINVAL;

	fwh = of_fwnode_handle(ep);
	ret = v4l2_fwnode_endpoint_parse(fwh, &v4l2_ep);
	if (ret) {
		dev_err(csi2rx->dev, "Could not parse v4l2 endpoint\n");
		of_node_put(ep);
		return ret;
	}

	if (v4l2_ep.bus_type != V4L2_MBUS_CSI2_DPHY) {
		dev_err(csi2rx->dev, "Unsupported media bus type: 0x%x\n",
			v4l2_ep.bus_type);
		of_node_put(ep);
		return -EINVAL;
	}

	memcpy(csi2rx->lanes, v4l2_ep.bus.mipi_csi2.data_lanes,
	       sizeof(csi2rx->lanes));
	csi2rx->num_lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
	if (csi2rx->num_lanes > csi2rx->max_lanes) {
		dev_err(csi2rx->dev, "Unsupported number of data-lanes: %d\n",
			csi2rx->num_lanes);
		of_node_put(ep);
		return -EINVAL;
	}

	v4l2_async_subdev_nf_init(&csi2rx->notifier, &csi2rx->subdev);

	asd = v4l2_async_nf_add_fwnode_remote(&csi2rx->notifier, fwh,
					      struct v4l2_async_connection);
	of_node_put(ep);
	if (IS_ERR(asd)) {
		v4l2_async_nf_cleanup(&csi2rx->notifier);
		return PTR_ERR(asd);
	}

	csi2rx->notifier.ops = &csi2rx_notifier_ops;

	ret = v4l2_async_nf_register(&csi2rx->notifier);
	if (ret)
		v4l2_async_nf_cleanup(&csi2rx->notifier);

	return ret;
}

/*
 * Probe: allocate the private structure, discover resources, parse DT,
 * initialize the subdev/media entity, hook the optional error interrupt,
 * and register the subdev asynchronously.
 */
static int csi2rx_probe(struct platform_device *pdev)
{
	struct csi2rx_priv *csi2rx;
	unsigned int i;
	int ret;

	csi2rx = kzalloc(sizeof(*csi2rx), GFP_KERNEL);
	if (!csi2rx)
		return -ENOMEM;
	platform_set_drvdata(pdev, csi2rx);
	csi2rx->dev = &pdev->dev;
	mutex_init(&csi2rx->lock);

	ret = csi2rx_get_resources(csi2rx, pdev);
	if (ret)
		goto err_free_priv;

	ret = csi2rx_parse_dt(csi2rx);
	if (ret)
		goto err_free_priv;

	csi2rx->subdev.owner = THIS_MODULE;
	csi2rx->subdev.dev = &pdev->dev;
	v4l2_subdev_init(&csi2rx->subdev, &csi2rx_subdev_ops);
	csi2rx->subdev.internal_ops = &csi2rx_internal_ops;
	v4l2_set_subdevdata(&csi2rx->subdev, &pdev->dev);
	snprintf(csi2rx->subdev.name, sizeof(csi2rx->subdev.name),
		 "%s.%s", KBUILD_MODNAME, dev_name(&pdev->dev));

	/* Create our media pads */
	csi2rx->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	csi2rx->pads[CSI2RX_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
	for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++)
		csi2rx->pads[i].flags = MEDIA_PAD_FL_SOURCE;
	csi2rx->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	csi2rx->subdev.entity.ops = &csi2rx_media_ops;

	ret = media_entity_pads_init(&csi2rx->subdev.entity, CSI2RX_PAD_MAX,
				     csi2rx->pads);
	if (ret)
		goto err_cleanup;

	csi2rx->error_irq = platform_get_irq_byname_optional(pdev, "error_irq");

	if (csi2rx->error_irq < 0) {
		dev_dbg(csi2rx->dev, "Optional interrupt not defined, proceeding without it\n");
	} else {
		ret = devm_request_irq(csi2rx->dev, csi2rx->error_irq,
				       csi2rx_irq_handler, 0,
				       dev_name(&pdev->dev), csi2rx);
		if (ret) {
			dev_err(csi2rx->dev,
				"Unable to request interrupt: %d\n", ret);
			goto err_cleanup;
		}
	}

	ret = v4l2_subdev_init_finalize(&csi2rx->subdev);
	if (ret)
		goto err_cleanup;

	ret = v4l2_async_register_subdev(&csi2rx->subdev);
	if (ret < 0)
		goto err_free_state;

	dev_info(&pdev->dev,
		 "Probed CSI2RX with %u/%u lanes, %u streams, %s D-PHY\n",
		 csi2rx->num_lanes, csi2rx->max_lanes, csi2rx->max_streams,
		 csi2rx->dphy ? "external" :
		 csi2rx->has_internal_dphy ? "internal" : "no");

	return 0;

err_free_state:
	v4l2_subdev_cleanup(&csi2rx->subdev);
err_cleanup:
	v4l2_async_nf_unregister(&csi2rx->notifier);
	v4l2_async_nf_cleanup(&csi2rx->notifier);
	media_entity_cleanup(&csi2rx->subdev.entity);
err_free_priv:
	kfree(csi2rx);
	return ret;
}

/* Remove: undo probe in reverse order. */
static void csi2rx_remove(struct platform_device *pdev)
{
	struct csi2rx_priv *csi2rx = platform_get_drvdata(pdev);

	v4l2_async_nf_unregister(&csi2rx->notifier);
	v4l2_async_nf_cleanup(&csi2rx->notifier);
	v4l2_async_unregister_subdev(&csi2rx->subdev);
	v4l2_subdev_cleanup(&csi2rx->subdev);
	media_entity_cleanup(&csi2rx->subdev.entity);
	kfree(csi2rx);
}

static const struct of_device_id csi2rx_of_table[] = {
	{ .compatible = "starfive,jh7110-csi2rx" },
	{ .compatible = "cdns,csi2rx" },
	{ },
};
MODULE_DEVICE_TABLE(of, csi2rx_of_table);

static struct platform_driver csi2rx_driver = {
	.probe	= csi2rx_probe,
	.remove	= csi2rx_remove,

	.driver	= {
		.name		= "cdns-csi2rx",
		.of_match_table	= csi2rx_of_table,
	},
};
module_platform_driver(csi2rx_driver);
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin.com>");
MODULE_DESCRIPTION("Cadence CSI2-RX controller");
MODULE_LICENSE("GPL");