1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Driver for Cadence MIPI-CSI2 RX Controller v1.3
4 *
5 * Copyright (C) 2017 Cadence Design Systems Inc.
6 */
7
8 #include <linux/bitfield.h>
9 #include <linux/clk.h>
10 #include <linux/delay.h>
11 #include <linux/export.h>
12 #include <linux/io.h>
13 #include <linux/iopoll.h>
14 #include <linux/module.h>
15 #include <linux/of.h>
16 #include <linux/of_graph.h>
17 #include <linux/phy/phy.h>
18 #include <linux/platform_device.h>
19 #include <linux/reset.h>
20 #include <linux/slab.h>
21
22 #include <media/cadence/cdns-csi2rx.h>
23 #include <media/v4l2-ctrls.h>
24 #include <media/v4l2-device.h>
25 #include <media/v4l2-fwnode.h>
26 #include <media/v4l2-subdev.h>
27
/* Device configuration register (read-only capability bits). */
#define CSI2RX_DEVICE_CFG_REG			0x000

#define CSI2RX_SOFT_RESET_REG			0x004
#define CSI2RX_SOFT_RESET_PROTOCOL		BIT(1)
#define CSI2RX_SOFT_RESET_FRONT			BIT(0)

#define CSI2RX_STATIC_CFG_REG			0x008
/* Map logical lane (llane) to physical lane (plane); 4 bits per lane. */
#define CSI2RX_STATIC_CFG_DLANE_MAP(llane, plane)	((plane) << (16 + (llane) * 4))
#define CSI2RX_STATIC_CFG_LANES_MASK		GENMASK(11, 8)

#define CSI2RX_DPHY_LANE_CTRL_REG		0x40
#define CSI2RX_DPHY_CL_RST			BIT(16)
#define CSI2RX_DPHY_DL_RST(i)			BIT((i) + 12)
#define CSI2RX_DPHY_CL_EN			BIT(4)
#define CSI2RX_DPHY_DL_EN(i)			BIT(i)

/* Per-stream register banks: one 0x100 bank per stream, starting at 0x100. */
#define CSI2RX_STREAM_BASE(n)		(((n) + 1) * 0x100)

#define CSI2RX_STREAM_CTRL_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x000)
#define CSI2RX_STREAM_CTRL_SOFT_RST		BIT(4)
#define CSI2RX_STREAM_CTRL_STOP			BIT(1)
#define CSI2RX_STREAM_CTRL_START		BIT(0)

#define CSI2RX_STREAM_STATUS_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x004)
#define CSI2RX_STREAM_STATUS_RDY		BIT(31)

#define CSI2RX_STREAM_DATA_CFG_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x008)
#define CSI2RX_STREAM_DATA_CFG_VC_SELECT(n)	BIT((n) + 16)

#define CSI2RX_STREAM_CFG_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x00c)
#define CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF	BIT(8)
#define CSI2RX_STREAM_CFG_NUM_PIXELS_MASK	GENMASK(5, 4)
/* Encode pixels-per-clock (1/2/4) as the register field value (0/1/2). */
#define CSI2RX_STREAM_CFG_NUM_PIXELS(n)		((n) >> 1U)

#define CSI2RX_LANES_MAX	4
#define CSI2RX_STREAMS_MAX	4

/* Error interrupt status and mask registers, plus their bits. */
#define CSI2RX_ERROR_IRQS_REG			0x28
#define CSI2RX_ERROR_IRQS_MASK_REG		0x2C

#define CSI2RX_STREAM3_FIFO_OVERFLOW_IRQ	BIT(19)
#define CSI2RX_STREAM2_FIFO_OVERFLOW_IRQ	BIT(18)
#define CSI2RX_STREAM1_FIFO_OVERFLOW_IRQ	BIT(17)
#define CSI2RX_STREAM0_FIFO_OVERFLOW_IRQ	BIT(16)
#define CSI2RX_FRONT_TRUNC_HDR_IRQ		BIT(12)
#define CSI2RX_PROT_TRUNCATED_PACKET_IRQ	BIT(11)
#define CSI2RX_FRONT_LP_NO_PAYLOAD_IRQ		BIT(10)
#define CSI2RX_SP_INVALID_RCVD_IRQ		BIT(9)
#define CSI2RX_DATA_ID_IRQ			BIT(7)
#define CSI2RX_HEADER_CORRECTED_ECC_IRQ		BIT(6)
#define CSI2RX_HEADER_ECC_IRQ			BIT(5)
#define CSI2RX_PAYLOAD_CRC_IRQ			BIT(4)

/* Groups of the above bits, used as a baseline interrupt mask. */
#define CSI2RX_ECC_ERRORS		GENMASK(7, 4)
#define CSI2RX_PACKET_ERRORS		GENMASK(12, 9)
83
/* Media pad indices: one sink (CSI-2 input) plus one source pad per stream. */
enum csi2rx_pads {
	CSI2RX_PAD_SINK,
	CSI2RX_PAD_SOURCE_STREAM0,
	CSI2RX_PAD_SOURCE_STREAM1,
	CSI2RX_PAD_SOURCE_STREAM2,
	CSI2RX_PAD_SOURCE_STREAM3,
	CSI2RX_PAD_MAX,
};
92
/* Description of one supported media bus format. */
struct csi2rx_fmt {
	u32 code;	/* MEDIA_BUS_FMT_* code */
	/* width of a single pixel on CSI-2 bus */
	u8 bpp;
	/* max pixels per clock supported on output bus */
	u8 max_pixels;
};
100
/* Maps an error interrupt bit to a human-readable description. */
struct csi2rx_event {
	u32 mask;	/* bit in CSI2RX_ERROR_IRQS_REG */
	const char *name;
};
105
/* Error events counted by the IRQ handler and reported by log_status. */
static const struct csi2rx_event csi2rx_events[] = {
	{ CSI2RX_STREAM3_FIFO_OVERFLOW_IRQ, "Overflow of the Stream 3 FIFO detected" },
	{ CSI2RX_STREAM2_FIFO_OVERFLOW_IRQ, "Overflow of the Stream 2 FIFO detected" },
	{ CSI2RX_STREAM1_FIFO_OVERFLOW_IRQ, "Overflow of the Stream 1 FIFO detected" },
	{ CSI2RX_STREAM0_FIFO_OVERFLOW_IRQ, "Overflow of the Stream 0 FIFO detected" },
	{ CSI2RX_FRONT_TRUNC_HDR_IRQ, "A truncated header [short or long] has been received" },
	{ CSI2RX_PROT_TRUNCATED_PACKET_IRQ, "A truncated long packet has been received" },
	{ CSI2RX_FRONT_LP_NO_PAYLOAD_IRQ, "A truncated long packet has been received. No payload" },
	{ CSI2RX_SP_INVALID_RCVD_IRQ, "A reserved or invalid short packet has been received" },
	{ CSI2RX_DATA_ID_IRQ, "Data ID error in the header packet" },
	{ CSI2RX_HEADER_CORRECTED_ECC_IRQ, "ECC error detected and corrected" },
	{ CSI2RX_HEADER_ECC_IRQ, "Unrecoverable ECC error" },
	{ CSI2RX_PAYLOAD_CRC_IRQ, "CRC error" },
};

#define CSI2RX_NUM_EVENTS		ARRAY_SIZE(csi2rx_events)
122
/* Per-instance driver state. */
struct csi2rx_priv {
	struct device *dev;
	unsigned int count;	/* number of active streaming users */
	int error_irq;		/* optional "error_irq"; negative if absent */

	/*
	 * Used to prevent race conditions between multiple,
	 * concurrent calls to start and stop.
	 */
	struct mutex lock;

	void __iomem *base;
	struct clk *sys_clk;
	struct clk *p_clk;	/* register bank clock */
	struct clk *pixel_clk[CSI2RX_STREAMS_MAX];
	struct reset_control *sys_rst;
	struct reset_control *p_rst;
	struct reset_control *pixel_rst[CSI2RX_STREAMS_MAX];
	struct phy *dphy;	/* optional external D-PHY */

	/* Per-stream NUM_PIXELS field values, set by negotiate_ppc. */
	u8 num_pixels[CSI2RX_STREAMS_MAX];
	u8 lanes[CSI2RX_LANES_MAX];	/* logical->physical lane map from DT */
	u8 num_lanes;		/* lanes used, from DT */
	u8 max_lanes;		/* lanes supported by this instance */
	u8 max_streams;		/* streams supported by this instance */
	bool has_internal_dphy;
	u32 events[CSI2RX_NUM_EVENTS];	/* error counters, see IRQ handler */

	struct v4l2_subdev subdev;
	struct v4l2_async_notifier notifier;
	struct media_pad pads[CSI2RX_PAD_MAX];

	/* Remote source */
	struct v4l2_subdev *source_subdev;
	int source_pad;
};
159
/* All bus formats supported on the sink (and mirrored on the sources). */
static const struct csi2rx_fmt formats[] = {
	{ .code	= MEDIA_BUS_FMT_YUYV8_1X16, .bpp = 16, .max_pixels = 2, },
	{ .code	= MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 16, .max_pixels = 2, },
	{ .code	= MEDIA_BUS_FMT_YVYU8_1X16, .bpp = 16, .max_pixels = 2, },
	{ .code	= MEDIA_BUS_FMT_VYUY8_1X16, .bpp = 16, .max_pixels = 2, },
	{ .code	= MEDIA_BUS_FMT_SBGGR8_1X8, .bpp = 8, .max_pixels = 4, },
	{ .code	= MEDIA_BUS_FMT_SGBRG8_1X8, .bpp = 8, .max_pixels = 4, },
	{ .code	= MEDIA_BUS_FMT_SGRBG8_1X8, .bpp = 8, .max_pixels = 4, },
	{ .code	= MEDIA_BUS_FMT_SRGGB8_1X8, .bpp = 8, .max_pixels = 4, },
	{ .code	= MEDIA_BUS_FMT_Y8_1X8, .bpp = 8, .max_pixels = 4, },
	{ .code	= MEDIA_BUS_FMT_SBGGR10_1X10, .bpp = 10, .max_pixels = 2, },
	{ .code	= MEDIA_BUS_FMT_SGBRG10_1X10, .bpp = 10, .max_pixels = 2, },
	{ .code	= MEDIA_BUS_FMT_SGRBG10_1X10, .bpp = 10, .max_pixels = 2, },
	{ .code	= MEDIA_BUS_FMT_SRGGB10_1X10, .bpp = 10, .max_pixels = 2, },
	{ .code	= MEDIA_BUS_FMT_RGB565_1X16, .bpp = 16, .max_pixels = 1, },
	{ .code	= MEDIA_BUS_FMT_RGB888_1X24, .bpp = 24, .max_pixels = 1, },
	{ .code	= MEDIA_BUS_FMT_BGR888_1X24, .bpp = 24, .max_pixels = 1, },
};
178
/*
 * Unmask the error interrupts we care about: ECC and packet errors
 * always, and the FIFO overflow interrupt of every stream whose source
 * pad is linked to an active remote pad.
 */
static void csi2rx_configure_error_irq_mask(void __iomem *base,
					    struct csi2rx_priv *csi2rx)
{
	u32 mask = CSI2RX_ECC_ERRORS | CSI2RX_PACKET_ERRORS;
	unsigned int pad;

	for (pad = CSI2RX_PAD_SOURCE_STREAM0; pad < CSI2RX_PAD_MAX; pad++) {
		unsigned int stream = pad - CSI2RX_PAD_SOURCE_STREAM0;

		if (media_pad_remote_pad_first(&csi2rx->pads[pad]))
			mask |= CSI2RX_STREAM0_FIFO_OVERFLOW_IRQ << stream;
	}

	writel(mask, base + CSI2RX_ERROR_IRQS_MASK_REG);
}
207
csi2rx_irq_handler(int irq,void * dev_id)208 static irqreturn_t csi2rx_irq_handler(int irq, void *dev_id)
209 {
210 struct csi2rx_priv *csi2rx = dev_id;
211 int i;
212 u32 error_status, error_mask;
213
214 error_status = readl(csi2rx->base + CSI2RX_ERROR_IRQS_REG);
215 error_mask = readl(csi2rx->base + CSI2RX_ERROR_IRQS_MASK_REG);
216
217 for (i = 0; i < CSI2RX_NUM_EVENTS; i++)
218 if ((error_status & csi2rx_events[i].mask) &&
219 (error_mask & csi2rx_events[i].mask))
220 csi2rx->events[i]++;
221
222 writel(error_status, csi2rx->base + CSI2RX_ERROR_IRQS_REG);
223
224 return IRQ_HANDLED;
225 }
226
/* Look up the format description for @code; NULL if unsupported. */
static const struct csi2rx_fmt *csi2rx_get_fmt_by_code(u32 code)
{
	const struct csi2rx_fmt *fmt = formats;
	const struct csi2rx_fmt *end = formats + ARRAY_SIZE(formats);

	for (; fmt < end; fmt++)
		if (fmt->code == code)
			return fmt;

	return NULL;
}
237
/* Convert an embedded &struct v4l2_subdev back to its csi2rx_priv. */
static inline
struct csi2rx_priv *v4l2_subdev_to_csi2rx(struct v4l2_subdev *subdev)
{
	return container_of(subdev, struct csi2rx_priv, subdev);
}
243
csi2rx_reset(struct csi2rx_priv * csi2rx)244 static void csi2rx_reset(struct csi2rx_priv *csi2rx)
245 {
246 unsigned int i;
247
248 /* Reset module */
249 writel(CSI2RX_SOFT_RESET_PROTOCOL | CSI2RX_SOFT_RESET_FRONT,
250 csi2rx->base + CSI2RX_SOFT_RESET_REG);
251 /* Reset individual streams. */
252 for (i = 0; i < csi2rx->max_streams; i++) {
253 writel(CSI2RX_STREAM_CTRL_SOFT_RST,
254 csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
255 }
256
257 usleep_range(10, 20);
258
259 /* Clear resets */
260 writel(0, csi2rx->base + CSI2RX_SOFT_RESET_REG);
261 for (i = 0; i < csi2rx->max_streams; i++)
262 writel(0, csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
263 }
264
csi2rx_configure_ext_dphy(struct csi2rx_priv * csi2rx)265 static int csi2rx_configure_ext_dphy(struct csi2rx_priv *csi2rx)
266 {
267 struct media_pad *src_pad =
268 &csi2rx->source_subdev->entity.pads[csi2rx->source_pad];
269 union phy_configure_opts opts = { };
270 struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
271 struct v4l2_subdev_format sd_fmt = {
272 .which = V4L2_SUBDEV_FORMAT_ACTIVE,
273 .pad = CSI2RX_PAD_SINK,
274 };
275 const struct csi2rx_fmt *fmt;
276 s64 link_freq;
277 int ret;
278
279 ret = v4l2_subdev_call_state_active(&csi2rx->subdev, pad, get_fmt,
280 &sd_fmt);
281 if (ret < 0)
282 return ret;
283
284 fmt = csi2rx_get_fmt_by_code(sd_fmt.format.code);
285
286 link_freq = v4l2_get_link_freq(src_pad,
287 fmt->bpp, 2 * csi2rx->num_lanes);
288 if (link_freq < 0)
289 return link_freq;
290
291 ret = phy_mipi_dphy_get_default_config_for_hsclk(link_freq,
292 csi2rx->num_lanes, cfg);
293 if (ret)
294 return ret;
295
296 ret = phy_power_on(csi2rx->dphy);
297 if (ret)
298 return ret;
299
300 ret = phy_configure(csi2rx->dphy, &opts);
301 if (ret) {
302 phy_power_off(csi2rx->dphy);
303 return ret;
304 }
305
306 return 0;
307 }
308
/*
 * Bring the whole controller up: lane mapping, optional external D-PHY,
 * per-stream clocks/resets/configuration, then start the remote source.
 * Called under csi2rx->lock by the first streaming user.
 */
static int csi2rx_start(struct csi2rx_priv *csi2rx)
{
	unsigned int i;
	unsigned long lanes_used = 0;
	u32 reg;
	int ret;

	/* The register bank clock must run while we touch registers. */
	ret = clk_prepare_enable(csi2rx->p_clk);
	if (ret)
		return ret;

	reset_control_deassert(csi2rx->p_rst);
	csi2rx_reset(csi2rx);

	/* Only unmask error interrupts if the optional IRQ was wired up. */
	if (csi2rx->error_irq >= 0)
		csi2rx_configure_error_irq_mask(csi2rx->base, csi2rx);

	/* Lane count (bits 11:8, see CSI2RX_STATIC_CFG_LANES_MASK). */
	reg = csi2rx->num_lanes << 8;
	for (i = 0; i < csi2rx->num_lanes; i++) {
		reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, csi2rx->lanes[i]);
		set_bit(csi2rx->lanes[i], &lanes_used);
	}

	/*
	 * Even the unused lanes need to be mapped. In order to avoid
	 * to map twice to the same physical lane, keep the lanes used
	 * in the previous loop, and only map unused physical lanes to
	 * the rest of our logical lanes.
	 */
	for (i = csi2rx->num_lanes; i < csi2rx->max_lanes; i++) {
		unsigned int idx = find_first_zero_bit(&lanes_used,
						       csi2rx->max_lanes);
		/*
		 * NOTE(review): idx is computed and marked used, but the
		 * mapping below uses i + 1 rather than idx — confirm this
		 * matches the intent described in the comment above.
		 */
		set_bit(idx, &lanes_used);
		reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, i + 1);
	}

	writel(reg, csi2rx->base + CSI2RX_STATIC_CFG_REG);

	/* Enable DPHY clk and data lanes. */
	if (csi2rx->dphy) {
		reg = CSI2RX_DPHY_CL_EN | CSI2RX_DPHY_CL_RST;
		for (i = 0; i < csi2rx->num_lanes; i++) {
			/* lanes[] entries are 1-based, DL bits are 0-based. */
			reg |= CSI2RX_DPHY_DL_EN(csi2rx->lanes[i] - 1);
			reg |= CSI2RX_DPHY_DL_RST(csi2rx->lanes[i] - 1);
		}

		writel(reg, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);

		ret = csi2rx_configure_ext_dphy(csi2rx);
		if (ret) {
			dev_err(csi2rx->dev,
				"Failed to configure external DPHY: %d\n", ret);
			goto err_disable_pclk;
		}
	}

	/*
	 * Create a static mapping between the CSI virtual channels
	 * and the output stream.
	 *
	 * This should be enhanced, but v4l2 lacks the support for
	 * changing that mapping dynamically.
	 *
	 * We also cannot enable and disable independent streams here,
	 * hence the reference counting.
	 */
	for (i = 0; i < csi2rx->max_streams; i++) {
		ret = clk_prepare_enable(csi2rx->pixel_clk[i]);
		if (ret)
			goto err_disable_pixclk;

		reset_control_deassert(csi2rx->pixel_rst[i]);

		/* Large FIFO mode plus the negotiated pixels-per-clock. */
		writel(CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF |
		       FIELD_PREP(CSI2RX_STREAM_CFG_NUM_PIXELS_MASK,
				  csi2rx->num_pixels[i]),
		       csi2rx->base + CSI2RX_STREAM_CFG_REG(i));

		/*
		 * Enable one virtual channel. When multiple virtual channels
		 * are supported this will have to be changed.
		 */
		writel(CSI2RX_STREAM_DATA_CFG_VC_SELECT(0),
		       csi2rx->base + CSI2RX_STREAM_DATA_CFG_REG(i));

		writel(CSI2RX_STREAM_CTRL_START,
		       csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
	}

	ret = clk_prepare_enable(csi2rx->sys_clk);
	if (ret)
		goto err_disable_pixclk;

	reset_control_deassert(csi2rx->sys_rst);

	/* Everything is set up locally; ask the source to start streaming. */
	ret = v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, true);
	if (ret)
		goto err_disable_sysclk;

	/* Register access is done, the register bank clock can stop. */
	clk_disable_unprepare(csi2rx->p_clk);

	return 0;

err_disable_sysclk:
	clk_disable_unprepare(csi2rx->sys_clk);
err_disable_pixclk:
	/* Undo only the streams that were successfully brought up. */
	for (; i > 0; i--) {
		reset_control_assert(csi2rx->pixel_rst[i - 1]);
		clk_disable_unprepare(csi2rx->pixel_clk[i - 1]);
	}

	if (csi2rx->dphy) {
		writel(0, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);
		phy_power_off(csi2rx->dphy);
	}
err_disable_pclk:
	clk_disable_unprepare(csi2rx->p_clk);

	return ret;
}
429
/*
 * Tear the controller down: stop every stream, gate the clocks, assert
 * the resets, then ask the remote source to stop and power the PHY off.
 * Called under csi2rx->lock by the last streaming user.
 */
static void csi2rx_stop(struct csi2rx_priv *csi2rx)
{
	unsigned int i;
	u32 val;
	int ret;

	/* Register access needs the register bank clock running. */
	clk_prepare_enable(csi2rx->p_clk);
	reset_control_assert(csi2rx->sys_rst);
	clk_disable_unprepare(csi2rx->sys_clk);

	/* Mask all error interrupts while shutting down. */
	writel(0, csi2rx->base + CSI2RX_ERROR_IRQS_MASK_REG);

	for (i = 0; i < csi2rx->max_streams; i++) {
		writel(CSI2RX_STREAM_CTRL_STOP,
		       csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));

		/* Wait for the stream to report it actually stopped. */
		ret = readl_relaxed_poll_timeout(csi2rx->base +
						 CSI2RX_STREAM_STATUS_REG(i),
						 val,
						 !(val & CSI2RX_STREAM_STATUS_RDY),
						 10, 10000);
		if (ret)
			dev_warn(csi2rx->dev,
				 "Failed to stop streaming on pad%u\n", i);

		reset_control_assert(csi2rx->pixel_rst[i]);
		clk_disable_unprepare(csi2rx->pixel_clk[i]);
	}

	reset_control_assert(csi2rx->p_rst);
	clk_disable_unprepare(csi2rx->p_clk);

	/* Stop the remote source; warn but carry on if that fails. */
	if (v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, false))
		dev_warn(csi2rx->dev, "Couldn't disable our subdev\n");

	if (csi2rx->dphy) {
		writel(0, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);

		if (phy_power_off(csi2rx->dphy))
			dev_warn(csi2rx->dev, "Couldn't power off DPHY\n");
	}
}
472
csi2rx_log_status(struct v4l2_subdev * sd)473 static int csi2rx_log_status(struct v4l2_subdev *sd)
474 {
475 struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(sd);
476 unsigned int i;
477
478 for (i = 0; i < CSI2RX_NUM_EVENTS; i++) {
479 if (csi2rx->events[i])
480 dev_info(csi2rx->dev, "%s events: %d\n",
481 csi2rx_events[i].name,
482 csi2rx->events[i]);
483 }
484
485 return 0;
486 }
487
csi2rx_s_stream(struct v4l2_subdev * subdev,int enable)488 static int csi2rx_s_stream(struct v4l2_subdev *subdev, int enable)
489 {
490 struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
491 int ret = 0;
492
493 mutex_lock(&csi2rx->lock);
494
495 if (enable) {
496 /*
497 * If we're not the first users, there's no need to
498 * enable the whole controller.
499 */
500 if (!csi2rx->count) {
501 ret = csi2rx_start(csi2rx);
502 if (ret)
503 goto out;
504 }
505
506 csi2rx->count++;
507 } else {
508 csi2rx->count--;
509
510 /*
511 * Let the last user turn off the lights.
512 */
513 if (!csi2rx->count)
514 csi2rx_stop(csi2rx);
515 }
516
517 out:
518 mutex_unlock(&csi2rx->lock);
519 return ret;
520 }
521
csi2rx_enum_mbus_code(struct v4l2_subdev * subdev,struct v4l2_subdev_state * state,struct v4l2_subdev_mbus_code_enum * code_enum)522 static int csi2rx_enum_mbus_code(struct v4l2_subdev *subdev,
523 struct v4l2_subdev_state *state,
524 struct v4l2_subdev_mbus_code_enum *code_enum)
525 {
526 if (code_enum->index >= ARRAY_SIZE(formats))
527 return -EINVAL;
528
529 code_enum->code = formats[code_enum->index].code;
530
531 return 0;
532 }
533
csi2rx_set_fmt(struct v4l2_subdev * subdev,struct v4l2_subdev_state * state,struct v4l2_subdev_format * format)534 static int csi2rx_set_fmt(struct v4l2_subdev *subdev,
535 struct v4l2_subdev_state *state,
536 struct v4l2_subdev_format *format)
537 {
538 struct v4l2_mbus_framefmt *fmt;
539 unsigned int i;
540
541 /* No transcoding, source and sink formats must match. */
542 if (format->pad != CSI2RX_PAD_SINK)
543 return v4l2_subdev_get_fmt(subdev, state, format);
544
545 if (!csi2rx_get_fmt_by_code(format->format.code))
546 format->format.code = formats[0].code;
547
548 format->format.field = V4L2_FIELD_NONE;
549
550 /* Set sink format */
551 fmt = v4l2_subdev_state_get_format(state, format->pad);
552 *fmt = format->format;
553
554 /* Propagate to source formats */
555 for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++) {
556 fmt = v4l2_subdev_state_get_format(state, i);
557 *fmt = format->format;
558 }
559
560 return 0;
561 }
562
/* Initialize the subdev state with a 640x480 UYVY default format. */
static int csi2rx_init_state(struct v4l2_subdev *subdev,
			     struct v4l2_subdev_state *state)
{
	struct v4l2_subdev_format format = {
		.pad = CSI2RX_PAD_SINK,
		.format = {
			.width = 640,
			.height = 480,
			.code = MEDIA_BUS_FMT_UYVY8_1X16,
			.field = V4L2_FIELD_NONE,
			.colorspace = V4L2_COLORSPACE_SRGB,
			.ycbcr_enc = V4L2_YCBCR_ENC_601,
			.quantization = V4L2_QUANTIZATION_LIM_RANGE,
			.xfer_func = V4L2_XFER_FUNC_SRGB,
		},
	};

	/* set_fmt propagates the sink format to every source pad. */
	return csi2rx_set_fmt(subdev, state, &format);
}
582
/**
 * cdns_csi2rx_negotiate_ppc - negotiate pixels-per-clock on a source pad
 * @subdev: CSI2RX subdev
 * @pad: source pad index (CSI2RX_PAD_SOURCE_STREAM0..CSI2RX_PAD_MAX-1)
 * @ppc: in/out requested pixels per clock; clamped down to what the
 *	 pad's current format supports
 *
 * Stores the resulting NUM_PIXELS register encoding for the stream, to
 * be programmed into CSI2RX_STREAM_CFG_REG on the next start.
 *
 * Return: 0 on success, -EINVAL on an invalid @pad or NULL @ppc.
 */
int cdns_csi2rx_negotiate_ppc(struct v4l2_subdev *subdev, unsigned int pad,
			      u8 *ppc)
{
	struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
	const struct csi2rx_fmt *csi_fmt;
	struct v4l2_subdev_state *state;
	struct v4l2_mbus_framefmt *fmt;

	if (!ppc || pad < CSI2RX_PAD_SOURCE_STREAM0 || pad >= CSI2RX_PAD_MAX)
		return -EINVAL;

	state = v4l2_subdev_lock_and_get_active_state(subdev);
	fmt = v4l2_subdev_state_get_format(state, pad);
	/* Source formats always come from the table, see csi2rx_set_fmt(). */
	csi_fmt = csi2rx_get_fmt_by_code(fmt->code);

	/* Reduce requested PPC if it is too high */
	*ppc = min(*ppc, csi_fmt->max_pixels);

	v4l2_subdev_unlock_state(state);

	csi2rx->num_pixels[pad - CSI2RX_PAD_SOURCE_STREAM0] =
		CSI2RX_STREAM_CFG_NUM_PIXELS(*ppc);

	return 0;
}
EXPORT_SYMBOL_FOR_MODULES(cdns_csi2rx_negotiate_ppc, "j721e-csi2rx");
609
/* Pad operations: format enumeration and get/set. */
static const struct v4l2_subdev_pad_ops csi2rx_pad_ops = {
	.enum_mbus_code	= csi2rx_enum_mbus_code,
	.get_fmt	= v4l2_subdev_get_fmt,
	.set_fmt	= csi2rx_set_fmt,
};

/* Video operations: reference-counted stream on/off. */
static const struct v4l2_subdev_video_ops csi2rx_video_ops = {
	.s_stream	= csi2rx_s_stream,
};

/* Core operations: error counter dump via VIDIOC_LOG_STATUS. */
static const struct v4l2_subdev_core_ops csi2rx_core_ops = {
	.log_status	= csi2rx_log_status,
};

static const struct v4l2_subdev_ops csi2rx_subdev_ops = {
	.core		= &csi2rx_core_ops,
	.video		= &csi2rx_video_ops,
	.pad		= &csi2rx_pad_ops,
};

static const struct v4l2_subdev_internal_ops csi2rx_internal_ops = {
	.init_state	= csi2rx_init_state,
};

static const struct media_entity_operations csi2rx_media_ops = {
	.link_validate = v4l2_subdev_link_validate,
	.get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
};
638
csi2rx_async_bound(struct v4l2_async_notifier * notifier,struct v4l2_subdev * s_subdev,struct v4l2_async_connection * asd)639 static int csi2rx_async_bound(struct v4l2_async_notifier *notifier,
640 struct v4l2_subdev *s_subdev,
641 struct v4l2_async_connection *asd)
642 {
643 struct v4l2_subdev *subdev = notifier->sd;
644 struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
645
646 csi2rx->source_pad = media_entity_get_fwnode_pad(&s_subdev->entity,
647 asd->match.fwnode,
648 MEDIA_PAD_FL_SOURCE);
649 if (csi2rx->source_pad < 0) {
650 dev_err(csi2rx->dev, "Couldn't find output pad for subdev %s\n",
651 s_subdev->name);
652 return csi2rx->source_pad;
653 }
654
655 csi2rx->source_subdev = s_subdev;
656
657 dev_dbg(csi2rx->dev, "Bound %s pad: %d\n", s_subdev->name,
658 csi2rx->source_pad);
659
660 return media_create_pad_link(&csi2rx->source_subdev->entity,
661 csi2rx->source_pad,
662 &csi2rx->subdev.entity, 0,
663 MEDIA_LNK_FL_ENABLED |
664 MEDIA_LNK_FL_IMMUTABLE);
665 }
666
/* Only .bound is needed: the link is created as soon as the source binds. */
static const struct v4l2_async_notifier_operations csi2rx_notifier_ops = {
	.bound		= csi2rx_async_bound,
};
670
/*
 * Map the registers, look up clocks, resets and the optional external
 * D-PHY, and read the hardware capability register to learn how many
 * lanes and streams this instance supports.
 */
static int csi2rx_get_resources(struct csi2rx_priv *csi2rx,
				struct platform_device *pdev)
{
	unsigned char i;
	u32 dev_cfg;
	int ret;

	csi2rx->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(csi2rx->base))
		return PTR_ERR(csi2rx->base);

	csi2rx->sys_clk = devm_clk_get(&pdev->dev, "sys_clk");
	if (IS_ERR(csi2rx->sys_clk)) {
		dev_err(&pdev->dev, "Couldn't get sys clock\n");
		return PTR_ERR(csi2rx->sys_clk);
	}

	csi2rx->p_clk = devm_clk_get(&pdev->dev, "p_clk");
	if (IS_ERR(csi2rx->p_clk)) {
		dev_err(&pdev->dev, "Couldn't get P clock\n");
		return PTR_ERR(csi2rx->p_clk);
	}

	/* Resets are optional: a missing entry yields a no-op handle. */
	csi2rx->sys_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
								    "sys");
	if (IS_ERR(csi2rx->sys_rst))
		return PTR_ERR(csi2rx->sys_rst);

	csi2rx->p_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
								  "reg_bank");
	if (IS_ERR(csi2rx->p_rst))
		return PTR_ERR(csi2rx->p_rst);

	csi2rx->dphy = devm_phy_optional_get(&pdev->dev, "dphy");
	if (IS_ERR(csi2rx->dphy)) {
		dev_err(&pdev->dev, "Couldn't get external D-PHY\n");
		return PTR_ERR(csi2rx->dphy);
	}

	/* The config register is only readable with the bank clock on. */
	ret = clk_prepare_enable(csi2rx->p_clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't prepare and enable P clock\n");
		return ret;
	}

	dev_cfg = readl(csi2rx->base + CSI2RX_DEVICE_CFG_REG);
	clk_disable_unprepare(csi2rx->p_clk);

	/* Bits [2:0]: number of lanes this instance was synthesized with. */
	csi2rx->max_lanes = dev_cfg & 7;
	if (csi2rx->max_lanes > CSI2RX_LANES_MAX) {
		dev_err(&pdev->dev, "Invalid number of lanes: %u\n",
			csi2rx->max_lanes);
		return -EINVAL;
	}

	/* Bits [6:4]: number of pixel streams. */
	csi2rx->max_streams = (dev_cfg >> 4) & 7;
	if (csi2rx->max_streams > CSI2RX_STREAMS_MAX) {
		dev_err(&pdev->dev, "Invalid number of streams: %u\n",
			csi2rx->max_streams);
		return -EINVAL;
	}

	/* Bit 3: an internal D-PHY is present. */
	csi2rx->has_internal_dphy = dev_cfg & BIT(3) ? true : false;

	/*
	 * FIXME: Once we'll have internal D-PHY support, the check
	 * will need to be removed.
	 */
	if (!csi2rx->dphy && csi2rx->has_internal_dphy) {
		dev_err(&pdev->dev, "Internal D-PHY not supported yet\n");
		return -EINVAL;
	}

	/* Per-stream pixel clocks and optional per-stream resets. */
	for (i = 0; i < csi2rx->max_streams; i++) {
		char name[16];

		snprintf(name, sizeof(name), "pixel_if%u_clk", i);
		csi2rx->pixel_clk[i] = devm_clk_get(&pdev->dev, name);
		if (IS_ERR(csi2rx->pixel_clk[i])) {
			dev_err(&pdev->dev, "Couldn't get clock %s\n", name);
			return PTR_ERR(csi2rx->pixel_clk[i]);
		}

		snprintf(name, sizeof(name), "pixel_if%u", i);
		csi2rx->pixel_rst[i] =
			devm_reset_control_get_optional_exclusive(&pdev->dev,
								  name);
		if (IS_ERR(csi2rx->pixel_rst[i]))
			return PTR_ERR(csi2rx->pixel_rst[i]);
	}

	return 0;
}
764
/*
 * Parse the sink endpoint (port 0, endpoint 0) from the device tree,
 * record the data lane mapping, and register an async notifier waiting
 * for the remote source subdev.
 */
static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
{
	struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 };
	struct v4l2_async_connection *asd;
	struct fwnode_handle *fwh;
	struct device_node *ep;
	int ret;

	ep = of_graph_get_endpoint_by_regs(csi2rx->dev->of_node, 0, 0);
	if (!ep)
		return -EINVAL;

	fwh = of_fwnode_handle(ep);
	ret = v4l2_fwnode_endpoint_parse(fwh, &v4l2_ep);
	if (ret) {
		dev_err(csi2rx->dev, "Could not parse v4l2 endpoint\n");
		of_node_put(ep);
		return ret;
	}

	/* Only a CSI-2 D-PHY bus makes sense for this IP. */
	if (v4l2_ep.bus_type != V4L2_MBUS_CSI2_DPHY) {
		dev_err(csi2rx->dev, "Unsupported media bus type: 0x%x\n",
			v4l2_ep.bus_type);
		of_node_put(ep);
		return -EINVAL;
	}

	/* Record the logical-to-physical data lane mapping from DT. */
	memcpy(csi2rx->lanes, v4l2_ep.bus.mipi_csi2.data_lanes,
	       sizeof(csi2rx->lanes));
	csi2rx->num_lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
	if (csi2rx->num_lanes > csi2rx->max_lanes) {
		dev_err(csi2rx->dev, "Unsupported number of data-lanes: %d\n",
			csi2rx->num_lanes);
		of_node_put(ep);
		return -EINVAL;
	}

	v4l2_async_subdev_nf_init(&csi2rx->notifier, &csi2rx->subdev);

	asd = v4l2_async_nf_add_fwnode_remote(&csi2rx->notifier, fwh,
					      struct v4l2_async_connection);
	of_node_put(ep);
	if (IS_ERR(asd)) {
		v4l2_async_nf_cleanup(&csi2rx->notifier);
		return PTR_ERR(asd);
	}

	csi2rx->notifier.ops = &csi2rx_notifier_ops;

	ret = v4l2_async_nf_register(&csi2rx->notifier);
	if (ret)
		v4l2_async_nf_cleanup(&csi2rx->notifier);

	return ret;
}
820
/*
 * Probe: allocate state, grab resources, parse DT, set up the subdev,
 * media pads and optional error interrupt, then register async.
 */
static int csi2rx_probe(struct platform_device *pdev)
{
	struct csi2rx_priv *csi2rx;
	unsigned int i;
	int ret;

	csi2rx = kzalloc_obj(*csi2rx);
	if (!csi2rx)
		return -ENOMEM;
	platform_set_drvdata(pdev, csi2rx);
	csi2rx->dev = &pdev->dev;
	mutex_init(&csi2rx->lock);

	ret = csi2rx_get_resources(csi2rx, pdev);
	if (ret)
		goto err_free_priv;

	/* This also registers the async notifier. */
	ret = csi2rx_parse_dt(csi2rx);
	if (ret)
		goto err_free_priv;

	csi2rx->subdev.owner = THIS_MODULE;
	csi2rx->subdev.dev = &pdev->dev;
	v4l2_subdev_init(&csi2rx->subdev, &csi2rx_subdev_ops);
	csi2rx->subdev.internal_ops = &csi2rx_internal_ops;
	v4l2_set_subdevdata(&csi2rx->subdev, &pdev->dev);
	snprintf(csi2rx->subdev.name, sizeof(csi2rx->subdev.name),
		 "%s.%s", KBUILD_MODNAME, dev_name(&pdev->dev));

	/* Create our media pads */
	csi2rx->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	csi2rx->pads[CSI2RX_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
	for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++)
		csi2rx->pads[i].flags = MEDIA_PAD_FL_SOURCE;
	csi2rx->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	csi2rx->subdev.entity.ops = &csi2rx_media_ops;

	ret = media_entity_pads_init(&csi2rx->subdev.entity, CSI2RX_PAD_MAX,
				     csi2rx->pads);
	if (ret)
		goto err_cleanup;

	/* The error interrupt is optional; run without it when absent. */
	csi2rx->error_irq = platform_get_irq_byname_optional(pdev, "error_irq");

	if (csi2rx->error_irq < 0) {
		dev_dbg(csi2rx->dev, "Optional interrupt not defined, proceeding without it\n");
	} else {
		ret = devm_request_irq(csi2rx->dev, csi2rx->error_irq,
				       csi2rx_irq_handler, 0,
				       dev_name(&pdev->dev), csi2rx);
		if (ret) {
			dev_err(csi2rx->dev,
				"Unable to request interrupt: %d\n", ret);
			goto err_cleanup;
		}
	}

	ret = v4l2_subdev_init_finalize(&csi2rx->subdev);
	if (ret)
		goto err_cleanup;

	ret = v4l2_async_register_subdev(&csi2rx->subdev);
	if (ret < 0)
		goto err_free_state;

	dev_info(&pdev->dev,
		 "Probed CSI2RX with %u/%u lanes, %u streams, %s D-PHY\n",
		 csi2rx->num_lanes, csi2rx->max_lanes, csi2rx->max_streams,
		 csi2rx->dphy ? "external" :
		 csi2rx->has_internal_dphy ? "internal" : "no");

	return 0;

err_free_state:
	v4l2_subdev_cleanup(&csi2rx->subdev);
err_cleanup:
	v4l2_async_nf_unregister(&csi2rx->notifier);
	v4l2_async_nf_cleanup(&csi2rx->notifier);
	media_entity_cleanup(&csi2rx->subdev.entity);
err_free_priv:
	kfree(csi2rx);
	return ret;
}
904
/* Remove: unregister everything set up by probe and free the state. */
static void csi2rx_remove(struct platform_device *pdev)
{
	struct csi2rx_priv *csi2rx = platform_get_drvdata(pdev);

	v4l2_async_nf_unregister(&csi2rx->notifier);
	v4l2_async_nf_cleanup(&csi2rx->notifier);
	v4l2_async_unregister_subdev(&csi2rx->subdev);
	v4l2_subdev_cleanup(&csi2rx->subdev);
	media_entity_cleanup(&csi2rx->subdev.entity);
	kfree(csi2rx);
}
916
static const struct of_device_id csi2rx_of_table[] = {
	{ .compatible = "starfive,jh7110-csi2rx" },
	{ .compatible = "cdns,csi2rx" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, csi2rx_of_table);

static struct platform_driver csi2rx_driver = {
	.probe	= csi2rx_probe,
	.remove	= csi2rx_remove,

	.driver	= {
		.name		= "cdns-csi2rx",
		.of_match_table	= csi2rx_of_table,
	},
};
module_platform_driver(csi2rx_driver);
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin.com>");
MODULE_DESCRIPTION("Cadence CSI2-RX controller");
MODULE_LICENSE("GPL");
937